newVersion = segment.getVersion(); } else { finalSegmentsToPublish.add(publishedSegment); timeline.add(publishedSegment.getInterval(), publishedSegment.getVersion(), publishedSegment.getShardSpec().createChunk(publishedSegment)); .put("end", segment.getInterval().getEnd().toString()) .put("partitioned", !(segment.getShardSpec() instanceof NoneShardSpec)) .put("version", segment.getVersion()) .put("used", true) .put("payload", JSON_MAPPER.writeValueAsBytes(segment))
private static VersionedIntervalTimeline<String, DataSegment> getTimelineForIntervalWithHandle(
    final Handle handle,
    final String dataSource,
    final Interval interval,
    final MetadataStorageTablesConfig dbTables
) throws IOException
{
  // Select the payloads of every used segment of this datasource whose
  // interval overlaps the requested one (string comparison on ISO-8601
  // timestamps), then assemble them into a versioned timeline.
  final Query<Map<String, Object>> sql = handle
      .createQuery(
          String.format(
              "SELECT payload FROM %s WHERE used = true AND dataSource = ? AND start <= ? AND \"end\" >= ?",
              dbTables.getSegmentsTable()
          )
      )
      .bind(0, dataSource)
      .bind(1, interval.getEnd().toString())
      .bind(2, interval.getStart().toString());

  final VersionedIntervalTimeline<String, DataSegment> result =
      new VersionedIntervalTimeline<>(Ordering.natural());

  // try-with-resources guarantees the JDBI iterator (and underlying
  // statement) is closed even if deserialization throws.
  try (ResultIterator<byte[]> rows = sql.map(ByteArrayMapper.FIRST).iterator()) {
    while (rows.hasNext()) {
      final DataSegment segment = JSON_MAPPER.readValue(rows.next(), DataSegment.class);
      result.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
  }
  return result;
}
DataSegment persistedSegment = Iterables.getOnlyElement(dataSegmentList); Assert.assertEquals(dataSegment, persistedSegment); Assert.assertEquals(dataSegment.getVersion(), persistedSegment.getVersion()); Path expectedFinalHadoopPath =
Assert.assertEquals("v0", persistedSegment.getVersion()); Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec); Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
Assert.assertEquals("v0", persistedSegment.getVersion()); Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec); Assert.assertEquals(1, persistedSegment.getShardSpec().getPartitionNum());
Assert.assertEquals("v0", persistedSegment.getVersion()); Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
@Override
public String apply(DataSegment input)
{
  // Project a segment onto its version string.
  final String version = input.getVersion();
  return version;
}
}
public boolean segmentsAreFromSamePartitionSet( final Set<DataSegment> segments ) { // Verify that these segments are all in the same partition set Preconditions.checkArgument(!segments.isEmpty(), "segments nonempty"); final DataSegment firstSegment = segments.iterator().next(); for (final DataSegment segment : segments) { if (!segment.getDataSource().equals(firstSegment.getDataSource()) || !segment.getInterval().equals(firstSegment.getInterval()) || !segment.getVersion().equals(firstSegment.getVersion())) { return false; } } return true; }
@Override
public boolean apply(TaskLock taskLock)
{
  // The lock's version must equal the segment's version (or be at least as
  // new, when older segment versions are allowed), and the lock must cover
  // the segment's datasource and interval.
  final boolean versionOk;
  if (allowOlderVersions) {
    versionOk = taskLock.getVersion().compareTo(segment.getVersion()) >= 0;
  } else {
    versionOk = taskLock.getVersion().equals(segment.getVersion());
  }
  if (!versionOk) {
    return false;
  }
  return taskLock.getDataSource().equals(segment.getDataSource())
         && taskLock.getInterval().contains(segment.getInterval());
}
}
/**
 * Builds a {@link SegmentIdentifier} from a segment's identifying coordinates:
 * dataSource, interval, version, and shard spec.
 *
 * @param segment segment to derive the identifier from
 *
 * @return identifier carrying the segment's coordinates
 */
public static SegmentIdentifier fromDataSegment(final DataSegment segment) { return new SegmentIdentifier( segment.getDataSource(), segment.getInterval(), segment.getVersion(), segment.getShardSpec() ); } }
@Override
public void publishSegment(final DataSegment segment) throws IOException
{
  // Persist the segment's metadata record, marked as used, with the current
  // UTC time as the creation date.
  publishSegment(
      segment.getIdentifier(),
      segment.getDataSource(),
      DateTimes.nowUtc().toString(),
      segment.getInterval().getStart().toString(),
      segment.getInterval().getEnd().toString(),
      // Fix: `(x instanceof NoneShardSpec) ? false : true` was a redundant
      // ternary — a segment is "partitioned" iff its spec is not NoneShardSpec.
      !(segment.getShardSpec() instanceof NoneShardSpec),
      segment.getVersion(),
      true,
      jsonMapper.writeValueAsBytes(segment)
  );
}
@Override
public String apply(DataSegment x)
{
  // Key a segment by interval start, interval end, version, and partition
  // number, joined with underscores.
  final Object[] parts = new Object[]{
      x.getInterval().getStart(),
      x.getInterval().getEnd(),
      x.getVersion(),
      x.getShardSpec().getPartitionNum()
  };
  return String.format("%s_%s_%s_%s", parts);
}
}
private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment) { String segmentId = segment.getIdentifier(); synchronized (lock) { log.debug("Adding segment[%s] for server[%s]", segment, server); SegmentLoadInfo segmentLoadInfo = segmentLoadInfos.get(segmentId); if (segmentLoadInfo == null) { // servers escape the scope of this object so use ConcurrentSet segmentLoadInfo = new SegmentLoadInfo(segment); VersionedIntervalTimeline<String, SegmentLoadInfo> timeline = timelines.get(segment.getDataSource()); if (timeline == null) { timeline = new VersionedIntervalTimeline<>(Ordering.natural()); timelines.put(segment.getDataSource(), timeline); } timeline.add( segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segmentLoadInfo) ); segmentLoadInfos.put(segmentId, segmentLoadInfo); } segmentLoadInfo.addServer(server); } }
private void serverAddedSegment(final DruidServer server, final DataSegment segment)
{
  final String segmentId = segment.getIdentifier();
  synchronized (lock) {
    log.debug("Adding segment[%s] for server[%s]", segment, server);

    ServerSelector existing = selectors.get(segmentId);
    if (existing == null) {
      // New segment: create a selector for it and index the selector in the
      // per-datasource timeline, creating the timeline if needed.
      existing = new ServerSelector(segment, serverSelectorStrategy);

      VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
      if (timeline == null) {
        timeline = new VersionedIntervalTimeline<String, ServerSelector>(Ordering.natural());
        timelines.put(segment.getDataSource(), timeline);
      }
      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(existing));
      selectors.put(segmentId, existing);
    }

    // Make sure the server has a queryable client wrapper, then attach it to
    // the segment's selector.
    QueryableDruidServer client = clients.get(server.getName());
    if (client == null) {
      client = addServer(server);
    }
    existing.addServer(client);
  }
}
@Override
public VersionedIntervalTimeline<String, DataSegment> withHandle(Handle handle) throws IOException
{
  // Build a timeline of every used segment of this datasource by reading and
  // deserializing each stored payload.
  final VersionedIntervalTimeline<String, DataSegment> timeline = new VersionedIntervalTimeline<String, DataSegment>(
      Ordering.natural()
  );

  final ResultIterator<Map<String, Object>> dbSegments = handle
      .createQuery(
          String.format(
              "SELECT payload FROM %s WHERE used = true AND dataSource = :dataSource",
              dbTables.getSegmentsTable()
          )
      )
      .bind("dataSource", dataSource)
      .iterator();
  try {
    while (dbSegments.hasNext()) {
      final Map<String, Object> dbSegment = dbSegments.next();
      DataSegment segment = jsonMapper.readValue(
          (String) dbSegment.get("payload"),
          DataSegment.class
      );
      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
  }
  finally {
    // Fix: close() was previously only reached on the success path, so an
    // IOException from readValue leaked the iterator's underlying
    // statement/result set. Always close it.
    dbSegments.close();
  }

  return timeline;
}
}
@Override
public Void withHandle(Handle handle) throws Exception
{
  // Insert the segment's metadata row, marked as used.
  // NOTE(review): new DateTime() stamps created_date in the JVM's default
  // time zone — confirm whether UTC was intended here.
  final String createdDate = new DateTime().toString();
  // 1 when the segment has a real shard spec, 0 for the unpartitioned
  // NoneShardSpec sentinel.
  final int partitioned = (segment.getShardSpec() instanceof NoneShardSpec) ? 0 : 1;

  handle.createStatement(statement)
        .bind("id", segment.getIdentifier())
        .bind("dataSource", segment.getDataSource())
        .bind("created_date", createdDate)
        .bind("start", segment.getInterval().getStart().toString())
        .bind("end", segment.getInterval().getEnd().toString())
        .bind("partitioned", partitioned)
        .bind("version", segment.getVersion())
        .bind("used", true)
        .bind("payload", jsonMapper.writeValueAsString(segment))
        .execute();
  return null;
}
}
@Override
public String apply(DataSegment x)
{
  // Key a segment by interval start, interval end, version, and partition
  // number, joined with underscores.
  final Object start = x.getInterval().getStart();
  final Object end = x.getInterval().getEnd();
  return StringUtils.format("%s_%s_%s_%s", start, end, x.getVersion(), x.getShardSpec().getPartitionNum());
}
}
private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment)
{
  final String segmentId = segment.getIdentifier();
  synchronized (lock) {
    log.debug("Adding segment[%s] for server[%s]", segment, server);

    ServerSelector selector = selectors.get(segmentId);
    if (selector == null) {
      // First time we see this segment: create its selector and register it
      // in the per-datasource timeline, creating the timeline if needed.
      selector = new ServerSelector(segment, tierSelectorStrategy);

      VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
      if (timeline == null) {
        timeline = new VersionedIntervalTimeline<>(Ordering.natural());
        timelines.put(segment.getDataSource(), timeline);
      }
      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(selector));
      selectors.put(segmentId, selector);
    }

    // Ensure a queryable client exists for the server, then attach it to the
    // selector along with the (possibly updated) segment.
    QueryableDruidServer client = clients.get(server.getName());
    if (client == null) {
      client = addServer(baseView.getInventoryValue(server.getName()));
    }
    selector.addServerAndUpdateSegment(client, segment);

    // Notify registered timeline listeners about the addition.
    runTimelineCallbacks(callback -> callback.segmentAdded(server, segment));
  }
}
static boolean isHandOffComplete(List<ImmutableSegmentLoadInfo> serverView, SegmentDescriptor descriptor)
{
  // Hand-off is complete once some served segment covers the descriptor's
  // interval and partition at an equal-or-newer version, and at least one of
  // its servers is replicatable (i.e. a historical, not a realtime task).
  for (ImmutableSegmentLoadInfo segmentLoadInfo : serverView) {
    if (!segmentLoadInfo.getSegment().getInterval().contains(descriptor.getInterval())) {
      continue;
    }
    if (segmentLoadInfo.getSegment().getShardSpec().getPartitionNum() != descriptor.getPartitionNumber()) {
      continue;
    }
    if (segmentLoadInfo.getSegment().getVersion().compareTo(descriptor.getVersion()) < 0) {
      continue;
    }
    final boolean hasReplicatableServer = Iterables.any(
        segmentLoadInfo.getServers(),
        new Predicate<DruidServerMetadata>()
        {
          @Override
          public boolean apply(DruidServerMetadata input)
          {
            return input.segmentReplicatable();
          }
        }
    );
    if (hasReplicatableServer) {
      return true;
    }
  }
  return false;
}
/**
 * Given a druid data segment constructs an object to hold the information of this partition.
 *
 * @param segment The druid data segments that corresponds to a specific partition of a druid segment.
 */
public SegmentInfo(DataSegment segment)
{
  this.dataSource = segment.getDataSource();
  this.interval = segment.getInterval();
  this.dimensions = segment.getDimensions();
  this.metrics = segment.getMetrics();
  this.version = segment.getVersion();

  final ShardSpec spec = segment.getShardSpec();
  if (spec instanceof NumberedShardSpec) {
    this.shardSpec = (NumberedShardSpec) spec;
  } else {
    // NOTE(review): assumes any non-numbered spec here is a NoneShardSpec;
    // any other spec type would throw ClassCastException — confirm callers
    // only pass those two kinds.
    this.shardSpec = new NumberedShardSpec((NoneShardSpec) spec);
  }

  this.size = segment.getSize();
  this.identifier = segment.getIdentifier();
}