private static ShardSpec getNextPartitionShardSpec(ShardSpec shardSpec)
{
  if (shardSpec instanceof LinearShardSpec) {
    return new LinearShardSpec(shardSpec.getPartitionNum() + 1);
  } else if (shardSpec instanceof NumberedShardSpec) {
    // Increment the partition number here as well; reusing the current number
    // would collide with the existing segment.
    return new NumberedShardSpec(
        shardSpec.getPartitionNum() + 1,
        ((NumberedShardSpec) shardSpec).getPartitions()
    );
  } else {
    // Druid only supports appending more partitions to Linear and Numbered ShardSpecs.
    throw new IllegalStateException(String.format("Cannot expand shard spec [%s]", shardSpec));
  }
}
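// A minimal usage sketch (the caller is hypothetical): given the highest-numbered
// shard already committed for an interval, derive the spec for the segment being appended.
ShardSpec last = new LinearShardSpec(2);          // e.g. read from the latest committed segment
ShardSpec next = getNextPartitionShardSpec(last); // LinearShardSpec with partitionNum == 3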
private static VersionedIntervalTimeline<String, DataSegment> getTimelineForIntervalWithHandle(
    final Handle handle,
    final String dataSource,
    final Interval interval,
    final MetadataStorageTablesConfig dbTables
) throws IOException
{
  // A segment overlaps the interval when segment.start <= interval.end and segment.end >= interval.start.
  Query<Map<String, Object>> sql = handle.createQuery(String.format(
      "SELECT payload FROM %s WHERE used = true AND dataSource = ? AND start <= ? AND \"end\" >= ?",
      dbTables.getSegmentsTable()))
      .bind(0, dataSource)
      .bind(1, interval.getEnd().toString())
      .bind(2, interval.getStart().toString());

  final VersionedIntervalTimeline<String, DataSegment> timeline =
      new VersionedIntervalTimeline<>(Ordering.natural());
  try (ResultIterator<byte[]> dbSegments = sql.map(ByteArrayMapper.FIRST).iterator()) {
    while (dbSegments.hasNext()) {
      final byte[] payload = dbSegments.next();
      DataSegment segment = JSON_MAPPER.readValue(payload, DataSegment.class);
      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
  }
  return timeline;
}
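// Sketch of consuming the result, assuming Druid's VersionedIntervalTimeline#lookup,
// which yields only chunks not overshadowed by a newer version; the "wikipedia"
// datasource name is illustrative.
VersionedIntervalTimeline<String, DataSegment> timeline =
    getTimelineForIntervalWithHandle(handle, "wikipedia", interval, dbTables);
for (TimelineObjectHolder<String, DataSegment> holder : timeline.lookup(interval)) {
  for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
    DataSegment visible = chunk.getObject(); // a segment visible for this interval/version
  }
}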
if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
  max = SegmentIdentifier.fromDataSegment(existing.getObject());
}
timeline.add(
    publishedSegment.getInterval(),
    publishedSegment.getVersion(),
    publishedSegment.getShardSpec().createChunk(publishedSegment)
);
if (actualSpec.isInChunk(inputRow)) {
  return Optional.of(
      new Bucket(
          hadoopyShardSpec.getShardNum(),
          timeBucket.get().getStart(),
          actualSpec.getPartitionNum()
      )
  );
}
@Override
public ShardSpec getShardSpec(long timestamp, InputRow row)
{
  for (ShardSpec spec : shardSpecs) {
    if (spec.isInChunk(timestamp, row)) {
      return spec;
    }
  }
  throw new ISE("row[%s] doesn't fit in any shard[%s]", row, shardSpecs);
}
};
/**
 * Return a shardSpec for the given interval and input row.
 *
 * @param interval interval for shardSpec
 * @param row      input row
 *
 * @return a shardSpec
 */
ShardSpec getShardSpec(Interval interval, InputRow row)
{
  final List<ShardSpec> shardSpecs = map.get(interval);
  if (shardSpecs == null || shardSpecs.isEmpty()) {
    throw new ISE("Failed to get shardSpec for interval[%s]", interval);
  }
  return shardSpecs.get(0).getLookup(shardSpecs).getShardSpec(row.getTimestampFromEpoch(), row);
}
}
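// Hypothetical caller, assuming Druid's MapBasedInputRow; the dimension values,
// interval, and the enclosing object holding the interval-to-specs map are illustrative.
InputRow row = new MapBasedInputRow(
    DateTime.parse("2017-01-01T00:30:00Z").getMillis(),
    ImmutableList.of("dim1"),
    ImmutableMap.<String, Object>of("dim1", "abc")
);
ShardSpec spec = getShardSpec(Interval.parse("2017-01-01/2017-01-02"), row);
int partitionNum = spec.getPartitionNum(); // identifies the chunk that owns this row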
Map<String, Range<String>> domain = shard.getDomain();
for (Map.Entry<String, Range<String>> entry : domain.entrySet()) {
  String dimension = entry.getKey();
    segment.getVersion()
);
if ((entry != null) && (entry.getChunk(segment.getShardSpec().getPartitionNum()) != null)) {
  log.warn("Told to load an adapter for a segment[%s] that already exists", segment.getIdentifier());
  return false;
}

    segment.getInterval(),
    segment.getVersion(),
    segment.getShardSpec().createChunk(new ReferenceCountingSegment(adapter))
);
synchronized (dataSourceSizes) {
/**
 * Should we index this inputRow? The decision is based on our interval and shardSpec.
 *
 * @param schema   schema holding the target shardSpec
 * @param interval interval the row's timestamp must fall in
 * @param inputRow the row to check
 *
 * @return true if the row's timestamp lies within the interval and the row belongs to the shardSpec's chunk
 */
private boolean shouldIndex(final Schema schema, final Interval interval, final InputRow inputRow)
{
  return interval.contains(inputRow.getTimestampFromEpoch()) && schema.getShardSpec().isInChunk(inputRow);
}
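// Illustration of the two-part check above (values hypothetical): a row stamped
// 2017-01-01T05:00Z against interval 2017-01-01/2017-01-02 passes the interval
// test, and is indexed only if the schema's shardSpec also claims it.
boolean indexed = shouldIndex(schema, Interval.parse("2017-01-01/2017-01-02"), inputRow);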
entry.getKey(),
actualSpec.getLookup(
    Lists.transform(
        entry.getValue(),
        new Function<HadoopyShardSpec, ShardSpec>()
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
);
if ((entry != null) && (entry.getChunk(segment.getShardSpec().getPartitionNum()) != null)) {
  log.warn("Told to load an adapter for a segment[%s] that already exists", segment.getIdentifier());
  resultSupplier.set(false);
}

    segment.getInterval(),
    segment.getVersion(),
    segment.getShardSpec().createChunk(new ReferenceCountingSegment(adapter))
);
dataSourceState.addSegment(segment);
if (currentOpenSegment.getShardSpec().getPartitionNum() != partitionNumber
    || !currentOpenSegment.getInterval().equals(interval)) {
  pushSegments(ImmutableList.of(currentOpenSegment));
private void serverAddedSegment(final DruidServerMetadata server, final DataSegment segment)
{
  String segmentId = segment.getIdentifier();
  synchronized (lock) {
    log.debug("Adding segment[%s] for server[%s]", segment, server);

    SegmentLoadInfo segmentLoadInfo = segmentLoadInfos.get(segmentId);
    if (segmentLoadInfo == null) {
      // servers escape the scope of this object so use ConcurrentSet
      segmentLoadInfo = new SegmentLoadInfo(segment);

      VersionedIntervalTimeline<String, SegmentLoadInfo> timeline = timelines.get(segment.getDataSource());
      if (timeline == null) {
        timeline = new VersionedIntervalTimeline<>(Ordering.natural());
        timelines.put(segment.getDataSource(), timeline);
      }

      timeline.add(
          segment.getInterval(),
          segment.getVersion(),
          segment.getShardSpec().createChunk(segmentLoadInfo)
      );
      segmentLoadInfos.put(segmentId, segmentLoadInfo);
    }
    segmentLoadInfo.addServer(server);
  }
}
    interval,
    tuningConfig.getVersioningPolicy().getVersion(interval),
    new LinearShardSpec(currentOpenSegment.getShardSpec().getPartitionNum() + 1)
);
pushSegments(Lists.newArrayList(currentOpenSegment));
LOG.info(
    "Creating new partition for segment {}, partition num {}",
    retVal.getIdentifierAsString(),
    retVal.getShardSpec().getPartitionNum()
);
currentOpenSegment = retVal;
return retVal;
private void serverAddedSegment(final DruidServer server, final DataSegment segment)
{
  String segmentId = segment.getIdentifier();
  synchronized (lock) {
    log.debug("Adding segment[%s] for server[%s]", segment, server);

    ServerSelector selector = selectors.get(segmentId);
    if (selector == null) {
      selector = new ServerSelector(segment, serverSelectorStrategy);

      VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get(segment.getDataSource());
      if (timeline == null) {
        timeline = new VersionedIntervalTimeline<>(Ordering.natural());
        timelines.put(segment.getDataSource(), timeline);
      }

      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(selector));
      selectors.put(segmentId, selector);
    }

    QueryableDruidServer queryableDruidServer = clients.get(server.getName());
    if (queryableDruidServer == null) {
      queryableDruidServer = addServer(server);
    }
    selector.addServer(queryableDruidServer);
  }
}
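// The read side of this registration (sketch): a query for an interval walks the
// timeline and asks each chunk's ServerSelector for a server, assuming Druid's
// ServerSelector#pick; the "wikipedia" datasource name is illustrative.
VersionedIntervalTimeline<String, ServerSelector> timeline = timelines.get("wikipedia");
if (timeline != null) {
  for (TimelineObjectHolder<String, ServerSelector> holder : timeline.lookup(interval)) {
    for (PartitionChunk<ServerSelector> chunk : holder.getObject()) {
      QueryableDruidServer server = chunk.getObject().pick(); // server to route this chunk's query to
    }
  }
}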
Assert.assertEquals("v0", persistedSegment.getVersion()); Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec); Assert.assertEquals(1, persistedSegment.getShardSpec().getPartitionNum());
);
timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
Path expectedFinalHadoopPath =