private static ShardSpec getNextPartitionShardSpec(ShardSpec shardSpec) {
  if (shardSpec instanceof LinearShardSpec) {
    return new LinearShardSpec(shardSpec.getPartitionNum() + 1);
  } else if (shardSpec instanceof NumberedShardSpec) {
    return new NumberedShardSpec(
        shardSpec.getPartitionNum() + 1,
        ((NumberedShardSpec) shardSpec).getPartitions());
  } else {
    // Druid only supports appending more partitions to Linear and Numbered ShardSpecs.
    throw new IllegalStateException(String.format("Cannot expand shard spec [%s]", shardSpec));
  }
}
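// A minimal sketch of how the helper above behaves when appending: each call
// yields the next linear partition. Assumes Druid's LinearShardSpec is on the
// classpath; the variable names are illustrative.
ShardSpec first = new LinearShardSpec(0);
ShardSpec second = getNextPartitionShardSpec(first);   // partitionNum == 1
ShardSpec third = getNextPartitionShardSpec(second);   // partitionNum == 2
assert third.getPartitionNum() == 2;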
private DataSegment createSegment(String location) throws IOException {
  return createSegment(location, new Interval(100, 170, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
}
SegmentIdentifier retVal;
if (currentOpenSegment == null) {
  // No open segment yet: start at partition 0 of this interval.
  currentOpenSegment = new SegmentIdentifier(
      dataSchema.getDataSource(),
      interval,
      tuningConfig.getVersioningPolicy().getVersion(interval),
      new LinearShardSpec(0));
  return currentOpenSegment;
} else if (currentOpenSegment.getInterval().equals(interval)) {
  // Same interval: once the open segment is full, push it and roll over to
  // the next linear partition.
  retVal = new SegmentIdentifier(
      dataSchema.getDataSource(),
      interval,
      tuningConfig.getVersioningPolicy().getVersion(interval),
      new LinearShardSpec(currentOpenSegment.getShardSpec().getPartitionNum() + 1));
  pushSegments(Lists.newArrayList(currentOpenSegment));
  LOG.info("Creating new partition for segment {}, partition num {}",
      retVal.getIdentifierAsString(), retVal.getShardSpec().getPartitionNum());
  currentOpenSegment = retVal;
  return retVal;
} else {
  // New interval: push the previous open segment and start at partition 0.
  retVal = new SegmentIdentifier(
      dataSchema.getDataSource(),
      interval,
      tuningConfig.getVersioningPolicy().getVersion(interval),
      new LinearShardSpec(0));
  pushSegments(Lists.newArrayList(currentOpenSegment));
  LOG.info("Creating segment {}", retVal.getIdentifierAsString());
  currentOpenSegment = retVal;
  return retVal;
}
// Track the highest-numbered existing partition in this interval so the new
// segment can be appended after it.
if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject()
    .getShardSpec()
    .getPartitionNum()) {
  max = SegmentIdentifier.fromDataSegment(existing.getObject());
}
// ...
timeline.add(
    publishedSegment.getInterval(),
    publishedSegment.getVersion(),
    publishedSegment.getShardSpec().createChunk(publishedSegment));
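// Hedged sketch of what typically follows: once `max` holds the identifier of
// the highest existing partition, the appended segment reuses its version and
// takes the next shard spec. `dataSource`, `interval`, and `version` are
// assumed to be in scope.
SegmentIdentifier next = max == null
    ? new SegmentIdentifier(dataSource, interval, version, new LinearShardSpec(0))
    : new SegmentIdentifier(dataSource, interval, max.getVersion(),
        getNextPartitionShardSpec(max.getShardSpec()));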
// Segments already committed under version "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", NoneShardSpec.instance()),
createSegment(new Path(taskDirPath, "index_old_2.zip").toString(),
    new Interval(200, 250, DateTimeZone.UTC), "v0", new LinearShardSpec(0)),
createSegment(new Path(taskDirPath, "index_old_3.zip").toString(),
    new Interval(250, 300, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));

HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
pusherConfig.setStorageDirectory(taskDirPath.toString());

// Segment pushed under the new version "v1":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath = /* ... */;
private static VersionedIntervalTimeline<String, DataSegment> getTimelineForIntervalWithHandle(
    final Handle handle,
    final String dataSource,
    final Interval interval,
    final MetadataStorageTablesConfig dbTables
) throws IOException {
  // Select every used segment whose interval overlaps the requested one:
  // segment.start <= interval.end AND segment.end >= interval.start.
  Query<Map<String, Object>> sql = handle.createQuery(String.format(
      "SELECT payload FROM %s WHERE used = true AND dataSource = ? AND start <= ? AND \"end\" >= ?",
      dbTables.getSegmentsTable()))
      .bind(0, dataSource)
      .bind(1, interval.getEnd().toString())
      .bind(2, interval.getStart().toString());
  final VersionedIntervalTimeline<String, DataSegment> timeline =
      new VersionedIntervalTimeline<>(Ordering.natural());
  try (ResultIterator<byte[]> dbSegments = sql.map(ByteArrayMapper.FIRST).iterator()) {
    while (dbSegments.hasNext()) {
      final byte[] payload = dbSegments.next();
      DataSegment segment = JSON_MAPPER.readValue(payload, DataSegment.class);
      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
  }
  return timeline;
}
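// Hypothetical usage of the method above: look up the segments visible for a
// query interval. `handle`, `queryInterval`, and `dbTables` are assumed to be
// available from the surrounding utility or test code.
VersionedIntervalTimeline<String, DataSegment> timeline =
    getTimelineForIntervalWithHandle(handle, "wikipedia", queryInterval, dbTables);
for (TimelineObjectHolder<String, DataSegment> holder : timeline.lookup(queryInterval)) {
  for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
    System.out.println(chunk.getObject().getIdentifier());
  }
}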
@Override
public boolean isInChunk(long timestamp, InputRow inputRow) {
  // A row belongs to this chunk when its hash maps onto this partition number
  // modulo the total partition count.
  return (((long) hash(timestamp, inputRow)) - getPartitionNum()) % getPartitions() == 0;
}
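// Worked illustration of the predicate above, in plain Java without Druid
// types: for any hash value h and N partitions, exactly one partitionNum in
// [0, N) satisfies (h - partitionNum) % N == 0, so rows spread evenly.
int partitions = 3;
for (long hash = 0; hash < 6; hash++) {
  for (int partitionNum = 0; partitionNum < partitions; partitionNum++) {
    if ((hash - partitionNum) % partitions == 0) {
      System.out.printf("hash %d -> partition %d%n", hash, partitionNum);
    }
  }
}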
private static BiFunction<Integer, Integer, ShardSpec> getShardSpecCreateFunction(
    Integer numShards,
    ObjectMapper jsonMapper
) {
  Preconditions.checkNotNull(numShards, "numShards");
  if (numShards == 1) {
    // A single shard needs no partitioning scheme.
    return (shardId, totalNumShards) -> NoneShardSpec.instance();
  } else {
    return (shardId, totalNumShards) ->
        new HashBasedNumberedShardSpec(shardId, totalNumShards, null, jsonMapper);
  }
}
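// A sketch of applying the factory above when allocating one spec per shard.
// `numShards` and `jsonMapper` mirror the method's parameters and are assumed
// to be in scope.
BiFunction<Integer, Integer, ShardSpec> factory = getShardSpecCreateFunction(numShards, jsonMapper);
List<ShardSpec> shardSpecs = new ArrayList<>();
for (int shardId = 0; shardId < numShards; shardId++) {
  shardSpecs.add(factory.apply(shardId, numShards));
}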
@Override
public String toString() {
  return "HashBasedNumberedShardSpec{" +
      "partitionNum=" + getPartitionNum() +
      ", partitions=" + getPartitions() +
      ", partitionDimensions=" + getPartitionDimensions() +
      '}';
}
@Override
public String toString() {
  return "HashBasedNumberedShardSpec{" +
      "partitionNum=" + getPartitionNum() +
      ", partitions=" + getPartitions() +
      '}';
}
@Override
public <T> PartitionChunk<T> createChunk(T obj) {
  return new LinearPartitionChunk<T>(partitionNum, obj);
}
@Override
public boolean abuts(final PartitionChunk<T> other) {
  // Chunks abut when `other` is the chunk immediately following this one.
  return other instanceof NumberedPartitionChunk && other.getChunkNumber() == chunkNumber + 1;
}
public static <T> NumberedPartitionChunk<T> make(int chunkNumber, int chunks, T obj) {
  return new NumberedPartitionChunk<T>(chunkNumber, chunks, obj);
}
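// Sketch of how make() and abuts() compose: consecutive chunk numbers within
// the same chunk count abut, and the relation is not symmetric. Values here
// are illustrative.
NumberedPartitionChunk<String> c0 = NumberedPartitionChunk.make(0, 2, "first");
NumberedPartitionChunk<String> c1 = NumberedPartitionChunk.make(1, 2, "second");
assert c0.abuts(c1);   // chunk 1 directly follows chunk 0
assert !c1.abuts(c0);  // but not the other way around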
if (currentOpenSegment != null) {
  if (currentOpenSegment.getShardSpec().getPartitionNum() != partitionNumber
      || !currentOpenSegment.getInterval().equals(interval)) {
    // Row targets another partition or interval: push the open segment first.
    pushSegments(ImmutableList.of(currentOpenSegment));
    currentOpenSegment = new SegmentIdentifier(dataSchema.getDataSource(), interval,
        tuningConfig.getVersioningPolicy().getVersion(interval),
        new LinearShardSpec(partitionNumber));
  }
} else {
  // No open segment yet: start one for the requested partition.
  currentOpenSegment = new SegmentIdentifier(dataSchema.getDataSource(), interval,
      tuningConfig.getVersioningPolicy().getVersion(interval),
      new LinearShardSpec(partitionNumber));
}
// Segments already committed under version "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)),
createSegment(new Path(taskDirPath, "index_old_2.zip").toString(),
    new Interval(150, 200, DateTimeZone.UTC), "v0", new LinearShardSpec(0)),
createSegment(new Path(taskDirPath, "index_old_3.zip").toString(),
    new Interval(200, 300, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));

HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
pusherConfig.setStorageDirectory(taskDirPath.toString());

// Segment covering the whole range, pushed under the new version "v1":
createSegment(/* ... */,
    new Interval(100, 300, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath = /* ... */;
// Existing segment at partition 1 under "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));

// New segment pushed under "v1":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath = /* ... */;

// The persisted segment is appended after the existing partition.
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
// Segment committed under "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, /* ... */);

// Three segments then published under "v1":
createSegment(/* ... */,
    new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath1 = /* ... */;
createSegment(/* ... */,
    new Interval(200, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath2 = /* ... */;
createSegment(/* ... */,
    new Interval(100, 200, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath3 = /* ... */;
// Existing segment at partition 1 under "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));

// Two segments pushed under "v1":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath = /* ... */;
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(1));
Path segmentPath = /* ... */;

// The last persisted segment lands on the next linear partition, number 2.
Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
Path expectedFinalHadoopPath = /* ... */;
// Segment committed under "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, /* ... */);

// Overlapping segment then published under "v1":
createSegment(/* ... */,
    new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
// Segment committed under "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, /* ... */);

// Second segment published for the same interval and version "v0":
createSegment(/* ... */,
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0));
Path descriptorPath = /* ... */;

// The appended segment keeps the version but takes the next partition number.
Assert.assertEquals("v0", persistedSegment.getVersion());
Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
Assert.assertEquals(1, persistedSegment.getShardSpec().getPartitionNum());