Map<String, ?> lastOffset = context.offsetStorageReader().offset(partition);
long lastId = lastOffset == null ? 0L : (Long) lastOffset.get("id");
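// The map returned by offset(partition) is whatever sourceOffset map was last
// committed for that sourcePartition via a SourceRecord, so both sides must
// agree on keys and value types. A minimal sketch of the producing side,
// assuming hypothetical "source"/"id" keys matching the lookup above (uses
// org.apache.kafka.connect.source.SourceRecord and org.apache.kafka.connect.data.Schema):
Map<String, String> sourcePartition = Collections.singletonMap("source", "my-source");
Map<String, Long> sourceOffset = Collections.singletonMap("id", lastId + 1);
SourceRecord record = new SourceRecord(
        sourcePartition,       // read back later by offsetStorageReader().offset(...)
        sourceOffset,          // becomes the map returned by that lookup
        "my-topic",            // hypothetical destination topic
        Schema.STRING_SCHEMA,
        "value");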
/**
 * Loads the connector's persistent offset (if present) via the given loader.
 */
@Override
protected OffsetContext getPreviousOffset(OffsetContext.Loader loader) {
    Map<String, ?> partition = loader.getPartition();
    Map<String, Object> previousOffset = context.offsetStorageReader()
            .offsets(Collections.singleton(partition))
            .get(partition);
    if (previousOffset != null) {
        OffsetContext offsetContext = loader.load(previousOffset);
        LOGGER.info("Found previous offset {}", offsetContext);
        return offsetContext;
    } else {
        return null;
    }
}
Map<String, Object> existingOffset = context.offsetStorageReader().offset(sourceInfo.partition());
LoggingContext.PreviousContext previousContext = taskContext.configureLoggingContext(CONTEXT_NAME);
try {
boolean snapshotEventsAreInserts = true;
Map<String, String> partition = Collect.hashMapOf(SourceInfo.SERVER_PARTITION_KEY, serverName);
Map<String, ?> offsets = getRestartOffset(context.offsetStorageReader().offset(partition));
final SourceInfo source;
if (offsets != null) {
context.offsetStorageReader().offsets(partitions).forEach(source::setOffsetFor);
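// offsets(Collection) performs the lookup in bulk; the result is keyed by each
// partition map, and a partition with no committed offset maps to null (or may
// be absent, depending on the backing store). A minimal sketch, assuming a
// hypothetical "table" partition key:
List<Map<String, String>> tablePartitions = new ArrayList<>();
for (String table : Arrays.asList("orders", "customers")) {
    tablePartitions.add(Collections.singletonMap("table", table));
}
Map<Map<String, String>, Map<String, Object>> committed =
        context.offsetStorageReader().offsets(tablePartitions);
for (Map<String, String> p : tablePartitions) {
    Map<String, Object> o = committed.get(p);
    if (o == null) {
        // no committed offset yet: start this partition from the beginning
    }
}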
@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid VerifiableSourceTask configuration", e);
    }

    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null)
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    else
        seqno = 0;
    startingSeqno = seqno;
    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());

    log.info("Started VerifiableSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
/**
 * Loads the current saved offsets.
 */
private void loadOffsets() {
    List<Map<String, String>> partitions = new ArrayList<>();
    for (String db : databases) {
        Map<String, String> partition = Collections.singletonMap("mongodb", db);
        partitions.add(partition);
    }
    offsets.putAll(context.offsetStorageReader().offsets(partitions));
}
@Override
public void start(Map<String, String> props) {
    final long throughput;
    try {
        name = props.get(NAME_CONFIG);
        id = Integer.parseInt(props.get(ID_CONFIG));
        topic = props.get(TOPIC_CONFIG);
        maxNumMsgs = Long.parseLong(props.get(NUM_MSGS_CONFIG));
        multipleSchema = Boolean.parseBoolean(props.get(MULTIPLE_SCHEMA_CONFIG));
        partitionCount = Integer.parseInt(props.containsKey(PARTITION_COUNT_CONFIG) ? props.get(PARTITION_COUNT_CONFIG) : "1");
        throughput = Long.parseLong(props.get(THROUGHPUT_CONFIG));
    } catch (NumberFormatException e) {
        throw new ConnectException("Invalid SchemaSourceTask configuration", e);
    }

    throttler = new ThroughputThrottler(throughput, System.currentTimeMillis());
    partition = Collections.singletonMap(ID_FIELD, id);
    Map<String, Object> previousOffset = this.context.offsetStorageReader().offset(partition);
    if (previousOffset != null) {
        seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1;
    } else {
        seqno = 0;
    }
    startingSeqno = seqno;
    count = 0;

    log.info("Started SchemaSourceTask {}-{} producing to topic {} resuming from seqno {}", name, id, topic, startingSeqno);
}
private Map<Map<String, String>, Map<String, Object>> loadAndGetOffsets(OffsetStorageReader reader, String jobUrls) {
    String[] jobUrlArray = jobUrls.split(",");
    logger.debug("Total jobs: {}. Loading offsets from Connect.", jobUrlArray.length);
    Collection<Map<String, String>> partitions = new ArrayList<>(jobUrlArray.length);
    for (String jobUrl : jobUrlArray) {
        partitions.add(Collections.singletonMap(JenkinsSourceTask.JOB_NAME, urlDecode(extractJobName(jobUrl))));
    }
    return reader.offsets(partitions);
}
private void initializeLastVariables() {
    Map<String, Object> lastSourceOffset = context.offsetStorageReader().offset(sourcePartition());
    if (lastSourceOffset == null) {
        // We haven't fetched anything yet, so we initialize to 7 days ago.
        nextQuerySince = config.getSince();
        lastIssueNumber = -1;
    } else {
        Object updatedAt = lastSourceOffset.get(UPDATED_AT_FIELD);
        Object issueNumber = lastSourceOffset.get(NUMBER_FIELD);
        Object nextPage = lastSourceOffset.get(NEXT_PAGE_FIELD);
        // instanceof is false for null, so no separate null checks are needed.
        if (updatedAt instanceof String) {
            nextQuerySince = Instant.parse((String) updatedAt);
        }
        if (issueNumber instanceof String) {
            lastIssueNumber = Integer.valueOf((String) issueNumber);
        }
        if (nextPage instanceof String) {
            nextPageToVisit = Integer.valueOf((String) nextPage);
        }
    }
}
private void expectOffsetLookupReturnNone() {
    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offset(EasyMock.anyObject(Map.class))).andReturn(null);
}
List<Map<String, Object>> partitionMaps =
        partitionIds.stream().map(TopicPartitionSerDe::asMap).collect(Collectors.toList());
Map<Map<String, Object>, Map<String, Object>> offsets = context.offsetStorageReader().offsets(partitionMaps);
if (offsets == null) {
    return;
@Override
public FileReader offer(FileMetadata metadata, OffsetStorageReader offsetStorageReader) throws IOException {
    Map<String, Object> partition = new HashMap<String, Object>() {{
        put("path", metadata.getPath());
        // TODO manage blocks
        // put("blocks", metadata.getBlocks().toString());
    }};
    FileSystem current = fileSystems.stream()
            .filter(fs -> metadata.getPath().startsWith(fs.getWorkingDirectory().toString()))
            .findFirst()
            .orElse(null);
    FileReader reader;
    try {
        reader = ReflectionUtils.makeReader(
                (Class<? extends FileReader>) conf.getClass(FsSourceTaskConfig.FILE_READER_CLASS),
                current, new Path(metadata.getPath()), conf.originals());
    } catch (Throwable t) {
        throw new ConnectException("An error has occurred when creating reader for file: " + metadata.getPath(), t);
    }
    Map<String, Object> offset = offsetStorageReader.offset(partition);
    if (offset != null && offset.get("offset") != null) {
        reader.seek(() -> (Long) offset.get("offset"));
    }
    return reader;
}
                leaderTopicPartition.toTopicPartitionString()))
        .collect(Collectors.toList());
Map<String, Long> topicPartitionStringsOffsets = context.offsetStorageReader().offsets(offsetLookupPartitions)
        .entrySet().stream()
        .filter(e -> e != null && e.getKey() != null && e.getKey().get(TOPIC_PARTITION_KEY) != null
try {
    stream = Files.newInputStream(Paths.get(filename));
    Map<String, Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename));
    if (offset != null) {
        Object lastRecordedOffset = offset.get(POSITION_FIELD);
        break;
offsets = context.offsetStorageReader().offsets(partitions);
try {
    stream = new FileInputStream(filename);
    Map<String, Object> offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename));
    if (offset != null) {
        Object lastRecordedOffset = offset.get(POSITION_FIELD);
private void mockConsumerInitialization() throws Exception {
    TopicPartition firstTopicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
    Collection<TopicPartition> topicPartitions = new ArrayList<>();
    topicPartitions.add(firstTopicPartition);
    Map<TopicPartition, Long> endOffsets = Collections.singletonMap(firstTopicPartition, FIRST_OFFSET);

    EasyMock.expect(context.offsetStorageReader()).andReturn(offsetStorageReader);
    EasyMock.expect(offsetStorageReader.offsets(EasyMock.<List<Map<String, String>>>anyObject()))
            .andReturn(new HashMap<>());

    PowerMock.expectNew(KafkaConsumer.class, new Class[] { Properties.class }, config.getKafkaConsumerProperties())
            .andReturn(consumer);
    EasyMock.expect(consumer.endOffsets(topicPartitions)).andReturn(endOffsets);
    consumer.assign(topicPartitions);
    EasyMock.expectLastCall();
    consumer.seek(firstTopicPartition, FIRST_OFFSET);
    EasyMock.expectLastCall();
}
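// Expectations like these only take effect once the mocks enter replay mode.
// A minimal sketch of the surrounding test, assuming a hypothetical task under
// test and the PowerMock/EasyMock lifecycle already used above:
@Test
public void shouldSeekToCommittedOffsetOnStart() throws Exception {
    mockConsumerInitialization();
    PowerMock.replayAll();        // switch every registered mock to replay mode

    task.initialize(context);     // hypothetical SourceTask under test
    task.start(new HashMap<>());  // triggers the mocked consumer setup

    PowerMock.verifyAll();        // assert all expected calls actually happened
}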