@Override
public Map<String, SystemStreamMetadata> apply() {
  throw new SamzaException("Failed to get system stream metadata");
}
};
private static void validateFactory(TaskFactory factory) {
  if (factory == null) {
    throw new SamzaException("Either the task class name or the task factory instance is required.");
  }
  if (!(factory instanceof StreamTaskFactory) && !(factory instanceof AsyncStreamTaskFactory)) {
    throw new SamzaException(String.format(
        "TaskFactory must be either StreamTaskFactory or AsyncStreamTaskFactory. %s is not supported",
        factory.getClass()));
  }
}
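// A minimal usage sketch for validateFactory, assuming a hypothetical
// MyStreamTask that implements StreamTask; StreamTaskFactory is a functional
// interface, so a lambda is enough to pass the check, while null or any other
// factory type throws SamzaException.
private static TaskFactory newValidatedFactory() {
  StreamTaskFactory factory = () -> new MyStreamTask(); // hypothetical task class
  validateFactory(factory); // no exception: non-null and a supported factory type
  return factory;
}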
protected static String getStreamName(String jobName, String jobId) {
  if (jobName == null) {
    throw new SamzaException("job name is null. Please specify job.name");
  }
  if (jobId == null) {
    jobId = "1";
  }
  String streamName = "__samza_" + jobName + "_" + jobId + "_logs";
  return streamName.replace("-", "_");
}
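// Illustration of the naming rule above, derived directly from the code: a null
// jobId defaults to "1", and dashes are rewritten to underscores afterwards.
// getStreamName("my-job", null) returns "__samza_my_job_1_logs"
// getStreamName("my-job", "2")  returns "__samza_my_job_2_logs"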
public SystemAdmin getSystemAdmin(String systemName) {
  if (!systemAdminMap.containsKey(systemName)) {
    throw new SamzaException("Cannot get systemAdmin for system " + systemName);
  }
  return systemAdminMap.get(systemName);
}
@Override
public Startpoint fromBytes(byte[] bytes) {
  try {
    LinkedHashMap<String, String> deserialized = mapper.readValue(bytes, LinkedHashMap.class);
    Class<? extends Startpoint> startpointClass =
        (Class<? extends Startpoint>) Class.forName(deserialized.get(STARTPOINT_CLASS));
    return mapper.readValue(deserialized.get(STARTPOINT_OBJ), startpointClass);
  } catch (Exception e) {
    throw new SamzaException(
        String.format("Exception in de-serializing startpoint bytes: %s", Arrays.toString(bytes)), e);
  }
}
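// A sketch of the matching serializer, inferred from fromBytes above (the
// two-entry map layout keyed by STARTPOINT_CLASS and STARTPOINT_OBJ is an
// assumption based on what fromBytes reads, not a confirmed wire format).
public byte[] toBytes(Startpoint startpoint) {
  try {
    LinkedHashMap<String, String> map = new LinkedHashMap<>();
    map.put(STARTPOINT_CLASS, startpoint.getClass().getCanonicalName());
    map.put(STARTPOINT_OBJ, mapper.writeValueAsString(startpoint));
    return mapper.writeValueAsBytes(map);
  } catch (Exception e) {
    throw new SamzaException("Exception in serializing startpoint: " + startpoint, e);
  }
}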
public static SingleFileHdfsReader getHdfsReader(ReaderType readerType, SystemStreamPartition systemStreamPartition) {
  switch (readerType) {
    case AVRO:
      return new AvroFileHdfsReader(systemStreamPartition);
    default:
      throw new SamzaException("Unsupported reader type: " + readerType);
  }
}
@Override
public byte[] toBytes(SamzaSqlRelMessage p) {
  try {
    ObjectMapper mapper = new ObjectMapper();
    // Enable object typing to handle nested records
    mapper.enableDefaultTyping();
    return mapper.writeValueAsString(p).getBytes("UTF-8");
  } catch (Exception e) {
    throw new SamzaException(e);
  }
}
}
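// A matching deserializer sketch (an assumption, not shown in the snippet):
// default typing must also be enabled on the reading side, or the type metadata
// embedded by enableDefaultTyping() for nested records cannot be resolved.
public SamzaSqlRelMessage fromBytes(byte[] bytes) {
  try {
    ObjectMapper mapper = new ObjectMapper();
    mapper.enableDefaultTyping(); // mirror the serializer's configuration
    return mapper.readValue(new String(bytes, "UTF-8"), SamzaSqlRelMessage.class);
  } catch (Exception e) {
    throw new SamzaException(e);
  }
}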
/**
 * Store the table {@code records} with specified {@code keys}. This method must be thread-safe.
 * The default implementation calls putAllAsync and blocks on the completion afterwards.
 * @param records table records to be written
 */
default void putAll(List<Entry<K, V>> records) {
  try {
    putAllAsync(records).get();
  } catch (InterruptedException | ExecutionException e) {
    throw new SamzaException("PUT_ALL failed for " + records, e);
  }
}
@Override
public Map<K, V> getAll(List<K> keys) {
  try {
    return getAllAsync(keys).get();
  } catch (Exception e) {
    throw new SamzaException("GET_ALL failed for " + keys, e);
  }
}
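// Usage sketch of the blocking wrappers above ("table", the keys, and the String
// type parameters are hypothetical): putAll and getAll simply block on their
// *Async counterparts via get(), so code that must not block should call the
// async variants directly instead.
// table.putAll(Arrays.asList(new Entry<>("k1", "v1"), new Entry<>("k2", "v2")));
// Map<String, String> values = table.getAll(Arrays.asList("k1", "k2"));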
@Override
public byte[] toBytes(LoggingEvent object) {
  byte[] bytes = null;
  if (object != null) {
    try {
      bytes = object.getMessage().toString().getBytes(ENCODING);
    } catch (UnsupportedEncodingException e) {
      throw new SamzaException("Logging event message cannot be encoded to byte[]", e);
    }
  }
  return bytes;
}
private void startSubscription() {
  // subscribe to all the registered TopicPartitions
  LOG.info("{}: Consumer subscribes to {}", this, topicPartitionsToSSP.keySet());
  try {
    synchronized (kafkaConsumer) {
      // we are using assign (and not subscribe), so we need to specify both topic and partition
      kafkaConsumer.assign(topicPartitionsToSSP.keySet());
    }
  } catch (Exception e) {
    throw new SamzaException("Consumer subscription failed for " + this, e);
  }
}
@Override
public void put(K key, V value) {
  try {
    putAsync(key, value).get();
  } catch (Exception e) {
    throw new SamzaException(e);
  }
}

@Override
public void delete(K key) {
  try {
    deleteAsync(key).get();
  } catch (Exception e) {
    throw new SamzaException(e);
  }
}

@Override
public void deleteAll(List<K> keys) {
  try {
    deleteAllAsync(keys).get();
  } catch (Exception e) {
    throw new SamzaException(e);
  }
}
@Override
public CompletableFuture<Void> deleteAsync(K key) {
  Preconditions.checkNotNull(writeFn, "null write function");
  Preconditions.checkNotNull(key, "null key");
  return instrument(() -> asyncTable.deleteAsync(key), metrics.numDeletes, metrics.deleteNs)
      .exceptionally(e -> {
        throw new SamzaException(String.format("Failed to delete the record for %s", key), (Throwable) e);
      });
}
@Override
public String getSchema(SystemStream systemStream) {
  String fileName = String.format("%s.avsc", systemStream.getStream());
  File file = new File(schemaDir, fileName);
  try {
    return Schema.parse(file).toString();
  } catch (IOException e) {
    throw new SamzaException(e);
  }
}
}
@Override
public void run() {
  ThreadUtil.logThreadDump("Thread dump at task callback timeout");
  String msg = "Callback for task " + callback.taskName + " timed out after " + timeout + " ms.";
  callback.failure(new SamzaException(msg));
}
};
@Override
public CompletableFuture<Void> putAllAsync(Collection<Entry<K, V>> records) {
  return failsafe(retryPolicy, retryMetrics, retryExecutor)
      .future(() -> writeFn.putAllAsync(records))
      .exceptionally(e -> {
        throw new SamzaException("Failed to put records after retries.", e);
      });
}
private void addTable(String tableId, Config config) {
  if (tableContexts.containsKey(tableId)) {
    throw new SamzaException("Table " + tableId + " already exists");
  }
  JavaTableConfig tableConfig = new JavaTableConfig(config);
  String providerFactoryClassName = tableConfig.getTableProviderFactory(tableId);
  TableProviderFactory tableProviderFactory = Util.getObj(providerFactoryClassName, TableProviderFactory.class);
  TableCtx ctx = new TableCtx();
  ctx.tableProvider = tableProviderFactory.getTableProvider(tableId);
  tableContexts.put(tableId, ctx);
}
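// Config sketch for addTable (the exact key name is an assumption based on
// JavaTableConfig's per-table convention, and MyTableProviderFactory is
// hypothetical):
// tables.my-table.provider.factory = com.example.MyTableProviderFactory
// addTable("my-table", config) would then instantiate that factory via
// Util.getObj and register the resulting provider under "my-table".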
protected IndexRequest getRequest(OutgoingMessageEnvelope envelope) {
  String[] parts = envelope.getSystemStream().getStream().split("/");
  if (parts.length != 2) {
    throw new SamzaException("Elasticsearch stream name must match pattern {index}/{type}");
  }
  String index = parts[0];
  String type = parts[1];
  return Requests.indexRequest(index).type(type);
}
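// Illustration of the {index}/{type} convention enforced above (stream names
// are hypothetical): an envelope addressed to stream "machine-logs/syslog"
// yields an IndexRequest for index "machine-logs" with mapping type "syslog";
// any stream name without exactly one '/' separator fails fast with SamzaException.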