// NOTE(review): fragment of a larger request-fetch loop — the enclosing try/while/if is outside this view.
// Visible behavior: on AuthorizationException (presumably `aze`, bound outside this fragment) and on
// TException, the client is asynchronously reconnected and the failure is logged; the final `else`
// branch resolves a local DRPC invocation handler from the ServiceRegistry by `_local_drpc_id`.
reconnectAsync(client); continue; reconnectAsync(client); LOG.error("Not authorized to fetch DRPC request from DRPC server", aze); } catch (TException e) { reconnectAsync(client); LOG.error("Failed to fetch DRPC request from DRPC server", e); } catch (Exception e) { checkFutures(); } else { DistributedRPCInvocations.Iface drpc = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
private static Map<Node, String> genSpoutIds(Collection<SpoutNode> spoutNodes) { Map<Node, String> ret = new HashMap<>(); int ctr = 0; for (SpoutNode n : spoutNodes) { if (n.type == SpoutNode.SpoutType.BATCH) { // if Batch spout then id contains txId ret.put(n, "spout-" + n.txId); } else if (n.type == SpoutNode.SpoutType.DRPC) { //if DRPC spout then id contains function ret.put(n, "spout-" + ((DRPCSpout) n.spout).get_function() + ctr); ctr++; } else { ret.put(n, "spout" + ctr); ctr++; } } return ret; }
/** * Creates and initializes a Subscriber that reads from the DRPC servers. Intended to be used inside a Storm * spout in a Storm topology. * * @param config The config containing the String function in {@link DRPCConfig#DRPC_FUNCTION}, the Storm configuration * {@link Map} as {@link com.yahoo.bullet.storm.BulletStormConfig#STORM_CONFIG} and the Storm * {@link TopologyContext} as {@link com.yahoo.bullet.storm.BulletStormConfig#STORM_CONTEXT}. * @param maxUnCommittedQueries The maximum number of queries that can be read without committing them. */ public DRPCQuerySubscriber(BulletConfig config, int maxUnCommittedQueries) { super(maxUnCommittedQueries); collector = new DRPCOutputCollector(); emittedIDs = new HashMap<>(); // Get the Storm Config that has all the relevant cluster settings and properties Map stormConfig = config.getRequiredConfigAs(DRPCConfig.STORM_CONFIG, Map.class); // Get the TopologyContext TopologyContext context = config.getRequiredConfigAs(DRPCConfig.STORM_CONTEXT, TopologyContext.class); // Wrap the collector in a SpoutOutputCollector (it just delegates to the underlying DRPCOutputCollector) SpoutOutputCollector spoutOutputCollector = new SpoutOutputCollector(collector); // Get the DRPC function we should subscribe to String function = config.getRequiredConfigAs(DRPCConfig.DRPC_FUNCTION, String.class); spout = new DRPCSpout(function); spout.open(stormConfig, context, spoutOutputCollector); }
// NOTE(review): fragment of a larger result-fetch loop — the enclosing try/while/if is outside this view.
// Visible behavior: on AuthorizationException (presumably `aze`, bound outside this fragment) and on
// TException, the client is synchronously reconnected and the failure is logged; the final `else`
// branch resolves a local DRPC invocation handler from the ServiceRegistry by `_local_drpc_id`.
reconnect(client); continue; reconnect(client); LOG.error("Not authorized to fetch DRPC result from DRPC server", aze); } catch (TException e) { reconnect(client); LOG.error("Failed to fetch DRPC result from DRPC server", e); } catch (Exception e) { checkFutures(); } else { DistributedRPCInvocations.Iface drpc = (DistributedRPCInvocations.Iface) ServiceRegistry.getService(_local_drpc_id);
@Override public void close() { log.warn("Failing all pending requests: {}", emittedIDs); emittedIDs.values().forEach(spout::fail); log.info("Closing spout..."); spout.close(); } }
@Override public List<PubSubMessage> getMessages() throws PubSubException { // Try and read from DRPC. The DRPCSpout does a sleep for 1 ms if there are no tuples, so we don't have to do it. spout.nextTuple(); if (!collector.haveOutput()) { return null; } // The DRPCSpout only should have emitted one tuple List<List<Object>> tuples = collector.reset(); log.debug("Have a message through DRPC {}", tuples); List<Object> tupleAndID = tuples.get(0); // The first object is the actual DRPCSpout tuple and the second is the DRPC messageID. List<Object> tuple = (List<Object>) tupleAndID.get(0); Object drpcID = tupleAndID.get(1); // The first object in the tuple is our PubSubMessage as JSON String pubSubMessageJSON = (String) tuple.get(0); // The second object in the tuple is the serialized returnInfo added by the DRPCSpout String returnInfo = (String) tuple.get(1); log.debug("Read message\n{}\nfrom DRPC with return information {}", pubSubMessageJSON, returnInfo); PubSubMessage pubSubMessage = PubSubMessage.fromJSON(pubSubMessageJSON); // Add returnInfo as metadata. Cannot add it to pubSubMessage String id = pubSubMessage.getId(); String content = pubSubMessage.getContent(); int sequence = pubSubMessage.getSequence(); PubSubMessage message = new PubSubMessage(id, content, new Metadata(null, returnInfo), sequence); emittedIDs.put(ImmutablePair.of(id, sequence), drpcID); return Collections.singletonList(message); }
/** * Creates and initializes a Subscriber that reads from the DRPC servers. Intended to be used inside a Storm * spout in a Storm topology. * * @param config The config containing the String function in {@link DRPCConfig#DRPC_FUNCTION}, the Storm configuration * {@link Map} as {@link com.yahoo.bullet.storm.BulletStormConfig#STORM_CONFIG} and the Storm * {@link TopologyContext} as {@link com.yahoo.bullet.storm.BulletStormConfig#STORM_CONTEXT}. * @param maxUnCommittedQueries The maximum number of queries that can be read without committing them. */ public DRPCQuerySubscriber(BulletConfig config, int maxUnCommittedQueries) { super(maxUnCommittedQueries); collector = new DRPCOutputCollector(); emittedIDs = new HashMap<>(); // Get the Storm Config that has all the relevant cluster settings and properties Map stormConfig = config.getRequiredConfigAs(DRPCConfig.STORM_CONFIG, Map.class); // Get the TopologyContext TopologyContext context = config.getRequiredConfigAs(DRPCConfig.STORM_CONTEXT, TopologyContext.class); // Wrap the collector in a SpoutOutputCollector (it just delegates to the underlying DRPCOutputCollector) SpoutOutputCollector spoutOutputCollector = new SpoutOutputCollector(collector); // Get the DRPC function we should subscribe to String function = config.getRequiredConfigAs(DRPCConfig.DRPC_FUNCTION, String.class); spout = new DRPCSpout(function); spout.open(stormConfig, context, spoutOutputCollector); }
@Override public void close() { log.warn("Failing all pending requests: {}", emittedIDs); emittedIDs.values().forEach(spout::fail); log.info("Closing spout..."); spout.close(); } }
@Override public List<PubSubMessage> getMessages() throws PubSubException { // Try and read from DRPC. The DRPCSpout does a sleep for 1 ms if there are no tuples, so we don't have to do it. spout.nextTuple(); if (!collector.haveOutput()) { return null; } // The DRPCSpout only should have emitted one tuple List<List<Object>> tuples = collector.reset(); log.debug("Have a message through DRPC {}", tuples); List<Object> tupleAndID = tuples.get(0); // The first object is the actual DRPCSpout tuple and the second is the DRPC messageID. List<Object> tuple = (List<Object>) tupleAndID.get(0); Object drpcID = tupleAndID.get(1); // The first object in the tuple is our PubSubMessage as JSON String pubSubMessageJSON = (String) tuple.get(0); // The second object in the tuple is the serialized returnInfo added by the DRPCSpout String returnInfo = (String) tuple.get(1); log.debug("Read message\n{}\nfrom DRPC with return information {}", pubSubMessageJSON, returnInfo); PubSubMessage pubSubMessage = PubSubMessage.fromJSON(pubSubMessageJSON); // Add returnInfo as metadata. Cannot add it to pubSubMessage String id = pubSubMessage.getId(); String content = pubSubMessage.getContent(); int sequence = pubSubMessage.getSequence(); PubSubMessage message = new PubSubMessage(id, content, new Metadata(null, returnInfo), sequence); emittedIDs.put(ImmutablePair.of(id, sequence), drpcID); return Collections.singletonList(message); }
/**
 * Creates a DRPC stream for the given DRPC function name.
 *
 * @param function The DRPC function to serve.
 * @return The new stream.
 */
public Stream newDRPCStream(String function) {
    // Build the spout here and delegate to the DRPCSpout-based overload.
    DRPCSpout spout = new DRPCSpout(function);
    return newDRPCStream(spout);
}
private static Map<Node, String> genSpoutIds(Collection<SpoutNode> spoutNodes) { Map<Node, String> ret = new HashMap<>(); int ctr = 0; for(SpoutNode n: spoutNodes) { if (n.type == SpoutNode.SpoutType.BATCH) { // if Batch spout then id contains txId ret.put(n, "spout-" + n.txId); } else if (n.type == SpoutNode.SpoutType.DRPC){ //if DRPC spout then id contains function ret.put(n, "spout-" + ((DRPCSpout) n.spout).get_function() + ctr); ctr++; } else { ret.put(n, "spout" + ctr); ctr++; } } return ret; }
/**
 * Builds the topology against a local in-process DRPC server.
 *
 * @param drpc The local DRPC instance to attach the spout to.
 * @return The built topology.
 */
public StormTopology createLocalTopology(ILocalDRPC drpc) {
    DRPCSpout localSpout = new DRPCSpout(function, drpc);
    return createTopology(localSpout);
}
/**
 * Builds the topology against the cluster's remote DRPC servers.
 *
 * @return The built topology.
 */
public StormTopology createRemoteTopology() {
    DRPCSpout remoteSpout = new DRPCSpout(function);
    return createTopology(remoteSpout);
}
/**
 * Submits an example DRPC topology ("exclamation") and exercises it with two requests.
 *
 * @param args Unused command-line arguments.
 * @throws Exception If topology submission or the DRPC calls fail.
 */
public static void main(String[] args) throws Exception {
    // Wire up: DRPC spout -> exclamation bolts -> bolts that return results to the DRPC server.
    TopologyBuilder builder = new TopologyBuilder();
    builder.setSpout("drpc", new DRPCSpout("exclamation"));
    builder.setBolt("exclaim", new ExclamationBolt(), 3).shuffleGrouping("drpc");
    builder.setBolt("return", new ReturnResults(), 3).shuffleGrouping("exclaim");
    Config conf = new Config();
    StormSubmitter.submitTopology("exclaim", conf, builder.createTopology());
    // Issue two sample DRPC calls; the client is closed automatically.
    try (DRPCClient drpc = DRPCClient.getConfiguredClient(conf)) {
        System.out.println(drpc.execute("exclamation", "aaa"));
        System.out.println(drpc.execute("exclamation", "bbb"));
    }
}
/**
 * Creates a DRPC stream served by the given DRPC function.
 *
 * @param function The name of the DRPC function.
 * @return The new stream.
 */
public Stream newDRPCStream(String function) {
    // Construct the spout for this function and hand off to the spout-based overload.
    return newDRPCStream(new DRPCSpout(function));
}
/**
 * Builds the topology against a local in-process DRPC server.
 *
 * @param drpc The local DRPC instance to attach the spout to.
 * @return The built topology.
 */
public StormTopology createLocalTopology(ILocalDRPC drpc) {
    DRPCSpout localSpout = new DRPCSpout(_function, drpc);
    return createTopology(localSpout);
}
/**
 * Builds the topology against the cluster's remote DRPC servers.
 *
 * @return The built topology.
 */
public StormTopology createRemoteTopology() {
    DRPCSpout remoteSpout = new DRPCSpout(_function);
    return createTopology(remoteSpout);
}