@Override public void accept(T protocol) { InetSocketAddress socketAddress = this.getSocketBinding().getSocketAddress(); protocol.setBindAddress(socketAddress.getAddress()); protocol.setBindPort(socketAddress.getPort()); protocol.setThreadFactory(new ClassLoaderThreadFactory(new DefaultThreadFactory("jgroups", false, true), JChannelFactory.class.getClassLoader())); protocol.setThreadPool(this.threadPoolFactory.get().apply(protocol.getThreadFactory())); protocol.setInternalThreadPoolThreadFactory(new ClassLoaderThreadFactory(new DefaultThreadFactory("jgroups-int", false, false), JChannelFactory.class.getClassLoader())); // Because we provide the transport with a thread pool, TP.init() won't auto-create the internal thread pool // So create one explicitly matching the logic in TP.init() but with our thread factory QueuelessThreadPoolFactory factory = new QueuelessThreadPoolFactory() .setMaxThreads(Math.max(4, Runtime.getRuntime().availableProcessors())) .setKeepAliveTime(30000) ; protocol.setInternalThreadPool(factory.apply(protocol.getInternalThreadPoolThreadFactory())); protocol.setValue("enable_diagnostics", this.diagnosticsSocketBinding != null); if (this.diagnosticsSocketBinding != null) { InetSocketAddress address = this.diagnosticsSocketBinding.get().getSocketAddress(); protocol.setValue("diagnostics_addr", address.getAddress()); protocol.setValue("diagnostics_port", address.getPort()); } }
setInAllThreadFactories(cluster_name != null? cluster_name.toString() : null, local_addr, thread_naming_pattern); diagnostics_ttl, log, getSocketFactory(), getThreadFactory(), diagnostics_passcode) .transport(this); int num_cores=Runtime.getRuntime().availableProcessors(); int max_internal_size=Math.max(4, num_cores); log.debug("thread pool min/max/keep-alive: %d/%d/%d use_fork_join=%b, internal pool: %d/%d/%d (%d cores available)", thread_pool_min_threads, thread_pool_max_threads, thread_pool_keep_alive_time, use_fork_join_pool, 0, max_internal_size, 30000, num_cores); thread_pool=createThreadPool(thread_pool_min_threads, thread_pool_max_threads, thread_pool_keep_alive_time, "abort", new SynchronousQueue<>(), thread_factory, log, use_fork_join_pool, use_common_fork_join_pool); internal_pool=createThreadPool(0, max_internal_size, 30000, "abort", new SynchronousQueue<>(), internal_thread_factory, log, false, false); m.put("external_port", external_port); if(!m.isEmpty()) up(new Event(Event.CONFIG, m)); setMessageProcessingPolicy(message_processing_policy); else msg_processing_policy.init(this);
/**
 * Serializes a single message and hands it to the transport. Send failures are
 * logged (trace level for socket errors, error level otherwise) but never propagated.
 */
protected void sendSingleMessage(final Message msg) {
    final Address dest = msg.getDest();
    final boolean multicast = dest == null;
    try {
        Util.writeMessage(msg, output, multicast);
        transport.doSend(output.buffer(), 0, output.position(), dest);
        if (transport.statsEnabled())
            transport.incrNumSingleMsgsSent(1);
    }
    catch (SocketException | SocketTimeoutException sock_ex) {
        // transient socket-level problems are expected under churn; keep at trace level
        log.trace(Util.getMessage("SendFailure"), transport.localAddress(), (multicast ? "cluster" : dest),
                  msg.size(), sock_ex.toString(), msg.printHeaders());
    }
    catch (Throwable t) {
        log.error(Util.getMessage("SendFailure"), transport.localAddress(), (multicast ? "cluster" : dest),
                  msg.size(), t.toString(), msg.printHeaders());
    }
}
/**
 * Binds this component to its transport and pre-sizes the output buffer to the
 * transport's max bundle size plus per-message overhead.
 */
public void init(TP transport) {
    this.transport = transport;
    this.log = transport.getLog();
    this.output = new ByteArrayDataOutputStream(transport.getMaxBundleSize() + MSG_OVERHEAD);
}

/** No startup work is required. */
public void start() {
}
/**
 * Serializes a list of messages for the same destination as one bundle and sends
 * it via the transport; failures are logged and swallowed.
 */
protected void sendMessageList(final Address dest, final Address src, final List<Message> list) {
    final boolean multicast = dest == null;
    try {
        Util.writeMessageList(dest, src, transport.cluster_name.chars(), list, output, multicast, transport.getId());
        transport.doSend(output.buffer(), 0, output.position(), dest);
    }
    catch (SocketException | SocketTimeoutException sock_ex) {
        // socket-level failures are common while members come and go; debug level only
        log.debug(Util.getMessage("FailureSendingMsgBundle"), transport.localAddress(), sock_ex);
    }
    catch (Throwable t) {
        log.error(Util.getMessage("FailureSendingMsgBundle"), transport.localAddress(), t);
    }
}
/**
 * Creates and connects the channel, wires up the RPC dispatcher, registers the
 * channel in JMX (best effort), and — if other members already exist — fetches
 * the shared config from the coordinator so all members run identical settings.
 */
public void init(String props, String name, AddressGenerator generator, int bind_port) throws Throwable {
    channel = new JChannel(props).addAddressGenerator(generator).setName(name);
    if (bind_port > 0) {
        TP tp = channel.getProtocolStack().getTransport();
        tp.setBindPort(bind_port);
    }
    disp = new RpcDispatcher(channel, this)
            .setMembershipListener(this)
            .setMethodLookup(id -> METHODS[id])
            .setMarshaller(new UPerfMarshaller());
    channel.connect(groupname);
    local_addr = channel.getAddress();

    // JMX registration is purely diagnostic; failure must not abort initialization
    try {
        MBeanServer mbean_server = Util.getMBeanServer();
        JmxConfigurator.registerChannel(channel, mbean_server, "jgroups", channel.getClusterName(), true);
    }
    catch (Throwable ex) {
        System.err.println("registering the channel in JMX failed: " + ex);
    }

    if (members.size() < 2)
        return; // first member in the cluster: nothing to fetch

    Address coord = members.get(0);
    Config config = disp.callRemoteMethod(coord, new MethodCall(GET_CONFIG),
                                          new RequestOptions(ResponseMode.GET_ALL, 5000));
    if (config != null) {
        applyConfig(config);
        System.out.println("Fetched config from " + coord + ": " + config + "\n");
    }
    else
        System.err.println("failed to fetch config from " + coord);
}
// Example: bind a channel to an externally discovered address before connecting.
InetAddress bind_addr; // detect address by using Azure's SDK
// NOTE(review): bind_addr is declared but never assigned here; as written this will
// not compile (use before definite assignment). The Azure SDK lookup is expected to
// fill it in — confirm against the surrounding example text.
JChannel ch=new JChannel("config.xml");
TP transport=ch.getProtocolStack().getTransport();
// the bind address must be set on the transport before connect(), or the stack
// binds to its configured/default address instead
transport.setBindAddress(bind_addr);
ch.connect("mycluster");
return; Address dest=msg_queue[start].getDest(); int numMsgs=1; for(int i=start + 1; i < MSG_BUF_SIZE; ++i) { Message msg=msg_queue[i]; if(msg != null && (dest == msg.getDest() || (Objects.equals(dest, msg.getDest())))) { msg.setDest(dest); // avoid further equals() calls numMsgs++; Util.writeMessageListHeader(dest, msg_queue[start].getSrc(), transport.cluster_name.chars(), numMsgs, output, dest == null); for(int i=start; i < MSG_BUF_SIZE; ++i) { Message msg=msg_queue[i]; if(msg != null && msg.getDest() == dest) { msg.writeToNoAddrs(msg.getSrc(), output, transport.getId()); msg_queue[i]=null; transport.doSend(output.buffer(), 0, output.position(), dest); log.error("Failed to send message", e);
int max_bundle_size=transport.getMaxBundleSize(); byte[] cluster_name=transport.cluster_name.chars(); int sent_msgs=0; break; Address dest=msg.dest(); try { output.position(0); Util.writeMessageListHeader(dest, msg.src(), cluster_name, 1, output, dest == null); output.position(current_pos); transport.doSend(output.buffer(), 0, output.position(), dest); if(transport.statsEnabled()) transport.incrBatchesSent(num_msgs); log.error("failed to send message(s)", ex);
/**
 * (Re-)registers this member with the configured GossipRouter(s): tears down any
 * existing stubs, builds a fresh stub manager plus one stub per initial host, then
 * connects them all. Logs an error and does nothing if group or local address is unset.
 */
public void handleConnect() {
    if (cluster_name == null || local_addr == null) {
        log.error(Util.getMessage("GroupaddrOrLocaladdrIsNullCannotRegisterWithGossipRouterS"));
        return;
    }
    InetAddress bind_addr = getTransport().getBindAddress();
    log.trace("registering " + local_addr + " under " + cluster_name + " with GossipRouter");
    // drop any stale stubs before creating the new manager
    stubManager.destroyStubs();
    PhysicalAddress physical_addr = (PhysicalAddress) down_prot.down(new Event(Event.GET_PHYSICAL_ADDRESS, local_addr));
    stubManager = new RouterStubManager(this, cluster_name, local_addr, NameCache.get(local_addr),
                                        physical_addr, reconnect_interval)
            .useNio(this.use_nio);
    for (InetSocketAddress host : initial_hosts) {
        RouterStub stub = stubManager.createAndRegisterStub(new IpAddress(bind_addr, 0),
                                                            new IpAddress(host.getAddress(), host.getPort()));
        stub.socketConnectionTimeout(sock_conn_timeout);
    }
    stubManager.connectStubs();
}
public void init() throws Exception { if(xmit_from_random_member && discard_delivered_msgs) { discard_delivered_msgs=false; log.debug("%s: xmit_from_random_member set to true: changed discard_delivered_msgs to false", local_addr); transport.registerProbeHandler(this); if(!transport.supportsMulticasting()) { if(use_mcast_xmit) { log.debug(Util.getMessage("NoMulticastTransport"), "use_mcast_xmit", transport.getName(), "use_mcast_xmit"); use_mcast_xmit=false; log.debug(Util.getMessage("NoMulticastTransport"), "use_mcast_xmit_req", transport.getName(), "use_mcast_xmit_req"); use_mcast_xmit_req=false; int estimated_max_msgs_in_xmit_req=(transport.getMaxBundleSize() -50) * Global.LONG_SIZE; int old_max_xmit_size=max_xmit_req_size; if(max_xmit_req_size <= 0)
tp_bind_port=transport.getBindPort(); if(tp_bind_port <= 0) throw new IllegalArgumentException(String.format("%s only works with %s.bind_port > 0", log.warn("namespace not set; clustering disabled"); return; // no further initialization necessary log.info("namespace %s set; clustering enabled", namespace); Map<String,String> headers=new HashMap<>(); StreamProvider streamProvider; log.debug("KubePING configuration: " + toString());
public void init() throws Exception { super.init(); TP transport=getTransport(); sends_can_block=transport instanceof TCP; // UDP and TCP_NIO2 won't block time_service=transport.getTimeService(); if(time_service == null) throw new IllegalStateException("time service from transport is null"); last_sync_sent=new ExpiryCache<>(sync_min_interval); // max bundle size (minus overhead) divided by <long size> times bits per long // Example: for 8000 missing messages, SeqnoList has a serialized size of 1012 bytes, for 64000 messages, the // serialized size is 8012 bytes. Therefore, for a serialized size of 64000 bytes, we can retransmit a max of // 8 * 64000 = 512'000 seqnos // see SeqnoListTest.testSerialization3() int estimated_max_msgs_in_xmit_req=(transport.getMaxBundleSize() -50) * Global.LONG_SIZE; int old_max_xmit_size=max_xmit_req_size; if(max_xmit_req_size <= 0) max_xmit_req_size=estimated_max_msgs_in_xmit_req; else max_xmit_req_size=Math.min(max_xmit_req_size, estimated_max_msgs_in_xmit_req); if(old_max_xmit_size != max_xmit_req_size) log.trace("%s: set max_xmit_req_size from %d to %d", local_addr, old_max_xmit_size, max_xmit_req_size); boolean regular_pool_enabled=(boolean)transport.getValue("thread_pool_enabled"); if(!regular_pool_enabled) log.info("the thread pool is disabled; %s could be removed (JGRP-2069)", getClass().getSimpleName()); }
/**
 * Validates frag_size (must be > 0 and smaller than the transport's max bundle
 * size) and broadcasts it down the stack as a CONFIG event.
 *
 * Fixes: removed the dead local {@code old_frag_size} (it was always a copy of
 * {@code frag_size}), and replaced the misleading "new frag_size" wording — there
 * is no "new" value here, only the configured one. The range check now throws
 * IllegalArgumentException, consistent with the bundle-size check below (still a
 * subclass of the declared Exception, so callers are unaffected).
 *
 * @throws IllegalArgumentException if frag_size is out of range
 */
public void init() throws Exception {
    super.init();
    if(frag_size <= 0)
        throw new IllegalArgumentException("frag_size (" + frag_size + ") has to be > 0");
    TP transport=getTransport();
    if(transport != null) {
        int max_bundle_size=transport.getMaxBundleSize();
        // a fragment must fit into a single bundle, or it could never be sent
        if(frag_size >= max_bundle_size)
            throw new IllegalArgumentException("frag_size (" + frag_size + ") has to be < TP.max_bundle_size ("
                                                 + max_bundle_size + ")");
    }
    // tell the protocols below us (e.g. the transport) which fragment size is in effect
    Map<String,Object> info=new HashMap<>(1);
    info.put("frag_size", frag_size);
    down_prot.down(new Event(Event.CONFIG, info));
}
/**
 * Adds a message to the ring buffer and, when enough bytes have accumulated or no
 * other sender threads are active, wakes the bundler thread to flush.
 * Lock-free: coordination happens via atomic counters, so the statement order below
 * is significant — do not reorder.
 */
public void send(Message msg) throws Exception {
    if(msg == null)
        throw new IllegalArgumentException("message must not be null");
    num_threads.incrementAndGet();
    int tmp_index=getWriteIndex(); // decrements write_permits
    // System.out.printf("[%d] tmp_index=%d\n", Thread.currentThread().getId(), tmp_index);
    if(tmp_index == -1) {
        // buffer full: drop the message rather than block the caller
        log.warn("buf is full (num_permits: %d, bundler: %s)\n", write_permits.get(), toString());
        num_threads.decrementAndGet();
        return;
    }
    buf[tmp_index]=msg;
    long acc_bytes=accumulated_bytes.addAndGet(msg.size());
    int current_threads=num_threads.decrementAndGet();
    boolean no_other_threads=current_threads == 0;
    // wake the bundler either because the batch reached max bundle size (the CAS resets
    // the byte count so only one thread acts on that condition) or because we are the
    // last active sender and nobody else will trigger the flush
    boolean unpark=(acc_bytes >= transport.getMaxBundleSize()
       && accumulated_bytes.compareAndSet(acc_bytes, 0))
      || no_other_threads;

    // only 2 threads at a time should do this (1st cond and 2nd cond), so we have to reduce this to
    // 1 thread as advanceWriteIndex() is not thread safe
    if(unpark && unparking.compareAndSet(false, true)) {
        int num_advanced=advanceWriteIndex();
        size.addAndGet(num_advanced);
        if(num_advanced > 0) {
            Thread thread=bundler_thread.getThread();
            if(thread != null)
                LockSupport.unpark(thread);
        }
        unparking.set(false);
    }
}
ThreadFactory stackFactory = tp.getThreadFactory(); if (stackFactory == null) stackFactory = new DefaultThreadFactory(Util.getGlobalThreadGroup(), "", false); tp.setThreadFactory(stackFactory); ThreadFactory timerFactory = tp.getTimerThreadFactory(); if (timerFactory == null) timerFactory = new LazyThreadFactory(Util.getGlobalThreadGroup(), "Timer", true, true); tp.setTimerThreadFactory(timerFactory); if (tp.isDefaulThreadPoolEnabled()) ThreadFactory defaultPoolFactory = tp.getDefaultThreadPoolThreadFactory(); if (defaultPoolFactory == null) pool_thread_group=new ThreadGroup(Util.getGlobalThreadGroup(), "Thread Pools"); defaultPoolFactory = new DefaultThreadFactory(pool_thread_group, "Incoming", false, true); tp.setThreadFactory(defaultPoolFactory); if (tp.isOOBThreadPoolEnabled()) ThreadFactory oobPoolFactory = tp.getOOBThreadPoolThreadFactory(); if (oobPoolFactory == null) pool_thread_group=new ThreadGroup(Util.getGlobalThreadGroup(), "Thread Pools"); oobPoolFactory = new DefaultThreadFactory(pool_thread_group, "OOB", false, true); tp.setThreadFactory(oobPoolFactory);
SocketBinding binding = transportConfig.getSocketBinding(); if (binding != null) { SocketFactory factory = transport.getSocketFactory(); if (!(factory instanceof ManagedSocketFactory)) { transport.setSocketFactory(new ManagedSocketFactory(factory, binding.getSocketBindings())); if (!(transport.getThreadFactory() instanceof ThreadFactoryAdapter)) { transport.setThreadFactory(new ThreadFactoryAdapter(threadFactory)); if (!(transport.getDefaultThreadPool() instanceof ManagedExecutorService)) { transport.setDefaultThreadPool(new ManagedExecutorService(defaultExecutor)); if (!(transport.getOOBThreadPool() instanceof ManagedExecutorService)) { transport.setOOBThreadPool(new ManagedExecutorService(oobExecutor)); if (!(transport.getTimer() instanceof TimerSchedulerAdapter)) { this.setValue(transport, "timer", new TimerSchedulerAdapter(new ManagedScheduledExecutorService(timerExecutor)));
/**
 * Queues a message for bundling; flushes first if adding it would exceed the max
 * bundle size, and flushes again if this thread is the last concurrent sender, so
 * the message is never left sitting in the queue indefinitely.
 */
public void send(Message msg) throws Exception {
    // register this sender before taking the lock: the last sender to leave
    // (decrement to 0 below) is responsible for flushing
    num_senders.incrementAndGet();
    long size=msg.size();
    lock.lock();
    try {
        if(count + size >= transport.getMaxBundleSize())
            sendBundledMessages(); // flush the current bundle; our message starts the next one
        addMessage(msg, size);
        // at this point, we haven't sent our message yet !
        if(num_senders.decrementAndGet() == 0) // no other sender threads present at this time
            sendBundledMessages();
        // else there are other sender threads waiting, so our message will be sent by a different thread
    }
    finally {
        lock.unlock();
    }
}
}
/**
 * Bundler main loop: blocks for the first message, then drains the queue without
 * blocking, flushing whenever the destination changes or the accumulated size
 * would exceed the max bundle size, and finally flushes whatever remains.
 */
public void run() {
    while(running) {
        Message msg=null;
        try {
            if((msg=queue.take()) == null) // block until first message is available
                continue;
            long size=msg.size();
            if(count + size >= transport.getMaxBundleSize()) {
                // pending bundle would overflow: record stats and flush before adding
                num_sends_because_full_queue++;
                fill_count.add(count);
                _sendBundledMessages();
            }
            for(;;) {
                Address dest=msg.dest();
                // flush when the destination changes or the bundle would overflow
                if(!Util.match(dest, target_dest) || count + size >= transport.getMaxBundleSize())
                    _sendBundledMessages();
                _addMessage(msg, size);
                msg=queue.poll(); // non-blocking: drain only what is immediately available
                if(msg == null)
                    break;
                size=msg.size();
            }
            _sendBundledMessages(); // queue drained; flush the remainder
        }
        catch(Throwable t) {
            // NOTE(review): every error — including InterruptedException from take() —
            // is silently swallowed to keep the loop alive; consider at least logging
            // unexpected throwables here.
        }
    }
}
/**
 * Validates configuration, caches the transport's bind port (used when building
 * member addresses), and lazily creates a DNS resolver when none was injected:
 * the default resolver if no DNS server address is configured, otherwise one
 * targeting that address.
 */
@Override
public void init() throws Exception {
    super.init();
    validateProperties();
    transportPort = getTransport().getBindPort();
    if (transportPort <= 0) {
        log.warn("Unable to discover transport port. This may prevent members from being discovered.");
    }
    if (dns_resolver == null) {
        boolean noExplicitServer = dns_address == null || dns_address.length() == 0;
        dns_resolver = noExplicitServer
                ? new DefaultDNSResolver(dns_context_factory, dns_address)
                : new AddressedDNSResolver(dns_context_factory, dns_address);
    }
}