/**
 * Returns the host and TLS port joined as a single string usable in a URI,
 * or {@code null} when the TLS port is disabled.
 */
public String getHostAndTlsPort()
{
  if (!enableTlsPort) {
    return null;
  }
  return HostAndPort.fromParts(host, tlsPort).toString();
}
/**
 * Returns host and port together as something that can be used as part of a URI.
 * Returns {@code null} when the plaintext port is disabled; a negative configured
 * port means "no explicit port", so only the host is rendered in that case.
 */
public String getHostAndPort()
{
  if (!enablePlaintextPort) {
    return null;
  }
  HostAndPort address = plaintextPort < 0
      ? HostAndPort.fromString(host)
      : HostAndPort.fromParts(host, plaintextPort);
  return address.toString();
}
/**
 * Renders this instance as {@code "<scheme>:<host:port>"}.
 */
@Override
public String toString()
{
  String authority = hostAndPort.toString();
  return StringUtils.format("%s:%s", scheme, authority);
}
/**
 * Builds a ZooKeeper quorum entry ("host:port") from the given address, filling in
 * {@code defaultPort} when the address does not already carry an explicit port.
 *
 * @param hostAndPort host with an optional port
 * @param defaultPort port to use when {@code hostAndPort} has none
 * @return the address rendered as "host:port" (IPv6 hosts keep their brackets)
 */
public static String buildQuorumEntry(HostAndPort hostAndPort, int defaultPort)
{
  // withDefaultPort() returns the same instance when a port is already present, so
  // this is behaviorally identical to the manual hasPort() branch it replaces —
  // including bracket handling, since toString() brackets IPv6 hosts either way.
  return hostAndPort.withDefaultPort(defaultPort).toString();
}
/**
 * Retrieves the host and port from the specified URI.
 *
 * @param uriString URI to retrieve the host and port from
 * @return the host and port from the URI as a String
 * @throws URISyntaxException if the specified URI is invalid or cannot be parsed
 */
public static String getHostAndPortFromUri(String uriString) throws URISyntaxException
{
  URI uri = new URI(uriString);
  int port = uri.getPort();
  // URI reports -1 when no explicit port is present; return just the host then.
  return port == -1
      ? uri.getHost()
      : HostAndPort.fromParts(uri.getHost(), port).toString();
}
/**
 * Convenience overload: renders the broker address as "host:port" and delegates
 * to the String-based factory.
 */
private SimpleConsumer getSimpleConsumer(HostAndPort hostAndPort)
{
  String brokerAddress = hostAndPort.toString();
  return getSimpleConsumer(brokerAddress);
}
/**
 * Delegates to the String-based overload using the "host:port" rendering of the
 * supplied address.
 */
private SimpleConsumer getSimpleConsumer(HostAndPort hostAndPort)
{
  return getSimpleConsumer(hostAndPort.toString());
}
/**
 * Applies this object's HDFS/Hadoop client settings onto the supplied
 * {@code Configuration}, on top of the base resource configuration.
 *
 * @param config configuration to mutate in place
 */
public void updateConfiguration(Configuration config) {
    copy(resourcesConfiguration, config);

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, NoOpDNSToSwitchMapping.class, DNSToSwitchMapping.class);

    // Route Hadoop RPC traffic through a SOCKS proxy when one is configured.
    if (socksProxy != null) {
        config.setClass(HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, SocksSocketFactory.class, SocketFactory.class);
        config.set(HADOOP_SOCKS_SERVER_KEY, socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings(DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get(DFS_DOMAIN_SOCKET_PATH_KEY, "").trim().isEmpty()) {
        config.setBooleanIfUnset(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    }

    // Timeouts and intervals are held as Durations; Hadoop expects millisecond ints,
    // and toIntExact guards against silent truncation of oversized values.
    config.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, toIntExact(dfsTimeout.toMillis()));
    config.setInt(IPC_PING_INTERVAL_KEY, toIntExact(ipcPingInterval.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_TIMEOUT_KEY, toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, dfsConnectMaxRetries);

    if (isHdfsWireEncryptionEnabled) {
        // NOTE(review): "privacy" is the strongest hadoop.rpc.protection level — presumably
        // chosen to pair with on-the-wire data encryption below; confirm against cluster policy.
        config.set(HADOOP_RPC_PROTECTION, "privacy");
        config.setBoolean("dfs.encrypt.data.transfer", true);
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);
    config.setInt(LineRecordReader.MAX_LINE_LENGTH, textMaxLineLength);

    configureCompression(config, compressionCodec);

    s3ConfigurationUpdater.updateConfiguration(config);
}
public void testToString() { // With ports. assertEquals("foo:101", "" + HostAndPort.fromString("foo:101")); assertEquals(":102", HostAndPort.fromString(":102").toString()); assertEquals("[1::2]:103", HostAndPort.fromParts("1::2", 103).toString()); assertEquals("[::1]:104", HostAndPort.fromString("[::1]:104").toString()); // Without ports. assertEquals("foo", "" + HostAndPort.fromString("foo")); assertEquals("", HostAndPort.fromString("").toString()); assertEquals("[1::2]", HostAndPort.fromString("1::2").toString()); assertEquals("[::1]", HostAndPort.fromString("[::1]").toString()); // Garbage in, garbage out. assertEquals("[::]]:107", HostAndPort.fromParts("::]", 107).toString()); assertEquals("[[:]]:108", HostAndPort.fromString("[[:]]:108").toString()); }
/**
 * Builds a {@link FileBlockInfo} populated with random data: a random block info,
 * a random offset, and between 0 and 9 random "host:port" UFS location strings.
 */
public static FileBlockInfo createRandom() {
    Random random = new Random();
    BlockInfo blockInfo = BlockInfoTest.createRandom();
    long offset = random.nextLong();

    List<String> ufsLocations = new ArrayList<>();
    int locationCount = random.nextInt(10);
    for (int i = 0; i < locationCount; i++) {
        String host = CommonUtils.randomAlphaNumString(random.nextInt(10));
        int port = random.nextInt(Constants.MAX_PORT);
        ufsLocations.add(HostAndPort.fromParts(host, port).toString());
    }

    FileBlockInfo result = new FileBlockInfo();
    result.setBlockInfo(blockInfo);
    result.setOffset(offset);
    result.setUfsLocations(ufsLocations);
    return result;
}
}
/**
 * Removes a port from a host+port if the string contains the specified port. If the host+port does not contain
 * a port, or contains another port, the string is returned unaltered. For example, if hostWithPort is the
 * string {@code www.website.com:443}, this method will return {@code www.website.com}.
 *
 * <b>Note:</b> The hostWithPort string is not a URI and should not contain a scheme or resource. This method does
 * not attempt to validate the specified host; it <i>might</i> throw IllegalArgumentException if there was a problem
 * parsing the hostname, but makes no guarantees. In general, it should be validated externally, if necessary.
 *
 * @param hostWithPort string containing a hostname and optional port
 * @param portNumber port to remove from the string
 * @return string with the specified port removed, or the original string if it did not contain the portNumber
 */
public static String removeMatchingPort(String hostWithPort, int portNumber) {
    HostAndPort parsedHostAndPort = HostAndPort.fromString(hostWithPort);
    if (parsedHostAndPort.hasPort() && parsedHostAndPort.getPort() == portNumber) {
        // HostAndPort.getHost() (formerly getHostText()) strips brackets from ipv6 addresses,
        // so reparse using fromHost to restore them in the returned string
        return HostAndPort.fromHost(parsedHostAndPort.getHost()).toString();
    } else {
        return hostWithPort;
    }
}
/**
 * Converts a proto type to a wire type.
 *
 * @param fileBlockPInfo the proto type to convert
 * @return the converted wire type
 */
public static FileBlockInfo fromProto(alluxio.grpc.FileBlockInfo fileBlockPInfo) {
    FileBlockInfo wireInfo = new FileBlockInfo()
        .setBlockInfo(fromProto(fileBlockPInfo.getBlockInfo()))
        .setOffset(fileBlockPInfo.getOffset());
    if (fileBlockPInfo.getUfsLocationsCount() > 0) {
        // Newer protos carry pre-rendered "host:port" strings directly.
        return wireInfo.setUfsLocations(fileBlockPInfo.getUfsStringLocationsList());
    }
    // Fall back to the legacy address list and render each entry as "host:port".
    return wireInfo.setUfsLocations(map(
        addr -> HostAndPort.fromParts(addr.getHost(), addr.getDataPort()).toString(),
        fileBlockPInfo.getUfsLocationsList()));
}
// NOTE(review): fragment — the enclosing method and this try block's catch/finally
// are outside the visible span.
SimpleConsumer consumer = null;
try {
    log.info("Finding new leader from Kafka brokers, try broker [%s]", broker.toString());
    // Open a short-lived consumer against this broker purely to issue a metadata request.
    consumer = new SimpleConsumer(broker.getHostText(), broker.getPort(), SO_TIMEOUT_MILLIS, BUFFER_SIZE, leaderLookupClientId);
    // Ask the broker for metadata on the single topic; the response identifies the leader.
    TopicMetadataResponse resp = consumer.send(new TopicMetadataRequest(Collections.singletonList(topic)));
// NOTE(review): fragment — the enclosing test method is outside the visible span.
// Resolve block locations through the Alluxio Hadoop filesystem shim; exactly one
// block location is expected for this file/range.
BlockLocation[] locations = alluxioHadoopFs.getFileBlockLocations(file, start, len);
assertEquals(1, locations.length);
// Sort the expected worker names lexically (by toString) for order-independent comparison.
Collections.sort(expectedWorkerNames, (x, y) -> x.toString().compareTo(y.toString()));
String[] actualNames = locations[0].getNames();
String[] actualHosts = locations[0].getHosts();
// NOTE(review): fragment — the enclosing call/statement is not fully visible here.
AsyncQueryForwardingServlet.makeURI(
    "http",
    // IPv6 literal host: fromParts/toString adds the brackets a URI authority requires.
    HostAndPort.fromParts("2a00:1450:4007:805::1007", 1234).toString(),
    "/some/path",
    // NOTE(review): the "¶" below looks like a mangled "&para" HTML entity — the original
    // was presumably "param=1&param2=%E2%82%AC"; confirm against the upstream source before
    // relying on this literal. Left byte-identical here.
    "param=1¶m2=%E2%82%AC"
/**
 * Leader latch whose participant id is this instance's advertised "host:port";
 * every supplied listener is registered at construction time.
 */
@Inject
public SingularityLeaderLatch(@Named(SingularityMainModule.HTTP_HOST_AND_PORT) final HostAndPort httpHostAndPort,
                              final CuratorFramework curatorFramework,
                              final Set<LeaderLatchListener> listeners) throws Exception {
    super(checkNotNull(curatorFramework, "curatorFramework is null"), LEADER_PATH, httpHostAndPort.toString());
    checkNotNull(listeners, "listeners is null");
    listeners.forEach(this::addListener);
}
/**
 * Builds the full znode path for the given server as "&lt;parentPath&gt;/&lt;host:port&gt;".
 */
@Override
public String getFullPathToNode(HostAndPort hostAndPort) {
    return getParentPath() + "/" + hostAndPort.toString();
}
}
// NOTE(review): fragment — the enclosing method is outside the visible span.
// Record which partition this work unit covers and where its leader broker lives.
workUnit.setProp(PARTITION_ID, partition.getId());
workUnit.setProp(LEADER_ID, partition.getLeader().getId());
// Leader address is persisted in "host:port" form via HostAndPort.toString().
workUnit.setProp(LEADER_HOSTANDPORT, partition.getLeader().getHostAndPort().toString());
// Low/high watermarks bound the offset range this work unit will consume.
workUnit.setProp(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY, offsets.getStartOffset());
workUnit.setProp(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY, offsets.getLatestOffset());
/**
 * Registers this PQS instance in ZooKeeper as an ephemeral "host:port" node whose
 * payload is the same "host:port" string, then applies the configured ACLs.
 * Throws if the ACL update does not take effect.
 */
@Override
public void registerServer(LoadBalanceZookeeperConf configuration, int pqsPort, String zookeeperConnectString, String pqsHost) throws Exception {
    // Connect with exponential backoff (1s base delay, up to 10 retries).
    this.client = CuratorFrameworkFactory.newClient(zookeeperConnectString, new ExponentialBackoffRetry(1000, 10));
    this.client.start();

    HostAndPort hostAndPort = HostAndPort.fromParts(pqsHost, pqsPort);
    String path = configuration.getFullPathToNode(hostAndPort);
    byte[] payload = hostAndPort.toString().getBytes(StandardCharsets.UTF_8);
    this.client.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(path, payload);

    // A null Stat from setACL means the ACLs could not be applied; treat as fatal.
    Stat stat = this.client.setACL().withACL(configuration.getAcls()).forPath(path);
    if (stat != null) {
        LOG.info(" node created with right ACL");
    } else {
        LOG.error("could not create node with right ACL. So, system would exit now.");
        throw new RuntimeException(" Unable to connect to Zookeeper");
    }
}
// NOTE(review): fragment — the enclosing test method is outside the visible span.
// The node's advertised host:port should equal the default host after HostAndPort
// normalization (e.g. IPv6 bracketing) — hence the fromString/toString round-trip.
Assert.assertEquals(HostAndPort.fromString(DruidNode.getDefaultHost()).toString(), node.getHostAndPort());