/**
 * Builds a DescribeConfigs request at the given protocol version covering one
 * broker resource and one topic resource.
 */
private DescribeConfigsRequest createDescribeConfigsRequest(int version) {
    ConfigResource broker = new ConfigResource(ConfigResource.Type.BROKER, "0");
    ConfigResource topic = new ConfigResource(ConfigResource.Type.TOPIC, "topic");
    return new DescribeConfigsRequest.Builder(asList(broker, topic)).build((short) version);
}
/**
 * Serializes this response: throttle time plus one (type, name, error) struct
 * per resource in {@code errors}.
 */
@Override
protected Struct toStruct(short version) {
    Struct struct = new Struct(ApiKeys.ALTER_CONFIGS.responseSchema(version));
    struct.set(THROTTLE_TIME_MS, throttleTimeMs);
    List<Struct> perResource = new ArrayList<>(errors.size());
    for (Map.Entry<ConfigResource, ApiError> e : errors.entrySet()) {
        ConfigResource res = e.getKey();
        Struct resStruct = struct.instance(RESOURCES_KEY_NAME);
        resStruct.set(RESOURCE_TYPE_KEY_NAME, res.type().id());
        resStruct.set(RESOURCE_NAME_KEY_NAME, res.name());
        // ApiError writes its own error-code/message fields into the struct.
        e.getValue().write(resStruct);
        perResource.add(resStruct);
    }
    struct.set(RESOURCES_KEY_NAME, perResource.toArray(new Struct[0]));
    return struct;
}
@Override public AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options) { final Map<ConfigResource, KafkaFutureImpl<Void>> allFutures = new HashMap<>(); // We must make a separate AlterConfigs request for every BROKER resource we want to alter // and send the request to that specific broker. Other resources are grouped together into // a single request that may be sent to any broker. final Collection<ConfigResource> unifiedRequestResources = new ArrayList<>(); for (ConfigResource resource : configs.keySet()) { if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) { NodeProvider nodeProvider = new ConstantNodeIdProvider(Integer.parseInt(resource.name())); allFutures.putAll(alterConfigs(configs, options, Collections.singleton(resource), nodeProvider)); } else unifiedRequestResources.add(resource); } if (!unifiedRequestResources.isEmpty()) allFutures.putAll(alterConfigs(configs, options, unifiedRequestResources, new LeastLoadedNodeProvider())); return new AlterConfigsResult(new HashMap<>(allFutures)); }
/**
 * Builds a DescribeConfigs request exercising the three config-name shapes:
 * an explicit name list, a null list (all configs), and an empty list.
 */
private DescribeConfigsRequest createDescribeConfigsRequestWithConfigEntries(int version) {
    Map<ConfigResource, Collection<String>> resources = new HashMap<>();
    resources.put(new ConfigResource(ConfigResource.Type.TOPIC, "topic"), null);
    resources.put(new ConfigResource(ConfigResource.Type.BROKER, "0"), asList("foo", "bar"));
    resources.put(new ConfigResource(ConfigResource.Type.TOPIC, "topic a"), Collections.<String>emptyList());
    DescribeConfigsRequest.Builder builder = new DescribeConfigsRequest.Builder(resources);
    return builder.build((short) version);
}
/**
 * Serializes this request: one struct per resource carrying its type, name and
 * requested config names, plus the include-synonyms flag where the schema has it.
 */
@Override
protected Struct toStruct() {
    Struct struct = new Struct(ApiKeys.DESCRIBE_CONFIGS.requestSchema(version()));
    List<Struct> encoded = new ArrayList<>(resources().size());
    for (Map.Entry<ConfigResource, Collection<String>> e : resourceToConfigNames.entrySet()) {
        ConfigResource res = e.getKey();
        Struct resStruct = struct.instance(RESOURCES_KEY_NAME);
        resStruct.set(RESOURCE_TYPE_KEY_NAME, res.type().id());
        resStruct.set(RESOURCE_NAME_KEY_NAME, res.name());
        // A null name list is preserved as null on the wire (distinct from empty).
        Collection<String> names = e.getValue();
        resStruct.set(CONFIG_NAMES_KEY_NAME, names == null ? null : names.toArray(new String[0]));
        encoded.add(resStruct);
    }
    struct.set(RESOURCES_KEY_NAME, encoded.toArray(new Struct[0]));
    // Only present in schema versions that define it.
    struct.setIfExists(INCLUDE_SYNONYMS, includeSynonyms);
    return struct;
}
// NOTE(review): this span appears to be two disjoint snippets fused together —
// `resource` is declared twice (loop variable vs. `final` re-declaration from an
// entry), so it cannot compile as shown; confirm against the original file.
if (resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) {
    // Track a pending future and remember the resource for per-broker routing.
    brokerFutures.put(resource, new KafkaFutureImpl<>());
    brokerResources.add(resource);
    final KafkaFutureImpl<Config> brokerFuture = entry.getValue();
    final ConfigResource resource = entry.getKey();
    // The per-broker resource name is the broker's node id.
    final int nodeId = Integer.parseInt(resource.name());
    // Route the describe call to that specific broker (anonymous Call continues
    // past the end of this view).
    runnable.call(new Call("describeBrokerConfigs", calcDeadlineMs(now, options.timeoutMs()),
        new ConstantNodeIdProvider(nodeId)) {
/**
 * Builds an AlterConfigs response with one successful broker entry and one
 * failed topic entry, using a 20 ms throttle time.
 */
private AlterConfigsResponse createAlterConfigsResponse() {
    Map<ConfigResource, ApiError> errors = new HashMap<>();
    errors.put(new ConfigResource(ConfigResource.Type.TOPIC, "topic"),
        new ApiError(Errors.INVALID_REQUEST, "This request is invalid"));
    errors.put(new ConfigResource(ConfigResource.Type.BROKER, "0"), ApiError.NONE);
    return new AlterConfigsResponse(20, errors);
}
/**
 * Serializes this request: the validate-only flag plus, for each resource, its
 * type, name and the list of (config name, config value) entries to alter.
 */
@Override
protected Struct toStruct() {
    Struct struct = new Struct(ApiKeys.ALTER_CONFIGS.requestSchema(version()));
    struct.set(VALIDATE_ONLY_KEY_NAME, validateOnly);
    List<Struct> resourceStructs = new ArrayList<>(configs.size());
    for (Map.Entry<ConfigResource, Config> e : configs.entrySet()) {
        ConfigResource res = e.getKey();
        Struct resStruct = struct.instance(RESOURCES_KEY_NAME);
        resStruct.set(RESOURCE_TYPE_KEY_NAME, res.type().id());
        resStruct.set(RESOURCE_NAME_KEY_NAME, res.name());
        Config cfg = e.getValue();
        List<Struct> entryStructs = new ArrayList<>(cfg.entries.size());
        for (ConfigEntry configEntry : cfg.entries) {
            Struct entryStruct = resStruct.instance(CONFIG_ENTRIES_KEY_NAME);
            entryStruct.set(CONFIG_NAME, configEntry.name);
            entryStruct.set(CONFIG_VALUE, configEntry.value);
            entryStructs.add(entryStruct);
        }
        resStruct.set(CONFIG_ENTRIES_KEY_NAME, entryStructs.toArray(new Struct[0]));
        resourceStructs.add(resStruct);
    }
    struct.set(RESOURCES_KEY_NAME, resourceStructs.toArray(new Struct[0]));
    return struct;
}
/**
 * Deserializes an AlterConfigs response: reads the throttle time, then one
 * (resource, error) pair per entry in the resources array.
 */
public AlterConfigsResponse(Struct struct) {
    throttleTimeMs = struct.get(THROTTLE_TIME_MS);
    Object[] resourcesArray = struct.getArray(RESOURCES_KEY_NAME);
    errors = new HashMap<>(resourcesArray.length);
    for (Object obj : resourcesArray) {
        Struct res = (Struct) obj;
        ConfigResource.Type type = ConfigResource.Type.forId(res.getByte(RESOURCE_TYPE_KEY_NAME));
        String name = res.getString(RESOURCE_NAME_KEY_NAME);
        errors.put(new ConfigResource(type, name), new ApiError(res));
    }
}
/**
 * Mock implementation: answers TOPIC resources from {@code allTopics} with
 * already-completed futures; every other resource type is unsupported.
 */
@Override
public DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options) {
    Map<ConfigResource, KafkaFuture<Config>> configDescriptions = new HashMap<>();
    for (ConfigResource resource : resources) {
        if (resource.type() != ConfigResource.Type.TOPIC) {
            throw new UnsupportedOperationException("Not implemented yet");
        }
        // NOTE(review): throws NPE if the topic is absent from allTopics —
        // confirm callers only request existing topics.
        Map<String, String> rawConfigs = allTopics.get(resource.name()).configs;
        List<ConfigEntry> entries = new ArrayList<>();
        for (Map.Entry<String, String> e : rawConfigs.entrySet()) {
            entries.add(new ConfigEntry(e.getKey(), e.getValue()));
        }
        KafkaFutureImpl<Config> future = new KafkaFutureImpl<>();
        future.complete(new Config(entries));
        configDescriptions.put(resource, future);
    }
    return new DescribeConfigsResult(configDescriptions);
}
/**
 * Deserializes a DescribeConfigs request: one (resource, config-name list)
 * pair per entry; a null name array means "describe all configs".
 */
public DescribeConfigsRequest(Struct struct, short version) {
    super(ApiKeys.DESCRIBE_CONFIGS, version);
    Object[] resourcesArray = struct.getArray(RESOURCES_KEY_NAME);
    resourceToConfigNames = new HashMap<>(resourcesArray.length);
    for (Object obj : resourcesArray) {
        Struct res = (Struct) obj;
        ConfigResource.Type type = ConfigResource.Type.forId(res.getByte(RESOURCE_TYPE_KEY_NAME));
        String name = res.getString(RESOURCE_NAME_KEY_NAME);
        // Preserve null (all configs) as distinct from an empty list.
        Object[] namesArray = res.getArray(CONFIG_NAMES_KEY_NAME);
        List<String> names = null;
        if (namesArray != null) {
            names = new ArrayList<>(namesArray.length);
            for (Object n : namesArray) {
                names.add((String) n);
            }
        }
        resourceToConfigNames.put(new ConfigResource(type, name), names);
    }
    // Versions without the INCLUDE_SYNONYMS field default to false.
    this.includeSynonyms = struct.hasField(INCLUDE_SYNONYMS) ? struct.getBoolean(INCLUDE_SYNONYMS) : false;
}
/**
 * Builds a version-0, non-validate-only AlterConfigs request with two entries
 * for the broker resource and an empty entry list for the topic resource.
 */
private AlterConfigsRequest createAlterConfigsRequest() {
    List<AlterConfigsRequest.ConfigEntry> entries = asList(
        new AlterConfigsRequest.ConfigEntry("config_name", "config_value"),
        new AlterConfigsRequest.ConfigEntry("another_name", "another value"));
    Map<ConfigResource, AlterConfigsRequest.Config> configs = new HashMap<>();
    configs.put(new ConfigResource(ConfigResource.Type.BROKER, "0"),
        new AlterConfigsRequest.Config(entries));
    configs.put(new ConfigResource(ConfigResource.Type.TOPIC, "topic"),
        new AlterConfigsRequest.Config(Collections.<AlterConfigsRequest.ConfigEntry>emptyList()));
    return new AlterConfigsRequest((short) 0, configs, false);
}
/**
 * Fetches the configuration of one broker (any broker — the first node
 * returned by describeCluster), bounded by KAFKA_QUERY_TIMEOUT.
 *
 * @throws ConnectException if no brokers or no configs are returned
 */
private Config getKafkaBrokerConfig(AdminClient admin) throws Exception {
    final Collection<Node> clusterNodes = admin.describeCluster().nodes()
        .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (clusterNodes.isEmpty()) {
        throw new ConnectException("No brokers available to obtain default settings");
    }
    // Any node works; query the first one's per-broker config.
    String brokerId = clusterNodes.iterator().next().idString();
    Set<ConfigResource> brokerResource =
        Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, brokerId));
    final Map<ConfigResource, Config> described = admin.describeConfigs(brokerResource).all()
        .get(KAFKA_QUERY_TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    if (described.isEmpty()) {
        throw new ConnectException("No configs have been received");
    }
    return described.values().iterator().next();
}
}
/**
 * Builds a DescribeConfigs response (throttle 200 ms) with two broker config
 * entries and an empty entry list for the topic resource.
 */
private DescribeConfigsResponse createDescribeConfigsResponse() {
    List<DescribeConfigsResponse.ConfigSynonym> synonyms = Collections.emptyList();
    List<DescribeConfigsResponse.ConfigEntry> entries = asList(
        new DescribeConfigsResponse.ConfigEntry("config_name", "config_value",
            DescribeConfigsResponse.ConfigSource.DYNAMIC_BROKER_CONFIG, true, false, synonyms),
        new DescribeConfigsResponse.ConfigEntry("another_name", "another value",
            DescribeConfigsResponse.ConfigSource.DEFAULT_CONFIG, false, true, synonyms));
    Map<ConfigResource, DescribeConfigsResponse.Config> configs = new HashMap<>();
    configs.put(new ConfigResource(ConfigResource.Type.TOPIC, "topic"),
        new DescribeConfigsResponse.Config(ApiError.NONE,
            Collections.<DescribeConfigsResponse.ConfigEntry>emptyList()));
    configs.put(new ConfigResource(ConfigResource.Type.BROKER, "0"),
        new DescribeConfigsResponse.Config(ApiError.NONE, entries));
    return new DescribeConfigsResponse(200, configs);
}
/**
 * Deserializes an AlterConfigs request: the validate-only flag, then for each
 * resource its type, name and list of (config name, config value) entries.
 */
public AlterConfigsRequest(Struct struct, short version) {
    super(ApiKeys.ALTER_CONFIGS, version);
    validateOnly = struct.getBoolean(VALIDATE_ONLY_KEY_NAME);
    Object[] resourcesArray = struct.getArray(RESOURCES_KEY_NAME);
    configs = new HashMap<>(resourcesArray.length);
    for (Object obj : resourcesArray) {
        Struct res = (Struct) obj;
        ConfigResource.Type type = ConfigResource.Type.forId(res.getByte(RESOURCE_TYPE_KEY_NAME));
        String name = res.getString(RESOURCE_NAME_KEY_NAME);
        Object[] entriesArray = res.getArray(CONFIG_ENTRIES_KEY_NAME);
        List<ConfigEntry> entries = new ArrayList<>(entriesArray.length);
        for (Object entryObj : entriesArray) {
            Struct entryStruct = (Struct) entryObj;
            entries.add(new ConfigEntry(
                entryStruct.getString(CONFIG_NAME),
                entryStruct.getString(CONFIG_VALUE)));
        }
        configs.put(new ConfigResource(type, name), new Config(entries));
    }
}
/**
 * Verifies describeConfigs completes when the broker returns a successful,
 * empty config for a single BROKER resource.
 */
@Test
public void testDescribeConfigs() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        ConfigResource broker0 = new ConfigResource(ConfigResource.Type.BROKER, "0");
        env.kafkaClient().prepareResponse(new DescribeConfigsResponse(0,
            Collections.singletonMap(broker0,
                new DescribeConfigsResponse.Config(ApiError.NONE, Collections.emptySet()))));
        DescribeConfigsResult result2 = env.adminClient()
            .describeConfigs(Collections.singleton(broker0));
        result2.all().get();
    }
}
// Decode the resource identity (wire type id + name) into a ConfigResource.
ConfigResource.Type resourceType = ConfigResource.Type.forId(resourceStruct.getByte(RESOURCE_TYPE_KEY_NAME));
String resourceName = resourceStruct.getString(RESOURCE_NAME_KEY_NAME);
ConfigResource resource = new ConfigResource(resourceType, resourceName);
// Prepared response body (call continues from before this view): a successful,
// empty config for topic "foo".
Collections.singletonMap(new ConfigResource(ConfigResource.Type.TOPIC, "foo"),
    new DescribeConfigsResponse.Config(ApiError.NONE, Collections.emptySet()))));
DescribeConfigsResult result2 = env.adminClient().describeConfigs(Collections.singleton(
    new ConfigResource(ConfigResource.Type.TOPIC, "foo")));
// Advance mock time before resolving the future — presumably exercising a
// timeout/retry path; confirm against the enclosing test.
time.sleep(5000);
result2.values().get(new ConfigResource(ConfigResource.Type.TOPIC, "foo")).get();
// Expect the call to fail: the test (enclosing try/catch continues past this
// view) asserts an authentication error surfaces via ExecutionException.
env.adminClient().describeConfigs(Collections.singleton(new ConfigResource(ConfigResource.Type.BROKER, "0"))).all().get();
fail("Expected an authentication error.");
} catch (ExecutionException e) {