/**
 * Little helper to set the table byte array. If it's different than the last
 * table we reset the byte array. Otherwise we just use the existing array.
 *
 * <p>NOTE(review): {@code lastTable} and {@code lastTableBytes} are read and
 * written outside the synchronized block while {@code bulkMutation} is guarded.
 * This assumes callers invoke this method from a single thread per instance —
 * confirm against the calling code.
 *
 * @param table The table we're operating against
 */
private void setTable(final String table) {
  if (!lastTable.equals(table)) {
    lastTable = table;
    // Resolve the short table name against the configured instance.
    BigtableTableName tableName = options
        .getInstanceName()
        .toTableName(table);
    // NOTE(review): getBytes() uses the platform default charset; table names
    // are presumably ASCII, but StandardCharsets.UTF_8 would be safer — confirm.
    lastTableBytes = tableName
        .toString()
        .getBytes();
    synchronized(this) {
      if (bulkMutation != null) {
        try {
          // Drain mutations buffered for the previous table before switching.
          bulkMutation.flush();
        } catch (InterruptedException e) {
          // Restore the interrupt flag, then propagate as unchecked.
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        }
      }
      bulkMutation = session.createBulkMutation(tableName);
    }
  }
}
/**
 * Describes this connection: project, instance, and the data/admin endpoints
 * it talks to.
 */
@Override
public String toString() {
  MoreObjects.ToStringHelper helper =
      MoreObjects.toStringHelper(AbstractBigtableConnection.class);
  helper.add("project", options.getProjectId());
  helper.add("instance", options.getInstanceId());
  helper.add("dataHost", options.getDataHost());
  helper.add("tableAdminHost", options.getAdminHost());
  return helper.toString();
}
public BigtableDataClientWrapper(BigtableDataClient bigtableDataClient, BigtableOptions options) { this.delegate = bigtableDataClient; this.requestContext = RequestContext.create(options.getProjectId(), options.getInstanceId(), options.getAppProfileId()); }
/**
 * Describes this client: project, instance and the admin endpoint in use.
 */
@Override
public String toString() {
  MoreObjects.ToStringHelper description = MoreObjects.toStringHelper(getClass());
  description.add("project", options.getProjectId());
  description.add("instance", options.getInstanceId());
  description.add("adminHost", options.getAdminHost());
  return description.toString();
}
this.options = opts; Preconditions.checkArgument( !Strings.isNullOrEmpty(options.getProjectId()), PROJECT_ID_EMPTY_OR_NULL); Preconditions.checkArgument( !Strings.isNullOrEmpty(options.getInstanceId()), INSTANCE_ID_EMPTY_OR_NULL); Preconditions.checkArgument( !Strings.isNullOrEmpty(options.getUserAgent()), USER_AGENT_EMPTY_OR_NULL); LOG.info( "Opening connection for projectId %s, instanceId %s, " + "on data host %s, admin host %s.", options.getProjectId(), options.getInstanceId(), options.getDataHost(), options.getAdminHost()); LOG.info("Bigtable options: %s.", options); .add(new GoogleCloudResourcePrefixInterceptor(options.getInstanceName().toString())); RetryOptions retryOptions = options.getRetryOptions(); CredentialOptions credentialOptions = options.getCredentialOptions(); new CallOptionsFactory.ConfiguredCallOptionsFactory(options.getCallOptionsConfig()); dataClient = new BigtableDataGrpcClient(dataChannel, sharedPools.getRetryExecutor(), options);
System.out.println(String.format("User Agent: %s", options.getUserAgent())); System.out.println(String.format("Project ID: %s", options.getProjectId())); System.out.println(String.format("Instance Id: %s", options.getInstanceId())); System.out.println(String.format("Admin host: %s", options.getAdminHost())); System.out.println(String.format("Data host: %s", options.getDataHost())); Credentials credentials = CredentialFactory.getCredentials(options.getCredentialOptions()); try { System.out.println("Attempting credential refresh..."); try (Connection conn = ConnectionFactory.createConnection(fullConfiguration)) { try (Admin admin = conn.getAdmin()) { System.out.println(String.format("Tables in cluster %s:", options.getInstanceId())); TableName[] tableNames = admin.listTableNames(); if (tableNames.length == 0) {
/**
 * <p>Constructor for HBaseRequestAdapter.</p>
 *
 * @param options a {@link BigtableOptions} object.
 * @param tableName a {@link TableName} object.
 * @param mutationAdapters a {@link MutationAdapters} object.
 */
public HBaseRequestAdapter(BigtableOptions options, TableName tableName,
    MutationAdapters mutationAdapters) {
  // Delegates to the main constructor: derives the fully-qualified Bigtable
  // table name from the HBase qualifier, and builds the RequestContext
  // (project/instance/app-profile) from the options.
  this(tableName,
      options.getInstanceName().toTableName(tableName.getQualifierAsString()),
      mutationAdapters,
      RequestContext.create(
          InstanceName.of(options.getProjectId(), options.getInstanceId()),
          options.getAppProfileId()
      ));
}
/**
 * Applies the accumulated column-family modifications to the given table.
 *
 * @param tableName a {@link org.apache.hadoop.hbase.TableName} object.
 * @param modifications a {@link ModifyTableBuilder} object.
 * @return a future that completes (with {@code null}) once the admin call finishes
 */
private CompletableFuture<Void> modifyColumns(TableName tableName, ModifyTableBuilder modifications) {
  // Materialize the pending modifications into a single admin request.
  ModifyColumnFamiliesRequest request = modifications.build();
  return bigtableTableAdminClient
      .modifyColumnFamilyAsync(
          request.toProto(options.getProjectId(), options.getInstanceId()))
      .thenApply(unused -> null);
}
/**
 * Creates a locator for the given table.
 *
 * @param tableName the HBase table to locate regions for
 * @param options supplies the instance name, data host and port
 * @param client the data client used for sampled row-key lookups
 */
public AbstractBigtableRegionLocator (TableName tableName, BigtableOptions options, BigtableDataClient client) {
  this.tableName = tableName;
  this.client = client;
  // Translate the HBase name into a fully-qualified Bigtable table name.
  String hbaseName = tableName.getNameAsString();
  this.bigtableTableName = options.getInstanceName().toTableName(hbaseName);
  // Synthesize a ServerName from the data endpoint; the start code (0) is unused.
  ServerName dataServer = ServerName.valueOf(options.getDataHost(), options.getPort(), 0);
  this.adapter = getSampledRowKeysAdapter(tableName, dataServer);
}
/**
 * Describes this table: identity hash, project, instance, table name and the
 * data host it talks to.
 */
@Override
public String toString() {
  MoreObjects.ToStringHelper helper = MoreObjects.toStringHelper(AbstractBigtableTable.class);
  helper.add("hashCode", "0x" + Integer.toHexString(hashCode()));
  helper.add("project", options.getProjectId());
  helper.add("instance", options.getInstanceId());
  helper.add("table", tableName.getNameAsString());
  helper.add("host", options.getDataHost());
  return helper.toString();
}
.getCredentialsInterceptor(options.getCredentialOptions(), options.getRetryOptions()); if (credentialsInterceptor != null) { interceptorList.add(credentialsInterceptor); if (options.getInstanceName() != null) { interceptorList .add(new GoogleCloudResourcePrefixInterceptor(options.getInstanceName().toString()));
/**
 * The configurator doubles both ids, but the explicit withProjectId /
 * withInstanceId values must win; only the bulk-options change survives.
 */
@Test
public void testGetBigtableServiceWithConfigurator() {
  SerializableFunction<BigtableOptions.Builder, BigtableOptions.Builder> configurator =
      input ->
          input
              .setInstanceId(INSTANCE_ID.get() + INSTANCE_ID.get())
              .setProjectId(PROJECT_ID.get() + PROJECT_ID.get())
              .setBulkOptions(new BulkOptions.Builder().setUseBulkApi(true).build());
  BigtableService service =
      config
          .withProjectId(PROJECT_ID)
          .withInstanceId(INSTANCE_ID)
          .withBigtableOptionsConfigurator(configurator)
          .getBigtableService(PipelineOptionsFactory.as(GcpOptions.class));
  BigtableOptions actual = service.getBigtableOptions();
  assertEquals(PROJECT_ID.get(), actual.getProjectId());
  assertEquals(INSTANCE_ID.get(), actual.getInstanceId());
  assertEquals(true, actual.getBulkOptions().useBulkApi());
}
/**
 * Lazily creates and caches one {@link ResourceLimiter} per Bigtable instance,
 * so every session against the same instance shares the same in-flight RPC and
 * memory limits.
 *
 * @param options supplies the instance name and the bulk-mutation limits
 * @return the shared limiter for the instance named in {@code options}
 */
private static synchronized ResourceLimiter initializeResourceLimiter(BigtableOptions options) {
  BigtableInstanceName instanceName = options.getInstanceName();
  String key = instanceName.toString();
  ResourceLimiter resourceLimiter = resourceLimiterMap.get(key);
  if (resourceLimiter == null) {
    // Fetch BulkOptions once; the original re-fetched it for every limit.
    BulkOptions bulkOptions = options.getBulkOptions();
    int maxInflightRpcs = bulkOptions.getMaxInflightRpcs();
    long maxMemory = bulkOptions.getMaxMemory();
    ResourceLimiterStats stats = ResourceLimiterStats.getInstance(instanceName);
    resourceLimiter = new ResourceLimiter(stats, maxMemory, maxInflightRpcs);
    if (bulkOptions.isEnableBulkMutationThrottling()) {
      resourceLimiter.throttle(bulkOptions.getBulkMutationRpcTargetMs());
    }
    resourceLimiterMap.put(key, resourceLimiter);
  }
  return resourceLimiter;
}
/**
 * Logs {@code t} and wraps it in a non-retryable IOException whose message
 * names the operation, project, table and row.
 *
 * @param type the kind of operation that failed (used in log and message)
 * @param row the row key involved, for the exception message
 * @param t the underlying failure; preserved as the cause
 * @return a {@link DoNotRetryIOException} carrying {@code t}
 */
private IOException logAndCreateIOException(String type, byte[] row, Throwable t) {
  LOG.error("Encountered exception when executing " + type + ".", t);
  String message =
      makeGenericExceptionMessage(
          type, options.getProjectId(), tableName.getQualifierAsString(), row);
  return new DoNotRetryIOException(message, t);
}
/**
 * Constructor for the utility. Prefer
 * {@link BigtableClusterUtilities#forInstance(String, String)} or
 * {@link BigtableClusterUtilities#forAllInstances(String)} rather than this method.
 * @param options that specify projectId, instanceId, credentials and retry options.
 * @throws GeneralSecurityException
 * @throws IOException
 */
public BigtableClusterUtilities(final BigtableOptions options)
    throws IOException, GeneralSecurityException {
  // Fail fast when project/instance were not set; getInstanceName() is null then.
  this.instanceName =
      Preconditions.checkNotNull(
          options.getInstanceName(),
          "ProjectId and instanceId have to be set in the options. Use '-' for all instanceIds.");
  // Instance-level (admin) operations go through the admin endpoint.
  channel = BigtableSession.createChannelPool(options.getAdminHost(), options);
  client = new BigtableInstanceGrpcClient(channel);
}
/**
 * Bulk and retry options supplied via withBigtableOptions must survive on the
 * Write transform, with useBulkApi forced on.
 */
@Test
public void testWriteWithBigTableOptionsSetsBulkOptionsAndRetryOptions() {
  final int maxInflightRpcs = 1;
  final int initialBackoffMillis = -1;

  BulkOptions.Builder bulkOptionsBuilder = new BulkOptions.Builder();
  bulkOptionsBuilder.setMaxInflightRpcs(maxInflightRpcs);
  RetryOptions.Builder retryOptionsBuilder = new RetryOptions.Builder();
  retryOptionsBuilder.setInitialBackoffMillis(initialBackoffMillis);

  BigtableOptions.Builder optionsBuilder =
      BIGTABLE_OPTIONS
          .toBuilder()
          .setBulkOptions(bulkOptionsBuilder.build())
          .setRetryOptions(retryOptionsBuilder.build());
  BigtableIO.Write write = BigtableIO.write().withBigtableOptions(optionsBuilder.build());

  BigtableOptions options = write.getBigtableOptions();
  assertEquals(true, options.getBulkOptions().useBulkApi());
  assertEquals(maxInflightRpcs, options.getBulkOptions().getMaxInflightRpcs());
  assertEquals(initialBackoffMillis, options.getRetryOptions().getInitialBackoffMillis());
  // setUseBulkApi(true) mutates the builder so it matches the observed options.
  assertThat(
      options.getBulkOptions(), Matchers.equalTo(bulkOptionsBuilder.setUseBulkApi(true).build()));
  assertThat(options.getRetryOptions(), Matchers.equalTo(retryOptionsBuilder.build()));
}
/** Returns the write-buffer limit, taken from {@code BulkOptions.getMaxMemory()}. */
public long getWriteBufferSize() {
  BulkOptions bulkOptions = this.options.getBulkOptions();
  return bulkOptions.getMaxMemory();
}
/** Tests that credentials are not used from PipelineOptions if supplied by BigtableOptions. */
@Test
public void testDontUsePipelineOptionsCredentialsIfSpecifiedInBigtableOptions() throws Exception {
  BigtableOptions optionsWithNullCreds =
      BIGTABLE_OPTIONS
          .toBuilder()
          .setCredentialOptions(CredentialOptions.nullCredential())
          .build();
  GcpOptions pipelineOptions = PipelineOptionsFactory.as(GcpOptions.class);
  pipelineOptions.setGcpCredential(new TestCredential());

  BigtableService readService =
      BigtableIO.read()
          .withBigtableOptions(optionsWithNullCreds)
          .withTableId("TEST-TABLE")
          .getBigtableConfig()
          .getBigtableService(pipelineOptions);
  BigtableService writeService =
      BigtableIO.write()
          .withBigtableOptions(optionsWithNullCreds)
          .withTableId("TEST-TABLE")
          .getBigtableConfig()
          .getBigtableService(pipelineOptions);

  // Both services must keep the explicit null credential instead of picking up
  // the TestCredential from the pipeline options.
  assertEquals(
      CredentialType.None,
      readService.getBigtableOptions().getCredentialOptions().getCredentialType());
  assertEquals(
      CredentialType.None,
      writeService.getBigtableOptions().getCredentialOptions().getCredentialType());
}
/**
 * Wraps a single failed action in an HBase-style
 * {@link RetriesExhaustedWithDetailsException}.
 *
 * @param e the underlying failure
 * @param action the row operation that failed
 * @return an exception carrying the cause, the action and the data host name
 */
private RetriesExhaustedWithDetailsException createRetriesExhaustedWithDetailsException(
    Throwable e, Row action) {
  // getDataHost() already returns a String, so the former .toString() call was redundant.
  return new RetriesExhaustedWithDetailsException(
      Arrays.asList(e), Arrays.asList(action), Arrays.asList(options.getDataHost()));
}
private ManagedChannel getDataChannelPool() throws IOException { String host = options.getDataHost(); int channelCount = options.getChannelCount(); if (options.useCachedChannel()) { synchronized (BigtableSession.class) { // TODO: Ensure that the host and channelCount are the same. if (cachedDataChannelPool == null) { cachedDataChannelPool = createChannelPool(host, channelCount); } return cachedDataChannelPool; } } return createManagedPool(host, channelCount); }