/**
 * Checks whether or not an RPC can be retried once more.
 * <p>
 * This is purely a check: the method itself never throws.  Callers that
 * get {@code true} back are the ones expected to fail the RPC (typically
 * with a {@code NonRecoverableException}).  The previous Javadoc's
 * {@code @throws} clause was stale and has been removed.
 * @param rpc The RPC we're going to attempt to execute.
 * @return {@code true} if this RPC already had too many attempts,
 * {@code false} otherwise (in which case it's OK to retry once more).
 */
boolean cannotRetryRequest(final HBaseRpc rpc) {
  // The retry cap is configurable so operators can tune it at runtime
  // instead of relying on a hard-coded constant.
  return rpc.attempt > config.getInt("hbase.client.retries.number");
}
/**
 * Builds the channel pipeline for a region server connection, installing
 * an idle-state handler on the shared timer.  The two zeros disable the
 * separate reader-idle and writer-idle timers; only the "all idle"
 * timeout (no reads AND no writes) is armed, using the configured
 * connection idle timeout.
 */
RegionClientPipeline() {
  // NOTE(review): the doubled "hbase.hbase." prefix in this key looks like
  // a typo -- every other IPC key in this file uses a single
  // "hbase.ipc.client." prefix (see the socket options).  Verify against
  // the Config default-key map before changing it: the default value may
  // have been registered under this exact (misspelled) key, in which case
  // "fixing" the string here would break the lookup.
  timeout_handler = new IdleStateHandler(timer, 0, 0,
      config.getInt("hbase.hbase.ipc.client.connection.idle_timeout"));
}
/** Schedule a timer to retry {@link #getRootRegion} after some time. */ private void retryGetRootRegionLater() { newTimeout(new TimerTask() { public void run(final Timeout timeout) { if (!getRootRegion()) { // Try to read the znodes connectZK(); // unless we need to connect first. } } }, config.getInt("hbase.zookeeper.getroot.retry_delay") /* milliseconds */); }
/** * Constructor. * @param hbase_client The HBase client this instance belongs to. */ public RegionClient(final HBaseClient hbase_client) { this.hbase_client = hbase_client; check_write_status = hbase_client.getConfig().getBoolean( "hbase.region_client.check_channel_write_status"); inflight_limit = hbase_client.getConfig().getInt( "hbase.region_client.inflight_limit"); pending_limit = hbase_client.getConfig().getInt( "hbase.region_client.pending_limit"); batch_size = hbase_client.getConfig().getInt("hbase.rpcs.batch.size"); }
/**
 * Connects to ZooKeeper.
 * <p>
 * Idempotent: if a session object already exists ({@code zk != null})
 * this returns immediately.  The monitor on {@code this} guards the
 * {@code zk} field against concurrent connection attempts.
 * @throws NonRecoverableException if something from which we can't
 * recover happened -- e.g. us being unable to resolve the hostname
 * of any of the zookeeper servers.
 */
private void connectZK() {
  try {
    // Session establishment is asynchronous, so this won't block.
    synchronized (this) {
      if (zk != null) {  // Already connected.
        return;
      }
      // `this' is also passed as the ZooKeeper Watcher, so session events
      // will be delivered back to this object.
      zk = new ZooKeeper(quorum_spec,
                         config.getInt("hbase.zookeeper.session.timeout"),
                         this);
    }
  } catch (UnknownHostException e) {
    // No need to retry, we usually cannot recover from this.
    throw new NonRecoverableException("Cannot connect to ZooKeeper,"
        + " is the quorum specification valid? " + quorum_spec, e);
  } catch (IOException e) {
    LOG.error("Failed to connect to ZooKeeper", e);
    // XXX don't retry recursively, create a timer with an exponential
    // backoff and schedule the reconnection attempt for later.
    // NOTE(review): this unbounded recursion can overflow the stack if the
    // IOException persists (e.g. during a prolonged network outage).
    connectZK();
  }
}
/**
 * Package private timer constructor that provides a useful name for the
 * timer thread.
 * @param config The config object used to pull out the tick interval
 *        and wheel size; may be {@code null}, in which case built-in
 *        defaults (100ms tick, 512 slots) are used.
 * @param name A name to stash in the timer
 * @return A timer
 */
static HashedWheelTimer newTimer(final Config config, final String name) {
  // Names each timer thread "AsyncHBase Timer <name> #<id>" so thread
  // dumps are easy to read.
  class TimerThreadNamer implements ThreadNameDeterminer {
    @Override
    public String determineThreadName(String currentThreadName,
                                      String proposedThreadName)
        throws Exception {
      return "AsyncHBase Timer " + name + " #"
          + TIMER_THREAD_ID.incrementAndGet();
    }
  }
  // Fall back to a 100ms tick and a 512-slot wheel when no config is given.
  final long tick =
      config == null ? 100 : config.getShort("hbase.timer.tick");
  final int ticks_per_wheel =
      config == null ? 512 : config.getInt("hbase.timer.ticks_per_wheel");
  return new HashedWheelTimer(Executors.defaultThreadFactory(),
      new TimerThreadNamer(), tick, MILLISECONDS, ticks_per_wheel);
}
// Fragment of a constructor (enclosing signature and closing braces are
// outside this view): builds a fresh default Config and caches the tunables
// read from it.
zkclient = new ZKClient(quorum_spec, base_path);
config = new Config();  // All defaults; no user-supplied overrides.
rpc_timeout = config.getInt("hbase.rpc.timeout");
timer = newTimer(config, "HBaseClient");
rpc_timeout_timer = newTimer(config, "RPC Timeout Timer");
flush_interval = config.getShort("hbase.rpcs.buffered_flush_interval");
increment_buffer_size = config.getInt("hbase.increments.buffer_size");
// NSRE = NoSuchRegionException handling thresholds -- presumably used to
// throttle RPCs queued against a region that is moving; verify at use site.
nsre_low_watermark = config.getInt("hbase.nsre.low_watermark");
nsre_high_watermark = config.getInt("hbase.nsre.high_watermark");
// Only override the durability flag when the user explicitly set it,
// so the field's initializer default survives otherwise.
if (config.properties.containsKey("hbase.increments.durable")) {
  increment_buffer_durable = config.getBoolean("hbase.increments.durable");
// Fragment of a constructor variant that accepts a caller-supplied Config
// (the opening call this first line closes is outside this view).  Mirrors
// the default-Config constructor fragment; the two should stay in sync.
config.getString("hbase.zookeeper.znode.parent"));
this.config = config;
rpc_timeout = config.getInt("hbase.rpc.timeout");
timer = newTimer(config, "HBaseClient");
rpc_timeout_timer = newTimer(config, "RPC Timeout Timer");
flush_interval = config.getShort("hbase.rpcs.buffered_flush_interval");
increment_buffer_size = config.getInt("hbase.increments.buffer_size");
nsre_low_watermark = config.getInt("hbase.nsre.low_watermark");
nsre_high_watermark = config.getInt("hbase.nsre.high_watermark");
// Only override the durability flag when the user explicitly set it.
if (config.properties.containsKey("hbase.increments.durable")) {
  increment_buffer_durable = config.getBoolean("hbase.increments.durable");
// Fragment of a setter-style method (the validation guard this throw
// belongs to is outside this view): rejects negative sizes, then
// short-circuits when the requested size matches the current config value.
throw new IllegalArgumentException("Negative: " + increment_buffer_size);
final int current = config.getInt("hbase.increments.buffer_size");
if (current == increment_buffer_size) {
  // No change requested; avoid rebuilding whatever backs the buffer.
  return current;
// Fragment of channel-factory setup (the boss-pool construction this first
// line closes is outside this view).
new BossThreadNamer());
// Worker count: explicit config wins; otherwise default to 2x the
// available processors (a common Netty sizing heuristic).
// NOTE(review): this uses config.hasProperty(...) while the constructor
// fragments use config.properties.containsKey(...) -- confirm both are
// equivalent accessors on Config.
final int num_workers = config.hasProperty("hbase.workers.size")
    ? config.getInt("hbase.workers.size")
    : Runtime.getRuntime().availableProcessors() * 2;
final NioWorkerPool worker_pool = new NioWorkerPool(executor,
// Fragment of socket-option configuration.  Several of these lines are
// bare arguments whose enclosing socket_config.setXxx(...) calls fall
// outside this view -- only the tcpnodelay setter is fully visible here.
config.getInt("hbase.ipc.client.socket.timeout.connect"));
socket_config.setTcpNoDelay(
    config.getBoolean("hbase.ipc.client.tcpnodelay"));
// Presumably write-buffer watermarks and send/receive buffer sizes for the
// channel -- verify the setters on the elided lines.
config.getInt("hbase.ipc.client.socket.write.high_watermark"));
config.getInt("hbase.ipc.client.socket.write.low_watermark"));
config.getInt("hbase.ipc.client.socket.sendBufferSize"));
config.getInt("hbase.ipc.client.socket.receiveBufferSize"));