/**
 * Collects one {@link RangePartition} per range of the given Kudu table.
 *
 * <p>Tables without range-partition columns yield an empty list. For range-partitioned
 * tables, each range is materialized once: a tablet is considered only when every hash
 * bucket index of its partition is 0, since tablets in the other hash buckets share the
 * same range bounds and would produce duplicates.
 *
 * @param table    the Kudu table whose tablet locations are inspected
 * @param deadline deadline (ms) passed to {@code getTabletsLocations}
 * @return the list of range partitions, possibly empty
 * @throws RuntimeException wrapping any failure while fetching tablet locations
 */
private static List<RangePartition> getRangePartitionList(KuduTable table, long deadline) {
  List<RangePartition> rangePartitions = new ArrayList<>();
  if (!table.getPartitionSchema().getRangeSchema().getColumns().isEmpty()) {
    try {
      // Typed enhanced-for replaces the raw Iterator + cast of the previous version.
      for (LocatedTablet tablet : table.getTabletsLocations(deadline)) {
        Partition partition = tablet.getPartition();
        // Keep only the tablet from hash bucket 0 of each range to avoid duplicates.
        if (Iterators.all(partition.getHashBuckets().iterator(), Predicates.equalTo(0))) {
          rangePartitions.add(buildRangePartition(table, partition));
        }
      }
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  }
  return rangePartitions;
}
/**
 * Builds the list of {@code KuduWork} units for the scan, one per tablet of the target
 * table, and records per-Drillbit affinity for every replica whose RPC host matches a
 * known Drillbit address.
 */
private void init() {
  String tableName = kuduScanSpec.getTableName();
  // Index the available Drillbits by host address for replica-locality lookups.
  Map<String, DrillbitEndpoint> endpointsByHost = Maps.newHashMap();
  for (DrillbitEndpoint bit : kuduStoragePlugin.getContext().getBits()) {
    endpointsByHost.put(bit.getAddress(), bit);
  }
  try {
    List<LocatedTablet> tabletLocations =
        kuduStoragePlugin.getClient().openTable(tableName).getTabletsLocations(10000);
    for (LocatedTablet tablet : tabletLocations) {
      KuduWork work = new KuduWork(
          tablet.getPartition().getPartitionKeyStart(),
          tablet.getPartition().getPartitionKeyEnd());
      // Credit each co-located Drillbit with the (assumed) tablet size for affinity.
      for (Replica replica : tablet.getReplicas()) {
        DrillbitEndpoint localBit = endpointsByHost.get(replica.getRpcHost());
        if (localBit != null) {
          work.getByteMap().add(localBit, DEFAULT_TABLET_SIZE);
        }
      }
      kuduWorkList.add(work);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
/**
 * Creates one {@code KuduInputSplit} per scan token of the target table. Each split
 * carries the serialized token plus the host:port locations of the tablet's replicas.
 * If fewer splits than {@code minNumSplits} exist, a warning is logged — the surplus
 * task instances will simply stay idle.
 */
@Override
public KuduInputSplit[] createInputSplits(int minNumSplits) throws IOException {
  startTableContext();
  Preconditions.checkNotNull(tableContext, "tableContext should not be null");
  List<KuduScanToken> tokens = tableContext.scanTokens(tableFilters, tableProjections, rowsLimit);
  KuduInputSplit[] splits = new KuduInputSplit[tokens.size()];
  int splitIndex = 0;
  for (KuduScanToken token : tokens) {
    List<LocatedTablet.Replica> replicas = token.getTablet().getReplicas();
    List<String> hosts = new ArrayList<>(replicas.size());
    for (LocatedTablet.Replica replica : replicas) {
      hosts.add(getLocation(replica.getRpcHost(), replica.getRpcPort()));
    }
    splits[splitIndex] =
        new KuduInputSplit(token.serialize(), splitIndex, hosts.toArray(new String[hosts.size()]));
    splitIndex++;
  }
  if (splits.length < minNumSplits) {
    LOG.warn(" The minimum desired number of splits with your configured parallelism level "
            + "is {}. Current kudu splits = {}. {} instances will remain idle.",
        minNumSplits, splits.length, (minNumSplits - splits.length));
  }
  return splits;
}
/**
 * Helper method to kill a tablet server that serves the given tablet's
 * leader. The currently running test case will be failed if the tablet has no
 * leader after some retries, or if the tablet server was already killed.
 *
 * This method is thread-safe.
 * @param tablet a RemoteTablet which will get its leader killed
 * @throws Exception if no leader is found in time or the server is already down
 */
public void killTabletLeader(RemoteTablet tablet) throws Exception {
  // Delegate to the LocatedTablet-based overload, which does the actual work.
  killTabletLeader(new LocatedTablet(tablet));
}
@Test public void testNoLeader() throws Exception { final int requestBatchSize = 10; CreateTableOptions options = getBasicCreateTableOptions(); KuduTable table = client.createTable( "testNoLeader-" + System.currentTimeMillis(), basicSchema, options); // Lookup the current locations so that we can pass some valid information to discoverTablets. List<LocatedTablet> tablets = asyncClient .locateTable(table, null, null, requestBatchSize, DEFAULT_SLEEP) .join(DEFAULT_SLEEP); LocatedTablet tablet = tablets.get(0); LocatedTablet.Replica leader = tablet.getLeaderReplica(); // Fake a master lookup that only returns one follower for the tablet. List<Master.TabletLocationsPB> tabletLocations = new ArrayList<>(); Master.TabletLocationsPB.Builder tabletPb = Master.TabletLocationsPB.newBuilder(); tabletPb.setPartition(ProtobufUtils.getFakePartitionPB()); tabletPb.setTabletId(ByteString.copyFrom(tablet.getTabletId())); tabletPb.addReplicas(ProtobufUtils.getFakeTabletReplicaPB( "master", leader.getRpcHost(), leader.getRpcPort(), Metadata.RaftPeerPB.Role.FOLLOWER)); tabletLocations.add(tabletPb.build()); try { asyncClient.discoverTablets(table, new byte[0], requestBatchSize, tabletLocations, 1000); fail("discoverTablets should throw an exception if there's no leader"); } catch (NoLeaderFoundException ex) { // Expected. } }
Slice tabletId = new Slice(tablet.getTabletId());
/**
 * Return the current leader replica, or null if there is none.
 */
public Replica getLeaderReplica() {
  return getOneOfRoleOrNull(Role.LEADER);
}
/**
 * Finds the RPC host and port of the given tablet's leader tserver, polling until a
 * leader replica appears or the deadline elapses (in which case the test is failed).
 * @param tablet a LocatedTablet
 * @return the host and port of the given tablet's leader tserver
 * @throws Exception if we are unable to find the leader tserver
 */
public HostAndPort findLeaderTabletServer(LocatedTablet tablet) throws Exception {
  DeadlineTracker tracker = new DeadlineTracker();
  tracker.setDeadline(DEFAULT_SLEEP);
  LocatedTablet.Replica leaderReplica = tablet.getLeaderReplica();
  // Poll until a leader shows up; fail the test once the deadline is exceeded.
  while (leaderReplica == null) {
    if (tracker.timedOut()) {
      fail("Timed out while trying to find a leader for this table");
    }
    LOG.info("Sleeping while waiting for a tablet LEADER to arise, currently slept {} ms",
        tracker.getElapsedMillis());
    Thread.sleep(50);
    leaderReplica = tablet.getLeaderReplica();
  }
  return new HostAndPort(leaderReplica.getRpcHost(), leaderReplica.getRpcPort());
}
/**
 * Picks at random a tablet server that serves tablets from the passed table and restarts it.
 * @param table table to query for a TS to restart
 * @throws Exception
 */
public void restartTabletServer(KuduTable table) throws Exception {
  List<LocatedTablet> tabletList = table.getTabletsLocations(DEFAULT_SLEEP);
  if (tabletList.isEmpty()) {
    fail("Table " + table.getName() + " doesn't have any tablets");
  }
  // Pick a random replica of the first tablet and bounce its tablet server.
  LocatedTablet firstTablet = tabletList.get(0);
  List<LocatedTablet.Replica> replicas = firstTablet.getReplicas();
  LocatedTablet.Replica chosen = replicas.get(randomForTSRestart.nextInt(replicas.size()));
  HostAndPort hostPort = new HostAndPort(chosen.getRpcHost(), chosen.getRpcPort());
  miniCluster.killTabletServer(hostPort);
  miniCluster.startTabletServer(hostPort);
}
entry.getUpperBoundPartitionKey())); return Deferred.fromResult(new LocatedTablet(entry.getTablet()));
/**
 * Returns the end partition key of this tablet.
 *
 * @deprecated use {@link #getPartition()} instead
 * @return the partition's end key, as returned by {@code Partition.getPartitionKeyEnd()}
 */
@Deprecated()
public byte[] getEndKey() {
  return getPartition().getPartitionKeyEnd();
}
/**
 * Helper method to easily kill a tablet server that serves the given table's only tablet's
 * leader. The currently running test case will be failed if there's more than one tablet,
 * if the tablet has no leader after some retries, or if the tablet server was already killed.
 *
 * This method is thread-safe.
 * @param table a KuduTable which will get its single tablet's leader killed.
 * @throws Exception if the preconditions fail or the leader cannot be found/killed
 */
public void killTabletLeader(KuduTable table) throws Exception {
  List<LocatedTablet> tablets = table.getTabletsLocations(DEFAULT_SLEEP);
  // Simplified from "isEmpty() || size() > 1": this helper supports exactly one tablet.
  if (tablets.size() != 1) {
    fail("Currently only support killing leaders for tables containing 1 tablet, table "
        + table.getName() + " has " + tablets.size());
  }
  LocatedTablet tablet = tablets.get(0);
  if (tablet.getReplicas().size() == 1) {
    // Fixed message: the check is on replica count, but the old text said "1 tablet".
    fail("Table " + table.getName() + " only has 1 replica, please enable replication");
  }
  HostAndPort hp = findLeaderTabletServer(tablet);
  miniCluster.killTabletServer(hp);
}
/**
 * Kills a tablet server that serves the given tablet's leader and restarts it.
 * @param tablet a RemoteTablet which will get its leader killed and restarted
 * @throws Exception
 */
public void restartTabletServer(RemoteTablet tablet) throws Exception {
  // Locate the leader first so we bounce the right tablet server.
  HostAndPort leaderHostPort = findLeaderTabletServer(new LocatedTablet(tablet));
  miniCluster.killTabletServer(leaderHostPort);
  miniCluster.startTabletServer(leaderHostPort);
}
/**
 * Returns the start partition key of this tablet.
 *
 * @deprecated use {@link #getPartition()} instead
 * @return the partition's start key, as returned by {@code Partition.getPartitionKeyStart()}
 */
@Deprecated
public byte[] getStartKey() {
  return getPartition().getPartitionKeyStart();
}
/**
 * Creates a table named {@code tableNamePrefix-splitsCount} with the given number of split
 * rows, then asserts that it has {@code splitsCount + 1} tablets (via both the synchronous
 * and asynchronous location lookups), each with 3 replicas.
 */
private KuduTable createTableWithSplitsAndTest(String tableNamePrefix, int splitsCount)
    throws Exception {
  String tableName = tableNamePrefix + "-" + splitsCount;
  CreateTableOptions createOptions = getBasicCreateTableOptions();
  // A zero splitsCount simply skips this loop (the old explicit "!= 0" guard was redundant).
  for (int splitIndex = 1; splitIndex <= splitsCount; splitIndex++) {
    PartialRow splitRow = BASIC_SCHEMA.newPartialRow();
    splitRow.addInt(0, splitIndex);
    createOptions.addSplitRow(splitRow);
  }
  KuduTable table = client.createTable(tableName, BASIC_SCHEMA, createOptions);
  List<LocatedTablet> tabletLocations = table.getTabletsLocations(DEFAULT_SLEEP);
  int expectedTablets = splitsCount + 1;
  assertEquals(expectedTablets, tabletLocations.size());
  assertEquals(expectedTablets, table.asyncGetTabletsLocations(DEFAULT_SLEEP).join().size());
  for (LocatedTablet tabletLocation : tabletLocations) {
    assertEquals(3, tabletLocation.getReplicas().size());
  }
  return table;
}
@Override
public Deferred<LocatedTablet> call(List<LocatedTablet> tablets) {
  // A single partition key can resolve to at most one tablet.
  Preconditions.checkArgument(tablets.size() <= 1,
      "found more than one tablet for a single partition key");
  if (tablets.isEmpty()) {
    // Most likely this indicates a non-covered range, but since this
    // could race with an alter table partitioning operation (which
    // clears the local table locations cache), we check again.
    TableLocationsCache.Entry entry = getTableLocationEntry(table.getTableId(),
        partitionKey);
    if (entry == null) {
      // This should be extremely rare, but a potential source of tight loops.
      LOG.debug("Table location expired before it could be processed; retrying.");
      // Recoverable: the caller is expected to retry the lookup.
      return Deferred.fromError(new RecoverableException(Status.NotFound(
          "Table location expired before it could be processed")));
    }
    if (entry.isNonCoveredRange()) {
      // The key truly falls outside any covered range; report the gap's bounds.
      return Deferred.fromError(
          new NonCoveredRangeException(entry.getLowerBoundPartitionKey(),
              entry.getUpperBoundPartitionKey()));
    }
    // The cache re-check found a covered tablet after all; return it.
    return Deferred.fromResult(new LocatedTablet(entry.getTablet()));
  }
  return Deferred.fromResult(tablets.get(0));
}
});
/** Returns the start key of the target tablet's partition as this object's partition key. */
@Override
public byte[] partitionKey() {
  return tablet.getPartition().getPartitionKeyStart();
}
ret.add(new LocatedTablet(entry.getTablet()));
/**
 * Orders scan tokens by their tablet's partition. Tokens are only comparable when they
 * belong to the same table; otherwise an {@link IllegalArgumentException} is thrown.
 */
@Override
public int compareTo(KuduScanToken other) {
  boolean sameTable = message.getTableName().equals(other.message.getTableName());
  if (!sameTable) {
    throw new IllegalArgumentException("Scan tokens from different tables may not be compared");
  }
  return tablet.getPartition().compareTo(other.getTablet().getPartition());
}
// Appends an operation (and its original index) to this batch.
public void add(Operation operation, int index) {
  // Sanity check, evaluated only when assertions are enabled (-ea): the operation's
  // partition key must fall within this tablet's partition range — at or after the
  // start key, and before the end key unless the end key is empty (i.e. unbounded).
  assert Bytes.memcmp(operation.partitionKey(),
      tablet.getPartition().getPartitionKeyStart()) >= 0 &&
      (tablet.getPartition().getPartitionKeyEnd().length == 0 ||
          Bytes.memcmp(operation.partitionKey(),
              tablet.getPartition().getPartitionKeyEnd()) < 0);
  operations.add(operation);
  operationIndexes.add(index);
}