/**
 * Creates an IndexManager.
 *
 * @param hbaseConf HBase client configuration used for table access
 * @param tableFactory factory used to obtain HBase tables; when {@code null},
 *        a default {@link HBaseTableFactoryImpl} backed by {@code hbaseConf} is created
 * @throws IOException if the default table factory cannot be created
 */
public IndexManager(Configuration hbaseConf, HBaseTableFactory tableFactory) throws IOException {
    this.hbaseConf = hbaseConf;
    if (tableFactory == null) {
        this.tableFactory = new HBaseTableFactoryImpl(hbaseConf);
    } else {
        this.tableFactory = tableFactory;
    }
}
/**
 * Returns the table described by {@code tableDescriptor}, using the split keys
 * configured for that table name.
 *
 * @param tableDescriptor descriptor of the table to obtain
 * @param create whether the table may be created if it does not exist
 */
@Override
public HTableInterface getTable(HTableDescriptor tableDescriptor, boolean create) throws IOException, InterruptedException {
    // Look up the configured split keys for this table before delegating.
    byte[][] splitKeys = getSplitKeys(tableDescriptor.getName());
    return getTable(tableDescriptor, splitKeys, create);
}
/**
 * Returns the region split keys configured for the given table.
 *
 * @param tableName name of the table whose split keys are requested
 * @return the split keys from the table's configuration
 */
@Override
public byte[][] getSplitKeys(byte[] tableName) {
    TableConfig config = getTableConfig(tableName);
    return config.getSplitKeys();
}
}
/**
 * Returns the table described by {@code tableDescriptor}, creating it if it
 * does not yet exist.
 *
 * @param tableDescriptor descriptor of the table to obtain
 */
@Override
public HTableInterface getTable(HTableDescriptor tableDescriptor) throws IOException, InterruptedException {
    // This overload always allows table creation.
    final boolean createIfAbsent = true;
    return getTable(tableDescriptor, createIfAbsent);
}
configure(tableDescriptor);
/**
 * Returns the table described by {@code tableDescriptor} with explicit split
 * keys, creating the table if it does not yet exist.
 *
 * @param tableDescriptor descriptor of the table to obtain
 * @param splitKeys region split keys to use if the table is created
 */
@Override
public HTableInterface getTable(HTableDescriptor tableDescriptor, byte[][] splitKeys) throws IOException, InterruptedException {
    // This overload always allows table creation.
    final boolean createIfAbsent = true;
    return getTable(tableDescriptor, splitKeys, createIfAbsent);
}
/**
 * Convenience constructor that creates a default {@link HBaseTableFactoryImpl}
 * from the given configuration and delegates to the main constructor.
 *
 * @param hbaseConf HBase client configuration backing the table factory
 * @param clientMode passed through unchanged to the delegate constructor
 */
public HBaseBlobStoreAccess(Configuration hbaseConf, boolean clientMode) throws IOException, InterruptedException { this(new HBaseTableFactoryImpl(hbaseConf), clientMode); }
@Override public void configure(HTableDescriptor tableDescriptor) { TableConfig tableConfig = getTableConfig(tableDescriptor.getName());
typeMgr = new HBaseTypeManager(idGenerator, conf, zk, new HBaseTableFactoryImpl(conf));
/**
 * Builds a fully wired {@link BlobManager}: resolves the DFS blob root from
 * ZooKeeper, assembles the available blob store backends (DFS, HBase, inline),
 * and wraps them in a size-based access factory.
 *
 * @param zk ZooKeeper handle used to resolve the DFS URI and the blob store access config
 * @param configuration HBase/Hadoop configuration for table and filesystem access
 * @return a new {@link BlobManagerImpl} backed by the assembled stores
 */
public static BlobManager getBlobManager(ZooKeeperItf zk, Configuration configuration) throws IOException, InterruptedException {
    HBaseTableFactory tableFactory = new HBaseTableFactoryImpl(configuration);

    // Resolve where blobs live on the distributed filesystem.
    URI dfsUri = getDfsUri(zk);
    FileSystem fileSystem = FileSystem.get(DfsUri.getBaseDfsUri(dfsUri), configuration);
    Path blobRoot = new Path(DfsUri.getDfsPath(dfsUri));

    // The backends among which the size-based factory chooses per blob.
    List<BlobStoreAccess> backends = Arrays.asList(
            new DFSBlobStoreAccess(fileSystem, blobRoot),
            new HBaseBlobStoreAccess(configuration, true),
            new InlineBlobStoreAccess());

    SizeBasedBlobStoreAccessFactory accessFactory =
            new SizeBasedBlobStoreAccessFactory(backends, getBlobStoreAccessConfig(zk));
    return new BlobManagerImpl(tableFactory, accessFactory, true);
}
/**
 * Factory method for creation of a {@code BulkIngester} that operates on a non-default repository table.
 *
 * @param zkConnString connection string for ZooKeeper
 * @param timeout ZooKeeper session timeout
 * @param repositoryName name of the repository containing the target table
 * @param tableName name of the repository table to write to
 * @param bulkMode whether writes go through the bulk (direct HBase) path rather than the Lily client
 */ public static BulkIngester newBulkIngester(String zkConnString, int timeout, String repositoryName, String tableName, boolean bulkMode) { try { ZooKeeperItf zk = ZkUtil.connect(zkConnString, timeout); // we need a lily client for non bulk access LilyClient lilyClient = new LilyClient(zk); // we need an HBaseRepository for bulk access Configuration conf = HBaseConfiguration.create(); conf.set("hbase.zookeeper.quorum", zkConnString); HBaseTableFactory hbaseTableFactory = new HBaseTableFactoryImpl(conf); HBaseRepository hbaseRepository = createHBaseRepository(repositoryName, tableName, zk, conf, hbaseTableFactory); return new BulkIngester( lilyClient, hbaseRepository, LilyHBaseSchema.getRecordTable(hbaseTableFactory, hbaseRepository.getRepositoryName(), hbaseRepository.getTableName()), bulkMode); } catch (Exception e) { /* NOTE(review): if construction fails after ZkUtil.connect / new LilyClient succeed, those connections are not closed here — confirm whether BulkIngester's callers expect this, or add cleanup. */ ExceptionUtil.handleInterrupt(e); throw new RuntimeException(e); } }
/**
 * Wires up a remote {@link RepositoryManager} that talks to the given Lily server
 * node over Avro, backed by the shared ZooKeeper handle and HBase configuration.
 *
 * @param server the server node whose Lily address/port to connect to
 * @return a {@link RemoteRepositoryManager} connected to {@code server}
 */
private RepositoryManager constructRepositoryManager(ServerNode server) throws IOException, InterruptedException {
    // Shared infrastructure: id generation, HBase config, blob storage, tables.
    IdGeneratorImpl idGenerator = new IdGeneratorImpl();
    Configuration hbaseConf = getNewOrExistingConfiguration(zk);
    BlobManager blobManager = getBlobManager(zk, hbaseConf);
    HBaseTableFactoryImpl tableFactory = new HBaseTableFactoryImpl(hbaseConf);

    // Remote transport to the Lily server node.
    InetSocketAddress lilyAddress = parseAddressAndPort(server.lilyAddressAndPort);
    AvroLilyTransceiver transceiver = new AvroLilyTransceiver(lilyAddress, keepAlive);
    AvroConverter avroConverter = new AvroConverter();

    // Remote type manager and record factory feeding the repository manager.
    RemoteTypeManager remoteTypeManager =
            new RemoteTypeManager(lilyAddress, avroConverter, idGenerator, zk, schemaCache, keepAlive);
    RecordFactory recordFactory = new RecordFactoryImpl();

    return new RemoteRepositoryManager(remoteTypeManager, idGenerator, recordFactory,
            transceiver, avroConverter, blobManager, tableFactory, repositoryModel);
}
/**
 * One-time bootstrap of the core test environment: starts an HBase proxy,
 * connects to its ZooKeeper, and starts a repository master with the core
 * repository hook. Idempotent: subsequent calls return immediately.
 * Statement order matters — each step depends on the previous one
 * (proxy → conf → zk → table factory → model → master).
 *
 * @throws Exception if any part of the environment fails to start
 */
public void setupCore() throws Exception { if (coreSetup) { return; } hbaseProxy = new HBaseProxy(); hbaseProxy.start(); hadoopConf = hbaseProxy.getConf(); zk = ZkUtil.connect(hbaseProxy.getZkConnectString(), 10000); hbaseTableFactory = new HBaseTableFactoryImpl(hadoopConf); repositoryModel = new RepositoryModelImpl(zk); repositoryMaster = new RepositoryMaster(zk, repositoryModel, new DummyLilyInfo(), Collections.<RepositoryMasterHook>singletonList(new CoreRepositoryMasterHook(hbaseTableFactory, hbaseProxy.getConf()))); repositoryMaster.start(); coreSetup = true; }