/**
 * One-time suite setup: creates the test table with the "codes" column-family
 * mapping, adds a secondary index on "age" (including "name.last" and
 * "data.salary"), loads the JSON fixture, and blocks until the index has
 * caught up so tests see a fully-synced index.
 *
 * @throws Exception if table creation, data load, or index sync fails
 */
@BeforeClass
public static void setup_TestEncodedFieldPaths() throws Exception {
  try (Table table = DBTests.createOrReplaceTable(TABLE_NAME, ImmutableMap.of("codes", "codes"))) {
    tableCreated = true;
    tablePath = table.getPath().toUri().getPath();

    DBTests.createIndex(TABLE_NAME, INDEX_NAME,
        new String[] {"age"}, new String[] {"name.last", "data.salary"});
    // Force a (refreshed) fetch of the index list so the new index is visible.
    DBTests.admin().getTableIndexes(table.getPath(), true);

    try (final InputStream in = TestEncodedFieldPaths.class.getResourceAsStream(JSON_FILE_URL);
         final DocumentStream stream = Json.newDocumentStream(in);) {
      table.insertOrReplace(stream);
      table.flush();
    }

    // wait for the indexes to sync
    DBTests.waitForRowCount(table.getPath(), 5, INDEX_FLUSH_TIMEOUT);
    DBTests.waitForIndexFlush(table.getPath(), INDEX_FLUSH_TIMEOUT);
  } finally {
    // NOTE(review): runs even when setup fails partway — presumably intentional
    // so every test in the suite is forced onto index plans; confirm.
    test("ALTER SESSION SET `planner.disable_full_table_scan` = true");
  }
}
/**
 * Prepares this reader for reading: wires up the output vector writer, opens
 * the document scan over the table, and initializes the document writer.
 *
 * @param context the operator context for this scan
 * @param output  mutator backing the output vectors
 * @throws ExecutionSetupException if the underlying DB scan cannot be opened
 */
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.operatorContext = context;
  this.vectorWriterMutator = output;
  this.vectorWriter = new VectorContainerWriter(output, unionEnabled);
  try {
    // Only materialize the _id field when the query actually projects it.
    table.setOption(TableOption.EXCLUDEID, !includeId);
    documentStream = table.find(condition, scannedFields);
    documentIterator = documentStream.iterator();
    setupWriter();
  } catch (DBException e) {
    throw new ExecutionSetupException(e);
  }
}
/**
 * Closes the given table unless table caching is enabled; cached tables are
 * shared and must remain open for other users of the cache.
 *
 * @param table table to be closed; may be {@code null}, in which case this is a no-op
 */
public void closeTable(Table table) {
  if (tableCachingEnabled) {
    return; // cached tables are closed by the cache, not by callers
  }
  if (table != null) {
    table.close();
  }
}
/**
 * One-time suite setup: (re)creates the test table and loads every document
 * from the JSON fixture, using each document's "business_id" field as the key.
 *
 * @throws Exception if table creation or data load fails
 */
@BeforeClass
public static void setup_TestSimpleJson() throws Exception {
  try (Table table = DBTests.createOrReplaceTable(TABLE_NAME);
      InputStream in = MaprDBTestsSuite.getJsonStream(JSON_FILE_URL);
      DocumentStream stream = Json.newDocumentStream(in)) {
    tableCreated = true;
    tablePath = table.getPath().toUri().getPath();

    // Insert each fixture document keyed by its "business_id" field.
    for (Document doc : stream) {
      table.insert(doc, "business_id");
    }
    table.flush();
  }
}
List<ScanRange> ranges = table.getMetaTable().getScanRanges(plugin.getRestrictedScanRangeSizeMB()); logger.debug("Num scan ranges for table {} = {}", table.getName(), ranges.size());
System.out.println("creating table " + tableName); table = MapRDB.createTable(tableName); table.setOption(Table.TableOption.BUFFERWRITE, true); } else { table = MapRDB.getTable(tableName); table.insertOrReplace(tick.getTradeSequenceNumber(), document);
/**
 * Renders this reader for diagnostics: the table path, the current document id
 * (when a document is in flight), and the underlying reader.
 */
@Override
public String toString() {
  final StringBuilder buf = new StringBuilder("MaprDBJsonRecordReader[Table=");
  buf.append(table != null ? table.getPath() : null);
  if (reader != null) {
    buf.append(", Document ID=").append(IdCodec.asString(reader.getId()));
  }
  return buf.append(", reader=").append(reader).append(']').toString();
}
}
/**
 * Probes a single document from the table to prime the output vector schema
 * before regular row processing starts.
 *
 * <p>The writer position is reset to 0 both before and after the probe so the
 * probed document does not survive into the real output batch.
 */
public void readToInitSchema() {
  DBDocumentReaderBase reader = null;
  vectorWriter.setPosition(0);
  try (DocumentStream dstream = table.find()) {
    // NOTE(review): assumes the table is non-empty; iterator().next() throws
    // NoSuchElementException on an empty table — confirm callers guarantee this.
    reader = (DBDocumentReaderBase) dstream.iterator().next().asReader();
    documentWriter.writeDBDocument(vectorWriter, reader);
  } catch(UserException e) {
    // Re-wrap with table path and document id so the failing row is identifiable.
    throw UserException.unsupportedError(e)
        .addContext(String.format("Table: %s, document id: '%s'",
            getTable().getPath(),
            reader == null ? null : IdCodec.asString(reader.getId())))
        .build(logger);
  } catch (SchemaChangeException e) {
    if (getIgnoreSchemaChange()) {
      // Tolerated: drop the probe row; keep whatever schema was written so far.
      logger.warn("{}. Dropping the row from result.", e.getMessage());
      logger.debug("Stack trace:", e);
    } else {
      throw dataReadError(logger, e);
    }
  } finally {
    // Rewind so the probe never leaks into the first real batch.
    vectorWriter.setPosition(0);
  }
}
/** * Get the estimated average rowsize. DO NOT call this API directly. * Call the stats API instead which modifies the counts based on preference options. * @param index, to use for generating the estimate * @return row count post filtering */ public MapRDBStatisticsPayload getAverageRowSizeStats(IndexDescriptor index) { IndexDesc indexDesc = null; double avgRowSize = AVG_ROWSIZE_UNKNOWN; if (index != null) { indexDesc = (IndexDesc)((MapRDBIndexDescriptor)index).getOriginalDesc(); } // If no index is specified, get it from the primary table if (indexDesc == null && scanSpec.isSecondaryIndex()) { throw new UnsupportedOperationException("getAverageRowSizeStats should be invoked on primary table"); } // Get the index table or primary table and use the DB API to get the estimated number of rows. For size estimates, // we assume that all the columns would be read from the disk. final Table table = this.formatPlugin.getJsonTableCache().getTable(scanSpec.getTableName(), indexDesc, getUserName()); if (table != null) { final MetaTable metaTable = table.getMetaTable(); if (metaTable != null) { avgRowSize = metaTable.getAverageRowSize(); } } logger.debug("index_plan_info: getEstimatedRowCount obtained from DB Client for {}: indexName: {}, indexInfo: {}, " + "avgRowSize: {}, estimatedSize {}", this, (indexDesc == null ? "null" : indexDesc.getIndexName()), (indexDesc == null ? "null" : indexDesc.getIndexInfo()), avgRowSize); return new MapRDBStatisticsPayload(ROWCOUNT_UNKNOWN, ROWCOUNT_UNKNOWN, avgRowSize); }
private void createIndex(Table table, String[] indexDef) throws Exception { if (indexDef == null) { // don't create index here. indexes may have been created return; } for (int i = 0; i < indexDef.length / 3; ++i) { String indexCmd = String.format("maprcli table index add" + " -path " + table.getPath() + " -index %s" + " -indexedfields '%s'" + ((indexDef[3 * i + 2].length()==0)?"":" -includedfields '%s'") + ((indexDef[3 * i].startsWith("hash"))? " -hashed true" : ""), indexDefInCommand(indexDef[3 * i]), // index name indexDefInCommand(indexDef[3 * i + 1]), // indexedfields indexDefInCommand(indexDef[3 * i + 2])); // includedfields System.out.println(indexCmd); TestCluster.runCommand(indexCmd); DBTests.admin().getTableIndexes(table.getPath(), true); } }
/**
 * Reads one document from the table purely to initialize the output schema;
 * the writer position is rewound afterwards so the probe row is discarded.
 */
public void readToInitSchema() {
  DBDocumentReaderBase docReader = null;
  vectorWriter.setPosition(0);
  try (DocumentStream stream = table.find()) {
    docReader = (DBDocumentReaderBase) stream.iterator().next().asReader();
    documentWriter.writeDBDocument(vectorWriter, docReader);
  } catch (UserException e) {
    // Attach table/document context so the failing row can be identified.
    final String context = String.format("Table: %s, document id: '%s'",
        getTable().getPath(),
        docReader == null ? null : IdCodec.asString(docReader.getId()));
    throw UserException.unsupportedError(e).addContext(context).build(logger);
  } catch (SchemaChangeException e) {
    if (getIgnoreSchemaChange()) {
      // Schema changes are tolerated here: log and drop the probe row.
      logger.warn("{}. Dropping the row from result.", e.getMessage());
      logger.debug("Stack trace:", e);
    } else {
      throw dataReadError(logger, e);
    }
  } finally {
    vectorWriter.setPosition(0);
  }
}
final Table t = this.formatPlugin.getJsonTableCache().getTable( scanSpec.getTableName(), scanSpec.getIndexDesc(), getUserName()); final MetaTable metaTable = t.getMetaTable();
List<ScanRange> ranges = table.getMetaTable().getScanRanges(plugin.getRestrictedScanRangeSizeMB()); logger.debug("Num scan ranges for table {} = {}", table.getName(), ranges.size());
@BeforeClass public static void setup_TestSimpleJson() throws Exception { // We create a large table with auto-split set to disabled. // Without intra-tablet partitioning, this test should run with only one minor fragment try (Table table = DBTests.createOrReplaceTable(TABLE_NAME, false /*autoSplit*/); InputStream in = MaprDBTestsSuite.getJsonStream(JSON_FILE_URL); DocumentStream stream = Json.newDocumentStream(in)) { tableCreated = true; tablePath = table.getPath().toUri().getPath(); List<Document> docs = Lists.newArrayList(stream); for (char ch = 'A'; ch <= 'T'; ch++) { for (int rowIndex = 0; rowIndex < 5000; rowIndex++) { for (int i = 0; i < docs.size(); i++) { final Document document = docs.get(i); final String id = String.format("%c%010d%03d", ch, rowIndex, i); document.set("documentId", rowIndex); table.insertOrReplace(id, document); } } } table.flush(); DBTests.waitForRowCount(table.getPath(), TOTAL_ROW_COUNT); setSessionOption("planner.width.max_per_node", 5); } }
throw UserException.unsupportedError(e) .addContext(String.format("Table: %s, document id: '%s'", table.getPath(), document.asReader() == null ? null : IdCodec.asString(((DBDocumentReaderBase)document.asReader()).getId())))
/**
 * Prepares this reader for reading: wires up the output vector writer, opens
 * the document scan over the table, and initializes the document writer.
 *
 * @param context the operator context for this scan
 * @param output  mutator backing the output vectors
 * @throws ExecutionSetupException if the underlying DB scan cannot be opened
 */
@Override
public void setup(OperatorContext context, OutputMutator output) throws ExecutionSetupException {
  this.vectorWriter = new VectorContainerWriter(output, unionEnabled);
  this.vectorWriterMutator = output;
  this.operatorContext = context;
  try {
    // Only materialize the _id field when the query actually projects it.
    table.setOption(TableOption.EXCLUDEID, !includeId);
    documentStream = table.find(condition, scannedFields);
    documentIterator = documentStream.iterator();
    setupWriter();
  } catch (DBException ex) {
    throw new ExecutionSetupException(ex);
  }
}
/*
/**
 * Lazily initializes the cached full-table row count and estimated size.
 *
 * <p>Primary source is the DB client's {@code MetaTable} scan stats. The MapR-DB
 * client can return 0 right after table creation (stats are only cached
 * client-side after ~15 minutes); in that case this falls back to the older
 * admin API and derives the size from column count and average column size.
 *
 * @throws DrillRuntimeException if region/stats info cannot be obtained
 */
private void init() {
  try {
    // Get the fullTableRowCount only once i.e. if not already obtained before.
    if (fullTableRowCount == 0) {
      final Table t = this.formatPlugin.getJsonTableCache().getTable(
          scanSpec.getTableName(), scanSpec.getIndexDesc(), getUserName());
      final MetaTable metaTable = t.getMetaTable();

      // For condition null, we get full table stats.
      com.mapr.db.scan.ScanStats stats = metaTable.getScanStats();
      fullTableRowCount = stats.getEstimatedNumRows();
      fullTableEstimatedSize = stats.getEstimatedSize();

      // MapRDB client can return invalid rowCount i.e. 0, especially right after table
      // creation. It takes 15 minutes before table stats are obtained and cached in client.
      // If we get 0 rowCount, fallback to getting rowCount using old admin API.
      if (fullTableRowCount == 0) {
        PluginCost pluginCostModel = formatPlugin.getPluginCostModel();
        final int avgColumnSize = pluginCostModel.getAverageColumnSize(this);
        // Star (or empty) projections are costed with a fixed column count.
        final int numColumns = (columns == null || columns.isEmpty() || Utilities.isStarQuery(columns)) ? STAR_COLS : columns.size();
        MapRDBTableStats tableStats = new MapRDBTableStats(formatPlugin.getFsConf(), scanSpec.getTableName());
        fullTableRowCount = tableStats.getNumRows();
        fullTableEstimatedSize = fullTableRowCount * numColumns * avgColumnSize;
      }
    }
  } catch (Exception e) {
    // Boundary catch: wrap anything from the DB client into a Drill runtime error
    // with table (and index, when present) context; the cause is preserved.
    throw new DrillRuntimeException("Error getting region info for table: " +
        scanSpec.getTableName() + (scanSpec.getIndexDesc() == null ? "" : (", index: " + scanSpec.getIndexName())), e);
  }
}
/**
 * Releases the document stream and the underlying table.
 *
 * <p>Fixed: previously, if {@code documentStream.close()} threw, the table was
 * never closed and leaked. The table close now runs in a {@code finally} block
 * so both resources are released even when the stream close fails.
 */
@Override
public void close() {
  try {
    if (documentStream != null) {
      documentStream.close();
    }
  } finally {
    if (table != null) {
      table.close();
    }
  }
}
} catch (UserException e) { throw UserException.unsupportedError(e).addContext(String.format("Table: %s, document id: '%s'", getTable().getPath(), reader == null ? null : IdCodec.asString(reader.getId()))).build(logger); } catch (SchemaChangeException e) { if (getIgnoreSchemaChange()) {