@Override
public synchronized DrillBuf capacity(int newCapacity) {
  if (newCapacity == length) {
    return this;
  }

  Preconditions.checkArgument(newCapacity >= 0);

  if (newCapacity < length) {
    length = newCapacity;
    return this;
  }

  throw new UnsupportedOperationException("Buffers don't support resizing that increases the size.");
}
public RpcConfig build() {
  Preconditions.checkArgument(timeout > -1, "Timeout must be a positive number or zero for disabled.");
  Preconditions.checkArgument(name != null, "RpcConfig name must be set.");
  return new RpcConfig(name, sendMap, receiveMap, timeout, executor);
}
@Override
public ByteBuf retain(int increment) {
  // Guava's Preconditions only expands %s placeholders, so %s (not %d) is used in the message template.
  Preconditions.checkArgument(increment > 0, "retain(%s) argument is not positive", increment);

  if (isEmpty) {
    return this;
  }

  if (BaseAllocator.DEBUG) {
    historicalLog.recordEvent("retain(%d)", increment);
  }

  // getAndAdd returns the previous count; a non-positive value means the buffer was already released.
  final int originalReferenceCount = refCnt.getAndAdd(increment);
  Preconditions.checkArgument(originalReferenceCount > 0);
  return this;
}
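// Minimal, self-contained sketch (not Drill code) of the retain() pattern above: validate the
// increment with Preconditions, bump the counter atomically with getAndAdd, and use the previous
// count to verify the object was still live. The class and member names here are hypothetical.
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.base.Preconditions;

class RefCountedResource {
  private final AtomicInteger refCnt = new AtomicInteger(1);

  RefCountedResource retain(int increment) {
    Preconditions.checkArgument(increment > 0, "retain(%s) argument is not positive", increment);
    final int original = refCnt.getAndAdd(increment);
    // A non-positive previous count means the resource was already released.
    Preconditions.checkArgument(original > 0, "retain() called on an already released resource");
    return this;
  }
}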
@Override
@JsonIgnore
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new RestrictedJsonTableGroupScan(this);
}
@Override
@JsonIgnore
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new JsonTableGroupScan(this);
}
@Override
@JsonIgnore
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new KuduGroupScan(this);
}
@Override
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new KuduSubScan(kuduStoragePlugin, tabletScanSpecList, columns);
}
@Override
@JsonIgnore
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new HBaseGroupScan(this);
}
@Override
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) throws ExecutionSetupException {
  Preconditions.checkArgument(children.isEmpty());
  return new KafkaGroupScan(this);
}
public JsonParser asParser() {
  Preconditions.checkArgument(this.root != null,
      "Attempted to grab JSONOptions as Parser when no root node was stored. "
      + "You can only convert non-opaque JSONOptions values to parsers.");
  return new TreeTraversingParser(root);
}
@Override
@JsonIgnore
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new OpenTSDBGroupScan(this);
}
@Override
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new HiveDrillNativeParquetRowGroupScan(getUserName(), hiveStoragePlugin, rowGroupReadEntries,
      columns, hivePartitionHolder, confProperties, readerConfig, filter);
}
public DrillBuf reallocIfNeeded(final int size) {
  Preconditions.checkArgument(size >= 0, "reallocation size must be non-negative");

  if (this.capacity() >= size) {
    return this;
  }

  if (bufManager != null) {
    return bufManager.replace(this, size);
  } else {
    throw new UnsupportedOperationException("Realloc is only available in the context of an operator's UDFs");
  }
}
@Override
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new HBaseSubScan(getUserName(), hbaseStoragePlugin, regionScanSpecList, columns);
}
@Override
public CloseableRecordBatch getBatch(ExecutorFragmentContext context, HiveDrillNativeParquetRowGroupScan rowGroupScan,
    List<RecordBatch> children) throws ExecutionSetupException {
  Preconditions.checkArgument(children.isEmpty());
  OperatorContext oContext = context.newOperatorContext(rowGroupScan);
  return getBatch(context, rowGroupScan, oContext);
}
@Override
public PhysicalOperator getNewWithChildren(List<PhysicalOperator> children) {
  Preconditions.checkArgument(children.isEmpty());
  return new MapRDBSubScan(getUserName(), formatPlugin, regionScanSpecList, columns, tableType);
}
@Override
public CloseableRecordBatch getBatch(ExecutorFragmentContext context, KafkaSubScan subScan,
    List<RecordBatch> children) throws ExecutionSetupException {
  Preconditions.checkArgument(children.isEmpty());

  List<SchemaPath> columns = subScan.getColumns() != null ? subScan.getColumns() : GroupScan.ALL_COLUMNS;
  List<RecordReader> readers = new LinkedList<>();
  for (KafkaPartitionScanSpec scanSpec : subScan.getPartitionSubScanSpecList()) {
    readers.add(new KafkaRecordReader(scanSpec, columns, context, subScan.getKafkaStoragePlugin()));
  }

  logger.info("Number of record readers initialized: {}", readers.size());
  return new ScanBatch(subScan, context, readers);
}
@Override
public void load(SerializedField metadata, DrillBuf buffer) {
  Preconditions.checkArgument(this.field.getName().equals(metadata.getNamePart().getName()),
      "The field %s doesn't match the provided metadata %s.", this.field, metadata);

  final int actualLength = metadata.getBufferLength();
  final int valueCount = metadata.getValueCount();
  final int expectedLength = valueCount * VALUE_WIDTH;
  assert actualLength == expectedLength
      : String.format("Expected to load %d bytes but actually loaded %d bytes", expectedLength, actualLength);

  this.valueCount = valueCount;
}
@Override
public ScanBatch getBatch(ExecutorFragmentContext context, JdbcSubScan config, List<RecordBatch> children)
    throws ExecutionSetupException {
  Preconditions.checkArgument(children.isEmpty());

  JdbcStoragePlugin plugin = config.getPlugin();
  RecordReader reader = new JdbcRecordReader(plugin.getSource(), config.getSql(), plugin.getName());
  return new ScanBatch(config, context, Collections.singletonList(reader));
}
protected SubDirTestWatcher(File baseDir, boolean createAtBeginning, boolean deleteAtEnd, List<Path> subDirs) {
  this.baseDir = Preconditions.checkNotNull(baseDir);
  this.createAtBeginning = createAtBeginning;
  this.deleteAtEnd = deleteAtEnd;
  this.subDirs = Preconditions.checkNotNull(subDirs);

  Preconditions.checkArgument(!subDirs.isEmpty(), "The list of subDirs is empty.");
}
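// Minimal, self-contained sketch (not taken from any of the classes above) of the constructor-
// validation pattern these snippets share: checkNotNull returns its argument so it can be assigned
// inline, and checkArgument rejects invalid values with a lazily formatted %s message.
// The class and field names below are hypothetical.
import java.util.List;
import com.google.common.base.Preconditions;

class ScanConfig {
  private final String tableName;
  private final List<String> columns;

  ScanConfig(String tableName, List<String> columns) {
    this.tableName = Preconditions.checkNotNull(tableName, "tableName must not be null");
    this.columns = Preconditions.checkNotNull(columns, "columns must not be null");
    Preconditions.checkArgument(!columns.isEmpty(), "Expected at least one column, got %s", columns.size());
  }
}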