/**
 * Serializes this object as its dotted schema-path string when written to JSON.
 *
 * @return the schema path of the underlying namespace key
 */
@JsonValue
public String toPathString() {
  return this.namespaceKey.getSchemaPath();
}
/**
 * Builds the dotted schema-path string for the given ordered path components.
 *
 * @param paths ordered path components
 * @return the schema path produced by {@code NamespaceKey.getSchemaPath()}
 */
public static String makePathString(final List<String> paths) {
  final NamespaceKey key = new NamespaceKey(paths);
  return key.getSchemaPath();
}
/**
 * Creates an internal key for the given namespace path, pre-encoding every path
 * component to UTF-8 bytes.
 *
 * @param path      the namespace path; must contain at least one non-empty component
 * @param normalize when true, components are lower-cased before encoding
 * @throws UserException validation error if the path has no components or any
 *                       component is the empty string
 */
NamespaceInternalKey(final NamespaceKey path, boolean normalize) {
  this.namespaceKey = path;
  this.namespaceFullPath = path.getSchemaPath();
  this.keyBytes = null;

  final List<String> pathComponents = path.getPathComponents();
  this.components = pathComponents.size();
  if (components == 0) {
    throw invalidKeyError(namespaceFullPath);
  }

  // Convert each path component into bytes; an empty component is rejected
  // because it would make the encoded key ambiguous.
  this.pathComponentBytes = new byte[components][];
  for (int i = 0; i < components; ++i) {
    final String component = pathComponents.get(i);
    if (component.isEmpty()) {
      throw invalidKeyError(namespaceFullPath);
    }
    final String encoded = normalize ? component.toLowerCase() : component;
    this.pathComponentBytes[i] = encoded.getBytes(UTF_8);
  }
}

/** Builds the validation error thrown for a malformed namespace key (deduplicated from the constructor). */
private static UserException invalidKeyError(final String fullPath) {
  return UserException.validationError()
      .message("Invalid name space key. Given: %s, Expected format: %s", fullPath, NAMESPACE_PATH_FORMAT)
      .build(logger);
}
// NOTE(review): this is a mid-method fragment — the statement after
// `return hasAccess;` is unreachable as written and presumably belongs to a
// different branch of the enclosing method; confirm against the full source.
boolean hasAccess = plugin.get().hasAccessPermission(username, namespaceKey, config);
permissionCheck.stop();
// Record a cache-miss permission check for this dataset, with its latency in ms.
metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), PermissionCheckAccessType.PERMISSION_CACHE_MISS.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS));
return hasAccess;
// Unreachable here (immediately follows the return above) — TODO confirm intended placement.
metadataStatsCollector.addDatasetStat(namespaceKey.getSchemaPath(), permissionCheckAccessType.name(), permissionCheck.elapsed(TimeUnit.MILLISECONDS));
/**
 * Finds jobs whose parent dataset matches the given path, combined with the
 * UI/external jobs filter.
 *
 * @param datasetPath path of the parent dataset
 * @param limit       maximum number of jobs to return
 * @return the matching jobs
 */
@Override
public Iterable<Job> getJobsForParent(final NamespaceKey datasetPath, int limit) {
  final SearchQuery parentMatch =
      SearchQueryUtils.newTermQuery(PARENT_DATASET, datasetPath.getSchemaPath());
  final SearchQuery query = SearchQueryUtils.and(parentMatch, JobIndexKeys.UI_EXTERNAL_JOBS_FILTER);
  final FindByCondition condition = new FindByCondition().setCondition(query).setLimit(limit);
  return findJobs(condition);
}
// Dotted schema-path string form of the namespace path.
final String pathString = path.getSchemaPath();
/**
 * Upgrade task: clears the cached read definition of every Hive table so the
 * metadata is re-fetched after the upgrade.
 *
 * @param context upgrade context supplying the KV store provider
 * @throws Exception wrapped as RuntimeException when the namespace cannot be
 *                   traversed or updated
 */
@Override
public void upgrade(UpgradeContext context) throws Exception {
  final NamespaceService namespaceService = new NamespaceServiceImpl(context.getKVStoreProvider());
  try {
    for (SourceConfig source : namespaceService.getSources()) {
      // Only Hive sources carry the read definitions this task must clear.
      final boolean isHiveSource = "HIVE".equalsIgnoreCase(ConnectionReader.toType(source));
      if (!isHiveSource) {
        continue;
      }
      System.out.printf("  Handling Hive source %s%n", source.getName());
      final NamespaceKey sourceKey = new NamespaceKey(source.getName());
      for (NamespaceKey datasetPath : namespaceService.getAllDatasets(sourceKey)) {
        final DatasetConfig datasetConfig = namespaceService.getDataset(datasetPath);
        // Skip datasets with nothing cached to clear.
        if (datasetConfig.getReadDefinition() == null
            || datasetConfig.getReadDefinition().getExtendedProperty() == null) {
          continue;
        }
        System.out.printf("    Clearing read definition of table %s%n", datasetPath.getSchemaPath());
        datasetConfig.setReadDefinition(null);
        namespaceService.addOrUpdateDataset(datasetPath, datasetConfig);
      }
    }
  } catch (NamespaceException e) {
    throw new RuntimeException("Hive121BasedInputSplits failed", e);
  }
}
/**
 * Builds a query-profile dataset entry for the given table.
 *
 * <p>Views are reported as VDS with their SQL; everything else as PDS with the
 * serialized batch schema. Any failure is logged and {@code null} is returned
 * rather than propagated.
 *
 * @param t the table to describe
 * @return the dataset profile, or {@code null} if it could not be built
 */
private UserBitShared.DatasetProfile buildDatasetProfile(final DremioTable t) {
  try {
    if (t instanceof ViewTable) {
      final ViewTable view = (ViewTable) t;
      return UserBitShared.DatasetProfile.newBuilder()
          .setDatasetPath(t.getPath().getSchemaPath())
          .setType(UserBitShared.DatasetType.VDS)
          .setSql(view.getView().getSql())
          .build();
    }
    // Fix: use a primitive and treat a null getAllowApproxStats() as false.
    // The original assigned the boxed getter result directly to a Boolean,
    // which could be null and throw NPE when later unboxed.
    boolean allowApproxStats = false;
    final PhysicalDataset physicalDataset = t.getDatasetConfig().getPhysicalDataset();
    if (physicalDataset != null) {
      allowApproxStats = Boolean.TRUE.equals(physicalDataset.getAllowApproxStats());
    }
    return UserBitShared.DatasetProfile.newBuilder()
        .setDatasetPath(t.getPath().getSchemaPath())
        .setType(UserBitShared.DatasetType.PDS)
        .setBatchSchema(ByteString.copyFrom(t.getSchema().serialize()))
        .setAllowApproxStats(allowApproxStats)
        .build();
  } catch (Exception e) {
    logger.warn("Couldn't build dataset profile for table {}", t.getPath().getSchemaPath(), e);
  }
  return null;
}
// Progress output for the compaction task; the schema path identifies the table.
System.out.printf("  Compressing Table '%s'...", datasetPath.getSchemaPath());
// NOTE(review): mid-method fragment — the trailing expression pairs below are
// bare arguments (no enclosing call visible); presumably they belong to
// writer.write(...) calls indexing parent/all dataset paths — confirm against
// the full source.
// Index the job's searchable fields: type, state, dataset path/version, start time.
writer.write(QUERY_TYPE, jobInfo.getQueryType().name());
writer.write(JOB_STATE, jobAttempt.getState().name());
writer.write(DATASET, datasetPath.getSchemaPath());
writer.write(DATASET_VERSION, jobInfo.getDatasetVersion());
writer.write(START_TIME, jobInfo.getStartTime());
parentDatasetPath.getSchemaPath(), Joiner.on(PathUtils.getPathDelimiter()).join(parentDatasetPath.getPathComponents()));
allDatasetPath.getSchemaPath(), Joiner.on(PathUtils.getPathDelimiter()).join(allDatasetPath.getPathComponents()));
// NOTE(review): test-code fragment — the if-block opened here continues past
// this view; kept as-is.
// Asserts the returned schema path never exposes the ".hidden" component.
String response = table.getName().getSchemaPath();
assertTrue(!(response.contains(".hidden")));
if(response.contains("nothidden")) {
/**
 * Expands a view into its relational plan by converting the view's SQL within
 * its workspace schema, then checks the validated row type against the view's
 * declared row type.
 *
 * @param view the view table to expand
 * @return the expanded relational root
 * @throws UserException plan error if SQL-to-rel conversion fails
 */
public RelRoot expandView(ViewTable view) {
  final String viewSql = view.getView().getSql();
  final RelRoot root;
  try {
    root = DremioSqlToRelConverter.expandView(
        view.getPath(),
        view.getViewOwner(),
        viewSql,
        view.getView().getWorkspaceSchemaPath(),
        sqlConverter);
  } catch (Exception ex) {
    throw UserException.planError(ex)
        .message("Error while expanding view %s", view.getPath())
        .addContext("View SQL", viewSql)
        .build(logger);
  }
  // Guard against drift between the stored view definition and what validation produced.
  checkRowTypeConsistency(
      root.validatedRowType,
      view.getView().getRowType(sqlConverter.getCluster().getTypeFactory()),
      view.getPath().getSchemaPath());
  return root;
}
// NOTE(review): mid-method fragment — the stats call after
// `return namespaceTable;` is unreachable as written and presumably belongs to
// a different branch (partial-metadata path) of the enclosing method; confirm
// against the full source.
final NamespaceKey canonicalKey = new NamespaceKey(datasetConfig.getFullPathList());
final NamespaceTable namespaceTable = new NamespaceTable(new TableMetadataImpl(plugin.getId(), datasetConfig, options.getSchemaConfig().getUserName(), DatasetSplitsPointer.of(userNamespaceService, datasetConfig)));
// Record a cached-metadata hit for this dataset, with elapsed time in ms.
options.getStatsCollector().addDatasetStat(canonicalKey.getSchemaPath(), MetadataAccessType.CACHED_METADATA.name(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
return namespaceTable;
// Unreachable here (immediately follows the return above) — TODO confirm intended placement.
options.getStatsCollector().addDatasetStat(canonicalKey.getSchemaPath(), MetadataAccessType.PARTIAL_METADATA.name(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
// Index the dataset id as the dotted schema path built from the container's full path list.
writer.write(DatasetIndexKeys.DATASET_ID, new NamespaceKey(container.getFullPathList()).getSchemaPath());