/**
 * Table creation with explicit properties is not supported by this implementation.
 *
 * @param schema     the table schema
 * @param spec       the partition spec
 * @param properties table properties
 * @param table      the table identifier
 * @return never returns normally
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final Map<String, String> properties,
                    final String table) {
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Creates the qualified name by invoking the given supplier.
 *
 * <p>Any failure while building the name is logged and surfaced to the caller
 * as a bad-request error.
 *
 * @param nameSupplier supplier that constructs the qualified name
 * @return the qualified name produced by the supplier
 * @throws MetacatBadRequestException if the supplier throws
 */
public QualifiedName qualifyName(final Supplier<QualifiedName> nameSupplier) {
    final QualifiedName qualifiedName;
    try {
        qualifiedName = nameSupplier.get();
    } catch (Exception e) {
        log.error("Invalid qualified name", e);
        throw new MetacatBadRequestException(e.getMessage());
    }
    return qualifiedName;
}
// Map an HTTP status code returned by the Metacat server to the matching client-side exception.
case 501: //NOT IMPLEMENTED
case 415: //UNSUPPORTED_MEDIA_TYPE
    return new MetacatNotSupportedException(message);
case 400: //BAD_REQUEST
    return new MetacatBadRequestException(message);
case 403: //Forbidden
    return new MetacatUnAuthorizedException(message);
case 404: //NOT_FOUND
    return new MetacatNotFoundException(message);
case 409: //CONFLICT
    return new MetacatAlreadyExistsException(message);
case 412: // PRECONDITION_FAILED
    return new MetacatPreconditionFailedException(message);
case 429: // TOO_MANY_REQUESTS
    return new MetacatTooManyRequestsException(message);
case 500: //INTERNAL_SERVER_ERROR
case 503: //SERVICE_UNAVAILABLE
    // Server-side/transient failures: signal that the caller may retry. No cause available here.
    return new RetryableException(message, null);
default:
    // Any unrecognized status collapses into the generic Metacat exception.
    return new MetacatException(message);
// Translate connector/Hive exceptions into Metacat API exceptions; each branch records a
// failure metric and logs before rethrowing the mapped exception type.
collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
log.error(e.getMessage(), e);
throw new MetacatNotSupportedException("Catalog does not support the operation");
} catch (DatabaseAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) {
    collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
    log.error(e.getMessage(), e);
    throw new MetacatAlreadyExistsException(e.getMessage());
} catch (NotFoundException | MetacatNotFoundException e) {
    collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
    log.error(e.getMessage(), e);
    throw new MetacatNotFoundException(
        String.format("Unable to locate for %s. Details: %s", name, e.getMessage()));
} catch (InvalidMetaException | IllegalArgumentException e) {
    collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
    log.error(e.getMessage(), e);
    // Append the cause's message (empty string when no cause) to give the caller full context.
    throw new MetacatBadRequestException(
        String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
} catch (TablePreconditionFailedException e) {
    collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
    log.error(e.getMessage(), e);
    throw new MetacatPreconditionFailedException(
        String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
} catch (ConnectorException e) {
    // NOTE(review): 'ex' is not defined in this scope — it should almost certainly be 'e'.
    // Confirm against version control before fixing.
    if (ex.getMessage().contains("too many connections")
        || ex.getMessage().contains("Timeout: Pool empty")) {
        // Connection-pool exhaustion is reported as a throttling (429-style) condition.
        throw new MetacatTooManyRequestsException(ex.getMessage());
    // NOTE(review): the '}' closing this 'if' appears to be missing, which would make the
    // throw below unreachable; 'message' is also not defined in this fragment. The block
    // looks garbled in extraction — restore the original statement layout from VCS.
    throw new MetacatException(message, e);
/**
 * Constructor.
 *
 * <p>Validates that a Druid coordinator URI is configured and syntactically
 * valid before accepting it; fails fast otherwise, since no Druid call can
 * succeed without it.
 *
 * @param connectorContext connector context
 * @param restTemplate     rest template
 * @throws MetacatException if the coordinator URI is missing or not a valid URI
 */
public DruidHttpClientImpl(final ConnectorContext connectorContext,
                           final RestTemplate restTemplate) {
    this.restTemplate = restTemplate;
    final Map<String, String> config = connectorContext.getConfiguration();
    final String coordinatorUri = config.get(DruidConfigConstants.DRUID_COORDINATOR_URI);
    if (coordinatorUri == null) {
        // Bug fix: message previously read "ending point".
        throw new MetacatException("Druid cluster endpoint not provided.");
    }
    try {
        // Parse only to validate syntax; the raw string is what we keep.
        new URI(coordinatorUri);
    } catch (URISyntaxException exception) {
        // Bug fix: include the offending value and preserve the cause (previously dropped).
        throw new MetacatException("Druid endpoint invalid: " + coordinatorUri, exception);
    }
    this.druidURI = coordinatorUri;
    log.info("druid server uri={}", this.druidURI);
}
/**
 * Check at database level.
 *
 * <p>If an access-control entry exists for the database and is non-empty,
 * the user must appear in it; otherwise the operation is rejected.
 *
 * @param accessACL map of database qualified name to the set of permitted users
 * @param userName  the user attempting the operation
 * @param name      the qualified name being operated on
 * @param op        the Metacat operation being attempted
 * @throws MetacatUnAuthorizedException if the user is not permitted
 */
private void checkPermit(final Map<QualifiedName, Set<String>> accessACL,
                         final String userName,
                         final QualifiedName name,
                         final MetacatOperation op) {
    final QualifiedName database =
        QualifiedName.ofDatabase(name.getCatalogName(), name.getDatabaseName());
    final Set<String> permittedUsers = accessACL.get(database);
    // No ACL entry, or an empty entry, means no restriction for this database.
    if (permittedUsers == null || permittedUsers.isEmpty() || permittedUsers.contains(userName)) {
        return;
    }
    throw new MetacatUnAuthorizedException(
        String.format("%s is not permitted for %s %s", userName, op.name(), name));
}
/**
 * {@inheritDoc}
 */
@Nonnull
@Override
public List<CatalogMappingDto> getCatalogNames() {
    if (connectorManager.getCatalogs().isEmpty()) {
        // No catalogs registered with the connector manager — report not-found.
        throw new MetacatNotFoundException("Unable to locate any catalogs");
    }
    final List<CatalogMappingDto> mappings = connectorManager.getCatalogs().stream()
        .map(catalog -> new CatalogMappingDto(catalog.getCatalogName(), catalog.getType()))
        .distinct()
        .collect(Collectors.toList());
    return mappings;
}
/**
 * Table creation is not supported by this implementation.
 *
 * @param schema the table schema
 * @param spec   the partition spec
 * @param tables the table identifier
 * @return never returns normally
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema, final PartitionSpec spec, final String tables) {
    throw new MetacatNotSupportedException("Not supported");
}
private void validAndUpdateVirtualView(final Table table) { if (isVirtualView(table) && Strings.isNullOrEmpty(table.getViewOriginalText())) { throw new MetacatBadRequestException( String.format("Invalid view creation for %s/%s. Missing viewOrginialText", table.getDbName(), table.getDbName())); } if (Strings.isNullOrEmpty(table.getViewExpandedText())) { //set viewExpandedText to viewOriginalTest table.setViewExpandedText(table.getViewOriginalText()); } //setting dummy string to view to avoid dropping view issue in hadoop Path org.apache.hadoop.fs if (Strings.isNullOrEmpty(table.getSd().getLocation())) { table.getSd().setLocation("file://tmp/" + table.getDbName() + "/" + table.getTableName()); } }
/**
 * Check at database level.
 *
 * <p>If an access-control entry exists for the database and is non-empty,
 * the user must appear in it; otherwise the operation is rejected.
 *
 * @param accessACL map of database qualified name to the set of permitted users
 * @param userName  the user attempting the operation
 * @param name      the qualified name being operated on
 * @param op        the Metacat operation being attempted
 * @throws MetacatUnAuthorizedException if the user is not permitted
 */
private void checkPermit(final Map<QualifiedName, Set<String>> accessACL,
                         final String userName,
                         final QualifiedName name,
                         final MetacatOperation op) {
    final QualifiedName database =
        QualifiedName.ofDatabase(name.getCatalogName(), name.getDatabaseName());
    final Set<String> permittedUsers = accessACL.get(database);
    // No ACL entry, or an empty entry, means no restriction for this database.
    if (permittedUsers == null || permittedUsers.isEmpty() || permittedUsers.contains(userName)) {
        return;
    }
    throw new MetacatUnAuthorizedException(
        String.format("%s is not permitted for %s %s", userName, op.name(), name));
}
/**
 * Table creation is not supported by this implementation.
 *
 * @param schema   the table schema
 * @param spec     the partition spec
 * @param database the database name
 * @param table    the table identifier
 * @return never returns normally
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final String database,
                    final String table) {
    // Consistency fix: sibling overloads use "Not supported"; this one said "not supported".
    throw new MetacatNotSupportedException("Not supported");
}
private void validAndUpdateVirtualView(final Table table) { if (isVirtualView(table) && Strings.isNullOrEmpty(table.getViewOriginalText())) { throw new MetacatBadRequestException( String.format("Invalid view creation for %s/%s. Missing viewOrginialText", table.getDbName(), table.getDbName())); } if (Strings.isNullOrEmpty(table.getViewExpandedText())) { //set viewExpandedText to viewOriginalTest table.setViewExpandedText(table.getViewOriginalText()); } //setting dummy string to view to avoid dropping view issue in hadoop Path org.apache.hadoop.fs if (Strings.isNullOrEmpty(table.getSd().getLocation())) { table.getSd().setLocation("file://tmp/" + table.getDbName() + "/" + table.getTableName()); } }
/**
 * Table creation is not supported by this implementation.
 *
 * @param schema the table schema
 * @param spec   the partition spec
 * @param tables the table identifier
 * @return never returns normally
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema, final PartitionSpec spec, final String tables) {
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Table creation is not supported by this implementation.
 *
 * @param schema   the table schema
 * @param spec     the partition spec
 * @param database the database name
 * @param table    the table identifier
 * @return never returns normally
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final String database,
                    final String table) {
    // Consistency fix: sibling overloads use "Not supported"; this one said "not supported".
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * Table creation with explicit properties is not supported by this implementation.
 *
 * @param schema     the table schema
 * @param spec       the partition spec
 * @param properties table properties
 * @param table      the table identifier
 * @return never returns normally
 * @throws MetacatNotSupportedException always
 */
@Override
public Table create(final Schema schema,
                    final PartitionSpec spec,
                    final Map<String, String> properties,
                    final String table) {
    throw new MetacatNotSupportedException("Not supported");
}
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionUris(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo
) {
    // Iceberg tables do not support this partition operation via the direct-SQL path.
    final boolean isIceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (isIceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionUris(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final PartitionListRequest partitionsRequest,
    final TableInfo tableInfo
) {
    // Iceberg tables do not support this partition operation via the direct-SQL path.
    final boolean isIceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (isIceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
/**
 * Number of partitions for the given table.
 *
 * @param requestContext the connector request context
 * @param tableName      tableName
 * @param tableInfo      the table metadata
 * @return Number of partitions
 * @throws MetacatNotSupportedException for Iceberg tables when Iceberg support is enabled
 */
@Override
public int getPartitionCount(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final TableInfo tableInfo
) {
    // Iceberg tables do not support this partition operation via the direct-SQL path.
    final boolean isIceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (isIceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
/**
 * Number of partitions for the given table.
 *
 * @param requestContext the connector request context
 * @param tableName      tableName
 * @param tableInfo      the table metadata
 * @return Number of partitions
 * @throws MetacatNotSupportedException for Iceberg tables when Iceberg support is enabled
 */
@Override
public int getPartitionCount(
    final ConnectorRequestContext requestContext,
    final QualifiedName tableName,
    final TableInfo tableInfo
) {
    // Iceberg tables do not support this partition operation via the direct-SQL path.
    final boolean isIceberg =
        context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo);
    if (isIceberg) {
        throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
    }
    return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
/**
 * {@inheritDoc}.
 */
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
    final String databaseName = name.getDatabaseName();
    try {
        this.metacatHiveClient.dropDatabase(databaseName);
    } catch (NoSuchObjectException exception) {
        // The database does not exist in the Hive metastore.
        throw new DatabaseNotFoundException(name, exception);
    } catch (MetaException exception) {
        // The metastore reported invalid metadata for this database.
        throw new InvalidMetaException(name, exception);
    } catch (InvalidOperationException exception) {
        throw new MetacatNotSupportedException(exception.getMessage());
    } catch (TException exception) {
        // Any other thrift-level failure is a generic connector error.
        throw new ConnectorException(String.format("Failed delete hive database %s", name), exception);
    }
}