com.netflix.metacat.common.exception

How to use com.netflix.metacat.common.exception

Best Java code snippets using com.netflix.metacat.common.exception (Showing top 20 results out of 315)

origin: Netflix/metacat

@Override
public Table create(final Schema schema,
          final PartitionSpec spec,
          final Map<String, String> properties,
          final String table) {
  throw new MetacatNotSupportedException("Not supported");
}
origin: Netflix/metacat

/**
 * Creates the qualified name.
 *
 * @param nameSupplier supplier
 * @return name
 */
public QualifiedName qualifyName(final Supplier<QualifiedName> nameSupplier) {
  try {
    return nameSupplier.get();
  } catch (Exception e) {
    log.error("Invalid qualified name", e);
    throw new MetacatBadRequestException(e.getMessage());
  }
}
origin: Netflix/metacat

case 501: //NOT IMPLEMENTED
case 415: //UNSUPPORTED_MEDIA_TYPE
  return new MetacatNotSupportedException(message);
case 400: //BAD_REQUEST
  return new MetacatBadRequestException(message);
case 403: //Forbidden
  return new MetacatUnAuthorizedException(message);
case 404: //NOT_FOUND
  return new MetacatNotFoundException(message);
case 409: //CONFLICT
  return new MetacatAlreadyExistsException(message);
case 412: // PRECONDITION_FAILED
  return new MetacatPreconditionFailedException(message);
case 429: //TOO_MANY_REQUESTS
  return new MetacatTooManyRequestsException(message);
case 500: //INTERNAL_SERVER_ERROR
case 503: //SERVICE_UNAVAILABLE
  return new RetryableException(message, null);
default:
  return new MetacatException(message);
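The fragment above translates HTTP status codes from a Metacat response into this package's exception types. Below is a minimal caller-side sketch of handling one of those exceptions; the helper class and method are illustrative assumptions, and only MetacatNotFoundException comes from com.netflix.metacat.common.exception.

import java.util.Optional;
import java.util.function.Supplier;

import com.netflix.metacat.common.exception.MetacatNotFoundException;

/**
 * Hypothetical caller-side helper (not part of Metacat): treats a 404-style
 * MetacatNotFoundException as an empty result and lets other Metacat
 * exceptions (bad request, conflict, throttling) propagate.
 */
public final class MetacatCallHelper {

  private MetacatCallHelper() {
  }

  public static <T> Optional<T> getIfPresent(final Supplier<T> call) {
    try {
      return Optional.ofNullable(call.get());
    } catch (MetacatNotFoundException e) {
      // The requested catalog/database/table does not exist.
      return Optional.empty();
    }
  }
}

Because the exceptions shown on this page are thrown from methods with no throws clauses, they are unchecked, so the wrapped Supplier needs no checked-exception handling.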
origin: Netflix/metacat

  collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
  log.error(e.getMessage(), e);
  throw new MetacatNotSupportedException("Catalog does not support the operation");
} catch (DatabaseAlreadyExistsException | TableAlreadyExistsException | PartitionAlreadyExistsException e) {
  collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
  log.error(e.getMessage(), e);
  throw new MetacatAlreadyExistsException(e.getMessage());
} catch (NotFoundException | MetacatNotFoundException e) {
  collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
  log.error(e.getMessage(), e);
  throw new MetacatNotFoundException(
    String.format("Unable to locate for %s. Details: %s", name, e.getMessage()));
} catch (InvalidMetaException | IllegalArgumentException e) {
  collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
  log.error(e.getMessage(), e);
  throw new MetacatBadRequestException(
    String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
} catch (TablePreconditionFailedException e) {
  collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
  log.error(e.getMessage(), e);
  throw new MetacatPreconditionFailedException(
    String.format("%s.%s", e.getMessage(), e.getCause() == null ? "" : e.getCause().getMessage()));
} catch (ConnectorException e) {
  collectRequestExceptionMetrics(tags, e.getClass().getSimpleName());
  final String message = String.format("%s.%s", e.getMessage(),
    e.getCause() == null ? "" : e.getCause().getMessage());
  log.error(message, e);
  if (e.getMessage().contains("too many connections")
    || e.getMessage().contains("Timeout: Pool empty")) {
    throw new MetacatTooManyRequestsException(message);
  }
  throw new MetacatException(message, e);
}
origin: Netflix/metacat

/**
 * Constructor.
 *
 * @param connectorContext connector context
 * @param restTemplate     rest template
 */
public DruidHttpClientImpl(final ConnectorContext connectorContext,
              final RestTemplate restTemplate) {
  this.restTemplate = restTemplate;
  final Map<String, String> config = connectorContext.getConfiguration();
  final String coordinatorUri = config.get(DruidConfigConstants.DRUID_COORDINATOR_URI);
  if (coordinatorUri == null) {
    throw new MetacatException("Druid cluster ending point not provided.");
  }
  try {
    new URI(coordinatorUri);
  } catch (URISyntaxException exception) {
    throw new MetacatException("Druid ending point invalid");
  }
  this.druidURI = coordinatorUri;
  log.info("druid server uri={}", this.druidURI);
}
origin: Netflix/metacat

/**
 * Check at database level.
 */
private void checkPermit(final Map<QualifiedName, Set<String>> accessACL,
             final String userName,
             final QualifiedName name,
             final MetacatOperation op) {
  final Set<String> users =
    accessACL.get(QualifiedName.ofDatabase(name.getCatalogName(), name.getDatabaseName()));
  if ((users != null) && !users.isEmpty() && !users.contains(userName)) {
    throw new MetacatUnAuthorizedException(String.format("%s is not permitted for %s %s",
      userName, op.name(), name
    ));
  }
}
origin: Netflix/metacat

/**
 * {@inheritDoc}
 */
@Nonnull
@Override
public List<CatalogMappingDto> getCatalogNames() {
  if (connectorManager.getCatalogs().isEmpty()) {
    throw new MetacatNotFoundException("Unable to locate any catalogs");
  }
  return connectorManager.getCatalogs().stream()
    .map(catalog -> new CatalogMappingDto(catalog.getCatalogName(), catalog.getType()))
    .distinct()
    .collect(Collectors.toList());
}
origin: com.netflix.metacat/metacat-connector-hive

@Override
public Table create(final Schema schema, final PartitionSpec spec, final String tables) {
  throw new MetacatNotSupportedException("Not supported");
}
origin: Netflix/metacat

private void validAndUpdateVirtualView(final Table table) {
  if (isVirtualView(table)
    && Strings.isNullOrEmpty(table.getViewOriginalText())) {
    throw new MetacatBadRequestException(
      String.format("Invalid view creation for %s/%s. Missing viewOrginialText",
        table.getDbName(),
        table.getDbName()));
  }
  if (Strings.isNullOrEmpty(table.getViewExpandedText())) {
    //set viewExpandedText to viewOriginalTest
    table.setViewExpandedText(table.getViewOriginalText());
  }
  //setting dummy string to view to avoid dropping view issue in hadoop Path org.apache.hadoop.fs
  if (Strings.isNullOrEmpty(table.getSd().getLocation())) {
    table.getSd().setLocation("file://tmp/" + table.getDbName() + "/" + table.getTableName());
  }
}
origin: Netflix/metacat

@Override
public Table create(final Schema schema, final PartitionSpec spec, final String database, final String table) {
  throw new MetacatNotSupportedException("not supported");
}
origin: Netflix/metacat

/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionUris(
  final ConnectorRequestContext requestContext,
  final QualifiedName tableName,
  final PartitionListRequest partitionsRequest,
  final TableInfo tableInfo
) {
  if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
    throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
  }
  return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
}
origin: Netflix/metacat

/**
 * Number of partitions for the given table.
 *
 * @param tableName tableName
 * @return Number of partitions
 */
@Override
public int getPartitionCount(
  final ConnectorRequestContext requestContext,
  final QualifiedName tableName,
  final TableInfo tableInfo
) {
  if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
    throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
  }
  return directSqlGetPartition.getPartitionCount(requestContext, tableName);
}
origin: Netflix/metacat

/**
 * {@inheritDoc}.
 */
@Override
public void delete(final ConnectorRequestContext requestContext, final QualifiedName name) {
  try {
    this.metacatHiveClient.dropDatabase(name.getDatabaseName());
  } catch (NoSuchObjectException exception) {
    throw new DatabaseNotFoundException(name, exception);
  } catch (MetaException exception) {
    throw new InvalidMetaException(name, exception);
  } catch (InvalidOperationException exception) {
    throw new MetacatNotSupportedException(exception.getMessage());
  } catch (TException exception) {
    throw new ConnectorException(String.format("Failed delete hive database %s", name), exception);
  }
}
com.netflix.metacat.common.exception

Most used classes

  • MetacatException
    Base exception for Metacat errors exposed externally.
  • MetacatAlreadyExistsException
    Metacat exception for already exists entities.
  • MetacatBadRequestException
    TODO: This should be replaced by a BadRequestException from JAX-RS 2.x once we support the newer JAX-RS API.
  • MetacatNotFoundException
    TODO: This should be replaced by a NotFoundException from JAX-RS 2.x once we support the newer JAX-RS API.
  • MetacatNotSupportedException
    Metacat not supported exception.
  • MetacatUnAuthorizedException
  • MetacatTooManyRequestsException
  • MetacatUserMetadataException
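In the snippets above, these classes are thrown from service and connector methods that declare no throws clauses, so they behave as unchecked runtime exceptions. A minimal throw-side sketch follows, assuming a hypothetical request guard; the class, method, and parameter names are illustrative, and only the exception types belong to this package.

import java.util.Set;

import com.netflix.metacat.common.exception.MetacatBadRequestException;
import com.netflix.metacat.common.exception.MetacatUnAuthorizedException;

/**
 * Hypothetical request guard (not part of Metacat) showing typical
 * throw-side usage of the package: 400-style validation failures and
 * 403-style permission failures.
 */
public final class RequestGuard {

  private RequestGuard() {
  }

  /** Rejects blank table names, mirroring the bad-request pattern above. */
  public static void requireTableName(final String tableName) {
    if (tableName == null || tableName.trim().isEmpty()) {
      throw new MetacatBadRequestException("Table name must not be empty");
    }
  }

  /** Rejects users missing from a non-empty allow list, mirroring checkPermit above. */
  public static void requirePermitted(final Set<String> allowedUsers, final String userName) {
    if (!allowedUsers.isEmpty() && !allowedUsers.contains(userName)) {
      throw new MetacatUnAuthorizedException(
        String.format("%s is not permitted to perform this operation", userName));
    }
  }
}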