Tabnine Logo
Deadline.stopTimer
Code Index — Add Tabnine to your IDE (free)

How to use
stopTimer
method
in
org.apache.hadoop.hive.metastore.Deadline

Best Java code snippets using org.apache.hadoop.hive.metastore.Deadline.stopTimer (Showing top 20 results out of 315)

origin: apache/hive

@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
 try {
  // Make sure a Deadline is registered for this thread before timing the call.
  Deadline.registerIfNot(socketTimeout);
  // startTimer presumably reports whether this frame actually started the
  // timer (a nested call would find one running) — only the starter stops it.
  boolean isTimerStarted = Deadline.startTimer(method.getName());
  try {
   return method.invoke(base, args);
  } finally {
   if (isTimerStarted) {
    Deadline.stopTimer();
   }
  }
 } catch (UndeclaredThrowableException | InvocationTargetException e) {
  // Both reflection wrappers carry the handler's real exception; unwrap it
  // so callers see the original metastore error. (Was two identical catches.)
  throw e.getCause();
 }
}
origin: apache/hive

/**
 * Re-reads all partitions of the given table from the raw store and refreshes
 * the shared cache with them. Read failures are logged, not propagated.
 */
private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) {
 try {
  // Time the raw-store read so the Deadline watchdog can bound a hung call.
  Deadline.startTimer("getPartitions");
  List<Partition> partitions;
  try {
   partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
  } finally {
   // Stop even when getPartitions throws, so the thread-local timer is not
   // left running for the next operation on this thread.
   Deadline.stopTimer();
  }
  sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName),
    StringUtils.normalizeIdentifier(dbName),
    StringUtils.normalizeIdentifier(tblName), partitions);
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
 }
}
origin: apache/hive

} finally {
 if (isStarted) {
  Deadline.stopTimer();
origin: apache/hive

AggrStats aggrStatsAllPartitions =
  rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
Deadline.stopTimer();
AggrStats aggrStatsAllButDefaultPartition =
  rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
Deadline.stopTimer();
sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
  StringUtils.normalizeIdentifier(dbName),
origin: apache/hive

Deadline.startTimer("getPartitions");
partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
Deadline.stopTimer();
List<String> partNames = new ArrayList<>(partitions.size());
for (Partition p : partitions) {
 partitionColStats = rawStore.getPartitionColumnStatistics(catName, dbName,
   tblName, partNames, colNames);
 Deadline.stopTimer();
 aggrStatsAllPartitions =
   rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
 Deadline.stopTimer();
 aggrStatsAllButDefaultPartition =
   rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
 Deadline.stopTimer();
tableColStats =
  rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
Deadline.stopTimer();
origin: apache/hive

/**
 * Refreshes the cached partition column statistics (and the partitions that
 * carry the stats state) for one table, inside a raw-store transaction.
 * On any failure the cached stats for the table are invalidated and the
 * transaction is rolled back.
 */
private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) {
 boolean committed = false;
 rawStore.openTransaction();
 try {
  Table table = rawStore.getTable(catName, dbName, tblName);
  List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
  List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
  // Get partition column stats for this table
  Deadline.startTimer("getPartitionColumnStatistics");
  List<ColumnStatistics> partitionColStats;
  try {
   partitionColStats =
     rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
  } finally {
   // Stop even on failure so the thread-local timer is not left running.
   Deadline.stopTimer();
  }
  sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
  List<Partition> parts = rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
  // Also save partitions for consistency as they have the stats state.
  for (Partition part : parts) {
   sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part);
  }
  committed = rawStore.commitTransaction();
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
 } finally {
  if (!committed) {
   // Either an exception above or commit returning false: drop the possibly
   // stale cached stats and roll the transaction back.
   sharedCache.removeAllPartitionColStatsFromCache(catName, dbName, tblName);
   rawStore.rollbackTransaction();
  }
 }
}
origin: apache/hive

/**
 * Refreshes the cached table-level column statistics for one (unpartitioned)
 * table, inside a raw-store transaction. Partitioned tables are skipped here;
 * their stats are refreshed per partition elsewhere. On any failure the cached
 * stats are invalidated and the transaction is rolled back.
 */
private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
 boolean committed = false;
 rawStore.openTransaction();
 try {
  Table table = rawStore.getTable(catName, dbName, tblName);
  if (!table.isSetPartitionKeys()) {
   List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
   Deadline.startTimer("getTableColumnStatistics");
   ColumnStatistics tableColStats;
   try {
    tableColStats =
      rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
   } finally {
    // Stop even on failure so the thread-local timer is not left running.
    Deadline.stopTimer();
   }
   if (tableColStats != null) {
    sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
      StringUtils.normalizeIdentifier(dbName),
      StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
    // Update the table to get consistent stats state.
    sharedCache.alterTableInCache(catName, dbName, tblName, table);
   }
  }
  committed = rawStore.commitTransaction();
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Unable to refresh table column stats for table: " + tblName, e);
 } finally {
  if (!committed) {
   sharedCache.removeAllTableColStatsFromCache(catName, dbName, tblName);
   rawStore.rollbackTransaction();
  }
 }
}
origin: apache/hive

 return;
Deadline.stopTimer();
 return;
Deadline.stopTimer();
origin: apache/hive

Deadline.stopTimer();
origin: Netflix/metacat

  /**
   * Proxies a metastore handler call, timing it with the thread-local Deadline.
   * JDO datastore failures in the cause chain are rethrown directly so callers
   * can react to them; other invocation failures are unwrapped.
   */
  @Override
  public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
    Deadline.registerIfNot(timeout);
    try {
      Deadline.startTimer(method.getName());
      try {
        return method.invoke(metacatHMSHandler, args);
      } finally {
        // Stop even when invoke throws; otherwise the thread-local timer
        // stays running and can affect later calls on this thread.
        Deadline.stopTimer();
      }
    } catch (InvocationTargetException e) {
      for (Throwable ex : Throwables.getCausalChain(e)) {
        if (ex instanceof JDODataStoreException) {
          throw ex;
        }
      }
      throw e.getCause();
    }
  }
}
origin: com.netflix.metacat/metacat-connector-hive

  /**
   * Proxies a metastore handler call, timing it with the thread-local Deadline.
   * JDO datastore failures in the cause chain are rethrown directly so callers
   * can react to them; other invocation failures are unwrapped.
   */
  @Override
  public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
    Deadline.registerIfNot(timeout);
    try {
      Deadline.startTimer(method.getName());
      try {
        return method.invoke(metacatHMSHandler, args);
      } finally {
        // Stop even when invoke throws; otherwise the thread-local timer
        // stays running and can affect later calls on this thread.
        Deadline.stopTimer();
      }
    } catch (InvocationTargetException e) {
      for (Throwable ex : Throwables.getCausalChain(e)) {
        if (ex instanceof JDODataStoreException) {
          throw ex;
        }
      }
      throw e.getCause();
    }
  }
}
origin: org.apache.hive/hive-standalone-metastore

@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
 try {
  // Make sure a Deadline is registered for this thread before timing the call.
  Deadline.registerIfNot(socketTimeout);
  // startTimer presumably reports whether this frame actually started the
  // timer (a nested call would find one running) — only the starter stops it.
  boolean isTimerStarted = Deadline.startTimer(method.getName());
  try {
   return method.invoke(base, args);
  } finally {
   if (isTimerStarted) {
    Deadline.stopTimer();
   }
  }
 } catch (UndeclaredThrowableException | InvocationTargetException e) {
  // Both reflection wrappers carry the handler's real exception; unwrap it
  // so callers see the original metastore error. (Was two identical catches.)
  throw e.getCause();
 }
}
origin: com.facebook.presto.hive/hive-apache

@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
 boolean isTimerStarted = false;
 try {
  try {
   // Only start a timer if this frame is the outermost timed call.
   if (!Deadline.isStarted()) {
    Deadline.startTimer(method.getName());
    isTimerStarted = true;
   }
  } catch (MetaException e) {
   // Deadline was not registered yet.
   long timeout = HiveConf.getTimeVar(hiveConf,
     HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
   Deadline.registerIfNot(timeout);
   Deadline.startTimer(method.getName());
   isTimerStarted = true;
  }
  try {
   return method.invoke(base, args);
  } finally {
   // Previously the timer was only stopped on success; a throwing call left
   // it running, so later calls saw isStarted() == true and never restarted
   // it, making stale deadlines fire. Stop it unconditionally if we own it.
   if (isTimerStarted) {
    Deadline.stopTimer();
   }
  }
 } catch (UndeclaredThrowableException | InvocationTargetException e) {
  // Unwrap reflection wrappers so callers see the real metastore exception.
  throw e.getCause();
 }
}
origin: org.spark-project.hive/hive-metastore

@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
 boolean isTimerStarted = false;
 try {
  try {
   // Only start a timer if this frame is the outermost timed call.
   if (!Deadline.isStarted()) {
    Deadline.startTimer(method.getName());
    isTimerStarted = true;
   }
  } catch (MetaException e) {
   // Deadline was not registered yet.
   long timeout = HiveConf.getTimeVar(hiveConf,
     HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
   Deadline.registerIfNot(timeout);
   Deadline.startTimer(method.getName());
   isTimerStarted = true;
  }
  try {
   return method.invoke(base, args);
  } finally {
   // Previously the timer was only stopped on success; a throwing call left
   // it running, so later calls saw isStarted() == true and never restarted
   // it, making stale deadlines fire. Stop it unconditionally if we own it.
   if (isTimerStarted) {
    Deadline.stopTimer();
   }
  }
 } catch (UndeclaredThrowableException | InvocationTargetException e) {
  // Unwrap reflection wrappers so callers see the real metastore exception.
  throw e.getCause();
 }
}
origin: org.apache.hive/hive-standalone-metastore

/**
 * Re-reads all partitions of the given table from the raw store and refreshes
 * the shared cache with them. Read failures are logged, not propagated.
 */
private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) {
 try {
  // Time the raw-store read so the Deadline watchdog can bound a hung call.
  Deadline.startTimer("getPartitions");
  List<Partition> partitions;
  try {
   partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
  } finally {
   // Stop even when getPartitions throws, so the thread-local timer is not
   // left running for the next operation on this thread.
   Deadline.stopTimer();
  }
  sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName),
    StringUtils.normalizeIdentifier(dbName),
    StringUtils.normalizeIdentifier(tblName), partitions);
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
 }
}
origin: org.apache.hive/hive-standalone-metastore

/**
 * Refreshes the cached partition column statistics for one table from the raw
 * store. Read failures are logged, not propagated.
 */
private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) {
 try {
  Table table = rawStore.getTable(catName, dbName, tblName);
  List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
  List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
  // Get partition column stats for this table
  Deadline.startTimer("getPartitionColumnStatistics");
  List<ColumnStatistics> partitionColStats;
  try {
   partitionColStats =
     rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
  } finally {
   // Stop even on failure so the thread-local timer is not left running.
   Deadline.stopTimer();
  }
  sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
 }
}
origin: org.spark-project.hive/hive-metastore

Deadline.stopTimer();
return object;
origin: org.apache.hive/hive-standalone-metastore

AggrStats aggrStatsAllPartitions =
  rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
Deadline.stopTimer();
AggrStats aggrStatsAllButDefaultPartition =
  rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
Deadline.stopTimer();
sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
  StringUtils.normalizeIdentifier(dbName),
origin: com.facebook.presto.hive/hive-apache

Deadline.stopTimer();
return object;
origin: org.apache.hive/hive-standalone-metastore

/**
 * Refreshes the cached table-level column statistics for one (unpartitioned)
 * table. Partitioned tables are skipped; read failures are logged, not
 * propagated.
 */
private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
 try {
  Table table = rawStore.getTable(catName, dbName, tblName);
  if (!table.isSetPartitionKeys()) {
   List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
   Deadline.startTimer("getTableColumnStatistics");
   ColumnStatistics tableColStats;
   try {
    tableColStats =
      rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
   } finally {
    // Stop even on failure so the thread-local timer is not left running.
    Deadline.stopTimer();
   }
   if (tableColStats != null) {
    sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
      StringUtils.normalizeIdentifier(dbName),
      StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
   }
  }
 } catch (MetaException | NoSuchObjectException e) {
  LOG.info("Unable to refresh table column stats for table: " + tblName, e);
 }
}
org.apache.hadoop.hive.metastore.Deadline.stopTimer

Javadoc

End the timer after a method is done.

Popular methods of Deadline

  • registerIfNot
    register a Deadline threadlocal object to current thread.
  • startTimer
    start the timer before a method is invoked.
  • checkTimeout
    Check whether the long running method timeout.
  • getCurrentDeadline
  • resetTimeout
    reset the timeout value of this timer.
  • <init>
  • check
  • clear
    remove the registered Deadline threadlocal object from current thread.
  • removeCurrentDeadline
  • setCurrentDeadline
  • isStarted
    Check whether the timer is started.
  • newMetaException
    convert DeadlineException to MetaException
  • isStarted,
  • newMetaException

Popular in Java

  • Reading from database using SQL prepared statement
  • notifyDataSetChanged (ArrayAdapter)
  • setContentView (Activity)
  • setScale (BigDecimal)
  • Point (java.awt)
    A point representing a location in (x,y) coordinate space, specified in integer precision.
  • URLConnection (java.net)
    A connection to a URL for reading or writing. For HTTP connections, see HttpURLConnection for docume
  • Timestamp (java.sql)
    A Java representation of the SQL TIMESTAMP type. It provides the capability of representing the SQL
  • Arrays (java.util)
    This class contains various methods for manipulating arrays (such as sorting and searching). This cl
  • UUID (java.util)
    UUID is an immutable representation of a 128-bit universally unique identifier (UUID). There are mul
  • Options (org.apache.commons.cli)
    Main entry-point into the library. Options represents a collection of Option objects, which describ
  • Top plugins for WebStorm
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now