ObjectUtils

How to use ObjectUtils in org.elasticsearch.hadoop.util

Best Java code snippets using org.elasticsearch.hadoop.util.ObjectUtils (selected from 315 results)

origin: elastic/elasticsearch-hadoop

public static <T> T instantiate(String className, Settings settings) {
  return instantiate(className, null, settings);
}
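
A hedged usage sketch of this overload: "com.example.MyExtractor" and the in-scope settings variable are hypothetical; any class with a public no-arg constructor reachable on the classpath would do.

// hypothetical class name; instantiate() resolves it reflectively, calls the
// no-arg constructor, and injects settings if the instance is SettingsAware
// (see the three-argument overload further down this page)
FieldExtractor extractor = ObjectUtils.instantiate("com.example.MyExtractor", settings);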
origin: elastic/elasticsearch-hadoop

public EsStorage(String... configuration) {
  if (!ObjectUtils.isEmpty(configuration)) {
    try {
      for (String string : configuration) {
        // replace ; with line separators
        properties.load(new StringReader(string));
        log.trace(properties.toString());
      }
    } catch (IOException ex) {
      throw new EsHadoopIllegalArgumentException("Cannot parse options " + Arrays.toString(configuration), ex);
    }
  }
}
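
A hedged illustration of the option parsing above: Properties.load reads one key=value pair per line of the reader, which is why the comment mentions turning ';' separators into line separators. The option keys below are illustrative.

static void demoOptionParsing() throws java.io.IOException {
  java.util.Properties properties = new java.util.Properties();
  // one key=value pair per line; a ';'-separated string would need the
  // separators normalized to newlines first, as the comment above suggests
  properties.load(new java.io.StringReader("es.nodes=localhost\nes.port=9200"));
  System.out.println(properties.getProperty("es.nodes")); // prints "localhost"
}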
origin: elastic/elasticsearch-hadoop

for (Object o : ObjectUtils.toObjectArray(value)) {
  Result result = doWrite(o, generator, parentField);
  if (!result.isSuccesful()) { // isSuccesful is the method's actual (misspelled) name
    return result; // hedged completion of the truncated snippet: propagate the first failure
  }
}
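
A hedged sketch of the semantics a helper like toObjectArray typically has: box a primitive array (e.g. int[]) into Object[] so callers can iterate it uniformly. This is an illustration, not the es-hadoop implementation.

static Object[] toObjectArraySketch(Object source) {
  if (source == null) {
    return new Object[0];
  }
  if (source instanceof Object[]) {
    return (Object[]) source; // already an object array
  }
  int length = java.lang.reflect.Array.getLength(source); // handles primitive arrays too
  Object[] boxed = new Object[length];
  for (int i = 0; i < length; i++) {
    boxed[i] = java.lang.reflect.Array.get(source, i); // Array.get boxes primitives
  }
  return boxed;
}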
origin: elastic/elasticsearch-hadoop

static void checkSparkLibraryCompatibility(boolean throwOnIncompatible) {
  // check whether the correct es-hadoop is used with the correct Spark version
  boolean isSpark13Level = ObjectUtils.isClassPresent("org.apache.spark.sql.DataFrame", SparkConf.class.getClassLoader());
  boolean isSpark20Level = ObjectUtils.isClassPresent("org.apache.spark.sql.streaming.StreamingQuery", SparkConf.class.getClassLoader());
  CompatibilityLevel compatibilityLevel = ObjectUtils.instantiate("org.elasticsearch.spark.sql.SparkSQLCompatibilityLevel", CompatUtils.class.getClassLoader());
  boolean isEshForSpark20 = "20".equals(compatibilityLevel.versionId());
  String esSupportedSparkVersion = compatibilityLevel.versionDescription();
  String errorMessage = null;
  if (!(isSpark13Level || isSpark20Level)) {
    String sparkVersion = getSparkVersionOr("1.0-1.2");
    errorMessage = String.format("Incorrect classpath detected; Elasticsearch Spark compiled for Spark %s but used with unsupported Spark version %s",
        esSupportedSparkVersion, sparkVersion);
  } else if (isSpark20Level != isEshForSpark20) { // XOR can be applied as well but != increases readability
    String sparkVersion = getSparkVersionOr(isSpark13Level ? "1.3-1.6" : "2.0+");
    errorMessage = String.format("Incorrect classpath detected; Elasticsearch Spark compiled for Spark %s but used with Spark %s",
        esSupportedSparkVersion, sparkVersion);
  }
  if (errorMessage != null) {
    if (throwOnIncompatible) {
      throw new EsHadoopIllegalStateException(errorMessage);
    } else {
      LogFactory.getLog("org.elasticsearch.spark.rdd.EsSpark").warn(errorMessage);
    }
  }
}
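
A hedged sketch of the classpath probe used above; isClassPresent presumably reduces to Class.forName against the supplied loader with the failure swallowed.

static boolean isClassPresentSketch(String className, ClassLoader loader) {
  try {
    // 'false' skips static initializers: a pure presence check, no side effects
    Class.forName(className, false, loader);
    return true;
  }
  catch (ClassNotFoundException ex) {
    return false;
  }
}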
origin: elastic/elasticsearch-hadoop

public static <T> T instantiate(String className, ClassLoader loader, Settings settings) {
  T obj = instantiate(className, loader);
  if (obj instanceof SettingsAware) {
    ((SettingsAware) obj).setSettings(settings);
  }
  return obj;
}
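
A hedged sketch of the reflective core behind the two-argument instantiate(className, loader) called above; error handling is simplified, and the fallback to the thread context loader is an assumption.

@SuppressWarnings("unchecked")
static <T> T instantiateSketch(String className, ClassLoader loader) {
  ClassLoader cl = (loader != null ? loader : Thread.currentThread().getContextClassLoader());
  try {
    // resolve against the chosen loader and invoke the no-arg constructor
    return (T) Class.forName(className, true, cl).getDeclaredConstructor().newInstance();
  }
  catch (ReflectiveOperationException ex) {
    throw new IllegalStateException("Cannot instantiate " + className, ex);
  }
}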
origin: elastic/elasticsearch-hadoop

public void process(BytesArray storage) {
  // no extractors, no lookups
  if (ObjectUtils.isEmpty(paths)) {
    return;
  }
  results.clear();
  if (log.isTraceEnabled()) {
    log.trace(String.format("About to look for paths [%s] in doc [%s]", Arrays.toString(paths), storage));
  }
  results.addAll(ParsingUtils.values(new JacksonJsonParser(storage.bytes(), 0, storage.length()), paths));
}
origin: elastic/elasticsearch-hadoop

  protected FieldExtractor createFieldExtractor(String fieldName) {
    settings.setProperty(ConstantFieldExtractor.PROPERTY, fieldName);
    return ObjectUtils.instantiate(settings.getMappingDefaultClassExtractor(), settings);
  }
}
origin: elastic/elasticsearch-hadoop

@Override
public void compile(String pattern) {
  this.pattern = pattern;
  // break it down into index/type
  String[] split = pattern.split("/");
  Assert.isTrue(!ObjectUtils.isEmpty(split), "invalid pattern given " + pattern);
  // check pattern
  hasPattern = pattern.contains("{") && pattern.contains("}");
  index = parse(split[0].trim());
  if (split.length > 1) {
// the pattern may contain at most two segments: index and, optionally, type
    Assert.isTrue(split.length == 2, "invalid pattern given " + pattern);
    type = parse(split[1].trim());
  } else {
    type = null;
  }
}
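
A hedged illustration of the splitting logic above: one '/'-separated segment means index only, two mean index/type, and '{...}' marks a dynamic field reference. The pattern string is illustrative.

static void demoPatternSplit() {
  String pattern = "logs-{date}/event";
  String[] split = pattern.split("/");
  boolean hasPattern = pattern.contains("{") && pattern.contains("}");
  String index = split[0].trim();                            // "logs-{date}"
  String type = (split.length > 1 ? split[1].trim() : null); // "event"
  System.out.println(index + " | " + type + " | dynamic=" + hasPattern);
}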
origin: elastic/elasticsearch-hadoop

  @Override
  protected FieldExtractor createFieldExtractor(String fieldName) {
    settings.setProperty(ConstantFieldExtractor.PROPERTY, fieldName);
    return ObjectUtils.<FieldExtractor> instantiate(settings.getMappingDefaultClassExtractor(), settings);
  }
}
origin: elastic/elasticsearch-hadoop

public static void setFilters(Settings settings, String... filters) {
  // clear any filters inside the settings
  settings.setProperty(InternalConfigurationOptions.INTERNAL_ES_QUERY_FILTERS, "");
  if (ObjectUtils.isEmpty(filters)) {
    return;
  }
  settings.setProperty(InternalConfigurationOptions.INTERNAL_ES_QUERY_FILTERS, IOUtils.serializeToBase64(filters));
}
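
A hedged usage sketch, assuming a Settings instance is in scope; the filter JSON is illustrative only.

// filters are serialized to Base64 and stored under the internal option
setFilters(settings, "{\"term\":{\"user\":\"kimchy\"}}");
// no filters: isEmpty(...) is true, so the option is simply cleared
setFilters(settings);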
origin: elastic/elasticsearch-hadoop

public JsonTemplatedBulk(Collection<Object> beforeObject, Collection<Object> afterObject,
    JsonFieldExtractors jsonExtractors, Settings settings) {
  super(beforeObject, afterObject, new NoOpValueWriter());
  this.jsonExtractors = jsonExtractors;
  this.jsonWriter = ObjectUtils.instantiate(settings.getSerializerBytesConverterClassName(), settings);
  this.settings = settings;
}
origin: elastic/elasticsearch-hadoop

public HttpRetryHandler(Settings settings) {
  String retryPolicyName = settings.getBatchWriteRetryPolicy();
  if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_SIMPLE.equals(retryPolicyName)) {
    retryPolicyName = SimpleHttpRetryPolicy.class.getName();
  }
  else if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_NONE.equals(retryPolicyName)) {
    retryPolicyName = NoHttpRetryPolicy.class.getName();
  }
  HttpRetryPolicy retryPolicy = ObjectUtils.instantiate(retryPolicyName, settings);
  this.retry = retryPolicy.init();
  this.retryLimit = settings.getBatchWriteRetryCount();
  this.retryTime = settings.getBatchWriteRetryWait();
}
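
A hedged sketch of the alias-resolution pattern above: well-known short names map to the bundled policy classes, and anything else is assumed (in this sketch) to be a user-supplied fully qualified class name handed to instantiate().

static String resolveRetryPolicyClass(String configuredName) {
  if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_SIMPLE.equals(configuredName)) {
    return SimpleHttpRetryPolicy.class.getName();
  }
  if (ConfigurationOptions.ES_BATCH_WRITE_RETRY_POLICY_NONE.equals(configuredName)) {
    return NoHttpRetryPolicy.class.getName();
  }
  return configuredName; // assumed: a custom HttpRetryPolicy implementation
}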
org.elasticsearch.hadoop.util.ObjectUtils

Most used methods

  • instantiate
  • isEmpty
  • toObjectArray
  • isClassPresent
