How to use the split method in org.apache.hadoop.util.StringUtils

Best Java code snippets using org.apache.hadoop.util.StringUtils.split (showing top 20 results out of 513)

origin: org.apache.hadoop/hadoop-common

/**
 * Split a string using the default separator
 * @param str a string that may have escaped separator
 * @return an array of strings
 */
public static String[] split(String str) {
 return split(str, ESCAPE_CHAR, COMMA);
}
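
For reference, a minimal sketch of how this default split behaves (an illustrative example, not from the index; it assumes the comma separator and backslash escape named in the Javadoc, and that tokens keep their escape characters until unEscapeString is applied, as the snippets below do):

import org.apache.hadoop.util.StringUtils;

public class SplitDemo {
 public static void main(String[] args) {
  // ',' is the default separator and '\' the escape char, so the
  // escaped comma in "b\,c" does not start a new token.
  String[] parts = StringUtils.split("a,b\\,c,d");
  for (String part : parts) {
   // Tokens keep their escape characters; unEscapeString removes them.
   System.out.println(StringUtils.unEscapeString(part)); // a / b,c / d
  }
 }
}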

origin: apache/hive

public static Set<String> getNestedColumnPaths(Configuration conf) {
 String skips =
  conf.get(READ_NESTED_COLUMN_PATH_CONF_STR, READ_NESTED_COLUMN_PATH_CONF_STR_DEFAULT);
 return new HashSet<>(Arrays.asList(StringUtils.split(skips)));
}
origin: voldemort/voldemort

@Override
protected FileStatus[] listStatus(JobConf job) throws IOException {
  String dirs = job.get("mapred.input.dir", "");
  String[] list = StringUtils.split(dirs);
  List<FileStatus> status = new ArrayList<FileStatus>();
  for(int i = 0; i < list.length; i++) {
    status.addAll(getAllSubFileStatus(job, new Path(list[i])));
  }
  return status.toArray(new FileStatus[0]);
}
origin: apache/hive

/**
 * Take an encoded string and decode it into an array of strings.
 */
public static String[] decodeArray(String s) {
 if (s == null)
  return null;
 String[] escaped = StringUtils.split(s);
 String[] plain = new String[escaped.length];
 for (int i = 0; i < escaped.length; ++i)
  plain[i] = StringUtils.unEscapeString(escaped[i]);
 return plain;
}
origin: org.apache.hadoop/hadoop-common

/**
 * Convert SOME_STUFF to SomeStuff
 *
 * @param s input string
 * @return camelized string
 */
public static String camelize(String s) {
 StringBuilder sb = new StringBuilder();
 String[] words = split(StringUtils.toLowerCase(s), ESCAPE_CHAR,  '_');
 for (String word : words)
  sb.append(org.apache.commons.lang3.StringUtils.capitalize(word));
 return sb.toString();
}
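
A short usage sketch of the method above (illustrative inputs, not from the index):

StringUtils.camelize("SOME_STUFF"); // => "SomeStuff": lower-cased, split on '_', each word capitalized
StringUtils.camelize("dfs_hosts");  // => "DfsHosts"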
origin: apache/hive

/**
 * Returns a list of column ids (starting from zero) that are set in the given
 * parameter <tt>conf</tt>.
 */
public static List<Integer> getReadColumnIDs(Configuration conf) {
 String skips = conf.get(READ_COLUMN_IDS_CONF_STR, READ_COLUMN_IDS_CONF_STR_DEFAULT);
 String[] list = StringUtils.split(skips);
 List<Integer> result = new ArrayList<Integer>(list.length);
 for (String element : list) {
  // the list may contain duplicates; skip any id already added
  Integer toAdd = Integer.parseInt(element);
  if (!result.contains(toAdd)) {
   result.add(toAdd);
  }
  // NOTE: some code uses this list to correlate with column names, and yet these lists may
  //       contain duplicates, which this call will remove and the other won't. As far as I can
  //       tell, no code will actually use these two methods together; all is good if the code
  //       gets the ID list without relying on this method. Or maybe it just works by magic.
 }
 return result;
}
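
A hypothetical round trip through the method above (READ_COLUMN_IDS_CONF_STR is the key the method reads; the value is assumed to be a comma-separated id list):

Configuration conf = new Configuration();
conf.set(READ_COLUMN_IDS_CONF_STR, "2,0,2"); // duplicate id on purpose
List<Integer> ids = getReadColumnIDs(conf);  // => [2, 0], duplicate dropped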
origin: apache/hive

/**
 * Get the list of input {@link Path}s for the map-reduce job.
 *
 * @param conf The configuration of the job
 * @return the list of input {@link Path}s for the map-reduce job.
 */
static Path[] getInputPaths(Configuration conf) throws IOException {
 String dirs = conf.get("mapred.input.dir");
 if (dirs == null) {
  throw new IOException("Configuration mapred.input.dir is not defined.");
 }
 String [] list = StringUtils.split(dirs);
 Path[] result = new Path[list.length];
 for (int i = 0; i < list.length; i++) {
  result[i] = new Path(StringUtils.unEscapeString(list[i]));
 }
 return result;
}
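
A sketch of the expected round trip (this example assumes the caller escapes commas inside directory names before joining them, the way FileInputFormat.setInputPaths does):

Configuration conf = new Configuration();
// escapeString protects the embedded comma in the first directory name
String dirs = StringUtils.escapeString("/data/2024,Q1") + ","
  + StringUtils.escapeString("/data/2024-Q2");
conf.set("mapred.input.dir", dirs);
Path[] paths = getInputPaths(conf);
// => [/data/2024,Q1, /data/2024-Q2]; the escaped comma survives unsplit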
origin: apache/hive

/**
 * When a Pig job is submitted and it uses HCat, WebHCat may be configured to ship the Hive tarball
 * to the target node.  Pig on the target node needs some env vars configured.
 */
private static void handlePigEnvVars(Configuration conf, Map<String, String> env) {
 if(conf.get(PigConstants.HIVE_HOME) != null) {
  env.put(PigConstants.HIVE_HOME, new File(conf.get(PigConstants.HIVE_HOME)).getAbsolutePath());
 }
 if(conf.get(PigConstants.HCAT_HOME) != null) {
  env.put(PigConstants.HCAT_HOME, new File(conf.get(PigConstants.HCAT_HOME)).getAbsolutePath());
 }
 if(conf.get(PigConstants.PIG_OPTS) != null) {
  StringBuilder pigOpts = new StringBuilder();
  for(String prop : StringUtils.split(conf.get(PigConstants.PIG_OPTS))) {
   pigOpts.append("-D").append(StringUtils.unEscapeString(prop)).append(" ");
  }
  env.put(PigConstants.PIG_OPTS, pigOpts.toString());
 }
}
origin: apache/hbase

String[] splitKvp = StringUtils.split(kvp, delimiter);
origin: apache/hive

/**
 * @see  #HIVE_PROPS_NAME
 */
public Collection<String> hiveProps() {
 String[] props= StringUtils.split(get(HIVE_PROPS_NAME));
 //the raw data was (possibly) escaped so that split would work; now remove
 //the escape chars so they don't interfere with downstream processing
 if (props == null) {
  return Collections.emptyList();
 } else {
  for(int i = 0; i < props.length; i++) {
   props[i] = TempletonUtils.unEscapeString(props[i]);
  }
  return Arrays.asList(props);
 }
}
origin: apache/hive

/**
 * Return the columns which contain the required nested attribute levels.
 * E.g., given struct a:<x:int, y:int> where 'x' is required and 'y' is not, the method will
 * return a pruned struct for 'a' which only contains the attribute 'x'.
 *
 * @param nestedColPaths the paths for the required nested attributes
 * @return a map from each column to its selected nested column paths; the keys are all lower-cased.
 */
private static Map<String, FieldNode> getPrunedNestedColumns(Set<String> nestedColPaths) {
 Map<String, FieldNode> resMap = new HashMap<>();
 if (nestedColPaths.isEmpty()) {
  return resMap;
 }
 for (String s : nestedColPaths) {
  String c = StringUtils.split(s, '.')[0].toLowerCase();
  if (!resMap.containsKey(c)) {
   FieldNode f = NestedColumnFieldPruningUtils.addNodeByPath(null, s);
   resMap.put(c, f);
  } else {
   resMap.put(c, NestedColumnFieldPruningUtils.addNodeByPath(resMap.get(c), s));
  }
 }
 return resMap;
}
origin: org.apache.hadoop/hadoop-common

final String[] kvPairs = StringUtils.split(settings);
for (String kv : kvPairs) {
 final String[] kvPair = StringUtils.split(kv, '=');
 if (kvPair.length != 2) {
  throw new IllegalArgumentException(kv);
 }
 // ... use kvPair[0] as the key and kvPair[1] as the value ...
}
origin: org.apache.hadoop/hadoop-hdfs

/**
 * Splits an absolute {@code path} into an array of path components.
 * @throws AssertionError if the given path is invalid.
 * @return array of path components.
 */
public static String[] getPathNames(String path) {
 checkAbsolutePath(path);
 return StringUtils.split(path, Path.SEPARATOR_CHAR);
}
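
A usage sketch (assuming Hadoop's split keeps the leading empty token of an absolute path and only trims trailing empty ones):

String[] names = getPathNames("/user/alice/file.txt");
// names => ["", "user", "alice", "file.txt"]; the empty first component
// corresponds to the root, so callers typically index from 1.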
origin: org.apache.hadoop/hadoop-hdfs

public static List<InetSocketAddress> getAddressesList(URI uri)
  throws IOException{
 String authority = uri.getAuthority();
 Preconditions.checkArgument(authority != null && !authority.isEmpty(),
   "URI has no authority: " + uri);
 String[] parts = StringUtils.split(authority, ';');
 for (int i = 0; i < parts.length; i++) {
  parts[i] = parts[i].trim();
 }
 List<InetSocketAddress> addrs = Lists.newArrayList();
 for (String addr : parts) {
  InetSocketAddress isa = NetUtils.createSocketAddr(
    addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT);
  if (isa.isUnresolved()) {
   throw new UnknownHostException(addr);
  }
  addrs.add(isa);
 }
 return addrs;
}
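
A sketch with a quorum-journal style URI (hostnames and journal id are made up for illustration):

URI uri = URI.create("qjournal://jn1:8485;jn2:8485;jn3:8485/myjournal");
List<InetSocketAddress> addrs = getAddressesList(uri);
// the authority "jn1:8485;jn2:8485;jn3:8485" splits on ';' into three addresses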
origin: apache/accumulo

String[] fields = StringUtils.split(strFields, '\\', ',');
for (String field : fields) {
 String[] keyValue = StringUtils.split(field, '\\', '=');
 String key = keyValue[0];
 String value = keyValue[1];
 // ... use key/value ...
}
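
A sketch of this three-argument form, where the explicit escape character lets '=' appear inside a value (hypothetical input string):

String[] kv = StringUtils.split("retries=3\\=max", '\\', '=');
// kv => ["retries", "3\=max"]; the escape is kept in the token, and
// StringUtils.unEscapeString(kv[1], '\\', '=') yields "3=max"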
origin: org.apache.hadoop/hadoop-hdfs

private static void setRenameReservedMapInternal(String renameReserved) {
 Collection<String> pairs =
   StringUtils.getTrimmedStringCollection(renameReserved);
 for (String p : pairs) {
  String[] pair = StringUtils.split(p, '/', '=');
  Preconditions.checkArgument(pair.length == 2,
    "Could not parse key-value pair " + p);
  String key = pair[0];
  String value = pair[1];
  Preconditions.checkArgument(DFSUtil.isReservedPathComponent(key),
    "Unknown reserved path " + key);
  Preconditions.checkArgument(DFSUtil.isValidNameForComponent(value),
    "Invalid rename path for " + key + ": " + value);
  LOG.info("Will rename reserved path " + key + " to " + value);
  renameReservedMap.put(key, value);
 }
}
origin: apache/phoenix

@VisibleForTesting
void configureSpnegoAuthentication(HttpServer.Builder builder, UserGroupInformation ugi) {
 String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
 File keytab = new File(keytabPath);
 String httpKeytabPath =
     getConf().get(QueryServices.QUERY_SERVER_HTTP_KEYTAB_FILENAME_ATTRIB, null);
 String httpPrincipal =
     getConf().get(QueryServices.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB, null);
 // Backwards compat for a configuration key change
 if (httpPrincipal == null) {
  httpPrincipal =
      getConf().get(QueryServices.QUERY_SERVER_KERBEROS_HTTP_PRINCIPAL_ATTRIB_LEGACY, null);
 }
 File httpKeytab = null;
 if (null != httpKeytabPath) {
   httpKeytab = new File(httpKeytabPath);
 }
 String realmsString = getConf().get(QueryServices.QUERY_SERVER_KERBEROS_ALLOWED_REALMS, null);
 String[] additionalAllowedRealms = null;
 if (null != realmsString) {
  additionalAllowedRealms = StringUtils.split(realmsString, ',');
 }
 if (null != httpKeytabPath && null != httpPrincipal) {
  builder.withSpnego(httpPrincipal, additionalAllowedRealms).withAutomaticLogin(httpKeytab);
 } else {
  builder.withSpnego(ugi.getUserName(), additionalAllowedRealms)
      .withAutomaticLogin(keytab);
 }
}
org.apache.hadoop.util.StringUtils.split

Javadoc

Split a string using the default separator.

Popular methods of StringUtils

  • stringifyException
    Make a string representation of the exception.
  • join
    Concatenates strings, using a separator.
  • arrayToString
  • toLowerCase
    Converts all of the characters in this String to lower case with Locale.ENGLISH.
  • escapeString
  • startupShutdownMessage
    Print a log message for starting up and shutting down
  • getStrings
    Returns an arraylist of strings.
  • toUpperCase
    Converts all of the characters in this String to upper case with Locale.ENGLISH.
  • byteToHexString
    Given an array of bytes, returns a hex string representation of those bytes.
  • formatTime
    Given the time in long milliseconds, returns a String in the format Xhrs, Ymins, Z sec.
  • unEscapeString
  • getStringCollection
    Returns a collection of strings.
  • byteDesc
  • formatPercent
  • getTrimmedStrings
  • equalsIgnoreCase
  • format
  • formatTimeDiff
  • getTrimmedStringCollection
