private static String[] doParseDelimited(String delimited, CSVFormat format) { try (CSVParser parser = CSVParser.parse(delimited, format)) { Iterator<CSVRecord> records = parser.iterator(); return records.hasNext() ? StreamSupport.stream(records.next().spliterator(), false).toArray(String[]::new) : EMPTY_STRING; } catch (IOException e) { throw new IllegalStateException(e); // Can't happen } }
/**
 * Deserializes a UTF-8, RFC 4180-encoded CSV record into a list of field values.
 *
 * @param ser buffer holding exactly one CSV-encoded record
 * @return the record's field values, one entry per schema field
 * @throws IllegalArgumentException if the input is empty or the column count
 *         does not match the schema ("Invalid schema")
 * @throws RuntimeException wrapping any {@link IOException} from parsing
 */
@Override
public List<Object> deserialize(ByteBuffer ser) {
    String data = new String(Utils.toByteArray(ser), StandardCharsets.UTF_8);
    // try-with-resources: the original leaked the parser.
    try (CSVParser parser = CSVParser.parse(data, CSVFormat.RFC4180)) {
        List<CSVRecord> records = parser.getRecords();
        // Guard against empty input: the original threw IndexOutOfBoundsException here.
        Preconditions.checkArgument(!records.isEmpty(), "Invalid schema");
        CSVRecord record = records.get(0);
        Preconditions.checkArgument(record.size() == fieldNames.size(), "Invalid schema");
        ArrayList<Object> list = new ArrayList<>(fieldNames.size());
        for (int i = 0; i < record.size(); i++) {
            list.add(record.get(i));
        }
        return list;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
// Parse the response body as CSV using the default (RFC 4180-style) format.
// NOTE(review): the parser is not closed on this line — confirm the enclosing
// method closes it (or fully consumes and discards it).
CSVParser parser = CSVParser.parse(responseStr, CSVFormat.DEFAULT);
// Eagerly materialize every record from the in-memory CSV string.
// NOTE(review): the intermediate CSVParser is never closed; harmless for a
// String source, but try-with-resources would be cleaner — confirm intent.
final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, CSVFormat.DEFAULT) .getRecords();
/**
 * Upserts data from CSV file.
 *
 * Data is batched up based on connection batch size.
 * Column PDataType is read from metadata and is used to convert
 * column value to correct type before upsert.
 *
 * The constructor determines the format for the CSV files.
 *
 * @param fileName path of the CSV file to load
 * @throws Exception if the file cannot be read or an upsert fails
 */
public void upsert(String fileName) throws Exception {
    // try-with-resources guarantees the parser's file handle is released even
    // when upsert(parser) throws; the original leaked it on that path.
    // (Closing is safe even if upsert(parser) already closed it.)
    try (CSVParser parser = CSVParser.parse(new File(fileName), Charsets.UTF_8, format)) {
        upsert(parser);
    }
}
/**
 * Parses CSV content into a list of rows, one {@code String[]} of field
 * values per record, in input order.
 *
 * @param content the CSV text to parse
 * @param format  the CSV format (delimiter, quoting) to parse with
 * @return the parsed rows; empty list for empty input
 * @throws IOException if parsing fails
 */
private List<String[]> getRecords(String content, CSVFormat format) throws IOException {
    List<String[]> records = new ArrayList<>();
    // try-with-resources: the original never closed the parser.
    try (CSVParser parser = CSVParser.parse(content, format)) {
        for (CSVRecord record : parser.getRecords()) {
            String[] line = new String[record.size()];
            for (int i = 0; i < line.length; i++) {
                line[i] = record.get(i);
            }
            records.add(line);
        }
    }
    return records;
}
}
/**
 * Creates a parser for the given {@link Path}.
 *
 * @param path
 *            a CSV file. Must not be null.
 * @param charset
 *            the Charset used to decode the file's bytes.
 * @param format
 *            the CSVFormat used for CSV parsing. Must not be null.
 * @return a new parser
 * @throws IllegalArgumentException
 *             If the parameters of the format are inconsistent or if either path or format are null.
 * @throws IOException
 *             If an I/O error occurs
 * @since 1.5
 */
public static CSVParser parse(final Path path, final Charset charset, final CSVFormat format) throws IOException {
    Assertions.notNull(path, "path");
    Assertions.notNull(format, "format");
    // Delegates to the InputStream overload; that parser takes ownership of the stream.
    return parse(Files.newInputStream(path), charset, format);
}
@Override public CSVRecord createRecord(Object... columnValues) throws IOException { for (int i = 0; i < columnValues.length; i++) { if (columnValues[i] == null) { // Joiner.join throws on nulls, replace with empty string. columnValues[i] = ""; } if (columnValues[i] instanceof List) { columnValues[i] = Joiner.on(ARRAY_SEP).join((List<?>) columnValues[i]); } } String inputRecord = Joiner.on(',').join(columnValues); return Iterables.getFirst(CSVParser.parse(inputRecord, CSVFormat.DEFAULT), null); }
/**
 * Creates a CSV parser using the given {@link CSVFormat}.
 *
 * <p>
 * If you do not read all records from the given {@code inputStream}, you should call {@link #close()} on the
 * parser, unless you close the {@code inputStream}.
 * </p>
 *
 * @param inputStream
 *            an InputStream containing CSV-formatted input. Must not be null.
 * @param charset
 *            the Charset used to decode the stream's bytes.
 * @param format
 *            the CSVFormat used for CSV parsing. Must not be null.
 * @return a new CSVParser configured with the given input stream and format.
 * @throws IllegalArgumentException
 *             If the parameters of the format are inconsistent or if either inputStream or format are null.
 * @throws IOException
 *             If there is a problem reading the header or skipping the first record
 * @since 1.5
 */
@SuppressWarnings("resource")
public static CSVParser parse(final InputStream inputStream, final Charset charset, final CSVFormat format) throws IOException {
    Assertions.notNull(inputStream, "inputStream");
    Assertions.notNull(format, "format");
    // The Reader overload takes ownership of the reader (hence @SuppressWarnings("resource")).
    return parse(new InputStreamReader(inputStream, charset), format);
}
public synchronized List<Result> read() throws IOException { CSVParser parser = null; util.ensureBaseResultDirExists(); try { File file = new File(resultFileName); parser = CSVParser.parse(file, Charset.defaultCharset(), CSVFormat.DEFAULT); List<CSVRecord> records = parser.getRecords(); List<Result> results = new ArrayList<>(); String header = null; for (CSVRecord record : records) { // First record is the CSV Header if (record.getRecordNumber() == 1) { header = record.toString(); continue; } List<ResultValue> resultValues = new ArrayList<>(); for (String val : record.toString().split(PherfConstants.RESULT_FILE_DELIMETER)) { resultValues.add(new ResultValue(val)); } Result result = new Result(resultFileDetails, header, resultValues); results.add(result); } return results; } finally { parser.close(); } }
/**
 * Parses {@code expected} with the given format and returns the values of
 * its first record.
 *
 * @param expected CSV text expected to contain at least one record
 * @param format   the CSV format to parse with
 * @return the first record's field values
 * @throws IOException if parsing fails
 */
private String[] toFirstRecordValues(final String expected, final CSVFormat format) throws IOException {
    // try-with-resources: the original never closed the parser.
    try (CSVParser parser = CSVParser.parse(expected, format)) {
        return parser.getRecords().get(0).values();
    }
}
private void doOneRandom(final CSVFormat format) throws Exception { final Random r = new Random(); final int nLines = r.nextInt(4) + 1; final int nCol = r.nextInt(3) + 1; // nLines=1;nCol=2; final String[][] lines = generateLines(nLines, nCol); final StringWriter sw = new StringWriter(); try (final CSVPrinter printer = new CSVPrinter(sw, format)) { for (int i = 0; i < nLines; i++) { // for (int j=0; j<lines[i].length; j++) System.out.println("### VALUE=:" + printable(lines[i][j])); printer.printRecord((Object[]) lines[i]); } printer.flush(); } final String result = sw.toString(); // System.out.println("### :" + printable(result)); try (final CSVParser parser = CSVParser.parse(result, format)) { final List<CSVRecord> parseResult = parser.getRecords(); final String[][] expected = lines.clone(); for (int i = 0; i < expected.length; i++) { expected[i] = expectNulls(expected[i], format); } Utils.compare("Printer output :" + printable(result), expected, parseResult); } }
// Parse the resource as UTF-8 CSV and render each record's values as text.
// (Fragment: the enclosing method continues beyond this view.)
try (final CSVParser parser = CSVParser.parse(resource, Charset.forName("UTF-8"), format)) { for (final CSVRecord record : parser) { String parsed = Arrays.toString(record.values());
// Parse the referenced test file (platform default charset) and render each
// record's values as text. (Fragment: the enclosing method continues beyond
// this view.)
try (final CSVParser parser = CSVParser.parse(new File(BASE, split[0]), Charset.defaultCharset(), format)) { for (final CSVRecord record : parser) { String parsed = Arrays.toString(record.values());
/**
 * Reads a CSV file and marks which rules the data supports.
 *
 * <p>I/O errors are printed and swallowed (pre-existing behavior), in which
 * case the {@code ruleSupports} array is returned as-is.
 *
 * @deprecated retained for backward compatibility — NOTE(review): confirm
 *             the intended replacement overload and point to it here.
 *
 * @param csvFileName    path of the CSV file to read (decoded as UTF-8)
 * @param rules          rules keyed by id, consulted by readCSV
 * @param typeDefinition column type definitions, in declaration order
 * @param csvFormat      the CSV format to parse with
 * @param caseSensitive  whether value matching is case sensitive
 * @param ruleSupports   accumulator of per-rule support flags; updated and returned
 * @return the updated ruleSupports array
 */
@Deprecated
public static boolean[] readCSVFile(String csvFileName, HashMap<Integer, Rule> rules,
        LinkedHashMap<String, TypeDefinition> typeDefinition, CSVFormat csvFormat,
        boolean caseSensitive, boolean[] ruleSupports) {
    // try-with-resources: the original never closed the parser, leaking the
    // underlying file handle. CSVParser is Iterable<CSVRecord>, so it can be
    // handed to readCSV directly.
    try (CSVParser recordsIterator =
            CSVParser.parse(new File(csvFileName), StandardCharsets.UTF_8, csvFormat)) {
        ruleSupports = readCSV(recordsIterator, rules, typeDefinition, caseSensitive, ruleSupports);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return ruleSupports;
}
/**
 * Reads CSV content from a string and marks which rules the data supports.
 *
 * <p>I/O errors are printed and swallowed (pre-existing behavior), in which
 * case the {@code ruleSupports} array is returned as-is.
 *
 * @param csvString      the CSV text to parse
 * @param rules          rules keyed by id, consulted by readCSV
 * @param typeDefinition column type definitions, in declaration order
 * @param csvFormat      the CSV format to parse with
 * @param caseSensitive  whether value matching is case sensitive
 * @param ruleSupports   accumulator of per-rule support flags; updated and returned
 * @return the updated ruleSupports array
 */
public static boolean[] readCSVString(String csvString, HashMap<Integer, Rule> rules,
        LinkedHashMap<String, TypeDefinition> typeDefinition, CSVFormat csvFormat,
        boolean caseSensitive, boolean[] ruleSupports) {
    // try-with-resources: the original never closed the parser. CSVParser is
    // Iterable<CSVRecord>, so it can be handed to readCSV directly.
    try (CSVParser recordsIterator = CSVParser.parse(csvString, csvFormat)) {
        ruleSupports = readCSV(recordsIterator, rules, typeDefinition, caseSensitive, ruleSupports);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return ruleSupports;
}
/**
 * Reads CSV content from a string and marks which rules the data supports.
 *
 * <p>I/O errors are printed and swallowed (pre-existing behavior), in which
 * case the {@code ruleSupports} array is returned as-is.
 *
 * @deprecated retained for backward compatibility — NOTE(review): confirm
 *             the intended replacement overload and point to it here.
 *
 * @param csvString      the CSV text to parse
 * @param rules          rules keyed by id, consulted by readCSV
 * @param typeDefinition column type definitions, in declaration order
 * @param csvFormat      the CSV format to parse with
 * @param caseSensitive  whether value matching is case sensitive
 * @param ruleSupports   accumulator of per-rule support flags; updated and returned
 * @return the updated ruleSupports array
 */
@Deprecated
public static boolean[] readCSVString(String csvString, HashMap<Integer, Rule> rules,
        LinkedHashMap<String, TypeDefinition> typeDefinition, CSVFormat csvFormat,
        boolean caseSensitive, boolean[] ruleSupports) {
    // try-with-resources: the original never closed the parser. CSVParser is
    // Iterable<CSVRecord>, so it can be handed to readCSV directly.
    try (CSVParser recordsIterator = CSVParser.parse(csvString, csvFormat)) {
        ruleSupports = readCSV(recordsIterator, rules, typeDefinition, caseSensitive, ruleSupports);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return ruleSupports;
}
/**
 * Reads a CSV file and marks which rules the data supports.
 *
 * <p>I/O errors are printed and swallowed (pre-existing behavior), in which
 * case the {@code ruleSupports} array is returned as-is.
 *
 * @param csvFileName    path of the CSV file to read (decoded as UTF-8)
 * @param rules          rules keyed by id, consulted by readCSV
 * @param typeDefinition column type definitions, in declaration order
 * @param csvFormat      the CSV format to parse with
 * @param caseSensitive  whether value matching is case sensitive
 * @param ruleSupports   accumulator of per-rule support flags; updated and returned
 * @return the updated ruleSupports array
 */
public static boolean[] readCSVFile(String csvFileName, HashMap<Integer, Rule> rules,
        LinkedHashMap<String, TypeDefinition> typeDefinition, CSVFormat csvFormat,
        boolean caseSensitive, boolean[] ruleSupports) {
    // try-with-resources: the original never closed the parser, leaking the
    // underlying file handle. CSVParser is Iterable<CSVRecord>, so it can be
    // handed to readCSV directly.
    try (CSVParser recordsIterator =
            CSVParser.parse(new File(csvFileName), StandardCharsets.UTF_8, csvFormat)) {
        ruleSupports = readCSV(recordsIterator, rules, typeDefinition, caseSensitive, ruleSupports);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    return ruleSupports;
}
private static String[] doParseDelimited(String delimited, CSVFormat format) { try (CSVParser parser = CSVParser.parse(delimited, format)) { Iterator<CSVRecord> records = parser.iterator(); return records.hasNext() ? StreamSupport.stream(records.next().spliterator(), false).toArray(String[]::new) : EMPTY_STRING; } catch (IOException e) { throw new IllegalStateException(e); // Can't happen } }
/**
 * Upserts data from CSV file.
 *
 * Data is batched up based on connection batch size.
 * Column PDataType is read from metadata and is used to convert
 * column value to correct type before upsert.
 *
 * The constructor determines the format for the CSV files.
 *
 * @param fileName path of the CSV file to upsert
 * @throws Exception if the file cannot be opened or the upsert fails
 */
public void upsert(String fileName) throws Exception {
    // Close the parser deterministically: the original leaked the file handle
    // whenever upsert(parser) threw. Closing an already-closed parser is a no-op.
    try (CSVParser parser = CSVParser.parse(new File(fileName), Charsets.UTF_8, format)) {
        upsert(parser);
    }
}