CsvSchema csvSchema = csvSchemaBuilder.build();
.withSharedAttribute("collection", query.collection) .withSharedAttribute("apiKey", masterKey)) .with(builder.build()).readValue(url); } else if (query.type == AVRO) { URLConnection conn = url.openConnection();
.withSharedAttribute("collection", collection) .withSharedAttribute("apiKey", apiKey)) .with(builder.build()).readValue(buff);
/**
 * Accessor for creating a "default" CSV schema instance, with following settings:
 *<ul>
 * <li>Does NOT use header line</li>
 * <li>Uses double quotes ('"') for quoting of field values (if necessary)</li>
 * <li>Uses comma (',') as the field separator</li>
 * <li>Uses Unix linefeed ('\n') as row separator</li>
 * <li>Does NOT use any escape characters</li>
 * <li>Does NOT have any columns defined</li>
 * </ul>
 *
 * @return a schema carrying only the builder defaults described above
 */
public static CsvSchema emptySchema() {
    // All defaults come straight from an untouched builder.
    final Builder defaults = builder();
    return defaults.build();
}
@Test public final void testRunSorterTSV() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build(); verifyCSV(testInput4, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput4, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterSecondColumnThenFirst() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput3, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(1, 0), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterFirstColumn() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput1, 1, 2, 4, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0), true); } verifyCSV(testOutput, 1, 2, 4, mapper, schema); }
/**
 * Returns the {@link CsvSchema} for the given POJO type, building and caching it
 * on first use.
 *
 * @param pojoType type to introspect for columns
 * @param schemas  cache of previously built schemas; also used as the lock object
 * @param typed    forwarded to {@code _addSchemaProperties} — presumably controls
 *                 whether column types are recorded; confirm against that helper
 * @return the cached or freshly built schema (never null)
 */
protected CsvSchema _schemaFor(JavaType pojoType, SimpleLookupCache<JavaType,CsvSchema> schemas, boolean typed)
{
    // Fast path: hand back an already-cached schema. Note that the get and the
    // later put are guarded by separate synchronized blocks, so schema
    // construction itself runs outside the lock; two threads may race to build
    // the same schema, but the later put simply overwrites with an equal value.
    synchronized (schemas) {
        CsvSchema s = schemas.get(pojoType);
        if (s != null) {
            return s;
        }
    }
    final AnnotationIntrospector intr = _deserializationConfig.getAnnotationIntrospector();
    CsvSchema.Builder builder = CsvSchema.builder();
    // Populate the builder's columns from the POJO's properties.
    _addSchemaProperties(builder, intr, typed, pojoType, null);
    CsvSchema result = builder.build();
    synchronized (schemas) {
        schemas.put(pojoType, result);
    }
    return result;
}
/**
 * Returns the {@link CsvSchema} for the given POJO type, building and caching it
 * on first use.
 *
 * @param pojoType type to introspect for columns
 * @param schemas  LRU cache of previously built schemas; also used as the lock object
 * @param typed    forwarded to {@code _addSchemaProperties} — presumably controls
 *                 whether column types are recorded; confirm against that helper
 * @return the cached or freshly built schema (never null)
 */
protected CsvSchema _schemaFor(JavaType pojoType, LRUMap<JavaType,CsvSchema> schemas, boolean typed)
{
    // Fast path: hand back an already-cached schema. Note that the get and the
    // later put are guarded by separate synchronized blocks, so schema
    // construction itself runs outside the lock; two threads may race to build
    // the same schema, but the later put simply overwrites with an equal value.
    synchronized (schemas) {
        CsvSchema s = schemas.get(pojoType);
        if (s != null) {
            return s;
        }
    }
    final AnnotationIntrospector intr = _deserializationConfig.getAnnotationIntrospector();
    CsvSchema.Builder builder = CsvSchema.builder();
    // Populate the builder's columns from the POJO's properties.
    _addSchemaProperties(builder, intr, typed, pojoType, null);
    CsvSchema result = builder.build();
    synchronized (schemas) {
        schemas.put(pojoType, result);
    }
    return result;
}
/**
 * Deserializes a CSV file found on the classpath resources. (Due to the flat
 * nature of CSV, Jackson cannot deserialize deeply nested structures or nested
 * classes.)
 *
 * @param name      resource file name
 * @param separator column separator; only its first character is used
 * @param c         target element type
 * @return the parsed rows, or {@code null} when the resource is missing or unreadable
 */
public static <V> List<V> fromCsvRecource(String name, String separator, Class<V> c) {
    try (InputStream in = getResourceStream(name);
            InputStreamReader resourceReader = getResourceReader(in)) {
        if (resourceReader == null) {
            return null;
        }
        // First row is the header; remaining rows map onto instances of c.
        CsvSchema csvSchema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        return (List<V>) csvMapper.reader(csvSchema).forType(c).readValues(resourceReader).readAll();
    } catch (IOException e) {
        log.error("jackson from csv recource error, name: {}, type: {}", name, c, e);
        return null;
    }
}
/**
 * Writes the positions of the given route, in the index range
 * [startIndex, endIndex), to the target stream as header-prefixed CSV.
 *
 * @param route      source of the positions to write
 * @param target     destination stream (not closed by this method)
 * @param startIndex first position index to write (inclusive)
 * @param endIndex   last position index to write (exclusive)
 * @throws IOException if writing to the target fails
 */
public void write(CsvRoute route, OutputStream target, int startIndex, int endIndex) throws IOException {
    List<CsvPosition> positions = route.getPositions();
    CsvSchema.Builder schemaBuilder = new CsvSchema.Builder();
    // Derive the column set from the first position's row keys.
    if (!positions.isEmpty()) {
        for (String column : positions.get(0).getRowAsMap().keySet()) {
            schemaBuilder.addColumn(column);
        }
    }
    CsvSchema schema = schemaBuilder.build().withHeader().withColumnSeparator(getColumnSeparator());
    try (SequenceWriter rows = new CsvMapper().writer(schema).writeValues(target)) {
        for (int i = startIndex; i < endIndex; i++) {
            rows.write(positions.get(i).getRowAsMap());
        }
    }
}
}
/**
 * Extrapolate the CSV columns from the row keys.
 *
 * @param row A row; its key set (in iteration order) becomes the column list.
 * @return A constructed CSV schema.
 */
public CsvSchema buildCsvSchema(final Map<String, Object> row) {
    final CsvSchema.Builder schemaBuilder = CsvSchema.builder();
    row.keySet().forEach(schemaBuilder::addColumn);
    return schemaBuilder.build();
}
}
/**
 * Builds the CSV schema from the configured columns, separator and null value,
 * enables the requested parser features on the mapper, then delegates to the
 * superclass configuration.
 */
@Override
public boolean configure(Properties properties) {
    CsvSchema.Builder schemaBuilder = CsvSchema.builder();
    // Register every configured column by name, in order.
    for (Object column : columns) {
        schemaBuilder.addColumn(column.toString());
    }
    // Enable each requested parser feature; names are matched case-insensitively
    // using a locale-independent upper-casing.
    for (Object featureName : features) {
        mapper.enable(CsvParser.Feature.valueOf(featureName.toString().toUpperCase(Locale.ENGLISH)));
    }
    schemaBuilder.setColumnSeparator(separator);
    schemaBuilder.setNullValue(nullValue);
    schema = schemaBuilder.build();
    return super.configure(properties);
}
/**
 * Deserializes a CSV file into a list of objects. (Due to the flat nature of
 * CSV, Jackson cannot deserialize deeply nested structures or nested classes.)
 *
 * @param path      path of the CSV file
 * @param separator column separator; only its first character is used
 * @param c         target element type
 * @return the parsed rows, or {@code null} when reading fails
 */
public static <V> List<V> fromCsvFile(String path, String separator, Class<V> c) {
    try {
        // First row is the header; remaining rows map onto instances of c.
        CsvSchema csvSchema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        return (List<V>) csvMapper.reader(csvSchema).forType(c).readValues(new File(path)).readAll();
    } catch (IOException e) {
        log.error("jackson from csv error, path: {}, type: {}", path, c, e);
        return null;
    }
}
/**
 * Builds the CSV header.
 *
 * @param columns Columns to use for building the header, in order
 * @return CSV schema with the header enabled
 */
private static CsvSchema buildCsvHeaders(List<String> columns) {
    CsvSchema.Builder builder = CsvSchema.builder();
    // A plain loop is clearer than streaming for a simple ordered,
    // side-effecting traversal.
    for (String column : columns) {
        builder.addColumn(column);
    }
    return builder.setUseHeader(true).build();
}
}
@Override public void newTable(final String tableName, final List<ColumnInfo> columnsForTable) { currentTableName = tableName; final CsvSchema.Builder builder = CsvSchema.builder(); // Remove quoting of character which applies (somewhat arbitrarily, Tatu???) for string whose length is greater than MAX_QUOTE_CHECK = 24 -- See CVSWriter#_mayNeedQuotes builder.disableQuoteChar(); builder.setColumnSeparator('|'); for (final ColumnInfo columnInfo : columnsForTable) { builder.addColumn(columnInfo.getColumnName(), getColumnTypeFromSqlType(columnInfo.getDataType())); } currentCSVSchema = builder.build(); writer = mapper.writer(currentCSVSchema); shouldWriteHeader = true; }
/**
 * Creates a CSV writing worker.
 *
 * @param nullRepresentation text to emit for null cells; when {@code null}, the
 *                           shared default schema is used unchanged
 */
public CSVWritingWorker(final BlockingQueue<T> queue, final LogFile logFileRecord,
        final int maxFlushIntervalSeconds, final String nullRepresentation,
        final LogFileTracker tracker) {
    super(queue, logFileRecord, maxFlushIntervalSeconds, tracker);
    // Only derive a custom schema when a null representation was supplied.
    schema = (nullRepresentation == null)
            ? CSV_SCHEMA
            : CSV_SCHEMA.rebuild().setNullValue(nullRepresentation).build();
}
/**
 * Starts a new table: records its name, builds a CSV schema (default separator
 * and quoting) from the column metadata, and prepares a writer for it.
 */
@Override
public void newTable(final String tableName, final List<ColumnInfo> columnsForTable) {
    currentTableName = tableName;
    final CsvSchema.Builder schemaBuilder = CsvSchema.builder();
    for (final ColumnInfo column : columnsForTable) {
        schemaBuilder.addColumn(column.getColumnName(), getColumnTypeFromSqlType(column.getDataType()));
    }
    currentCSVSchema = schemaBuilder.build();
    writer = mapper.writer(currentCSVSchema);
    shouldWriteHeader = true;
}
MappingIterator<Map> it = mapper.readerFor(Map.class).with(schema.build()).readValues(csvFile); while (it.hasNext()) { Map<String, Object> row = it.next();
@Override public void newTable(final String tableName, final List<ColumnInfo> columnsForTable) { currentTableName = tableName; final CsvSchema.Builder builder = CsvSchema.builder(); // Remove quoting of character which applies (somewhat arbitrarily, Tatu???) for string whose length is greater than MAX_QUOTE_CHECK = 24 -- See CVSWriter#_mayNeedQuotes builder.disableQuoteChar(); builder.setColumnSeparator('|'); for (final ColumnInfo columnInfo : columnsForTable) { builder.addColumn(columnInfo.getColumnName(), getColumnTypeFromSqlType(columnInfo.getDataType())); } currentCSVSchema = builder.build(); writer = mapper.writer(currentCSVSchema); shouldWriteHeader = true; }