/**
 * Enables or disables the given CSV generator feature on this mapper.
 *
 * @param f the generator feature to change
 * @param state {@code true} to enable, {@code false} to disable
 * @return this mapper, for call chaining
 */
public CsvMapper configure(CsvGenerator.Feature f, boolean state) {
    if (state) {
        return enable(f);
    }
    return disable(f);
}
/**
 * Enables or disables the given CSV parser feature on this mapper.
 *
 * @param f the parser feature to change
 * @param state {@code true} to enable, {@code false} to disable
 * @return this mapper, for call chaining
 */
public CsvMapper configure(CsvParser.Feature f, boolean state) {
    if (state) {
        return enable(f);
    }
    return disable(f);
}
/**
 * Switches the given CSV generator feature on or off.
 *
 * @param f the generator feature to change
 * @param state {@code true} to enable, {@code false} to disable
 * @return this mapper, for call chaining
 */
public CsvMapper configure(CsvGenerator.Feature f, boolean state) {
    if (state) {
        return enable(f);
    }
    return disable(f);
}
/**
 * Switches the given CSV parser feature on or off.
 *
 * @param f the parser feature to change
 * @param state {@code true} to enable, {@code false} to disable
 * @return this mapper, for call chaining
 */
public CsvMapper configure(CsvParser.Feature f, boolean state) {
    if (state) {
        return enable(f);
    }
    return disable(f);
}
// Configure a CSV mapper that presents each row as a JSON array, so rows can
// be bound to Object[] without declaring a schema.
CsvMapper mapper = new CsvMapper();
mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
File csvFile = new File("input.csv"); // or from String, URL etc
// readerFor() replaces reader(Class), which is deprecated since Jackson 2.5.
MappingIterator<Object[]> it = mapper.readerFor(Object[].class).readValues(csvFile);
// Configure a CSV mapper that presents each row as a JSON array, so rows can
// be bound to Object[] without declaring a schema.
CsvMapper mapper = new CsvMapper();
mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
File csvFile = new File("input.csv"); // or from String, URL etc
// readerFor() replaces reader(Class), which is deprecated since Jackson 2.5.
MappingIterator<Object[]> it = mapper.readerFor(Object[].class).readValues(csvFile);
/**
 * Applies format-specific feature tweaks to the shared YAML, Java-properties
 * and CSV mappers (fields declared elsewhere in this class).
 */
private static void configSpecial() {
    // Use the platform's native line separator when writing YAML
    yamlMapper.enable(YAMLGenerator.Feature.USE_PLATFORM_LINE_BREAKS);
    // Allow comments in YAML input
    yamlMapper.enable(JsonParser.Feature.ALLOW_COMMENTS);
    yamlMapper.enable(JsonParser.Feature.ALLOW_YAML_COMMENTS);
    // Allow comments in properties input
    propsMapper.enable(JavaPropsParser.Feature.ALLOW_COMMENTS);
    propsMapper.enable(JavaPropsParser.Feature.ALLOW_YAML_COMMENTS);
    // Trim leading and trailing spaces from CSV values
    csvMapper.enable(CsvParser.Feature.TRIM_SPACES);
    // Ignore empty lines in CSV input
    csvMapper.enable(CsvParser.Feature.SKIP_EMPTY_LINES);
    csvMapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
}
/**
 * Constructs a mapper backed by the given CSV stream factory.
 *
 * @param f the factory used to create CSV parsers and generators
 */
public CsvMapper(CsvFactory f) {
    super(f);
    // As per #11: default to alphabetic ordering
    enable(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY);
    // Small LRU caches for schemas derived from Java types
    // (initial capacity 8, bounded at 32 entries each).
    _untypedSchemas = new LRUMap<JavaType,CsvSchema>(8,32);
    _typedSchemas = new LRUMap<JavaType,CsvSchema>(8,32);
}
public BibtexManagerImpl(){ if (escapeW3C == null){ synchronized(initLock){ if (escapeW3C == null){ CsvMapper mapper = new CsvMapper(); mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); try { MappingIterator<String[]> it = mapper.reader(String[].class).readValues(getClass().getResourceAsStream("/org/orcid/core/manager/impl/escape_bibtex.txt")); ImmutableMap.Builder<Character,String> builder = new ImmutableMap.Builder<Character,String>(); while (it.hasNext()){ String[] row = it.next(); if (row.length == 2) builder.put((char)row[1].trim().charAt(0), row[0]); } escapeW3C = builder.build(); } catch (Exception e) { throw new RuntimeException(e); } } } } }
public BibtexManagerImpl(){ if (escapeW3C == null){ synchronized(initLock){ if (escapeW3C == null){ CsvMapper mapper = new CsvMapper(); mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); try { MappingIterator<String[]> it = mapper.reader(String[].class).readValues(getClass().getResourceAsStream("escape_bibtex.txt")); ImmutableMap.Builder<Character,String> builder = new ImmutableMap.Builder<Character,String>(); while (it.hasNext()){ String[] row = it.next(); if (row.length == 2) builder.put((char)row[1].trim().charAt(0), row[0]); } escapeW3C = builder.build(); } catch (Exception e) { throw new RuntimeException(e); } } } } }
/**
 * Builds the CSV schema from the configured columns and enables the requested
 * parser features on the shared mapper, then delegates to the superclass.
 *
 * @param properties configuration handed to the superclass
 * @return the superclass's configure result
 */
@Override
public boolean configure(Properties properties) {
    CsvSchema.Builder schemaBuilder = CsvSchema.builder();
    for (Object column : columns) {
        schemaBuilder.addColumn(column.toString());
    }
    for (Object featureName : features) {
        // Feature names are matched case-insensitively; Locale.ROOT avoids
        // locale-sensitive casing surprises (e.g. Turkish dotless i) just as
        // Locale.ENGLISH did.
        CsvParser.Feature feature =
                CsvParser.Feature.valueOf(featureName.toString().toUpperCase(Locale.ENGLISH));
        mapper.enable(feature);
    }
    schemaBuilder.setColumnSeparator(separator);
    schemaBuilder.setNullValue(nullValue);
    schema = schemaBuilder.build();
    return super.configure(properties);
}
public static LinkRelationTypes load() throws IOException { CsvMapper mapper = new CsvMapper(); mapper.enable(Feature.WRAP_AS_ARRAY); URL resource = LinkRelationTypes.class.getResource("/link-relations.csv"); MappingIterator<String[]> itr = mapper.readerFor(String[].class).readValues(resource); // Skip header itr.next(); Map<String, LinkRelationType> rels = new LinkedHashMap<>(); while (itr.hasNext()) { String[] row = itr.next(); String name = row[0]; String description = row[1]; String href = toHref(row[2]); rels.put(name, new LinkRelationType(name, description, href)); } return new LinkRelationTypes(rels); }
/**
 * Deserializes semicolon-separated CSV text (with a header row) into a list
 * of {@code type} instances, applying the given Jackson mix-in.
 *
 * @param type target element type
 * @param mixin mix-in class applied to {@code type}
 * @param rulesString CSV content to parse
 * @return all parsed rows
 * @throws Exception if parsing or binding fails
 */
private static <T> List<T> importCsvRules(Class<T> type, Class<?> mixin, String rulesString) throws Exception {
    CsvSchema csvSchema = CsvSchema.emptySchema().withHeader().withColumnSeparator(';');
    CsvMapper mapper = new CsvMapper();
    mapper.addMixIn(type, mixin);
    // Treat "" as null for object-valued properties; fail fast on columns
    // that do not map to a known property.
    mapper.enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, true);
    // try-with-resources: the MappingIterator was previously never closed.
    try (MappingIterator<T> readValues = mapper.readerFor(type).with(csvSchema).readValues(rulesString)) {
        return readValues.readAll();
    }
}
@Test public final void testRunSorterSecondColumnThenFirst() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput3, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(1, 0), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterFirstColumnThenSecond() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput3, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterFirstColumn() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput1, 1, 2, 4, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0), true); } verifyCSV(testOutput, 1, 2, 4, mapper, schema); }
@Test public final void testRunSorterSecondColumn() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput1, 1, 2, 4, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(1), true); } verifyCSV(testOutput, 1, 2, 4, mapper, schema); }
@Test public final void testRunSorterTSV() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build(); verifyCSV(testInput4, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput4, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterTSVMultipleHeaderLines() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build(); verifyCSV(testInput5, 10, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput5, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 10, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 10, 2, 5, mapper, schema); }