// Infer column names from the first row of the CSV (header row).
CsvSchema bootstrap = CsvSchema.emptySchema().withHeader();
CsvMapper csvMapper = new CsvMapper();
// Bind every subsequent row to a Map keyed by the header's column names.
// NOTE(review): `file` is defined outside this snippet — presumably a CSV File/source.
MappingIterator<Map<?, ?>> mappingIterator = csvMapper.reader(Map.class).with(bootstrap).readValues(file);
/**
 * Reads a CSV file whose first row is a header and binds every subsequent
 * row to a Map of column-name -> cell-value.
 *
 * @param file the CSV file to read
 * @return one Map per data row, in file order
 * @throws IOException if the file cannot be opened or parsed
 */
public static List<Map<String, String>> read(File file) throws JsonProcessingException, IOException {
    // ArrayList over LinkedList: the list is only appended to and iterated.
    List<Map<String, String>> response = new ArrayList<>();
    CsvMapper mapper = new CsvMapper();
    // Empty schema + withHeader(): column names are taken from the first row.
    CsvSchema schema = CsvSchema.emptySchema().withHeader();
    MappingIterator<Map<String, String>> iterator =
            mapper.reader(Map.class).with(schema).readValues(file);
    while (iterator.hasNext()) {
        response.add(iterator.next());
    }
    return response;
}
"1/2/09 4:53,Product2,1500\n"; EventList actual = mapper.reader(EventList.class).with(ContextAttributes.getEmpty() .withSharedAttribute("project", "project") .withSharedAttribute("collection", "collection")
// Parse two rows of headerless CSV into FooBar beans and print them.
String input = "1,2\n3,4";
CsvMapper m = new CsvMapper();
// Schema is derived from FooBar's properties; the input carries no header row.
CsvSchema schema = m.schemaFor(FooBar.class).withoutHeader().withLineSeparator("\n").withColumnSeparator(',');
// try-with-resources: the original never closed the reader.
try (StringReader reader = new StringReader(input)) {
    MappingIterator<FooBar> r = m.reader(FooBar.class).with(schema).readValues(reader);
    while (r.hasNext()) {
        System.out.println(r.nextValue());
    }
} catch (IOException e) {
    // JsonProcessingException is a subclass of IOException, so one catch suffices.
    e.printStackTrace();
}
//get a file stream in utf format for this file (since they are often not in utf by Charset charset = new AutoDetectReader(new FileInputStream(file)).getCharset(); String f = FileUtils.readFileToString(file, charset); CsvMapper mapper = new CsvMapper(); CsvSchema schema = CsvSchema.emptySchema().withHeader(); MappingIterator<Map<String, String>> it = mapper.reader(Map.class).with(schema).readValues(f.getBytes());
CsvMapper mapper = new CsvMapper();
// WRAP_AS_ARRAY exposes each CSV row as a JSON array so it can bind to Object[].
mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
File csvFile = new File("input.csv"); // or from String, URL etc
// One Object[] per row; no schema is needed since binding is purely positional.
MappingIterator<Object[]> it = mapper.reader(Object[].class).readValues(csvFile);
// Read each CSV row positionally as an Object[] — no header, no schema.
File csvFile = new File("input.csv"); // or from String, URL etc
CsvMapper mapper = new CsvMapper();
// Rows are surfaced as JSON arrays so they can bind to Object[].
mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
MappingIterator<Object[]> it = mapper.reader(Object[].class).readValues(csvFile);
/**
 * Lazily builds the character -> escape-string lookup table from the bundled
 * escape_bibtex.txt resource (two-column rows: escape string, character),
 * using double-checked locking so the table is parsed at most once.
 */
public BibtexManagerImpl(){
    if (escapeW3C == null){
        synchronized(initLock){
            if (escapeW3C == null){
                CsvMapper mapper = new CsvMapper();
                // WRAP_AS_ARRAY lets each row bind to a String[].
                mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
                // try-with-resources: the original leaked this classpath stream.
                try (java.io.InputStream in = getClass().getResourceAsStream("/org/orcid/core/manager/impl/escape_bibtex.txt")) {
                    if (in == null) {
                        // Fail loudly instead of with a bare NPE from readValues().
                        throw new IllegalStateException("escape_bibtex.txt resource not found");
                    }
                    MappingIterator<String[]> it = mapper.reader(String[].class).readValues(in);
                    ImmutableMap.Builder<Character,String> builder = new ImmutableMap.Builder<Character,String>();
                    while (it.hasNext()){
                        String[] row = it.next();
                        // column 1 holds the character, column 0 its escaped form;
                        // charAt already returns char, so the original cast was redundant
                        if (row.length == 2)
                            builder.put(row[1].trim().charAt(0), row[0]);
                    }
                    escapeW3C = builder.build();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }
}
/**
 * Lazily builds the character -> escape-string lookup table from the
 * escape_bibtex.txt resource next to this class (two-column rows: escape
 * string, character), using double-checked locking so it is parsed at most once.
 */
public BibtexManagerImpl(){
    if (escapeW3C == null){
        synchronized(initLock){
            if (escapeW3C == null){
                CsvMapper mapper = new CsvMapper();
                // WRAP_AS_ARRAY lets each row bind to a String[].
                mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
                // try-with-resources: the original leaked this classpath stream.
                try (java.io.InputStream in = getClass().getResourceAsStream("escape_bibtex.txt")) {
                    if (in == null) {
                        // Fail loudly instead of with a bare NPE from readValues().
                        throw new IllegalStateException("escape_bibtex.txt resource not found");
                    }
                    MappingIterator<String[]> it = mapper.reader(String[].class).readValues(in);
                    ImmutableMap.Builder<Character,String> builder = new ImmutableMap.Builder<Character,String>();
                    while (it.hasNext()){
                        String[] row = it.next();
                        // column 1 holds the character, column 0 its escaped form;
                        // charAt already returns char, so the original cast was redundant
                        if (row.length == 2)
                            builder.put(row[1].trim().charAt(0), row[0]);
                    }
                    escapeW3C = builder.build();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            }
        }
    }
}
/**
 * Deserializes a CSV file into a list of beans. (Because of CSV's flat format,
 * Jackson cannot deserialize deeply nested structures; nested classes are not
 * supported.)
 *
 * @param path      path of the CSV file
 * @param separator column separator (first character is used)
 * @param c         target bean type
 * @return the parsed rows, or null if reading fails
 */
public static <V> List<V> fromCsvFile(String path, String separator, Class<V> c) {
    try {
        CsvSchema schema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        MappingIterator<V> rows = csvMapper.reader(schema).forType(c).readValues(new File(path));
        return rows.readAll();
    } catch (IOException e) {
        log.error("jackson from csv error, path: {}, type: {}", path, c, e);
        return null;
    }
}
/**
 * Convenience method which is functionally equivalent to:
 *<pre>
 *  reader(pojoType).withSchema(typedSchemaFor(pojoType));
 *</pre>
 * that is, constructs a {@link ObjectReader} which both binds to
 * specified type and uses "strict" {@link CsvSchema} introspected from
 * specified type (one where typing is inferred).
 */
public ObjectReader readerWithTypedSchemaFor(Class<?> pojoType) {
    final JavaType type = constructType(pojoType);
    // A typed schema cannot be introspected for container types: its shape would
    // have to differ from the data-binding target, so reject them up front.
    if (type.isArrayType() || type.isCollectionLikeType()) {
        throw new IllegalArgumentException("Type can NOT be a Collection or array type");
    }
    return reader(type).with(typedSchemaFor(type));
}
/** * Create a new iterator from the given reader. * * @param csvReader A reader of CSV rows. */ public InnerRowIterator(final Reader csvReader) { // Construct our schema. CsvSchema schema = CsvSchema.emptySchema().withUseHeader(true); CsvMapper mapper = new CsvMapper(); TypeReference<LinkedHashMap<String, Object>> typeRef = new TypeReference<LinkedHashMap<String, Object>>() { }; try { ObjectReader reader = mapper.reader(schema).withType(typeRef); innerIterator = reader.readValues(csvReader); } catch (IOException ioe) { logger.error("CSV File does not exist."); } }
/**
 * Deserializes a CSV file from the resources directory into a list of beans.
 * (Because of CSV's flat format, Jackson cannot deserialize deeply nested
 * structures; nested classes are not supported.)
 *
 * @param name      resource file name
 * @param separator column separator (first character is used)
 * @param c         target bean type
 * @return the parsed rows, or null if the resource is missing or unreadable
 */
public static <V> List<V> fromCsvRecource(String name, String separator, Class<V> c) {
    try (InputStream inputStream = getResourceStream(name);
         InputStreamReader reader = getResourceReader(inputStream)) {
        if (reader == null) {
            return null;
        }
        CsvSchema schema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        MappingIterator<V> rows = csvMapper.reader(schema).forType(c).readValues(reader);
        return rows.readAll();
    } catch (IOException e) {
        log.error("jackson from csv recource error, name: {}, type: {}", name, c, e);
        return null;
    }
}
/**
 * Creates a Jackson object reader based on a mapper. Has a special handling
 * for CSV media types.
 *
 * @return The Jackson object reader.
 */
protected ObjectReader createObjectReader() {
    // CSV needs an explicit schema; all other media types use plain binding.
    if (MediaType.TEXT_CSV.isCompatible(getMediaType())) {
        CsvMapper csvMapper = (CsvMapper) getObjectMapper();
        CsvSchema csvSchema = createCsvSchema(csvMapper);
        return csvMapper.reader(getObjectClass()).with(csvSchema);
    }
    return getObjectMapper().reader(getObjectClass());
}
/**
 * Builds the CSV serializer/deserializer pair for {@code OffsetInfo} rows.
 * Strict quoting is enabled so values are quoted only when required, and the
 * line separator is cleared via withLineSeparator("").
 */
private static OffsetSerDe csvOffsetSerDe() {
    CsvMapper mapper = new CsvMapper()
            .configure(CsvGenerator.Feature.STRICT_CHECK_FOR_QUOTING, true);
    CsvSchema offsetSchema = mapper.schemaFor(OffsetInfo.class).withLineSeparator("");
    return new OffsetSerDe(
            mapper.writer(offsetSchema),
            mapper.reader(offsetSchema).forType(OffsetInfo.class));
}
/**
 * Builds a row iterator over the given CSV bytes using the schema derived
 * from {@link CsvMapperData} (custom separator, first data row skipped,
 * header row enabled, columns cleared).
 *
 * @param csvContent raw CSV bytes
 * @return iterator producing one {@link CsvMapperData} per row
 * @throws TermCsvLoaderException if the content cannot be read
 */
private MappingIterator<CsvMapperData> readValues(byte[] csvContent) throws TermCsvLoaderException {
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(CsvMapperData.class)
            .withColumnSeparator(CSV_SEPARATOR)
            .withSkipFirstDataRow(true)
            .withHeader()
            .withoutColumns();
    try {
        return mapper.reader(CsvMapperData.class).with(schema).readValues(csvContent);
    } catch (IOException e) {
        throw new TermCsvLoaderException(e);
    }
}
}
/**
 * Builds a row iterator over the given CSV bytes using the schema derived
 * from {@link CsvMapperData} (custom separator, first data row skipped,
 * header row enabled, columns cleared).
 *
 * @param csvContent raw CSV bytes
 * @return iterator producing one {@link CsvMapperData} per row
 * @throws ReferenceCsvLoaderException if the content cannot be read
 */
private MappingIterator<CsvMapperData> readValues(byte[] csvContent) throws ReferenceCsvLoaderException {
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(CsvMapperData.class)
            .withColumnSeparator(CSV_SEPARATOR)
            .withSkipFirstDataRow(true)
            .withHeader()
            .withoutColumns();
    try {
        return mapper.reader(CsvMapperData.class).with(schema).readValues(csvContent);
    } catch (IOException e) {
        throw new ReferenceCsvLoaderException(e);
    }
}
}
/**
 * Creates a {@link MappingIterator} over the supplied CSV bytes, one
 * {@link CsvMapperData} per row. The schema uses CSV_SEPARATOR, skips the
 * first data row, treats the first row as a header, and clears typed columns.
 *
 * @param csvContent raw CSV bytes
 * @return row iterator over the content
 * @throws TermCsvLoaderException if the content cannot be read
 */
private MappingIterator<CsvMapperData> readValues(byte[] csvContent) throws TermCsvLoaderException {
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(CsvMapperData.class)
            .withColumnSeparator(CSV_SEPARATOR)
            .withSkipFirstDataRow(true)
            .withHeader()
            .withoutColumns();
    try {
        return mapper.reader(CsvMapperData.class).with(schema).readValues(csvContent);
    } catch (IOException e) {
        throw new TermCsvLoaderException(e);
    }
}
}
/**
 * Creates a {@link MappingIterator} over the supplied CSV bytes, one
 * {@link CsvMapperData} per row. The schema uses CSV_SEPARATOR, skips the
 * first data row, treats the first row as a header, and clears typed columns.
 *
 * @param csvContent raw CSV bytes
 * @return row iterator over the content
 * @throws ReferenceCsvLoaderException if the content cannot be read
 */
private MappingIterator<CsvMapperData> readValues(byte[] csvContent) throws ReferenceCsvLoaderException {
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(CsvMapperData.class)
            .withColumnSeparator(CSV_SEPARATOR)
            .withSkipFirstDataRow(true)
            .withHeader()
            .withoutColumns();
    try {
        return mapper.reader(CsvMapperData.class).with(schema).readValues(csvContent);
    } catch (IOException e) {
        throw new ReferenceCsvLoaderException(e);
    }
}
}
private MappingIterator<Map<String,String>> parseCsv(String csv) throws JsonProcessingException, IOException { return new CsvMapper().reader(Map.class) .with(CsvSchema.emptySchema().withHeader()) // use first row as header .readValues(csv); }