@Override
public TrueFxTicker deserialize(JsonParser parser, DeserializationContext context)
        throws IOException, JsonProcessingException {
    // Each TrueFx record arrives as a positional CSV row; read it as an array node.
    ArrayNode row = mapper.readerFor(TrueFxTicker.class).with(schema).readTree(parser);
    return new TrueFxTicker(
            row.get(0).asText(),   // pair
            row.get(1).asLong(),   // timestamp
            decimalAt(row, 2),     // bid
            decimalAt(row, 3),     // bidBP
            decimalAt(row, 4),     // ask
            decimalAt(row, 5),     // askBP
            decimalAt(row, 6),     // low
            decimalAt(row, 7),     // high
            decimalAt(row, 8));    // open
}

/** Parses column {@code index} of {@code row} as an exact {@link BigDecimal}. */
private static BigDecimal decimalAt(ArrayNode row, int index) {
    return new BigDecimal(row.get(index).asText());
}
}
// Reader that binds each CSV record to a raw String[] row, with the configured
// parser features applied. The zero-length array form of toArray is the
// preferred idiom: it is never slower and avoids a redundant size() call.
ObjectReader objReader = mapper.readerFor(String[].class)
    .with(csvSchema)
    .withFeatures(features.toArray(new CsvParser.Feature[0]));
/**
 * Initialize the tokenizer.
 *
 * @param filterCsvSchema Schema to initialize the tokenizer for
 * @return an initialized ObjectReader that yields each CSV record as a String[]
 */
protected static ObjectReader init(CsvSchema filterCsvSchema) {
    CsvMapper csvMapper = new CsvMapper();
    // Bind each record directly to String[] instead of wrapping records in an outer array.
    csvMapper.disable(CsvParser.Feature.WRAP_AS_ARRAY);
    return csvMapper.readerFor(String[].class).with(filterCsvSchema);
}
/** * Convenience method which is functionally equivalent to: *<pre> * reader(pojoType).withSchema(typedSchemaFor(pojoType)); *</pre> * that is, constructs a {@link ObjectReader} which both binds to * specified type and uses "strict" {@link CsvSchema} introspected from * specified type (one where typing is inferred). */ public ObjectReader readerWithTypedSchemaFor(Class<?> pojoType) { JavaType type = constructType(pojoType); // sanity check: not useful for structured types, since // schema type will need to differ from data-bind type if (type.isArrayType() || type.isCollectionLikeType()) { throw new IllegalArgumentException("Type can NOT be a Collection or array type"); } return readerFor(type).with(typedSchemaFor(type)); }
// Stream the CSV file row by row, each row bound to a Map keyed by column name.
// NOTE(review): the raw Map in MappingIterator<Map> forces an unchecked
// conversion at the it.next() assignment below; MappingIterator<Map<String, Object>>
// would make this type-safe.
MappingIterator<Map> it = mapper.readerFor(Map.class).with(schema.build()).readValues(csvFile); while (it.hasNext()) { Map<String, Object> row = it.next();
it = mapper.readerFor(Map.class) .with(schema) .readValues(file);
/** * Convenience method which is functionally equivalent to: *<pre> * reader(pojoType).withSchema(typedSchemaFor(pojoType)); *</pre> * that is, constructs a {@link ObjectReader} which both binds to * specified type and uses "strict" {@link CsvSchema} introspected from * specified type (one where typing is inferred). */ public ObjectReader readerWithTypedSchemaFor(Class<?> pojoType) { JavaType type = constructType(pojoType); // sanity check: not useful for structured types, since // schema type will need to differ from data-bind type if (type.isArrayType() || type.isCollectionLikeType()) { throw new IllegalArgumentException("Type can NOT be a Collection or array type"); } return readerFor(type).with(typedSchemaFor(type)); }
public static LinkRelationTypes load() throws IOException { CsvMapper mapper = new CsvMapper(); mapper.enable(Feature.WRAP_AS_ARRAY); URL resource = LinkRelationTypes.class.getResource("/link-relations.csv"); MappingIterator<String[]> itr = mapper.readerFor(String[].class).readValues(resource); // Skip header itr.next(); Map<String, LinkRelationType> rels = new LinkedHashMap<>(); while (itr.hasNext()) { String[] row = itr.next(); String name = row[0]; String description = row[1]; String href = toHref(row[2]); rels.put(name, new LinkRelationType(name, description, href)); } return new LinkRelationTypes(rels); }
/**
 * Constructs an {@link ObjectReader} that both binds to {@code pojoType} and
 * uses a "loose" {@link CsvSchema} (one without strict inferred typing)
 * introspected from that same type. Functionally equivalent to:
 *<pre>
 * reader(pojoType).withSchema(schemaFor(pojoType));
 *</pre>
 *
 * @param pojoType Type used both for data-binding (result type) and for
 *        schema introspection. NOTE: must NOT be an array or Collection type —
 *        those only make sense for data-binding (like arrays of objects to
 *        bind), but not for schema construction, since no CSV types can be
 *        mapped to arrays or Collections.
 */
public ObjectReader readerWithSchemaFor(Class<?> pojoType) {
    JavaType bindType = constructType(pojoType);
    // Sanity check: structured types would need a schema type different from
    // the data-bind type, so they are rejected up front.
    boolean structured = bindType.isArrayType() || bindType.isCollectionLikeType();
    if (structured) {
        throw new IllegalArgumentException("Type can NOT be a Collection or array type");
    }
    return readerFor(bindType).with(schemaFor(bindType));
}
// Stream the file's CSV rows as column-keyed Maps; the explicit buffer size
// controls read chunking for large files.
// NOTE(review): readerFor(Map.class) is raw with respect to the declared
// MappingIterator<Map<String, String>>, an unchecked conversion. Also,
// FileReader uses the platform default charset — confirm that is intended.
try (FileReader fr = new FileReader(file); BufferedReader reader = new BufferedReader(fr, DEFAULT_BUFFER_SIZE)) { MappingIterator<Map<String, String>> it = CSV_MAPPER.readerFor(Map.class) .with(schema) .readValues(reader);
/**
 * Convenience factory equivalent to
 * {@code reader(pojoType).withSchema(schemaFor(pojoType))}: binds to
 * {@code pojoType} and uses a "loose" (non-strictly-typed) {@link CsvSchema}
 * introspected from it.
 *
 * @param pojoType type used both as the data-binding target and for schema
 *        introspection; must not be an array or Collection type, since CSV
 *        columns cannot be mapped onto arrays or Collections
 */
public ObjectReader readerWithSchemaFor(Class<?> pojoType) {
    JavaType target = constructType(pojoType);
    if (target.isArrayType() || target.isCollectionLikeType()) {
        // Schema introspection cannot work for structured types: the schema
        // type would have to differ from the data-bind type.
        throw new IllegalArgumentException("Type can NOT be a Collection or array type");
    }
    return readerFor(target).with(schemaFor(target));
}
protected boolean read(Reader reader, ParserContext<CsvRoute> context) throws IOException { List<CsvPosition> positions = new ArrayList<>(); CsvSchema schema = CsvSchema.emptySchema().withHeader().withColumnSeparator(getColumnSeparator()); ObjectReader objectReader = new CsvMapper().readerFor(LinkedHashMap.class).with(schema); try { MappingIterator<LinkedHashMap<String, String>> iterator = objectReader.readValues(reader); while (iterator.hasNext()) { LinkedHashMap<String, String> rowAsMap = iterator.next(); if (containsGarbage(rowAsMap)) { log.warning(format("Found garbage in '%s'", rowAsMap)); return false; } CsvPosition position = new CsvPosition(rowAsMap); // skip positions without any reasonable data to make format less greedy if(position.getLongitude() == null && position.getLatitude() == null && position.getDescription() == null) continue; positions.add(position); } } finally { reader.close(); } if (positions.size() > 0) { context.appendRoute(new CsvRoute(this, null, positions)); return true; } else return false; }
/**
 * Parses {@code rulesString} as semicolon-separated CSV (with a header row)
 * into a list of {@code type} instances, applying {@code mixin} annotations.
 * Empty strings bind as null objects; unknown columns fail the import.
 */
private static <T> List<T> importCsvRules(Class<T> type, Class<?> mixin, String rulesString) throws Exception {
    CsvMapper csvMapper = new CsvMapper();
    csvMapper.addMixIn(type, mixin);
    csvMapper.enable(DeserializationFeature.ACCEPT_EMPTY_STRING_AS_NULL_OBJECT);
    csvMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, true);
    CsvSchema semicolonSchema = CsvSchema.emptySchema().withHeader().withColumnSeparator(';');
    MappingIterator<T> rows = csvMapper.readerFor(type).with(semicolonSchema).readValues(rulesString);
    return rows.readAll();
}
private MappingIterator<TruckEvent> readTruckEventsFromCsv(InputStream csvStream) throws IOException { CsvSchema bootstrap = CsvSchema.builder() // driverId,truckId,eventTime,eventType,longitude,latitude,eventKey,correlationId,driverName,routeId,routeName,eventDate .addColumn("driverId", CsvSchema.ColumnType.NUMBER) .addColumn("truckId", CsvSchema.ColumnType.NUMBER) .addColumn("eventTime", CsvSchema.ColumnType.STRING) .addColumn("eventType", CsvSchema.ColumnType.STRING) .addColumn("longitude", CsvSchema.ColumnType.NUMBER) .addColumn("latitude", CsvSchema.ColumnType.NUMBER) .addColumn("eventKey", CsvSchema.ColumnType.STRING) .addColumn("correlationId", CsvSchema.ColumnType.NUMBER) .addColumn("driverName", CsvSchema.ColumnType.STRING) .addColumn("routeId", CsvSchema.ColumnType.NUMBER) .addColumn("routeName", CsvSchema.ColumnType.STRING) .addColumn("eventDate", CsvSchema.ColumnType.STRING) // .addColumn("miles", CsvSchema.ColumnType.NUMBER) .build().withHeader(); CsvMapper csvMapper = new CsvMapper(); return csvMapper.readerFor(TruckEvent.class).with(bootstrap).readValues(csvStream); }
// Reader for CSV event-data rows; optionally skips the first data row.
CsvSchema eventSchema = CSV_SCHEMA.withSkipFirstDataRow(skipFirst);
ObjectReader reader = CSV_MAPPER.readerFor(CsvEventDataValue.class).with(eventSchema);