throw new RakamException("Invalid column separator", BAD_REQUEST); builder.setColumnSeparator(column_seperator.get(0).charAt(0)); .withSharedAttribute("collection", collection) .withSharedAttribute("apiKey", apiKey)) .with(builder.build()).readValue(buff);
.setColumnSeparator(csvFormat.getDelimiter()) .setLineSeparator(csvFormat.getRecordSeparator()) .setAllowComments("#" .equals(CharUtils.toString(csvFormat.getCommentMarker()))) .setUseHeader(false); csvSchemaBuilder = (csvFormat.getQuoteCharacter() == null) ? csvSchemaBuilder : csvSchemaBuilder.setQuoteChar(csvFormat.getQuoteCharacter()); csvSchemaBuilder = (csvFormat.getEscapeCharacter() == null) ? csvSchemaBuilder : csvSchemaBuilder.setEscapeChar(csvFormat.getEscapeCharacter()); csvSchemaBuilder = csvSchemaBuilder.setSkipFirstDataRow(true); CsvSchema csvSchema = csvSchemaBuilder.build();
CsvSchema.Builder builder = _schema.rebuild().clearColumns(); builder.addColumn(name, prev.getType()); } else { builder.addColumn(name); CsvSchema newSchema = builder.build(); int size = newSchema.size(); if (size < 2) { // 1 just because we may get 'empty' header name setSchema(builder.build());
CsvSchema.Builder builder = _schema.rebuild().clearColumns(); builder.addColumn(name, prev.getType()); } else { builder.addColumn(name); CsvSchema newSchema = builder.build(); int size = newSchema.size(); if (size < 2) { // 1 just because we may get 'empty' header name setSchema(builder.build());
CsvSchema.Builder builder = _schema.rebuild().clearColumns(); builder.addColumn(name, prev.getType()); } else { builder.addColumn(name); CsvSchema newSchema = builder.build(); int size = newSchema.size(); if (size < 2) { // 1 just because we may get 'empty' header name setSchema(builder.build());
public void testDatabindingThirdPartyPojoWithMixinAnnotations() throws JsonProcessingException, IOException { CsvSchema schema = CsvSchema.builder() .setColumnSeparator('\t') .addColumn("geoNameId") .addColumn("name") .addColumn("asciiName") .addColumn("alternateNames") .addColumn("latitude", CsvSchema.ColumnType.NUMBER) .addColumn("longitude", CsvSchema.ColumnType.NUMBER) .addColumn("featureClass") .addColumn("featureCode") .addColumn("countryCode") .addColumn("countryCode2") .addColumn("adminCode1") .addColumn("adminCode2") .addColumn("adminCode3") .addColumn("adminCode4") .addColumn("population") .addColumn("elevation", CsvSchema.ColumnType.NUMBER) .addColumn("dem", CsvSchema.ColumnType.NUMBER) .addColumn("timezoneCode") .addColumn("lastModified") .build(); CsvMapper mapper = new CsvMapper(); mapper.addMixInAnnotations(Toponym.class, ToponymMixIn1.class);
.setUseHeader(true) .addColumn("scenarioName") .addColumn("scenarioLoop", CsvSchema.ColumnType.NUMBER) .addColumn("stepName") .addColumn("stepLoop", CsvSchema.ColumnType.NUMBER) .addColumn("correlationId") .addColumn("requestTimeStamp") .addColumn("responseDelayMilliSec", CsvSchema.ColumnType.NUMBER) .addColumn("responseTimeStamp") .addColumn("result") .build();
@Test public final void testWriteFullCode() throws Exception { List<String> headers = Arrays.asList("TestHeader1", "TestHeader2"); List<List<String>> dataSource = Arrays.asList(); // Or alternatively, // List<List<String>> dataSource = Arrays.asList(Arrays.asList("TestValue1", "TestValue2")); java.io.Writer writer = new StringWriter(); CsvSchema.Builder builder = CsvSchema.builder(); for (String nextHeader : headers) { builder = builder.addColumn(nextHeader); } CsvSchema schema = builder.setUseHeader(true).build(); try (SequenceWriter csvWriter = new CsvMapper().writerWithDefaultPrettyPrinter().with(schema).forType( List.class).writeValues(writer);) { for (List<String> nextRow : dataSource) { csvWriter.write(nextRow); } // Check to see whether dataSource is empty // and if so write a single empty list to trigger header output if (dataSource.isEmpty()) { csvWriter.write(Arrays.asList()); } } System.out.println(writer.toString()); }
/**
 * Mutant factory method that will try to combine columns of this schema with those
 * from `toAppend`, starting with columns of this instance, and ignoring
 * duplicates (if any) from argument `toAppend`.
 * All settings aside from column sets are copied from `this` instance.
 *<p>
 * As with all `withXxx()` methods this method never modifies `this` but either
 * returns it unmodified (if no new columns found from `toAppend`), or constructs
 * a new instance and returns that.
 *
 * @since 2.9
 */
public CsvSchema withColumnsFrom(CsvSchema toAppend) {
    int addCount = toAppend.size();
    if (addCount == 0) {
        return this;
    }
    Builder b = rebuild();
    int appended = 0;
    for (int i = 0; i < addCount; ++i) {
        Column col = toAppend.column(i);
        // Duplicate names keep this schema's existing definition.
        if (column(col.getName()) == null) {
            b.addColumn(col);
            ++appended;
        }
    }
    // Honor the documented contract: if every candidate column was a
    // duplicate, no new columns were found, so return this instance
    // unmodified instead of allocating an equivalent copy.
    // (Previously a new instance was built even when nothing was appended.)
    return (appended == 0) ? this : b.build();
}
/**
 * Mutant factory method that will try to combine columns of this schema with those
 * from `toAppend`, starting with columns of this instance, and ignoring
 * duplicates (if any) from argument `toAppend`.
 * All settings aside from column sets are copied from `this` instance.
 *<p>
 * As with all `withXxx()` methods this method never modifies `this` but either
 * returns it unmodified (if no new columns found from `toAppend`), or constructs
 * a new instance and returns that.
 *
 * @since 2.9
 */
public CsvSchema withColumnsFrom(CsvSchema toAppend) {
    final int incoming = toAppend.size();
    // Nothing to merge: hand back this (immutable) instance as-is.
    if (incoming == 0) {
        return this;
    }
    Builder merged = rebuild();
    for (int ix = 0; ix < incoming; ++ix) {
        Column candidate = toAppend.column(ix);
        if (column(candidate.getName()) != null) {
            continue; // duplicate name; keep our own column definition
        }
        merged.addColumn(candidate);
    }
    return merged.build();
}
// Builds a CsvSchema for the given POJO type by introspecting its serializable
// bean properties, caching the result in the supplied LRU map. `typed` selects
// whether each column records the property's inferred column type.
protected CsvSchema _schemaFor(JavaType pojoType, LRUMap<JavaType,CsvSchema> schemas, boolean typed)
{
    // Fast path: return a previously computed schema. The shared map is
    // guarded by its own monitor for each access.
    synchronized (schemas) {
        CsvSchema s = schemas.get(pojoType);
        if (s != null) {
            return s;
        }
    }
    // NOTE(review): if two threads race past the check above, the schema is
    // simply computed twice and the last writer wins — harmless, since the
    // result is equivalent either way.
    BeanDescription beanDesc = getSerializationConfig().introspect(pojoType);
    CsvSchema.Builder builder = CsvSchema.builder();
    for (BeanPropertyDefinition prop : beanDesc.findProperties()) {
        // ignore setter-only properties:
        if (prop.couldSerialize()) {
            if (typed) {
                // Map the accessor's raw Java type to a CSV column type.
                builder.addColumn(prop.getName(), _determineType(prop.getAccessor().getRawType()));
            } else {
                builder.addColumn(prop.getName());
            }
        }
    }
    CsvSchema result = builder.build();
    synchronized (schemas) {
        schemas.put(pojoType, result);
    }
    return result;
}
@Override protected void startSerialize( RootNode rootNode, OutputStream outputStream ) throws Exception { csvGenerator = CSV_FACTORY.createGenerator( outputStream ); CsvSchema.Builder schemaBuilder = CsvSchema.builder() .setUseHeader( true ); // build schema for ( Node child : rootNode.getChildren() ) { if ( child.isCollection() ) { if ( !child.getChildren().isEmpty() ) { Node node = child.getChildren().get( 0 ); for ( Node property : node.getChildren() ) { if ( property.isSimple() ) { schemaBuilder.addColumn( property.getName() ); } } } } } csvGenerator.setSchema( schemaBuilder.build() ); }
/**
 * Accessor for creating a "default" CSV schema instance, with following
 * settings:
 *<ul>
 * <li>Does NOT use header line
 * </li>
 * <li>Uses double quotes ('"') for quoting of field values (if necessary)
 * </li>
 * <li>Uses comma (',') as the field separator
 * </li>
 * <li>Uses Unix linefeed ('\n') as row separator
 * </li>
 * <li>Does NOT use any escape characters
 * </li>
 * <li>Does NOT have any columns defined
 * </li>
 * </ul>
 */
public static CsvSchema emptySchema() {
    // A freshly-created builder carries exactly the defaults listed above.
    Builder defaults = builder();
    return defaults.build();
}
/**
 * Builds the CSV header.
 *
 * @param responseData Data object containing all the result information
 *
 * @return The CSV schema with the header
 */
public CsvSchema buildCsvHeaders(ResponseData responseData) {
    CsvSchema.Builder builder = CsvSchema.builder();

    // Header order: the timestamp column first, then every dimension
    // column header, then one column per requested API metric.
    builder.addColumn("dateTime");
    responseData.getRequestedApiDimensionFields()
            .entrySet().stream()
            .flatMap(responseData::generateDimensionColumnHeaders)
            .forEachOrdered(builder::addColumn);
    responseData.getApiMetricColumns().stream()
            .map(MetricColumn::getName)
            .forEachOrdered(builder::addColumn);

    return builder.setUseHeader(true).build();
}
}
private MappingIterator<TruckEvent> readTruckEventsFromCsv(InputStream csvStream) throws IOException { CsvSchema bootstrap = CsvSchema.builder() // driverId,truckId,eventTime,eventType,longitude,latitude,eventKey,correlationId,driverName,routeId,routeName,eventDate .addColumn("driverId", CsvSchema.ColumnType.NUMBER) .addColumn("truckId", CsvSchema.ColumnType.NUMBER) .addColumn("eventTime", CsvSchema.ColumnType.STRING) .addColumn("eventType", CsvSchema.ColumnType.STRING) .addColumn("longitude", CsvSchema.ColumnType.NUMBER) .addColumn("latitude", CsvSchema.ColumnType.NUMBER) .addColumn("eventKey", CsvSchema.ColumnType.STRING) .addColumn("correlationId", CsvSchema.ColumnType.NUMBER) .addColumn("driverName", CsvSchema.ColumnType.STRING) .addColumn("routeId", CsvSchema.ColumnType.NUMBER) .addColumn("routeName", CsvSchema.ColumnType.STRING) .addColumn("eventDate", CsvSchema.ColumnType.STRING) // .addColumn("miles", CsvSchema.ColumnType.NUMBER) .build().withHeader(); CsvMapper csvMapper = new CsvMapper(); return csvMapper.readerFor(TruckEvent.class).with(bootstrap).readValues(csvStream); }
@Test public final void testRunSorterFirstColumn() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput1, 1, 2, 4, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0), true); } verifyCSV(testOutput, 1, 2, 4, mapper, schema); }