.setUseHeader(false);
/**
 * Builds the CSV header schema for a result set.
 *
 * Column order: the "dateTime" column first, then the dimension columns
 * produced for each requested dimension field (in entry iteration order),
 * then one column per metric.
 *
 * @param responseData Data object containing all the result information
 * @return The CSV schema with the header enabled
 */
public CsvSchema buildCsvHeaders(ResponseData responseData) {
    CsvSchema.Builder builder = CsvSchema.builder();

    // Leading timestamp column.
    builder.addColumn("dateTime");

    // Dimension columns, expanded per requested field.
    responseData.getRequestedApiDimensionFields().entrySet().stream()
            .flatMap(responseData::generateDimensionColumnHeaders)
            .forEachOrdered(builder::addColumn);

    // Metric columns.
    responseData.getApiMetricColumns().stream()
            .map(MetricColumn::getName)
            .forEachOrdered(builder::addColumn);

    return builder.setUseHeader(true).build();
}
}
/**
 * Builds the CSV header.
 *
 * @param columns Columns to use for building the header, in output order
 * @return CSV schema with the header enabled
 */
private static CsvSchema buildCsvHeaders(List<String> columns) {
    CsvSchema.Builder builder = CsvSchema.builder();
    // Plain ordered traversal — no need for an intermediate stream here.
    columns.forEach(builder::addColumn);
    return builder.setUseHeader(true).build();
}
}
.setUseHeader(true) .addColumn("scenarioName") .addColumn("scenarioLoop", CsvSchema.ColumnType.NUMBER)
@Test public final void testWriteFullCode() throws Exception { List<String> headers = Arrays.asList("TestHeader1", "TestHeader2"); List<List<String>> dataSource = Arrays.asList(); // Or alternatively, // List<List<String>> dataSource = Arrays.asList(Arrays.asList("TestValue1", "TestValue2")); java.io.Writer writer = new StringWriter(); CsvSchema.Builder builder = CsvSchema.builder(); for (String nextHeader : headers) { builder = builder.addColumn(nextHeader); } CsvSchema schema = builder.setUseHeader(true).build(); try (SequenceWriter csvWriter = new CsvMapper().writerWithDefaultPrettyPrinter().with(schema).forType( List.class).writeValues(writer);) { for (List<String> nextRow : dataSource) { csvWriter.write(nextRow); } // Check to see whether dataSource is empty // and if so write a single empty list to trigger header output if (dataSource.isEmpty()) { csvWriter.write(Arrays.asList()); } } System.out.println(writer.toString()); }
@Override protected void startSerialize( RootNode rootNode, OutputStream outputStream ) throws Exception { csvGenerator = CSV_FACTORY.createGenerator( outputStream ); CsvSchema.Builder schemaBuilder = CsvSchema.builder() .setUseHeader( true ); // build schema for ( Node child : rootNode.getChildren() ) { if ( child.isCollection() ) { if ( !child.getChildren().isEmpty() ) { Node node = child.getChildren().get( 0 ); for ( Node property : node.getChildren() ) { if ( property.isSimple() ) { schemaBuilder.addColumn( property.getName() ); } } } } } csvGenerator.setSchema( schemaBuilder.build() ); }
@Test public final void testRunSorterTSVMultipleHeaderLines() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build(); verifyCSV(testInput5, 10, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput5, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 10, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 10, 2, 5, mapper, schema); }
@Test public final void testRunSorterSecondColumn() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput1, 1, 2, 4, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(1), true); } verifyCSV(testOutput, 1, 2, 4, mapper, schema); }
@Test public final void testRunSorterTSV() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build(); verifyCSV(testInput4, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput4, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterSecondColumnThenFirst() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput3, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(1, 0), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterFirstColumnThenSecond() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput3, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput3, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterFirstColumn() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).build(); verifyCSV(testInput1, 1, 2, 4, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput1, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0), true); } verifyCSV(testOutput, 1, 2, 4, mapper, schema); }
/**
 * Deserializes a CSV file from the resources directory. (Due to the flat CSV
 * format, Jackson does not support deeply nested structures for CSV
 * deserialization — nested classes are not supported.)
 *
 * @param name      resource file name
 * @param separator column separator (first character is used)
 * @param c         target row type
 * @return the parsed rows, or {@code null} when the resource is missing or reading fails
 */
public static <V> List<V> fromCsvRecource(String name, String separator, Class<V> c) {
    try (InputStream inputStream = getResourceStream(name);
            InputStreamReader reader = getResourceReader(inputStream)) {
        if (reader == null) {
            return null;
        }
        CsvSchema schema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        return (List<V>) csvMapper.reader(schema).forType(c).readValues(reader).readAll();
    } catch (IOException e) {
        log.error("jackson from csv recource error, name: {}, type: {}", name, c, e);
        return null;
    }
}
/**
 * Deserializes a CSV file. (Due to the flat CSV format, Jackson does not
 * support deeply nested structures for CSV deserialization — nested classes
 * are not supported.)
 *
 * @param path      file path
 * @param separator column separator (first character is used)
 * @param c         target row type
 * @return the parsed rows, or {@code null} when reading fails
 */
public static <V> List<V> fromCsvFile(String path, String separator, Class<V> c) {
    try {
        CsvSchema schema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        File csvFile = new File(path);
        return (List<V>) csvMapper.reader(schema).forType(c).readValues(csvFile).readAll();
    } catch (IOException e) {
        log.error("jackson from csv error, path: {}, type: {}", path, c, e);
        return null;
    }
}