.setColumnSeparator(csvFormat.getDelimiter()) .setLineSeparator(csvFormat.getRecordSeparator())
throw new RakamException("Invalid column separator", BAD_REQUEST); builder.setColumnSeparator(column_seperator.charAt(0));
throw new RakamException("Invalid column separator", BAD_REQUEST); builder.setColumnSeparator(column_seperator.get(0).charAt(0));
public void testDatabindingThirdPartyPojoWithMixinAnnotations() throws JsonProcessingException, IOException { CsvSchema schema = CsvSchema.builder() .setColumnSeparator('\t') .addColumn("geoNameId") .addColumn("name")
@Test public final void testRunSorterTSV() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build(); verifyCSV(testInput4, 1, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput4, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 1, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 1, 2, 5, mapper, schema); }
@Test public final void testRunSorterTSVMultipleHeaderLines() throws Exception { CsvFactory csvFactory = new CsvFactory(); csvFactory.enable(CsvParser.Feature.TRIM_SPACES); // csvFactory.enable(CsvParser.Feature.WRAP_AS_ARRAY); csvFactory.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); CsvMapper mapper = new CsvMapper(csvFactory); mapper.enable(CsvParser.Feature.TRIM_SPACES); // mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY); mapper.configure(JsonParser.Feature.ALLOW_YAML_COMMENTS, true); // mapper.configure(DeserializationFeature.ACCEPT_SINGLE_VALUE_AS_ARRAY, // true); CsvSchema schema = CsvSchema.builder().setUseHeader(false).setColumnSeparator('\t').build(); verifyCSV(testInput5, 10, 2, 5, mapper, schema); try (Reader inputReader = Files.newBufferedReader(testInput5, StandardCharsets.UTF_8)) { CSVSorter.runSorter(inputReader, testOutput, 10, schema, CSVSorter.getComparator(0, 1), true); } verifyCSV(testOutput, 10, 2, 5, mapper, schema); }
/**
 * Deserializes a CSV file from the resources directory.
 * (Because CSV is a flat format, Jackson cannot deserialize deeply nested
 * structures or nested classes from it.)
 *
 * @param name      resource file name
 * @param separator column separator (first character is used)
 * @param c         target element type
 * @return the parsed rows, or {@code null} when the resource is missing or unreadable
 */
public static <V> List<V> fromCsvRecource(String name, String separator, Class<V> c) {
    try (InputStream in = getResourceStream(name);
         InputStreamReader resourceReader = getResourceReader(in)) {
        if (resourceReader == null) {
            return null;
        }
        CsvSchema csvSchema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        return (List<V>) csvMapper.reader(csvSchema)
                .forType(c)
                .readValues(resourceReader)
                .readAll();
    } catch (IOException ex) {
        log.error("jackson from csv recource error, name: {}, type: {}", name, c, ex);
        return null;
    }
}
/**
 * Deserializes a CSV file from the filesystem.
 * (Because CSV is a flat format, Jackson cannot deserialize deeply nested
 * structures or nested classes from it.)
 *
 * @param path      file path
 * @param separator column separator (first character is used)
 * @param c         target element type
 * @return the parsed rows, or {@code null} when the file cannot be read
 */
public static <V> List<V> fromCsvFile(String path, String separator, Class<V> c) {
    try {
        CsvSchema csvSchema = CsvSchema.builder()
                .setColumnSeparator(separator.charAt(0))
                .setUseHeader(true)
                .build();
        return (List<V>) csvMapper.reader(csvSchema)
                .forType(c)
                .readValues(new File(path))
                .readAll();
    } catch (IOException ex) {
        log.error("jackson from csv error, path: {}, type: {}", path, c, ex);
        return null;
    }
}
@Override public void newTable(final String tableName, final List<ColumnInfo> columnsForTable) { currentTableName = tableName; final CsvSchema.Builder builder = CsvSchema.builder(); // Remove quoting of character which applies (somewhat arbitrarily, Tatu???) for string whose length is greater than MAX_QUOTE_CHECK = 24 -- See CVSWriter#_mayNeedQuotes builder.disableQuoteChar(); builder.setColumnSeparator('|'); for (final ColumnInfo columnInfo : columnsForTable) { builder.addColumn(columnInfo.getColumnName(), getColumnTypeFromSqlType(columnInfo.getDataType())); } currentCSVSchema = builder.build(); writer = mapper.writer(currentCSVSchema); shouldWriteHeader = true; }
/**
 * Builds the CSV schema from the configured columns, enables the requested
 * parser features on the mapper, then delegates to the superclass.
 */
@Override
public boolean configure(Properties properties) {
    CsvSchema.Builder schemaBuilder = CsvSchema.builder();
    for (Object column : columns) {
        schemaBuilder.addColumn(column.toString());
    }
    // Feature names are matched case-insensitively against CsvParser.Feature.
    for (Object featureName : features) {
        mapper.enable(CsvParser.Feature.valueOf(
                featureName.toString().toUpperCase(Locale.ENGLISH)));
    }
    schemaBuilder.setColumnSeparator(separator);
    schemaBuilder.setNullValue(nullValue);
    schema = schemaBuilder.build();
    return super.configure(properties);
}
schema.addColumn("timezone", CsvSchema.ColumnType.STRING); schema.addColumn("modification date", CsvSchema.ColumnType.STRING); schema.setColumnSeparator('\t'); schema.setEscapeChar('"');
@Override public void newTable(final String tableName, final List<ColumnInfo> columnsForTable) { currentTableName = tableName; final CsvSchema.Builder builder = CsvSchema.builder(); // Remove quoting of character which applies (somewhat arbitrarily, Tatu???) for string whose length is greater than MAX_QUOTE_CHECK = 24 -- See CVSWriter#_mayNeedQuotes builder.disableQuoteChar(); builder.setColumnSeparator('|'); for (final ColumnInfo columnInfo : columnsForTable) { builder.addColumn(columnInfo.getColumnName(), getColumnTypeFromSqlType(columnInfo.getDataType())); } currentCSVSchema = builder.build(); writer = mapper.writer(currentCSVSchema); shouldWriteHeader = true; }