/**
 * Builds the path of the partitions.json file for the given bucket interval.
 * The interval endpoints are rendered with Joda's basicDateTime format
 * (yyyyMMdd'T'HHmmss.SSSZ), which contains no characters illegal in HDFS paths.
 */
public Path makeSegmentPartitionInfoPath(Interval bucketInterval)
{
  final String start = ISODateTimeFormat.basicDateTime().print(bucketInterval.getStart());
  final String end = ISODateTimeFormat.basicDateTime().print(bucketInterval.getEnd());
  return new Path(StringUtils.format("%s/%s_%s/partitions.json", makeIntermediatePath(), start, end));
}
// Fragment of a formatter-name lookup chain (start and end are outside this view):
// maps both camelCase and snake_case aliases onto Joda-Time's cached ISO "basic" formatters.
formatter = ISODateTimeFormat.basicDate();
} else if ("basicDateTime".equals(input) || "basic_date_time".equals(input)) {
  // yyyyMMdd'T'HHmmss.SSSZ
  formatter = ISODateTimeFormat.basicDateTime();
} else if ("basicDateTimeNoMillis".equals(input) || "basic_date_time_no_millis".equals(input)) {
  // yyyyMMdd'T'HHmmssZ
  formatter = ISODateTimeFormat.basicDateTimeNoMillis();
/** * Due to https://issues.apache.org/jira/browse/HDFS-13 ":" are not allowed in * path names. So we format paths differently for HDFS. */ @Override public String getStorageDir(DataSegment segment, boolean useUniquePath) { // This is only called by HdfsDataSegmentPusher.push(), which will always set useUniquePath to false since any // 'uniqueness' will be applied not to the directory but to the filename along with the shard number. This is done // to avoid performance issues due to excessive HDFS directories. Hence useUniquePath is ignored here and we // expect it to be false. Preconditions.checkArgument( !useUniquePath, "useUniquePath must be false for HdfsDataSegmentPusher.getStorageDir()" ); return JOINER.join( segment.getDataSource(), StringUtils.format( "%s_%s", segment.getInterval().getStart().toString(ISODateTimeFormat.basicDateTime()), segment.getInterval().getEnd().toString(ISODateTimeFormat.basicDateTime()) ), segment.getVersion().replace(':', '_') ); }
/**
 * Registers the date/time layouts this checker recognizes, keyed by their
 * human-readable pattern strings. Joda's cached ISO formatters are used where
 * an exact match exists; the remaining patterns are built explicitly.
 */
public DateTimeFormatChecker()
{
  supportedFormatters.put("yyyyMMdd", ISODateTimeFormat.basicDate());
  supportedFormatters.put("yyyyMMdd'T'HHmmss.SSSZ", ISODateTimeFormat.basicDateTime());
  supportedFormatters.put("yyyyMMdd'T'HHmmssZ", ISODateTimeFormat.basicDateTimeNoMillis());
  supportedFormatters.put("HHmmss.SSSZ", ISODateTimeFormat.basicTime());
  supportedFormatters.put("HHmmssZ", ISODateTimeFormat.basicTimeNoMillis());
  supportedFormatters.put("yyyy-MM-dd", ISODateTimeFormat.date());
  supportedFormatters.put("yyyy-MM-dd HH:mm:ss", DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss"));
  supportedFormatters.put("yyyy-MM-dd HH:mm:ss.SSSSSS", DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSSSSS"));
  // Format of the Greenplum DB NOW() function (presumably the microsecond pattern above).
  supportedFormatters.put("yyyy-MM-dd'T'HH:mm:ss.SSS", ISODateTimeFormat.dateHourMinuteSecondMillis());
  supportedFormatters.put("yyyy-MM-dd'T'HH:mm:ss.SSSZZ", ISODateTimeFormat.dateTime());
  supportedFormatters.put("yyyy-MM-dd'T'HH:mm:ssZZ", ISODateTimeFormat.dateTimeNoMillis());
  supportedFormatters.put("HH:mm:ss.SSS", ISODateTimeFormat.hourMinuteSecondMillis());
  supportedFormatters.put("HH:mm:ss", ISODateTimeFormat.hourMinuteSecond());
  supportedFormatters.put("HH:mm", ISODateTimeFormat.hourMinute());
}
@Override public String getStorageDir(DataSegment dataSegment, boolean useUniquePath) { String seg = JOINER.join( dataSegment.getDataSource(), StringUtils.format( "%s_%s", // Use ISODateTimeFormat.basicDateTime() format, to avoid using colons in file path. dataSegment.getInterval().getStart().toString(ISODateTimeFormat.basicDateTime()), dataSegment.getInterval().getEnd().toString(ISODateTimeFormat.basicDateTime()) ), dataSegment.getVersion().replace(':', '_'), dataSegment.getShardSpec().getPartitionNum(), useUniquePath ? DataSegmentPusher.generateUniquePath() : null ); log.info("DataSegment: [%s]", seg); // Replace colons with underscores, since they are not supported through wasb:// prefix return seg; }
@Nullable
@Override
public String apply(@Nullable DateTime input)
{
  // Joda's DateTimeFormatter.print(ReadableInstant) silently formats the *current*
  // time when handed null; with a @Nullable parameter and return, map null to null
  // instead of producing a spurious "now" timestamp.
  if (input == null) {
    return null;
  }
  return ISODateTimeFormat.basicDateTime().print(input);
}
});
@Nullable
@Override
public String apply(@Nullable DateTime input)
{
  // Joda's DateTimeFormatter.print(ReadableInstant) silently formats the *current*
  // time when handed null; with a @Nullable parameter and return, map null to null
  // instead of producing a spurious "now" timestamp.
  if (input == null) {
    return null;
  }
  return ISODateTimeFormat.basicDateTime().print(input);
}
});
/**
 * Serializes a Date as a JSON string, or as JSON null for a null date.
 * Uses the configured dateFormat when present, otherwise Joda's basicDateTime
 * format (yyyyMMdd'T'HHmmss.SSSZ, printed in the JVM default time zone).
 *
 * NOTE(review): if dateFormat is a java.text.DateFormat it is not thread-safe;
 * confirm this adapter is never shared across threads without synchronization.
 */
@Override
public void write(JsonWriter out, Date date) throws IOException
{
  if (date == null) {
    out.nullValue();
  } else {
    String value;
    if (dateFormat != null) {
      value = dateFormat.format(date);
    } else {
      // Fallback: epoch millis rendered via Joda's cached basicDateTime formatter.
      value = ISODateTimeFormat.basicDateTime().print(date.getTime());
    }
    out.value(value);
  }
}
/**
 * Serializes a Date as a JSON string, or as JSON null for a null date.
 * The configured dateFormat wins when present; otherwise Joda's basicDateTime
 * format (yyyyMMdd'T'HHmmss.SSSZ) is used.
 */
@Override
public void write(JsonWriter out, Date date) throws IOException
{
  if (date == null) {
    out.nullValue();
    return;
  }
  final String value = dateFormat != null
      ? dateFormat.format(date)
      : ISODateTimeFormat.basicDateTime().print(date.getTime());
  out.value(value);
}
/** Dumps the captured iterator trace (if any) at ERROR level for debugging. */
private void logTrace()
{
  final StringBuilder errMsg = new StringBuilder();
  final DateTimeFormatter fmt = ISODateTimeFormat.basicDateTime();
  if (trace == null) {
    errMsg.append("Tracing not enabled.");
  } else {
    errMsg.append("Trace: \n");
    for (TraceEntry traceEntry : trace) {
      // Timestamps are rendered in UTC so entries line up across hosts.
      final String ts = fmt.print(new DateTime(traceEntry.timeMs, DateTimeZone.UTC));
      errMsg.append(ts)
            .append(" -> ")
            .append(traceEntry.nextOrHasNext)
            .append(":")
            .append(traceEntry.label)
            .append("\n");
    }
  }
  log.error(errMsg.toString());
}
/**
 * Builds a weak EntityTag from the given timestamp.
 * The timestamp is formatted in UTC so the tag is identical regardless of each
 * server's default time zone — the original printed via new DateTime(time),
 * which uses the JVM default zone despite the variable being named utcTime.
 */
private static EntityTag asETag(final Date time)
{
  final String utcTime = ISODateTimeFormat.basicDateTime().withZoneUTC().print(time.getTime());
  return new EntityTag(utcTime, true);
}
}
/**
 * Writes the DateTime into the target element as character data,
 * formatted with Joda's basicDateTime format (yyyyMMdd'T'HHmmss.SSSZ).
 */
@NotNull
@Override
public SMOutputElement serialize(
    @NotNull SMOutputElement serializeTo,
    @NotNull DateTime object,
    @Nullable Object context
) throws IOException, XMLStreamException
{
  final String formatted = ISODateTimeFormat.basicDateTime().print(object);
  serializeTo.addCharacters(formatted);
  return serializeTo;
}
@NotNull @Override public DateTime deserialize( @NotNull XMLStreamReader deserializeFrom, @Nullable Object context ) throws IOException, XMLStreamException { String text = getText( deserializeFrom ); try { return ISODateTimeFormat.basicDateTime().parseDateTime( text ); } catch ( IllegalArgumentException ignore ) { //Maybe it is a long return new DateTime( Long.parseLong( text ) ); } } }
/**
 * Builds a weak EntityTag from the given timestamp.
 * The timestamp is formatted in UTC so the tag is identical regardless of each
 * server's default time zone — the original printed via new DateTime(time),
 * which uses the JVM default zone despite the variable being named utcTime.
 */
private static EntityTag asETag(final Date time)
{
  final String utcTime = ISODateTimeFormat.basicDateTime().withZoneUTC().print(time.getTime());
  return new EntityTag(utcTime, true);
}
/**
 * Builds the path of the partitions.json file for the given bucket interval.
 * Endpoints are rendered with basicDateTime (yyyyMMdd'T'HHmmss.SSSZ) so the
 * directory name contains no ':' characters.
 */
public Path makeSegmentPartitionInfoPath(Interval bucketInterval)
{
  final String startStr = ISODateTimeFormat.basicDateTime().print(bucketInterval.getStart());
  final String endStr = ISODateTimeFormat.basicDateTime().print(bucketInterval.getEnd());
  return new Path(String.format("%s/%s_%s/partitions.json", makeIntermediatePath(), startStr, endStr));
}
/**
 * Returns the partitions.json path for the bucket interval, under a directory
 * named "&lt;start&gt;_&lt;end&gt;" in the colon-free basicDateTime format.
 */
public Path makeSegmentPartitionInfoPath(Interval bucketInterval)
{
  final String start = ISODateTimeFormat.basicDateTime().print(bucketInterval.getStart());
  final String end = ISODateTimeFormat.basicDateTime().print(bucketInterval.getEnd());
  final String partitionsPath = StringUtils.format("%s/%s_%s/partitions.json", makeIntermediatePath(), start, end);
  return new Path(partitionsPath);
}
/**
 * Locates the partitions.json for this bucket. Both interval endpoints use the
 * basicDateTime format (yyyyMMdd'T'HHmmss.SSSZ), which is safe for HDFS paths.
 */
public Path makeSegmentPartitionInfoPath(Interval bucketInterval)
{
  final String fileName = StringUtils.format(
      "%s/%s_%s/partitions.json",
      makeIntermediatePath(),
      ISODateTimeFormat.basicDateTime().print(bucketInterval.getStart()),
      ISODateTimeFormat.basicDateTime().print(bucketInterval.getEnd())
  );
  return new Path(fileName);
}
/**
 * Deserializes a java.sql.Date from a JSON string (or JSON null).
 * Uses the configured dateFormat when present, otherwise parses with Joda's
 * basicDateTime format. Malformed input is reported as JsonParseException.
 */
@Override
public java.sql.Date read(JsonReader in) throws IOException
{
  switch (in.peek()) {
    case NULL:
      in.nextNull();
      return null;
    default:
      String date = in.nextString();
      try {
        if (dateFormat != null) {
          return new java.sql.Date(dateFormat.parse(date).getTime());
        }
        // Joda's parseMillis throws IllegalArgumentException (not ParseException)
        // on malformed input; catch it too so callers always see JsonParseException
        // for bad date strings instead of a leaking IllegalArgumentException.
        return new java.sql.Date(ISODateTimeFormat.basicDateTime().parseMillis(date));
      } catch (ParseException | IllegalArgumentException e) {
        throw new JsonParseException(e);
      }
  }
}
}
/**
 * Writes one window's worth of records to a single file on the sink, then
 * commits it under a deterministic output name derived from the window timestamp.
 */
@ProcessElement
public void processElement(ProcessContext c) throws Exception
{
  // Element: the window timestamp plus every key/value record assigned to it.
  KV<Instant, Iterable<KV<K, V>>> kv = c.element();

  // Create a writer on the sink and use it brutally to write all records to one file.
  Sink.Writer<KV<K, V>, ?> writer = sink.createWriteOperation().createWriter(c.getPipelineOptions());
  writer.open(UUID.randomUUID().toString());
  // NOTE(review): if write() throws, the writer is never closed/aborted here —
  // confirm whether the sink cleans up abandoned attempt files, or add try/finally.
  for (KV<K, V> v : kv.getValue()) writer.write(v);

  // Use the write result to move the file to the expected output name.
  Object writeResult = writer.close();
  if (writer instanceof ConfigurableHDFSFileSink.HDFSWriter) {
    String attemptResultName = String.valueOf(writeResult);
    // Both range markers in the final name use the single window timestamp,
    // rendered with basicDateTime (colon-free, HDFS-safe).
    String timeslice = ISODateTimeFormat.basicDateTime().print(kv.getKey().getMillis());
    String resultName = "output-" + timeslice + "-" + timeslice + "-00001-of-00001";
    ((ConfigurableHDFSFileSink.HDFSWriter) writer).commitManually(attemptResultName, resultName);
  }
}
}
@ProcessElement public void processElement(ProcessContext c) throws Exception { KV<Instant, Iterable<KV<K, V>>> kv = c.element(); // Create a writer on the sink and use it brutally to write all records to one file. Sink.Writer<KV<K, V>, ?> writer = sink.createWriteOperation().createWriter(c.getPipelineOptions()); writer.open(UUID.randomUUID().toString()); for (KV<K, V> v : kv.getValue()) writer.write(v); // Use the write result to move the file to the expected output name. Object writeResult = writer.close(); if (writer instanceof ConfigurableHDFSFileSink.HDFSWriter) { String attemptResultName = String.valueOf(writeResult); String timeslice = ISODateTimeFormat.basicDateTime().print(kv.getKey().getMillis()); String resultName = "output-" + timeslice + "-" + timeslice + "-00001-of-00001"; ((ConfigurableHDFSFileSink.HDFSWriter) writer).commitManually(attemptResultName, resultName); } } }