/**
 * Computes a date shifted by the given number of years (relative-year calculation).
 * <p>
 * {@code amount} may be negative to shift into the past. Delegates to
 * commons-lang {@code DateUtils.addYears}, which returns a new {@code Date}
 * and does not modify the argument.
 *
 * @param date   the date to shift
 * @param amount number of years to add (may be negative)
 * @return a new {@code Date} shifted by {@code amount} years
 * @author hedan
 * @version 4.0
 */
public static Date addYears(Date date, int amount) {
    return org.apache.commons.lang.time.DateUtils.addYears(date, amount);
}
/**
 * Returns the enclosing filter's current clock time shifted by the given
 * number of years.
 *
 * @param i number of years to add (negative values shift into the past)
 * @return the shifted date
 */
public Date diff(Integer i) {
    final Date now = AbstractDateParamFilter.this.clock.current();
    final int years = i.intValue();
    return DateUtils.addYears(now, years);
}
/**
 * Decodes a 12-byte SGIP sequence-number buffer into a {@link SequenceNumber}.
 * Layout: bytes 0-3 = node id, bytes 4-7 = timestamp digits (no year),
 * bytes 8-11 = sequence id; each field is read as an unsigned 32-bit value.
 */
public static SequenceNumber bytes2SequenceN(byte[] bytes) {
    // Bytes 0-3: node id; mask with 0xFFFFFFFFL to treat the int as unsigned.
    long nodeIds = ByteBuffer.wrap(Arrays.copyOfRange(bytes, 0, 4)).getInt() & 0xFFFFFFFFL;
    // The SGIP protocol timestamp carries no year information, so prepend the
    // current year before parsing.
    String year = DateFormatUtils.format(CachedMillisecondClock.INS.now(), "yyyy");
    // Bytes 4-7 are zero-padded to 10 digits and appended to the year
    // (resulting string must match datePattern — defined elsewhere in this file).
    String t = String.format("%1$s%2$010d",year, ByteBuffer.wrap(Arrays.copyOfRange(bytes, 4, 8)).getInt() & 0xFFFFFFFFL);
    Date d ;
    try {
        d = DateUtils.parseDate(t, datePattern);
        // Around year end the reconstructed date may land about a year in the
        // future; a 200-day tolerance absorbs clock skew between hosts, and
        // anything beyond it is rolled back one year.
        // NOTE(review): the symmetric case (date far in the PAST at a year
        // boundary) is not corrected — confirm whether that can occur here.
        if(d.getTime() - CachedMillisecondClock.INS.now() > 86400000L * 200){
            d = DateUtils.addYears(d, -1);
        }
    } catch (ParseException e) {
        // Fall back to "now" when the timestamp cannot be parsed.
        // NOTE(review): printStackTrace swallows the failure; prefer a logger.
        d = new Date();
        e.printStackTrace();
    }
    // Bytes 8-11: sequence id, unsigned.
    long sequenceId = ByteBuffer.wrap(Arrays.copyOfRange(bytes, 8, 12)).getInt() & 0xFFFFFFFFL;
    SequenceNumber sn = new SequenceNumber(d.getTime(),nodeIds,sequenceId);
    return sn;
}
@Override public void truncateLog() { // Get date one year ago Date oneYearAgo = DateUtils.addYears(new Date(), -1); int deletedEntries = 0; log.debug("Deleting admin audit log entries older than {}", oneYearAgo); AdminAuditMessageEntity[] auditMessageEntities; do { // deleting log entries in chunks prevents possible OOM exception in case the log entries contains a huge amount of data // like in https://bitbucket.org/atlassianlabs/automation/issue/23/oom-due-to-verbose-error-logging-to-the auditMessageEntities = ao.find(AdminAuditMessageEntity.class, Query.select().where("DATE < ?", oneYearAgo).limit(MAX_RESULTS)); for (AdminAuditMessageEntity oldEntity : auditMessageEntities) { ao.delete(oldEntity); deletedEntries++; } log.debug("Deleted {} audit log entries", auditMessageEntities.length); } while (auditMessageEntities.length == MAX_RESULTS); log.debug("Deleted audit log entries {}", deletedEntries); }
/**
 * Verifies {@code getFloorDate}: stepping forward from 2015-01-01 in
 * 111111111 ms increments stays floored to that year for 284 steps, then the
 * next step rolls the yearly floor over; also checks the weekly floor of the
 * start date.
 */
@Test
public void testFloorDate() throws ParseException {
    final Date yearStart = ABSDATE_PARSER.get().parse("2015-01-01-00:00:00,000");
    Date cursor = yearStart;
    int step = 0;
    while (step < 284) {
        assertEquals(getFloorDate(cursor, YEARLY), yearStart);
        cursor = addMilliseconds(cursor, 111111111);
        step++;
    }
    assertEquals(getFloorDate(cursor, YEARLY), DateUtils.addYears(yearStart, 1));
    assertEquals(getFloorDate(yearStart, WEEKLY), ABSDATE_PARSER.get().parse("2014-12-28-00:00:00,000"));
}
/**
 * Builds a {@link SequenceNumber} from an SGIP {@code MsgId}: copies the gate
 * id as the node id, reconstructs the timestamp (which carries no year in the
 * SGIP protocol), and takes over the sequence id.
 *
 * @param msgIds the message id to convert
 */
public SequenceNumber(MsgId msgIds) {
    String strmsgid = msgIds.toString();
    setNodeIds(msgIds.getGateId());
    // The SGIP protocol timestamp carries no year information, so prepend the
    // current year before parsing.
    String year = DateFormatUtils.format(CachedMillisecondClock.INS.now(), "yyyy");
    // First 10 characters of the msg id string are the timestamp digits
    // (resulting string must match datePattern — defined elsewhere in this file).
    String t = String.format("%1$s%2$s",year, strmsgid.substring(0, 10));
    Date d;
    try {
        d = DateUtils.parseDate(t, datePattern);
        // Around year end the reconstructed date may land about a year in the
        // future; a 200-day tolerance absorbs clock skew between hosts, and
        // anything beyond it is rolled back one year.
        if(d.getTime() - CachedMillisecondClock.INS.now() > 86400000L * 200){
            d = DateUtils.addYears(d, -1);
        }
    } catch (ParseException e) {
        // Fall back to "now" when the timestamp cannot be parsed.
        // NOTE(review): printStackTrace swallows the failure; prefer a logger.
        e.printStackTrace();
        d = new Date();
    }
    setTimestamp(d.getTime());
    setSequenceId(msgIds.getSequenceId());
}
/**
Date nextYear = DateUtils.addYears(startDate, 1); Mutation m = new Mutation(split); if (addShardMarkers) {
/**
 * Generates a sub query which provides a view of the data where each row is
 * ranked by the start date, then end date of the data value period, latest first.
 * The data is partitioned by data element, org unit, category option combo and
 * attribute option combo. A column {@code pe_rank} defines the rank. Only data
 * within {@code LAST_VALUE_YEARS_OFFSET} years relative to the period end date
 * is included.
 *
 * @param params the data query parameters supplying the latest end date
 * @return a parenthesized SQL sub query string
 */
private String getLastValueSubquerySql( DataQueryParams params )
{
    Date latest = params.getLatestEndDate();
    Date earliest = addYears( latest, LAST_VALUE_YEARS_OFFSET );
    List<String> columns = getLastValueSubqueryQuotedColumns( params );
    String fromSourceClause = getFromSourceClause( params ) + " as " + ANALYTICS_TBL_ALIAS;

    // Use a StringBuilder instead of repeated String concatenation in a loop.
    StringBuilder sql = new StringBuilder( "(select " );

    for ( String col : columns )
    {
        sql.append( col ).append( "," );
    }

    sql.append( "row_number() over (" )
        .append( "partition by dx, ou, co, ao " )
        .append( "order by peenddate desc, pestartdate desc) as pe_rank " )
        .append( "from " ).append( fromSourceClause ).append( " " )
        .append( "where pestartdate >= '" ).append( getMediumDateString( earliest ) ).append( "' " )
        .append( "and pestartdate <= '" ).append( getMediumDateString( latest ) ).append( "' " )
        .append( "and (value is not null or textvalue is not null))" );

    return sql.toString();
}
@Test public void testReleaseEntityComparator(){ ReleaseComparator comparator = new ReleaseComparator(); //release today ReleaseEntity currentRelease = new ReleaseEntity(); currentRelease.setInstallationInProductionAt(new Date()); //release in a year ReleaseEntity laterRelease = new ReleaseEntity(); laterRelease.setInstallationInProductionAt(DateUtils.addYears(new Date(), 1)); Assert.assertEquals(-1, comparator.compare(currentRelease, laterRelease)); }
/**
 * Generates a sub query which provides a view of the data where each row is
 * ranked by the execution date, latest first. The events are partitioned by
 * org unit and attribute option combo. A column {@code pe_rank} defines the
 * rank. Only data within {@code LAST_VALUE_YEARS_OFFSET} years relative to the
 * period end date is included.
 *
 * @param params the event query parameters; must have a value dimension
 * @return a parenthesized SQL sub query string
 */
private String getLastValueSubquerySql( EventQueryParams params )
{
    Assert.isTrue( params.hasValueDimension(), "Last value aggregation type query must have value dimension" );

    Date latest = params.getLatestEndDate();
    Date earliest = addYears( latest, LAST_VALUE_YEARS_OFFSET );
    String valueItem = quote( params.getValue().getDimensionItem() );
    List<String> columns = getLastValueSubqueryQuotedColumns( params );
    String alias = getPeriodAlias( params );
    String timeCol = quote( alias, params.getTimeFieldAsFieldFallback() );

    // Use a StringBuilder instead of repeated String concatenation in a loop.
    StringBuilder sql = new StringBuilder( "(select " );

    for ( String col : columns )
    {
        sql.append( col ).append( "," );
    }

    sql.append( "row_number() over (" )
        .append( "partition by ou, ao " )
        .append( "order by " ).append( timeCol ).append( " desc) as pe_rank " )
        .append( "from " ).append( params.getTableName() ).append( " " )
        .append( "where " ).append( timeCol ).append( " >= '" ).append( getMediumDateString( earliest ) ).append( "' " )
        .append( "and " ).append( timeCol ).append( " <= '" ).append( getMediumDateString( latest ) ).append( "' " )
        .append( "and " ).append( valueItem ).append( " is not null)" );

    return sql.toString();
}
firstRelease.setInstallationInProductionAt(DateUtils.addYears(new Date(), -1)); firstReleaseResource.setRelease(firstRelease);
@Test public void testMigrationNonTriggeredInterruptingTimerEvent() { // given Date futureDueDate = DateUtils.addYears(ClockUtil.getCurrentTime(), 1); BpmnModelInstance model = createModel(true, sdf.format(futureDueDate)); ProcessDefinition sourceProcessDefinition = testHelper.deployAndGetDefinition(model); ProcessDefinition targetProcessDefinition = testHelper.deployAndGetDefinition(model); ProcessInstance processInstance = runtimeService.startProcessInstanceById(sourceProcessDefinition.getId()); MigrationPlan migrationPlan = runtimeService.createMigrationPlan(sourceProcessDefinition.getId(), targetProcessDefinition.getId()) .mapEqualActivities() .build(); // when testHelper.migrateProcessInstance(migrationPlan, processInstance); // then List<Job> list = managementService.createJobQuery().list(); assertEquals(1, list.size()); assertEquals(0, taskService.createTaskQuery().taskDefinitionKey("afterTimer").count()); assertEquals(1, taskService.createTaskQuery().taskDefinitionKey("userTask").count()); }
@Test public void testMigrationTwoToOneNonInterruptingTimerEvents() { Date futureDueDate = DateUtils.addYears(ClockUtil.getCurrentTime(), 1); BpmnModelInstance sourceModel = Bpmn.createExecutableProcess() .startEvent("startEvent")
@Test public void testMigrationTwoNonInterruptingTimerEvents() { Date futureDueDate = DateUtils.addYears(ClockUtil.getCurrentTime(), 1); BpmnModelInstance model = Bpmn.createExecutableProcess() .startEvent("startEvent")