/**
 * Fence any previous writers, and obtain a unique epoch number
 * for write-access to the journal nodes.
 *
 * @return the per-logger responses from the quorum that accepted the
 *         newly established epoch
 * @throws IOException if a write quorum cannot be reached in either phase
 */
Map<AsyncLogger, NewEpochResponseProto> createNewUniqueEpoch()
    throws IOException {
  // An epoch may be established at most once per writer instance.
  Preconditions.checkState(!loggers.isEpochEstablished(),
      "epoch already created");

  // Phase 1: ask each journal node for the highest epoch it has promised.
  Map<AsyncLogger, GetJournalStateResponseProto> promises =
      loggers.waitForWriteQuorum(loggers.getJournalState(),
          getJournalStateTimeoutMs, "getJournalState()");

  // Fold the quorum's promises down to the largest one. Long.MIN_VALUE
  // seeds the reduction; the quorum wait guarantees responses exist.
  long highestPromised = promises.values().stream()
      .mapToLong(GetJournalStateResponseProto::getLastPromisedEpoch)
      .reduce(Long.MIN_VALUE, Math::max);
  assert highestPromised >= 0;

  // Phase 2: propose one greater than every promise seen, which fences
  // off any earlier writer, then record the epoch locally on success.
  long proposedEpoch = highestPromised + 1;
  Map<AsyncLogger, NewEpochResponseProto> responses =
      loggers.waitForWriteQuorum(loggers.newEpoch(nsInfo, proposedEpoch),
          newEpochTimeoutMs, "newEpoch(" + proposedEpoch + ")");
  loggers.setEpoch(proposedEpoch);
  return responses;
}
/**
 * Computes and memoizes a hash code mirroring the field structure of
 * {@code equals()}: each present optional field contributes its field
 * number and value hash. NOTE(review): this is protobuf-generated code —
 * the multiplier constants and field order must not be edited by hand,
 * or the hash would diverge from regenerated sources.
 */
@java.lang.Override public int hashCode() {
  // 0 doubles as the "not yet computed" sentinel; a message that truly
  // hashes to 0 is simply recomputed on every call.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasLastPromisedEpoch()) {
    hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getLastPromisedEpoch());
  }
  if (hasHttpPort()) {
    hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
    hash = (53 * hash) + getHttpPort();
  }
  if (hasFromURL()) {
    hash = (37 * hash) + FROMURL_FIELD_NUMBER;
    hash = (53 * hash) + getFromURL().hashCode();
  }
  // Unknown fields participate so that byte-identical messages hash alike.
  hash = (29 * hash) + getUnknownFields().hashCode();
  // Unsynchronized caching — presumably safe because generated messages
  // are immutable after build; TODO confirm against the generator version.
  memoizedHashCode = hash;
  return hash;
}
/**
 * Field-by-field equality following the protobuf-generated contract:
 * two messages are equal iff every optional field agrees on presence
 * and, when present, on value, and their unknown field sets match.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto that =
      (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;

  // Presence bits must agree before the values themselves are compared.
  if (hasLastPromisedEpoch() != that.hasLastPromisedEpoch()) {
    return false;
  }
  if (hasLastPromisedEpoch()
      && getLastPromisedEpoch() != that.getLastPromisedEpoch()) {
    return false;
  }
  if (hasHttpPort() != that.hasHttpPort()) {
    return false;
  }
  if (hasHttpPort() && getHttpPort() != that.getHttpPort()) {
    return false;
  }
  if (hasFromURL() != that.hasFromURL()) {
    return false;
  }
  if (hasFromURL() && !getFromURL().equals(that.getFromURL())) {
    return false;
  }
  return getUnknownFields().equals(that.getUnknownFields());
}
/**
 * Structural equality for this protobuf message: compares presence and
 * value of each optional field, then the unknown-field set, bailing out
 * at the first mismatch.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
    // Defer to the superclass for foreign types (generated convention).
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto o =
      (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;

  // lastPromisedEpoch: presence must match, then value.
  if (hasLastPromisedEpoch() != o.hasLastPromisedEpoch()) {
    return false;
  }
  if (hasLastPromisedEpoch()
      && getLastPromisedEpoch() != o.getLastPromisedEpoch()) {
    return false;
  }
  // httpPort: same pattern.
  if (hasHttpPort() != o.hasHttpPort()) {
    return false;
  }
  if (hasHttpPort() && getHttpPort() != o.getHttpPort()) {
    return false;
  }
  // fromURL: reference type, so value comparison uses equals().
  if (hasFromURL() != o.hasFromURL()) {
    return false;
  }
  if (hasFromURL() && !getFromURL().equals(o.getFromURL())) {
    return false;
  }
  return getUnknownFields().equals(o.getUnknownFields());
}
/**
 * Protobuf-style equality: messages are equal when every optional field
 * has matching presence and value, and unknown fields are identical.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
    return super.equals(obj);
  }
  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto rhs =
      (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;

  // Each field: reject on presence mismatch, then on value mismatch.
  if (hasLastPromisedEpoch() != rhs.hasLastPromisedEpoch()) {
    return false;
  }
  if (hasLastPromisedEpoch()
      && getLastPromisedEpoch() != rhs.getLastPromisedEpoch()) {
    return false;
  }
  if (hasHttpPort() != rhs.hasHttpPort()) {
    return false;
  }
  if (hasHttpPort() && getHttpPort() != rhs.getHttpPort()) {
    return false;
  }
  if (hasFromURL() != rhs.hasFromURL()) {
    return false;
  }
  if (hasFromURL() && !getFromURL().equals(rhs.getFromURL())) {
    return false;
  }
  return getUnknownFields().equals(rhs.getUnknownFields());
}
/**
 * Fence any previous writers, and obtain a unique epoch number
 * for write-access to the journal nodes.
 *
 * @return the responses from the quorum of loggers that acknowledged the
 *         new epoch
 * @throws IOException if a write quorum cannot be reached
 */
Map<AsyncLogger, NewEpochResponseProto> createNewUniqueEpoch()
    throws IOException {
  // Only one epoch may ever be established through this instance.
  Preconditions.checkState(!loggers.isEpochEstablished(),
      "epoch already created");

  // Round 1: collect every journal node's last promised epoch.
  Map<AsyncLogger, GetJournalStateResponseProto> journalStates =
      loggers.waitForWriteQuorum(loggers.getJournalState(),
          getJournalStateTimeoutMs, "getJournalState()");

  long maxSeen = Long.MIN_VALUE;
  for (GetJournalStateResponseProto state : journalStates.values()) {
    maxSeen = Math.max(maxSeen, state.getLastPromisedEpoch());
  }
  assert maxSeen >= 0;

  // Round 2: bid one higher than anything promised so far. A successful
  // quorum here fences out any writer holding an older epoch.
  long newEpoch = maxSeen + 1;
  Map<AsyncLogger, NewEpochResponseProto> acks =
      loggers.waitForWriteQuorum(loggers.newEpoch(nsInfo, newEpoch),
          newEpochTimeoutMs, "newEpoch(" + newEpoch + ")");
  loggers.setEpoch(newEpoch);
  return acks;
}
/**
 * Fence any previous writers, and obtain a unique epoch number
 * for write-access to the journal nodes.
 *
 * @return map from each logger to its response accepting the new epoch
 * @throws IOException if either quorum round fails
 */
Map<AsyncLogger, NewEpochResponseProto> createNewUniqueEpoch()
    throws IOException {
  // Establishing an epoch twice on the same writer is a programming error.
  Preconditions.checkState(!loggers.isEpochEstablished(),
      "epoch already created");

  // First round-trip: learn the highest epoch any journal node promised.
  Map<AsyncLogger, GetJournalStateResponseProto> stateByLogger =
      loggers.waitForWriteQuorum(loggers.getJournalState(),
          getJournalStateTimeoutMs, "getJournalState()");

  // Reduce to the maximum promise; MIN_VALUE is the fold's identity and
  // the quorum wait ensures at least one response was gathered.
  long topPromise = stateByLogger.values().stream()
      .mapToLong(GetJournalStateResponseProto::getLastPromisedEpoch)
      .reduce(Long.MIN_VALUE, Math::max);
  assert topPromise >= 0;

  // Second round-trip: claim topPromise + 1, fencing any older writer,
  // and remember the epoch locally once a quorum accepts it.
  long claimedEpoch = topPromise + 1;
  Map<AsyncLogger, NewEpochResponseProto> quorumResponses =
      loggers.waitForWriteQuorum(loggers.newEpoch(nsInfo, claimedEpoch),
          newEpochTimeoutMs, "newEpoch(" + claimedEpoch + ")");
  loggers.setEpoch(claimedEpoch);
  return quorumResponses;
}
/**
 * Memoized hash over the set fields, consistent with {@code equals()}:
 * every present optional field folds in its field number and value hash.
 * NOTE(review): protobuf-generated — do not hand-edit the constants or
 * field order; regenerate from the .proto instead.
 */
@java.lang.Override public int hashCode() {
  // A cached value of 0 means "not computed yet"; a genuine 0 hash is
  // recomputed each call, which is correct just slightly slower.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  if (hasLastPromisedEpoch()) {
    hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getLastPromisedEpoch());
  }
  if (hasHttpPort()) {
    hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
    hash = (53 * hash) + getHttpPort();
  }
  if (hasFromURL()) {
    hash = (37 * hash) + FROMURL_FIELD_NUMBER;
    hash = (53 * hash) + getFromURL().hashCode();
  }
  // Include unknown fields so wire-identical messages hash identically.
  hash = (29 * hash) + getUnknownFields().hashCode();
  // Unguarded write — presumably a benign race on an immutable message;
  // TODO confirm against the protobuf generator's threading contract.
  memoizedHashCode = hash;
  return hash;
}
/**
 * Returns a hash code covering the same fields as {@code equals()}; the
 * result is cached after the first computation. NOTE(review): this is
 * protobuf-generated code — keep the multipliers and field order exactly
 * as generated.
 */
@java.lang.Override public int hashCode() {
  // Zero is the "unset" sentinel for the memoized value.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int hash = 41;
  hash = (19 * hash) + getDescriptorForType().hashCode();
  // Each present field mixes in its field number, then its value hash.
  if (hasLastPromisedEpoch()) {
    hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
    hash = (53 * hash) + hashLong(getLastPromisedEpoch());
  }
  if (hasHttpPort()) {
    hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
    hash = (53 * hash) + getHttpPort();
  }
  if (hasFromURL()) {
    hash = (37 * hash) + FROMURL_FIELD_NUMBER;
    hash = (53 * hash) + getFromURL().hashCode();
  }
  hash = (29 * hash) + getUnknownFields().hashCode();
  memoizedHashCode = hash;
  return hash;
}
/**
 * Merges every field that is set on {@code other} into this builder;
 * fields unset on {@code other} are left untouched. NOTE(review):
 * protobuf-generated merge logic — presence bits and onChanged() ordering
 * are generator-controlled.
 *
 * @param other message to merge from; a default instance is a no-op
 * @return this builder, for call chaining
 */
public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
  // Merging the default instance would change nothing; return early.
  if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
  if (other.hasLastPromisedEpoch()) {
    setLastPromisedEpoch(other.getLastPromisedEpoch());
  }
  if (other.hasHttpPort()) {
    setHttpPort(other.getHttpPort());
  }
  if (other.hasFromURL()) {
    // 0x00000004 is the presence bit for fromURL_: mark it set, share the
    // backing value, then notify listeners of the mutation.
    bitField0_ |= 0x00000004;
    fromURL_ = other.fromURL_;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
/**
 * Copies each field set on {@code other} into this builder, leaving
 * unset fields alone. NOTE(review): generated protobuf code; edit the
 * .proto and regenerate rather than changing this by hand.
 *
 * @param other source message; merging the default instance is a no-op
 * @return this builder
 */
public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
  // Fast path: nothing to merge from the default instance.
  if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
  if (other.hasLastPromisedEpoch()) {
    setLastPromisedEpoch(other.getLastPromisedEpoch());
  }
  if (other.hasHttpPort()) {
    setHttpPort(other.getHttpPort());
  }
  if (other.hasFromURL()) {
    // Set the fromURL_ presence bit (0x00000004), copy the reference,
    // and signal the change to any nested-builder parents.
    bitField0_ |= 0x00000004;
    fromURL_ = other.fromURL_;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
/**
 * Merges the populated fields of {@code other} into this builder.
 * NOTE(review): protobuf-generated code — field merge order and the raw
 * presence-bit manipulation come from the generator.
 *
 * @param other the message whose set fields are copied in
 * @return this builder, enabling chained calls
 */
public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
  // Default instance carries no data; skip all work.
  if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
  if (other.hasLastPromisedEpoch()) {
    setLastPromisedEpoch(other.getLastPromisedEpoch());
  }
  if (other.hasHttpPort()) {
    setHttpPort(other.getHttpPort());
  }
  if (other.hasFromURL()) {
    // Presence bit 0x00000004 corresponds to fromURL_; set it, adopt the
    // value, and notify observers via onChanged().
    bitField0_ |= 0x00000004;
    fromURL_ = other.fromURL_;
    onChanged();
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}