QuotaProtos.TimedQuota timedQuota = proto.getTimedQuota(); builder.append(", LIMIT => "); if (timedQuota.hasSoftLimit()) { switch (getThrottleType()) { case REQUEST_NUMBER: case WRITE_NUMBER: case READ_NUMBER: builder.append(String.format("%dreq", timedQuota.getSoftLimit())); break; case REQUEST_SIZE: case WRITE_SIZE: case READ_SIZE: builder.append(sizeToString(timedQuota.getSoftLimit())); break; } else if (timedQuota.hasShare()) { builder.append(String.format("%.2f%%", timedQuota.getShare())); builder.append(timeToString(ProtobufUtil.toTimeUnit(timedQuota.getTimeUnit()))); if (timedQuota.hasScope()) { builder.append(", SCOPE => "); builder.append(timedQuota.getScope().toString());
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = timedQuota_.toBuilder();
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = reqNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = reqSize_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = writeNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000008) == 0x00000008)) { subBuilder = writeSize_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000010) == 0x00000010)) { subBuilder = readNum_.toBuilder(); org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000020) == 0x00000020)) { subBuilder = readSize_.toBuilder();
org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.Builder subBuilder = null; if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = timedQuota_.toBuilder();
assertEquals(t.getSoftLimit(), limit); assertEquals(t.getTimeUnit(), ProtobufUtil.toProtoTimeUnit(tu));
result = result && (hasTimeUnit() == other.hasTimeUnit()); if (hasTimeUnit()) { result = result && timeUnit_ == other.timeUnit_; result = result && (hasSoftLimit() == other.hasSoftLimit()); if (hasSoftLimit()) { result = result && (getSoftLimit() == other.getSoftLimit()); result = result && (hasShare() == other.hasShare()); if (hasShare()) { result = result && ( java.lang.Float.floatToIntBits(getShare()) == java.lang.Float.floatToIntBits( other.getShare())); result = result && (hasScope() == other.hasScope()); if (hasScope()) { result = result && scope_ == other.scope_;
QuotaProtos.TimedQuota timedQuota = proto.getTimedQuota(); builder.append(", LIMIT => "); if (timedQuota.hasSoftLimit()) { switch (getThrottleType()) { case REQUEST_NUMBER: case WRITE_NUMBER: case READ_NUMBER: builder.append(String.format("%dreq", timedQuota.getSoftLimit())); break; case REQUEST_SIZE: case WRITE_SIZE: case READ_SIZE: builder.append(sizeToString(timedQuota.getSoftLimit())); break; } else if (timedQuota.hasShare()) { builder.append(String.format("%.2f%%", timedQuota.getShare())); builder.append(timeToString(ProtobufUtil.toTimeUnit(timedQuota.getTimeUnit()))); if (timedQuota.hasScope()) { builder.append(", SCOPE => "); builder.append(timedQuota.getScope().toString());
GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl(null, null, ns, quota); QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() .setSoftLimit(500).build(); assertTrue(throttle.hasReqNum()); QuotaProtos.TimedQuota reqNumQuota = throttle.getReqNum(); assertEquals(REQUEST_THROTTLE.getSoftLimit(), reqNumQuota.getSoftLimit()); assertEquals(writeQuota.getSoftLimit(), writeNumQuota.getSoftLimit());
@Override protected ThrottleSettings merge(QuotaSettings other) throws IOException { if (other instanceof ThrottleSettings) { ThrottleSettings otherThrottle = (ThrottleSettings) other; // Make sure this and the other target the same "subject" validateQuotaTarget(other); QuotaProtos.ThrottleRequest.Builder builder = proto.toBuilder(); if (!otherThrottle.proto.hasType()) { return null; } QuotaProtos.ThrottleRequest otherProto = otherThrottle.proto; if (otherProto.hasTimedQuota()) { if (otherProto.hasTimedQuota()) { validateTimedQuota(otherProto.getTimedQuota()); } if (!proto.getType().equals(otherProto.getType())) { throw new IllegalArgumentException( "Cannot merge a ThrottleRequest for " + proto.getType() + " with " + otherProto.getType()); } QuotaProtos.TimedQuota.Builder timedQuotaBuilder = proto.getTimedQuota().toBuilder(); timedQuotaBuilder.mergeFrom(otherProto.getTimedQuota()); QuotaProtos.ThrottleRequest mergedReq = builder.setTimedQuota( timedQuotaBuilder.build()).build(); return new ThrottleSettings(getUserName(), getTableName(), getNamespace(), mergedReq); } } return this; }
/**
 * Computes (and memoizes) the hash of this message: a running accumulator seeded
 * with the descriptor hash, folding in each field that is explicitly set.
 */
@java.lang.Override
public int hashCode() {
  // Serve the cached value when it has already been computed (0 means "not yet").
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int result = 41;
  result = (19 * result) + getDescriptor().hashCode();
  if (hasTimeUnit()) {
    result = (37 * result) + TIME_UNIT_FIELD_NUMBER;
    result = (53 * result) + timeUnit_;
  }
  if (hasSoftLimit()) {
    result = (37 * result) + SOFT_LIMIT_FIELD_NUMBER;
    result = (53 * result)
        + org.apache.hbase.thirdparty.com.google.protobuf.Internal.hashLong(getSoftLimit());
  }
  if (hasShare()) {
    result = (37 * result) + SHARE_FIELD_NUMBER;
    result = (53 * result) + java.lang.Float.floatToIntBits(getShare());
  }
  if (hasScope()) {
    result = (37 * result) + SCOPE_FIELD_NUMBER;
    result = (53 * result) + scope_;
  }
  result = (29 * result) + unknownFields.hashCode();
  memoizedHashCode = result;
  return result;
}
/**
 * <code>optional .hbase.pb.TimedQuota timed_quota = 2;</code>
 */
public Builder mergeTimedQuota(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (timedQuotaBuilder_ != null) {
    // A live nested builder owns the field; merge straight into it.
    timedQuotaBuilder_.mergeFrom(value);
  } else {
    // Field-merge only when a set, non-default message is already present;
    // otherwise adopt the incoming message wholesale.
    boolean hasExisting = ((bitField0_ & 0x00000002) == 0x00000002)
        && timedQuota_ != null
        && timedQuota_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
    if (hasExisting) {
      timedQuota_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(timedQuota_)
            .mergeFrom(value).buildPartial();
    } else {
      timedQuota_ = value;
    }
    onChanged();
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
/**
 * <code>optional .hbase.pb.TimedQuota read_size = 6;</code>
 */
public Builder mergeReadSize(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (readSizeBuilder_ != null) {
    // A live nested builder owns the field; merge straight into it.
    readSizeBuilder_.mergeFrom(value);
  } else {
    // Field-merge only when a set, non-default message is already present;
    // otherwise adopt the incoming message wholesale.
    boolean hasExisting = ((bitField0_ & 0x00000020) == 0x00000020)
        && readSize_ != null
        && readSize_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
    if (hasExisting) {
      readSize_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(readSize_)
            .mergeFrom(value).buildPartial();
    } else {
      readSize_ = value;
    }
    onChanged();
  }
  bitField0_ |= 0x00000020;
  return this;
}
/**
/**
 * <code>optional .hbase.pb.TimedQuota req_num = 1;</code>
 */
public Builder mergeReqNum(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (reqNumBuilder_ != null) {
    // A live nested builder owns the field; merge straight into it.
    reqNumBuilder_.mergeFrom(value);
  } else {
    // Field-merge only when a set, non-default message is already present;
    // otherwise adopt the incoming message wholesale.
    boolean hasExisting = ((bitField0_ & 0x00000001) == 0x00000001)
        && reqNum_ != null
        && reqNum_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
    if (hasExisting) {
      reqNum_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(reqNum_)
            .mergeFrom(value).buildPartial();
    } else {
      reqNum_ = value;
    }
    onChanged();
  }
  bitField0_ |= 0x00000001;
  return this;
}
/**
/**
 * <code>optional .hbase.pb.TimedQuota read_num = 5;</code>
 */
public Builder mergeReadNum(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (readNumBuilder_ != null) {
    // A live nested builder owns the field; merge straight into it.
    readNumBuilder_.mergeFrom(value);
  } else {
    // Field-merge only when a set, non-default message is already present;
    // otherwise adopt the incoming message wholesale.
    boolean hasExisting = ((bitField0_ & 0x00000010) == 0x00000010)
        && readNum_ != null
        && readNum_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
    if (hasExisting) {
      readNum_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(readNum_)
            .mergeFrom(value).buildPartial();
    } else {
      readNum_ = value;
    }
    onChanged();
  }
  bitField0_ |= 0x00000010;
  return this;
}
/**
/**
 * <code>optional .hbase.pb.TimedQuota write_size = 4;</code>
 */
public Builder mergeWriteSize(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (writeSizeBuilder_ != null) {
    // A live nested builder owns the field; merge straight into it.
    writeSizeBuilder_.mergeFrom(value);
  } else {
    // Field-merge only when a set, non-default message is already present;
    // otherwise adopt the incoming message wholesale.
    boolean hasExisting = ((bitField0_ & 0x00000008) == 0x00000008)
        && writeSize_ != null
        && writeSize_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
    if (hasExisting) {
      writeSize_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(writeSize_)
            .mergeFrom(value).buildPartial();
    } else {
      writeSize_ = value;
    }
    onChanged();
  }
  bitField0_ |= 0x00000008;
  return this;
}
/**
/**
 * <code>optional .hbase.pb.TimedQuota write_num = 3;</code>
 */
public Builder mergeWriteNum(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (writeNumBuilder_ != null) {
    // A live nested builder owns the field; merge straight into it.
    writeNumBuilder_.mergeFrom(value);
  } else {
    // Field-merge only when a set, non-default message is already present;
    // otherwise adopt the incoming message wholesale.
    boolean hasExisting = ((bitField0_ & 0x00000004) == 0x00000004)
        && writeNum_ != null
        && writeNum_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
    if (hasExisting) {
      writeNum_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(writeNum_)
            .mergeFrom(value).buildPartial();
    } else {
      writeNum_ = value;
    }
    onChanged();
  }
  bitField0_ |= 0x00000004;
  return this;
}
/**
/**
 * <code>optional .hbase.pb.TimedQuota req_size = 2;</code>
 */
public Builder mergeReqSize(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota value) {
  if (reqSizeBuilder_ != null) {
    // A live nested builder owns the field; merge straight into it.
    reqSizeBuilder_.mergeFrom(value);
  } else {
    // Field-merge only when a set, non-default message is already present;
    // otherwise adopt the incoming message wholesale.
    boolean hasExisting = ((bitField0_ & 0x00000002) == 0x00000002)
        && reqSize_ != null
        && reqSize_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance();
    if (hasExisting) {
      reqSize_ =
        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.newBuilder(reqSize_)
            .mergeFrom(value).buildPartial();
    } else {
      reqSize_ = value;
    }
    onChanged();
  }
  bitField0_ |= 0x00000002;
  return this;
}
/**
@Test public void testMergeThrottle() throws IOException { QuotaProtos.Quotas quota = QuotaProtos.Quotas.newBuilder() .setThrottle(THROTTLE).build(); QuotaProtos.TimedQuota writeQuota = REQUEST_THROTTLE.toBuilder() .setSoftLimit(500).build(); // Unset the req throttle, set a write throttle QuotaProtos.ThrottleRequest writeThrottle = QuotaProtos.ThrottleRequest.newBuilder() .setTimedQuota(writeQuota).setType(QuotaProtos.ThrottleType.WRITE_NUMBER).build(); GlobalQuotaSettingsImpl settings = new GlobalQuotaSettingsImpl("joe", null, null, quota); GlobalQuotaSettingsImpl merged = settings.merge( new ThrottleSettings("joe", null, null, writeThrottle)); QuotaProtos.Throttle mergedThrottle = merged.getThrottleProto(); // Verify the request throttle is in place assertTrue(mergedThrottle.hasReqNum()); QuotaProtos.TimedQuota actualReqNum = mergedThrottle.getReqNum(); assertEquals(REQUEST_THROTTLE.getSoftLimit(), actualReqNum.getSoftLimit()); // Verify the write throttle is in place assertTrue(mergedThrottle.hasWriteNum()); QuotaProtos.TimedQuota actualWriteNum = mergedThrottle.getWriteNum(); assertEquals(writeQuota.getSoftLimit(), actualWriteNum.getSoftLimit()); }
/**
 * Builds a protocol buffer TimedQuota from the given limit, time unit and scope.
 *
 * @param limit the allowed number of request/data per timeUnit
 * @param timeUnit the limit time unit
 * @param scope the quota scope
 * @return the protocol buffer TimedQuota
 */
public static QuotaProtos.TimedQuota toTimedQuota(final long limit, final TimeUnit timeUnit,
    final QuotaScope scope) {
  QuotaProtos.TimedQuota.Builder quotaBuilder = QuotaProtos.TimedQuota.newBuilder();
  quotaBuilder.setSoftLimit(limit);
  // Translate the client-side enums into their protobuf counterparts.
  quotaBuilder.setTimeUnit(toProtoTimeUnit(timeUnit));
  quotaBuilder.setScope(toProtoQuotaScope(scope));
  return quotaBuilder.build();
}
/**
 * Copies every explicitly-set field of {@code other} into this builder,
 * leaving unset fields untouched, and carries over its unknown fields.
 */
public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota other) {
  // Merging the default instance is a no-op.
  if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota.getDefaultInstance()) {
    return this;
  }
  if (other.hasTimeUnit()) {
    setTimeUnit(other.getTimeUnit());
  }
  if (other.hasSoftLimit()) {
    setSoftLimit(other.getSoftLimit());
  }
  if (other.hasShare()) {
    setShare(other.getShare());
  }
  if (other.hasScope()) {
    setScope(other.getScope());
  }
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}