/**
 * Merges every explicitly-set field of {@code other} into this builder.
 * Proto3 scalar semantics: a field equal to its default (0) is treated as unset
 * and therefore not copied.
 */
public Builder mergeFrom(com.google.cloud.videointelligence.v1beta1.VideoSegment other) {
  // Merging the default instance is a no-op.
  if (other == com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance()) {
    return this;
  }
  long otherStart = other.getStartTimeOffset();
  if (otherStart != 0L) {
    setStartTimeOffset(otherStart);
  }
  long otherEnd = other.getEndTimeOffset();
  if (otherEnd != 0L) {
    setEndTimeOffset(otherEnd);
  }
  this.mergeUnknownFields(other.unknownFields);
  onChanged();
  return this;
}
@java.lang.Override
public int hashCode() {
  // Messages are effectively immutable once built, so the hash is memoized.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  // Standard generated-protobuf mix: seed with the descriptor, then fold in
  // each field as (field number, field value) pairs.
  int h = 41;
  h = (19 * h) + getDescriptor().hashCode();
  h = (37 * h) + START_TIME_OFFSET_FIELD_NUMBER;
  h = (53 * h) + com.google.protobuf.Internal.hashLong(getStartTimeOffset());
  h = (37 * h) + END_TIME_OFFSET_FIELD_NUMBER;
  h = (53 * h) + com.google.protobuf.Internal.hashLong(getEndTimeOffset());
  h = (29 * h) + unknownFields.hashCode();
  memoizedHashCode = h;
  return h;
}
@java.lang.Override
public Builder newBuilderForType() {
  // Delegates to the static factory so instance and static creation paths agree.
  return newBuilder();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.videointelligence.v1beta1.VideoSegment)) {
    return super.equals(obj);
  }
  com.google.cloud.videointelligence.v1beta1.VideoSegment other =
      (com.google.cloud.videointelligence.v1beta1.VideoSegment) obj;
  // Field-by-field comparison with early exit on the first mismatch.
  if (getStartTimeOffset() != other.getStartTimeOffset()) {
    return false;
  }
  if (getEndTimeOffset() != other.getEndTimeOffset()) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
@java.lang.Override
public com.google.cloud.videointelligence.v1beta1.VideoSegment getDefaultInstanceForType() {
  // The shared singleton default instance serves as this type's default value.
  return com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
}
if (!parseUnknownFieldProto3(input, unknownFields, extensionRegistry, tag)) { done = true; } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable();
@java.lang.Override
public com.google.cloud.videointelligence.v1beta1.VideoSegment buildPartial() {
  // Copies the builder's field state into a fresh message without checking
  // required fields (there are none in proto3).
  com.google.cloud.videointelligence.v1beta1.VideoSegment built =
      new com.google.cloud.videointelligence.v1beta1.VideoSegment(this);
  built.startTimeOffset_ = startTimeOffset_;
  built.endTimeOffset_ = endTimeOffset_;
  onBuilt();
  return built;
}
@java.lang.Override
public com.google.cloud.videointelligence.v1beta1.VideoSegment build() {
  com.google.cloud.videointelligence.v1beta1.VideoSegment message = buildPartial();
  // Mirrors the generated contract: an uninitialized message is rejected here.
  if (!message.isInitialized()) {
    throw newUninitializedMessageException(message);
  }
  return message;
}
@java.lang.Override
public int hashCode() {
  // Hash is memoized; messages do not change after construction.
  if (memoizedHashCode != 0) {
    return memoizedHashCode;
  }
  int h = 41;
  h = (19 * h) + getDescriptor().hashCode();
  // The message-typed `segment` field only contributes when present,
  // matching the presence check in equals().
  if (hasSegment()) {
    h = (37 * h) + SEGMENT_FIELD_NUMBER;
    h = (53 * h) + getSegment().hashCode();
  }
  h = (37 * h) + CONFIDENCE_FIELD_NUMBER;
  h = (53 * h) + java.lang.Float.floatToIntBits(getConfidence());
  h = (37 * h) + LEVEL_FIELD_NUMBER;
  h = (53 * h) + level_;
  h = (29 * h) + unknownFields.hashCode();
  memoizedHashCode = h;
  return h;
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.videointelligence.v1beta1.LabelLocation)) {
    return super.equals(obj);
  }
  com.google.cloud.videointelligence.v1beta1.LabelLocation other =
      (com.google.cloud.videointelligence.v1beta1.LabelLocation) obj;
  // Presence of the message-typed field must match before its value is compared.
  if (hasSegment() != other.hasSegment()) {
    return false;
  }
  if (hasSegment() && !getSegment().equals(other.getSegment())) {
    return false;
  }
  // Bit-wise float comparison so NaN == NaN and -0.0 != 0.0, per protobuf rules.
  if (java.lang.Float.floatToIntBits(getConfidence())
      != java.lang.Float.floatToIntBits(other.getConfidence())) {
    return false;
  }
  if (level_ != other.level_) {
    return false;
  }
  return unknownFields.equals(other.unknownFields);
}
/**
 *
 *
 * <pre>
 * Video segment. Set to [-1, -1] for video-level labels.
 * Set to [timestamp, timestamp] for frame-level labels.
 * Otherwise, corresponds to one of `AnnotateSpec.segments`
 * (if specified) or to shot boundaries (if requested).
 * </pre>
 *
 * <code>.google.cloud.videointelligence.v1beta1.VideoSegment segment = 1;</code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment getSegment() {
  if (segment_ != null) {
    return segment_;
  }
  // Never return null: substitute the immutable default instance.
  return com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
}
/**
/**
 *
 *
 * <pre>
 * Video segment. Set to [-1, -1] for video-level labels.
 * Set to [timestamp, timestamp] for frame-level labels.
 * Otherwise, corresponds to one of `AnnotateSpec.segments`
 * (if specified) or to shot boundaries (if requested).
 * </pre>
 *
 * <code>.google.cloud.videointelligence.v1beta1.VideoSegment segment = 1;</code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment getSegment() {
  // Once a nested builder exists it owns the field's current value.
  if (segmentBuilder_ != null) {
    return segmentBuilder_.getMessage();
  }
  return segment_ == null
      ? com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance()
      : segment_;
}
/**
/**
 *
 *
 * <pre>
 * Video segment. Set to [-1, -1] for video-level labels.
 * Set to [timestamp, timestamp] for frame-level labels.
 * Otherwise, corresponds to one of `AnnotateSpec.segments`
 * (if specified) or to shot boundaries (if requested).
 * </pre>
 *
 * <code>.google.cloud.videointelligence.v1beta1.VideoSegment segment = 1;</code>
 */
public Builder mergeSegment(com.google.cloud.videointelligence.v1beta1.VideoSegment value) {
  // When a nested builder exists, delegate the merge to it entirely.
  if (segmentBuilder_ != null) {
    segmentBuilder_.mergeFrom(value);
    return this;
  }
  if (segment_ == null) {
    // No existing value: adopt the incoming message as-is.
    segment_ = value;
  } else {
    // Existing value: field-merge the incoming message into it.
    segment_ =
        com.google.cloud.videointelligence.v1beta1.VideoSegment.newBuilder(segment_)
            .mergeFrom(value)
            .buildPartial();
  }
  onChanged();
  return this;
}
/**
/**
 *
 *
 * <pre>
 * Video segment. Set to [-1, -1] for video-level labels.
 * Set to [timestamp, timestamp] for frame-level labels.
 * Otherwise, corresponds to one of `AnnotateSpec.segments`
 * (if specified) or to shot boundaries (if requested).
 * </pre>
 *
 * <code>.google.cloud.videointelligence.v1beta1.VideoSegment segment = 1;</code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegmentOrBuilder getSegmentOrBuilder() {
  if (segmentBuilder_ == null) {
    // No nested builder: serve the stored message, defaulting when unset.
    return segment_ != null
        ? segment_
        : com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
  }
  return segmentBuilder_.getMessageOrBuilder();
}
/**
/**
 *
 *
 * <pre>
 * All locations where a face was detected.
 * Faces are detected and tracked on a per-video basis
 * (as opposed to across multiple videos).
 * </pre>
 *
 * <code>repeated .google.cloud.videointelligence.v1beta1.VideoSegment segments = 2;</code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment.Builder addSegmentsBuilder() {
  // Appends a builder seeded from the default instance to the repeated field.
  com.google.cloud.videointelligence.v1beta1.VideoSegment defaultSegment =
      com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
  return getSegmentsFieldBuilder().addBuilder(defaultSegment);
}
/**
/**
 *
 *
 * <pre>
 * Shot annotations. Each shot is represented as a video segment.
 * </pre>
 *
 * <code>repeated .google.cloud.videointelligence.v1beta1.VideoSegment shot_annotations = 4;
 * </code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment.Builder
    addShotAnnotationsBuilder() {
  // Appends a builder seeded from the default instance to the repeated field.
  com.google.cloud.videointelligence.v1beta1.VideoSegment defaultSegment =
      com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
  return getShotAnnotationsFieldBuilder().addBuilder(defaultSegment);
}
/**
/**
 *
 *
 * <pre>
 * Video segments to annotate. The segments may overlap and are not required
 * to be contiguous or span the whole video. If unspecified, each video
 * is treated as a single segment.
 * </pre>
 *
 * <code>repeated .google.cloud.videointelligence.v1beta1.VideoSegment segments = 1;</code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment.Builder addSegmentsBuilder() {
  // Appends a builder seeded from the default instance to the repeated field.
  com.google.cloud.videointelligence.v1beta1.VideoSegment defaultSegment =
      com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
  return getSegmentsFieldBuilder().addBuilder(defaultSegment);
}
/**
/**
 *
 *
 * <pre>
 * Shot annotations. Each shot is represented as a video segment.
 * </pre>
 *
 * <code>repeated .google.cloud.videointelligence.v1beta1.VideoSegment shot_annotations = 4;
 * </code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment.Builder addShotAnnotationsBuilder(
    int index) {
  // Inserts a builder seeded from the default instance at the given position.
  com.google.cloud.videointelligence.v1beta1.VideoSegment defaultSegment =
      com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
  return getShotAnnotationsFieldBuilder().addBuilder(index, defaultSegment);
}
/**
/**
 *
 *
 * <pre>
 * All locations where a face was detected.
 * Faces are detected and tracked on a per-video basis
 * (as opposed to across multiple videos).
 * </pre>
 *
 * <code>repeated .google.cloud.videointelligence.v1beta1.VideoSegment segments = 2;</code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment.Builder addSegmentsBuilder(
    int index) {
  // Inserts a builder seeded from the default instance at the given position.
  com.google.cloud.videointelligence.v1beta1.VideoSegment defaultSegment =
      com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
  return getSegmentsFieldBuilder().addBuilder(index, defaultSegment);
}
/**
/**
 *
 *
 * <pre>
 * Video segments to annotate. The segments may overlap and are not required
 * to be contiguous or span the whole video. If unspecified, each video
 * is treated as a single segment.
 * </pre>
 *
 * <code>repeated .google.cloud.videointelligence.v1beta1.VideoSegment segments = 1;</code>
 */
public com.google.cloud.videointelligence.v1beta1.VideoSegment.Builder addSegmentsBuilder(
    int index) {
  // Inserts a builder seeded from the default instance at the given position.
  com.google.cloud.videointelligence.v1beta1.VideoSegment defaultSegment =
      com.google.cloud.videointelligence.v1beta1.VideoSegment.getDefaultInstance();
  return getSegmentsFieldBuilder().addBuilder(index, defaultSegment);
}
/**