// Required by the protobuf Message contract: returns a fresh Builder for this message type.
@java.lang.Override public Builder newBuilderForType() { return newBuilder(); }
// Merges {@code other} into this builder following the protobuf oneof merge contract:
// whichever oneof case is set on {@code other} overrides/merges into the local value
// (message fields merge via mergeStreamingConfig; scalar bytes are replaced via
// setAudioContent). Merging the default instance is a no-op. Unknown fields from
// {@code other} are carried over, and onChanged() notifies parent builders.
public Builder mergeFrom(com.google.cloud.speech.v1.StreamingRecognizeRequest other) { if (other == com.google.cloud.speech.v1.StreamingRecognizeRequest.getDefaultInstance()) return this; switch (other.getStreamingRequestCase()) { case STREAMING_CONFIG: { mergeStreamingConfig(other.getStreamingConfig()); break; } case AUDIO_CONTENT: { setAudioContent(other.getAudioContent()); break; } case STREAMINGREQUEST_NOT_SET: { break; } } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; }
// Builds the message from the builder's current state without enforcing required-field
// checks. Case 1 (STREAMING_CONFIG) is a message field: copy the raw value when no
// nested builder exists, otherwise build the nested builder. Case 2 (AUDIO_CONTENT)
// is a bytes scalar, copied directly. The oneof case discriminator is copied last so
// the result stays consistent with the value stored in streamingRequest_.
@java.lang.Override public com.google.cloud.speech.v1.StreamingRecognizeRequest buildPartial() { com.google.cloud.speech.v1.StreamingRecognizeRequest result = new com.google.cloud.speech.v1.StreamingRecognizeRequest(this); if (streamingRequestCase_ == 1) { if (streamingConfigBuilder_ == null) { result.streamingRequest_ = streamingRequest_; } else { result.streamingRequest_ = streamingConfigBuilder_.build(); } } if (streamingRequestCase_ == 2) { result.streamingRequest_ = streamingRequest_; } result.streamingRequestCase_ = streamingRequestCase_; onBuilt(); return result; }
/**
 * Two requests are equal when they have the same oneof case, the same value for that
 * case, and identical unknown fields. Non-message arguments defer to super.equals.
 */
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
  if (obj == this) {
    return true;
  }
  if (!(obj instanceof com.google.cloud.speech.v1.StreamingRecognizeRequest)) {
    return super.equals(obj);
  }
  com.google.cloud.speech.v1.StreamingRecognizeRequest other =
      (com.google.cloud.speech.v1.StreamingRecognizeRequest) obj;
  // Messages whose oneof discriminators differ can never be equal.
  if (!getStreamingRequestCase().equals(other.getStreamingRequestCase())) {
    return false;
  }
  // Compare only the field that is actually populated in the oneof.
  switch (streamingRequestCase_) {
    case 1:
      if (!getStreamingConfig().equals(other.getStreamingConfig())) {
        return false;
      }
      break;
    case 2:
      if (!getAudioContent().equals(other.getAudioContent())) {
        return false;
      }
      break;
    case 0:
    default:
      break;
  }
  return unknownFields.equals(other.unknownFields);
}
// Memoized hash consistent with equals(): mixes the descriptor, then the field number
// and value of whichever oneof case is set, then the unknown fields. The multipliers
// (41/19/37/53/29) are fixed by the protobuf code generator and must not change, or
// serialized hash expectations across generated classes would diverge.
@java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); switch (streamingRequestCase_) { case 1: hash = (37 * hash) + STREAMING_CONFIG_FIELD_NUMBER; hash = (53 * hash) + getStreamingConfig().hashCode(); break; case 2: hash = (37 * hash) + AUDIO_CONTENT_FIELD_NUMBER; hash = (53 * hash) + getAudioContent().hashCode(); break; case 0: default: } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; }
// Required by MessageOrBuilder: returns the shared immutable default instance.
@java.lang.Override public com.google.cloud.speech.v1.StreamingRecognizeRequest getDefaultInstanceForType() { return com.google.cloud.speech.v1.StreamingRecognizeRequest.getDefaultInstance(); }
io.grpc.protobuf.ProtoUtils.marshaller( com.google.cloud.speech.v1.StreamingRecognizeRequest .getDefaultInstance())) .setResponseMarshaller( io.grpc.protobuf.ProtoUtils.marshaller(
@Test public void streamingRecognize() throws Exception { byte[] audioBytes = Resources.toByteArray(new URL("https://storage.googleapis.com/gapic-toolkit/hello.flac")); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder().setConfig(config()).build(); ResponseApiStreamingObserver<StreamingRecognizeResponse> responseObserver = new ResponseApiStreamingObserver<>(); ApiStreamObserver<StreamingRecognizeRequest> requestObserver = speechClient.streamingRecognizeCallable().bidiStreamingCall(responseObserver); // The first request must **only** contain the audio configuration: requestObserver.onNext( StreamingRecognizeRequest.newBuilder().setStreamingConfig(streamingConfig).build()); // Subsequent requests must **only** contain the audio data. requestObserver.onNext( StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(audioBytes)) .build()); // Mark transmission as completed after sending the data. requestObserver.onCompleted(); List<StreamingRecognizeResponse> responses = responseObserver.future().get(); Truth.assertThat(responses.size()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResultsCount()).isGreaterThan(0); Truth.assertThat(responses.get(0).getResults(0).getAlternativesCount()).isGreaterThan(0); String text = responses.get(0).getResults(0).getAlternatives(0).getTranscript(); Truth.assertThat(text).isEqualTo("hello"); }
io.grpc.protobuf.ProtoUtils.marshaller( com.google.cloud.speech.v1.StreamingRecognizeRequest .getDefaultInstance())) .setResponseMarshaller( io.grpc.protobuf.ProtoUtils.marshaller(
/**
 * Unit test against the mock server: a single request on the bidi stream should
 * yield exactly the one canned response queued on the mock.
 */
@Test
@SuppressWarnings("all")
public void streamingRecognizeTest() throws Exception {
  StreamingRecognizeResponse cannedResponse = StreamingRecognizeResponse.newBuilder().build();
  mockSpeech.addResponse(cannedResponse);

  StreamingRecognizeRequest streamRequest = StreamingRecognizeRequest.newBuilder().build();
  MockStreamObserver<StreamingRecognizeResponse> responseObserver = new MockStreamObserver<>();

  BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse> callable =
      client.streamingRecognizeCallable();
  ApiStreamObserver<StreamingRecognizeRequest> requestObserver =
      callable.bidiStreamingCall(responseObserver);

  requestObserver.onNext(streamRequest);
  requestObserver.onCompleted();

  List<StreamingRecognizeResponse> received = responseObserver.future().get();
  Assert.assertEquals(1, received.size());
  Assert.assertEquals(cannedResponse, received.get(0));
}
/**
 * Verifies that a server-side INVALID_ARGUMENT error surfaces to the client as an
 * {@link InvalidArgumentException} wrapped in the response future's ExecutionException.
 *
 * Fix: the original bound {@code future().get()} to a local {@code actualResponses}
 * that was never read (dead store) — the call is made for its side effect of
 * throwing, so the result is now discarded directly.
 */
@Test
@SuppressWarnings("all")
public void streamingRecognizeExceptionTest() throws Exception {
  StatusRuntimeException exception = new StatusRuntimeException(Status.INVALID_ARGUMENT);
  mockSpeech.addException(exception);
  StreamingRecognizeRequest request = StreamingRecognizeRequest.newBuilder().build();

  MockStreamObserver<StreamingRecognizeResponse> responseObserver = new MockStreamObserver<>();

  BidiStreamingCallable<StreamingRecognizeRequest, StreamingRecognizeResponse> callable =
      client.streamingRecognizeCallable();
  ApiStreamObserver<StreamingRecognizeRequest> requestObserver =
      callable.bidiStreamingCall(responseObserver);

  requestObserver.onNext(request);

  try {
    // Invoked only for its side effect: it must throw with the mocked error.
    responseObserver.future().get();
    Assert.fail("No exception thrown");
  } catch (ExecutionException e) {
    Assert.assertTrue(e.getCause() instanceof InvalidArgumentException);
    InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
    Assert.assertEquals(
        StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
  }
}
}
StreamingRecognizeRequest.newBuilder() .setStreamingConfig(streamingRecognitionConfig) StreamingRecognizeRequest.newBuilder() .setStreamingConfig(streamingRecognitionConfig) .build(); StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(sharedQueue.take())) .build();
StreamingRecognizeRequest.newBuilder() .setStreamingConfig(streamingRecognitionConfig) StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(data)) .build();
StreamingRecognizeRequest.newBuilder().setStreamingConfig(config).build()); StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(data)) .build());
StreamingRecognizeRequest.newBuilder().setStreamingConfig(config).build()); StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(data)) .build());
/** * Recognizes the speech audio. This method should be called every time a chunk of byte buffer * is ready. * * @param data The audio data. * @param size The number of elements that are actually relevant in the {@code data}. */ public void recognize(byte[] data, int size) { if (mRequestObserver == null) { return; } // Call the streaming recognition API mRequestObserver.onNext(StreamingRecognizeRequest.newBuilder() .setAudioContent(ByteString.copyFrom(data, 0, size)) .build()); }
/** * Starts recognizing speech audio. * * @param sampleRate The sample rate of the audio. */ public void startRecognizing(int sampleRate) { if (mApi == null) { Log.w(TAG, "API not ready. Ignoring the request."); return; } // Configure the API mRequestObserver = mApi.streamingRecognize(mResponseObserver); StreamingRecognitionConfig streamingConfig = StreamingRecognitionConfig.newBuilder() .setConfig(RecognitionConfig.newBuilder() .setLanguageCode("en-US") .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16) .setSampleRateHertz(sampleRate) .build() ) .setInterimResults(true) .setSingleUtterance(true) .build(); StreamingRecognizeRequest streamingRecognizeRequest = StreamingRecognizeRequest.newBuilder(). setStreamingConfig(streamingConfig).build(); mRequestObserver.onNext(streamingRecognizeRequest); }
StreamingRecognizeRequest.newBuilder() .setAudioContent(audioBytes) .build());
StreamingRecognizeRequest.newBuilder() .setStreamingConfig(streamingRecognitionConfig) .build());