/**
 * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
 *     null if delivery of events is not required.
 * @param eventListener A listener of events. May be null if delivery of events is not required.
 * @param audioProcessors Optional {@link AudioProcessor}s that will process audio before output.
 */
public FfmpegAudioRenderer(
    @Nullable Handler eventHandler,
    @Nullable AudioRendererEventListener eventListener,
    AudioProcessor... audioProcessors) {
  this(
      eventHandler,
      eventListener,
      new DefaultAudioSink(/* audioCapabilities= */ null, audioProcessors),
      /* enableFloatOutput= */ false);
}
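// A minimal usage sketch (not part of the original file): constructing this renderer with
// event delivery disabled (both nullable parameters left null, as documented above) and a
// single optional AudioProcessor. The wrapper class name and the choice of SonicAudioProcessor
// are illustrative assumptions; any AudioProcessor implementation could be passed instead.
// Assumes the ExoPlayer FFmpeg extension (com.google.android.exoplayer2.ext.ffmpeg) is on the
// classpath.
import com.google.android.exoplayer2.audio.AudioProcessor;
import com.google.android.exoplayer2.audio.SonicAudioProcessor;
import com.google.android.exoplayer2.ext.ffmpeg.FfmpegAudioRenderer;

final class FfmpegAudioRendererUsageSketch {
  static FfmpegAudioRenderer createRenderer() {
    AudioProcessor processor = new SonicAudioProcessor();
    return new FfmpegAudioRenderer(
        /* eventHandler= */ null, /* eventListener= */ null, processor);
  }
}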
@Override
public long getCurrentPositionUs(boolean sourceEnded) {
  if (!isInitialized() || startMediaTimeState == START_NOT_SET) {
    return CURRENT_POSITION_NOT_SET;
  }
  long positionUs = audioTrackPositionTracker.getCurrentPositionUs(sourceEnded);
  positionUs = Math.min(positionUs, framesToDurationUs(getWrittenFrames()));
  return startMediaTimeUs + applySkipping(applySpeedup(positionUs));
}
@Override
public void playToEndOfStream() throws WriteException {
  if (handledEndOfStream || !isInitialized()) {
    return;
  }
  if (drainAudioProcessorsToEndOfStream()) {
    // The audio processors have drained, so drain the underlying audio track.
    audioTrackPositionTracker.handleEndOfStream(getWrittenFrames());
    audioTrack.stop();
    bytesUntilNextAvSync = 0;
    handledEndOfStream = true;
  }
}
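// Caller-side sketch (an assumption, not the renderer's actual code): after the final input
// buffer has been queued, a caller typically invokes playToEndOfStream() and then polls
// isEnded() to learn when the written audio has been played out. Both methods belong to the
// AudioSink interface that the sink above implements.
import com.google.android.exoplayer2.audio.AudioSink;

final class EndOfStreamSketch {
  static boolean drainAtEndOfStream(AudioSink sink) throws AudioSink.WriteException {
    sink.playToEndOfStream(); // Drains the processor chain, then stops the underlying track.
    return sink.isEnded();    // True once all written data has been played out.
  }
}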
private void setupAudioProcessors() {
  ArrayList<AudioProcessor> newAudioProcessors = new ArrayList<>();
  for (AudioProcessor audioProcessor : getAvailableAudioProcessors()) {
    if (audioProcessor.isActive()) {
      newAudioProcessors.add(audioProcessor);
    } else {
      audioProcessor.flush();
    }
  }
  int count = newAudioProcessors.size();
  activeAudioProcessors = newAudioProcessors.toArray(new AudioProcessor[count]);
  outputBuffers = new ByteBuffer[count];
  flushAudioProcessors();
}
@Override
public boolean handleBuffer(ByteBuffer buffer, long presentationTimeUs)
    throws InitializationException, WriteException {
  Assertions.checkArgument(inputBuffer == null || buffer == inputBuffer);
  if (!isInitialized()) {
    initialize();
    if (playing) {
      play();
    }
  }

  if (!audioTrackPositionTracker.mayHandleBuffer(getWrittenFrames())) {
    return false;
  }

  // …
  framesPerEncodedSample = getFramesPerEncodedSample(outputEncoding, buffer);
  if (framesPerEncodedSample == 0) {
    // …
  }

  // …
  if (!drainAudioProcessorsToEndOfStream()) {
    // Don't process any more input until draining completes.
    return false;
  }
  // …
  playbackParametersCheckpoints.add(
      new PlaybackParametersCheckpoint(
          newPlaybackParameters,
          Math.max(0, presentationTimeUs),
          framesToDurationUs(getWrittenFrames())));
  setupAudioProcessors();

  // …
  long expectedPresentationTimeUs =
      startMediaTimeUs
          + inputFramesToDurationUs(
              getSubmittedFrames() - trimmingAudioProcessor.getTrimmedFrameCount());
  if (startMediaTimeState == START_IN_SYNC
      && Math.abs(expectedPresentationTimeUs - presentationTimeUs) > 200000) {
    // …
  }

  // …
  if (processingEnabled) {
    processBuffers(presentationTimeUs);
  } else {
    writeBuffer(inputBuffer, presentationTimeUs);
  }
  // …
}
// Within DefaultAudioSink.configure(...):
shouldConvertHighResIntPcmToFloat =
    enableConvertHighResIntPcmToFloat
        && supportsOutput(channelCount, C.ENCODING_PCM_32BIT)
        && Util.isEncodingHighResolutionIntegerPcm(inputEncoding);
if (isInputPcm) {
  trimmingAudioProcessor.setTrimFrameCount(trimStartFrames, trimEndFrames);
  channelMappingAudioProcessor.setChannelMap(outputChannels);
  for (AudioProcessor audioProcessor : getAvailableAudioProcessors()) {
    try {
      flush |= audioProcessor.configure(sampleRate, channelCount, encoding);
    } catch (AudioProcessor.UnhandledFormatException e) {
      throw new ConfigurationException(e);
    }
    // …
  }
}

int channelConfig = getChannelConfig(channelCount, isInputPcm);
if (channelConfig == AudioFormat.CHANNEL_INVALID) {
  throw new ConfigurationException("Unsupported channel count: " + channelCount);
}

if (!flush
    && isInitialized()
    && outputEncoding == encoding
    && outputSampleRate == sampleRate
    && outputChannelConfig == channelConfig) {
  // The existing audio track already matches the required output format.
  return;
}

reset();

// …
outputPcmFrameSize =
    isInputPcm ? Util.getPcmFrameSize(outputEncoding, channelCount) : C.LENGTH_UNSET;
bufferSize = specifiedBufferSize != 0 ? specifiedBufferSize : getDefaultBufferSize();
private int getDefaultBufferSize() {
  if (isInputPcm) {
    int minBufferSize =
        AudioTrack.getMinBufferSize(outputSampleRate, outputChannelConfig, outputEncoding);
    Assertions.checkState(minBufferSize != ERROR_BAD_VALUE);
    int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;
    int minAppBufferSize = (int) durationUsToFrames(MIN_BUFFER_DURATION_US) * outputPcmFrameSize;
    int maxAppBufferSize =
        (int)
            Math.max(
                minBufferSize, durationUsToFrames(MAX_BUFFER_DURATION_US) * outputPcmFrameSize);
    return Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize);
  } else {
    int rate = getMaximumEncodedRateBytesPerSecond(outputEncoding);
    if (outputEncoding == C.ENCODING_AC3) {
      rate *= AC3_BUFFER_MULTIPLICATION_FACTOR;
    }
    return (int) (PASSTHROUGH_BUFFER_DURATION_US * rate / C.MICROS_PER_SECOND);
  }
}
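// Standalone worked example (assumptions: the constant values below mirror the names used in
// getDefaultBufferSize(), i.e. a 250 ms minimum and 750 ms maximum buffer duration and a 4x
// multiplication factor; the platform minBufferSize of 14,000 bytes is invented for
// illustration). It reproduces the PCM-branch arithmetic: clamp 4x the platform minimum to the
// [min, max] application buffer sizes derived from the output format.
final class PcmBufferSizeExample {
  private static final long MIN_BUFFER_DURATION_US = 250_000; // assumed value
  private static final long MAX_BUFFER_DURATION_US = 750_000; // assumed value
  private static final int BUFFER_MULTIPLICATION_FACTOR = 4;  // assumed value

  public static void main(String[] args) {
    int outputSampleRate = 44_100;   // Hz
    int outputPcmFrameSize = 2 * 2;  // Stereo 16-bit PCM: 4 bytes per frame.
    int minBufferSize = 14_000;      // Example AudioTrack.getMinBufferSize result, in bytes.

    long minFrames = MIN_BUFFER_DURATION_US * outputSampleRate / 1_000_000;
    long maxFrames = MAX_BUFFER_DURATION_US * outputSampleRate / 1_000_000;

    int multipliedBufferSize = minBufferSize * BUFFER_MULTIPLICATION_FACTOR;              // 56,000
    int minAppBufferSize = (int) minFrames * outputPcmFrameSize;                          // 44,100
    int maxAppBufferSize = (int) Math.max(minBufferSize, maxFrames * outputPcmFrameSize); // 132,300

    // Equivalent of Util.constrainValue(multipliedBufferSize, minAppBufferSize, maxAppBufferSize).
    int bufferSize = Math.max(minAppBufferSize, Math.min(multipliedBufferSize, maxAppBufferSize));
    System.out.println("bufferSize = " + bufferSize + " bytes"); // Prints 56000.
  }
}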
@Override
public void reset() {
  if (isInitialized()) {
    submittedPcmBytes = 0;
    submittedEncodedFrames = 0;
    inputBuffer = null;
    outputBuffer = null;
    flushAudioProcessors();
    handledEndOfStream = false;
    drainingAudioProcessorIndex = C.INDEX_UNSET;
    // …
  }
}
private long applySkipping(long positionUs) {
  return positionUs + framesToDurationUs(audioProcessorChain.getSkippedOutputFrameCount());
}
private AudioTrack initializeAudioTrack() throws InitializationException {
  AudioTrack audioTrack;
  if (Util.SDK_INT >= 21) {
    audioTrack = createAudioTrackV21();
  } else {
    int streamType = Util.getStreamTypeForAudioUsage(audioAttributes.usage);
    // …
  }
  // …
}
/**
 * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
 *     null if delivery of events is not required.
 * @param eventListener A listener of events. May be null if delivery of events is not required.
 * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
 *     default capabilities (no encoded audio passthrough support) should be assumed.
 * @param drmSessionManager For use with encrypted media. May be null if support for encrypted
 *     media is not required.
 * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
 *     For example a media file may start with a short clear region so as to allow playback to
 *     begin in parallel with key acquisition. This parameter specifies whether the renderer is
 *     permitted to play clear regions of encrypted media files before {@code drmSessionManager}
 *     has obtained the keys necessary to decrypt encrypted regions of the media.
 * @param audioProcessors Optional {@link AudioProcessor}s that will process audio before output.
 */
public SimpleDecoderAudioRenderer(
    @Nullable Handler eventHandler,
    @Nullable AudioRendererEventListener eventListener,
    @Nullable AudioCapabilities audioCapabilities,
    @Nullable DrmSessionManager<ExoMediaCrypto> drmSessionManager,
    boolean playClearSamplesWithoutKeys,
    AudioProcessor... audioProcessors) {
  this(
      eventHandler,
      eventListener,
      drmSessionManager,
      playClearSamplesWithoutKeys,
      new DefaultAudioSink(audioCapabilities, audioProcessors));
}
eventHandler, eventListener, new DefaultAudioSink(audioCapabilities, audioProcessors));