/**
 * {@inheritDoc}
 */
@Override
public AudioFormat clone()
{
	// Build a fresh format with the same core properties, then copy
	// across the endianness and signedness flags.
	final AudioFormat copy = new AudioFormat(
			this.getNBits(), this.getSampleRateKHz(), this.getNumChannels() );
	copy.setBigEndian( this.isBigEndian );
	copy.setSigned( this.isSigned );
	return copy;
}
/**
 * Updates the sample rate in the source processor's format to match the
 * output format.
 *
 * @param ap The source processor
 * @param output The output format
 * @return The fixed source format
 */
private static AudioFormat getFormatSR( final AudioProcessor ap, final AudioFormat output )
{
	// No source processor? The output format is already the answer.
	if( ap == null )
		return output;

	// Copy the source format and override only the sample rate.
	final AudioFormat fixed = ap.getFormat().clone();
	fixed.setSampleRateKHz( output.getSampleRateKHz() );
	return fixed;
}
/** * Get a Java Sound API AudioFormat object using this object's * properties. * * @return The Java Sound API Audio Format object. */ public javax.sound.sampled.AudioFormat getJavaAudioFormat() { // Convert the OpenIMAJ audio format to a Java Sound audio format object return new javax.sound.sampled.AudioFormat( (int)this.getSampleRateKHz() * 1000, this.getNBits(), this.getNumChannels(), this.isSigned(), this.isBigEndian() ); } }
/**
 * {@inheritDoc}
 */
@Override
public String toString()
{
	// Assemble a human-readable description of the format, e.g.
	// "[Audio: 44.1KHz, 16bit, 2 channels, signed, little-endian]"
	final StringBuilder sb = new StringBuilder( "[Audio: " );
	sb.append( this.getSampleRateKHz() ).append( "KHz, " );
	sb.append( this.getNBits() ).append( "bit, " );
	sb.append( this.getNumChannels() ).append( " channel" );
	if( this.getNumChannels() > 1 )
		sb.append( "s" );
	sb.append( ", " ).append( this.isSigned ? "signed" : "unsigned" );
	sb.append( ", " ).append( this.isBigEndian ? "big-endian" : "little-endian" );
	sb.append( "]" );
	return sb.toString();
}
XuggleAudio.this.currentTimecode);
// Mirror the stream's format properties onto the current sample chunk's
// format so the chunk correctly describes its own data: channel count,
// signedness, endianness and sample rate are all copied from the
// stream-level format.
// NOTE(review): this span is the tail of a larger method whose beginning
// (including the call that takes currentTimecode as an argument) is not
// visible here.
XuggleAudio.this.currentSamples.getFormat().setNumChannels(
		XuggleAudio.this.getFormat().getNumChannels());
XuggleAudio.this.currentSamples.getFormat().setSigned(
		XuggleAudio.this.getFormat().isSigned());
XuggleAudio.this.currentSamples.getFormat().setBigEndian(
		XuggleAudio.this.getFormat().isBigEndian());
XuggleAudio.this.currentSamples.getFormat().setSampleRateKHz(
		XuggleAudio.this.getFormat().getSampleRateKHz());
/** * */ public Synthesizer() { this.setFormat( new AudioFormat( 16, 44.1, 1 ) ); }
/** * Returns the affected audio stream. * * @param as * The audio stream to affect * @return The affected audio stream */ public static AudioStream getStream(final AudioStream as) { // Effect chain: // // -> Mono // -> Band-pass filter (LPF + HPF) // -> Sample rate to 16KHz // -> Bit rate to 8-bit // final MultichannelToMonoProcessor m2m2 = new MultichannelToMonoProcessor(as); final double fc = 1000; // mid-point 1000Hz final double q = 1600; // HPF @ 200Hz, LPF @ 1800Hz final EQFilter lpf = new EQFilter(m2m2, EQType.LPF, fc + q / 2); final EQFilter hpf = new EQFilter(lpf, EQType.HPF, fc - q / 2); final SampleRateConverter src2 = new SampleRateConverter(hpf, SampleRateConversionAlgorithm.LINEAR_INTERPOLATION, new AudioFormat(m2m2.getFormat().getNBits(), 16, m2m2.getFormat().getNumChannels())); final BitDepthConverter xa2 = new BitDepthConverter(src2, BitDepthConversionAlgorithm.NEAREST, new AudioFormat(8, src2.getFormat().getSampleRateKHz(), src2.getFormat().getNumChannels())); return xa2; }
/**
 * {@inheritDoc}
 *
 * Note that because we cannot use native methods for copying parts of an
 * array, we must use Java methods so this will be considerably slower than
 * {@link #getSampleChunk()}.
 *
 * @see org.openimaj.audio.samples.SampleBuffer#getSampleChunk(int)
 */
@Override
public SampleChunk getSampleChunk(final int channel)
{
	final int nChannels = this.format.getNumChannels();

	// Channels are zero-indexed, so channel == nChannels is already out of
	// range (the old check used '>' and let it through).
	if (channel >= nChannels)
		throw new IllegalArgumentException("Cannot generate sample chunk "
				+ "for channel " + channel + " as sample only has "
				+ nChannels + " channels.");

	// Fast path: a mono buffer's only channel is the whole buffer.
	if (channel == 0 && nChannels == 1)
		return this.getSampleChunk();

	// Samples are interleaved, so one channel contains size()/nChannels
	// samples. The old code sized the array at size() and iterated size()
	// times, reading byteBuffer[i*nChannels+channel] past the end of the
	// buffer for multi-channel audio.
	final int samplesPerChannel = this.size() / nChannels;
	final byte[] newSamples = new byte[samplesPerChannel];
	for (int i = 0; i < samplesPerChannel; i++)
		newSamples[i] = this.byteBuffer[i * nChannels + channel];

	// The extracted chunk is mono.
	final AudioFormat af = this.format.clone();
	af.setNumChannels(1);
	return new SampleChunk(newSamples, af);
}
/**
 * Construct with given stream and window parameters. Note that the window
 * parameters are given in milliseconds and converted into a number of
 * samples by the method.
 *
 * @param stream The audio stream
 * @param windowSizeMillis The window size in milliseconds
 * @param overlapMillis The overlap between windows in milliseconds
 */
public EffectiveSoundPressure( final AudioStream stream,
		final int windowSizeMillis, final int overlapMillis )
{
	// Window length in samples: kHz * ms = samples per channel, then
	// multiplied by the channel count because samples are interleaved.
	super( stream, (int) (stream.getFormat().getSampleRateKHz() * windowSizeMillis
			* stream.getFormat().getNumChannels()) );
	// NOTE(review): overlapMillis is converted the same way but passed to
	// setWindowStep(), i.e. it is used as the window STEP rather than the
	// overlap (step = size - overlap). Confirm against the superclass's
	// definition of "window step" before renaming or changing this.
	this.setWindowStep( (int) (stream.getFormat().getSampleRateKHz() * overlapMillis
			* stream.getFormat().getNumChannels()) );
}
/**
 * {@inheritDoc}
 *
 * Note that because we cannot use native methods for copying parts of an
 * array, we must use Java methods so this will be considerably slower than
 * {@link #getSampleChunk()}.
 *
 * @see org.openimaj.audio.samples.SampleBuffer#getSampleChunk(int)
 */
@Override
public SampleChunk getSampleChunk(final int channel)
{
	final int nChannels = this.format.getNumChannels();

	// Channels are zero-indexed, so channel == nChannels is already out of
	// range (the old check used '>' and let it through).
	if (channel >= nChannels)
		throw new IllegalArgumentException("Cannot generate sample chunk "
				+ "for channel " + channel + " as sample only has "
				+ nChannels + " channels.");

	// Fast path: a mono buffer's only channel is the whole buffer.
	if (channel == 0 && nChannels == 1)
		return this.getSampleChunk();

	// One channel holds size()/nChannels samples at 2 bytes each. The old
	// code allocated size()*2 bytes, which left the tail of the mono chunk
	// as zero padding for multi-channel audio.
	final int samplesPerChannel = this.size() / nChannels;
	final byte[] newSamples = new byte[samplesPerChannel * 2];
	final ShortBuffer sb = ByteBuffer.wrap(newSamples).order(
			this.format.isBigEndian() ? ByteOrder.BIG_ENDIAN
					: ByteOrder.LITTLE_ENDIAN).asShortBuffer();
	for (int i = 0; i < samplesPerChannel; i++)
		sb.put(i, this.shortBuffer.get(i * nChannels + channel));

	// The extracted chunk is mono.
	final AudioFormat af = this.format.clone();
	af.setNumChannels(1);
	return new SampleChunk(newSamples, af);
}
/**
 * {@inheritDoc}
 *
 * @see org.openimaj.audio.samples.SampleBuffer#asDoubleChannelArray()
 */
@Override
public double[][] asDoubleChannelArray()
{
	final int nChannels = this.format.getNumChannels();
	final int nSamplesPerChannel = this.samples.length / nChannels;
	final double[][] deinterleaved = new double[nChannels][nSamplesPerChannel];

	// De-interleave: sample s of channel c lives at index s*nChannels + c,
	// so walking the interleaved buffer in order fills every channel.
	int index = 0;
	for (int s = 0; s < nSamplesPerChannel; s++)
		for (int c = 0; c < nChannels; c++)
			deinterleaved[c][s] = this.samples[index++];

	return deinterleaved;
}
/**
 * Chainable constructor.
 *
 * @param as The audio stream to process
 * @param af The format to process.
 */
protected BeatDetector( final AudioStream as, final AudioFormat af )
{
	super( as );

	// Reset all detector state to its quiescent values.
	this.filter1Out = 0.0f;
	this.filter2Out = 0.0f;
	this.peakEnv = 0.0f;
	this.beatTrigger = false;
	this.prevBeatPulse = false;

	// Remember the format and derive the sample rate in Hz from its
	// kHz value.
	this.format = af;
	this.setSampleRate( (float)(af.getSampleRateKHz() * 1000f) );
}
/**
 * Updates the number of bits in the source processor's format to match the
 * output format.
 *
 * @param ap The source processor
 * @param output The output format
 * @return The fixed source format
 */
private static AudioFormat getFormatBits( final AudioProcessor ap, final AudioFormat output )
{
	// No source processor? The output format is already the answer.
	if( ap == null )
		return output;

	// Copy the source format and override only the bit depth.
	final AudioFormat fixed = ap.getFormat().clone();
	fixed.setNBits( output.getNBits() );
	return fixed;
}
/**
 * Constructor that takes the input and output formats.
 *
 * @param input The input format
 * @param output The output format
 */
public AudioConverter( final AudioFormat input, final AudioFormat output )
{
	// Work out which conversion process is needed for this pair of
	// formats (uses the caller's originals, as before).
	this.processor = AudioConverter.calculateProcess( input, output );

	// Keep defensive copies of both formats so later mutation of the
	// caller's objects cannot affect this converter.
	this.inputFormat = input.clone();
	this.setFormat( output.clone() );
}
/**
 * Returns the number of samples in this sample chunk. If there are 128
 * stereo samples, this method will return 256. That is, it does not
 * normalise for the number of channels. However, it does normalise for the
 * size of each sample. So if this is a 16-bit buffer of 256 bytes length,
 * this method will return 128.
 *
 * @return the number of samples in this sample chunk.
 */
public int getNumberOfSamples()
{
	// Each sample occupies nBits/8 bytes in the raw byte array.
	final int bytesPerSample = this.format.getNBits() / 8;
	return this.samples.length / bytesPerSample;
}
/** * Create a processor for the given audio stream. The output * of this audio stream will be a mono stream. * * @param a The audio stream to process. */ public MultichannelToMonoProcessor( final AudioStream a ) { super( a ); this.setFormat( this.getFormat().clone().setNumChannels( 1 ) ); }
/**
 * Create a new sample buffer with the given format and the given number of
 * samples. It does not scale for the number of channels in the audio
 * format, so you must pre-multiply the number of samples by the number of
 * channels if you are only counting samples per channel.
 *
 * @param af
 *            The {@link AudioFormat} of the samples
 * @param nSamples
 *            The number of samples
 * @throws IllegalArgumentException
 *             if the format is null or is not 8-bit
 */
public SampleBuffer8Bit(final AudioFormat af, final int nSamples)
{
	// Validate BEFORE cloning: the old code called af.clone() first, so a
	// null format raised a NullPointerException instead of the intended
	// IllegalArgumentException.
	if (af == null || af.getNBits() != 8)
		throw new IllegalArgumentException("Number of bits "
				+ "must be 8 if you're instantiating an 8 bit "
				+ "sample buffer. However "
				+ (af == null ? "format object was null."
						: "number of bits in format was " + af.getNBits()));

	// Defensive copy of the format; one byte per sample.
	this.format = af.clone();
	this.byteBuffer = new byte[nSamples];
}
/**
 * Construct a new audio format object with the
 * given number of bits and sample rate.
 *
 * @param nBits The number of bits in each sample
 * @param sampleRate The sample rate in kilohertz (e.g. 44.1)
 * @param nChannels The number of channels
 */
public AudioFormat( int nBits, double sampleRate, int nChannels )
{
	// Sample size in bits (e.g. 8 or 16).
	this.nBits = nBits;
	// Note: the rate is stored in kHz, not Hz.
	this.sampleRateKHz = sampleRate;
	this.setNumChannels( nChannels );
}
/**
 * Returns a {@link ByteBuffer} that can be used to create views of the
 * samples in the object. For example, to get short integers, you can get
 * {@link #getSamplesAsByteBuffer()}.asShortBuffer()
 *
 * @return A {@link ByteBuffer}, or null if there are no samples
 */
public ByteBuffer getSamplesAsByteBuffer()
{
	// Nothing to wrap if there is no sample data.
	if (this.samples == null)
		return null;

	// Choose the byte order declared by the audio format so multi-byte
	// views (e.g. asShortBuffer()) decode correctly.
	final ByteOrder order = this.format.isBigEndian()
			? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN;
	return ByteBuffer.wrap(this.samples).order(order);
}