/**
 * If the samples are represented as a set of doubles, you can set them
 * here. The assumed format will be a single channel at 44.1KHz.
 *
 * @param samples The sample data.
 */
public void setData( final double[] samples )
{
	// Assume single-channel 44.1KHz for the raw doubles.
	// NOTE(review): the -1 bit depth presumably marks unquantised
	// floating-point data — confirm against FloatSampleBuffer.
	final AudioFormat assumedFormat = new AudioFormat( -1, 44.1, 1 );
	super.setData( new FloatSampleBuffer( samples, assumedFormat ) );
}
/** * */ public Synthesizer() { this.setFormat( new AudioFormat( 16, 44.1, 1 ) ); }
/** * */ public Synthesizer() { this.setFormat( new AudioFormat( 16, 44.1, 1 ) ); }
/**
 * Constructor
 *
 * @param nPolyphony Number of voices polyphony allowed
 */
public PolyphonicSynthesizer( final int nPolyphony )
{
	// Output is mixed at 16-bit, 44.1KHz, mono.
	super( new AudioFormat( 16, 44.1, 1 ) );

	this.playingSynths = new HashMap<Integer, Synthesizer>();

	// Pre-allocate one synthesizer per voice of polyphony.
	int voice = 0;
	while( voice < nPolyphony )
	{
		final Synthesizer s = new Synthesizer();
		this.voicePool.add( s );
		// super.addStream( s, 1f );
		voice++;
	}
}
/**
 * Constructor
 *
 * @param nPolyphony Number of voices polyphony allowed
 */
public PolyphonicSynthesizer( final int nPolyphony )
{
	// Output is mixed at 16-bit, 44.1KHz, mono.
	super( new AudioFormat( 16, 44.1, 1 ) );

	this.playingSynths = new HashMap<Integer, Synthesizer>();

	// Pre-allocate one synthesizer per voice of polyphony.
	int voice = 0;
	while( voice < nPolyphony )
	{
		final Synthesizer s = new Synthesizer();
		this.voicePool.add( s );
		// super.addStream( s, 1f );
		voice++;
	}
}
/**
 * Returns a set of values that represent the response of this filter bank
 * when the linear frequency is split in the given number of bins. The
 * result will have <code>nSpectrumBins</code> length.
 *
 * @param nSpectrumBins The number of bins in a spectrum.
 * @param maxFreq The maximum frequency (sample rate)
 * @return The response curve.
 */
public float[] getResponseCurve( final int nSpectrumBins, final double maxFreq )
{
	// Build a flat (all-ones) single-channel spectrum and push it
	// through the filter bank; what comes out is the response curve.
	final float[][] flatSpectrum = new float[1][nSpectrumBins];
	for( int bin = 0; bin < nSpectrumBins; bin++ )
		flatSpectrum[0][bin] = 1f;

	// NOTE(review): maxFreq/500 presumably converts a maximum frequency in
	// Hz to a Nyquist sample rate in KHz (2*maxFreq/1000) — confirm.
	final AudioFormat analysisFormat = new AudioFormat( 8, maxFreq/500, 1 );
	return this.process( flatSpectrum, analysisFormat )[0];
}
/**
 * Returns a set of values that represent the response of this filter bank
 * when the linear frequency is split in the given number of bins. The
 * result will have <code>nSpectrumBins</code> length.
 *
 * @param nSpectrumBins The number of bins in a spectrum.
 * @param maxFreq The maximum frequency (sample rate)
 * @return The response curve.
 */
public float[] getResponseCurve( final int nSpectrumBins, final double maxFreq )
{
	// Build a flat (all-ones) single-channel spectrum and push it
	// through the filter bank; what comes out is the response curve.
	final float[][] flatSpectrum = new float[1][nSpectrumBins];
	for( int bin = 0; bin < nSpectrumBins; bin++ )
		flatSpectrum[0][bin] = 1f;

	// NOTE(review): maxFreq/500 presumably converts a maximum frequency in
	// Hz to a Nyquist sample rate in KHz (2*maxFreq/1000) — confirm.
	final AudioFormat analysisFormat = new AudioFormat( 8, maxFreq/500, 1 );
	return this.process( flatSpectrum, analysisFormat )[0];
}
public static void main(String[] args) { try { // final Video<MBFImage> video = new VideoCapture(320, 240); final JavaSoundAudioGrabber audio = new JavaSoundAudioGrabber(new AudioFormat(16, 44.1, 2)); audio.setMaxBufferSize(1024); new Thread(audio).start(); Thread.sleep(100); // final VideoDisplay<MBFImage> display = // VideoDisplay.createVideoDisplay(video, audio); new Thread(new AudioPlayer(audio)).start(); } catch (final Exception e) { // TODO Auto-generated catch block e.printStackTrace(); } } }
public static void main(String[] args) { try { // final Video<MBFImage> video = new VideoCapture(320, 240); final JavaSoundAudioGrabber audio = new JavaSoundAudioGrabber(new AudioFormat(16, 44.1, 2)); audio.setMaxBufferSize(1024); new Thread(audio).start(); Thread.sleep(100); // final VideoDisplay<MBFImage> display = // VideoDisplay.createVideoDisplay(video, audio); new Thread(new AudioPlayer(audio)).start(); } catch (final Exception e) { // TODO Auto-generated catch block e.printStackTrace(); } } }
/**
 * {@inheritDoc}
 *
 * Builds the parent component, then starts a live audio grabber whose
 * frequency analysis is fed back to this object as a listener.
 */
@Override public Component getComponent(final int width, final int height) throws IOException{
	final Component comp = super.getComponent(width, height);
	// Start grabbing 16-bit mono audio in its own thread.
	// NOTE(review): 96.1KHz is an unusual sample rate — possibly intended
	// to be 44.1 or 96.0; confirm against the grabber's supported rates.
	this.xa = new JavaSoundAudioGrabber(new AudioFormat( 16, 96.1, 1 ));
	this.xa.setMaxBufferSize( this.sampleChunkSize );
	new Thread( this.xa ).start();
	// Receive frequency events for the 30Hz–3400Hz band
	// (roughly the telephony voice band).
	this.source = new FrequencyAudioSource(this.xa);
	this.source.addFrequencyListener(this,new Pair<Integer>(30,3400));
	// Discard any spectra accumulated by a previous run.
	this.spectra = null;
	return comp;
}
/**
 * Prints the available audio devices, then looks up a specific
 * named output line.
 *
 * @param args command-line arguments (unused)
 */
public static void main( final String[] args )
{
	try
	{
		System.out.println( AudioUtils.getDevices() );

		final AudioFormat stereo16bit = new AudioFormat( 16, 44.1, 2 );
		System.out.println( AudioUtils.getJavaOutputLine(
				"Line 1/2 (M-Audio Delta 44)", stereo16bit ) );
	}
	catch( final LineUnavailableException e )
	{
		e.printStackTrace();
	}
}
}
/**
 * Prints the available audio devices, then looks up a specific
 * named output line.
 *
 * @param args command-line arguments (unused)
 */
public static void main( final String[] args )
{
	try
	{
		System.out.println( AudioUtils.getDevices() );

		final AudioFormat stereo16bit = new AudioFormat( 16, 44.1, 2 );
		System.out.println( AudioUtils.getJavaOutputLine(
				"Line 1/2 (M-Audio Delta 44)", stereo16bit ) );
	}
	catch( final LineUnavailableException e )
	{
		e.printStackTrace();
	}
}
}
// Step between successive analysis frames.
// NOTE(review): presumably measured in samples — confirm against the caller.
final int frameStep = 300;
// Downmix the source to mono, then resample it to 16-bit, 11.025KHz
// using linear interpolation.
final AudioFormat targetFormat = new AudioFormat(16, 11.025, 1);
final AudioStream audioConv = new SampleRateConverter(new MultichannelToMonoProcessor(audio), SampleRateConverter.SampleRateConversionAlgorithm.LINEAR_INTERPOLATION, targetFormat);
/**
 * Converts the output of a synthesizer to 8-bit, 22.05KHz mono and
 * plots the converted audio frames.
 *
 * @param args command-line arguments (unused)
 */
public static void main( final String[] args )
{
	final Synthesizer synth = new Synthesizer();

	// Target format for the conversion.
	final AudioFormat o = new AudioFormat( 8, 22.05, 1 );
	System.out.println( "Converting from "+synth.getFormat()+" to "+o );

	AudioFramePlot.drawChart( new AudioConverter( synth, o ) );
}
}
/**
 * Converts the output of a synthesizer to 8-bit, 22.05KHz mono and
 * plots the converted audio frames.
 *
 * @param args command-line arguments (unused)
 */
public static void main( final String[] args )
{
	final Synthesizer synth = new Synthesizer();

	// Target format for the conversion.
	final AudioFormat o = new AudioFormat( 8, 22.05, 1 );
	System.out.println( "Converting from "+synth.getFormat()+" to "+o );

	AudioFramePlot.drawChart( new AudioConverter( synth, o ) );
}
}
// Step between successive analysis frames.
// NOTE(review): presumably measured in samples — confirm against the caller.
final int frameStep = 300;
// Downmix the source to mono, then resample it to 16-bit, 11.025KHz
// using linear interpolation.
final AudioFormat targetFormat = new AudioFormat(16, 11.025, 1);
final AudioStream audioConv = new SampleRateConverter(new MultichannelToMonoProcessor(audio), SampleRateConverter.SampleRateConversionAlgorithm.LINEAR_INTERPOLATION, targetFormat);
/** * Main method * * @param args * command-line args (not used) * @throws InterruptedException */ public static void main(final String[] args) throws InterruptedException { // Construct a new audio waveform visualisation final AudioSpectrogram aw = new AudioSpectrogram(440, 600); aw.showWindow("Spectrogram"); // Start a sound grabber that will grab from your default microphone final JavaSoundAudioGrabber jsag = new JavaSoundAudioGrabber(new AudioFormat(16, 44.1, 1)); new Thread(jsag).start(); // Wait until the grabber has started (sometimes it takes a while) while (jsag.isStopped()) Thread.sleep(50); // Then send each of the frames to the visualisation SampleChunk sc = null; while ((sc = jsag.nextSampleChunk()) != null) aw.setData(sc); } }
/**
 * {@inheritDoc}
 *
 * Returns a new AudioFormat carrying the same bit depth, sample rate,
 * channel count, endianness and signedness as this one.
 */
@Override
public AudioFormat clone()
{
	final AudioFormat copy = new AudioFormat(
			this.getNBits(), this.getSampleRateKHz(), this.getNumChannels() );
	copy.setBigEndian( this.isBigEndian );
	copy.setSigned( this.isSigned );
	return copy;
}
/** * Returns the affected audio stream. * * @param as * The audio stream to affect * @return The affected audio stream */ public static AudioStream getStream(final AudioStream as) { // Effect chain: // // -> Mono // -> Band-pass filter (LPF + HPF) // -> Sample rate to 16KHz // -> Bit rate to 8-bit // final MultichannelToMonoProcessor m2m2 = new MultichannelToMonoProcessor(as); final double fc = 1000; // mid-point 1000Hz final double q = 1600; // HPF @ 200Hz, LPF @ 1800Hz final EQFilter lpf = new EQFilter(m2m2, EQType.LPF, fc + q / 2); final EQFilter hpf = new EQFilter(lpf, EQType.HPF, fc - q / 2); final SampleRateConverter src2 = new SampleRateConverter(hpf, SampleRateConversionAlgorithm.LINEAR_INTERPOLATION, new AudioFormat(m2m2.getFormat().getNBits(), 16, m2m2.getFormat().getNumChannels())); final BitDepthConverter xa2 = new BitDepthConverter(src2, BitDepthConversionAlgorithm.NEAREST, new AudioFormat(8, src2.getFormat().getSampleRateKHz(), src2.getFormat().getNumChannels())); return xa2; }
/**
 * {@inheritDoc}
 *
 * Returns a new AudioFormat carrying the same bit depth, sample rate,
 * channel count, endianness and signedness as this one.
 */
@Override
public AudioFormat clone()
{
	final AudioFormat copy = new AudioFormat(
			this.getNBits(), this.getSampleRateKHz(), this.getNumChannels() );
	copy.setBigEndian( this.isBigEndian );
	copy.setSigned( this.isSigned );
	return copy;
}