private AudioSampleEntry mergeAudioSampleEntries(AudioSampleEntry ase1, AudioSampleEntry ase2) { AudioSampleEntry ase = new AudioSampleEntry(ase2.getType()); if (ase1.getBytesPerFrame() == ase2.getBytesPerFrame()) { ase.setBytesPerFrame(ase1.getBytesPerFrame()); } else { LOG.error("BytesPerFrame differ"); return null; if (ase1.getBytesPerPacket() == ase2.getBytesPerPacket()) { ase.setBytesPerPacket(ase1.getBytesPerPacket()); } else { return null; if (ase1.getBytesPerSample() == ase2.getBytesPerSample()) { ase.setBytesPerSample(ase1.getBytesPerSample()); } else { LOG.error("BytesPerSample differ"); return null; if (ase1.getChannelCount() == ase2.getChannelCount()) { ase.setChannelCount(ase1.getChannelCount()); } else { return null; if (ase1.getPacketSize() == ase2.getPacketSize()) { ase.setPacketSize(ase1.getPacketSize()); } else { LOG.error("ChannelCount differ"); return null;
private void parse() throws IOException { if (!readVariables()) { throw new IOException(); } audioSampleEntry = new AudioSampleEntry(type); audioSampleEntry.setChannelCount(channelCount); audioSampleEntry.setSampleRate(samplerate); audioSampleEntry.setDataReferenceIndex(1); audioSampleEntry.setSampleSize(16); audioSampleEntry.addBox(ddts); trackMetaData.setCreationTime(new Date()); trackMetaData.setModificationTime(new Date()); trackMetaData.setLanguage(lang); trackMetaData.setTimescale(samplerate); // Audio tracks always use samplerate as timescale }
// Serializes this sample entry: box header first, then the fixed audio
// sample-entry fields into a buffer, then the child boxes.
// NOTE(review): this fragment is garbled/truncated — the allocate(...) call is
// missing its closing parenthesis, the `if` matching the `else` branch
// (presumably `if (soundVersion == 1)` — TODO confirm) is absent, and several
// buffer writes appear lost. Recover the full method from version control
// before editing.
@Override public void getBox(WritableByteChannel writableByteChannel) throws IOException {
    writableByteChannel.write(getHeader());
    // 28 fixed bytes, plus 16 extra when soundVersion == 1
    ByteBuffer byteBuffer = ByteBuffer.allocate(28 + (soundVersion == 1 ? 16 : 0)
    IsoTypeWriter.writeUInt32(byteBuffer, getSampleRate());
    } else {
        // soundVersion 0 apparently stores the rate as 16.16 fixed point — verify
        IsoTypeWriter.writeUInt32(byteBuffer, getSampleRate() << 16);
    writeContainer(writableByteChannel);
// Inspect the audio sample entry: codec type, sample size/rate, channel count.
String codecName = ase.getType();
log.debug("Sample size: {}", ase.getSampleSize());
long ats = ase.getSampleRate();
audioChannels = ase.getChannelCount();
log.debug("Channels: {}", audioChannels);
if (ase.getBoxes(ESDescriptorBox.class).size() > 0) {
    ESDescriptorBox esds = ase.getBoxes(ESDescriptorBox.class).get(0);
    if (esds == null) {
        // NOTE(review): get(0) on a non-empty list never yields null, so this
        // branch looks unreachable as written — confirm against the original file.
        log.debug("esds not found in default path");
        AppleWaveBox wave = ase.getBoxes(AppleWaveBox.class).get(0);
        // NOTE(review): get(0) throws IndexOutOfBoundsException on an empty list;
        // this null check cannot guard that case — verify intent.
        if (wave != null) {
            log.debug("wave atom found");
/**
 * Creates an AC-3 audio track from the given data source.
 *
 * @param dataSource source of the raw AC-3 bitstream
 * @param lang language code stored in the track metadata
 * @throws IOException if the samples cannot be read from the data source
 */
public AC3TrackImpl(DataSource dataSource, String lang) throws IOException {
    super(dataSource.toString());
    this.dataSource = dataSource;
    // fixed: setLanguage(lang) was previously called twice with the same value
    this.trackMetaData.setLanguage(lang);
    samples = readSamples();
    audioSampleEntry = createAudioSampleEntry();

    trackMetaData.setCreationTime(new Date());
    trackMetaData.setModificationTime(new Date());
    trackMetaData.setTimescale(audioSampleEntry.getSampleRate()); // Audio tracks always use samplerate as timescale
    trackMetaData.setVolume(1);
}
// NOTE(review): truncated fragment — initContainer(...) is missing its closing
// parenthesis and the anonymous Box body is cut off; recover the full method
// from version control before editing.
dataSource.read(owmaSpecifics);
addBox(new Box() {
// Skip the 28 fixed sample-entry bytes (plus 16 more when soundVersion == 1)
// already consumed; the remainder of the payload is parsed as child boxes.
initContainer(dataSource, contentSize - 28 - (soundVersion == 1 ? 16 : 0)
/**
 * Renders this sample entry's fields and child boxes for debugging.
 *
 * @return a human-readable description of this audio sample entry
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("AudioSampleEntry{");
    sb.append("bytesPerSample=").append(bytesPerSample);
    sb.append(", bytesPerFrame=").append(bytesPerFrame);
    sb.append(", bytesPerPacket=").append(bytesPerPacket);
    sb.append(", samplesPerPacket=").append(samplesPerPacket);
    sb.append(", packetSize=").append(packetSize);
    sb.append(", compressionId=").append(compressionId);
    sb.append(", soundVersion=").append(soundVersion);
    sb.append(", sampleRate=").append(sampleRate);
    sb.append(", sampleSize=").append(sampleSize);
    sb.append(", channelCount=").append(channelCount);
    sb.append(", boxes=").append(getBoxes());
    sb.append('}');
    return sb.toString();
}
// Inspect the audio sample entry: codec type, sample size/rate, channel count.
String codecName = ase.getType();
log.debug("Sample size: {}", ase.getSampleSize());
long ats = ase.getSampleRate();
audioChannels = ase.getChannelCount();
log.debug("Channels: {}", audioChannels);
if (ase.getBoxes(ESDescriptorBox.class).size() > 0) {
    ESDescriptorBox esds = ase.getBoxes(ESDescriptorBox.class).get(0);
    if (esds == null) {
        // NOTE(review): get(0) on a non-empty list never yields null, so this
        // branch looks unreachable as written — confirm against the original file.
        log.debug("esds not found in default path");
        AppleWaveBox wave = ase.getBoxes(AppleWaveBox.class).get(0);
        // NOTE(review): get(0) throws IndexOutOfBoundsException on an empty list;
        // this null check cannot guard that case — verify intent.
        if (wave != null) {
            log.debug("wave atom found");
// Pick the common AudioSampleEntry across tracks and track the minimum
// sample rate; mixed sample rates are only allowed as integer multiples.
// NOTE(review): truncated fragment — closing braces after the throw statements
// and around the min-rate update are missing; recover the full method from
// version control before editing.
if (ase == null) {
    ase = (AudioSampleEntry) sampleEntry;
} else if (ase.getSampleRate() != ((AudioSampleEntry) sampleEntry).getSampleRate()) {
    throw new RuntimeException("Multiple SampleEntries and different sample rates is not supported");
if (ase.getSampleRate() < minSampleRate) {
    minSampleRate = ase.getSampleRate();
// Stretch: sample count of this track relative to the reference track.
long sc = testTrack.getSamples().size();
double stretch = (double) sc / refSampleCount;
if (ase == null) {
    ase = (AudioSampleEntry) sampleEntry;
} else if (ase.getSampleRate() != ((AudioSampleEntry) sampleEntry).getSampleRate()) {
    throw new RuntimeException("Multiple SampleEntries and different sample rates is not supported");
// Each track's rate must be an integer multiple of the lowest rate.
double factor = (double) ase.getSampleRate() / (double) minSampleRate;
if (factor != Math.rint(factor)) { // Not an integer
    throw new RuntimeException("Sample rates must be a multiple of the lowest sample rate to create a correct file!");
// Serializes this sample entry: box header first, then the fixed audio
// sample-entry fields into a buffer, then the child boxes.
// NOTE(review): this fragment is garbled/truncated — the allocate(...) call is
// missing its closing parenthesis, the `if` matching the `else` branch
// (presumably `if (soundVersion == 1)` — TODO confirm) is absent, and several
// buffer writes appear lost. Recover the full method from version control
// before editing.
@Override public void getBox(WritableByteChannel writableByteChannel) throws IOException {
    writableByteChannel.write(getHeader());
    // 28 fixed bytes, plus 16 extra when soundVersion == 1
    ByteBuffer byteBuffer = ByteBuffer.allocate(28 + (soundVersion == 1 ? 16 : 0)
    IsoTypeWriter.writeUInt32(byteBuffer, getSampleRate());
    } else {
        // soundVersion 0 apparently stores the rate as 16.16 fixed point — verify
        IsoTypeWriter.writeUInt32(byteBuffer, getSampleRate() << 16);
    writeContainer(writableByteChannel);
// NOTE(review): truncated fragment — initContainer(...) is missing its closing
// parenthesis and the anonymous Box body is cut off; recover the full method
// from version control before editing.
dataSource.read(owmaSpecifics);
addBox(new Box() {
// Skip the 28 fixed sample-entry bytes (plus 16 more when soundVersion == 1)
// already consumed; the remainder of the payload is parsed as child boxes.
initContainer(dataSource, contentSize - 28 - (soundVersion == 1 ? 16 : 0)
/**
 * Renders this sample entry's fields and child boxes for debugging.
 *
 * @return a human-readable description of this audio sample entry
 */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("AudioSampleEntry{");
    sb.append("bytesPerSample=").append(bytesPerSample);
    sb.append(", bytesPerFrame=").append(bytesPerFrame);
    sb.append(", bytesPerPacket=").append(bytesPerPacket);
    sb.append(", samplesPerPacket=").append(samplesPerPacket);
    sb.append(", packetSize=").append(packetSize);
    sb.append(", compressionId=").append(compressionId);
    sb.append(", soundVersion=").append(soundVersion);
    sb.append(", sampleRate=").append(sampleRate);
    sb.append(", sampleSize=").append(sampleSize);
    sb.append(", channelCount=").append(channelCount);
    sb.append(", boxes=").append(getBoxes());
    sb.append('}');
    return sb.toString();
}
private AudioSampleEntry mergeAudioSampleEntries(AudioSampleEntry ase1, AudioSampleEntry ase2) { AudioSampleEntry ase = new AudioSampleEntry(ase2.getType()); if (ase1.getBytesPerFrame() == ase2.getBytesPerFrame()) { ase.setBytesPerFrame(ase1.getBytesPerFrame()); } else { LOG.error("BytesPerFrame differ"); return null; if (ase1.getBytesPerPacket() == ase2.getBytesPerPacket()) { ase.setBytesPerPacket(ase1.getBytesPerPacket()); } else { return null; if (ase1.getBytesPerSample() == ase2.getBytesPerSample()) { ase.setBytesPerSample(ase1.getBytesPerSample()); } else { LOG.error("BytesPerSample differ"); return null; if (ase1.getChannelCount() == ase2.getChannelCount()) { ase.setChannelCount(ase1.getChannelCount()); } else { return null; if (ase1.getPacketSize() == ase2.getPacketSize()) { ase.setPacketSize(ase1.getPacketSize()); } else { LOG.error("ChannelCount differ"); return null;
// Build the sample entry for an Enhanced AC-3 ("ec-3") track.
audioSampleEntry = new AudioSampleEntry("ec-3");
audioSampleEntry.setChannelCount(2); // According to ETSI TS 102 366 Annex F
audioSampleEntry.setSampleRate(samplerate);
audioSampleEntry.setDataReferenceIndex(1);
audioSampleEntry.setSampleSize(16);
// ec3: codec-specific child box — presumably the EC3SpecificBox; confirm
audioSampleEntry.addBox(ec3);
// Inspect the audio sample entry: codec type, sample size/rate, channel count.
String codecName = ase.getType();
log.debug("Sample size: {}", ase.getSampleSize());
long ats = ase.getSampleRate();
audioChannels = ase.getChannelCount();
log.debug("Channels: {}", audioChannels);
if (ase.getBoxes(ESDescriptorBox.class).size() > 0) {
    ESDescriptorBox esds = ase.getBoxes(ESDescriptorBox.class).get(0);
    if (esds == null) {
        // NOTE(review): get(0) on a non-empty list never yields null, so this
        // branch looks unreachable as written — confirm against the original file.
        log.debug("esds not found in default path");
        AppleWaveBox wave = ase.getBoxes(AppleWaveBox.class).get(0);
        // NOTE(review): get(0) throws IndexOutOfBoundsException on an empty list;
        // this null check cannot guard that case — verify intent.
        if (wave != null) {
            log.debug("wave atom found");
/**
 * Creates an AC-3 audio track from the given data source.
 *
 * @param dataSource source of the raw AC-3 bitstream
 * @param lang language code stored in the track metadata
 * @throws IOException if the samples cannot be read from the data source
 */
public AC3TrackImpl(DataSource dataSource, String lang) throws IOException {
    super(dataSource.toString());
    this.dataSource = dataSource;
    // fixed: setLanguage(lang) was previously called twice with the same value
    this.trackMetaData.setLanguage(lang);
    samples = readSamples();
    audioSampleEntry = createAudioSampleEntry();

    trackMetaData.setCreationTime(new Date());
    trackMetaData.setModificationTime(new Date());
    trackMetaData.setTimescale(audioSampleEntry.getSampleRate()); // Audio tracks always use samplerate as timescale
    trackMetaData.setVolume(1);
}
// Build the sample entry for an "mp4a" audio track, taking channel count and
// sample rate from the first parsed frame header.
audioSampleEntry = new AudioSampleEntry("mp4a");
audioSampleEntry.setChannelCount(firstHeader.channelCount);
audioSampleEntry.setSampleRate(firstHeader.sampleRate);
audioSampleEntry.setDataReferenceIndex(1);
audioSampleEntry.setSampleSize(16);
// esds: elementary-stream descriptor box carrying the decoder configuration
audioSampleEntry.addBox(esds);
// Inspect the audio sample entry: codec type, sample size/rate, channel count.
String codecName = ase.getType();
log.debug("Sample size: {}", ase.getSampleSize());
long ats = ase.getSampleRate();
audioChannels = ase.getChannelCount();
log.debug("Channels: {}", audioChannels);
if (ase.getBoxes(ESDescriptorBox.class).size() > 0) {
    ESDescriptorBox esds = ase.getBoxes(ESDescriptorBox.class).get(0);
    if (esds == null) {
        // NOTE(review): get(0) on a non-empty list never yields null, so this
        // branch looks unreachable as written — confirm against the original file.
        log.debug("esds not found in default path");
        AppleWaveBox wave = ase.getBoxes(AppleWaveBox.class).get(0);
        // NOTE(review): get(0) throws IndexOutOfBoundsException on an empty list;
        // this null check cannot guard that case — verify intent.
        if (wave != null) {
            log.debug("wave atom found");