/**
 * Writes a four-byte {@code int} by delegating to the wrapped
 * {@code HeapDataOutputStream} ({@code this.hdos}).
 *
 * @param value the {@code int} value to write
 */
public void writeInt(int value) { this.hdos.writeInt(value); }
/**
 * Forwards a four-byte {@code int} to the underlying stream
 * {@code this.hdos}; the byte encoding is whatever that stream produces.
 *
 * @param value the {@code int} value to write
 */
public void writeInt(int value) { this.hdos.writeInt(value); }
/**
 * Writes an {@code int} value, which is comprised of four bytes, to the
 * output stream in big-endian order:
 * <pre>{@code
 *   (byte)(0xff & (v >> 24))
 *   (byte)(0xff & (v >> 16))
 *   (byte)(0xff & (v >> 8))
 *   (byte)(0xff & v)
 * }</pre>
 * The bytes written may be read back with {@code DataInput.readInt}, which
 * returns an {@code int} equal to {@code v}.
 *
 * @param v the {@code int} value to be written
 */
public final void writeInt(int v) {
  if (DEBUG) debug(" int=" + v);
  // ensureCapacity may switch this stream into overflow mode, so the
  // overflowBuf check must come after it — presumably; keep this order.
  ensureCapacity(4);
  if (this.overflowBuf == null) {
    this.buffer.putInt(v);
  } else {
    this.overflowBuf.writeInt(v);
  }
}
/** * Writes an <code>int</code> value, which is * comprised of four bytes, to the output stream. * The byte values to be written, in the order * shown, are: * <p><pre><code> * (byte)(0xff & (v >> 24)) * (byte)(0xff & (v >> 16)) * (byte)(0xff & (v >>    8)) * (byte)(0xff & v) * </code></pre><p> * The bytes written by this method may be read * by the <code>readInt</code> method of interface * <code>DataInput</code> , which will then * return an <code>int</code> equal to <code>v</code>. * * @param v the <code>int</code> value to be written. */ public final void writeInt(int v) { // if (logger.isTraceEnabled()) logger.trace(" int={}", v); ensureCapacity(4); if (this.overflowBuf != null) { this.overflowBuf.writeInt(v); return; } this.buffer.putInt(v); }
/** * Write a clear with an RVV record. */ private void writeClearRecord(DiskRegionView dr, RegionVersionVector rvv) { try { HeapDataOutputStream hdos = new HeapDataOutputStream(32, Version.CURRENT); hdos.write(IFREC_CLEAR_REGION_WITH_RVV_ID); writeDiskRegionID(hdos, dr.getId()); //We only need the memberToVersionMap for clear purposes Map<DiskStoreID, RegionVersionHolder> memberToVersion = rvv.getMemberToVersion(); hdos.writeInt(memberToVersion.size()); for(Map.Entry<DiskStoreID, RegionVersionHolder> entry : memberToVersion.entrySet()) { InternalDataSerializer.invokeToData(entry.getKey(), hdos); synchronized(entry.getValue()) { InternalDataSerializer.invokeToData(entry.getValue(), hdos); } } hdos.write(END_OF_RECORD_ID); writeIFRecord(hdos, false); // don't do stats for these small records } catch (IOException ex) { DiskAccessException dae = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent); if (!this.compactInProgress) { this.parent.handleDiskAccessException(dae, true); } throw dae; } }
/** * Write a clear with an RVV record. */ private void writeClearRecord(DiskRegionView dr, RegionVersionVector rvv) { try { HeapDataOutputStream hdos = new HeapDataOutputStream(32, Version.CURRENT); hdos.write(IFREC_CLEAR_REGION_WITH_RVV_ID); writeDiskRegionID(hdos, dr.getId()); //We only need the memberToVersionMap for clear purposes Map<DiskStoreID, RegionVersionHolder> memberToVersion = rvv.getMemberToVersion(); hdos.writeInt(memberToVersion.size()); for(Map.Entry<DiskStoreID, RegionVersionHolder> entry : memberToVersion.entrySet()) { InternalDataSerializer.invokeToData(entry.getKey(), hdos); synchronized(entry.getValue()) { InternalDataSerializer.invokeToData(entry.getValue(), hdos); } } hdos.write(END_OF_RECORD_ID); writeIFRecord(hdos, false); // don't do stats for these small records } catch (IOException ex) { DiskAccessException dae = new DiskAccessException(LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex), this.parent); if (!this.compactInProgress) { this.parent.handleDiskAccessException(dae); } throw dae; } }
private static OffHeapByteSource prepareValueForDelta(Delta delta, MemoryAllocator ma) { HeapDataOutputStream hdos = new HeapDataOutputStream(); final boolean isListOfDeltas; try { if (delta instanceof ListOfDeltas) { List<Delta> deltas = ((ListOfDeltas) delta).getDeltas(); isListOfDeltas = true; // TODO:Asif : use compact int hdos.writeInt(deltas.size()); for (Delta aDelta : deltas) { SerializableDelta sd = (SerializableDelta) aDelta; InternalDataSerializer.invokeToData(sd, hdos); } } else { isListOfDeltas = false; InternalDataSerializer.invokeToData((SerializableDelta)delta, hdos); } } catch (IOException ioe) { throw new GemFireXDRuntimeException(ioe); } byte[] data = hdos.toByteArray(); OffHeapByteSource chunk = (OffHeapByteSource) ma.allocate(data.length, isListOfDeltas ? OffHeapDeltas.TYPE : OffHeapDelta.TYPE); chunk.writeBytes(0, data); return chunk; }
private static OffHeapByteSource prepareValueForDelta(Delta delta, MemoryAllocator ma) { HeapDataOutputStream hdos = new HeapDataOutputStream(); final boolean isListOfDeltas; try { if (delta instanceof ListOfDeltas) { List<Delta> deltas = ((ListOfDeltas) delta).getDeltas(); isListOfDeltas = true; // TODO:Asif : use compact int hdos.writeInt(deltas.size()); for (Delta aDelta : deltas) { SerializableDelta sd = (SerializableDelta) aDelta; InternalDataSerializer.invokeToData(sd, hdos); } } else { isListOfDeltas = false; InternalDataSerializer.invokeToData((SerializableDelta)delta, hdos); } } catch (IOException ioe) { throw new GemFireXDRuntimeException(ioe); } byte[] data = hdos.toByteArray(); OffHeapByteSource chunk = (OffHeapByteSource) ma.allocate(data.length, isListOfDeltas ? OffHeapDeltas.TYPE : OffHeapDelta.TYPE); chunk.writeBytes(0, data); return chunk; }
private static OffHeapByteSource prepareValueForDelta(Delta delta, MemoryAllocator ma) { HeapDataOutputStream hdos = new HeapDataOutputStream(); final boolean isListOfDeltas; try { if (delta instanceof ListOfDeltas) { List<Delta> deltas = ((ListOfDeltas) delta).getDeltas(); isListOfDeltas = true; // TODO:Asif : use compact int hdos.writeInt(deltas.size()); for (Delta aDelta : deltas) { SerializableDelta sd = (SerializableDelta) aDelta; InternalDataSerializer.invokeToData(sd, hdos); } } else { isListOfDeltas = false; InternalDataSerializer.invokeToData((SerializableDelta)delta, hdos); } } catch (IOException ioe) { throw new GemFireXDRuntimeException(ioe); } byte[] data = hdos.toByteArray(); OffHeapByteSource chunk = (OffHeapByteSource) ma.allocate(data.length, isListOfDeltas ? OffHeapDeltas.TYPE : OffHeapDelta.TYPE); chunk.writeBytes(0, data); return chunk; }
/**
 * Writes a canonical-member-id record mapping {@code id} to the serialized
 * {@code object}, terminated by {@code END_OF_RECORD_ID}.
 *
 * @param id     the canonical id being recorded
 * @param object the member object serialized alongside the id
 * @throws DiskAccessException wrapping any {@link IOException} from the
 *         write path
 */
private void writeCanonicalId(int id, Object object) {
  try {
    HeapDataOutputStream record = new HeapDataOutputStream(32, Version.CURRENT);
    record.write(IFREC_ADD_CANONICAL_MEMBER_ID);
    record.writeInt(id);
    DataSerializer.writeObject(object, record);
    record.write(END_OF_RECORD_ID);
    writeIFRecord(record, true);
  } catch (IOException ex) {
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0
            .toLocalizedString(ex),
        this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae, true);
    }
    throw dae;
  }
}
/**
 * Appends an {@code IFREC_ADD_CANONICAL_MEMBER_ID} record: the integer id,
 * the serialized object, then the end-of-record marker.
 *
 * @param id     canonical id to persist
 * @param object member object associated with the id
 * @throws DiskAccessException wrapping any {@link IOException} raised while
 *         writing
 */
private void writeCanonicalId(int id, Object object) {
  try {
    HeapDataOutputStream out = new HeapDataOutputStream(32, Version.CURRENT);
    out.write(IFREC_ADD_CANONICAL_MEMBER_ID);
    out.writeInt(id);
    DataSerializer.writeObject(object, out);
    out.write(END_OF_RECORD_ID);
    writeIFRecord(out, true);
  } catch (IOException ex) {
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0
            .toLocalizedString(ex),
        this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae);
    }
    throw dae;
  }
}
// NOTE(review): fragment of a disk-region record writer. The write order
// below IS the on-disk format — do not reorder these statements.
bb.write(drv.getLruAlgorithm()); bb.write(drv.getLruAction()); bb.writeInt(drv.getLruLimit()); // LRU eviction settings
bb.writeInt(drv.getConcurrencyLevel()); bb.writeInt(drv.getInitialCapacity()); bb.writeFloat(drv.getLoadFactor()); // region map sizing
bb.write((byte)(drv.getStatisticsEnabled()?1:0)); // boolean encoded as one byte
bb.writeLong(drv.getUUID()); bb.writeUTF(drv.getPartitionName()); bb.writeInt(drv.getStartingBucketId()); // PR identity
bb.writeBoolean(false); // griddb flag, preserve for backwards compatibility
// NOTE(review): fragment of a disk-region record writer (newer variant that
// persists compressor and versioning flag). Statement order is the on-disk
// format — do not reorder.
bb.write(drv.getLruAlgorithm()); bb.write(drv.getLruAction()); bb.writeInt(drv.getLruLimit()); // LRU eviction settings
bb.writeInt(drv.getConcurrencyLevel()); bb.writeInt(drv.getInitialCapacity()); bb.writeFloat(drv.getLoadFactor()); // region map sizing
bb.write((byte)(drv.getStatisticsEnabled()?1:0)); // boolean encoded as one byte
final EnumSet<DiskRegionFlag> flags = drv.getFlags(); // captured here, written below
bb.writeUTF(drv.getPartitionName()); bb.writeInt(drv.getStartingBucketId()); // PR identity
bb.writeUTF(drv.getCompressorClassName() == null ? "" : drv.getCompressorClassName()); // empty string means "no compressor"
bb.writeBoolean(flags.contains(DiskRegionFlag.IS_WITH_VERSIONING));
// Serialize the port as a 4-byte int — presumably part of a toData/
// writeExternal method; the reader must consume it with readInt.
out.writeInt(getPort());
/**
 * Writes a partitioned-region create record: the record id, the PR name,
 * its total bucket count, the colocated-with region name (empty string when
 * none), and the end-of-record marker.
 *
 * @param name   name of the partitioned region
 * @param config persistent PR configuration to record
 * @throws DiskAccessException wrapping any {@link IOException} from the
 *         write path
 */
private void writePRCreate(String name, PRPersistentConfig config) {
  try {
    String colocated = config.getColocatedWith();
    if (colocated == null) {
      colocated = "";
    }
    // record id (1) + name + bucket count (4) + colocated name + terminator (1)
    int capacity = 1 + estimateByteSize(name) + 4 + estimateByteSize(colocated) + 1;
    HeapDataOutputStream out = new HeapDataOutputStream(capacity, Version.CURRENT);
    out.write(IFREC_PR_CREATE);
    out.writeUTF(name);
    out.writeInt(config.getTotalNumBuckets());
    out.writeUTF(colocated);
    out.write(END_OF_RECORD_ID);
    writeIFRecord(out, false);
  } catch (IOException ex) {
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0
            .toLocalizedString(ex),
        this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae);
    }
    throw dae;
  }
}
/**
 * Persists an {@code IFREC_PR_CREATE} record for a partitioned region:
 * name, total bucket count, and colocated-with region name (empty string
 * when not colocated), terminated by {@code END_OF_RECORD_ID}.
 *
 * @param name   the partitioned region's name
 * @param config its persistent configuration
 * @throws DiskAccessException wrapping any {@link IOException} raised while
 *         writing
 */
private void writePRCreate(String name, PRPersistentConfig config) {
  try {
    String colocatedName = config.getColocatedWith();
    colocatedName = (colocatedName != null) ? colocatedName : "";
    // 1 byte record id + name + 4-byte bucket count + colocated name + 1 byte terminator
    HeapDataOutputStream record = new HeapDataOutputStream(
        1 + estimateByteSize(name) + 4 + estimateByteSize(colocatedName) + 1,
        Version.CURRENT);
    record.write(IFREC_PR_CREATE);
    record.writeUTF(name);
    record.writeInt(config.getTotalNumBuckets());
    record.writeUTF(colocatedName);
    record.write(END_OF_RECORD_ID);
    writeIFRecord(record, false);
  } catch (IOException ex) {
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0
            .toLocalizedString(ex),
        this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae, true);
    }
    throw dae;
  }
}