/**
 * Write the final part of file header.
 *
 * <p>Emits the sync marker bytes and flushes so the complete header reaches
 * the underlying stream before any record data is appended.
 *
 * @throws IOException if writing or flushing the underlying stream fails
 */
void finalizeFileHeader() throws IOException {
    out.write(sync); // write the sync bytes
    out.flush(); // flush header
}
/**
 * Write the final part of file header.
 *
 * <p>Writes the sync marker and flushes; after this call the header is fully
 * on the wire and record writing may begin.
 *
 * @throws IOException if writing or flushing the underlying stream fails
 */
void finalizeFileHeader() throws IOException {
    out.write(sync); // write the sync bytes
    out.flush(); // flush header
}
/**
 * Flushes every output stream whose slot bit is set in {@code opSet}.
 *
 * <p>Failures are collected per stream (via {@code osException}) so that one
 * bad stream does not stop the remaining streams from being flushed; any
 * collected exceptions are rethrown together by {@code mayThrow} at the end.
 *
 * @throws IOException if one or more underlying flushes failed
 */
@Override
public void flush() throws IOException {
    final List<IOException> failures = new ArrayList<IOException>();
    int slot = opSet.nextSetBit(0);
    while (slot >= 0) {
        try {
            outputStreams[slot].flush();
        } catch (Throwable t) {
            // Record the failure for this slot and keep flushing the rest.
            osException(slot, "flush", t, failures);
        }
        slot = opSet.nextSetBit(slot + 1);
    }
    mayThrow(failures);
}
/**
 * Persists a *.metadata file to a specific directory in HDFS.
 *
 * <p>Fix: the stream is now closed via try-with-resources, so it is released
 * even when {@code setPermission} or the write fails part-way through — the
 * original leaked the open stream on any exception between {@code create()}
 * and {@code close()}.
 *
 * @param directoryPath where to write the metadata file.
 * @param outputFs {@link org.apache.hadoop.fs.FileSystem} where to write the file
 * @param metadataFileName name of the file (including extension)
 * @param metadata {@link voldemort.store.readonly.ReadOnlyStorageMetadata} to persist on HDFS
 * @throws IOException if the FileSystem operations fail
 */
private void writeMetadataFile(Path directoryPath,
                               FileSystem outputFs,
                               String metadataFileName,
                               ReadOnlyStorageMetadata metadata) throws IOException {
    Path metadataPath = new Path(directoryPath, metadataFileName);
    try (FSDataOutputStream metadataStream = outputFs.create(metadataPath)) {
        outputFs.setPermission(metadataPath, new FsPermission(HADOOP_FILE_PERMISSION));
        // NOTE(review): getBytes() uses the platform default charset; consider
        // getBytes(StandardCharsets.UTF_8) for a deterministic on-disk encoding.
        metadataStream.write(metadata.toJsonString().getBytes());
        metadataStream.flush();
    } // close() happens here even if write/setPermission threw
}
/**
 * {@inheritDoc}
 *
 * <p>Synchronized flush of the wrapped stream {@code os}. The call is
 * bracketed by {@code writeStart()}/{@code writeEnd()} — presumably
 * in-progress-write accounting or metrics; confirm against their definitions.
 *
 * @throws IOException if flushing the underlying stream fails
 */
@Override
public synchronized void flush() throws IOException {
    writeStart();
    try {
        os.flush();
    } finally {
        writeEnd(); // always balance writeStart(), even when flush throws
    }
}
/**
 * Flushes any buffered records, releases the column buffers, closes the
 * underlying stream, and logs per-column plain vs. compressed byte totals.
 *
 * @throws IOException if flushing records or closing the stream fails
 */
public synchronized void close() throws IOException {
    if (bufferedRecords > 0) {
        flushRecords(); // push pending records out before tearing down
    }
    clearColumnBuffers();
    if (out != null) {
        // Close the underlying stream if we own it...
        out.flush();
        out.close();
        out = null; // mark closed; guards against a double-close
    }
    for (int i = 0; i < columnNumber; i++) {
        LOG.info("Column#" + i + " : Plain Total Column Value Length: "
            + plainTotalColumnLength[i] + ", Compr Total Column Value Length: "
            + comprTotalColumnLength[i]);
    }
}
} // closes the enclosing class (class header is outside this view)
@Override public void sync(boolean forceSync) throws IOException { FSDataOutputStream fsdos = this.output; if (fsdos == null) { return; // Presume closed } fsdos.flush(); if (forceSync) { fsdos.hsync(); } else { fsdos.hflush(); } }
/**
 * Drains serializer-buffered data through to the stream, then hflush/hsync so
 * everything written so far becomes visible to readers.
 *
 * @throws IOException on any flush failure
 */
@Override
public void sync() throws IOException {
    serializer.flush(); // drain the serializer's internal buffer first
    outStream.flush();
    hflushOrSync(outStream);
}
@Nullable
@Override
public Object call() throws Exception {
    // Writing then flushing should surface the IOException from the broken stream.
    s.write("test".getBytes());
    s.flush(); // Flush data to the broken output stream.
    return null;
}
}, IOException.class, null);
@Nullable
@Override
public Object call() throws Exception {
    // Write then flush; the flush is expected to raise the IOException
    // that the surrounding assertion (IOException.class) checks for.
    s.write("test".getBytes());
    s.flush();
    return null;
}
}, IOException.class, null);
/**
 * Flushes pending records, clears the column buffers, closes the underlying
 * stream, and logs per-column plain/compressed value-length statistics.
 *
 * @throws IOException if flushing records or closing the stream fails
 */
public synchronized void close() throws IOException {
    if (bufferedRecords > 0) {
        flushRecords(); // write out anything still buffered
    }
    clearColumnBuffers();
    if (out != null) {
        // Close the underlying stream if we own it...
        out.flush();
        out.close();
        out = null; // null out so a second close() is a no-op on the stream
    }
    for (int i = 0; i < columnNumber; i++) {
        LOG.info("Column#" + i + " : Plain Total Column Value Length: "
            + plainTotalColumnLength[i] + ", Compr Total Column Value Length: "
            + comprTotalColumnLength[i]);
    }
}
} // closes the enclosing class (class header is outside this view)
/**
 * Writes all buffered records to the final output path and closes the stream.
 *
 * <p>NOTE(review): the {@code abort} flag is ignored — records are persisted
 * even on abort; confirm that is intentional.
 *
 * @param abort whether the task is being aborted (currently unused here)
 * @throws IOException if creating or writing the output file fails
 */
@Override
public void close(boolean abort) throws IOException {
    if (out == null) {
        // Lazily create the file so even an empty record set yields a file.
        FileSystem fs = finalOutPath.getFileSystem(jc);
        out = fs.create(finalOutPath);
    }
    for (Writable r : records)
        r.write(out);
    records.clear();
    out.flush();
    out.close();
}
}; // end of the enclosing anonymous class expression
/**
 * Flushes all buffered data, syncs it to the filesystem, and closes the stream.
 *
 * @throws IOException on any flush or close failure
 */
@Override
public void close() throws IOException {
    serializer.flush();
    serializer.beforeClose(); // serializer hook before the stream closes — see its definition
    outStream.flush();
    hflushOrSync(outStream); // push the final bytes out before closing
    outStream.close();
    unregisterCurrentStream();
}
/**
 * Creates the hbck lock file in the tmp dir and writes identifying text into it.
 *
 * @return an open stream on the lock file, or {@code null} if another process
 *     is already creating it (i.e. the lock is held elsewhere)
 * @throws IOException if the lock file cannot be created for any other reason
 */
@Override
public FSDataOutputStream call() throws IOException {
    try {
        FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);
        FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,
            HConstants.DATA_FILE_UMASK_KEY);
        Path tmpDir = getTmpDir(conf);
        this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);
        fs.mkdirs(tmpDir);
        final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);
        out.writeBytes(InetAddress.getLocalHost().toString());
        // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.
        out.writeBytes(" Written by an hbase-2.x Master to block an "
            + "attempt by an hbase-1.x HBCK tool making modification to state. "
            + "See 'HBCK must match HBase server version' in the hbase refguide.");
        out.flush();
        return out;
    } catch (RemoteException e) {
        // AlreadyBeingCreatedException arrives wrapped in RemoteException; it
        // means another process holds the lock — not an error, return null.
        if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
            return null;
        } else {
            throw e;
        }
    }
}
/**
 * Write and flush the file header.
 *
 * <p>Layout, in order: version bytes, key class name, value class name,
 * compressed flag, block-compressed flag, codec class name (only when
 * compressed), user metadata, sync marker. The order is part of the on-disk
 * format and must not change.
 *
 * @throws IOException if writing to the underlying stream fails
 */
private void writeFileHeader() throws IOException {
    out.write(VERSION);
    Text.writeString(out, keyClass.getName());
    Text.writeString(out, valClass.getName());
    out.writeBoolean(this.isCompressed());
    out.writeBoolean(this.isBlockCompressed());
    if (this.isCompressed()) {
        // Codec class name is present only for compressed files.
        Text.writeString(out, (codec.getClass()).getName());
    }
    this.metadata.write(out);
    out.write(sync); // write the sync bytes
    out.flush(); // flush header
}
/**
 * Flushes and syncs data written so far, finishing the compression stream so
 * the buffered compressed bytes are actually emitted before the sync.
 *
 * @throws IOException on any flush or sync failure
 */
@Override
public void sync() throws IOException {
    // We must use finish() and resetState() here -- flush() is apparently not
    // supported by the compressed output streams (it's a no-op).
    // Also, since resetState() writes headers, avoid calling it without an
    // additional write/append operation.
    // Note: There are bugs in Hadoop & JDK w/ pure-java gzip; see HADOOP-8522.
    serializer.flush();
    if (!isFinished) {
        cmpOut.finish(); // emit remaining compressed data; do only once
        isFinished = true;
    }
    fsOut.flush();
    hflushOrSync(this.fsOut);
}
/**
 * Writes all buffered records to the output file and flushes the stream.
 *
 * <p>The output file is created lazily on the first flush; an empty record
 * list still produces (and flushes) the file.
 *
 * @throws IOException if creating, writing, or flushing the file fails
 */
@Override
public void flush() throws IOException {
    if (out == null) {
        // First flush: open the destination file now.
        FileSystem fs = path.getFileSystem(options.getConfiguration());
        out = fs.create(path);
    }
    for (Writable record : records) {
        record.write(out);
    }
    records.clear(); // records are on the stream; drop the buffer
    out.flush();
}
/**
 * Close the file.
 *
 * <p>Closes the serializers, returns the compressor to the pool, and then
 * closes the underlying stream only when this writer owns it; otherwise it
 * is merely flushed and left open for the caller.
 *
 * @throws IOException if closing or flushing the underlying stream fails
 */
@Override
public synchronized void close() throws IOException {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
        compressedValSerializer.close();
    }
    CodecPool.returnCompressor(compressor);
    compressor = null; // returned to the pool; must not be reused
    if (out != null) {
        // Close the underlying stream iff we own it...
        if (ownOutputStream) {
            out.close();
        } else {
            out.flush();
        }
        out = null; // mark this writer closed either way
    }
}
/**
 * {@inheritDoc}
 *
 * <p>Verifies the client becomes operational again after server nodes (except
 * one) restart: new FS operations succeed, writes on the pre-restart stream
 * go to the broken stream, and the file created before the restart still exists.
 */
@Test
@Override
public void testClientReconnect() throws Exception {
    Path filePath = new Path(PRIMARY_URI, "file1");
    final FSDataOutputStream s = fs.create(filePath); // Open the stream before stopping IGFS.
    try {
        restartServerNodesExceptOne();
        // Check that client is again operational.
        assertTrue(fs.mkdirs(new Path(PRIMARY_URI, "dir1/dir2")));
        s.write("test".getBytes());
        s.flush(); // Flush data to the broken output stream.
        assertTrue(fs.exists(filePath));
    } finally {
        U.closeQuiet(s); // Safety.
    }
}
/**
 * Finishes the compression stream, syncs everything to the filesystem, and
 * releases the compressor and stream resources.
 *
 * @throws IOException on any flush or close failure
 */
@Override
public void close() throws IOException {
    serializer.flush();
    serializer.beforeClose(); // serializer hook before the stream closes — see its definition
    if (!isFinished) {
        cmpOut.finish(); // emit any remaining compressed bytes; only once
        isFinished = true;
    }
    fsOut.flush();
    hflushOrSync(fsOut); // push the final bytes out before closing
    cmpOut.close();
    if (compressor != null) {
        CodecPool.returnCompressor(compressor); // return to pool for reuse
        compressor = null;
    }
    unregisterCurrentStream();
}