Refine search
/**
 * Serializes the given Spec and persists it to the target file, replacing any
 * existing content at that path.
 *
 * @param specPath destination file for the serialized spec
 * @param spec spec object to serialize and write
 * @throws IOException if serialization or the filesystem write fails
 */
protected void writeSpecToFile(Path specPath, Spec spec) throws IOException {
  // Serialize up front so a serialization failure never truncates an existing file.
  byte[] payload = this.specSerDe.serialize(spec);
  // 'true' -> overwrite; the stream is closed automatically even if write() throws.
  try (FSDataOutputStream out = fs.create(specPath, true)) {
    out.write(payload);
  }
}
/**
 * Writes the watermark metadata file into the target directory.
 *
 * <p>Builds the filesystem URI, removes any stale watermark file, then writes the
 * serialized watermark as UTF-8. Marks this unit completed only after a successful write.
 *
 * @throws IOException if the URI is malformed or the filesystem write fails
 */
@Override
public void execute() throws IOException {
  URI fsURI;
  try {
    fsURI = new URI(this.fsUriString);
  } catch (URISyntaxException e) {
    throw new IOException("can not build URI " + this.fsUriString, e);
  }
  FileSystem fs = FileSystem.get(fsURI, new Configuration());
  Path filenamePath = new Path(this.targetDirPath, ReplicaHadoopFsEndPoint.WATERMARK_FILE);
  if (fs.exists(filenamePath)) {
    fs.delete(filenamePath, false);
  }
  // Fix: previously the stream was closed manually and leaked if write() threw.
  try (FSDataOutputStream fout = fs.create(filenamePath)) {
    fout.write(WatermarkMetadataUtil.serialize(this.watermark).getBytes(Charsets.UTF_8));
  }
  this.completed = true;
}
@Override public String writeSlice(String workingDir, AppendDictSliceKey key, AppendDictNode slice) throws IOException { //write new slice String sliceFile = IndexFormatV2.sliceFileName(key); Path path = new Path(workingDir, sliceFile); logger.trace("write slice with key {} into file {}", key, path); try (FSDataOutputStream out = fileSystem.create(path, true, BUFFER_SIZE)) { byte[] bytes = slice.buildTrieBytes(); out.write(bytes); } return sliceFile; }
/** * Write the .regioninfo file on-disk. */ public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs, final Path regionInfoDir, RegionInfo regionInfo) throws IOException { final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo); Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR); // First check to get the permissions FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // Write the RegionInfo file content FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null); try { out.write(content); } finally { out.close(); } }
/** @throws Exception If failed. */
@Test
public void testZeroReplicationFactor() throws Exception {
  // This test doesn't make sense for any mode except of PRIMARY.
  if (mode == PRIMARY) {
    Path igfsHome = new Path(primaryFsUri);
    Path file = new Path(igfsHome, "someFile");
    // Create a 1 MiB file with an explicit replication factor of 1.
    try (FSDataOutputStream out = fs.create(file, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()),
        Options.CreateOpts.repFac((short)1))) {
      out.write(new byte[1024 * 1024]);
    }
    IgniteFileSystem igfs = grid(0).fileSystem("igfs");
    IgfsPath filePath = new IgfsPath("/someFile");
    IgfsFile fileInfo = igfs.info(filePath);
    // With replication factor 1 the whole file should map to a single block
    // location hosted by exactly one node.
    Collection<IgfsBlockLocation> locations = igfs.affinity(filePath, 0, fileInfo.length());
    assertEquals(1, locations.size());
    IgfsBlockLocation location = F.first(locations);
    assertEquals(1, location.nodeIds().size());
  }
}
Path tmpMapFile = new Path(this.versionIdentifier.getParent(), this.versionIdentifier.getName() + TMP_SUFFIX); try (FSDataOutputStream fsout = this.fs.create(tmpMapFile)) { for (String key : this.sanitizedNameToDatasetURNMap.keySet()) { fsout.write(key.getBytes(Charsets.UTF_8)); fsout.writeByte('\n'); fsout.write(this.sanitizedNameToDatasetURNMap.get(key).getBytes(Charsets.UTF_8)); fsout.writeByte('\n'); new Path(this.versionIdentifier.getParent(), this.versionIdentifier.getName() + "_" + System.currentTimeMillis()))) { throw new IOException(
/**
 * Creates the pair of output streams backing a checksummed file: the raw data
 * stream and its companion checksum ("sum") stream, and writes the checksum
 * file header (version marker followed by bytes-per-sum).
 *
 * @throws IOException if either underlying stream cannot be created or the
 *         checksum header cannot be written
 */
public ChecksumFSOutputSummer(ChecksumFileSystem fs, Path file, boolean overwrite, int bufferSize,
    short replication, long blockSize, Progressable progress, FsPermission permission)
    throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite, bufferSize,
      replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  try {
    this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), permission, true,
        sumBufferSize, replication, blockSize, null);
    sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
    sums.writeInt(bytesPerSum);
  } catch (IOException e) {
    // Fix: don't leak the data stream when the checksum stream fails to open
    // or the header write fails.
    try {
      datas.close();
    } catch (IOException closeEx) {
      e.addSuppressed(closeEx);
    }
    throw e;
  }
}
/** * Write the .regioninfo file on-disk. */ private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs, final Path regionInfoFile, final byte[] content) throws IOException { // First check to get the permissions FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // Write the RegionInfo file content FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null); try { out.write(content); } finally { out.close(); } }
/**
 * Copies the given CSV stream to the HDFS path held in {@code path}.
 *
 * <p>Refuses to overwrite an existing file unless {@code force} is set. The input
 * stream is NOT closed here — that remains the caller's responsibility.
 *
 * @param csv source stream to copy
 * @throws IOException if the filesystem interaction fails
 * @throws IllegalArgumentException if the target exists and {@code force} is false
 */
protected void serveHdfs(InputStream csv) throws IOException {
  // Bare s3n://bucket URIs need a trailing slash to be treated as a directory root.
  if (FSUtils.isBareS3NBucketWithoutTrailingSlash(path)) {
    path += "/";
  }
  Path p = new Path(path);
  org.apache.hadoop.fs.FileSystem fs =
      org.apache.hadoop.fs.FileSystem.get(p.toUri(), PersistHdfs.CONF);
  if (!force && fs.exists(p)) {
    throw new IllegalArgumentException("File " + path + " already exists.");
  }
  fs.mkdirs(p.getParent());
  // try-with-resources closes the stream even if the copy fails mid-way.
  try (FSDataOutputStream s = fs.create(p)) {
    byte[] buffer = new byte[1024];
    int len;
    while ((len = csv.read(buffer)) > 0) {
      s.write(buffer, 0, len);
    }
  }
  // Fix: log success only after the copy actually completed; previously this ran
  // in a finally block and claimed success even when the write threw.
  Log.info("Key '" + src_key.toString() + "' was written to " + path.toString() + ".");
}
private void createFiles(FileSystem fs, Path parentDir, int numOfFiles) throws IOException { Random random = new Random(); for (int i = 0; i < numOfFiles; i++) { int xMega = 1 + random.nextInt(3); // size of each file is between 1~3M try (FSDataOutputStream fsdos = fs.create(new Path(parentDir, "file-" + i))) { for (int m = 0; m < xMega; m++) { byte[] M = new byte[1024 * 1024]; random.nextBytes(M); fsdos.write(M); } } } }
@Override public void iterationDone(Model model, int iteration, int epoch) { if (printIterations <= 0) { printIterations = 1; } String newScore = ""; if (iteration % printIterations == 0) { double score = model.score(); newScore += "Score at iteration {" + iteration + "} is {" + score + "}"; log.info(newScore); } FileSystem nfs = null; try { nfs = CommonUtils.openHdfsConnect(); Path path = new Path(pathStr); //although using append function isn't best ways, but currently it still solve the score log existing or not FSDataOutputStream out = nfs.append(path);//. .create(path); out.write(newScore.getBytes()); out.write("\n".getBytes()); out.hsync(); out.close(); CommonUtils.closeHdfsConnect(nfs); } catch (RemoteException e) { if (nfs != null) { CommonUtils.closeHdfsConnect(nfs); } } catch (Exception e) { throw new RuntimeException(e); } }
/**
 * This creates a version file in {@code deltaOrBaseDir}
 * @param deltaOrBaseDir - where to create the version file
 */
public static void writeVersionFile(Path deltaOrBaseDir, FileSystem fs) throws IOException {
  Path formatFile = getVersionFilePath(deltaOrBaseDir);
  // Only write when missing; create(..., false) would fail on an existing file anyway,
  // so the exists() pre-check avoids an exception on the common re-visit case.
  if(!fs.exists(formatFile)) {
    try (FSDataOutputStream strm = fs.create(formatFile, false)) {
      // Payload is the UTF-8 rendering of the ORC_ACID_VERSION number.
      strm.write(UTF8.encode(String.valueOf(ORC_ACID_VERSION)).array());
    } catch (IOException ioe) {
      // Log with context, then propagate — callers decide whether this is fatal.
      LOG.error("Failed to create " + formatFile + " due to: " + ioe.getMessage(), ioe);
      throw ioe;
    }
  }
}
public static Path getVersionFilePath(Path deltaOrBase) {
/**
 * Ensures a dummy file exists under the MR scratch directory and returns the
 * directory containing it. If the file already exists it is reused.
 *
 * @return path of the directory holding the dummy file
 * @throws SemanticException wrapping any filesystem failure
 */
private Path createDummyFile() throws SemanticException {
  Path dummyPath = new Path(ctx.getMRScratchDir(), "dummy_path");
  Path dummyFile = new Path(dummyPath, "dummy_file");
  try {
    FileSystem fs = dummyFile.getFileSystem(conf);
    if (fs.exists(dummyFile)) {
      return dummyPath;
    }
    // try-with-resources replaces the explicit close() + IOUtils.closeStream() pair,
    // which double-closed the stream on the success path.
    try (FSDataOutputStream fout = fs.create(dummyFile)) {
      fout.write(1);
    }
  } catch (IOException e) {
    throw new SemanticException(e);
  }
  return dummyPath;
}
private void createFiles(FileSystem fs, Path parentDir, int numOfFiles) throws IOException { Random random = new Random(); for (int i = 0; i < numOfFiles; i++) { int xMega = 1 + random.nextInt(3); // size of each file is between 1~3M try (FSDataOutputStream fsdos = fs.create(new Path(parentDir, "file-" + i))) { for (int m = 0; m < xMega; m++) { byte[] M = new byte[1024 * 1024]; random.nextBytes(M); fsdos.write(M); } } } }
/** @throws Exception If failed. */
@Test
public void testCreateBase() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path dir = new Path(fsHome, "/someDir1/someDir2/someDir3");
  Path file = new Path(dir, "someFile");
  assertPathDoesNotExist(fs, file);
  // NOTE(review): (short)644 is DECIMAL, not octal 0644 — the resulting permission
  // bits are 1204 octal. The test stays self-consistent (both sides of the final
  // assert use fsPerm), but confirm whether 0644 was intended.
  FsPermission fsPerm = new FsPermission((short)644);
  FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(fsPerm));
  // Try to write something in file.
  os.write("abc".getBytes());
  os.close();
  // Check file status.
  FileStatus fileStatus = fs.getFileStatus(file);
  assertFalse(fileStatus.isDirectory());
  assertEquals(file, fileStatus.getPath());
  assertEquals(fsPerm, fileStatus.getPermission());
}
private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd) throws IOException { FSDataOutputStream out = fs.create(p, false); try { // We used to write this file out as a serialized HTD Writable followed by two '\n's and then // the toString version of HTD. Now we just write out the pb serialization. out.write(TableDescriptorBuilder.toByteArray(htd)); } finally { out.close(); } }
/** @throws Exception If failed. */
@Test
public void testStatus() throws Exception {
  Path file1 = new Path("/file1");
  // Write 1 MiB so the filesystem reports non-trivial usage below.
  try (FSDataOutputStream file = fs.create(file1, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()))) {
    file.write(new byte[1024 * 1024]);
  }
  FsStatus status = fs.getFsStatus();
  assertEquals(getClientFsUser(), fs.getFileStatus(file1).getOwner());
  // The test topology is fixed at 4 nodes; the sums below depend on it.
  assertEquals(4, grid(0).cluster().nodes().size());
  long used = 0, max = 0;
  for (int i = 0; i < 4; i++) {
    IgniteFileSystem igfs = grid(i).fileSystem("igfs");
    IgfsMetrics metrics = igfs.metrics();
    used += metrics.localSpaceSize();
    max += metrics.maxSpaceSize();
  }
  // Cluster-wide status must equal the sum of the per-node metrics.
  assertEquals(used, status.getUsed());
  assertEquals(max, status.getCapacity());
}