@Override
public OutputStream openOutputStream() throws IOException {
  final FileSystem fs = path.getFileSystem(config);
  return fs.create(path, overwrite);
}
private BufferedWriter writer(Context withinContext, Path dataPath) throws IOException {
  Path filesPath = new Path(dataPath, EximUtil.FILES_NAME);
  FileSystem fs = dataPath.getFileSystem(withinContext.hiveConf);
  return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
}
private BufferedWriter writer() throws IOException {
  Path exportToFile = new Path(exportRootDataDir, EximUtil.FILES_NAME);
  if (exportFileSystem.exists(exportToFile)) {
    throw new IllegalArgumentException(exportToFile.toString()
        + " already exists; cannot export data from path(dir) " + dataPathList);
  }
  logger.debug("exporting data files in dir : " + dataPathList + " to " + exportToFile);
  return new BufferedWriter(new OutputStreamWriter(exportFileSystem.create(exportToFile)));
}
@Override
public void execute() throws IOException {
  URI fsURI;
  try {
    fsURI = new URI(this.fsUriString);
  } catch (URISyntaxException e) {
    throw new IOException("cannot build URI " + this.fsUriString, e);
  }
  FileSystem fs = FileSystem.get(fsURI, new Configuration());
  Path filenamePath = new Path(this.targetDirPath, ReplicaHadoopFsEndPoint.WATERMARK_FILE);
  if (fs.exists(filenamePath)) {
    fs.delete(filenamePath, false);
  }
  // Use try-with-resources so the stream is closed even if the write fails.
  try (FSDataOutputStream fout = fs.create(filenamePath)) {
    fout.write(WatermarkMetadataUtil.serialize(this.watermark).getBytes(Charsets.UTF_8));
  }
  this.completed = true;
}
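// A minimal simplification sketch (not in the original): FileSystem.create(Path, boolean)
// with overwrite=true replaces the exists()/delete() pair above in a single call, assuming
// plain overwrite semantics are acceptable for the watermark file.
try (FSDataOutputStream fout = fs.create(filenamePath, true)) {
  fout.write(WatermarkMetadataUtil.serialize(this.watermark).getBytes(Charsets.UTF_8));
}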
@Test
public void testDoubleCreateSemantics() throws Exception {
  // 1. create an already existing open file w/o overwrite flag
  Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
  try (FSDataOutputStream os1 = fs.create(file1, false)) {
    fs.create(file1, false); // should fail
    fail("Create did not throw an exception");
  } catch (RemoteException e) {
    Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
  }
  // 2. close file and retry creation
  try {
    fs.create(file1, false); // should still fail
    fail("Create did not throw an exception");
  } catch (FileAlreadyExistsException e) {
    // expecting this exception
  }
  // 3. delete file and retry creation
  fs.delete(file1, false);
  try (FSDataOutputStream os2 = fs.create(file1, false)) {
    Assert.assertNotNull(os2);
  }
}
private Path createDummyFile() throws SemanticException {
  Path dummyPath = new Path(ctx.getMRScratchDir(), "dummy_path");
  Path dummyFile = new Path(dummyPath, "dummy_file");
  FSDataOutputStream fout = null;
  try {
    FileSystem fs = dummyFile.getFileSystem(conf);
    if (fs.exists(dummyFile)) {
      return dummyPath;
    }
    fout = fs.create(dummyFile);
    fout.write(1);
    fout.close();
  } catch (IOException e) {
    throw new SemanticException(e);
  } finally {
    IOUtils.closeStream(fout);
  }
  return dummyPath;
}
public static OutputStream makePathAndOutputStream(JobContext job, Path outputPath, boolean deleteExisting)
    throws IOException {
  OutputStream retVal;
  FileSystem fs = outputPath.getFileSystem(job.getConfiguration());
  Class<? extends CompressionCodec> codecClass;
  CompressionCodec codec = null;

  if (FileOutputFormat.getCompressOutput(job)) {
    codecClass = FileOutputFormat.getOutputCompressorClass(job, GzipCodec.class);
    codec = ReflectionUtils.newInstance(codecClass, job.getConfiguration());
    outputPath = new Path(outputPath + codec.getDefaultExtension());
  }

  if (fs.exists(outputPath)) {
    if (deleteExisting) {
      fs.delete(outputPath, false);
    } else {
      throw new ISE("outputPath[%s] must not exist.", outputPath);
    }
  }

  if (FileOutputFormat.getCompressOutput(job)) {
    retVal = codec.createOutputStream(fs.create(outputPath, false));
  } else {
    retVal = fs.create(outputPath, false);
  }
  return retVal;
}
@VisibleForTesting
static void serializeJobState(FileSystem fs, Path mrJobDir, Configuration conf, JobState jobState, Job job)
    throws IOException {
  Path jobStateFilePath = new Path(mrJobDir, JOB_STATE_FILE_NAME);
  // Write the job state with an empty task set (work units are read by the mapper from a different file)
  try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(jobStateFilePath))) {
    jobState.write(dataOutputStream, false,
        conf.getBoolean(SERIALIZE_PREVIOUS_WORKUNIT_STATES_KEY, DEFAULT_SERIALIZE_PREVIOUS_WORKUNIT_STATES));
  }
  job.getConfiguration().set(ConfigurationKeys.JOB_STATE_FILE_PATH_KEY, jobStateFilePath.toString());
  DistributedCache.addCacheFile(jobStateFilePath.toUri(), job.getConfiguration());
  job.getConfiguration().set(ConfigurationKeys.JOB_STATE_DISTRIBUTED_CACHE_NAME, jobStateFilePath.getName());
}
@Test
public void testVersion() throws DeserializationException, IOException {
  final Path rootdir = htu.getDataTestDir();
  final FileSystem fs = rootdir.getFileSystem(conf);
  assertNull(FSUtils.getVersion(fs, rootdir));
  // Write out old format version file. See if we can read it in and convert.
  Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME);
  FSDataOutputStream s = fs.create(versionFile);
  final String version = HConstants.FILE_SYSTEM_VERSION;
  s.writeUTF(version);
  s.close();
  assertTrue(fs.exists(versionFile));
  FileStatus[] status = fs.listStatus(versionFile);
  assertNotNull(status);
  assertTrue(status.length > 0);
  String newVersion = FSUtils.getVersion(fs, rootdir);
  assertEquals(version.length(), newVersion.length());
  assertEquals(version, newVersion);
  // File will have been converted. Exercise the pb format.
  assertEquals(version, FSUtils.getVersion(fs, rootdir));
  FSUtils.checkVersion(fs, rootdir, true);
}
protected RegionInfo createRegion(Configuration conf, final Table htbl, byte[] startKey, byte[] endKey)
    throws IOException {
  Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
  HTableDescriptor htd = htbl.getTableDescriptor();
  RegionInfo hri = RegionInfoBuilder.newBuilder(htbl.getName())
      .setStartKey(startKey)
      .setEndKey(endKey)
      .build();
  LOG.info("manually adding regioninfo and hdfs data: " + hri.toString());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = rootDir.getFileSystem(conf);
  Path p = new Path(FSUtils.getTableDir(rootDir, htbl.getName()), hri.getEncodedName());
  fs.mkdirs(p);
  Path riPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
  FSDataOutputStream out = fs.create(riPath);
  out.write(RegionInfo.toDelimitedByteArray(hri));
  out.close();
  // add to meta.
  MetaTableAccessor.addRegionToMeta(TEST_UTIL.getConnection(), hri);
  meta.close();
  return hri;
}
public static void writeMmCommitManifest(List<Path> commitPaths, Path specPath, FileSystem fs, String taskId,
    Long writeId, int stmtId, String unionSuffix, boolean isInsertOverwrite) throws HiveException {
  if (commitPaths.isEmpty()) {
    return;
  }
  // We assume one FSOP per task (per specPath), so we create it in specPath.
  Path manifestPath = getManifestDir(specPath, writeId, stmtId, unionSuffix, isInsertOverwrite);
  manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION);
  Utilities.FILE_OP_LOGGER.info("Writing manifest to {} with {}", manifestPath, commitPaths);
  try {
    // Don't overwrite the manifest... should fail if we have collisions.
    try (FSDataOutputStream out = fs.create(manifestPath, false)) {
      if (out == null) {
        throw new HiveException("Failed to create manifest at " + manifestPath);
      }
      out.writeInt(commitPaths.size());
      for (Path path : commitPaths) {
        out.writeUTF(path.toString());
      }
    }
  } catch (IOException e) {
    throw new HiveException(e);
  }
}
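// A hypothetical companion reader (not part of the original code), shown as a sketch of
// how the manifest format written above would be consumed: an int count followed by that
// many writeUTF-encoded path strings.
private static List<Path> readMmCommitManifest(FileSystem fs, Path manifestPath) throws IOException {
  List<Path> commitPaths = new ArrayList<>();
  try (FSDataInputStream in = fs.open(manifestPath)) {
    int count = in.readInt();
    for (int i = 0; i < count; i++) {
      commitPaths.add(new Path(in.readUTF()));
    }
  }
  return commitPaths;
}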
taskContext.progress();
final FileSystem fs = taskContext.getWorkingDirectory().getFileSystem(taskContext.getConfiguration());
final Path taskAttemptDir = getTaskPath(context.getJobID(), taskContext.getWorkingDirectory());
final Path taskAttemptFile = new Path(taskAttemptDir, DATA_FILE_KEY);
final Path taskAttemptSuccess = new Path(taskAttemptDir, DATA_SUCCESS_KEY);
try (final OutputStream outputStream = fs.create(taskAttemptFile, false, 1 << 10, commitProgressable)) {
  outputStream.write(HadoopDruidConverterConfig.jsonMapper.writeValueAsBytes(newSegment));
  // Write a zero-byte success marker once the data file has been written.
  fs.create(taskAttemptSuccess, false).close();
}
private static void saveState(FileSystem fs, Path dir, State state) throws IOException {
  Path tmpFile = new Path(dir, STATE_FILE + ".tmp");
  Path newFile = new Path(dir, STATE_FILE);
  fs.delete(tmpFile, false);
  try (DataOutputStream dataOutputStream = new DataOutputStream(fs.create(tmpFile))) {
    state.write(dataOutputStream);
  }
  // Caution: We are deleting right before renaming because rename doesn't support
  // atomic overwrite options from the FileSystem API.
  fs.delete(newFile, false);
  fs.rename(tmpFile, newFile);
}
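// A hedged alternative sketch (not in the original): the FileContext API, unlike
// FileSystem, supports atomic overwrite on rename, which removes the delete-then-rename
// window noted above. Assumes a FileContext can be obtained for the same filesystem.
FileContext fc = FileContext.getFileContext(fs.getUri(), fs.getConf());
try (FSDataOutputStream out = fc.create(tmpFile, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE))) {
  state.write(out);
}
fc.rename(tmpFile, newFile, Options.Rename.OVERWRITE);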
@Override
public void put(String jobName, String datasetUrn, CommitSequence commitSequence) throws IOException {
  datasetUrn = sanitizeDatasetUrn(datasetUrn);
  if (exists(jobName, datasetUrn)) {
    throw new IOException(String.format("CommitSequence already exists for job %s, dataset %s", jobName, datasetUrn));
  }
  Path jobPath = new Path(this.rootPath, jobName);
  this.fs.mkdirs(jobPath);
  Path datasetPath = new Path(jobPath, datasetUrn);
  try (DataOutputStream dos = this.fs.create(datasetPath)) {
    dos.writeBytes(GSON.toJson(commitSequence));
  }
}
/**
 * TODO: fix it. Persist the manifest file.
 * @throws BackupException if storing the manifest file fails.
 */
public void store(Configuration conf) throws BackupException {
  byte[] data = backupImage.toProto().toByteArray();
  // Write the file, overwriting it if it already exists.
  Path manifestFilePath = new Path(
      HBackupFileSystem.getBackupPath(backupImage.getRootDir(), backupImage.getBackupId()),
      MANIFEST_FILE_NAME);
  try (FSDataOutputStream out = manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
    out.write(data);
  } catch (IOException e) {
    throw new BackupException(e.getMessage());
  }
  LOG.info("Manifest file stored to " + manifestFilePath);
}
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir) throws IOException {
  // get the existing store files
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  fs.mkdirs(storedir);
  // create the store files in the parent
  for (int i = 0; i < count; i++) {
    Path storeFile = new Path(storedir, "_store" + i);
    FSDataOutputStream dos = fs.create(storeFile, true);
    dos.writeBytes("Some data: " + i);
    dos.close();
  }
  LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
  // make sure the mock store files are there
  FileStatus[] storeFiles = fs.listStatus(storedir);
  assertEquals("Didn't have expected store files", count, storeFiles.length);
  return storeFiles;
}
private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd) throws IOException {
  FSDataOutputStream out = fs.create(p, false);
  try {
    // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
    // the toString version of HTD. Now we just write out the pb serialization.
    out.write(TableDescriptorBuilder.toByteArray(htd));
  } finally {
    out.close();
  }
}