public static org.apache.hadoop.fs.FileSystem asFileSystem( FileSystem fs ) {
  return fs == null ? null : (org.apache.hadoop.fs.FileSystem) fs.getDelegate();
}
private Path getPath( Configuration conf, FileSystem fs, String outputPath ) {
  return fs.asPath( conf.getDefaultFileSystemURL(), outputPath );
}
@VisibleForTesting
void cleanOutputPath( Configuration conf ) throws IOException {
  if ( cleanOutputPath ) {
    FileSystem fs = hadoopShim.getFileSystem( conf );
    Path path = getOutputPath( conf, fs );
    String outputPath = path.toUri().toString();
    if ( log.isBasic() ) {
      log.logBasic( BaseMessages.getString( PKG, JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_CLEANING_OUTPUT_PATH, outputPath ) );
    }
    try {
      if ( !fs.exists( path ) ) {
        // If the path does not exist, treat it as already cleaned
        return;
      }
      if ( !fs.delete( path, true ) ) {
        if ( log.isBasic() ) {
          log.logBasic( BaseMessages
            .getString( PKG, JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_FAILED_TO_CLEAN_OUTPUT_PATH, outputPath ) );
        }
      }
    } catch ( IOException ex ) {
      throw new IOException(
        BaseMessages.getString( PKG, JOB_ENTRY_HADOOP_TRANS_JOB_EXECUTOR_ERROR_CLEANING_OUTPUT_PATH, outputPath ), ex );
    }
  }
}
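A minimal usage sketch, assuming a caller that has already created the job Configuration; the variable name jobConf and the error handling are illustrative and not part of the original code:

// Hypothetical caller: clean the configured output path before submitting the job.
Configuration jobConf = hadoopShim.createConfiguration();
try {
  cleanOutputPath( jobConf );
} catch ( IOException e ) {
  // Abort the submission if the output path could not be cleaned.
  log.logError( "Failed to clean the output path before job submission", e );
  return;
}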
protected void stageMetaStoreForHadoop( Configuration conf, FileSystem fs, String installPath ) throws Exception {
  // Create a temporary directory for a local snapshot of the metastore
  java.nio.file.Path localMetaStoreSnapshotDirPath = Files.createTempDirectory( XmlUtil.META_FOLDER_NAME );
  FileObject localMetaStoreSnapshotDirObject = KettleVFS.getFileObject( localMetaStoreSnapshotDirPath.toString() );
  Path hdfsMetaStoreDirForCurrentJobPath = fs.asPath( installPath, XmlUtil.META_FOLDER_NAME );

  // Fill the local metastore snapshot from the existing named cluster
  snapshotMetaStore( localMetaStoreSnapshotDirPath.toString() );

  // Stage the snapshot to the cluster file system and register it with the distributed cache
  hadoopShim.getDistributedCacheUtil().stageForCache( localMetaStoreSnapshotDirObject, fs, hdfsMetaStoreDirForCurrentJobPath, true, true );
  hadoopShim.getDistributedCacheUtil().addCachedFiles( conf, fs, hdfsMetaStoreDirForCurrentJobPath, null );
}
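A hedged invocation sketch: the install path value and the surrounding setup are assumptions; only getFileSystem( conf ) and the stageMetaStoreForHadoop signature come from the code in this section:

// Hypothetical: stage the metastore snapshot while installing the Kettle
// environment on the cluster; the installPath value shown here is illustrative only.
FileSystem fs = hadoopShim.getFileSystem( conf );
String installPath = "/opt/pentaho/mapreduce/" + installId;
stageMetaStoreForHadoop( conf, fs, installPath );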
@Override
public HadoopFileSystem create( NamedCluster namedCluster, URI uri ) throws IOException {
  final URI finalUri = uri != null ? uri : URI.create( "" );
  final HadoopShim hadoopShim = hadoopConfiguration.getHadoopShim();
  final Configuration configuration = hadoopShim.createConfiguration();
  FileSystem fileSystem = (FileSystem) hadoopShim.getFileSystem( configuration ).getDelegate();
  if ( fileSystem instanceof LocalFileSystem ) {
    LOGGER.error( "Got a local filesystem, was expecting an hdfs connection" );
    throw new IOException( "Got a local filesystem, was expecting an hdfs connection" );
  }
  return new HadoopFileSystemImpl( new HadoopFileSystemCallable() {
    @Override
    public FileSystem getFileSystem() {
      try {
        return (FileSystem) hadoopShim.getFileSystem( finalUri, configuration, null ).getDelegate();
      } catch ( IOException | InterruptedException e ) {
        LOGGER.debug( "Error looking up/creating the file system ", e );
        return null;
      }
    }
  } );
}
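A hedged usage sketch for the factory method above; hdfsFactory, namedCluster, and the URI are illustrative assumptions, and only the create( NamedCluster, URI ) signature is taken from the code:

// Hypothetical caller: hdfsFactory is an instance of the class that declares create().
try {
  HadoopFileSystem hadoopFs = hdfsFactory.create( namedCluster, URI.create( "hdfs://namenode:8020/" ) );
  // hadoopFs resolves the underlying org.apache.hadoop.fs.FileSystem lazily via the callable above.
} catch ( IOException e ) {
  // create() throws when the shim resolves to a local file system instead of an HDFS connection.
  LOGGER.error( "Unable to obtain an HDFS file system for the named cluster", e );
}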
Path kettleEnvInstallDir = fs.asPath( installPath, installId );
FileObject pmrLibArchive = pmrArchiveGetter.getPmrArchive( conf );