/**
 * Initializes this file system via the superclass, then records the
 * supplied URI as this instance's URI.
 *
 * @param name URI identifying this file system instance
 * @param conf configuration used for initialization
 * @throws IOException if superclass initialization fails
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  this.uri = name;
}
/**
 * Initializes this file system, then pins {@code this.uri} to the fixed
 * canonical form {@code SCHEME://SCHEME}, ignoring the authority of the
 * URI that was passed in.
 *
 * @param uri requested URI (its authority is not retained)
 * @param conf configuration used for initialization
 * @throws IOException if superclass initialization fails
 */
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
  super.initialize(uri, conf);
  // NOTE(review): the authority is deliberately the scheme name itself,
  // yielding a single canonical URI for this fs — confirm this is intended.
  String canonical = SCHEME + "://" + SCHEME;
  this.uri = URI.create(canonical);
}
/**
 * Initializes the embedded file system if it has not been configured yet,
 * and records the requested scheme when it differs from the embedded
 * file system's own scheme.
 *
 * @param name URI whose scheme may need to be swapped in later
 * @param conf configuration used to initialize the embedded fs
 * @throws IOException if initialization of the embedded fs fails
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  // NOTE(review): no super.initialize(...) call here — confirm upstream
  // that the superclass requires none for this subclass.
  if (fs.getConf() == null) {
    // Some existing filesystems neglect to initialize the embedded fs.
    fs.initialize(name, conf);
  }
  String requestedScheme = name.getScheme();
  if (!requestedScheme.equals(fs.getUri().getScheme())) {
    swapScheme = requestedScheme;
  }
}
/** Called after a new FileSystem instance is constructed. * @param name a uri whose authority section names the host, port, etc. * for this FileSystem * @param conf the configuration */ @Override public void initialize(URI name, Configuration conf) throws IOException { super.initialize(name, conf); // this is less than ideal, but existing filesystems sometimes neglect // to initialize the embedded filesystem if (fs.getConf() == null) { fs.initialize(name, conf); } String scheme = name.getScheme(); if (!scheme.equals(fs.getUri().getScheme())) { swapScheme = scheme; } }
/**
 * Initializes the underlying file system after translating the URI's
 * scheme from the replacement scheme back to the underlying scheme.
 *
 * @param uri URI carrying the replacement scheme
 * @param conf configuration passed through to the underlying fs
 * @throws IOException if the underlying fs fails to initialize
 * @throws IllegalStateException if no underlying fs has been set
 */
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
  if (this.underlyingFs == null) {
    throw new IllegalStateException("Underlying fs has not been defined.");
  }
  URI translated = replaceScheme(uri, this.replacementScheme, this.underlyingScheme);
  this.underlyingFs.initialize(translated, conf);
}
/**
 * Initializes via the superclass and stores the configuration on this
 * instance as well.
 *
 * @param uri URI identifying this file system instance
 * @param conf configuration used for initialization
 * @throws IOException if superclass initialization fails
 */
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
  super.initialize(uri, conf);
  // Keep the Configured parent's view of the configuration in sync.
  setConf(conf);
}
@Override public void initialize(URI name, Configuration conf) throws IOException { // create a proxy for the local filesystem // the scheme/authority serving as the proxy is derived // from the supplied URI this.scheme = name.getScheme(); String nameUriString = name.toString(); String authority = name.getAuthority() != null ? name.getAuthority() : ""; String proxyUriString = scheme + "://" + authority + "/"; fs = ShimLoader.getHadoopShims().createProxyFileSystem(localFs, URI.create(proxyUriString)); fs.initialize(name, conf); }
/** {@inheritDoc} */
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
  super.initialize(uri, conf);
  setConf(conf);
  // Honor an explicitly configured initial working directory, if present.
  String initWorkDir = conf.get(HadoopFileSystemsUtils.LOC_FS_WORK_DIR_PROP);
  if (initWorkDir != null) {
    setWorkingDirectory(new Path(initWorkDir));
  }
}
// Fragment of an assertThrows-style helper call (enclosing expression starts
// outside this view): re-initializing the file system is expected to fail
// with an IOException carrying the message "File system is stopped.".
@Nullable @Override public Object call() throws Exception { fs.initialize(primaryFsUri, primaryFsCfg); return null; } }, IOException.class, "File system is stopped.");
private void useDifferentDFSClient() throws IOException { // make fs act as a different client now // initialize will create a new DFSClient with a new client ID fs.initialize(fs.getUri(), conf); }
/**
 * Initializes via the superclass, folds URI-embedded settings into the
 * configuration, stores the configuration, and records the URI.
 *
 * @param uriInfo URI identifying this file system instance
 * @param conf configuration to augment and store
 * @throws IOException if superclass initialization fails
 */
@Override
public void initialize(URI uriInfo, Configuration conf) throws IOException {
  super.initialize(uriInfo, conf);
  // Pull any settings encoded in the URI into the configuration first,
  // so the stored conf reflects them.
  setConfigurationFromURI(uriInfo, conf);
  setConf(conf);
  this.uri = uriInfo;
}
/**
 * Constructs a delegate that exposes {@code theFsImpl} through this class's
 * API. Initializes the wrapped file system and shares this instance's
 * statistics object with it.
 *
 * @param theUri URI identifying the file system
 * @param theFsImpl the FileSystem implementation being wrapped
 * @param conf configuration used to initialize {@code theFsImpl}
 * @param supportedScheme the scheme this delegate supports
 * @param authorityRequired whether the URI must carry an authority
 * @throws IOException if initialization of the wrapped fs fails
 * @throws URISyntaxException if the URI is rejected by the superclass
 */
protected DelegateToFileSystem(URI theUri, FileSystem theFsImpl, Configuration conf, String supportedScheme, boolean authorityRequired) throws IOException, URISyntaxException { super(theUri, supportedScheme, authorityRequired, getDefaultPortIfDefined(theFsImpl)); fsImpl = theFsImpl; fsImpl.initialize(theUri, conf); // wire the wrapped fs to this instance's statistics after it is initialized
fsImpl.statistics = getStatistics(); }
/**
 * Initializes via the superclass, builds an SFTP helper from the
 * configuration's state, and connects it.
 *
 * @param name URI identifying this file system instance
 * @param conf configuration carrying the connection state
 * @throws IOException if initialization or the SFTP connection fails
 */
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  State state = HadoopUtils.getStateFromConf(conf);
  this.fsHelper = new SftpFsHelper(state);
  try {
    this.fsHelper.connect();
  } catch (FileBasedHelperException e) {
    // Surface connection failures as IOException, preserving the cause.
    throw new IOException(e);
  }
}
/**
 * Create and initialize a new instance of a FileSystem.
 *
 * @param uri URI containing the FS schema and FS details
 * @param conf configuration to use to look for the FS instance declaration
 *             and to pass to the {@link FileSystem#initialize(URI, Configuration)}.
 * @return the initialized filesystem.
 * @throws IOException problems loading or initializing the FileSystem
 */
private static FileSystem createFileSystem(URI uri, Configuration conf) throws IOException {
  Tracer tracer = FsTracer.get(conf);
  try (TraceScope scope = tracer.newScope("FileSystem#createFileSystem")) {
    String scheme = uri.getScheme();
    scope.addKVAnnotation("scheme", scheme);
    // Resolve the implementation class for this scheme and build it
    // reflectively with the supplied configuration.
    Class<?> implClass = getFileSystemClass(scheme, conf);
    FileSystem created = (FileSystem) ReflectionUtils.newInstance(implClass, conf);
    created.initialize(uri, conf);
    return created;
  }
}
// Initialize the wrapped Hadoop file system against the resolved URI and conf.
this.hadoopFileSystem.initialize(fsUri, hadoopConf);
/**
 * Builds a fresh raw local Hadoop file system, initializes it against the
 * local-fs URI, and wraps it in a {@code HadoopFileSystem}.
 *
 * @return a newly initialized wrapped local file system
 * @throws Exception if initialization fails
 */
@Override
public FileSystem getFileSystem() throws Exception {
  org.apache.hadoop.fs.FileSystem rawFs = new RawLocalFileSystem();
  rawFs.initialize(LocalFileSystem.getLocalFsURI(), new Configuration());
  return new HadoopFileSystem(rawFs);
}
/**
 * Builds a {@link LogCopier} that copies stdout/stderr logs from the HDFS
 * application log directory to the given local sink directory.
 *
 * @param config source of optional tuning settings
 * @param sinkLogDir local destination directory for copied logs
 * @param appWorkDir application work directory whose HDFS log dir is read
 * @return the configured LogCopier
 * @throws IOException if the local destination file system fails to initialize
 */
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
  // Destination is the raw local fs; register it so it is closed with this object.
  FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
  rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());

  LogCopier.Builder copierBuilder = LogCopier.newBuilder()
      .useSrcFileSystem(this.fs)
      .useDestFileSystem(rawLocalFs)
      .readFrom(getHdfsLogDir(appWorkDir))
      .writeTo(sinkLogDir)
      .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));

  // Optional tuning knobs, applied only when configured.
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE)) {
    copierBuilder.useMaxBytesPerLogFile(config.getBytes(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE));
  }
  if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER)) {
    copierBuilder.useScheduler(config.getString(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER));
  }
  return copierBuilder.build();
}
// Fragment of an initialize(URI, Configuration) body: delegate to the
// superclass, then store the configuration on this instance.
super.initialize(uri, conf); setConf(conf);
@BeforeClass public static void setUpOneTime() throws Exception { fs = new LocalFileSystem(); fs.initialize(fs.getWorkingDirectory().toUri(), new Configuration()); HiveConf hiveConf = new HiveConf(); hiveConf.setInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME, 0); // Hack to initialize cache with 0 expiry time causing it to return a new hive client every time // Otherwise the cache doesn't play well with the second test method with the client gets closed() in the // tearDown() of the previous test HCatUtil.getHiveMetastoreClient(hiveConf); MapCreate.writeCount = 0; MapRead.readCount = 0; }
/**
 * Initializes the Google Drive client from credentials carried in the
 * configuration's state. Idempotent: once {@code client} is set, later
 * calls are no-ops. Synchronized so only one thread builds the client.
 *
 * @param uri URI identifying this file system instance
 * @param conf configuration carrying credential and proxy settings
 * @throws IOException if superclass initialization fails
 */
@Override
public synchronized void initialize(URI uri, Configuration conf) throws IOException {
  if (this.client != null) {
    return; // already initialized
  }
  super.initialize(uri, conf);
  State state = HadoopUtils.getStateFromConf(conf);

  Credential credential =
      new GoogleCommon.CredentialBuilder(state.getProp(SOURCE_CONN_PRIVATE_KEY), state.getPropAsList(API_SCOPES))
          .fileSystemUri(state.getProp(PRIVATE_KEY_FILESYSTEM_URI))
          .proxyUrl(state.getProp(SOURCE_CONN_USE_PROXY_URL))
          .port(state.getProp(SOURCE_CONN_USE_PROXY_PORT))
          .serviceAccountId(state.getProp(SOURCE_CONN_USERNAME))
          .build();

  this.client =
      new Drive.Builder(credential.getTransport(), GoogleCommon.getJsonFactory(), credential)
          .setApplicationName(Preconditions.checkNotNull(state.getProp(APPLICATION_NAME), "ApplicationName is required"))
          .build();
  this.pageSize = state.getPropAsInt(PAGE_SIZE, DEFAULT_PAGE_SIZE);
}