/**
 * Initializes the file system: validates the URI, reads configuration, performs the
 * handshake with the IGFS server and, if a PROXY mode is configured, starts the
 * secondary file system.
 *
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
@SuppressWarnings("ConstantConditions")
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
                "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In Ignite, the replication factor is controlled by data cache affinity.
        // We use the replication factor to force the whole file to be stored on the local node.
        dfltReplication = (short)cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initialize the client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        }
        else
            clientLog = IgfsLogger.disabledLogger();

        try {
            modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
        }
        catch (IgniteCheckedException ice) {
            throw new IOException(ice);
        }

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (!initSecondary && paths.pathModes() != null) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                if (mode == PROXY) {
                    initSecondary = true;

                    break;
                }
            }
        }

        if (initSecondary) {
            try {
                factory = (HadoopFileSystemFactory)paths.getPayload(getClass().getClassLoader());
            }
            catch (IgniteCheckedException e) {
                throw new IOException("Failed to get secondary file system factory.", e);
            }

            if (factory == null)
                throw new IOException("Failed to get secondary file system factory (did you set " +
                    IgniteHadoopIgfsSecondaryFileSystem.class.getName() + " as \"secondaryFileSystem\" in " +
                    FileSystemConfiguration.class.getName() + "?)");

            assert factory != null;

            if (factory instanceof LifecycleAware)
                ((LifecycleAware)factory).start();

            try {
                FileSystem secFs = factory.get(user);

                secondaryUri = secFs.getUri();

                A.ensure(secondaryUri != null, "Secondary file system URI should not be null.");
            }
            catch (IOException e) {
                throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
            }
        }
    }
    finally {
        leaveBusy();
    }
}
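// A minimal usage sketch (an assumption, not part of this class): initialize() is invoked
// through the standard Hadoop FileSystem contract, so a client only needs to map the
// "igfs" scheme to this implementation and open an igfs:// URI. The implementation class
// name and the authority below are illustrative and depend on the Hadoop API version in use.
//
//     Configuration cfg = new Configuration();
//
//     cfg.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
//
//     FileSystem fs = FileSystem.get(URI.create("igfs://igfs@localhost/"), cfg);
//
//     fs.mkdirs(new Path("/tmp/example"));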
/**
 * Gets the cached {@link FileSystem} instance or creates a new one.
 *
 * @return The secondary file system.
 * @throws IOException If the secondary file system could not be created.
 */
private FileSystem secondaryFileSystem() throws IOException {
    assert factory != null;

    return factory.get(user);
}
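// A minimal delegation sketch (an assumption inferred from this class's fields): callers
// resolving a PROXY-mode path are expected to fetch the secondary file system and forward
// the operation to it. toSecondary() is a hypothetical helper converting an IGFS path to
// a Hadoop path; resolveMode() is assumed to return the IGFS mode for the given path.
//
//     if (modeRslvr.resolveMode(path) == PROXY) {
//         FileSystem secFs = secondaryFileSystem();
//
//         return secFs.open(toSecondary(path), bufSize);
//     }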