/** {@inheritDoc} */
@Override public void setOwner(Path p, String usr, String grp) throws IOException {
    A.notNull(p, "p");
    A.notNull(usr, "username");
    A.notNull(grp, "grpName");

    enterBusy();

    try {
        if (mode(p) == PROXY)
            secondaryFileSystem().setOwner(toSecondary(p), usr, grp);
        else if (rmtClient.update(convert(p), F.asMap(IgfsUtils.PROP_USER_NAME, usr,
            IgfsUtils.PROP_GROUP_NAME, grp)) == null) {
            throw new IOException("Failed to set file owner (file not found?)" +
                " [path=" + p + ", username=" + usr + ", grpName=" + grp + ']');
        }
    }
    finally {
        leaveBusy();
    }
}
/** {@inheritDoc} */
@Override public R get(long timeout, TimeUnit unit) throws IgniteCheckedException {
    A.ensure(timeout >= 0, "timeout cannot be negative: " + timeout);
    A.notNull(unit, "unit");

    try {
        return get0(unit.toNanos(timeout));
    }
    catch (InterruptedException e) {
        Thread.currentThread().interrupt();

        throw new IgniteInterruptedCheckedException("Got interrupted while waiting for future to complete.", e);
    }
}
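// Illustrative usage sketch, not part of the original source: waits on an internal future
// with a bounded timeout, as implemented above. "fut" and this helper are hypothetical.
private static String awaitResult(IgniteInternalFuture<String> fut) throws IgniteCheckedException {
    // Blocks for at most 30 seconds; if the calling thread is interrupted, the method above
    // restores the interrupt flag and throws IgniteInterruptedCheckedException.
    return fut.get(30, TimeUnit.SECONDS);
}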
/** {@inheritDoc} */
@Override public boolean delete(Path f, boolean recursive) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        IgfsMode mode = modeRslvr.resolveMode(path);

        if (mode == PROXY) {
            if (clientLog.isLogEnabled())
                clientLog.logDelete(path, PROXY, recursive);

            return secondaryFileSystem().delete(toSecondary(f), recursive);
        }

        boolean res = rmtClient.delete(path, recursive);

        if (clientLog.isLogEnabled())
            clientLog.logDelete(path, mode, recursive);

        return res;
    }
    finally {
        leaveBusy();
    }
}
/** {@inheritDoc} */
@Override public CacheQuery<T> timeout(long timeout) {
    A.ensure(timeout >= 0, "timeout >= 0");

    this.timeout = timeout;

    return this;
}
/** {@inheritDoc} */
@Override public CacheQuery<T> pageSize(int pageSize) {
    A.ensure(pageSize > 0, "pageSize > 0");

    this.pageSize = pageSize;

    return this;
}
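// Illustrative usage sketch, not part of the original source: both setters above return
// "this", so they chain fluently. "qry" is a hypothetical CacheQuery instance.
private static void tuneQuery(CacheQuery<Map.Entry<Integer, String>> qry) {
    // Abort the query after 10 seconds and fetch results in pages of 512 entries.
    qry.timeout(10_000).pageSize(512);
}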
/** {@inheritDoc} */
@Override public void renameInternal(Path src, Path dst) throws IOException {
    A.notNull(src, "src");
    A.notNull(dst, "dst");

    enterBusy();

    try {
        IgfsPath srcPath = convert(src);
        IgfsPath dstPath = convert(dst);

        IgfsMode srcMode = modeRslvr.resolveMode(srcPath);

        if (clientLog.isLogEnabled())
            clientLog.logRename(srcPath, srcMode, dstPath);

        if (srcMode == PROXY)
            secondaryFileSystem().rename(toSecondary(src), toSecondary(dst));
        else
            rmtClient.rename(srcPath, dstPath);
    }
    finally {
        leaveBusy();
    }
}
/** {@inheritDoc} */
@Override public FileStatus[] listStatus(Path f) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        IgfsMode mode = modeRslvr.resolveMode(path);

        if (mode == PROXY) {
            FileStatus[] arr = secondaryFileSystem().listStatus(toSecondary(f));

            if (arr == null)
                throw new FileNotFoundException("File " + f + " does not exist.");

            for (int i = 0; i < arr.length; i++)
                arr[i] = toPrimary(arr[i]);

            if (clientLog.isLogEnabled()) {
                String[] fileArr = new String[arr.length];

                for (int i = 0; i < arr.length; i++)
                    fileArr[i] = arr[i].getPath().toString();

                clientLog.logListDirectory(path, PROXY, fileArr);
            }

            return arr;
        }
        else {
            Collection<IgfsFile> list = rmtClient.listFiles(path);

            if (list == null)
                throw new FileNotFoundException("File " + f + " does not exist.");

            List<IgfsFile> files = new ArrayList<>(list);

            FileStatus[] arr = new FileStatus[files.size()];

            for (int i = 0; i < arr.length; i++)
                arr[i] = convert(files.get(i));

            if (clientLog.isLogEnabled()) {
                String[] fileArr = new String[arr.length];

                for (int i = 0; i < arr.length; i++)
                    fileArr[i] = arr[i].getPath().toString();

                clientLog.logListDirectory(path, mode, fileArr);
            }

            return arr;
        }
    }
    finally {
        leaveBusy();
    }
}
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public boolean addAll(final Collection<? extends T> items) { A.notNull(items, "items"); try { boolean retVal; int cnt = 0; while (true) { try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { Long idx = (Long) cache.invoke(queueKey, new AddProcessor(id, items.size())).get(); if (idx != null) { checkRemoved(idx); Map<GridCacheQueueItemKey, T> putMap = new HashMap<>(); for (T item : items) { putMap.put(itemKey(idx), item); idx++; } cache.putAll(putMap); retVal = true; } else retVal = false; tx.commit(); break; } catch (ClusterTopologyCheckedException e) { if (e instanceof ClusterGroupEmptyCheckedException) throw e; if (cnt++ == MAX_UPDATE_RETRIES) throw e; else { U.warn(log, "Failed to add item, will retry [err=" + e + ']'); U.sleep(RETRY_DELAY); } } } return retVal; } catch (IgniteCheckedException e) { throw U.convertException(e); } }
/** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public boolean offer(final T item) throws IgniteException { A.notNull(item, "item"); try { boolean retVal; int cnt = 0; while (true) { try { try (IgniteInternalTx tx = cache.txStartEx(PESSIMISTIC, REPEATABLE_READ)) { Long idx = (Long) cache.invoke(queueKey, new AddProcessor(id, 1)).get(); if (idx != null) { checkRemoved(idx); cache.getAndPut(itemKey(idx), item); retVal = true; } else retVal = false; tx.commit(); break; } } catch (ClusterTopologyCheckedException e) { if (e instanceof ClusterGroupEmptyCheckedException) throw e; if (cnt++ == MAX_UPDATE_RETRIES) throw e; else { U.warn(log, "Failed to add item, will retry [err=" + e + ']'); U.sleep(RETRY_DELAY); } } } return retVal; } catch (IgniteCheckedException e) { throw U.convertException(e); } }
/** {@inheritDoc} */
@Override public FileStatus getFileStatus(Path f) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        if (mode(f) == PROXY)
            return toPrimary(secondaryFileSystem().getFileStatus(toSecondary(f)));
        else {
            IgfsFile info = rmtClient.info(convert(f));

            if (info == null)
                throw new FileNotFoundException("File not found: " + f);

            return convert(info);
        }
    }
    finally {
        leaveBusy();
    }
}
/** {@inheritDoc} */
@Override public void setPermission(Path p, FsPermission perm) throws IOException {
    enterBusy();

    try {
        A.notNull(p, "p");

        if (mode(p) == PROXY)
            secondaryFileSystem().setPermission(toSecondary(p), perm);
        else {
            if (rmtClient.update(convert(p), permission(perm)) == null)
                throw new IOException("Failed to set file permission (file not found?)" +
                    " [path=" + p + ", perm=" + perm + ']');
        }
    }
    finally {
        leaveBusy();
    }
}
/** {@inheritDoc} */
@Override public void mkdir(Path f, FsPermission perm, boolean createParent) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        IgfsMode mode = modeRslvr.resolveMode(path);

        if (mode == PROXY) {
            if (clientLog.isLogEnabled())
                clientLog.logMakeDirectory(path, PROXY);

            secondaryFileSystem().mkdirs(toSecondary(f), perm);
        }
        else {
            rmtClient.mkdirs(path, permission(perm));

            if (clientLog.isLogEnabled())
                clientLog.logMakeDirectory(path, mode);
        }
    }
    finally {
        leaveBusy();
    }
}
/**
 * Create server IPC endpoint.
 *
 * @param endpointCfg Endpoint configuration.
 * @param mgmt Management flag.
 * @return Server endpoint.
 * @throws IgniteCheckedException If failed.
 */
private IpcServerEndpoint createEndpoint(IgfsIpcEndpointConfiguration endpointCfg, boolean mgmt)
    throws IgniteCheckedException {
    A.notNull(endpointCfg, "endpointCfg");

    IgfsIpcEndpointType typ = endpointCfg.getType();

    if (typ == null)
        throw new IgniteCheckedException("Failed to create server endpoint (type is not specified)");

    switch (typ) {
        case SHMEM: {
            IpcSharedMemoryServerEndpoint endpoint =
                new IpcSharedMemoryServerEndpoint(igfsCtx.kernalContext().config().getWorkDirectory());

            endpoint.setPort(endpointCfg.getPort());
            endpoint.setSize(endpointCfg.getMemorySize());
            endpoint.setTokenDirectoryPath(endpointCfg.getTokenDirectoryPath());

            return endpoint;
        }

        case TCP: {
            IpcServerTcpEndpoint endpoint = new IpcServerTcpEndpoint();

            endpoint.setHost(endpointCfg.getHost());
            endpoint.setPort(endpointCfg.getPort());
            endpoint.setManagement(mgmt);

            return endpoint;
        }

        default:
            throw new IgniteCheckedException("Failed to create server endpoint (type is unknown): " + typ);
    }
}
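// Illustrative configuration sketch, not part of the original source: builds a TCP endpoint
// configuration of the kind createEndpoint(...) above consumes. Host and port are hypothetical.
private static IgfsIpcEndpointConfiguration tcpEndpointConfiguration() {
    IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();

    endpointCfg.setType(IgfsIpcEndpointType.TCP);
    endpointCfg.setHost("127.0.0.1");
    endpointCfg.setPort(10500);

    return endpointCfg;
}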
/** {@inheritDoc} */
@Override public BlockLocation[] getFileBlockLocations(Path path, long start, long len) throws IOException {
    A.notNull(path, "path");

    IgfsPath igfsPath = convert(path);

    enterBusy();

    try {
        if (modeRslvr.resolveMode(igfsPath) == PROXY)
            return secondaryFileSystem().getFileBlockLocations(path, start, len);
        else {
            long now = System.currentTimeMillis();

            List<IgfsBlockLocation> affinity = new ArrayList<>(rmtClient.affinity(igfsPath, start, len));

            BlockLocation[] arr = new BlockLocation[affinity.size()];

            for (int i = 0; i < arr.length; i++)
                arr[i] = convert(affinity.get(i));

            if (LOG.isDebugEnabled())
                LOG.debug("Fetched file locations [path=" + path + ", fetchTime=" +
                    (System.currentTimeMillis() - now) + ", locations=" + Arrays.asList(arr) + ']');

            return arr;
        }
    }
    finally {
        leaveBusy();
    }
}
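// Illustrative usage sketch, not part of the original source: block locations are typically
// used to schedule work close to the data. "fs" and the target path are hypothetical.
private static void printBlockHosts(FileSystem fs, Path file) throws IOException {
    FileStatus status = fs.getFileStatus(file);

    // One entry per block; each lists the hosts holding a replica of that block.
    for (BlockLocation loc : fs.getFileBlockLocations(file, 0, status.getLen()))
        System.out.println(loc.getOffset() + ": " + Arrays.toString(loc.getHosts()));
}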
/**
 * Constructs LRU eviction policy with maximum size.
 *
 * @param max Maximum allowed size of cache before entry will start getting evicted.
 */
public LruEvictionPolicy(int max) {
    A.ensure(max >= 0, "max >= 0");

    this.max = max;
}
/** {@inheritDoc} */
@Override public void setMaxMemorySize(long maxMemSize) {
    A.ensure(maxMemSize >= 0, "maxMemSize >= 0");

    this.maxMemSize = maxMemSize;
}
/** {@inheritDoc} */
@Override public void setBatchSize(int batchSize) {
    A.ensure(batchSize > 0, "batchSize > 0");

    this.batchSize = batchSize;
}
/**
 * Sets maximum allowed size of cache before entry will start getting evicted.
 *
 * @param max Maximum allowed size of cache before entry will start getting evicted.
 */
@Override public void setMaxSize(int max) {
    A.ensure(max >= 0, "max >= 0");

    this.max = max;
}
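// Illustrative configuration sketch, not part of the original source: wires an LRU policy
// with the limits shown above into a cache configuration. Cache name and sizes are hypothetical.
private static CacheConfiguration<Integer, String> lruCacheConfiguration() {
    LruEvictionPolicy<Integer, String> plc = new LruEvictionPolicy<>(100_000);

    plc.setMaxMemorySize(256L * 1024 * 1024); // Also evict once on-heap memory exceeds 256 MB.
    plc.setBatchSize(16);                     // Evict 16 entries at a time.

    CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("data");

    ccfg.setOnheapCacheEnabled(true);
    ccfg.setEvictionPolicy(plc);

    return ccfg;
}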
/**
 * Initializes tuple with given object count.
 *
 * @param cnt Count of objects to be stored in the tuple.
 */
public GridTupleV(int cnt) {
    A.ensure(cnt > 0, "cnt > 0");

    vals = new Object[cnt];
}
/**
 * Sets given values starting at provided position in the tuple.
 *
 * @param pos Position to start from.
 * @param v Values to set.
 */
public void set(int pos, Object... v) {
    A.ensure(pos > 0, "pos > 0");
    A.ensure(v.length + pos <= vals.length, "v.length + pos <= vals.length");

    if (v.length > 0)
        System.arraycopy(v, 0, vals, pos, v.length);
}
/**
 * Sets value at given index.
 *
 * @param i Index to set.
 * @param v Value to set.
 * @param <V> Value type.
 */
public <V> void set(int i, V v) {
    A.ensure(i < vals.length, "i < vals.length");

    vals[i] = v;
}
/**
 * Retrieves value at given index.
 *
 * @param i Index of the value to get.
 * @param <V> Value type.
 * @return Value at given index.
 */
@SuppressWarnings({"unchecked"})
public <V> V get(int i) {
    A.ensure(i < vals.length, "i < vals.length");

    return (V)vals[i];
}
/**
 * Sets given values starting at {@code 0} position.
 *
 * @param v Values to set.
 */
public void set(Object... v) {
    A.ensure(v.length <= vals.length, "v.length <= vals.length");

    if (v.length > 0)
        System.arraycopy(v, 0, vals, 0, v.length);
}
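// Illustrative usage sketch, not part of the original source: a three-slot tuple filled
// through the setters above. All values are hypothetical.
private static void tupleExample() {
    GridTupleV tuple = new GridTupleV(3);

    tuple.set("id-42", 10);   // Varargs setter fills slots 0 and 1.
    tuple.set(2, true);       // Indexed setter fills slot 2.

    String id = tuple.get(0); // Generic getter casts to the expected type.
}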
/** {@inheritDoc} */ @SuppressWarnings("deprecation") @Override public FSDataOutputStream createInternal( Path f, EnumSet<CreateFlag> flag, FsPermission perm, int bufSize, short replication, long blockSize, Progressable progress, Options.ChecksumOpt checksumOpt, boolean createParent) throws IOException { A.notNull(f, "f"); enterBusy(); boolean overwrite = flag.contains(CreateFlag.OVERWRITE); boolean append = flag.contains(CreateFlag.APPEND); boolean create = flag.contains(CreateFlag.CREATE); OutputStream out = null; try { IgfsPath path = convert(f); IgfsMode mode = modeRslvr.resolveMode(path); if (LOG.isDebugEnabled()) LOG.debug( "Opening output stream in create [thread=" + Thread.currentThread().getName() + "path=" + path + ", overwrite=" + overwrite + ", bufSize=" + bufSize + ']'); if (mode == PROXY) { FSDataOutputStream os = secondaryFileSystem() .create(toSecondary(f), perm, flag, bufSize, replication, blockSize, progress); if (clientLog.isLogEnabled()) { long logId = IgfsLogger.nextId(); if (append) clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID. else clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize); return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId)); } else return os; } else { Map<String, String> permMap = F.asMap( IgfsUtils.PROP_PERMISSION, toString(perm), IgfsUtils.PROP_PREFER_LOCAL_WRITES, Boolean.toString(preferLocFileWrites)); // Create stream and close it in the 'finally' section if any sequential operation failed. HadoopIgfsStreamDelegate stream; long logId = -1; if (append) { stream = rmtClient.append(path, create, permMap); if (clientLog.isLogEnabled()) { logId = IgfsLogger.nextId(); clientLog.logAppend(logId, path, mode, bufSize); } if (LOG.isDebugEnabled()) LOG.debug( "Opened output stream in append [path=" + path + ", delegate=" + stream + ']'); } else { stream = rmtClient.create( path, overwrite, colocateFileWrites, replication, blockSize, permMap); if (clientLog.isLogEnabled()) { logId = IgfsLogger.nextId(); clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize); } if (LOG.isDebugEnabled()) LOG.debug( "Opened output stream in create [path=" + path + ", delegate=" + stream + ']'); } assert stream != null; HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId); bufSize = Math.max(64 * 1024, bufSize); out = new BufferedOutputStream(igfsOut, bufSize); FSDataOutputStream res = new FSDataOutputStream(out, null, 0); // Mark stream created successfully. out = null; return res; } } finally { // Close if failed during stream creation. if (out != null) U.closeQuiet(out); leaveBusy(); } }
/** {@inheritDoc} */
@Override public FSDataInputStream open(Path f, int bufSize) throws IOException {
    A.notNull(f, "f");

    enterBusy();

    try {
        IgfsPath path = convert(f);

        IgfsMode mode = modeRslvr.resolveMode(path);

        if (mode == PROXY) {
            FSDataInputStream is = secondaryFileSystem().open(toSecondary(f), bufSize);

            if (clientLog.isLogEnabled()) {
                // At this point we do not know file size, so we perform additional request
                // to remote FS to get it.
                FileStatus status = secondaryFileSystem().getFileStatus(toSecondary(f));

                long size = status != null ? status.getLen() : -1;

                long logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, PROXY, bufSize, size);

                return new FSDataInputStream(new HadoopIgfsProxyInputStream(is, clientLog, logId));
            }
            else
                return is;
        }
        else {
            HadoopIgfsStreamDelegate stream = seqReadsBeforePrefetchOverride ?
                rmtClient.open(path, seqReadsBeforePrefetch) : rmtClient.open(path);

            long logId = -1;

            if (clientLog.isLogEnabled()) {
                logId = IgfsLogger.nextId();

                clientLog.logOpen(logId, path, mode, bufSize, stream.length());
            }

            if (LOG.isDebugEnabled())
                LOG.debug("Opening input stream [thread=" + Thread.currentThread().getName() +
                    ", path=" + path + ", bufSize=" + bufSize + ']');

            HadoopIgfsInputStream igfsIn =
                new HadoopIgfsInputStream(stream, stream.length(), bufSize, LOG, clientLog, logId);

            if (LOG.isDebugEnabled())
                LOG.debug("Opened input stream [path=" + path + ", delegate=" + stream + ']');

            return new FSDataInputStream(igfsIn);
        }
    }
    finally {
        leaveBusy();
    }
}
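// Illustrative usage sketch, not part of the original source: reads a file back through
// the standard FileSystem API, which dispatches to open(...) above. "fs" and the path are
// hypothetical.
private static String readFirstLine(FileSystem fs, Path file) throws IOException {
    try (FSDataInputStream in = fs.open(file, 64 * 1024);
        BufferedReader rdr = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
        return rdr.readLine();
    }
}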
/**
 * Constructs random eviction policy with maximum size.
 *
 * @param max Maximum allowed size of cache before entry will start getting evicted.
 */
public RandomEvictionPolicy(int max) {
    A.ensure(max > 0, "max > 0");

    this.max = max;
}
/**
 * @param name URI passed to constructor.
 * @param cfg Configuration passed to constructor.
 * @throws IOException If initialization failed.
 */
@SuppressWarnings("ConstantConditions")
private void initialize(URI name, Configuration cfg) throws IOException {
    enterBusy();

    try {
        if (rmtClient != null)
            throw new IOException("File system is already initialized: " + rmtClient);

        A.notNull(name, "name");
        A.notNull(cfg, "cfg");

        if (!IGFS_SCHEME.equals(name.getScheme()))
            throw new IOException("Illegal file system URI [expected=" + IGFS_SCHEME +
                "://[name]/[optional_path], actual=" + name + ']');

        uriAuthority = name.getAuthority();

        // Override sequential reads before prefetch if needed.
        seqReadsBeforePrefetch = parameter(cfg, PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, uriAuthority, 0);

        if (seqReadsBeforePrefetch > 0)
            seqReadsBeforePrefetchOverride = true;

        // In Ignite replication factor is controlled by data cache affinity.
        // We use replication factor to force the whole file to be stored on local node.
        dfltReplication = (short)cfg.getInt("dfs.replication", 3);

        // Get file colocation control flag.
        colocateFileWrites = parameter(cfg, PARAM_IGFS_COLOCATED_WRITES, uriAuthority, false);
        preferLocFileWrites = cfg.getBoolean(PARAM_IGFS_PREFER_LOCAL_WRITES, false);

        // Get log directory.
        String logDirCfg = parameter(cfg, PARAM_IGFS_LOG_DIR, uriAuthority, DFLT_IGFS_LOG_DIR);

        File logDirFile = U.resolveIgnitePath(logDirCfg);

        String logDir = logDirFile != null ? logDirFile.getAbsolutePath() : null;

        rmtClient = new HadoopIgfsWrapper(uriAuthority, logDir, cfg, LOG, user);

        // Handshake.
        IgfsHandshakeResponse handshake = rmtClient.handshake(logDir);

        grpBlockSize = handshake.blockSize();

        IgfsPaths paths = handshake.secondaryPaths();

        Boolean logEnabled = parameter(cfg, PARAM_IGFS_LOG_ENABLED, uriAuthority, false);

        if (handshake.sampling() != null ? handshake.sampling() : logEnabled) {
            // Initiate client logger.
            if (logDir == null)
                throw new IOException("Failed to resolve log directory: " + logDirCfg);

            Integer batchSize = parameter(cfg, PARAM_IGFS_LOG_BATCH_SIZE, uriAuthority, DFLT_IGFS_LOG_BATCH_SIZE);

            clientLog = IgfsLogger.logger(uriAuthority, handshake.igfsName(), logDir, batchSize);
        }
        else
            clientLog = IgfsLogger.disabledLogger();

        try {
            modeRslvr = new IgfsModeResolver(paths.defaultMode(), paths.pathModes());
        }
        catch (IgniteCheckedException ice) {
            throw new IOException(ice);
        }

        boolean initSecondary = paths.defaultMode() == PROXY;

        if (!initSecondary && paths.pathModes() != null) {
            for (T2<IgfsPath, IgfsMode> pathMode : paths.pathModes()) {
                IgfsMode mode = pathMode.getValue();

                if (mode == PROXY) {
                    initSecondary = true;

                    break;
                }
            }
        }

        if (initSecondary) {
            try {
                factory = (HadoopFileSystemFactory)paths.getPayload(getClass().getClassLoader());
            }
            catch (IgniteCheckedException e) {
                throw new IOException("Failed to get secondary file system factory.", e);
            }

            if (factory == null)
                throw new IOException("Failed to get secondary file system factory (did you set " +
                    IgniteHadoopIgfsSecondaryFileSystem.class.getName() + " as \"secondaryFileSystem\" in " +
                    FileSystemConfiguration.class.getName() + "?)");

            assert factory != null;

            if (factory instanceof LifecycleAware)
                ((LifecycleAware)factory).start();

            try {
                FileSystem secFs = factory.get(user);

                secondaryUri = secFs.getUri();

                A.ensure(secondaryUri != null, "Secondary file system uri should not be null.");
            }
            catch (IOException e) {
                throw new IOException("Failed to connect to the secondary file system: " + secondaryUri, e);
            }
        }
    }
    finally {
        leaveBusy();
    }
}
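// Illustrative usage sketch, not part of the original source: initialize(...) above runs
// when the Hadoop client binds an igfs:// URI (FileSystem.get() for the FileSystem wrapper,
// FileContext for the AbstractFileSystem one). The authority "igfs@localhost:10500" is
// hypothetical.
private static FileSystem connect(Configuration cfg) throws IOException {
    return FileSystem.get(URI.create("igfs://igfs@localhost:10500/"), cfg);
}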