/**
 * Remove particular entry from the TRASH directory.
 *
 * @param name Entry name.
 * @param id Entry ID.
 * @return {@code True} in case the entry really was deleted from the file system by this call.
 * @throws IgniteCheckedException If failed.
 */
private boolean delete(String name, IgniteUuid id) throws IgniteCheckedException {
    assert name != null;
    assert id != null;

    while (true) {
        IgfsFileInfo info = meta.info(id);

        if (info != null) {
            if (info.isDirectory()) {
                deleteDirectory(TRASH_ID, id);

                if (meta.delete(TRASH_ID, name, id))
                    return true;
            }
            else {
                assert info.isFile();

                // Delete file content first.
                // In case this node crashes, other node will re-delete the file.
                data.delete(info).get();

                boolean ret = meta.delete(TRASH_ID, name, id);

                if (evts.isRecordable(EVT_IGFS_FILE_PURGED)) {
                    if (info.path() != null)
                        evts.record(new IgfsEvent(info.path(),
                            igfsCtx.kernalContext().discovery().localNode(),
                            EVT_IGFS_FILE_PURGED));
                    else
                        LT.warn(log, null, "Removing file without path info: " + info);
                }

                return ret;
            }
        }
        else
            return false; // Entry was deleted concurrently.
    }
}
/** Perform cleanup of the trash directory. */
private void delete() {
    IgfsFileInfo info = null;

    try {
        info = meta.info(TRASH_ID);
    }
    catch (ClusterTopologyServerNotFoundException e) {
        LT.warn(log, e, "Server nodes not found.");
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Cannot obtain trash directory info.", e);
    }

    if (info != null) {
        for (Map.Entry<String, IgfsListingEntry> entry : info.listing().entrySet()) {
            IgniteUuid fileId = entry.getValue().fileId();

            if (log.isDebugEnabled())
                log.debug("Deleting IGFS trash entry [name=" + entry.getKey() + ", fileId=" + fileId + ']');

            try {
                if (!cancelled) {
                    if (delete(entry.getKey(), fileId)) {
                        if (log.isDebugEnabled())
                            log.debug("Sending delete confirmation message [name=" + entry.getKey() +
                                ", fileId=" + fileId + ']');

                        sendDeleteMessage(new IgfsDeleteMessage(fileId));
                    }
                }
                else
                    break;
            }
            catch (IgniteInterruptedCheckedException ignored) {
                // Ignore this exception while stopping.
            }
            catch (IgniteCheckedException e) {
                U.error(log, "Failed to delete entry from the trash directory: " + entry.getKey(), e);

                sendDeleteMessage(new IgfsDeleteMessage(fileId, e));
            }
        }
    }
}
/**
 * Send delete message to all meta cache nodes in the grid.
 *
 * @param msg Message to send.
 */
private void sendDeleteMessage(IgfsDeleteMessage msg) {
    assert msg != null;

    Collection<ClusterNode> nodes = meta.metaCacheNodes();

    for (ClusterNode node : nodes) {
        try {
            igfsCtx.send(node, topic, msg, GridIoPolicy.SYSTEM_POOL);
        }
        catch (IgniteCheckedException e) {
            U.warn(log, "Failed to send IGFS delete message to node [nodeId=" + node.id() +
                ", msg=" + msg + ", err=" + e.getMessage() + ']');
        }
    }
}
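// --- Illustrative sketch (not part of the original source) ---
// Conceptually, a node receiving the delete confirmation completes any local
// future that is waiting for that file to be purged. A minimal, generic
// version of that bookkeeping is shown below; the pendingFuts map, the
// onDeleteMessage() handler and the msg.id()/msg.error() accessors are
// assumptions for illustration, not the actual Ignite wiring.
private final ConcurrentMap<IgniteUuid, GridFutureAdapter<Object>> pendingFuts =
    new ConcurrentHashMap<>();

private void onDeleteMessage(IgfsDeleteMessage msg) {
    // Assumed accessors: id() returns the purged file ID, error() is non-null
    // if the deletion failed on the sending node.
    GridFutureAdapter<Object> fut = pendingFuts.remove(msg.id());

    if (fut != null) {
        if (msg.error() == null)
            fut.onDone();
        else
            fut.onDone(msg.error());
    }
}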
/**
 * Recursively delete the contents of the given trash directory or subdirectory
 * (the entry itself is removed by the caller).
 *
 * @param parentId Parent ID.
 * @param id Entry ID.
 * @throws IgniteCheckedException If delete failed for some reason.
 */
private void deleteDirectory(IgniteUuid parentId, IgniteUuid id) throws IgniteCheckedException {
    assert parentId != null;
    assert id != null;

    while (true) {
        IgfsFileInfo info = meta.info(id);

        if (info != null) {
            assert info.isDirectory();

            Map<String, IgfsListingEntry> listing = info.listing();

            if (listing.isEmpty())
                return; // Directory is empty.

            Map<String, IgfsListingEntry> delListing;

            if (listing.size() <= MAX_DELETE_BATCH)
                delListing = listing;
            else {
                delListing = new HashMap<>(MAX_DELETE_BATCH, 1.0f);

                int i = 0;

                for (Map.Entry<String, IgfsListingEntry> entry : listing.entrySet()) {
                    delListing.put(entry.getKey(), entry.getValue());

                    if (++i == MAX_DELETE_BATCH)
                        break;
                }
            }

            GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<>();

            // Delegate to child folders.
            for (IgfsListingEntry entry : delListing.values()) {
                if (!cancelled) {
                    if (entry.isDirectory())
                        deleteDirectory(id, entry.fileId());
                    else {
                        IgfsFileInfo fileInfo = meta.info(entry.fileId());

                        if (fileInfo != null) {
                            assert fileInfo.isFile();

                            fut.add(data.delete(fileInfo));
                        }
                    }
                }
                else
                    return;
            }

            fut.markInitialized();

            // Wait for data cache to delete values before clearing meta cache.
            try {
                fut.get();
            }
            catch (IgniteFutureCancelledCheckedException ignore) {
                // This future can be cancelled only due to IGFS shutdown.
                cancelled = true;

                return;
            }

            // Actual delete of folder content.
            Collection<IgniteUuid> delIds = meta.delete(id, delListing);

            if (delListing == listing && delListing.size() == delIds.size())
                break; // All entries were deleted.
        }
        else
            break; // Entry was deleted concurrently.
    }
}
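// --- Illustrative sketch (not part of the original source) ---
// A minimal example of how a background worker might drive the cleanup
// methods above: loop until cancelled, purge the current trash listing via
// delete(), then pause between passes. The FREQ constant and the
// runCleanupLoop() method are assumptions for illustration; the actual
// IgfsDeleteWorker wiring (wake-up signalling, interruptible waits) differs
// in detail.
private static final long FREQ = 2000L;

private void runCleanupLoop() throws InterruptedException {
    while (!cancelled) {
        // Purge whatever is currently listed under TRASH_ID (see delete() above).
        delete();

        // A real worker would use an interruptible wait that can also be
        // signalled as soon as new entries are moved to the trash.
        Thread.sleep(FREQ);
    }
}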
/**
 * Check how prefetch override works.
 *
 * @throws Exception If failed.
 */
public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
        out.write(chunk);

        totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
        if (dataCache.containsKey(key))
            break;
        else
            U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(log, new Callable<Object>() {
        @Override public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
                in0.read(readBuf);
            }
            finally {
                U.closeQuiet(in0);
            }

            return null;
        }
    }, IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
}