/**
 * @return Collection of readers after check.
 * @throws GridCacheEntryRemovedException If removed.
 */
public Collection<ReaderId> checkReaders() throws GridCacheEntryRemovedException {
    synchronized (mux) {
        checkObsolete();

        if (!readers.isEmpty()) {
            List<ReaderId> rmv = null;

            for (ReaderId reader : readers) {
                if (!cctx.discovery().alive(reader.nodeId())) {
                    if (rmv == null)
                        rmv = new LinkedList<ReaderId>();

                    rmv.add(reader);
                }
            }

            if (rmv != null) {
                readers = new LinkedList<ReaderId>(readers);

                for (ReaderId rdr : rmv)
                    readers.remove(rdr);

                readers = Collections.unmodifiableList(readers);
            }
        }

        return readers;
    }
}
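/*
 * checkReaders() never mutates the published readers list in place: dead entries are
 * removed from a fresh copy, which is then re-published as unmodifiable, so iterators
 * already walking the old list stay valid. A minimal standalone sketch of that
 * copy-on-write pruning idiom (illustrative names, not the GridGain API):
 */
import java.util.*;

final class CowReaders {
    private final Object mux = new Object();
    private volatile List<UUID> readers = Collections.emptyList();

    /** Drops entries rejected by the liveness check and re-publishes an immutable list. */
    List<UUID> prune(Set<UUID> aliveNodes) {
        synchronized (mux) {
            List<UUID> copy = new LinkedList<UUID>(readers);

            for (Iterator<UUID> it = copy.iterator(); it.hasNext();)
                if (!aliveNodes.contains(it.next()))
                    it.remove(); // Old list untouched; only the copy is mutated.

            readers = Collections.unmodifiableList(copy);

            return readers;
        }
    }
}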
/** {@inheritDoc} */
@Override public void explicitUndeploy(UUID nodeId, String rsrcName) {
    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    synchronized (mux) {
        for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            List<SharedDeployment> deps = i1.next();

            for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                SharedDeployment dep = i2.next();

                if (dep.hasName(rsrcName)) {
                    if (!dep.isUndeployed()) {
                        dep.undeploy();

                        dep.onRemoved();

                        // Undeploy.
                        i2.remove();

                        undeployed.add(dep);

                        if (log.isInfoEnabled())
                            log.info("Undeployed per-version class loader: " + dep);
                    }

                    break;
                }
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    recordUndeployed(null, undeployed);
}
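/*
 * Both this method and the node-departure handling in onKernalStart() below sweep a
 * Map<K, List<V>> and must remove elements while iterating. A minimal standalone
 * sketch of the explicit-iterator removal idiom they rely on (illustrative names,
 * not the GridGain API):
 */
import java.util.*;

final class NestedSweep {
    /** Removes matching values and drops buckets that become empty. */
    static <K, V> void sweep(Map<K, List<V>> cache, V victim) {
        for (Iterator<List<V>> i1 = cache.values().iterator(); i1.hasNext();) {
            List<V> vals = i1.next();

            for (Iterator<V> i2 = vals.iterator(); i2.hasNext();)
                if (i2.next().equals(victim))
                    i2.remove(); // Safe removal during iteration.

            if (vals.isEmpty())
                i1.remove(); // Drop the now-empty bucket from the map.
        }
    }
}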
/** {@inheritDoc} */
@Override protected boolean hasReaders() throws GridCacheEntryRemovedException {
    synchronized (mux) {
        checkReaders();

        return !readers.isEmpty();
    }
}
private boolean findAllTestedAndInjectableFieldsInTestClassHierarchy(@NotNull Class<?> testClass) {
    Class<?> classWithFields = testClass;

    do {
        Field[] fields = classWithFields.getDeclaredFields();
        findTestedAndInjectableFields(fields);
        classWithFields = classWithFields.getSuperclass();
    }
    while (classWithFields.getClassLoader() != null);

    return !testedFields.isEmpty();
}
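/*
 * The loop above stops as soon as it reaches a bootstrap-loaded superclass, for which
 * getClassLoader() returns null, so JDK classes such as Object are never scanned.
 * A minimal standalone sketch of the same hierarchy walk (illustrative, not part of
 * the original class):
 */
import java.lang.reflect.Field;
import java.util.*;

final class HierarchyFields {
    /** Collects declared fields of the class and all non-JDK superclasses. */
    static List<Field> collect(Class<?> testClass) {
        List<Field> result = new ArrayList<>();
        Class<?> c = testClass;

        do {
            result.addAll(Arrays.asList(c.getDeclaredFields()));
            c = c.getSuperclass();
        }
        while (c.getClassLoader() != null); // Null loader => bootstrap class, stop.

        return result;
    }
}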
@NotNull
public static Future<Void> asyncDelete(@NotNull Collection<File> files) {
    List<File> tempFiles = new ArrayList<File>();

    for (File file : files) {
        final File tempFile = renameToTempFileOrDelete(file);
        if (tempFile != null) {
            tempFiles.add(tempFile);
        }
    }

    if (!tempFiles.isEmpty()) {
        return startDeletionThread(tempFiles.toArray(new File[tempFiles.size()]));
    }

    return new FixedFuture<Void>(null);
}
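/*
 * The rename-first step makes each file disappear from its original path immediately,
 * while the slow recursive delete runs on a background thread. A minimal sketch of
 * that pattern using only the JDK (renameToTempFileOrDelete/startDeletionThread are
 * the original helpers; everything below, including the ".__del__" suffix, is
 * illustrative):
 */
import java.io.File;
import java.util.concurrent.*;

final class AsyncDeleteSketch {
    private static final ExecutorService POOL = Executors.newSingleThreadExecutor();

    static Future<Void> asyncDelete(File file) {
        File temp = new File(file.getParentFile(), file.getName() + ".__del__");

        // If the rename fails, fall back to deleting in place, still off-thread.
        final File target = file.renameTo(temp) ? temp : file;

        return POOL.submit(new Callable<Void>() {
            @Override public Void call() {
                deleteRecursively(target);
                return null;
            }
        });
    }

    private static void deleteRecursively(File f) {
        File[] children = f.listFiles();
        if (children != null)
            for (File c : children)
                deleteRecursively(c);
        f.delete();
    }
}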
/**
 * Performs flush.
 *
 * @throws GridException If failed.
 */
private void doFlush() throws GridException {
    lastFlushTime = U.currentTimeMillis();

    List<GridFuture<?>> activeFuts0 = null;

    int doneCnt = 0;

    for (GridFuture<?> f : activeFuts) {
        if (!f.isDone()) {
            if (activeFuts0 == null)
                activeFuts0 = new ArrayList<>((int)(activeFuts.size() * 1.2));

            activeFuts0.add(f);
        }
        else {
            f.get();

            doneCnt++;
        }
    }

    if (activeFuts0 == null || activeFuts0.isEmpty())
        return;

    while (true) {
        Queue<GridFuture<?>> q = null;

        for (Buffer buf : bufMappings.values()) {
            GridFuture<?> flushFut = buf.flush();

            if (flushFut != null) {
                if (q == null)
                    q = new ArrayDeque<>(bufMappings.size() * 2);

                q.add(flushFut);
            }
        }

        if (q != null) {
            assert !q.isEmpty();

            boolean err = false;

            for (GridFuture<?> fut = q.poll(); fut != null; fut = q.poll()) {
                try {
                    fut.get();
                }
                catch (GridException e) {
                    if (log.isDebugEnabled())
                        log.debug("Failed to flush buffer: " + e);

                    err = true;
                }
            }

            if (err)
                // Remaps needed - flush buffers.
                continue;
        }

        doneCnt = 0;

        for (int i = 0; i < activeFuts0.size(); i++) {
            GridFuture<?> f = activeFuts0.get(i);

            if (f == null)
                doneCnt++;
            else if (f.isDone()) {
                f.get();

                doneCnt++;

                activeFuts0.set(i, null);
            }
            else
                break;
        }

        if (doneCnt == activeFuts0.size())
            return;
    }
}
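/*
 * The loop above has three phases: partition pending futures from completed ones,
 * flush every buffer and wait (retrying the whole pass if any flush failed, since a
 * failure means entries were remapped), then check the pending futures in order until
 * all have completed. A minimal sketch of the same control flow with JDK futures
 * (the buffer representation below is an illustrative stand-in, not the GridGain types):
 */
import java.util.*;
import java.util.concurrent.*;

final class FlushLoopSketch {
    static void doFlush(List<CompletableFuture<?>> activeFuts,
        List<Callable<CompletableFuture<?>>> buffers) throws Exception {
        List<CompletableFuture<?>> pending = new ArrayList<>();

        for (CompletableFuture<?> f : activeFuts)
            if (!f.isDone())
                pending.add(f);
            else
                f.get(); // Re-throws failures of already-completed futures.

        if (pending.isEmpty())
            return;

        while (true) {
            boolean err = false;

            // Phase 1: flush every buffer and wait on the per-buffer flush futures.
            for (Callable<CompletableFuture<?>> buf : buffers) {
                try {
                    buf.call().get();
                }
                catch (Exception e) {
                    err = true; // Failed flush => remap happened; retry the whole pass.
                }
            }

            if (err)
                continue;

            // Phase 2: stop once every originally-pending future has completed.
            boolean allDone = true;

            for (CompletableFuture<?> f : pending)
                if (f.isDone())
                    f.get();
                else {
                    allDone = false;

                    break;
                }

            if (allDone)
                return;
        }
    }
}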
/**
 * This method is called to map or split grid task into multiple grid jobs. This is the first
 * method that gets called when task execution starts.
 *
 * @param subgrid Nodes available for this task execution. Note that order of nodes is guaranteed
 *      to be randomized by container. This ensures that every time you simply iterate through grid
 *      nodes, the order of nodes will be random, which over time should result in all nodes being
 *      used equally.
 * @param data Task execution argument. Can be {@code null}. This is the same argument as the one
 *      passed into {@code Grid#execute(...)} methods.
 * @return Map of grid jobs assigned to subgrid nodes. Unless {@link GridComputeTaskContinuousMapper}
 *      is injected into task, if {@code null} or empty map is returned, exception will be thrown.
 * @throws GridException If mapping could not complete successfully. This exception will be thrown
 *      out of {@link GridComputeTaskFuture#get()} method.
 */
@Override public Map<? extends GridComputeJob, GridNode> map(List<GridNode> subgrid,
    @Nullable final Collection<Integer> data) throws GridException {
    assert !subgrid.isEmpty();

    // Give preference to wanted node. Otherwise, take the first one.
    GridNode targetNode = F.find(subgrid, subgrid.get(0), new GridPredicate<GridNode>() {
        @Override public boolean apply(GridNode e) {
            return preferredNode.equals(e.id());
        }
    });

    return Collections.singletonMap(
        new GridComputeJobAdapter() {
            @GridLoggerResource
            private GridLogger log;

            @GridInstanceResource
            private Grid grid;

            @Override public Object execute() throws GridException {
                log.info("Going to put data: " + data.size());

                GridCache<Object, Object> cache = grid.cache(cacheName);

                assert cache != null;

                Map<Integer, T2<Integer, Collection<Integer>>> putMap = groupData(data);

                for (Map.Entry<Integer, T2<Integer, Collection<Integer>>> entry : putMap.entrySet()) {
                    T2<Integer, Collection<Integer>> pair = entry.getValue();

                    Object affKey = pair.get1();

                    // Group lock partition.
                    try (GridCacheTx tx = cache.txStartPartition(cache.affinity().partition(affKey),
                        optimistic ? OPTIMISTIC : PESSIMISTIC, REPEATABLE_READ, 0, pair.get2().size())) {
                        for (Integer val : pair.get2())
                            cache.put(val, val);

                        tx.commit();
                    }
                }

                log.info("Finished put data: " + data.size());

                return data;
            }

            /**
             * Groups values by partitions.
             *
             * @param data Data to put.
             * @return Grouped map.
             */
            private Map<Integer, T2<Integer, Collection<Integer>>> groupData(Iterable<Integer> data) {
                GridCache<Object, Object> cache = grid.cache(cacheName);

                Map<Integer, T2<Integer, Collection<Integer>>> res = new HashMap<>();

                for (Integer val : data) {
                    int part = cache.affinity().partition(val);

                    T2<Integer, Collection<Integer>> tup = res.get(part);

                    if (tup == null) {
                        tup = new T2<Integer, Collection<Integer>>(val, new LinkedList<Integer>());

                        res.put(part, tup);
                    }

                    tup.get2().add(val);
                }

                return res;
            }
        },
        targetNode);
}
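/*
 * The groupData(...) helper above is a generic "bucket by key" step: compute a
 * partition for each value and append the value to that partition's bucket, creating
 * the bucket on first use. A standalone sketch of the same step; the modulo hash below
 * is an illustrative stand-in for the real cache.affinity().partition(val) call:
 */
import java.util.*;

final class GroupByPartition {
    /** Buckets values by a computed partition, creating buckets on first use. */
    static Map<Integer, Collection<Integer>> groupData(Iterable<Integer> data, int parts) {
        Map<Integer, Collection<Integer>> res = new HashMap<>();

        for (Integer val : data) {
            // Stand-in partitioner; non-negative modulo of the hash code.
            int part = (val.hashCode() & Integer.MAX_VALUE) % parts;

            Collection<Integer> bucket = res.get(part);

            if (bucket == null) {
                bucket = new LinkedList<>();

                res.put(part, bucket);
            }

            bucket.add(val);
        }

        return res;
    }
}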
/** {@inheritDoc} */
@Override public void onKernalStart() throws GridException {
    discoLsnr = new GridLocalEventListener() {
        @Override public void onEvent(GridEvent evt) {
            assert evt instanceof GridDiscoveryEvent;
            assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED;

            GridDiscoveryEvent discoEvt = (GridDiscoveryEvent)evt;

            Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

            if (log.isDebugEnabled())
                log.debug("Processing node departure event: " + evt);

            synchronized (mux) {
                for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
                    List<SharedDeployment> deps = i1.next();

                    for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                        SharedDeployment dep = i2.next();

                        dep.removeParticipant(discoEvt.eventNodeId());

                        if (!dep.hasParticipants()) {
                            if (dep.deployMode() == SHARED) {
                                if (!dep.isUndeployed()) {
                                    dep.undeploy();

                                    // Undeploy.
                                    i2.remove();

                                    assert !dep.isRemoved();

                                    dep.onRemoved();

                                    undeployed.add(dep);

                                    if (log.isDebugEnabled())
                                        log.debug("Undeployed class loader as there are no participating " +
                                            "nodes: " + dep);
                                }
                            }
                            else if (log.isDebugEnabled())
                                log.debug("Preserving deployment without node participants: " + dep);
                        }
                        else if (log.isDebugEnabled())
                            log.debug("Keeping deployment as it still has participants: " + dep);
                    }

                    if (deps.isEmpty())
                        i1.remove();
                }
            }

            recordUndeployed(discoEvt.eventNodeId(), undeployed);
        }
    };

    ctx.event().addLocalEventListener(discoLsnr, EVT_NODE_FAILED, EVT_NODE_LEFT);

    Collection<SharedDeployment> undeployed = new LinkedList<SharedDeployment>();

    synchronized (mux) {
        for (Iterator<List<SharedDeployment>> i1 = cache.values().iterator(); i1.hasNext();) {
            List<SharedDeployment> deps = i1.next();

            for (Iterator<SharedDeployment> i2 = deps.iterator(); i2.hasNext();) {
                SharedDeployment dep = i2.next();

                for (UUID nodeId : dep.getParticipantNodeIds())
                    if (ctx.discovery().node(nodeId) == null)
                        dep.removeParticipant(nodeId);

                if (!dep.hasParticipants()) {
                    if (dep.deployMode() == SHARED) {
                        if (!dep.isUndeployed()) {
                            dep.undeploy();

                            // Undeploy.
                            i2.remove();

                            dep.onRemoved();

                            undeployed.add(dep);

                            if (log.isDebugEnabled())
                                log.debug("Undeployed class loader as there are no participating nodes: " + dep);
                        }
                    }
                    else if (log.isDebugEnabled())
                        log.debug("Preserving deployment without node participants: " + dep);
                }
                else if (log.isDebugEnabled())
                    log.debug("Keeping deployment as it still has participants: " + dep);
            }

            if (deps.isEmpty())
                i1.remove();
        }
    }

    recordUndeployed(null, undeployed);

    if (log.isDebugEnabled())
        log.debug("Registered deployment discovery listener: " + discoLsnr);
}
/** {@inheritDoc} */
@Override public GridDeployment getDeployment(GridDeploymentMetadata meta) {
    assert meta != null;

    assert ctx.config().isPeerClassLoadingEnabled();

    // Validate metadata.
    assert meta.classLoaderId() != null;
    assert meta.senderNodeId() != null;
    assert meta.sequenceNumber() >= -1;
    assert meta.parentLoader() == null;

    if (log.isDebugEnabled())
        log.debug("Starting to peer-load class based on deployment metadata: " + meta);

    while (true) {
        List<SharedDeployment> depsToCheck = null;

        SharedDeployment dep = null;

        synchronized (mux) {
            // Check obsolete request.
            if (isDeadClassLoader(meta))
                return null;

            List<SharedDeployment> deps = cache.get(meta.userVersion());

            if (deps != null) {
                assert !deps.isEmpty();

                for (SharedDeployment d : deps) {
                    if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId()) ||
                        meta.senderNodeId().equals(ctx.localNodeId())) {
                        // Done.
                        dep = d;

                        break;
                    }
                }

                if (dep == null) {
                    GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

                    if (!redeployCheck.get1()) {
                        // Checking for redeployment encountered invalid state.
                        if (log.isDebugEnabled())
                            log.debug("Checking for redeployment encountered invalid state: " + meta);

                        return null;
                    }

                    dep = redeployCheck.get2();

                    if (dep == null) {
                        // Find existing deployments that need to be checked
                        // whether they should be reused for this request.
                        for (SharedDeployment d : deps) {
                            if (!d.isPendingUndeploy() && !d.isUndeployed()) {
                                if (depsToCheck == null)
                                    depsToCheck = new LinkedList<SharedDeployment>();

                                if (log.isDebugEnabled())
                                    log.debug("Adding deployment to check: " + d);

                                depsToCheck.add(d);
                            }
                        }

                        // If no deployment can be reused, create a new one.
                        if (depsToCheck == null) {
                            dep = createNewDeployment(meta, false);

                            deps.add(dep);
                        }
                    }
                }
            }
            else {
                GridTuple2<Boolean, SharedDeployment> redeployCheck = checkRedeploy(meta);

                if (!redeployCheck.get1()) {
                    // Checking for redeployment encountered invalid state.
                    if (log.isDebugEnabled())
                        log.debug("Checking for redeployment encountered invalid state: " + meta);

                    return null;
                }

                dep = redeployCheck.get2();

                if (dep == null)
                    // Create peer class loader.
                    dep = createNewDeployment(meta, true);
            }
        }

        if (dep != null) {
            if (log.isDebugEnabled())
                log.debug("Found SHARED or CONTINUOUS deployment after first check: " + dep);

            // Cache the deployed class.
            Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

            if (cls == null) {
                U.warn(log, "Failed to load peer class (ignore if class got undeployed during preloading) [alias=" +
                    meta.alias() + ", dep=" + dep + ']');

                return null;
            }

            return dep;
        }

        assert meta.parentLoader() == null;
        assert depsToCheck != null;
        assert !depsToCheck.isEmpty();

        /*
         * Logic below must be performed outside of synchronization
         * because it involves network calls.
         */

        // Check if class can be loaded from existing nodes.
        // In most cases this loop will find something.
        for (SharedDeployment d : depsToCheck) {
            // Load class. Note, that remote node will not load this class.
            // The class will only be loaded on this node.
            Class<?> cls = d.deployedClass(meta.className(), meta.alias());

            if (cls != null) {
                synchronized (mux) {
                    if (!d.isUndeployed() && !d.isPendingUndeploy()) {
                        if (!addParticipant(d, meta))
                            return null;

                        if (log.isDebugEnabled())
                            log.debug("Acquired deployment after verifying its availability on " +
                                "existing nodes [depCls=" + cls + ", dep=" + d + ", meta=" + meta + ']');

                        return d;
                    }
                }
            }
            else if (log.isDebugEnabled())
                log.debug("Deployment cannot be reused (class does not exist on participating nodes) [dep=" + d +
                    ", meta=" + meta + ']');
        }

        // We are here either because all participant nodes failed
        // or the class indeed should have a separate deployment.
        for (SharedDeployment d : depsToCheck) {
            // Temporary class loader.
            ClassLoader temp = new GridDeploymentClassLoader(
                GridUuid.randomUuid(),
                meta.userVersion(),
                meta.deploymentMode(),
                true,
                ctx,
                ctxLdr,
                meta.classLoaderId(),
                meta.senderNodeId(),
                meta.sequenceNumber(),
                comm,
                ctx.config().getNetworkTimeout(),
                log,
                ctx.config().getPeerClassLoadingClassPathExclude(),
                0,
                false);

            String path = U.classNameToResourceName(d.sampleClassName());

            // We check if any random class from existing deployment can be
            // loaded from sender node. If it can, then we reuse existing
            // deployment.
            InputStream rsrcIn = temp.getResourceAsStream(path);

            if (rsrcIn != null) {
                // We don't need the actual stream.
                U.closeQuiet(rsrcIn);

                synchronized (mux) {
                    if (d.isUndeployed() || d.isPendingUndeploy())
                        continue;

                    // Add new node prior to loading the class, so we attempt
                    // to load the class from the latest node.
                    if (!addParticipant(d, meta)) {
                        if (log.isDebugEnabled())
                            log.debug("Failed to add participant to deployment " +
                                "[meta=" + meta + ", dep=" + d + ']');

                        return null;
                    }
                }

                Class<?> depCls = d.deployedClass(meta.className(), meta.alias());

                if (depCls == null) {
                    U.error(log, "Failed to peer load class after loading it as a resource [alias=" +
                        meta.alias() + ", dep=" + d + ']');

                    return null;
                }

                if (log.isDebugEnabled())
                    log.debug("Acquired deployment class after verifying other class " +
                        "availability on sender node [depCls=" + depCls + ", rndCls=" + d.sampleClass() +
                        ", sampleClsName=" + d.sampleClassName() + ", meta=" + meta + ']');

                return d;
            }
            else if (log.isDebugEnabled())
                log.debug("Deployment cannot be reused (random class could not be loaded from sender node) [dep=" +
                    d + ", meta=" + meta + ']');
        }

        synchronized (mux) {
            if (log.isDebugEnabled())
                log.debug("None of the existing class-loaders fit (will try to create a new one): " + meta);

            // Check obsolete request.
            if (isDeadClassLoader(meta))
                return null;

            // Check that deployment picture has not changed.
            List<SharedDeployment> deps = cache.get(meta.userVersion());

            if (deps != null) {
                assert !deps.isEmpty();

                boolean retry = false;

                for (SharedDeployment d : deps) {
                    // Double check if sender was already added.
                    if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) {
                        dep = d;

                        retry = false;

                        break;
                    }

                    // New deployment was added while outside of synchronization.
                    // Need to recheck it again.
                    if (!d.isPendingUndeploy() && !d.isUndeployed() && !depsToCheck.contains(d))
                        retry = true;
                }

                if (retry) {
                    if (log.isDebugEnabled())
                        log.debug("Retrying due to concurrency issues: " + meta);

                    // Outer while loop.
                    continue;
                }

                if (dep == null) {
                    // No new deployments were added, so we can safely add ours.
                    dep = createNewDeployment(meta, false);

                    deps.add(dep);

                    if (log.isDebugEnabled())
                        log.debug("Adding new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
                }
            }
            else {
                dep = createNewDeployment(meta, true);

                if (log.isDebugEnabled())
                    log.debug("Created new deployment within second check [dep=" + dep + ", meta=" + meta + ']');
            }
        }

        if (dep != null) {
            // Cache the deployed class.
            Class<?> cls = dep.deployedClass(meta.className(), meta.alias());

            if (cls == null) {
                U.warn(log, "Failed to load peer class (ignore if class got undeployed during preloading) [alias=" +
                    meta.alias() + ", dep=" + dep + ']');

                return null;
            }
        }

        return dep;
    }
}
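/*
 * getDeployment(...) is a check / act / re-check loop: look for a reusable entry under
 * the lock, release the lock for the slow network probes, then re-take the lock and
 * verify the picture has not changed, retrying from the top if it has. A minimal
 * standalone sketch of that retry pattern (illustrative types, not the GridGain API):
 */
import java.util.*;

final class CheckActRecheck<K, V> {
    private final Object mux = new Object();
    private final Map<K, V> cache = new HashMap<>();

    interface SlowProbe<K, V> {
        /** Expensive validation performed outside the lock (e.g. a network call). */
        V probe(K key);
    }

    V acquire(K key, SlowProbe<K, V> probe) {
        while (true) {
            synchronized (mux) {
                V existing = cache.get(key);

                if (existing != null)
                    return existing; // Fast path under the lock.
            }

            V candidate = probe.probe(key); // Slow path, lock released.

            synchronized (mux) {
                if (cache.get(key) != null)
                    continue; // Picture changed while unlocked: retry from the top.

                cache.put(key, candidate);

                return candidate;
            }
        }
    }
}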