/** * Determines whether the given application is authorized for the user, based on the user's * education organization (LEA). Admin-realm users are checked against admin visibility; other * users are checked against auto-authorization, operator approval, and the ed-orgs that have * authorized the application. * * @param app the application entity to check * @param principal the authenticated principal * @return true if the application is authorized for the principal, false otherwise */ @SuppressWarnings("unchecked") public boolean isAuthorizedForApp(Entity app, SLIPrincipal principal) { if (principal.isAdminRealmAuthenticated()) { return isAdminVisible(app); } else { if (isAutoAuthorized(app)) { return true; } else if (!isOperatorApproved(app)) { return false; } else { Set<String> edOrgs = helper.locateDirectEdorgs(principal.getEntity()); NeutralQuery appAuthCollQuery = new NeutralQuery(); appAuthCollQuery.addCriteria(new NeutralCriteria("applicationId", "=", app.getEntityId())); appAuthCollQuery.addCriteria( new NeutralCriteria("edorgs.authorizedEdorg", NeutralCriteria.CRITERIA_IN, edOrgs)); Entity authorizedApps = repo.findOne("applicationAuthorization", appAuthCollQuery); if (authorizedApps != null) { if (isAutoApproved(app)) { return true; } else { // intersect the app's approved ed-orgs with the user's districts List<String> approvedDistricts = new ArrayList<String>((List<String>) app.getBody().get("authorized_ed_orgs")); List<String> myDistricts = helper.getDistricts(edOrgs); approvedDistricts.retainAll(myDistricts); return !approvedDistricts.isEmpty(); } } } } return false; }
/** * Rearranges the exertions in the job according to the sorted order. * * @param topXrt the top-level exertion whose component mograms are reordered * @param sortedExertions the exertions in sorted order */ private void reorderJob(Exertion topXrt, List<Mogram> sortedExertions) { List<Mogram> sortedSubset = new ArrayList<Mogram>(sortedExertions); sortedSubset.retainAll(topXrt.getMograms()); if (topXrt.getFlowType() != null && topXrt.getFlowType().equals(Strategy.Flow.AUTO)) { ((ServiceExertion) topXrt).setFlowType(setFlow(topXrt, sortedSubset)); logger.info("FLOW for exertion: " + topXrt.getName() + " set to: " + topXrt.getFlowType()); } List<String> exertionsBefore = new ArrayList<String>(); for (Mogram xrt : topXrt.getMograms()) exertionsBefore.add(xrt.getName()); List<String> exertionsAfter = new ArrayList<String>(); for (Mogram xrt : sortedExertions) exertionsAfter.add(xrt.getName()); if (!topXrt.getMograms().equals(sortedSubset)) { logger.info("Order of exertions for " + topXrt.getName() + " will be changed: "); logger.info("From: " + exertionsBefore); logger.info("To: " + exertionsAfter); topXrt.getMograms().removeAll(sortedSubset); topXrt.getMograms().addAll(sortedSubset); } for (Iterator<Mogram> i = topXrt.getMograms().iterator(); i.hasNext(); ) { Exertion xrt = (Exertion) i.next(); if (xrt instanceof Job) { reorderJob(xrt, sortedExertions); } } }
/** * Checks whether the given function has any decorators from the {@code abc} module. * * @param element Python function to check * @param context type evaluation context. If it doesn't allow switching to the AST, decorators * are compared by the text of the last component of their qualified names. * @return true if at least one known abstract decorator is present on the element * @see PyKnownDecoratorUtil.KnownDecorator */ public static boolean hasAbstractDecorator( @NotNull PyDecoratable element, @NotNull TypeEvalContext context) { final List<KnownDecorator> knownDecorators = getKnownDecorators(element, context); if (knownDecorators.isEmpty()) { return false; } knownDecorators.retainAll(ABSTRACT_DECORATORS); return !knownDecorators.isEmpty(); }
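// Illustrative sketch, not part of the IntelliJ sources above: the retainAll-then-isEmpty idiom
// tests whether two collections overlap, but it mutates the local copy. The same check can be
// expressed without mutation via java.util.Collections.disjoint:
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

class OverlapCheck {
  // True if the two collections share at least one element; neither collection is modified.
  static <T> boolean hasAny(List<? extends T> found, List<? extends T> wanted) {
    return !Collections.disjoint(found, wanted);
  }

  public static void main(String[] args) {
    List<String> decorators = Arrays.asList("abstractmethod", "abstractproperty");
    List<String> abstractOnes = Arrays.asList("abstractmethod");
    System.out.println(hasAny(decorators, abstractOnes)); // prints true
  }
}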
public static void main(String[] args) { Random rand = new Random(47); List<Pet> pets = Pets.arrayList(7); print("1: " + pets); Hamster h = new Hamster(); pets.add(h); // Automatically resizes print("2: " + pets); print("3: " + pets.contains(h)); pets.remove(h); // Remove by object Pet p = pets.get(2); print("4: " + p + " " + pets.indexOf(p)); Pet cymric = new Cymric(); print("5: " + pets.indexOf(cymric)); print("6: " + pets.remove(cymric)); // Must be the exact object: print("7: " + pets.remove(p)); print("8: " + pets); pets.add(3, new Mouse()); // Insert at an index print("9: " + pets); List<Pet> sub = pets.subList(1, 4); print("subList: " + sub); print("10: " + pets.containsAll(sub)); Collections.sort(sub); // In-place sort print("sorted subList: " + sub); // Order is not important in containsAll(): print("11: " + pets.containsAll(sub)); Collections.shuffle(sub, rand); // Mix it up print("shuffled subList: " + sub); print("12: " + pets.containsAll(sub)); List<Pet> copy = new ArrayList<Pet>(pets); sub = Arrays.asList(pets.get(1), pets.get(4)); print("sub: " + sub); copy.retainAll(sub); print("13: " + copy); copy = new ArrayList<Pet>(pets); // Get a fresh copy copy.remove(2); // Remove by index print("14: " + copy); copy.removeAll(sub); // Only removes exact objects print("15: " + copy); copy.set(1, new Mouse()); // Replace an element print("16: " + copy); copy.addAll(2, sub); // Insert a list in the middle print("17: " + copy); print("18: " + pets.isEmpty()); pets.clear(); // Remove all elements print("19: " + pets); print("20: " + pets.isEmpty()); pets.addAll(Pets.arrayList(4)); print("21: " + pets); Object[] o = pets.toArray(); print("22: " + o[3]); Pet[] pa = pets.toArray(new Pet[0]); print("23: " + pa[3].id()); }
public static <T> List<T> intersection( Collection<? extends Collection<T>> availableValuesByDescriptor) { List<T> result = new ArrayList<T>(); Iterator<? extends Collection<T>> iterator = availableValuesByDescriptor.iterator(); if (iterator.hasNext()) { Collection<T> firstSet = iterator.next(); result.addAll(firstSet); while (iterator.hasNext()) { Collection<T> next = iterator.next(); result.retainAll(next); } } return result; }
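// A hedged usage sketch for the intersection helper above; the enclosing class name
// (CollectionUtil) is assumed here purely for illustration.
import java.util.Arrays;
import java.util.List;

public class IntersectionDemo {
  public static void main(String[] args) {
    List<List<Integer>> groups = Arrays.asList(
        Arrays.asList(1, 2, 3, 4),
        Arrays.asList(2, 3, 4, 5),
        Arrays.asList(0, 2, 4, 6));
    // Keeps only the values present in every inner collection, in the order of the first one.
    List<Integer> common = CollectionUtil.intersection(groups);
    System.out.println(common); // [2, 4]
  }
}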
@NotNull @Override public GlobalSearchScope intersectWith(@NotNull GlobalSearchScope scope) { if (scope instanceof FileTypeRestrictionScope) { FileTypeRestrictionScope restrict = (FileTypeRestrictionScope) scope; if (restrict.myBaseScope == myBaseScope) { List<FileType> intersection = new ArrayList<FileType>(Arrays.asList(restrict.myFileTypes)); intersection.retainAll(Arrays.asList(myFileTypes)); return new FileTypeRestrictionScope( myBaseScope, intersection.toArray(new FileType[intersection.size()])); } } return super.intersectWith(scope); }
public static void main(String[] args) { System.out.println(a); System.out.println(a2); System.out.println("a.contains(" + s[0] + ") = " + a.contains(s[0])); System.out.println("a.containsAll(a2) = " + a.containsAll(a2)); System.out.println("a.isEmpty() = " + a.isEmpty()); System.out.println("a.indexOf(" + s[5] + ") = " + a.indexOf(s[5])); // Traverse backwards: ListIterator lit = a.listIterator(a.size()); while (lit.hasPrevious()) System.out.print(lit.previous() + " "); System.out.println(); // Set the elements to different values: for (int i = 0; i < a.size(); i++) a.set(i, "47"); System.out.println(a); // Compiles, but won't run: lit.add("X"); // Unsupported operation a.clear(); // Unsupported a.add("eleven"); // Unsupported a.addAll(a2); // Unsupported a.retainAll(a2); // Unsupported a.remove(s[0]); // Unsupported a.removeAll(a2); // Unsupported }
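// The "Unsupported" comments above fit a fixed-size list such as the view returned by
// Arrays.asList (the declaration of "a" is not shown, so this is an assumption): set()
// succeeds because it does not change the size, while structural modifications throw
// UnsupportedOperationException. A minimal sketch:
import java.util.Arrays;
import java.util.List;

public class FixedSizeListDemo {
  public static void main(String[] args) {
    List<String> a = Arrays.asList("one", "two", "three"); // fixed-size view over an array
    a.set(0, "47");                                        // allowed: element replaced in place
    System.out.println(a);                                 // [47, two, three]
    try {
      a.retainAll(Arrays.asList("47"));                    // would shrink the list
    } catch (UnsupportedOperationException e) {
      System.out.println("retainAll() is unsupported on a fixed-size list");
    }
  }
}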
public boolean retainAll(Collection c) { boolean b = list.retainAll(c); repaginate(); return b; }
public boolean retainAll(Collection<?> c) { return _delegate.retainAll(c); }
public boolean retainAll(Collection c) { setDirty(true); return _delegate.retainAll(c); }
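// The wrapper above flags the owner as dirty before delegating, even when retainAll ends up
// removing nothing. A hypothetical variant (the class and its fields are illustrative, not taken
// from the sources above) marks the wrapper dirty only when the delegate reports a change:
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

class DirtyTrackingList<E> {
  private final List<E> delegate = new ArrayList<E>();
  private boolean dirty;

  public boolean add(E e) {
    dirty = true;                  // add always changes the list
    return delegate.add(e);
  }

  public boolean retainAll(Collection<?> c) {
    boolean changed = delegate.retainAll(c);
    if (changed) {
      dirty = true;                // only flag when something was actually removed
    }
    return changed;
  }

  public boolean isDirty() {
    return dirty;
  }
}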
/** * Recalculates the list of forwarded endpoints based on the current values of the various * parameters of this instance ({@link #lastN}, {@link #conferenceSpeechActivityEndpoints}, {@link * #pinnedEndpoints}). * * @param newConferenceEndpoints A list of endpoints which entered the conference since the last * call to this method. They need not be asked for keyframes, because they were never filtered * by this {@link #LastNController(VideoChannel)}. * @return the list of IDs of endpoints which were added to {@link #forwardedEndpoints} (i.e. of * endpoints * "entering last-n") as a result of this call. Returns {@code null} if no * endpoints were added. */ private synchronized List<String> update(List<String> newConferenceEndpoints) { List<String> newForwardedEndpoints = new LinkedList<>(); String ourEndpointId = getEndpointId(); if (conferenceSpeechActivityEndpoints == INITIAL_EMPTY_LIST) { conferenceSpeechActivityEndpoints = getIDs(channel.getConferenceSpeechActivity().getEndpoints()); newConferenceEndpoints = conferenceSpeechActivityEndpoints; } if (lastN < 0 && currentLastN < 0) { // Last-N is disabled, we forward everything. newForwardedEndpoints.addAll(conferenceSpeechActivityEndpoints); if (ourEndpointId != null) { newForwardedEndpoints.remove(ourEndpointId); } } else { // Here we have lastN >= 0 || currentLastN >= 0 which implies // currentLastN >= 0. // Pinned endpoints are always forwarded. newForwardedEndpoints.addAll(getPinnedEndpoints()); // As long as they are still endpoints in the conference. newForwardedEndpoints.retainAll(conferenceSpeechActivityEndpoints); if (newForwardedEndpoints.size() > currentLastN) { // What do we want in this case? It looks like a contradictory // request from the client, but maybe it makes for a good API // on the client to allow the pinned to override last-n. // Unfortunately, this will not play well with Adaptive-Last-N // or changes to Last-N for other reasons. } else if (newForwardedEndpoints.size() < currentLastN) { for (String endpointId : conferenceSpeechActivityEndpoints) { if (newForwardedEndpoints.size() < currentLastN) { if (!endpointId.equals(ourEndpointId) && !newForwardedEndpoints.contains(endpointId)) { newForwardedEndpoints.add(endpointId); } } else { break; } } } } List<String> enteringEndpoints; if (forwardedEndpoints.equals(newForwardedEndpoints)) { // We want forwardedEndpoints != INITIAL_EMPTY_LIST forwardedEndpoints = newForwardedEndpoints; enteringEndpoints = null; } else { enteringEndpoints = new ArrayList<>(newForwardedEndpoints); enteringEndpoints.removeAll(forwardedEndpoints); if (logger.isDebugEnabled()) { logger.debug( "Forwarded endpoints changed: " + forwardedEndpoints.toString() + " -> " + newForwardedEndpoints.toString() + ". Entering: " + enteringEndpoints.toString()); } forwardedEndpoints = Collections.unmodifiableList(newForwardedEndpoints); if (lastN >= 0 || currentLastN >= 0) { // TODO: we may want to do this asynchronously. channel.sendLastNEndpointsChangeEventOnDataChannel(forwardedEndpoints, enteringEndpoints); } } // If lastN is disabled, the endpoints entering forwardedEndpoints were // never filtered, so they don't need to be asked for keyframes. if (lastN < 0 && currentLastN < 0) { enteringEndpoints = null; } if (enteringEndpoints != null && newConferenceEndpoints != null) { // Endpoints just entering the conference need not be asked for // keyframes. enteringEndpoints.removeAll(newConferenceEndpoints); } return enteringEndpoints; }
public boolean retainAll(Collection<?> arg0) { return list.retainAll(arg0); }
// Trying to optimize the search for 4-cliques a bit. private Set<Set<Integer>> findPureClusters2( List<Integer> _variables, Map<Node, Set<Node>> adjacencies) { System.out.println("Original variables = " + variables); Set<Set<Integer>> clusters = new HashSet<Set<Integer>>(); List<Integer> allVariables = new ArrayList<Integer>(); Set<Node> foundVariables = new HashSet<Node>(); for (int i = 0; i < this.variables.size(); i++) allVariables.add(i); for (int x : _variables) { Node nodeX = variables.get(x); if (foundVariables.contains(nodeX)) continue; List<Node> adjX = new ArrayList<Node>(adjacencies.get(nodeX)); adjX.removeAll(foundVariables); if (adjX.size() < 3) continue; for (Node nodeY : adjX) { if (foundVariables.contains(nodeY)) continue; List<Node> commonXY = new ArrayList<Node>(adjacencies.get(nodeY)); commonXY.retainAll(adjX); commonXY.removeAll(foundVariables); for (Node nodeZ : commonXY) { if (foundVariables.contains(nodeZ)) continue; List<Node> commonXZ = new ArrayList<Node>(commonXY); commonXZ.retainAll(adjacencies.get(nodeZ)); commonXZ.removeAll(foundVariables); for (Node nodeW : commonXZ) { if (foundVariables.contains(nodeW)) continue; if (!adjacencies.get(nodeY).contains(nodeW)) { continue; } int y = variables.indexOf(nodeY); int w = variables.indexOf(nodeW); int z = variables.indexOf(nodeZ); Set<Integer> cluster = quartet(x, y, z, w); // Note that purity needs to be assessed with respect to all of the variables in order // to // remove all latent-measure impurities between pairs of latents. if (pure(cluster, allVariables)) { O: for (int o : _variables) { if (cluster.contains(o)) continue; cluster.add(o); if (!clique(cluster, adjacencies)) { cluster.remove(o); continue O; } // if (!allVariablesDependent(cluster)) { // cluster.remove(o); // continue O; // } List<Integer> _cluster = new ArrayList<Integer>(cluster); ChoiceGenerator gen2 = new ChoiceGenerator(_cluster.size(), 4); int[] choice2; int count = 0; while ((choice2 = gen2.next()) != null) { int x2 = _cluster.get(choice2[0]); int y2 = _cluster.get(choice2[1]); int z2 = _cluster.get(choice2[2]); int w2 = _cluster.get(choice2[3]); Set<Integer> quartet = quartet(x2, y2, z2, w2); // Optimizes for large clusters. if (quartet.contains(o)) { if (++count > 2) continue O; } if (quartet.contains(o) && !pure(quartet, allVariables)) { cluster.remove(o); continue O; } } } System.out.println( "Cluster found: " + variablesForIndices(new ArrayList<Integer>(cluster))); clusters.add(cluster); foundVariables.addAll(variablesForIndices(new ArrayList<Integer>(cluster))); } } } } } return clusters; }
// Finds clusters of size 4 or higher. private Set<Set<Integer>> findPureClusters( List<Integer> _variables, Map<Node, Set<Node>> adjacencies) { // System.out.println("Original variables = " + variables); Set<Set<Integer>> clusters = new HashSet<Set<Integer>>(); List<Integer> allVariables = new ArrayList<Integer>(); for (int i = 0; i < this.variables.size(); i++) allVariables.add(i); VARIABLES: while (!_variables.isEmpty()) { if (_variables.size() < 4) break; for (int x : _variables) { Node nodeX = variables.get(x); List<Node> adjX = new ArrayList<Node>(adjacencies.get(nodeX)); adjX.retainAll(variablesForIndices(new ArrayList<Integer>(_variables))); for (Node node : new ArrayList<Node>(adjX)) { if (adjacencies.get(node).size() < 3) { adjX.remove(node); } } if (adjX.size() < 3) { continue; } ChoiceGenerator gen = new ChoiceGenerator(adjX.size(), 3); int[] choice; while ((choice = gen.next()) != null) { Node nodeY = adjX.get(choice[0]); Node nodeZ = adjX.get(choice[1]); Node nodeW = adjX.get(choice[2]); int y = variables.indexOf(nodeY); int w = variables.indexOf(nodeW); int z = variables.indexOf(nodeZ); Set<Integer> cluster = quartet(x, y, z, w); if (!clique(cluster, adjacencies)) { continue; } // Note that purity needs to be assessed with respect to all of the variables in order to // remove all latent-measure impurities between pairs of latents. if (pure(cluster, allVariables)) { // Collections.shuffle(_variables); O: for (int o : _variables) { if (cluster.contains(o)) continue; cluster.add(o); List<Integer> _cluster = new ArrayList<Integer>(cluster); if (!clique(cluster, adjacencies)) { cluster.remove(o); continue O; } // if (!allVariablesDependent(cluster)) { // cluster.remove(o); // continue O; // } ChoiceGenerator gen2 = new ChoiceGenerator(_cluster.size(), 4); int[] choice2; int count = 0; while ((choice2 = gen2.next()) != null) { int x2 = _cluster.get(choice2[0]); int y2 = _cluster.get(choice2[1]); int z2 = _cluster.get(choice2[2]); int w2 = _cluster.get(choice2[3]); Set<Integer> quartet = quartet(x2, y2, z2, w2); // Optimizes for large clusters. if (quartet.contains(o)) { if (++count > 50) continue O; } if (quartet.contains(o) && !pure(quartet, allVariables)) { cluster.remove(o); continue O; } } } System.out.println( "Cluster found: " + variablesForIndices(new ArrayList<Integer>(cluster))); clusters.add(cluster); _variables.removeAll(cluster); continue VARIABLES; } } } break; } return clusters; }
/** * Computes, for each node in the graph, its set of (pre-)dominators. Supply a successor graph if * you want post-dominators. * * @param <T> type of the graph nodes * @param predecessors a graph, represented as a predecessor map * @return a map from each node to a list of its pre-dominators */ public static <T> Map<T, List<T>> dominators(Map<T, List</*@KeyFor("#1")*/ T>> predecessors) { // Map</*@KeyFor({"preds","dom"})*/ T,List</*@KeyFor({"preds","dom"})*/ T>> dom = new // HashMap</*@KeyFor({"preds","dom"})*/ T,List</*@KeyFor({"preds","dom"})*/ T>>(); Map<T, List<T>> dom = new HashMap<T, List<T>>(); @SuppressWarnings("keyfor") // every element of pred's value will be a key for dom Map<T, List</*@KeyFor({"dom"})*/ T>> preds = predecessors; List<T> nodes = new ArrayList<T>(preds.keySet()); // Compute roots & non-roots, for convenience List</*@KeyFor({"preds","dom"})*/ T> roots = new ArrayList<T>(); List</*@KeyFor({"preds","dom"})*/ T> non_roots = new ArrayList<T>(); // Initialize result: for roots just the root, otherwise everything for (T node : preds.keySet()) { if (preds.get(node).isEmpty()) { // This is a root. Its only dominator is itself. Set<T> set = Collections.singleton(node); dom.put(node, new ArrayList<T>(set)); roots.add(node); } else { // Initially, set all nodes as dominators; // will later remove nodes that aren't dominators. dom.put(node, new ArrayList<T>(nodes)); non_roots.add(node); } } assert roots.size() + non_roots.size() == nodes.size(); // Invariants: // preds and dom have the same keyset. // All of the following are keys for both preds and dom: // * every key in pred // * every element of every pred value // * every key in dom // * every element of every dom value // So, the type of pred is now // Map</*@KeyFor({"preds","dom"})*/ T, List</*@KeyFor({"preds","dom"})*/ T>> preds // rather than its original type // Map<T,List</*@KeyFor("preds")*/ T>> preds boolean changed = true; while (changed) { changed = false; for (T node : non_roots) { List<T> new_doms = null; assert preds.containsKey(node); for (T pred : preds.get(node)) { assert dom.containsKey(pred); /*@NonNull*/ List<T> dom_of_pred = dom.get(pred); if (new_doms == null) { // make copy because we may side-effect new_doms new_doms = new ArrayList<T>(dom_of_pred); } else { new_doms.retainAll(dom_of_pred); } } assert new_doms != null : "@AssumeAssertion(nullness): the loop was entered at least once because this is a non-root, which has at least one predecessor"; new_doms.add(node); assert dom.containsKey(node); if (!dom.get(node).equals(new_doms)) { dom.put(node, new_doms); changed = true; } } } for (T node : preds.keySet()) { // TODO: The following two assert statements would be easier to read // than the one combined one, but a bug (TODO: Jonathan will add a // bug number) prevents it from type-checking. // assert dom.containsKey(node); // assert dom.get(node).contains(node); assert dom.containsKey(node) && dom.get(node).contains(node); } return dom; }
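// A minimal, hypothetical usage sketch for dominators(...) above (the graph and names are
// illustrative; it assumes the method is visible here, e.g. via a static import from its
// defining class). For a diamond-shaped graph A -> B, A -> C, B -> D, C -> D, node D is
// dominated only by A and itself, because B and C are alternative paths.
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DominatorsDemo {
  public static void main(String[] args) {
    // Predecessor map: node -> list of its predecessors.
    Map<String, List<String>> preds = new HashMap<String, List<String>>();
    preds.put("A", Collections.<String>emptyList()); // root: no predecessors
    preds.put("B", Arrays.asList("A"));
    preds.put("C", Arrays.asList("A"));
    preds.put("D", Arrays.asList("B", "C"));

    Map<String, List<String>> dom = dominators(preds);
    System.out.println(dom.get("D")); // dominators of D: exactly A and D
  }
}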
private void addFrozenItems(List<LookupElement> items, LinkedHashSet<LookupElement> model) { myFrozenItems.retainAll(items); model.addAll(myFrozenItems); }
public boolean retainAll(Collection c) { return documents.retainAll(c); }
/** * Sets the new view and sends a VIEW_CHANGE event up and down the stack. If the view is a * MergeView (subclass of View), then digest will be non-null and has to be set before installing * the view. */ public void installView(View new_view, Digest digest) { ViewId vid = new_view.getVid(); List<Address> mbrs = new_view.getMembers(); ltime = Math.max( vid.getId(), ltime); // compute the logical time, regardless of whether the view is accepted // Discards view with id lower than or equal to our own. Will be installed without check if it // is the first view if (view != null) { ViewId view_id = view.getViewId(); int rc = vid.compareToIDs(view_id); if (rc <= 0) { if (log.isWarnEnabled() && rc < 0 && log_view_warnings) { // only scream if view is smaller, silently discard same views log.warn( local_addr + ": received view < current view;" + " discarding it (current vid: " + view_id + ", new vid: " + vid + ')'); } return; } } /* Check for self-inclusion: if I'm not part of the new membership, I just discard it. This ensures that messages sent in view V1 are only received by members of V1 */ if (!mbrs.contains(local_addr)) { if (log.isWarnEnabled() && log_view_warnings) log.warn(local_addr + ": not member of view " + new_view.getViewId() + "; discarding it"); return; } if (digest != null) { if (new_view instanceof MergeView) mergeDigest(digest); else setDigest(digest); } if (log.isDebugEnabled()) log.debug(local_addr + ": installing view " + new_view); Event view_event; synchronized (members) { view = new View(new_view.getVid(), new_view.getMembers()); view_event = new Event(Event.VIEW_CHANGE, new_view); // Set the membership. Take into account joining members if (!mbrs.isEmpty()) { members.set(mbrs); tmp_members.set(members); joining.removeAll(mbrs); // remove all members in mbrs from joining // remove all elements from 'leaving' that are not in 'mbrs' leaving.retainAll(mbrs); tmp_members.add(joining); // add members that haven't yet shown up in the membership tmp_members.remove( leaving); // remove members that haven't yet been removed from the membership // add to prev_members for (Address addr : mbrs) { if (!prev_members.contains(addr)) prev_members.add(addr); } } Address coord = determineCoordinator(); if (coord != null && coord.equals(local_addr) && !haveCoordinatorRole()) { becomeCoordinator(); } else { if (haveCoordinatorRole() && !local_addr.equals(coord)) { becomeParticipant(); merge_ack_collector.reset(null); // we don't need this one anymore } } } // - Changed order of passing view up and down (http://jira.jboss.com/jira/browse/JGRP-347) // - Changed it back (bela Sept 4 2007): http://jira.jboss.com/jira/browse/JGRP-564 // - Moved sending up view_event out of the synchronized block (bela Nov 2011) down_prot.down(view_event); // needed e.g. by failure detector or UDP up_prot.up(view_event); List<Address> tmp_mbrs = new_view.getMembers(); ack_collector.retainAll(tmp_mbrs); merge_ack_collector.retainAll(tmp_mbrs); if (new_view instanceof MergeView) merger.forceCancelMerge(); if (stats) { num_views++; prev_views.add(new Tuple<View, Long>(new_view, System.currentTimeMillis())); } }
public static void main(String[] args) { int numItr = 100; int listSize = 100; for (int i = 0; i < numItr; i++) { List s1 = newList(); AddRandoms(s1, listSize); List s2 = newList(); AddRandoms(s2, listSize); List intersection = clone(s1); intersection.retainAll(s2); List diff1 = clone(s1); diff1.removeAll(s2); List diff2 = clone(s2); diff2.removeAll(s1); List union = clone(s1); union.addAll(s2); if (diff1.removeAll(diff2)) fail("List algebra identity 2 failed"); if (diff1.removeAll(intersection)) fail("List algebra identity 3 failed"); if (diff2.removeAll(diff1)) fail("List algebra identity 4 failed"); if (diff2.removeAll(intersection)) fail("List algebra identity 5 failed"); if (intersection.removeAll(diff1)) fail("List algebra identity 6 failed"); if (intersection.removeAll(diff2)) fail("List algebra identity 7 failed"); intersection.addAll(diff1); intersection.addAll(diff2); if (!(intersection.containsAll(union) && union.containsAll(intersection))) fail("List algebra identity 1 failed"); Iterator e = union.iterator(); while (e.hasNext()) intersection.remove(e.next()); if (!intersection.isEmpty()) fail("Copy nonempty after deleting all elements."); e = union.iterator(); while (e.hasNext()) { Object o = e.next(); if (!union.contains(o)) fail("List doesn't contain one of its elements."); e.remove(); } if (!union.isEmpty()) fail("List nonempty after deleting all elements."); s1.clear(); if (s1.size() != 0) fail("Clear didn't reduce size to zero."); s1.addAll(0, s2); if (!(s1.equals(s2) && s2.equals(s1))) fail("addAll(int, Collection) doesn't work."); // Reverse List for (int j = 0, n = s1.size(); j < n; j++) s1.set(j, s1.set(n - j - 1, s1.get(j))); // Reverse it again for (int j = 0, n = s1.size(); j < n; j++) s1.set(j, s1.set(n - j - 1, s1.get(j))); if (!(s1.equals(s2) && s2.equals(s1))) fail("set(int, Object) doesn't work"); } List s = newList(); for (int i = 0; i < listSize; i++) s.add(new Integer(i)); if (s.size() != listSize) fail("Size of [0..n-1] != n"); List even = clone(s); Iterator it = even.iterator(); while (it.hasNext()) if (((Integer) it.next()).intValue() % 2 == 1) it.remove(); it = even.iterator(); while (it.hasNext()) if (((Integer) it.next()).intValue() % 2 == 1) fail("Failed to remove all odd numbers."); List odd = clone(s); for (int i = 0; i < (listSize / 2); i++) odd.remove(i); for (int i = 0; i < (listSize / 2); i++) if (((Integer) odd.get(i)).intValue() % 2 != 1) fail("Failed to remove all even numbers."); List all = clone(odd); for (int i = 0; i < (listSize / 2); i++) all.add(2 * i, even.get(i)); if (!all.equals(s)) fail("Failed to reconstruct ints from odds and evens."); all = clone(odd); ListIterator itAll = all.listIterator(all.size()); ListIterator itEven = even.listIterator(even.size()); while (itEven.hasPrevious()) { itAll.previous(); itAll.add(itEven.previous()); itAll.previous(); // ???
} itAll = all.listIterator(); while (itAll.hasNext()) { Integer i = (Integer) itAll.next(); itAll.set(new Integer(i.intValue())); } itAll = all.listIterator(); it = s.iterator(); while (it.hasNext()) if (it.next() == itAll.next()) fail("Iterator.set failed to change value."); if (!all.equals(s)) fail("Failed to reconstruct ints with ListIterator."); it = all.listIterator(); int i = 0; while (it.hasNext()) { Object o = it.next(); if (all.indexOf(o) != all.lastIndexOf(o)) fail("Apparent duplicate detected."); if (all.subList(i, all.size()).indexOf(o) != 0 || all.subList(i + 1, all.size()).indexOf(o) != -1) fail("subList/indexOf is screwy."); if (all.subList(0, i + 1).lastIndexOf(o) != i) fail("subList/lastIndexOf is screwy."); i++; } List l = newList(); AddRandoms(l, listSize); Integer[] ia = (Integer[]) l.toArray(new Integer[0]); if (!l.equals(Arrays.asList(ia))) fail("toArray(Object[]) is hosed (1)"); ia = new Integer[listSize]; Integer[] ib = (Integer[]) l.toArray(ia); if (ia != ib || !l.equals(Arrays.asList(ia))) fail("toArray(Object[]) is hosed (2)"); ia = new Integer[listSize + 1]; ia[listSize] = new Integer(69); ib = (Integer[]) l.toArray(ia); if (ia != ib || ia[listSize] != null || !l.equals(Arrays.asList(ia).subList(0, listSize))) fail("toArray(Object[]) is hosed (3)"); }
/** * Tries to route the statement to data nodes based on the tables it references. * * @param ast QueryTreeNode * @param isSelect whether the statement is a SELECT * @param rrs the route result set * @param schema the schema (database) configuration * @param ctx ShardingParseInfo (sharding parse info) * @param sql the SQL statement to execute * @param cachePool * @return a route result set * @throws SQLNonTransientException * @author mycat */ private RouteResultset tryRouteForTables( QueryTreeNode ast, boolean isSelect, RouteResultset rrs, SchemaConfig schema, ShardingParseInfo ctx, String sql, LayerCachePool cachePool) throws SQLNonTransientException { Map<String, TableConfig> tables = schema.getTables(); Map<String, Map<String, Set<ColumnRoutePair>>> tbCondMap = ctx.tablesAndConditions; if (tbCondMap.size() == 1) { // only one table in this sql Map.Entry<String, Map<String, Set<ColumnRoutePair>>> entry = tbCondMap.entrySet().iterator().next(); TableConfig tc = getTableConfig(schema, entry.getKey()); if (tc.getRule() == null && tc.getDataNodes().size() == 1) { rrs.setCacheAble(isSelect); // 20140625 fix: statements on a single-node global table did not automatically get a LIMIT appended sql = addSQLLmit(schema, rrs, ast, sql); return RouterUtil.routeToSingleNode(rrs, tc.getDataNodes().get(0), sql); } Map<String, Set<ColumnRoutePair>> colConds = entry.getValue(); return tryRouteForTable( ast, schema, rrs, isSelect, sql, tc, colConds.get(tc.getPartitionColumn()), colConds, cachePool); } else if (!ctx.joinList.isEmpty()) { for (JoinRel joinRel : ctx.joinList) { TableConfig rootc = schema.getJoinRel2TableMap().get(joinRel.joinSQLExp); if (rootc == null) { if (LOGGER.isDebugEnabled()) { LOGGER.debug( "can't find join relation in schema " + schema.getName() + " :" + joinRel.joinSQLExp + " maybe global table join"); } } else { if (rootc.getName().equals(joinRel.tableA)) { // table A is the root table tbCondMap.remove(joinRel.tableB); } else if (rootc.getName().equals(joinRel.tableB)) { // table B is the root table tbCondMap.remove(joinRel.tableA); } else if (tbCondMap.containsKey(rootc.getName())) { // the sql contains the root table, so remove all of its children tbCondMap.remove(joinRel.tableA); tbCondMap.remove(joinRel.tableB); } else { // neither A nor B is the root table, remove either one tbCondMap.remove(joinRel.tableA); } } } } if (tbCondMap.size() > 1) { Set<String> curRNodeSet = new LinkedHashSet<String>(); Set<String> routePairSet = new LinkedHashSet<String>(); // route nodes resolved from the sharding column values String curTableName = null; Map<String, List<String>> globalTableDataNodesMap = new LinkedHashMap<String, List<String>>(); for (Entry<String, Map<String, Set<ColumnRoutePair>>> e : tbCondMap.entrySet()) { String tableName = e.getKey(); Map<String, Set<ColumnRoutePair>> col2ValMap = e.getValue(); TableConfig tc = tables.get(tableName); if (tc == null) { String msg = "can't find table define in schema " + tableName + " schema:" + schema.getName(); LOGGER.warn(msg); throw new SQLNonTransientException(msg); } else if (tc.getTableType() == TableConfig.TYPE_GLOBAL_TABLE) { // add to the global table list globalTableDataNodesMap.put(tc.getName(), tc.getDataNodes()); continue; } Collection<String> newDataNodes = null; String partColmn = tc.getPartitionColumn(); Set<ColumnRoutePair> col2Val = partColmn == null ? 
null : col2ValMap.get(partColmn); if (col2Val == null || col2Val.isEmpty()) { if (tc.isRuleRequired()) { throw new IllegalArgumentException( "route rule for table " + tableName + " is required: " + sql); } newDataNodes = tc.getDataNodes(); } else { // match the table against the WHERE condition's partition column values newDataNodes = RouterUtil.ruleCalculate(tc, col2Val); } if (curRNodeSet.isEmpty()) { curTableName = tc.getName(); curRNodeSet.addAll(newDataNodes); if (col2Val != null && !col2Val.isEmpty()) { routePairSet.addAll(newDataNodes); } } else { if (col2Val == null || col2Val.isEmpty()) { if (curRNodeSet.retainAll(newDataNodes) && routePairSet.isEmpty()) { String errMsg = "invalid route in sql, multi tables found but datanode has no intersection " + " sql:" + sql; LOGGER.warn(errMsg); throw new SQLNonTransientException(errMsg); } } else { if (routePairSet.isEmpty()) { routePairSet.addAll(newDataNodes); } else if (!checkIfValidMultiTableRoute(routePairSet, newDataNodes) || (curRNodeSet.retainAll(newDataNodes) && routePairSet.isEmpty())) { String errMsg = "invalid route in sql, " + routePairSet + " route to :" + Arrays.toString(routePairSet.toArray()) + " ,but " + tc.getName() + " to " + Arrays.toString(newDataNodes.toArray()) + " sql:" + sql; LOGGER.warn(errMsg); throw new SQLNonTransientException(errMsg); } } // if (!checkIfValidMultiTableRoute(curRNodeSet, // newDataNodes)) { // String errMsg = "invalid route in sql, " + curTableName // + " route to :" // + Arrays.toString(curRNodeSet.toArray()) // + " ,but " + tc.getName() + " to " // + Arrays.toString(newDataNodes.toArray()) // + " sql:" + sql; // LOGGER.warn(errMsg); // throw new SQLNonTransientException(errMsg); // } } } // only global tables are referenced in the sql if (!globalTableDataNodesMap.isEmpty() && curRNodeSet.isEmpty()) { List<String> resultList = Lists.newArrayList(); for (List<String> nodeList : globalTableDataNodesMap.values()) { if (resultList.isEmpty()) { resultList = Lists.newArrayList(nodeList); // copy so retainAll below cannot mutate the table's data node list } else { if (resultList.retainAll(nodeList) && resultList.isEmpty()) { String errMsg = "invalid route in sql, multi global tables found but datanode has no intersection " + " sql:" + sql; LOGGER.warn(errMsg); throw new SQLNonTransientException(errMsg); } } } if (resultList.size() == 1) { rrs.setCacheAble(true); sql = addSQLLmit(schema, rrs, ast, sql); rrs = RouterUtil.routeToSingleNode(rrs, resultList.get(0), sql); } else { // multiple routes, do not cache the route result rrs.setCacheAble(false); rrs = RouterUtil.routeToSingleNode(rrs, getRandomDataNode(resultList), sql); } return rrs; } else if (!globalTableDataNodesMap.isEmpty() && !curRNodeSet.isEmpty()) { // check whether each global table's data nodes contain all data nodes of the other tables for (Map.Entry<String, List<String>> entry : globalTableDataNodesMap.entrySet()) { if (!entry.getValue().containsAll(curRNodeSet)) { String errMsg = "invalid route in sql, " + curTableName + " route to :" + Arrays.toString(curRNodeSet.toArray()) + " ,but " + entry.getKey() + " to " + Arrays.toString(entry.getValue().toArray()) + " sql:" + sql; LOGGER.warn(errMsg); throw new SQLNonTransientException(errMsg); } } } if (curRNodeSet.size() > 1) { LOGGER.warn( "multi route tables found in this sql ,tables:" + Arrays.toString(tbCondMap.keySet().toArray()) + " sql:" + sql); return routeToMultiNode(schema, isSelect, isSelect, ast, rrs, curRNodeSet, sql); } else { return RouterUtil.routeToSingleNode(rrs, curRNodeSet.iterator().next(), sql); } } else { // only one table Map.Entry<String, Map<String, Set<ColumnRoutePair>>> entry = 
tbCondMap.entrySet().iterator().next(); Map<String, Set<ColumnRoutePair>> allColValues = entry.getValue(); TableConfig tc = getTableConfig(schema, entry.getKey()); return tryRouteForTable( ast, schema, rrs, isSelect, sql, tc, allColValues.get(tc.getPartitionColumn()), allColValues, cachePool); } }