@NonNull
public IAndroidTarget[] getMissingTargets() {
    synchronized (mLocalPackages) {
        if (mCachedMissingTargets == null) {
            Map<MissingTarget, MissingTarget> result = Maps.newHashMap();
            Set<ISystemImage> seen = Sets.newHashSet();
            for (IAndroidTarget target : getTargets()) {
                Collections.addAll(seen, target.getSystemImages());
            }
            for (LocalPkgInfo local : getPkgsInfos(PkgType.PKG_ADDON_SYS_IMAGE)) {
                LocalAddonSysImgPkgInfo info = (LocalAddonSysImgPkgInfo) local;
                ISystemImage image = info.getSystemImage();
                if (!seen.contains(image)) {
                    addOrphanedSystemImage(image, info.getDesc(), result);
                }
            }
            for (LocalPkgInfo local : getPkgsInfos(PkgType.PKG_SYS_IMAGE)) {
                LocalSysImgPkgInfo info = (LocalSysImgPkgInfo) local;
                ISystemImage image = info.getSystemImage();
                if (!seen.contains(image)) {
                    addOrphanedSystemImage(image, info.getDesc(), result);
                }
            }
            mCachedMissingTargets = result.keySet();
        }
        return mCachedMissingTargets.toArray(new IAndroidTarget[mCachedMissingTargets.size()]);
    }
}
/**
 * Gets all fields annotated with a given annotation.
 *
 * <p>Depends on FieldAnnotationsScanner being configured.
 */
public Set<Field> getFieldsAnnotatedWith(final Class<? extends Annotation> annotation) {
    final Set<Field> result = Sets.newHashSet();
    for (String annotated : store.get(index(FieldAnnotationsScanner.class), annotation.getName())) {
        result.add(getFieldFromString(annotated, loaders()));
    }
    return result;
}
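// Usage sketch (an assumption, not from the source): a Reflections instance configured
// with FieldAnnotationsScanner over a hypothetical "com.example" package. @Deprecated
// has RUNTIME retention, so fields carrying it are indexable by the scanner.
Reflections reflections = new Reflections("com.example", new FieldAnnotationsScanner());
Set<Field> deprecatedFields = reflections.getFieldsAnnotatedWith(Deprecated.class);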
private Variance calculateArgumentProjectionKindFromSuper(
        @NotNull TypeProjection argument,
        @NotNull List<TypeProjectionAndVariance> projectionsFromSuper) {
    Set<Variance> projectionKindsInSuper = Sets.newLinkedHashSet();
    for (TypeProjectionAndVariance projectionAndVariance : projectionsFromSuper) {
        projectionKindsInSuper.add(projectionAndVariance.typeProjection.getProjectionKind());
    }

    Variance defaultProjectionKind = argument.getProjectionKind();
    if (projectionKindsInSuper.isEmpty()) {
        return defaultProjectionKind;
    }
    else if (projectionKindsInSuper.size() == 1) {
        Variance projectionKindInSuper = projectionKindsInSuper.iterator().next();
        if (defaultProjectionKind == INVARIANT || defaultProjectionKind == projectionKindInSuper) {
            return projectionKindInSuper;
        }
        else {
            reportError("Incompatible projection kinds in type arguments of super methods' return types: "
                        + projectionsFromSuper + ", defined in current: " + argument);
            return defaultProjectionKind;
        }
    }
    else {
        reportError("Incompatible projection kinds in type arguments of super methods' return types: "
                    + projectionsFromSuper);
        return defaultProjectionKind;
    }
}
private boolean isInDistanceInternal(
        int distance, NetworkNode from, NetworkNode to, TwoNetworkNodes cachePairKey) {
    if (from.equals(to)) return true;
    if (distance == 0) return false;
    if (SimpleNetwork.areNodesConnecting(from, to)) return true;

    // Breadth-first search of the network
    Set<NetworkNode> visitedNodes = Sets.newHashSet();
    visitedNodes.add(from);

    Set<NetworkNode> networkingNodesToTest = Sets.newHashSet();
    listConnectedNotVisitedNetworkingNodes(visitedNodes, from, networkingNodesToTest);
    int distanceSearched = 1;
    while (distanceSearched < distance) {
        distanceSearched++;

        for (NetworkNode nodeToTest : networkingNodesToTest) {
            if (SimpleNetwork.areNodesConnecting(nodeToTest, to)) {
                distanceCache.put(cachePairKey, distanceSearched);
                return true;
            }
            visitedNodes.add(nodeToTest);
        }

        Set<NetworkNode> nextNetworkingNodesToTest = Sets.newHashSet();
        for (NetworkNode nodeToTest : networkingNodesToTest)
            listConnectedNotVisitedNetworkingNodes(visitedNodes, nodeToTest, nextNetworkingNodesToTest);

        networkingNodesToTest = nextNetworkingNodesToTest;
    }
    return false;
}
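// Standalone sketch of the same bounded-distance BFS technique on a plain adjacency map
// (all names here are illustrative, not from the source; assumes java.util imports):
// expand the frontier one hop at a time and stop once the hop budget is spent.
static boolean withinDistance(Map<String, Set<String>> adjacency, String from, String to, int distance) {
    if (from.equals(to)) return true;
    Set<String> visited = new HashSet<>(Collections.singleton(from));
    Set<String> frontier = new HashSet<>(adjacency.getOrDefault(from, Collections.emptySet()));
    for (int searched = 1; searched <= distance && !frontier.isEmpty(); searched++) {
        if (frontier.contains(to)) return true; // reached within `searched` hops
        visited.addAll(frontier);
        Set<String> next = new HashSet<>();
        for (String node : frontier)
            for (String neighbor : adjacency.getOrDefault(node, Collections.emptySet()))
                if (!visited.contains(neighbor)) next.add(neighbor);
        frontier = next;
    }
    return false;
}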
@NotNull
private static Set<DeclarationDescriptor> getTopLevelDescriptorsByFqName(
        @NotNull KotlinCodeAnalyzer resolveSession, @NotNull FqName fqName) {
    FqName parentFqName = fqName.parent();
    Set<DeclarationDescriptor> descriptors = new HashSet<DeclarationDescriptor>();

    LazyPackageDescriptor parentFragment = resolveSession.getPackageFragment(parentFqName);
    if (parentFragment != null) {
        // Filter out extension properties
        descriptors.addAll(KotlinPackage.filter(
                parentFragment.getMemberScope().getProperties(fqName.shortName()),
                new Function1<VariableDescriptor, Boolean>() {
                    @Override
                    public Boolean invoke(VariableDescriptor descriptor) {
                        return descriptor.getReceiverParameter() == null;
                    }
                }));
    }

    ContainerUtil.addIfNotNull(descriptors, resolveSession.getPackageFragment(fqName));

    descriptors.addAll(resolveSession.getTopLevelClassDescriptors(fqName));

    return descriptors;
}
final Set<UUID> getPlayers() {
    ImmutableSet.Builder<UUID> setBuilder = ImmutableSet.builder();
    if (pool != null) {
        try (Jedis rsc = pool.getResource()) {
            List<String> keys = new ArrayList<>();
            for (String i : getServerIds()) {
                keys.add("proxy:" + i + ":usersOnline");
            }
            if (!keys.isEmpty()) {
                Set<String> users = rsc.sunion(keys.toArray(new String[keys.size()]));
                if (users != null && !users.isEmpty()) {
                    for (String user : users) {
                        try {
                            setBuilder = setBuilder.add(UUID.fromString(user));
                        } catch (IllegalArgumentException ignored) {
                        }
                    }
                }
            }
        } catch (JedisConnectionException e) {
            // Redis server has disappeared!
            getLogger().log(Level.SEVERE,
                    "Unable to get connection from pool - did your Redis server go away?", e);
            throw new RuntimeException("Unable to get all players online", e);
        }
    }
    return setBuilder.build();
}
public Set<String> getIndexedClasses() {
    Set<String> classes = newHashSet();
    Set<JavaClass> vertexSet = graph.vertexSet();
    for (JavaClass each : vertexSet) {
        classes.add(each.getName());
    }
    return classes;
}
private void addMergedValues(@NotNull MergeInstruction instruction) {
    Set<PseudoValue> result = new LinkedHashSet<PseudoValue>();
    for (PseudoValue value : instruction.getInputValues()) {
        result.addAll(getMergedValues(value));
        result.add(value);
    }
    mergedValues.put(instruction.getOutputValue(), result);
}
private void collectAndCacheReachableInstructions() {
    Set<Instruction> reachableInstructions = collectReachableInstructions();
    for (Instruction instruction : mutableInstructionList) {
        if (reachableInstructions.contains(instruction)) {
            instructions.add(instruction);
        }
    }
    markDeadInstructions();
}
LifecycleTransaction(Tracker tracker, OperationType operationType, Iterable<SSTableReader> readers) {
    this.tracker = tracker;
    this.operationType = operationType;
    for (SSTableReader reader : readers) {
        originals.add(reader);
        marked.add(reader);
        identities.add(reader.instanceId);
    }
}
/**
 * Gets all types scanned. This is effectively similar to getting all subtypes of Object.
 *
 * <p>Depends on SubTypesScanner being configured with {@code SubTypesScanner(false)}; otherwise
 * {@code ReflectionsException} is thrown.
 *
 * <p><i>Note: using this might be bad practice. It is better to get types matching some criteria,
 * such as {@link #getSubTypesOf(Class)} or {@link #getTypesAnnotatedWith(Class)}.</i>
 *
 * @return a Set of String, and not of Class, in order to avoid loading all types into PermGen
 */
public Set<String> getAllTypes() {
    Set<String> allTypes =
            Sets.newHashSet(store.getAll(index(SubTypesScanner.class), Object.class.getName()));
    if (allTypes.isEmpty()) {
        throw new ReflectionsException(
                "Couldn't find subtypes of Object. "
                        + "Make sure SubTypesScanner initialized to include Object class - new SubTypesScanner(false)");
    }
    return allTypes;
}
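// Usage sketch (package name is hypothetical): SubTypesScanner(false) disables the
// default exclusion of Object, which is exactly what getAllTypes() relies on.
Reflections reflections = new Reflections("com.example", new SubTypesScanner(false));
Set<String> allTypes = reflections.getAllTypes();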
public PColumn newColumn(int position, ColumnDef def, PrimaryKeyConstraint pkConstraint)
        throws SQLException {
    try {
        Set<String> pkColumnNames =
                pkConstraint == null ? Collections.<String>emptySet() : pkConstraint.getColumnNames();
        String columnName = def.getColumnDefName().getColumnName().getName();
        PName familyName = null;
        if (def.isPK() && !pkColumnNames.isEmpty()) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_ALREADY_EXISTS)
                    .setColumnName(columnName)
                    .build()
                    .buildException();
        }
        boolean isPK = def.isPK() || pkColumnNames.contains(columnName);
        if (def.getColumnDefName().getFamilyName() != null) {
            String family = def.getColumnDefName().getFamilyName().getName();
            if (isPK) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.PRIMARY_KEY_WITH_FAMILY_NAME)
                        .setColumnName(columnName)
                        .setFamilyName(family)
                        .build()
                        .buildException();
            } else if (!def.isNull()) {
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.KEY_VALUE_NOT_NULL)
                        .setColumnName(columnName)
                        .setFamilyName(family)
                        .build()
                        .buildException();
            }
            familyName = new PNameImpl(family);
        } else if (!isPK) {
            familyName = QueryConstants.DEFAULT_COLUMN_FAMILY_NAME;
        }
        ColumnModifier columnModifier = def.getColumnModifier();
        if (pkConstraint != null && pkConstraint.getColumnModifier(columnName) != null) {
            columnModifier = pkConstraint.getColumnModifier(columnName);
        }
        PColumn column = new PColumnImpl(
                new PNameImpl(columnName),
                familyName,
                def.getDataType(),
                def.getMaxLength(),
                def.getScale(),
                def.isNull(),
                position,
                columnModifier);
        return column;
    } catch (IllegalArgumentException e) { // Based on precondition check in constructor
        throw new SQLException(e);
    }
}
public Collection<InetAddress> pendingEndpointsFor(Token token, String keyspaceName) {
    Map<Range<Token>, Collection<InetAddress>> ranges = getPendingRanges(keyspaceName);
    if (ranges.isEmpty())
        return Collections.emptyList();

    Set<InetAddress> endpoints = new HashSet<InetAddress>();
    for (Map.Entry<Range<Token>, Collection<InetAddress>> entry : ranges.entrySet()) {
        if (entry.getKey().contains(token))
            endpoints.addAll(entry.getValue());
    }

    return endpoints;
}
/** Remove the reader from the set we're modifying. */
public void cancel(SSTableReader cancel) {
    logger.debug("Cancelling {} from transaction", cancel);
    assert originals.contains(cancel)
            : "may only cancel a reader in the 'original' set: " + cancel + " vs " + originals;
    assert !(staged.contains(cancel) || logged.contains(cancel))
            : "may only cancel a reader that has not been updated or obsoleted in this transaction: " + cancel;
    originals.remove(cancel);
    marked.remove(cancel);
    maybeFail(unmarkCompacting(singleton(cancel), null));
}
private void markDeadInstructions() {
    Set<Instruction> instructionSet = Sets.newHashSet(instructions);
    for (Instruction instruction : mutableInstructionList) {
        if (!instructionSet.contains(instruction)) {
            ((InstructionImpl) instruction).setMarkedAsDead(true);
            for (Instruction nextInstruction : instruction.getNextInstructions()) {
                nextInstruction.getPreviousInstructions().remove(instruction);
            }
        }
    }
}
@NotNull
private static Set<LocalFunctionDeclarationInstruction> getLocalDeclarations(@NotNull Pseudocode pseudocode) {
    Set<LocalFunctionDeclarationInstruction> localDeclarations = Sets.newLinkedHashSet();
    for (Instruction instruction : ((PseudocodeImpl) pseudocode).mutableInstructionList) {
        if (instruction instanceof LocalFunctionDeclarationInstruction) {
            localDeclarations.add((LocalFunctionDeclarationInstruction) instruction);
            localDeclarations.addAll(
                    getLocalDeclarations(((LocalFunctionDeclarationInstruction) instruction).getBody()));
        }
    }
    return localDeclarations;
}
/**
 * If no experiments were specified, inject into statisticsQuery a superset of all experiments for
 * which stats exist across all attributes.
 *
 * @param statisticsQuery
 * @param statisticsStorage
 */
private static void setQueryExperiments(
        StatisticsQueryCondition statisticsQuery, StatisticsStorage statisticsStorage) {
    Set<ExperimentInfo> exps = statisticsQuery.getExperiments();
    if (exps.isEmpty()) {
        // No experiment conditions were specified - assemble a superset of all
        // experiments for which stats exist across all attributes
        for (EfAttribute attr : statisticsQuery.getAttributes()) {
            Map<ExperimentInfo, ConciseSet> expsToStats =
                    getStatisticsForAttribute(statisticsQuery.getStatisticsType(), attr, statisticsStorage);
            if (expsToStats != null)
                exps.addAll(expsToStats.keySet());
        }
        statisticsQuery.inExperiments(exps);
    }
}
/**
 * Remove the provided readers from this Transaction, and return a new Transaction to manage them.
 * Only permitted to be called if the current Transaction has never been used.
 */
public LifecycleTransaction split(Collection<SSTableReader> readers) {
    logger.debug("Splitting {} into new transaction", readers);
    checkUnused();
    for (SSTableReader reader : readers)
        assert identities.contains(reader.instanceId)
                : "may only split the same reader instance the transaction was opened with: " + reader;

    for (SSTableReader reader : readers) {
        identities.remove(reader.instanceId);
        originals.remove(reader);
        marked.remove(reader);
    }
    return new LifecycleTransaction(tracker, operationType, readers);
}
/**
 * Update a reader: if !original, this is a reader that is being introduced by this transaction;
 * otherwise it must be in the originals() set, i.e. a reader guarded by this transaction.
 */
public void update(SSTableReader reader, boolean original) {
    assert !staged.update.contains(reader)
            : "each reader may only be updated once per checkpoint: " + reader;
    assert !identities.contains(reader.instanceId)
            : "each reader instance may only be provided as an update once: " + reader;
    // check it isn't obsolete, and that it matches the original flag
    assert !(logged.obsolete.contains(reader) || staged.obsolete.contains(reader))
            : "may not update a reader that has been obsoleted";
    assert original == originals.contains(reader)
            : String.format("the 'original' indicator was incorrect (%s provided): %s", original, reader);

    staged.update.add(reader);
    identities.add(reader.instanceId);
    if (!isOffline())
        reader.setupKeyCache();
}
/**
 * Update token map with a set of token/endpoint pairs in normal state.
 *
 * <p>Prefer this whenever there are multiple pairs to update, as each update (whether a single or
 * multiple) is expensive (CASSANDRA-3831).
 *
 * @param endpointTokens the endpoint-to-token pairs to record as being in normal state
 */
public void updateNormalTokens(Multimap<InetAddress, Token> endpointTokens) {
    if (endpointTokens.isEmpty())
        return;

    lock.writeLock().lock();
    try {
        boolean shouldSortTokens = false;
        for (InetAddress endpoint : endpointTokens.keySet()) {
            Collection<Token> tokens = endpointTokens.get(endpoint);

            assert tokens != null && !tokens.isEmpty();

            bootstrapTokens.removeValue(endpoint);
            tokenToEndpointMap.removeValue(endpoint);
            topology.addEndpoint(endpoint);
            leavingEndpoints.remove(endpoint);
            removeFromMoving(endpoint); // also removing this endpoint from moving

            for (Token token : tokens) {
                InetAddress prev = tokenToEndpointMap.put(token, endpoint);
                if (!endpoint.equals(prev)) {
                    if (prev != null)
                        logger.warn("Token {} changing ownership from {} to {}", token, prev, endpoint);
                    shouldSortTokens = true;
                }
            }
        }

        if (shouldSortTokens)
            sortedTokens = sortTokens();
    } finally {
        lock.writeLock().unlock();
    }
}
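// Usage sketch (addresses, tokens, and the tokenMetadata receiver are hypothetical;
// assumes Guava's HashMultimap; UnknownHostException handling omitted for brevity).
// Batching both endpoints into a single call avoids paying the per-update cost twice.
Multimap<InetAddress, Token> endpointTokens = HashMultimap.create();
endpointTokens.put(InetAddress.getByName("10.0.0.1"), tokenA); // tokenA/tokenB built elsewhere
endpointTokens.put(InetAddress.getByName("10.0.0.2"), tokenB);
tokenMetadata.updateNormalTokens(endpointTokens);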
private Throwable checkpoint(Throwable accumulate) {
    if (logger.isDebugEnabled())
        logger.debug("Checkpointing update:{}, obsolete:{}", staged.update, staged.obsolete);

    if (staged.isEmpty())
        return accumulate;

    Set<SSTableReader> toUpdate = toUpdate();
    Set<SSTableReader> fresh = copyOf(fresh());

    // check the current versions of the readers we're replacing haven't somehow been replaced by someone else
    checkNotReplaced(filterIn(toUpdate, staged.update));

    // ensure any new readers are in the compacting set, since we aren't done with them yet
    // and don't want anyone else messing with them
    // apply atomically along with updating the live set of readers
    tracker.apply(compose(updateCompacting(emptySet(), fresh), updateLiveSet(toUpdate, staged.update)));

    // log the staged changes and our newly marked readers
    marked.addAll(fresh);
    logged.log(staged);

    // setup our tracker, and mark our prior versions replaced, also releasing our references to them
    // we do not replace/release obsoleted readers, since we may need to restore them on rollback
    accumulate = setReplaced(filterOut(toUpdate, staged.obsolete), accumulate);
    accumulate = release(selfRefs(filterOut(toUpdate, staged.obsolete)), accumulate);

    staged.clear();
    return accumulate;
}
private void findParents(JavaClass jclass, Set<JavaClass> changedParents) {
    for (JavaClass parent : getParents(jclass)) {
        if (changedParents.add(parent)) {
            findParents(parent, changedParents);
        }
    }
}
/**
 * Mark this reader for obsoletion. This does not actually obsolete the reader until commit() is
 * called, but on checkpoint() the reader will be removed from the live set.
 */
public void obsolete(SSTableReader reader) {
    logger.debug("Staging for obsolescence {}", reader);
    // check this is: a reader guarded by the transaction, an instance we have already worked with
    // and that we haven't already obsoleted it, nor do we have other changes staged for it
    assert identities.contains(reader.instanceId)
            : "only reader instances that have previously been provided may be obsoleted: " + reader;
    assert originals.contains(reader)
            : "only readers in the 'original' set may be obsoleted: " + reader + " vs " + originals;
    assert !(logged.obsolete.contains(reader) || staged.obsolete.contains(reader))
            : "may not obsolete a reader that has already been obsoleted: " + reader;
    assert !staged.update.contains(reader)
            : "may not obsolete a reader that has a staged update (must checkpoint first): " + reader;
    assert current(reader) == reader
            : "may only obsolete the latest version of the reader: " + reader;
    staged.obsolete.add(reader);
}
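// Hedged sketch of the staging flow implied by update()/obsolete()/checkpoint() above
// (reader/txn names are hypothetical; the exact public entry points may differ):
//   txn.update(replacementReader, false); // introduce a brand-new reader
//   txn.obsolete(originalReader);         // stage an original reader for removal from the live set
//   // on checkpoint, staged changes are logged and applied atomically to the tracker;
//   // obsoleted readers are only truly released on commit, so rollback can restore them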
/**
 * Builds an "Other Topics" cluster that groups those documents from <code>allDocuments</code>
 * that were not referenced in any cluster in <code>clusters</code>.
 *
 * @param allDocuments all documents to check against
 * @param clusters list of clusters with assigned documents
 * @param label label for the "Other Topics" group
 * @return the "Other Topics" cluster
 */
public static Cluster buildOtherTopics(
        List<Document> allDocuments, List<Cluster> clusters, String label) {
    final Set<Document> unclusteredDocuments = Sets.newLinkedHashSet(allDocuments);
    final Set<Document> assignedDocuments = Sets.newHashSet();

    for (Cluster cluster : clusters) {
        collectAllDocuments(cluster, assignedDocuments);
    }

    unclusteredDocuments.removeAll(assignedDocuments);

    final Cluster otherTopics = new Cluster(label);
    otherTopics.addDocuments(unclusteredDocuments);
    otherTopics.setOtherTopics(true);

    return otherTopics;
}
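// Usage sketch (the documents, the clustering step, and clusterDocuments() are all
// hypothetical): any document not collected from `clusters` ends up in the synthetic group.
List<Document> allDocuments = Arrays.asList(docA, docB, docC); // docA..docC built elsewhere
List<Cluster> clusters = clusterDocuments(allDocuments);       // hypothetical clustering call
Cluster otherTopics = buildOtherTopics(allDocuments, clusters, "Other Topics");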
public String toString() {
    StringBuilder sb = new StringBuilder();
    lock.readLock().lock();
    try {
        Set<InetAddress> eps = tokenToEndPointMap.inverse().keySet();

        if (!eps.isEmpty()) {
            sb.append("Normal Tokens:");
            sb.append(System.getProperty("line.separator"));
            for (InetAddress ep : eps) {
                sb.append(ep);
                sb.append(":");
                sb.append(tokenToEndPointMap.inverse().get(ep));
                sb.append(System.getProperty("line.separator"));
            }
        }

        if (!bootstrapTokens.isEmpty()) {
            sb.append("Bootstrapping Tokens:");
            sb.append(System.getProperty("line.separator"));
            for (Map.Entry<Token, InetAddress> entry : bootstrapTokens.entrySet()) {
                sb.append(entry.getValue()).append(":").append(entry.getKey());
                sb.append(System.getProperty("line.separator"));
            }
        }

        if (!leavingEndPoints.isEmpty()) {
            sb.append("Leaving EndPoints:");
            sb.append(System.getProperty("line.separator"));
            for (InetAddress ep : leavingEndPoints) {
                sb.append(ep);
                sb.append(System.getProperty("line.separator"));
            }
        }

        if (!pendingRanges.isEmpty()) {
            sb.append("Pending Ranges:");
            sb.append(System.getProperty("line.separator"));
            sb.append(printPendingRanges());
        }
    } finally {
        lock.readLock().unlock();
    }

    return sb.toString();
}
/**
 * Returns all the fields that match the given pattern. If the pattern is prefixed with a type,
 * then the fields will be returned with a type prefix.
 */
public Set<String> simpleMatchToIndexNames(String pattern) {
    if (!Regex.isSimpleMatchPattern(pattern)) {
        return ImmutableSet.of(pattern);
    }
    int dotIndex = pattern.indexOf('.');
    if (dotIndex != -1) {
        String possibleType = pattern.substring(0, dotIndex);
        DocumentMapper possibleDocMapper = mappers.get(possibleType);
        if (possibleDocMapper != null) {
            Set<String> typedFields = Sets.newHashSet();
            for (String indexName : possibleDocMapper.mappers().simpleMatchToIndexNames(pattern)) {
                typedFields.add(possibleType + "." + indexName);
            }
            return typedFields;
        }
    }
    return fieldMappers.simpleMatchToIndexNames(pattern);
}
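// Illustrative expectations, read directly off the code above (field and type names
// are hypothetical): a pattern with no wildcard is returned as-is; a wildcard pattern
// is expanded against the known field mappers; and when the segment before the first
// '.' names a known document type, expansion is delegated to that type's mappers and
// each result comes back "type."-prefixed.
//   simpleMatchToIndexNames("name") -> ["name"]
//   simpleMatchToIndexNames("na*")  -> e.g. ["name", "nationality"]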
private static void test() {
    String customerKey = "customer";
    String productKey = "product";
    IntStream.range(0, 5)
            .forEach(index ->
                    customerCache.put(customerKey + index, new Customer(customerKey + index, "name" + index)));
    IntStream.range(0, 100)
            .forEach(index ->
                    productCache.put(productKey + index, new Product(productKey + index, "category" + (index % 5))));

    Random random = new Random();
    Set<String> productIds = new HashSet<>();
    int activeProductSize = productCache.size() / 10;
    for (int i = 0; i < 1000; i++) {
        Customer customer = customerCache.get(customerKey + random.nextInt(customerCache.size()));
        int randomSuffix = random.nextInt(activeProductSize);
        if (randomSuffix < 3) {
            randomSuffix = random.nextInt(productCache.size());
        }
        Product product = productCache.get(productKey + randomSuffix);
        if (!productIds.contains(product.id)) {
            purchasesCache.put(customer.id, product.id);
            productIds.add(product.id);
        }
    }

    createProductAssociativityGraphPerCategory();

    ProductRecommendationSystem recommendationSystem = new ProductRecommendationSystem(7);
    for (int i = 0; i < activeProductSize * 2; i++) {
        Customer customer = customerCache.get(customerKey + random.nextInt(customerCache.size()));
        Product product = productCache.get(productKey + random.nextInt(activeProductSize));
        List<Product> recommendations = recommendationSystem.purchase(customer, product);
        System.out.printf("%s%n", recommendations);
    }
}
/**
 * @param gcBefore the gcBefore timestamp used to identify fully expired sstables
 * @return the sstables to compact next in the background, including any fully expired ones
 */
private List<SSTableReader> getNextBackgroundSSTables(final int gcBefore) {
    if (!isEnabled() || cfs.getSSTables().isEmpty())
        return Collections.emptyList();

    Set<SSTableReader> uncompacting = Sets.intersection(sstables, cfs.getUncompactingSSTables());

    // Find fully expired SSTables. Those will be included no matter what.
    Set<SSTableReader> expired = CompactionController.getFullyExpiredSSTables(
            cfs, uncompacting, cfs.getOverlappingSSTables(uncompacting), gcBefore);
    Set<SSTableReader> candidates = Sets.newHashSet(filterSuspectSSTables(uncompacting));

    List<SSTableReader> compactionCandidates =
            new ArrayList<>(getNextNonExpiredSSTables(Sets.difference(candidates, expired), gcBefore));
    if (!expired.isEmpty()) {
        logger.debug("Including expired sstables: {}", expired);
        compactionCandidates.addAll(expired);
    }
    return compactionCandidates;
}
public boolean isLeaving(InetAddress endpoint) {
    assert endpoint != null;

    lock.readLock().lock();
    try {
        return leavingEndPoints.contains(endpoint);
    } finally {
        lock.readLock().unlock();
    }
}
public void addLeavingEndPoint(InetAddress endpoint) {
    assert endpoint != null;

    lock.writeLock().lock();
    try {
        leavingEndPoints.add(endpoint);
    } finally {
        lock.writeLock().unlock();
    }
}