/**
 * This is the standard liveness calculation (Dragon Book, section 10.6). At each BB (and its
 * corresponding usage), we evaluate "in" using use and def: in = use U (out \ def), where
 * out = U succ.in over all successors.
 *
 * <p>This algorithm has been modified to treat catch blocks as reachable from anywhere in the
 * try, i.e., vars defined in a try may never actually be set. However, this only applies to
 * vars that have been defined at least once (i.e., born).
 */
public boolean evalLiveIn(ArrayList<Usage> succUsage, ArrayList<Handler> handUsage) {
  BitSet out = new BitSet(nLocals);
  BitSet old_in = (BitSet) in.clone();
  if (handUsage == null) handUsage = new ArrayList<>();
  if (succUsage.size() == 0) {
    in = use;
  } else {
    // calculate out = U succ.in
    out = (BitSet) succUsage.get(0).in.clone();
    for (int i = 1; i < succUsage.size(); i++) {
      out.or(succUsage.get(i).in);
    }
    // calc out \ def == out & ~def
    // unless a var has been def'd in all catch blocks, assume it may fail to def
    BitSet def1 = (BitSet) def.clone();
    for (Handler handle : handUsage) def1.and(handle.catchBB.usage.def);
    def1.flip(0, nLocals);
    out.and(def1);
    for (Handler handler : handUsage) out.or(handler.catchBB.usage.use);
    // catch block vars may be def'd in this block, but we can't easily know if the
    // def has occurred before the throw
    // if the var has never been def'd (or was a parameter), then it can't be live
    out.and(born);
    out.or(use);
    in = out;
  }
  return !in.equals(old_in);
}
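// The core transfer function above is just three BitSet operations. A minimal standalone
// sketch of in = use U (out \ def); the local names here are illustrative, not the fields
// of Usage:
static void livenessDemo() {
  BitSet use = new BitSet();
  use.set(0); // local 0 is read before any write in this block
  BitSet def = new BitSet();
  def.set(1); // local 1 is written in this block
  BitSet out = new BitSet();
  out.set(1);
  out.set(2); // locals 1 and 2 are live after the block

  BitSet in = (BitSet) out.clone(); // clone so "out" itself is not destroyed
  in.andNot(def); // out \ def = {2}
  in.or(use); // union with use = {0, 2}
  System.out.println(in); // {0, 2} -- local 1 is killed by its definition
}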
/**
 * Returns an exact copy of the game. This may be used for forward searches such as minimax.
 * The copying is relatively efficient.
 *
 * @return the game
 */
public Game copy() {
  Game copy = new Game();
  copy.seed = seed;
  copy.rnd = new Random(seed);
  copy.laberintoActua = laberintoActua;
  copy.pills = (BitSet) pills.clone();
  copy.powerPills = (BitSet) powerPills.clone();
  copy.indiceDeLaberinto = indiceDeLaberinto;
  copy.cuentaElLvl = cuentaElLvl;
  copy.tiempoLvlActual = tiempoLvlActual;
  copy.tiempoTotal = tiempoTotal;
  copy.score = score;
  copy.fastamasComerMultiplicador = fastamasComerMultiplicador;
  copy.juegoTerminado = juegoTerminado;
  copy.timeOfLastGlobalReversal = timeOfLastGlobalReversal;
  copy.pacmanFueComido = pacmanFueComido;
  copy.pastillaFueComida = pastillaFueComida;
  copy.pildoraPoderFueComida = pildoraPoderFueComida;
  copy.pacman = pacman.copy();
  copy.fantasmaComido = new EnumMap<GHOST, Boolean>(GHOST.class);
  copy.fantasmas = new EnumMap<GHOST, Ghost>(GHOST.class);
  for (GHOST ghostType : GHOST.values()) {
    copy.fantasmas.put(ghostType, fantasmas.get(ghostType).copy());
    copy.fantasmaComido.put(ghostType, fantasmaComido.get(ghostType));
  }
  return copy;
}
/** Variant of {@link #trimFields(RelNode, BitSet, Set)} for {@link SortRel}. */
public TrimResult trimFields(SortRel sort, BitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
  final RelDataType rowType = sort.getRowType();
  final int fieldCount = rowType.getFieldCount();
  final RelCollation collation = sort.getCollation();
  final RelNode input = sort.getChild();

  // We use the fields used by the consumer, plus any fields used as sort keys.
  BitSet inputFieldsUsed = (BitSet) fieldsUsed.clone();
  for (RelFieldCollation field : collation.getFieldCollations()) {
    inputFieldsUsed.set(field.getFieldIndex());
  }

  // Create input with trimmed columns.
  final Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
  TrimResult trimResult = trimChild(sort, input, inputFieldsUsed, inputExtraFields);
  RelNode newInput = trimResult.left;
  final Mapping inputMapping = trimResult.right;

  // If the input is unchanged, and we need to project all columns,
  // there's nothing we can do.
  if (newInput == input && inputMapping.isIdentity() && fieldsUsed.cardinality() == fieldCount) {
    return new TrimResult(sort, Mappings.createIdentity(fieldCount));
  }

  final SortRel newSort =
      sort.copy(sort.getTraitSet(), newInput, RexUtil.apply(inputMapping, collation));
  assert newSort.getClass() == sort.getClass();

  // The result has the same mapping as the input gave us. Sometimes we
  // return fields that the consumer didn't ask for, because the filter
  // needs them for its condition.
  return new TrimResult(newSort, inputMapping);
}
private CompositeModuleInterpreterActor(Factory factory) {
  super(
      factory.interpreterProperties,
      factory.stagingArea.getAnnotatedExecutionTrace(),
      factory.moduleId);
  module = factory.module;
  stagingArea = factory.stagingArea;
  interpreterPropsProvider = factory.interpreterPropsProvider;
  recomputedInPorts = factory.recomputedInPorts;
  requestedOutPorts = factory.requestedOutPorts;

  int numSubmodules = module.getModules().size();
  childExecutors = new ActorRef[numSubmodules];
  outPortsRequiringValue = (BitSet) requestedOutPorts.clone();
  childActorMap = new HashMap<>(numSubmodules);
  dependencyGraph = new DependencyGraph(module, requestedOutPorts);
  for (InPortNode inPortNode : dependencyGraph.inPortNodes()) {
    HasValue hasValue = factory.inPortsHasValueList.get(inPortNode.getElement().getInIndex());
    assert hasValue != HasValue.PENDING_VALUE_CHECK;
    inPortNode.setHasValue(hasValue);
  }
  inPortReceivedMessage = new BitSet(module.getInPorts().size());
  submoduleOutPortsReceivedMessage = new BitSet[numSubmodules];
  for (RuntimeModule submodule : module.getModules()) {
    submoduleOutPortsReceivedMessage[submodule.getIndex()] =
        new BitSet(submodule.getOutPorts().size());
  }
  computeResumeState =
      new ComputeResumeState(dependencyGraph, recomputedInPorts, this::asynchronousHasValueCheck);
}
static {
  URISave = new BitSet(256);
  int i;
  for (i = 'a'; i <= 'z'; i++) {
    URISave.set(i);
  }
  for (i = 'A'; i <= 'Z'; i++) {
    URISave.set(i);
  }
  for (i = '0'; i <= '9'; i++) {
    URISave.set(i);
  }
  URISave.set('-');
  URISave.set('_');
  URISave.set('.');
  URISave.set('!');
  URISave.set('~');
  URISave.set('*');
  URISave.set('\'');
  URISave.set('(');
  URISave.set(')');

  URISaveEx = (BitSet) URISave.clone();
  URISaveEx.set('/');
}
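// A character set like URISave is typically consumed by an escape routine that copies safe
// bytes through and percent-encodes everything else. A minimal sketch under that assumption
// (this escape helper is illustrative, not part of the original class):
static String escape(String s, BitSet safe) {
  StringBuilder sb = new StringBuilder();
  for (byte b : s.getBytes(java.nio.charset.StandardCharsets.UTF_8)) {
    int c = b & 0xff;
    if (c < 128 && safe.get(c)) {
      sb.append((char) c); // byte is marked safe: copy it through
    } else {
      sb.append('%').append(String.format("%02X", c)); // otherwise percent-encode it
    }
  }
  return sb.toString();
}
// For example, escape("a b/c", URISaveEx) would yield "a%20b/c", while the stricter
// URISave set would also encode the slash.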
/**
 * Get the set of alternate scripts found in the identifiers. That is, when a character can be
 * in two scripts, then the set consisting of those scripts will be returned.
 *
 * @return the set of explicit scripts.
 * @internal
 * @deprecated This API is ICU internal only.
 */
@Deprecated
public Set<BitSet> getAlternates() {
  Set<BitSet> result = new HashSet<BitSet>();
  for (BitSet item : scriptSetSet) {
    result.add((BitSet) item.clone());
  }
  return result;
}
/**
 * Evolve the born value by a single iteration, mixing in either pred.born or combo.
 *
 * @param pred if combo is null, use pred.born
 * @param combo if non-null, the value to mix in
 * @return true if the evolution resulted in a change in the born value
 */
boolean evalBornIn(Usage pred, BitSet combo) {
  BitSet old = (BitSet) born.clone();
  if (combo == null) combo = pred.born;
  if (firstBorn) born.or(combo);
  else born.and(combo);
  firstBorn = false;
  return !old.equals(born);
}
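// The firstBorn flag makes the first contribution seed the set and every later contribution
// intersect with it, so born converges to "defined along all incoming paths". A small
// standalone illustration (the names here are illustrative, not the fields above):
static void bornMeetDemo() {
  BitSet born = new BitSet();
  BitSet predA = new BitSet();
  predA.set(0);
  predA.set(1); // path A defines locals 0 and 1
  BitSet predB = new BitSet();
  predB.set(1);
  predB.set(2); // path B defines locals 1 and 2
  born.or(predA); // first predecessor seeds the set: born = {0, 1}
  born.and(predB); // later predecessors intersect: born = {1}
  System.out.println(born); // {1} -- only local 1 is defined on every path
}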
public synchronized void commit() {
  Object[] o = observers.toArray();
  for (int i = 0; i < o.length; i++) {
    BitSet tmp = (BitSet) changeMap.clone();
    ObserverEntry oe = (ObserverEntry) o[i];
    tmp.and(oe.ids);
    // notify the observer only if it subscribed to at least one changed id
    if (tmp.length() != 0) oe.observer.update(this, oe.tag);
  }
}
public void testItShouldNotFullyIntersectTheFingerprintDerivedFromASubstructure() {
  BitSet benzene = fingerprinter.getFingerprint(Molecules.createBenzene());
  BitSet phenol = fingerprinter.getFingerprint(Molecules.createPhenol());
  BitSet intersection = (BitSet) phenol.clone();
  intersection.and(benzene);
  assertFalse(match(phenol, benzene));
}
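// The match helper is not shown above. A common subset test over fingerprints, which this
// assertion is presumably exercising, clones the query and checks that intersecting it with
// the reference loses no bits (a sketch, not the original implementation):
static boolean match(BitSet query, BitSet reference) {
  BitSet tmp = (BitSet) query.clone();
  tmp.and(reference);
  return tmp.equals(query); // true iff every bit of query is also set in reference
}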
/** Fetch unique identity for passed composition. */
int getIdentity(BitSet components) {
  Object[] bitsets = composition.getData();
  int size = composition.size();
  for (int i = NO_COMPONENTS; size > i; i++) { // want to start from 1 so that 0 can mean null
    if (components.equals(bitsets[i])) return i;
  }
  composition.add((BitSet) components.clone());
  return size;
}
/**
 * Update the piece availabilities for a given peer
 *
 * @param peerID String
 * @param has BitSet
 */
public synchronized void peerAvailability(String peerID, BitSet has) {
  this.peerAvailabilies.put(peerID, has);
  BitSet interest = (BitSet) (has.clone());
  interest.andNot(this.isComplete);
  DownloadTask dt = this.task.get(peerID);
  if (dt != null) {
    if (interest.cardinality() > 0 && !dt.peer.isInteresting()) {
      dt.ms.addMessageToQueue(new Message_PP(PeerProtocol.INTERESTED, 2));
      dt.peer.setInteresting(true);
    }
  }
  dt = null;
}
@Override
public final void filter(List<IRow> data, BitSet mask, BitSet mask_filteredOutInfluenceRanking) {
  if (!isFiltered()) return;
  if (!maskInvalid.isEmpty()) {
    BitSet todo = (BitSet) maskInvalid.clone();
    todo.and(mask);
    updateMask(todo, data, this.mask);
    maskInvalid.andNot(todo);
  }
  mask.and(this.mask);
  if (!isRankIndependentFilter) // mark that the masked out are persistent
    mask_filteredOutInfluenceRanking.and(this.mask);
}
// Creates an exact copy of the game
public Game copy() {
  G copy = new G();
  copy.pills = (BitSet) pills.clone();
  copy.powerPills = (BitSet) powerPills.clone();
  copy.curMaze = curMaze;
  copy.totLevel = totLevel;
  copy.levelTime = levelTime;
  copy.totalTime = totalTime;
  copy.score = score;
  copy.ghostEatMultiplier = ghostEatMultiplier;
  copy.gameOver = gameOver;
  copy.curPacManLoc = curPacManLoc;
  copy.lastPacManDir = lastPacManDir;
  copy.livesRemaining = livesRemaining;
  copy.extraLife = extraLife;
  copy.curGhostLocs = Arrays.copyOf(curGhostLocs, curGhostLocs.length);
  copy.lastGhostDirs = Arrays.copyOf(lastGhostDirs, lastGhostDirs.length);
  copy.edibleTimes = Arrays.copyOf(edibleTimes, edibleTimes.length);
  copy.lairTimes = Arrays.copyOf(lairTimes, lairTimes.length);
  return copy;
}
// deep clone
public CachedRow createClone() throws CloneNotSupportedException {
  CachedRow cr = (CachedRow) super.clone();
  Object[] cd = new Object[columnData.length];
  for (int i = 0; i < columnData.length; i++) {
    cd[i] = columnData[i];
  }
  cr.columnData = cd;
  cr.isInsert = isInsert;
  cr.isDelete = isDelete;
  cr.isUpdate = isUpdate;
  cr.mask = (BitSet) mask.clone();
  cr.nonUpdateable = nonUpdateable;
  // cr.originalColumnData
  return cr;
}
static {
  RFC2396_UNRESERVED_CHARACTERS.set('a', 'z' + 1);
  RFC2396_UNRESERVED_CHARACTERS.set('A', 'Z' + 1);
  RFC2396_UNRESERVED_CHARACTERS.set('0', '9' + 1);
  RFC2396_UNRESERVED_CHARACTERS.set('-');
  RFC2396_UNRESERVED_CHARACTERS.set('_');
  RFC2396_UNRESERVED_CHARACTERS.set('.');
  RFC2396_UNRESERVED_CHARACTERS.set('!');
  RFC2396_UNRESERVED_CHARACTERS.set('~');
  RFC2396_UNRESERVED_CHARACTERS.set('*');
  RFC2396_UNRESERVED_CHARACTERS.set('\'');
  RFC2396_UNRESERVED_CHARACTERS.set('(');
  RFC2396_UNRESERVED_CHARACTERS.set(')');

  RFC2396_UNRESERVED_WITH_SLASH_CHARACTERS = (BitSet) RFC2396_UNRESERVED_CHARACTERS.clone();
  RFC2396_UNRESERVED_WITH_SLASH_CHARACTERS.set('/');
}
BitSet addClause(BooleanQuery bq, BitSet result) {
  // pick a random pre-built BitSet and wrap it as a constant-score MUST clause
  final BitSet rnd = sets[r.nextInt(sets.length)];
  Query q =
      new ConstantScoreQuery(
          new Filter() {
            @Override
            public DocIdSet getDocIdSet(IndexReader reader) {
              return new DocIdBitSet(rnd);
            }
          });
  bq.add(q, BooleanClause.Occur.MUST);
  if (validate) {
    // track the expected result: the intersection of all MUST clauses added so far
    if (result == null) result = (BitSet) rnd.clone();
    else result.and(rnd);
  }
  return result;
}
/** Variant of {@link #trimFields(RelNode, BitSet, Set)} for {@link FilterRel}. */
public TrimResult trimFields(
    FilterRel filter, BitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
  final RelDataType rowType = filter.getRowType();
  final int fieldCount = rowType.getFieldCount();
  final RexNode conditionExpr = filter.getCondition();
  final RelNode input = filter.getChild();

  // We use the fields used by the consumer, plus any fields used in the filter.
  BitSet inputFieldsUsed = (BitSet) fieldsUsed.clone();
  final Set<RelDataTypeField> inputExtraFields =
      new LinkedHashSet<RelDataTypeField>(extraFields);
  RelOptUtil.InputFinder inputFinder =
      new RelOptUtil.InputFinder(inputFieldsUsed, inputExtraFields);
  conditionExpr.accept(inputFinder);

  // Create input with trimmed columns.
  TrimResult trimResult = trimChild(filter, input, inputFieldsUsed, inputExtraFields);
  RelNode newInput = trimResult.left;
  final Mapping inputMapping = trimResult.right;

  // If the input is unchanged, and we need to project all columns,
  // there's nothing we can do.
  if (newInput == input && fieldsUsed.cardinality() == fieldCount) {
    return new TrimResult(filter, Mappings.createIdentity(fieldCount));
  }

  // Build new project expressions, and populate the mapping.
  final RexVisitor<RexNode> shuttle = new RexPermuteInputsShuttle(inputMapping, newInput);
  RexNode newConditionExpr = conditionExpr.accept(shuttle);

  final FilterRel newFilter = new FilterRel(filter.getCluster(), newInput, newConditionExpr);
  assert newFilter.getClass() == filter.getClass();

  // The result has the same mapping as the input gave us. Sometimes we
  // return fields that the consumer didn't ask for, because the filter
  // needs them for its condition.
  return new TrimResult(newFilter, inputMapping);
}
public List<TOE> next() {
  List<TOE> ret = new LinkedList<TOE>();
  for (int i = 0; i < N; i++)
    for (int j = 0; j < M; j++)
    // if (G.get(i * M + j))
    {
      // flip cell (i, j) and its four orthogonal neighbours, bounds-checked
      BitSet Gnew = (BitSet) (G.clone());
      int[] x = {0, 1, -1, 0, 0};
      int[] y = {0, 0, 0, 1, -1};
      for (int k = 0; k < 5; k++) {
        int xi = x[k];
        int yi = y[k];
        if ((i + xi) >= 0 && (i + xi) < N && (j + yi) >= 0 && (j + yi) < M) {
          Gnew.flip((i + xi) * M + (j + yi));
        }
      }
      TOE to = new TOE(N, M, Gnew);
      to.mx = i;
      to.my = j;
      ret.add(to);
    }
  return ret;
}
public boolean accepts(int index, Automaton automaton, Term value) {
  Automaton.State state = automaton.states[index];
  if (visited.get(index)) {
    return false;
  } else if (state.kind != Type.K_UNION && state.kind != Type.K_NEGATION) {
    visited.clear();
  }
  switch (state.kind) {
    case Type.K_ANY:
      return true; // easy
    case Type.K_VOID:
      return false; // easy
    case Type.K_LIST:
    case Type.K_SET:
      {
        if (value.kind != state.kind) {
          return false;
        }
        int child = automaton.states[index].children[0];
        Term[] values = value.children;
        for (int i = 0; i != values.length; ++i) {
          Term vchild = values[i];
          if (!accepts(child, automaton, vchild)) {
            return false;
          }
        }
        return true;
      }
    case Type.K_FUNCTION:
    case Type.K_METHOD:
      {
        int[] schildren = state.children;
        Term[] vchildren = value.children;
        if (schildren.length != vchildren.length) {
          return false;
        }
        int length = schildren.length;
        // First, do parameters (which are contravariant).
        for (int i = 2; i < length; ++i) {
          int schild = schildren[i];
          Term vchild = vchildren[i];
          if (accepts(schild, automaton, vchild)) {
            return false;
          }
        }
        // Second, do return values (which are covariant)
        if (!accepts(schildren[2], automaton, vchildren[2])) {
          return false;
        }
        // Third, do return values (which should be contra-variant)
        return true;
      }
    case Type.K_NEGATION:
      {
        int child = automaton.states[index].children[0];
        visited.set(index);
        return !accepts(child, automaton, value);
      }
    case Type.K_UNION:
      {
        int[] children = automaton.states[index].children;
        visited.set(index);
        // each alternative must start from the same visited set, so branch on a clone
        BitSet copy = visited;
        for (int child : children) {
          visited = (BitSet) copy.clone();
          if (accepts(child, automaton, value)) {
            return true;
          }
        }
        copy.clear();
        return false;
      }
  }
  return super.accepts(index, automaton, value);
}
@Override
public ImmutableBitmap difference(ImmutableBitmap otherBitmap) {
  // clone first so that this immutable bitmap is never mutated
  WrappedBitSetBitmap retval = new WrappedBitSetBitmap((BitSet) bitmap.clone());
  retval.andNot((WrappedBitSetBitmap) otherBitmap);
  return retval;
}
@Override
public ImmutableBitmap intersection(ImmutableBitmap otherBitmap) {
  // same clone-then-mutate pattern as difference(...)
  WrappedBitSetBitmap retval = new WrappedBitSetBitmap((BitSet) bitmap.clone());
  retval.and((WrappedBitSetBitmap) otherBitmap);
  return retval;
}
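// Both methods follow the same clone-then-mutate pattern so that the receiver stays
// immutable. A union written in the same style might look like the sketch below; note that
// the or(...) mutator is assumed by symmetry with and(...) and andNot(...) above and is not
// confirmed by the source:
@Override
public ImmutableBitmap union(ImmutableBitmap otherBitmap) {
  WrappedBitSetBitmap retval = new WrappedBitSetBitmap((BitSet) bitmap.clone());
  retval.or((WrappedBitSetBitmap) otherBitmap); // assumed mutator, see note above
  return retval;
}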
/**
 * Searches the attribute subset space by best first search
 *
 * @param ASEval the attribute evaluator to guide the search
 * @param data the training instances.
 * @return an array (not necessarily ordered) of selected attribute indexes
 * @throws Exception if the search can't be completed
 */
public int[] search(ASEvaluation ASEval, Instances data) throws Exception {
  m_totalEvals = 0;
  if (!(ASEval instanceof SubsetEvaluator)) {
    throw new Exception(ASEval.getClass().getName() + " is not a " + "Subset evaluator!");
  }
  if (ASEval instanceof UnsupervisedSubsetEvaluator) {
    m_hasClass = false;
  } else {
    m_hasClass = true;
    m_classIndex = data.classIndex();
  }
  SubsetEvaluator ASEvaluator = (SubsetEvaluator) ASEval;
  m_numAttribs = data.numAttributes();

  int i, j;
  int best_size = 0;
  int size = 0;
  int done;
  int sd = m_searchDirection;
  BitSet best_group, temp_group;
  int stale;
  double best_merit;
  double merit;
  boolean z;
  boolean added;
  Link2 tl;
  Hashtable lookup = new Hashtable(m_cacheSize * m_numAttribs);
  int insertCount = 0;
  int cacheHits = 0;
  LinkedList2 bfList = new LinkedList2(m_maxStale);
  best_merit = -Double.MAX_VALUE;
  stale = 0;
  best_group = new BitSet(m_numAttribs);

  m_startRange.setUpper(m_numAttribs - 1);
  if (!(getStartSet().equals(""))) {
    m_starting = m_startRange.getSelection();
  }

  // If a starting subset has been supplied, then initialise the bitset
  if (m_starting != null) {
    for (i = 0; i < m_starting.length; i++) {
      if ((m_starting[i]) != m_classIndex) {
        best_group.set(m_starting[i]);
      }
    }
    best_size = m_starting.length;
    m_totalEvals++;
  } else {
    if (m_searchDirection == SELECTION_BACKWARD) {
      setStartSet("1-last");
      m_starting = new int[m_numAttribs];
      // init initial subset to all attributes
      for (i = 0, j = 0; i < m_numAttribs; i++) {
        if (i != m_classIndex) {
          best_group.set(i);
          m_starting[j++] = i;
        }
      }
      best_size = m_numAttribs - 1;
      m_totalEvals++;
    }
  }

  // evaluate the initial subset
  best_merit = ASEvaluator.evaluateSubset(best_group);

  // add the initial group to the list and the hash table
  Object[] best = new Object[1];
  best[0] = best_group.clone();
  bfList.addToList(best, best_merit);
  BitSet tt = (BitSet) best_group.clone();
  String hashC = tt.toString();
  lookup.put(hashC, new Double(best_merit));

  while (stale < m_maxStale) {
    added = false;
    if (m_searchDirection == SELECTION_BIDIRECTIONAL) {
      // bi-directional search
      done = 2;
      sd = SELECTION_FORWARD;
    } else {
      done = 1;
    }

    // finished search?
    if (bfList.size() == 0) {
      stale = m_maxStale;
      break;
    }

    // copy the attribute set at the head of the list
    tl = bfList.getLinkAt(0);
    temp_group = (BitSet) (tl.getData()[0]);
    temp_group = (BitSet) temp_group.clone();
    // remove the head of the list
    bfList.removeLinkAt(0);

    // count the number of bits set (attributes)
    int kk;
    for (kk = 0, size = 0; kk < m_numAttribs; kk++) {
      if (temp_group.get(kk)) {
        size++;
      }
    }

    do {
      for (i = 0; i < m_numAttribs; i++) {
        if (sd == SELECTION_FORWARD) {
          z = ((i != m_classIndex) && (!temp_group.get(i)));
        } else {
          z = ((i != m_classIndex) && (temp_group.get(i)));
        }
        if (z) {
          // set the bit (attribute to add/delete)
          if (sd == SELECTION_FORWARD) {
            temp_group.set(i);
            size++;
          } else {
            temp_group.clear(i);
            size--;
          }

          /* if this subset has been seen before, then it is already in the list
             (or has been fully expanded) */
          tt = (BitSet) temp_group.clone();
          hashC = tt.toString();
          if (lookup.containsKey(hashC) == false) {
            merit = ASEvaluator.evaluateSubset(temp_group);
            m_totalEvals++;

            // insert this one in the hashtable
            if (insertCount > m_cacheSize * m_numAttribs) {
              lookup = new Hashtable(m_cacheSize * m_numAttribs);
              insertCount = 0;
            }
            hashC = tt.toString();
            lookup.put(hashC, new Double(merit));
            insertCount++;
          } else {
            merit = ((Double) lookup.get(hashC)).doubleValue();
            cacheHits++;
          }

          // insert this one in the list
          Object[] add = new Object[1];
          add[0] = tt.clone();
          bfList.addToList(add, merit);

          if (m_debug) {
            System.out.print("Group: ");
            printGroup(tt, m_numAttribs);
            System.out.println("Merit: " + merit);
          }

          // is this better than the best?
          if (sd == SELECTION_FORWARD) {
            z = ((merit - best_merit) > 0.00001);
          } else {
            if (merit == best_merit) {
              z = (size < best_size);
            } else {
              z = (merit > best_merit);
            }
          }

          if (z) {
            added = true;
            stale = 0;
            best_merit = merit;
            // best_size = (size + best_size);
            best_size = size;
            best_group = (BitSet) (temp_group.clone());
          }

          // unset this addition(deletion)
          if (sd == SELECTION_FORWARD) {
            temp_group.clear(i);
            size--;
          } else {
            temp_group.set(i);
            size++;
          }
        }
      }
      if (done == 2) {
        sd = SELECTION_BACKWARD;
      }
      done--;
    } while (done > 0);

    /* if we haven't added a new attribute subset then full expansion
       of this node hasn't resulted in anything better */
    if (!added) {
      stale++;
    }
  }
  m_bestMerit = best_merit;
  return attributeList(best_group);
}
/**
 * Run the LAPIN algorithm
 *
 * @param input the input file path
 * @param minsupRel the minsup threshold as a percentage
 */
private void lapin(String input, double minsupRel) throws IOException {
  if (DEBUG) {
    System.out.println(
        "=== First database scan to count number of sequences and support of single items ===");
  }

  // FIRST DATABASE SCAN: SCAN THE DATABASE TO COUNT
  // - THE NUMBER OF SEQUENCES
  // - THE SUPPORT OF EACH SINGLE ITEM
  // - THE LARGEST ITEM ID
  int sequenceCount = 0;
  int largestItemID = 0;
  // This map will store for each item (key) the first position where the item appears in
  // each sequence where it appears (value)
  Map<Integer, List<Position>> mapItemFirstOccurrences = new HashMap<Integer, List<Position>>();
  try {
    // Read the input file
    BufferedReader reader =
        new BufferedReader(new InputStreamReader(new FileInputStream(new File(input))));
    String thisLine;
    // for each sequence of the input file
    while ((thisLine = reader.readLine()) != null) {
      // we use a set to remember which items have been seen already
      Set<Integer> itemsAlreadySeen = new HashSet<Integer>();
      // to know the itemset number
      short itemsetID = 0;
      // for each token in this line
      for (String integer : thisLine.split(" ")) {
        // if it is the end of an itemset
        if ("-1".equals(integer)) {
          itemsetID++;
        } else if ("-2".equals(integer)) {
          // if it is the end of line, nothing to do here
        } else {
          // otherwise, it is an item
          Integer item = Integer.valueOf(integer);
          // if this item was not seen already in that sequence
          if (itemsAlreadySeen.contains(item) == false) {
            // Get the list of positions of that item
            List<Position> list = mapItemFirstOccurrences.get(item);
            // if that list is null, create a new list
            if (list == null) {
              list = new ArrayList<Position>();
              mapItemFirstOccurrences.put(item, list);
            }
            // Add the position of the item in that sequence to the list of first
            // positions of that item
            Position position = new Position(sequenceCount, itemsetID);
            list.add(position);
            // Remember that we have seen this item
            itemsAlreadySeen.add(item);
            // Check if the item is the largest item until now
            if (item > largestItemID) {
              largestItemID = item;
            }
          }
        }
      }
      // Increase the count of sequences from the input file
      sequenceCount++;
    }
    reader.close();
  } catch (Exception e) {
    e.printStackTrace();
  }

  // Initialize the list of tables
  tables = new Table[sequenceCount];

  // Calculate absolute minimum support as a number of sequences
  minsup = (int) Math.ceil(minsupRel * sequenceCount);
  if (minsup == 0) {
    minsup = 1;
  }

  if (DEBUG) {
    System.out.println("Number of items: " + mapItemFirstOccurrences.size());
    System.out.println("Sequence count: " + sequenceCount);
    System.out.println("Abs. minsup: " + minsup + " sequences");
    System.out.println("Rel. minsup: " + minsupRel + " %");
    System.out.println("=== Determining the frequent items ===");
  }

  // For each frequent item, save it and add it to the list of frequent items
  List<Integer> frequentItems = new ArrayList<Integer>();
  for (Entry<Integer, List<Position>> entry : mapItemFirstOccurrences.entrySet()) {
    // Get the border created by this item
    List<Position> itemBorder = entry.getValue();
    // if the item is frequent
    if (itemBorder.size() >= minsup) {
      // Output the item and add it to the list of frequent items
      Integer item = entry.getKey();
      savePattern(item, itemBorder.size());
      frequentItems.add(item);
      if (DEBUG) {
        System.out.println("  Item " + item + " is frequent with support = " + itemBorder.size());
      }
    }
  }

  if (DEBUG) {
    System.out.println("=== Second database scan to construct item-is-exist tables ===");
  }

  // sort the frequent items (useful when generating 2-IE-sequences, later on).
  Collections.sort(frequentItems);

  // SECOND DATABASE SCAN:
  // Now we will read the database again to create the Item-is-exist-table
  // and SE-position-lists and count support of 2-IE-sequences
  matrixPairCount = new SparseTriangularMatrix(largestItemID + 1);
  // Initialise the IE position lists and SE position lists
  sePositionList = new SEPositionList[sequenceCount];
  iePositionList = new IEPositionList[sequenceCount];
  try {
    // Prepare to read the file
    BufferedReader reader =
        new BufferedReader(new InputStreamReader(new FileInputStream(new File(input))));
    String thisLine;
    // For each sequence in the file
    int currentSequenceID = 0;
    while ((thisLine = reader.readLine()) != null) {
      // (1) ------- PARSE THE SEQUENCE BACKWARD TO CREATE THE ITEM-IS-EXIST TABLE FOR
      // THAT SEQUENCE AND COUNT THE SUPPORT OF 2-IE-SEQUENCES

      // We will also use a structure to remember in which sequence we have seen each pair
      // of items. Note that in this structure, we will add +1 to the sid because by default
      // the matrix is filled with 0 and we don't want to think that the first sequence was
      // already seen for all pairs.
      AbstractTriangularMatrix matrixPairLastSeenInSID =
          new SparseTriangularMatrix(largestItemID + 1);

      // We count the number of positions (number of itemsets).
      // To do that we count the number of "-" symbols in the file.
      // We need to subtract 1 because the end of line "-2" contains "-".
      int positionCount = -1;
      for (char caracter : thisLine.toCharArray()) {
        if (caracter == '-') {
          positionCount++;
        }
      }

      // Now we will scan the sequence again.
      // This time we will remember which items were seen already
      Set<Integer> itemsAlreadySeen = new HashSet<Integer>();
      // During this scan, we will create the table for this sequence
      Table table = new Table();
      // To do that, we first create an initial position vector for that table
      BitSet currentBitset = new BitSet(mapItemFirstOccurrences.size());
      // This variable will be used to remember if a new item appeared in the current itemset
      boolean seenNewItem = false;

      // We will scan the sequence backward, starting from the end, because we should not
      // create a bit vector for all positions but only for the positions that are
      // different from the previous one.
      String[] tokens = thisLine.split(" ");
      // This is the number of itemsets
      int currentPosition = positionCount;
      // to keep the current itemset in memory
      List<Integer> currentItemset = new ArrayList<Integer>();

      // For each token in that sequence
      for (int i = tokens.length - 1; i >= 0; i--) {
        // get the token
        String token = tokens[i];
        // if we reached the end of an itemset
        if ("-1".equals(token)) {
          // update the triangular matrix for counting 2-IE-sequences
          // by comparing each pair of items in the current itemset
          for (int k = 0; k < currentItemset.size(); k++) {
            Integer item1 = currentItemset.get(k);
            for (int m = k + 1; m < currentItemset.size(); m++) {
              Integer item2 = currentItemset.get(m);
              // check in which sequence this pair was last seen
              int sid = matrixPairLastSeenInSID.getSupportForItems(item1, item2);
              // if we have not seen this pair in this sequence yet
              if (sid != currentSequenceID + 1) {
                // increment support count of this pair
                matrixPairCount.incrementCount(item1, item2);
                // remember that we have seen this pair so that we don't count it again
                matrixPairLastSeenInSID.setSupport(item1, item2, currentSequenceID + 1);
              }
            }
          }
          currentItemset.clear();
          // Decrease the current index of the position (itemset) in the sequence
          currentPosition--;
          // if the bit vector has changed since the previous position, then
          // we need to add a new bit vector to the table
          if (seenNewItem) {
            // create the position vector and add it to the item-is-exist table
            PositionVector vector =
                new PositionVector(currentPosition, (BitSet) currentBitset.clone());
            table.add(vector);
          }
        } else if ("-2".equals(token)) {
          // if end of sequence, nothing to do
        } else {
          // otherwise, it is an item
          Integer item = Integer.valueOf(token);
          if (mapItemFirstOccurrences.get(item).size() >= minsup) { // only for frequent items
            // if first time that we see this item
            if (itemsAlreadySeen.contains(item) == false) {
              // remember that we have seen a new item
              seenNewItem = true;
              // remember that we have seen this item
              itemsAlreadySeen.add(item);
              // add this item to the current bit vector
              currentBitset.set(item);
            }
            // add this item to the current itemset
            currentItemset.add(item);
          }
        }
      }

      // Lastly, update the triangular matrix for counting 2-IE-sequences one more time,
      // for the case where the pair is in the first position of the sequence, by
      // considering each pair of items in the last itemset. This is done like it was
      // done above, so this part of the code is not commented again.
      for (int k = 0; k < currentItemset.size(); k++) {
        Integer item1 = currentItemset.get(k);
        for (int m = k + 1; m < currentItemset.size(); m++) {
          Integer item2 = currentItemset.get(m);
          int sid = matrixPairLastSeenInSID.getSupportForItems(item1, item2);
          if (sid != currentSequenceID + 1) {
            matrixPairCount.incrementCount(item1, item2);
            matrixPairLastSeenInSID.setSupport(item1, item2, currentSequenceID + 1);
          }
        }
      }

      // If a new item was seen, add an extra row to the item-is-exist table that will be
      // called -1, with all items in this sequence
      if (seenNewItem) {
        PositionVector vector = new PositionVector(-1, (BitSet) currentBitset.clone());
        table.add(vector);
      }

      // Initialize the IE lists and SE lists for that sequence,
      // which will be filled with the next database scan.
      sePositionList[currentSequenceID] = new SEPositionList(itemsAlreadySeen);
      iePositionList[currentSequenceID] = new IEPositionList();

      if (DEBUG) {
        System.out.println("Table for sequence " + currentSequenceID + " : " + thisLine);
        System.out.println(table.toString());
      }

      // put the current table in the array of item-is-exist-tables
      tables[currentSequenceID] = table;
      // we will process the next sequence id
      currentSequenceID++;
    }
    reader.close();
  } catch (Exception e) {
    e.printStackTrace();
  }

  // THIRD SCAN:
  // PARSE THE SEQUENCE FORWARD TO CREATE THE SE-POSITION LIST OF THAT SEQUENCE
  // AND THE IEPositionList FOR FREQUENT 2-IE-SEQUENCES
  try {
    BufferedReader reader =
        new BufferedReader(new InputStreamReader(new FileInputStream(new File(input))));
    String thisLine;
    // For each sequence
    int currentSequenceID = 0;
    while ((thisLine = reader.readLine()) != null) {
      String[] tokens = thisLine.split(" ");
      // to keep the current itemset in memory
      List<Integer> currentItemset = new ArrayList<Integer>();
      // this variable will be used to remember which itemset we are visiting
      short itemsetID = 0;
      // empty the object to track the current itemset (if it was used for the previous
      // sequence)
      currentItemset.clear();
      // for each token of the current sequence
      for (int i = 0; i < tokens.length; i++) {
        String token = tokens[i];
        // if we reached the end of an itemset
        if ("-1".equals(token)) {
          // if the current itemset contains more than one item
          if (currentItemset.size() > 1) {
            // update the position list for 2-IE-sequences
            for (int k = 0; k < currentItemset.size(); k++) {
              Integer item1 = currentItemset.get(k);
              for (int m = k + 1; m < currentItemset.size(); m++) {
                Integer item2 = currentItemset.get(m);
                // if the pair is frequent
                int support = matrixPairCount.getSupportForItems(item1, item2);
                if (support >= minsup) {
                  iePositionList[currentSequenceID].register(item1, item2, itemsetID);
                }
              }
            }
          }
          // increase itemsetID
          itemsetID++;
          // clear itemset
          currentItemset.clear();
        } else if ("-2".equals(token)) {
          // if the end of a sequence, nothing special to do
        } else {
          // otherwise, the current token is an item
          Integer item = Integer.valueOf(token);
          // if the item is frequent
          if (mapItemFirstOccurrences.get(item).size() >= minsup) {
            // we add the current position to the item SE-position list
            sePositionList[currentSequenceID].register(item, itemsetID);
            // we add the item to the current itemset
            currentItemset.add(item);
          }
        }
      }
      if (DEBUG) {
        System.out.println("SE Position list for sequence " + currentSequenceID);
        System.out.println(sePositionList[currentSequenceID]);
        System.out.println("IE Position list for sequence " + currentSequenceID);
        System.out.println(iePositionList[currentSequenceID]);
      }
      iePositionList[currentSequenceID].sort(); // sort the IE-position list
      // update the sequence id for the next sequence
      currentSequenceID++;
    }
    reader.close();
  } catch (Exception e) {
    e.printStackTrace();
  }

  if (DEBUG) {
    System.out.println("=== Starting sequential pattern generation ===");
  }

  // For each frequent item, call the recursive method to explore larger patterns
  for (int i = 0; i < frequentItems.size(); i++) {
    // Get the item
    int item1 = frequentItems.get(i);
    // Get the border for that item
    List<Position> item1Border = mapItemFirstOccurrences.get(item1);
    if (DEBUG) {
      System.out.println("=== Considering item " + item1);
      System.out.println("  Border of " + item1);
      for (Position pos : item1Border) {
        System.out.println("    seq: " + pos.sid + " itemset: " + pos.position);
      }
    }
    // if the border contains at least minsup sequences (i.e., the item is frequent)
    if (item1Border.size() >= minsup) {
      // Create an object prefix to represent the sequential pattern containing the item
      Prefix prefix = new Prefix();
      List<Integer> itemset = new ArrayList<Integer>(1);
      itemset.add(item1);
      prefix.itemsets.add(itemset);
      // make a recursive call to find s-extensions of this prefix
      // (true, to disallow I-extension because we explore 2-IE sequences separately)
      genPatterns(prefix, item1Border, frequentItems, frequentItems, item1, true);
    }

    // For each frequent 2-IE sequence starting with item1, we will explore 2-IE sequences
    // by considering each frequent item larger than item1
    for (int k = i + 1; k < frequentItems.size(); k++) {
      // We consider item2
      int item2 = frequentItems.get(k);
      // Get the support of {item1, item2}
      int support = matrixPairCount.getSupportForItems(item1, item2);
      // if the pair {item1, item2} is frequent
      if (support >= minsup) {
        // get the list of positions of item2
        List<Position> item2Border = mapItemFirstOccurrences.get(item2);
        // Create the border by using the 2-IE position list
        List<Position> ie12Border = new ArrayList<Position>();
        // We will loop over the border of item1 or item2 (the smallest one)
        List<Position> borderToUse;
        if (item2Border.size() < item1Border.size()) {
          borderToUse = item2Border;
        } else {
          borderToUse = item1Border;
        }
        // For each sequence of the border that we consider
        for (Position sequenceToUse : borderToUse) {
          // Get the sequence id
          int sid = sequenceToUse.sid;
          // For this sequence, we will get the position list of each item
          List<Short> listPosition1 = sePositionList[sid].getListForItem(item1);
          List<Short> listPosition2 = sePositionList[sid].getListForItem(item2);
          // if one of them is null, that means that both item1 and item2 do not appear
          // in that sequence, so we continue to the next sequence
          if (listPosition1 == null || listPosition2 == null) {
            continue;
          }
          // otherwise, find the first common position of item1 and item2 in the sequence
          int index1 = 0;
          int index2 = 0;
          // we do that by the following while loop
          while (index1 < listPosition1.size() && index2 < listPosition2.size()) {
            short position1 = listPosition1.get(index1);
            short position2 = listPosition2.get(index2);
            if (position1 < position2) {
              index1++;
            } else if (position1 > position2) {
              index2++;
            } else {
              // we have found the position, so we add it to the new border and then stop
              // because we do not want to add more than one position for the same
              // sequence in the new border
              ie12Border.add(new Position(sid, position1));
              break;
            }
          }
        }
        if (DEBUG) {
          System.out.println(
              "=== Considering the 2-IE sequence {"
                  + item1
                  + ","
                  + item2
                  + "} with support "
                  + support);
          System.out.println("  Border of {" + item1 + "," + item2 + "}");
          for (Position pos : ie12Border) {
            System.out.println("    seq: " + pos.sid + " itemset: " + pos.position);
          }
        }
        // finally, we create the prefix for the pattern {item1, item2}
        Prefix prefix = new Prefix();
        List<Integer> itemset = new ArrayList<Integer>(2);
        itemset.add(item1);
        itemset.add(item2);
        prefix.itemsets.add(itemset);
        // save the pattern
        savePattern(prefix, support);
        // perform recursive call to extend that pattern
        // (false, to allow I-extension)
        genPatterns(prefix, ie12Border, frequentItems, frequentItems, item2, false);
      }
    }
  }

  // Record the maximum memory usage
  MemoryLogger.getInstance().checkMemory();
  writer.close();
}
@Override
public BinaryImage clone() {
  return new BinaryImage((BitSet) bits_.clone(), width_);
}
/** Variant of {@link #trimFields(RelNode, BitSet, Set)} for {@link JoinRel}. */
public TrimResult trimFields(JoinRel join, BitSet fieldsUsed, Set<RelDataTypeField> extraFields) {
  final RelDataType rowType = join.getRowType();
  final int fieldCount = rowType.getFieldCount();
  final RexNode conditionExpr = join.getCondition();
  final int systemFieldCount = join.getSystemFieldList().size();

  // Add in fields used in the condition.
  BitSet fieldsUsedPlus = (BitSet) fieldsUsed.clone();
  final Set<RelDataTypeField> combinedInputExtraFields =
      new LinkedHashSet<RelDataTypeField>(extraFields);
  RelOptUtil.InputFinder inputFinder =
      new RelOptUtil.InputFinder(fieldsUsedPlus, combinedInputExtraFields);
  conditionExpr.accept(inputFinder);

  // If no system fields are used, we can remove them.
  int systemFieldUsedCount = 0;
  for (int i = 0; i < systemFieldCount; ++i) {
    if (fieldsUsed.get(i)) {
      ++systemFieldUsedCount;
    }
  }
  final int newSystemFieldCount;
  if (systemFieldUsedCount == 0) {
    newSystemFieldCount = 0;
  } else {
    newSystemFieldCount = systemFieldCount;
  }

  int offset = systemFieldCount;
  int changeCount = 0;
  int newFieldCount = newSystemFieldCount;
  List<RelNode> newInputs = new ArrayList<RelNode>(2);
  List<Mapping> inputMappings = new ArrayList<Mapping>();
  List<Integer> inputExtraFieldCounts = new ArrayList<Integer>();
  for (RelNode input : join.getInputs()) {
    final RelDataType inputRowType = input.getRowType();
    final int inputFieldCount = inputRowType.getFieldCount();

    // Compute required mapping.
    BitSet inputFieldsUsed = new BitSet(inputFieldCount);
    for (int bit : Util.toIter(fieldsUsedPlus)) {
      if (bit >= offset && bit < offset + inputFieldCount) {
        inputFieldsUsed.set(bit - offset);
      }
    }

    // If there are system fields, we automatically use the
    // corresponding field in each input.
    if (newSystemFieldCount > 0) {
      // calling with newSystemFieldCount == 0 should be safe but hits
      // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6222207
      inputFieldsUsed.set(0, newSystemFieldCount);
    }

    // FIXME: We ought to collect extra fields for each input
    // individually. For now, we assume that just one input has
    // on-demand fields.
    Set<RelDataTypeField> inputExtraFields =
        input.getRowType().getField("_extra") == null
            ? Collections.<RelDataTypeField>emptySet()
            : combinedInputExtraFields;
    inputExtraFieldCounts.add(inputExtraFields.size());
    TrimResult trimResult = trimChild(join, input, inputFieldsUsed, inputExtraFields);
    newInputs.add(trimResult.left);
    if (trimResult.left != input) {
      ++changeCount;
    }
    final Mapping inputMapping = trimResult.right;
    inputMappings.add(inputMapping);

    // Move offset to point to start of next input.
    offset += inputFieldCount;
    newFieldCount += inputMapping.getTargetCount() + inputExtraFields.size();
  }

  Mapping mapping = Mappings.create(MappingType.InverseSurjection, fieldCount, newFieldCount);
  for (int i = 0; i < newSystemFieldCount; ++i) {
    mapping.set(i, i);
  }
  offset = systemFieldCount;
  int newOffset = newSystemFieldCount;
  for (int i = 0; i < inputMappings.size(); i++) {
    Mapping inputMapping = inputMappings.get(i);
    for (IntPair pair : inputMapping) {
      mapping.set(pair.source + offset, pair.target + newOffset);
    }
    offset += inputMapping.getSourceCount();
    newOffset += inputMapping.getTargetCount() + inputExtraFieldCounts.get(i);
  }

  if (changeCount == 0 && mapping.isIdentity()) {
    return new TrimResult(join, Mappings.createIdentity(fieldCount));
  }

  // Build new join.
  final RexVisitor<RexNode> shuttle =
      new RexPermuteInputsShuttle(mapping, newInputs.get(0), newInputs.get(1));
  RexNode newConditionExpr = conditionExpr.accept(shuttle);
  final JoinRel newJoin =
      join.copy(join.getTraitSet(), newConditionExpr, newInputs.get(0), newInputs.get(1));
  return new TrimResult(newJoin, mapping);
}
/**
 * Convenience function to obtain a value from a data structure.
 *
 * @param map - the data structure.
 * @param key - the key for the said data structure.
 * @return the bitset contained at map.get(key) or an empty bitset
 */
private <K> BitSet get(Map<K, BitSet> map, K key) {
  BitSet set = map.get(key);
  if (set == null) return new BitSet();
  return (BitSet) set.clone();
}
/**
 * Get the scripts found in the identifiers.
 *
 * @return the set of explicit scripts.
 * @internal
 * @deprecated This API is ICU internal only.
 */
@Deprecated
public BitSet getScripts() {
  return (BitSet) requiredScripts.clone();
}
/**
 * Compute reachability/until probabilities, i.e. compute the min/max probability of reaching
 * a state in {@code target}, while remaining in those in {@code remain}.
 *
 * @param stpg The STPG
 * @param remain Remain in these states (optional: null means "all")
 * @param target Target states
 * @param min1 Min or max probabilities for player 1 (true=lower bound, false=upper bound)
 * @param min2 Min or max probabilities for player 2 (true=min, false=max)
 * @param init Optionally, an initial solution vector (may be overwritten)
 * @param known Optionally, a set of states for which the exact answer is known. Note: if
 *     'known' is specified (i.e. is non-null), 'init' must also be given and is used for the
 *     exact values.
 */
public ModelCheckerResult computeReachProbs(
    STPG stpg, BitSet remain, BitSet target, boolean min1, boolean min2, double init[],
    BitSet known)
    throws PrismException {
  ModelCheckerResult res = null;
  BitSet no, yes;
  int i, n, numYes, numNo;
  long timer, timerProb0, timerProb1;
  boolean genAdv;

  // Check for some unsupported combinations
  if (solnMethod == SolnMethod.VALUE_ITERATION
      && valIterDir == ValIterDir.ABOVE
      && !(precomp && prob0)) {
    throw new PrismException(
        "Precomputation (Prob0) must be enabled for value iteration from above");
  }

  // Are we generating an optimal adversary?
  genAdv = exportAdv;

  // Start probabilistic reachability
  timer = System.currentTimeMillis();
  if (verbosity >= 1) mainLog.println("\nStarting probabilistic reachability...");

  // Check for deadlocks in non-target states (because they break e.g. prob1)
  stpg.checkForDeadlocks(target);

  // Store num states
  n = stpg.getNumStates();

  // Optimise by enlarging target set (if more info is available)
  if (init != null && known != null) {
    BitSet targetNew = new BitSet(n);
    for (i = 0; i < n; i++) {
      targetNew.set(i, target.get(i) || (known.get(i) && init[i] == 1.0));
    }
    target = targetNew;
  }

  // Precomputation
  timerProb0 = System.currentTimeMillis();
  if (precomp && prob0) {
    no = prob0(stpg, remain, target, min1, min2);
  } else {
    no = new BitSet();
  }
  timerProb0 = System.currentTimeMillis() - timerProb0;
  timerProb1 = System.currentTimeMillis();
  if (precomp && prob1 && !genAdv) {
    yes = prob1(stpg, remain, target, min1, min2);
  } else {
    yes = (BitSet) target.clone();
  }
  timerProb1 = System.currentTimeMillis() - timerProb1;

  // Print results of precomputation
  numYes = yes.cardinality();
  numNo = no.cardinality();
  if (verbosity >= 1)
    mainLog.println(
        "target=" + target.cardinality() + ", yes=" + numYes + ", no=" + numNo + ", maybe="
            + (n - (numYes + numNo)));

  // Compute probabilities
  switch (solnMethod) {
    case VALUE_ITERATION:
      res = computeReachProbsValIter(stpg, no, yes, min1, min2, init, known);
      break;
    case GAUSS_SEIDEL:
      res = computeReachProbsGaussSeidel(stpg, no, yes, min1, min2, init, known);
      break;
    default:
      throw new PrismException("Unknown STPG solution method " + solnMethod);
  }

  // Finished probabilistic reachability
  timer = System.currentTimeMillis() - timer;
  if (verbosity >= 1)
    mainLog.println("Probabilistic reachability took " + timer / 1000.0 + " seconds.");

  // Update time taken
  res.timeTaken = timer / 1000.0;
  res.timeProb0 = timerProb0 / 1000.0;
  res.timePre = (timerProb0 + timerProb1) / 1000.0;
  return res;
}
private BitSetCover calculateCoverRecursively(
    int indexNextCandidate, BitSet visited, double accumulatedWeight) {
  // Check memoization table
  if (memo.containsKey(visited)) {
    return memo.get(visited).copy(); // Cache hit
  }

  // Find the next unvisited vertex WITH neighbors (if a vertex has no neighbors, then we
  // don't need to select it because it doesn't cover any edges)
  int indexNextVertex = -1;
  Set<V> neighbors = Collections.emptySet();
  for (int index = visited.nextClearBit(indexNextCandidate);
      index >= 0 && index < N;
      index = visited.nextClearBit(index + 1)) {
    neighbors = new LinkedHashSet<>(neighborIndex.neighborsOf(vertices.get(index)));
    for (Iterator<V> it = neighbors.iterator(); it.hasNext(); ) // Exclude all visited vertices
      if (visited.get(vertexIDDictionary.get(it.next()))) it.remove();
    if (!neighbors.isEmpty()) {
      indexNextVertex = index;
      break;
    }
  }

  // Base case 1: all vertices have been visited
  if (indexNextVertex == -1) {
    // We've visited all vertices, return the base case
    BitSetCover vertexCover = new BitSetCover(N, 0);
    if (accumulatedWeight <= upperBoundOnVertexCoverWeight) {
      // Found a new solution that matches our bound. Tighten the bound.
      upperBoundOnVertexCoverWeight = accumulatedWeight - 1;
    }
    return vertexCover;

    // Base case 2 (pruning): this vertex cover can never be better than the best cover we
    // already have. Return a cover with a large weight, such that the other branch will be
    // preferred over this branch.
  } else if (accumulatedWeight >= upperBoundOnVertexCoverWeight) {
    return new BitSetCover(N, N);
  }

  // Recursion
  // TODO JK: Can we use a lower bound or estimation which of these 2 branches produces a
  // better solution? If one of them is more likely to produce a better solution, then that
  // branch should be explored first! Furthermore, if the lower bound + accumulated cost >
  // upperBoundOnVertexCoverWeight, then we may prune.

  // Create 2 branches (N(v) denotes the set of neighbors of v. G_{v} indicates the graph
  // obtained by removing vertex v and all vertices incident to it.):

  // Right branch (N(v) are added to the cover, and we solve for G_{N(v) \cup v}.):
  BitSet visitedRightBranch = (BitSet) visited.clone();
  visitedRightBranch.set(indexNextVertex);
  for (V v : neighbors) visitedRightBranch.set(vertexIDDictionary.get(v));

  double weight = this.getWeight(neighbors);
  BitSetCover rightCover =
      calculateCoverRecursively(
          indexNextVertex + 1, visitedRightBranch, accumulatedWeight + weight);
  rightCover.addAllVertices(
      neighbors.stream().mapToInt(vertexIDDictionary::get).boxed().collect(Collectors.toList()),
      weight);

  // Left branch (vertex v is added to the cover, and we solve for G_{v}):
  BitSet visitedLeftBranch = (BitSet) visited.clone();
  visitedLeftBranch.set(indexNextVertex);

  weight = vertexWeightMap.get(vertices.get(indexNextVertex));
  BitSetCover leftCover =
      calculateCoverRecursively(
          indexNextVertex + 1, visitedLeftBranch, accumulatedWeight + weight);
  leftCover.addVertex(indexNextVertex, weight); // Delayed update of the left cover

  // Return the best branch
  if (leftCover.weight <= rightCover.weight) {
    memo.put(visited, leftCover.copy());
    return leftCover;
  } else {
    memo.put(visited, rightCover.copy());
    return rightCover;
  }
}
/**
 * Find out which scripts are in common among the alternates.
 *
 * @return the set of scripts that are in common among the alternates.
 * @internal
 * @deprecated This API is ICU internal only.
 */
@Deprecated
public BitSet getCommonAmongAlternates() {
  return (BitSet) commonAmongAlternates.clone();
}