public int doNestedTermConjunctions( IndexSearcher s, int termsInIndex, int maxOuterClauses, int maxClauses, int iter) throws IOException { int ret = 0; long nMatches = 0; for (int i = 0; i < iter; i++) { int oClauses = r.nextInt(maxOuterClauses - 1) + 2; BooleanQuery oq = new BooleanQuery(); for (int o = 0; o < oClauses; o++) { int nClauses = r.nextInt(maxClauses - 1) + 2; // min 2 clauses BooleanQuery bq = new BooleanQuery(); BitSet termflag = new BitSet(termsInIndex); for (int j = 0; j < nClauses; j++) { int tnum; // don't pick same clause twice tnum = r.nextInt(termsInIndex); if (termflag.get(tnum)) tnum = termflag.nextClearBit(tnum); if (tnum < 0 || tnum >= 25) tnum = termflag.nextClearBit(0); termflag.set(tnum); Query tq = new TermQuery(terms[tnum]); bq.add(tq, BooleanClause.Occur.MUST); } // inner oq.add(bq, BooleanClause.Occur.MUST); } // outer CountingHitCollector hc = new CountingHitCollector(); s.search(oq, hc); nMatches += hc.getCount(); ret += hc.getSum(); } System.out.println("Average number of matches=" + (nMatches / iter)); return ret; }
/**
 * Serializes this part into the given XML element: part-level attributes (id, title,
 * instrument) plus one {@code <track>} child per enabled track, with optional
 * transpose/volumeAdjust overrides and, for percussion instruments, the per-track
 * enabled/disabled drum-note sets and drum map.
 *
 * @param ele element to populate; all children are created from its owner document
 */
public void saveToXml(Element ele) {
  Document doc = ele.getOwnerDocument();
  ele.setAttribute("id", String.valueOf(partNumber));
  SaveUtil.appendChildTextElement(ele, "title", String.valueOf(title));
  SaveUtil.appendChildTextElement(ele, "instrument", String.valueOf(instrument));
  for (int t = 0; t < getTrackCount(); t++) {
    // Disabled tracks are not persisted at all.
    if (!isTrackEnabled(t)) continue;

    TrackInfo trackInfo = abcSong.getSequenceInfo().getTrackInfo(t);
    Element trackEle = (Element) ele.appendChild(doc.createElement("track"));
    trackEle.setAttribute("id", String.valueOf(t));
    if (trackInfo.hasName()) trackEle.setAttribute("name", trackInfo.getName());
    // Only non-default adjustments are written, keeping the saved file compact.
    if (trackTranspose[t] != 0)
      SaveUtil.appendChildTextElement(trackEle, "transpose", String.valueOf(trackTranspose[t]));
    if (trackVolumeAdjust[t] != 0)
      SaveUtil.appendChildTextElement(
          trackEle, "volumeAdjust", String.valueOf(trackVolumeAdjust[t]));
    if (instrument.isPercussion) {
      // Cowbell parts keep their own enabled-note sets, separate from regular drum parts.
      BitSet[] enabledSetByTrack = isCowbellPart() ? cowbellsEnabled : drumsEnabled;
      BitSet enabledSet = (enabledSetByTrack == null) ? null : enabledSetByTrack[t];
      if (enabledSet != null) {
        Element drumsEnabledEle = ele.getOwnerDocument().createElement("drumsEnabled");
        trackEle.appendChild(drumsEnabledEle);
        if (isCowbellPart()) {
          // Cowbell default is "disabled", so only the enabled notes are listed.
          drumsEnabledEle.setAttribute("defaultEnabled", String.valueOf(false));
          // Only store the drums that are enabled
          for (int i = enabledSet.nextSetBit(0); i >= 0; i = enabledSet.nextSetBit(i + 1)) {
            Element drumEle = ele.getOwnerDocument().createElement("note");
            drumsEnabledEle.appendChild(drumEle);
            drumEle.setAttribute("id", String.valueOf(i));
            drumEle.setAttribute("isEnabled", String.valueOf(true));
          }
        } else {
          // Drum default is "enabled", so only the disabled notes are listed.
          drumsEnabledEle.setAttribute("defaultEnabled", String.valueOf(true));
          // Only store the drums that are disabled
          for (int i = enabledSet.nextClearBit(0); i >= 0; i = enabledSet.nextClearBit(i + 1)) {
            // nextClearBit never returns -1, so stop once past the valid MIDI note range.
            if (i >= MidiConstants.NOTE_COUNT) break;
            Element drumEle = ele.getOwnerDocument().createElement("note");
            drumsEnabledEle.appendChild(drumEle);
            drumEle.setAttribute("id", String.valueOf(i));
            drumEle.setAttribute("isEnabled", String.valueOf(false));
          }
        }
      }
      if (!isCowbellPart()) {
        if (drumNoteMap[t] != null)
          drumNoteMap[t].saveToXml((Element) trackEle.appendChild(doc.createElement("drumMap")));
      }
    }
  }
}
/** * Splits events of a row if they overlap an island. Islands are areas between the token which are * included in the result. * * @param row * @param graph * @param text * @param startTokenIndex token index of the first token in the match * @param endTokenIndex token index of the last token in the match */ private static void splitRowsOnIslands( Row row, final SDocumentGraph graph, STextualDS text, long startTokenIndex, long endTokenIndex) { BitSet tokenCoverage = new BitSet(); // get the sorted token List<SToken> sortedTokenList = graph.getSortedTokenByText(); // add all token belonging to the right text to the bit set ListIterator<SToken> itToken = sortedTokenList.listIterator(); while (itToken.hasNext()) { SToken t = itToken.next(); if (text == null || text == CommonHelper.getTextualDSForNode(t, graph)) { RelannisNodeFeature feat = (RelannisNodeFeature) t.getFeature(ANNIS_NS, FEAT_RELANNIS_NODE).getValue(); long tokenIndexRaw = feat.getTokenIndex(); tokenIndexRaw = clip(tokenIndexRaw, startTokenIndex, endTokenIndex); int tokenIndex = (int) (tokenIndexRaw - startTokenIndex); tokenCoverage.set(tokenIndex); } } ListIterator<GridEvent> itEvents = row.getEvents().listIterator(); while (itEvents.hasNext()) { GridEvent event = itEvents.next(); BitSet eventBitSet = new BitSet(); eventBitSet.set(event.getLeft(), event.getRight() + 1); // restrict event bitset on the locations where token are present eventBitSet.and(tokenCoverage); // if there is is any 0 bit before the right border there is a break in the event // and we need to split it if (eventBitSet.nextClearBit(event.getLeft()) <= event.getRight()) { // remove the original event row.removeEvent(itEvents); // The event bitset now marks all the locations which the event should // cover. 
// Make a list of new events for each connected range in the bitset int subElement = 0; int offset = eventBitSet.nextSetBit(0); while (offset >= 0) { int end = eventBitSet.nextClearBit(offset) - 1; if (offset < end) { GridEvent newEvent = new GridEvent(event); newEvent.setId(event.getId() + "_islandsplit_" + subElement++); newEvent.setLeft(offset); newEvent.setRight(end); row.addEvent(itEvents, newEvent); } offset = eventBitSet.nextSetBit(end + 1); } } // end if we need to split } }
/** * Remove the occurrence of a given value in a copied slice of array defined by the array part * from [begin, begin+length). * * @param values the input array * @param begin start index of the array to include * @param length number of elements to include from begin * @param removedValue the value to be removed from the sliced array * @return the copy of the sliced array after removing the removedValue */ private static double[] removeAndSlice( final double[] values, final int begin, final int length, final double removedValue) { MathArrays.verifyValues(values, begin, length); final double[] temp; // BitSet(length) to indicate where the removedValue is located final BitSet bits = new BitSet(length); for (int i = begin; i < begin + length; i++) { if (Precision.equalsIncludingNaN(removedValue, values[i])) { bits.set(i - begin); } } // Check if empty then create a new copy if (bits.isEmpty()) { temp = copyOf(values, begin, length); // Nothing removed, just copy } else if (bits.cardinality() == length) { temp = new double[0]; // All removed, just empty } else { // Some removable, so new temp = new double[length - bits.cardinality()]; int start = begin; // start index from source array (i.e values) int dest = 0; // dest index in destination array(i.e temp) int nextOne = -1; // nextOne is the index of bit set of next one int bitSetPtr = 0; // bitSetPtr is start index pointer of bitset while ((nextOne = bits.nextSetBit(bitSetPtr)) != -1) { final int lengthToCopy = nextOne - bitSetPtr; System.arraycopy(values, start, temp, dest, lengthToCopy); dest += lengthToCopy; start = begin + (bitSetPtr = bits.nextClearBit(nextOne)); } // Copy any residue past start index till begin+length if (start < begin + length) { System.arraycopy(values, start, temp, dest, begin + length - start); } } return temp; }
/** Verifies that BitSet.nextClearBit(0) skips a set bit and reports the next clear index. */
@Test
public void testBitSetNextClearBit() throws Exception {
  BitSet bits = new BitSet();
  bits.set(0);
  int firstClear = bits.nextClearBit(0);
  assertThat(firstClear, is(1));
}
/**
 * Registers an item in the item registry, giving ItemBlocks special treatment so they end up
 * sharing the id of their corresponding Block whenever possible.
 *
 * @param item the item to register
 * @param name registry name for the item
 * @param idHint preferred id; replaced for ItemBlocks by the matching/allocated block id
 * @return the id the item was actually registered under
 */
private int registerItem(Item item, String name, int idHint) {
  if (item instanceof ItemBlock) // ItemBlock, adjust id and clear the slot already occupied by the corresponding block
  {
    // field_150939_a is the obfuscated field holding the Block backing this ItemBlock.
    Block block = ((ItemBlock) item).field_150939_a;
    int id = iBlockRegistry.getId(block);
    if (id == -1) // ItemBlock before its Block
    {
      if (idHint < 0
          || availabilityMap.get(idHint)
          || idHint > MAX_BLOCK_ID) // non-suitable id, allocate one in the block id range, add would use the item id range otherwise
      {
        id = availabilityMap.nextClearBit(MIN_BLOCK_ID); // find suitable id here, iItemRegistry would search from MIN_ITEM_ID
        if (id > MAX_BLOCK_ID)
          throw new RuntimeException(
              String.format("Invalid id %d - maximum id range exceeded.", id));
        FMLLog.fine(
            "Allocated id %d for ItemBlock %s in the block id range, original id requested: %d.",
            id, name, idHint);
      } else // idHint is suitable without changes
      {
        id = idHint;
      }
    } else // ItemBlock after its Block
    {
      FMLLog.fine(
          "Found matching Block %s for ItemBlock %s at id %d, original id requested: %d",
          block, item, id, idHint);
      freeSlot(
          id, item); // temporarily free the slot occupied by the Block for the item registration
    }
    idHint = id;
  }
  int itemId = iItemRegistry.add(idHint, name, item, availabilityMap);
  if (item instanceof ItemBlock) // verify
  {
    // The ItemBlock must land exactly on its Block's id; anything else is a registry bug.
    if (itemId != idHint)
      throw new IllegalStateException(
          String.format("ItemBlock at block id %d insertion failed, got id %d.", idHint, itemId));
    verifyItemBlockName((ItemBlock) item);
  }
  // block the Block Registry slot with the same id
  useSlot(itemId);
  return itemId;
}
/**
 * Return the next free dimension ID. Note: you are not guaranteed a contiguous block of free ids.
 * Always call for each individual ID you wish to get.
 *
 * @return the next free dimension ID
 */
public static int getNextFreeDimId() {
  int candidate = 0;
  for (; ; ) {
    candidate = dimensionMap.nextClearBit(candidate);
    if (!dimensions.containsKey(candidate)) {
      return candidate;
    }
    // Registered but not yet marked in the bitmap: claim the slot and keep scanning.
    dimensionMap.set(candidate);
  }
}
/**
 * Allocates and returns the lowest unused id at or above the low-water mark {@code nextMinId},
 * marking it as used.
 *
 * @return the newly allocated id
 * @throws IllegalStateException if the id space has been exhausted (wrapped past
 *     {@code Integer.MAX_VALUE})
 */
@Override
public int nextId() {
  // IDIOM FIX: acquire the lock BEFORE the try block. If lock() itself were to fail inside
  // the try, the finally would call unlock() on a lock this thread does not hold, throwing
  // IllegalMonitorStateException and masking the original failure.
  lock.lock();
  try {
    int id;
    if (nextMinId == Integer.MIN_VALUE) {
      // Sentinel: a previous call allocated Integer.MAX_VALUE, so nextMinId overflowed.
      id = Integer.MIN_VALUE;
    } else {
      id = idList.nextClearBit(nextMinId);
    }
    if (id == Integer.MIN_VALUE) {
      throw new IllegalStateException("All id's are used");
    }
    idList.set(id);
    nextMinId = id + 1;
    return id;
  } finally {
    lock.unlock();
  }
}
/**
 * Allocate a block of memory that will be tracked in the MemoryManager's page table; this is
 * intended for allocating large blocks of memory that will be shared between operators.
 */
public MemoryBlock allocatePage(long size) {
  if (size > MAXIMUM_PAGE_SIZE_BYTES) {
    throw new IllegalArgumentException(
        "Cannot allocate a page with more than " + MAXIMUM_PAGE_SIZE_BYTES + " bytes");
  }
  // Reserve a page number under the lock; the allocation itself happens outside it.
  final int pageNumber;
  synchronized (this) {
    pageNumber = allocatedPages.nextClearBit(0);
    if (pageNumber >= PAGE_TABLE_SIZE) {
      throw new IllegalStateException(
          "Have already allocated a maximum of " + PAGE_TABLE_SIZE + " pages");
    }
    allocatedPages.set(pageNumber);
  }
  final MemoryBlock block = executorMemoryManager.allocate(size);
  block.pageNumber = pageNumber;
  pageTable[pageNumber] = block;
  if (logger.isTraceEnabled()) {
    logger.trace("Allocate page number {} ({} bytes)", pageNumber, size);
  }
  return block;
}
/**
 * Computes the single continuous run of set bits in {@code mask}, if there is exactly one.
 *
 * @param mask the bitmask to inspect
 * @return {@code myContinuousRange} with {@code start} at the first set bit and {@code end} at
 *     the first clear bit after the run, or {@code null} if the mask is empty or contains more
 *     than one run of set bits
 */
@Nullable
private Range calcContinousRange(final BitSet mask) {
  final int lowestByte = mask.nextSetBit(0);
  if (lowestByte < 0) {
    return null; // empty mask: nothing set at all
  }
  // BitSet.nextClearBit never returns -1 and, because the bit at lowestByte is set, the
  // result is strictly greater than lowestByte (hence always > 0). The old
  // "highestByte > 0" check with its PAGE_SIZE fallback branch was therefore dead code
  // and has been removed.
  final int highestByte = mask.nextClearBit(lowestByte);
  if (mask.nextSetBit(highestByte) >= 0) {
    return null; // a second run of set bits exists, so the mask is not one continuous range
  }
  myContinuousRange.start = lowestByte;
  myContinuousRange.end = highestByte;
  return myContinuousRange;
}
/**
 * Row-slice: returns the subset of rows of the frame (asts[1]) selected by asts[2], which may be
 * a number list (inclusion or negative exclusion list, or a strided range), a single row number,
 * or an expression/id evaluating to a one-column predicate vector.
 *
 * @return a ValFrame holding the selected rows
 */
@Override
Val apply(Env env, Env.StackHelp stk, AST asts[]) {
  Frame fr = stk.track(asts[1].exec(env)).getFrame();
  Frame returningFrame;
  long nrows = fr.numRows();
  if (asts[2] instanceof ASTNumList) {
    final ASTNumList nums = (ASTNumList) asts[2];
    // Explicit lists are expanded and sorted; strided ranges keep rows == null and are
    // tested via nums.has() inside the map() below.
    long[] rows = nums._isList ? nums.expand8Sort() : null;
    if (rows != null) {
      if (rows.length == 0) { // Empty inclusion list?
      } else if (rows[0] >= 0) { // Positive (inclusion) list
        // NOTE(review): the guard uses '> nrows' although the message says rows go up to
        // nrows-1 — rows[last] == nrows would slip through; confirm the intended bound.
        if (rows[rows.length - 1] > nrows)
          throw new IllegalArgumentException("Row must be an integer from 0 to " + (nrows - 1));
      } else { // Negative (exclusion) list
        // Invert the list to make a positive list, ignoring out-of-bounds values
        BitSet bs = new BitSet((int) nrows);
        for (int i = 0; i < rows.length; i++) {
          int idx = (int) (-rows[i] - 1); // The positive index
          if (idx >= 0 && idx < nrows) bs.set(idx); // Set column to EXCLUDE
        }
        // Keep every row index whose bit is NOT set (i.e. not excluded).
        rows = new long[(int) nrows - bs.cardinality()];
        for (int i = bs.nextClearBit(0), j = 0; i < nrows; i = bs.nextClearBit(i + 1))
          rows[j++] = i;
      }
    }
    final long[] ls = rows;
    returningFrame =
        new MRTask() {
          @Override
          public void map(Chunk[] cs, NewChunk[] ncs) {
            if (nums.cnt() == 0) return;
            long start = cs[0].start();
            long end = start + cs[0]._len;
            long min = ls == null ? (long) nums.min() : ls[0],
                max =
                    ls == null
                        ? (long) nums.max() - 1
                        : ls[ls.length - 1]; // exclusive max to inclusive max when stride == 1
            //   [ start, ..., end ]   the chunk
            // 1 []                    nums out left:  nums.max() < start
            // 2                 []    nums out rite:  nums.min() > end
            // 3 [ nums ]              nums run left:  nums.min() < start && nums.max() <= end
            // 4   [ nums ]            nums run in  :  start <= nums.min() && nums.max() <= end
            // 5        [ nums ]       nums run rite:  start <= nums.min() && end < nums.max()
            if (!(max < start || min > end)) { // not situation 1 or 2 above
              long startOffset = (min > start ? min : start); // situation 4 and 5 => min > start;
              for (int i = (int) (startOffset - start); i < cs[0]._len; ++i) {
                // Row (start + i) is kept if it is in the stride range or the sorted list.
                if ((ls == null && nums.has(start + i))
                    || (ls != null && Arrays.binarySearch(ls, start + i) >= 0)) {
                  for (int c = 0; c < cs.length; ++c) {
                    if (cs[c] instanceof CStrChunk) ncs[c].addStr(cs[c], i); // string column
                    else if (cs[c] instanceof C16Chunk) ncs[c].addUUID(cs[c], i); // UUID column
                    else if (cs[c].isNA(i)) ncs[c].addNA();
                    else ncs[c].addNum(cs[c].atd(i));
                  }
                }
              }
            }
          }
        }.doAll(fr.types(), fr).outputFrame(fr.names(), fr.domains());
  } else if ((asts[2] instanceof ASTNum)) {
    // Single row number: delegate to deepSlice with a one-element list.
    long[] rows = new long[] {(long) (((ASTNum) asts[2])._v.getNum())};
    returningFrame = fr.deepSlice(rows, null);
  } else if ((asts[2] instanceof ASTExec) || (asts[2] instanceof ASTId)) {
    // Predicate-based slice: the expression must evaluate to a single boolean Vec.
    Frame predVec = stk.track(asts[2].exec(env)).getFrame();
    if (predVec.numCols() != 1)
      throw new IllegalArgumentException(
          "Conditional Row Slicing Expression evaluated to "
              + predVec.numCols()
              + " columns. Must be a boolean Vec.");
    returningFrame = fr.deepSlice(predVec, null);
  } else
    throw new IllegalArgumentException(
        "Row slicing requires a number-list as the last argument, but found a "
            + asts[2].getClass());
  return new ValFrame(returningFrame);
}
/**
 * Removes empty horizontal space (column ranges where no row has any event) from the grid by
 * shifting later events left and inserting one placeholder "space" event per gap.
 *
 * @param rowsByAnnotation all rows, grouped by annotation layer name
 * @param tokenRow the token row; always counted as occupied so empty token don't create gaps
 */
public static void removeEmptySpace(
    LinkedHashMap<String, ArrayList<Row>> rowsByAnnotation, Row tokenRow) {
  List<Range<Integer>> gaps = new LinkedList<>();

  // Union of the occupancy of every row: a clear bit means no event covers that column.
  BitSet totalOccupancyGrid = new BitSet();
  for (Map.Entry<String, ArrayList<Row>> layer : rowsByAnnotation.entrySet()) {
    for (Row r : layer.getValue()) {
      totalOccupancyGrid.or(r.getOccupancyGridCopy());
    }
  }
  // We always include the token row in the occupancy grid since it is not
  // a gap. Otherwise empty token would trigger gaps if the token list
  // is included in the visualizer output.
  // See https://github.com/korpling/ANNIS/issues/281 for the corresponding
  // bug report.
  if (tokenRow != null) {
    totalOccupancyGrid.or(tokenRow.getOccupancyGridCopy());
  }

  // The Range class can give us the next bit that is not set. Use this
  // to detect gaps. A gap starts from the next non-set bit and goes to
  // the next set bit.
  Range<Integer> gap = Range.closed(-1, totalOccupancyGrid.nextSetBit(0));
  while (true) {
    int gapStart = totalOccupancyGrid.nextClearBit(gap.upperEndpoint() + 1);
    int gapEnd = totalOccupancyGrid.nextSetBit(gapStart);
    if (gapEnd <= 0) {
      // No further set bit: trailing empty space is not treated as a gap.
      break;
    }
    gap = Range.closed(gapStart, gapEnd - 1);
    gaps.add(gap);
  }

  int gapID = 0;
  int totalOffset = 0;
  for (Range<Integer> gRaw : gaps) {
    // adjust the space range itself: earlier gaps have already been collapsed by totalOffset
    Range<Integer> g =
        Range.closed(gRaw.lowerEndpoint() - totalOffset, gRaw.upperEndpoint() - totalOffset);
    int offset = g.upperEndpoint() - g.lowerEndpoint();
    totalOffset += offset;
    for (Entry<String, ArrayList<Row>> rowEntry : rowsByAnnotation.entrySet()) {
      ArrayList<Row> rows = rowEntry.getValue();
      for (Row r : rows) {
        // Iterate over a copy because events are removed and re-added while shifting.
        List<GridEvent> eventsCopy = new LinkedList<>(r.getEvents());
        for (GridEvent e : eventsCopy) {
          // Shift every event that starts at or after the gap's end to the left.
          if (e.getLeft() >= g.upperEndpoint()) {
            r.removeEvent(e);
            e.setLeft(e.getLeft() - offset);
            e.setRight(e.getRight() - offset);
            r.addEvent(e);
          }
        }
        // add a special space event
        String spaceCaption = "";
        if ("tok".equalsIgnoreCase(rowEntry.getKey())) {
          spaceCaption = "(...)";
        }
        GridEvent spaceEvent =
            new GridEvent("gap-" + gapID, g.lowerEndpoint(), g.lowerEndpoint(), spaceCaption);
        spaceEvent.setSpace(true);
        r.addEvent(spaceEvent);
        gapID++;
      }
    }
  }
}
/**
 * Inserts or updates the entry for {@code key}. Small serialized entries
 * (&lt;= smallEntrySize bytes) live in a shared fixed-slot region tracked by
 * {@code smallMap}/{@code usedSet}; larger entries each get their own DirectStore tracked
 * in {@code map}.
 *
 * @param hash precomputed hash of the key
 * @param ifPresent when true, only write if the key already exists
 * @param ifAbsent when true, only write if the key does not exist
 */
synchronized void put(long hash, K key, V value, boolean ifPresent, boolean ifAbsent) {
  // search for the previous entry
  int h = smallMap.startSearch(hash);
  boolean foundSmall = false, foundLarge = false;
  while (true) {
    int pos = smallMap.nextPos();
    if (pos < 0) {
      // Not found among the small entries; check the large-entry map.
      K key2 = key instanceof CharSequence ? (K) key.toString() : key;
      final DirectStore store = map.get(key2);
      if (store == null) {
        if (ifPresent && !ifAbsent) return; // key missing but caller requires presence
        break;
      }
      if (ifAbsent) return; // key present but caller requires absence
      bytes.storePositionAndSize(store, 0, store.size());
      foundLarge = true;
      break;
    } else {
      // NOTE(review): 'store' here must refer to an enclosing field of the segment, not
      // the local declared in the branch above — confirm against the full class.
      bytes.storePositionAndSize(store, pos * smallEntrySize, smallEntrySize);
      K key2 = getKey();
      if (equals(key, key2)) {
        if (ifAbsent && !ifPresent) return;
        foundSmall = true;
        break;
      }
    }
  }
  // Serialize key then value into tmpBytes; the value starts at startOfValuePos.
  tmpBytes.clear();
  if (csKey)
    //noinspection ConstantConditions
    tmpBytes.writeUTFΔ((CharSequence) key);
  else tmpBytes.writeObject(key);
  long startOfValuePos = tmpBytes.position();
  if (bytesMarshallable) ((BytesMarshallable) value).writeMarshallable(tmpBytes);
  else tmpBytes.writeObject(value);
  long size = tmpBytes.position();
  if (size <= smallEntrySize) {
    if (foundSmall) {
      // Overwrite the existing small entry in place.
      bytes.position(0);
      bytes.write(tmpBytes, 0, size);
      return;
    } else if (foundLarge) {
      // Entry shrank from large to small: drop the large copy first.
      remove(hash, key);
    }
    // look for a free spot.
    int position = h & (entriesPerSegment - 1);
    int free = usedSet.nextClearBit(position);
    if (free >= entriesPerSegment) free = usedSet.nextClearBit(0); // wrap the scan around
    if (free < entriesPerSegment) {
      bytes.storePositionAndSize(store, free * smallEntrySize, smallEntrySize);
      bytes.write(tmpBytes, 0, size);
      smallMap.put(h, free);
      usedSet.set(free);
      this.size++;
      return;
    }
    // No free small slot: fall through and store it as a large entry instead.
  }
  if (foundSmall) {
    remove(hash, key);
  } else if (foundLarge) {
    // can it be reused.
    // NOTE(review): this reuse condition looks suspicious — it writes when
    // bytes.capacity() <= size (store smaller than the payload) and 'size' here still
    // includes the serialized key bytes; confirm the intended capacity check.
    if (bytes.capacity() <= size || bytes.capacity() - size < (size >> 3)) {
      bytes.write(tmpBytes, startOfValuePos, size);
      return;
    }
    remove(hash, key);
  }
  // Store as a new large entry: only the value portion is copied off-heap.
  size = size - startOfValuePos;
  DirectStore store = new DirectStore(bmf, size);
  bytes.storePositionAndSize(store, 0, size);
  bytes.write(tmpBytes, startOfValuePos, size);
  K key2 = key instanceof CharSequence ? (K) key.toString() : key;
  map.put(key2, store);
  offHeapUsed += size;
  this.size++;
}
/**
 * Branch-and-bound recursion computing a minimum-weight vertex cover over the vertices at index
 * {@code indexNextCandidate} and above, given the set of already visited vertices.
 *
 * @param indexNextCandidate index of the first vertex that may still be considered
 * @param visited bitset of vertices already decided (in the cover, or removed with it)
 * @param accumulatedWeight total weight of the partial cover built so far
 * @return the best cover found for the remaining subproblem
 */
private BitSetCover calculateCoverRecursively(
    int indexNextCandidate, BitSet visited, double accumulatedWeight) {
  // Check memoization table
  if (memo.containsKey(visited)) {
    return memo.get(visited).copy(); // Cache hit
  }

  // Find the next unvisited vertex WITH neighbors (if a vertex has no neighbors, then we don't
  // need to select it because it doesn't cover any edges)
  int indexNextVertex = -1;
  Set<V> neighbors = Collections.emptySet();
  for (int index = visited.nextClearBit(indexNextCandidate);
      index >= 0 && index < N;
      index = visited.nextClearBit(index + 1)) {
    neighbors = new LinkedHashSet<>(neighborIndex.neighborsOf(vertices.get(index)));
    for (Iterator<V> it = neighbors.iterator(); it.hasNext(); ) // Exclude all visited vertices
    if (visited.get(vertexIDDictionary.get(it.next()))) it.remove();
    if (!neighbors.isEmpty()) {
      indexNextVertex = index;
      break;
    }
  }

  // Base case 1: all vertices have been visited
  if (indexNextVertex == -1) {
    // We've visited all vertices, return the base case
    BitSetCover vertexCover = new BitSetCover(N, 0);
    if (accumulatedWeight <= upperBoundOnVertexCoverWeight) {
      // Found a new solution that matches our bound. Tighten the bound.
      // NOTE(review): subtracting 1 assumes integral weights — confirm for fractional
      // double weights.
      upperBoundOnVertexCoverWeight = accumulatedWeight - 1;
    }
    return vertexCover;
    // Base case 2 (pruning): this vertex cover can never be better than the best cover we already
    // have. Return a cover with a large weight, such that the other branch will be preferred over
    // this branch.
  } else if (accumulatedWeight >= upperBoundOnVertexCoverWeight) {
    return new BitSetCover(N, N);
  }

  // Recursion
  // TODO JK: Can we use a lower bound or estimation which of these 2 branches produces a better
  // solution? If one of them is more likely to produce a better solution,
  // then that branch should be explored first! Futhermore, if the lower bound+accumulated cost >
  // upperBoundOnVertexCoverWeight, then we may prune.

  // Create 2 branches (N(v) denotes the set of neighbors of v. G_{v} indicates the graph obtained
  // by removing vertex v and all vertices incident to it.):

  // Right branch (N(v) are added to the cover, and we solve for G_{N(v) \cup v }.):
  BitSet visitedRightBranch = (BitSet) visited.clone();
  visitedRightBranch.set(indexNextVertex);
  for (V v : neighbors) visitedRightBranch.set(vertexIDDictionary.get(v));
  double weight = this.getWeight(neighbors);
  BitSetCover rightCover =
      calculateCoverRecursively(
          indexNextVertex + 1, visitedRightBranch, accumulatedWeight + weight);
  rightCover.addAllVertices(
      neighbors.stream().mapToInt(vertexIDDictionary::get).boxed().collect(Collectors.toList()),
      weight);

  // Left branch (vertex v is added to the cover, and we solve for G_{v}):
  BitSet visitedLeftBranch = (BitSet) visited.clone();
  visitedLeftBranch.set(indexNextVertex);
  weight = vertexWeightMap.get(vertices.get(indexNextVertex));
  BitSetCover leftCover =
      calculateCoverRecursively(
          indexNextVertex + 1, visitedLeftBranch, accumulatedWeight + weight);
  leftCover.addVertex(indexNextVertex, weight); // Delayed update of the left cover

  // Return the best branch
  if (leftCover.weight <= rightCover.weight) {
    memo.put(visited, leftCover.copy());
    return leftCover;
  } else {
    memo.put(visited, rightCover.copy());
    return rightCover;
  }
}
/** Returns the index of the lowest clear (zero) bit in {@code t}. */
int firstZero(BitSet t) {
  final int lowestClear = t.nextClearBit(0);
  return lowestClear;
}