    /**
     * Returns statistical information about the sequence.
     *
     * <p>In the presence of multiple threads or processes accessing an
     * active sequence, the information returned by this method may be
     * out-of-date.
     *
     * <p>The getStats method cannot be transaction-protected.  For this
     * reason, it should be called in a thread of control that has no open
     * cursors or active transactions.
     *
     * @param config The statistics attributes; if null, default attributes
     * are used.
     *
     * @return Sequence statistics.
     *
     * @throws SequenceIntegrityException if the sequence record has been
     * deleted.
     */
    public SequenceStats getStats(StatsConfig config)
        throws DatabaseException {

        if (config == null) {
            config = StatsConfig.DEFAULT;
        }

        if (!config.getFast()) {

            /*
             * storedValue may have been updated by another handle since it
             * was last read by this handle.  Fetch the last written value.
             * READ_UNCOMMITTED must be used to avoid lock conflicts.
             */
            Cursor cursor = db.openCursor(null, null);
            try {
                readDataRequired(cursor, LockMode.READ_UNCOMMITTED);
            } finally {
                cursor.close();
            }
        }

        StatGroup stats = new StatGroup(SequenceStatDefinition.GROUP_NAME,
                                        SequenceStatDefinition.GROUP_DESC);
        new IntStat(stats, SEQUENCE_GETS, nGets);
        new IntStat(stats, SEQUENCE_CACHED_GETS, nCachedGets);
        new IntStat(stats, SEQUENCE_CACHE_SIZE, cacheSize);
        new LongStat(stats, SEQUENCE_STORED_VALUE, storedValue);
        new LongStat(stats, SEQUENCE_CACHE_VALUE, cacheValue);
        new LongStat(stats, SEQUENCE_CACHE_LAST, cacheLast);
        new LongStat(stats, SEQUENCE_RANGE_MIN, rangeMin);
        new LongStat(stats, SEQUENCE_RANGE_MAX, rangeMax);

        SequenceStats seqStats = new SequenceStats(stats);

        if (config.getClear()) {
            nGets = 0;
            nCachedGets = 0;
        }

        return seqStats;
    }
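    /*
     * Illustrative usage sketch (an assumption for documentation purposes,
     * not part of this class): reading sequence statistics through the
     * public API.  The variables "db" and "seq" and the key string
     * "stats-example" are made up for the example.
     *
     *   SequenceConfig seqConfig = new SequenceConfig();
     *   seqConfig.setAllowCreate(true);
     *   Sequence seq = db.openSequence(
     *       null, new DatabaseEntry("stats-example".getBytes()), seqConfig);
     *
     *   StatsConfig statsConfig = new StatsConfig();
     *   statsConfig.setFast(true);   // skip the READ_UNCOMMITTED re-read
     *   statsConfig.setClear(true);  // reset nGets/nCachedGets after reading
     *   System.out.println(seq.getStats(statsConfig));
     *
     *   seq.close();
     */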
    /**
     * Opens a sequence handle, adding the sequence record if appropriate.
     *
     * @throws IllegalArgumentException via Database.openSequence.
     *
     * @throws IllegalStateException via Database.openSequence.
     */
    Sequence(Database db,
             Transaction txn,
             DatabaseEntry key,
             SequenceConfig config)
        throws SequenceNotFoundException, SequenceExistsException {

        if (db.getDatabaseImpl().getSortedDuplicates()) {
            throw new UnsupportedOperationException
                ("Sequences not supported in databases configured for " +
                 "duplicates");
        }

        SequenceConfig useConfig = (config != null) ?
            config : SequenceConfig.DEFAULT;

        if (useConfig.getRangeMin() >= useConfig.getRangeMax()) {
            throw new IllegalArgumentException
                ("Minimum sequence value must be less than the maximum");
        }

        if (useConfig.getInitialValue() > useConfig.getRangeMax() ||
            useConfig.getInitialValue() < useConfig.getRangeMin()) {
            throw new IllegalArgumentException
                ("Initial sequence value is out of range");
        }

        if (useConfig.getRangeMin() >
            useConfig.getRangeMax() - useConfig.getCacheSize()) {
            throw new IllegalArgumentException
                ("The cache size is larger than the sequence range");
        }

        if (useConfig.getAutoCommitNoSync()) {
            autoCommitConfig =
                DbInternal.getDefaultTxnConfig(db.getEnvironment()).clone();
            autoCommitConfig.overrideDurability(Durability.COMMIT_NO_SYNC);
        } else {
            /* Use the environment's default transaction config. */
            autoCommitConfig = null;
        }

        this.db = db;
        this.key = copyEntry(key);
        logger = db.getEnvironment().getEnvironmentImpl().getLogger();

        /* Perform an auto-commit transaction to create the sequence. */
        Locker locker = null;
        Cursor cursor = null;
        OperationStatus status = OperationStatus.NOTFOUND;
        try {
            locker = LockerFactory.getReadableLocker(
                db.getEnvironment(), txn, db.isTransactional(),
                false /*retainNonTxnLocks*/,
                false /*readCommittedIsolation*/);

            cursor = new Cursor(db, locker, null);

            boolean sequenceExists = readData(cursor, null);

            boolean isWritableLocker = !db.getConfig().getTransactional() ||
                (locker.isTransactional() &&
                 !DbInternal.getEnvironmentImpl(db.getEnvironment()).
                 isReplicated());

            if (sequenceExists) {
                if (useConfig.getAllowCreate() &&
                    useConfig.getExclusiveCreate()) {
                    throw new SequenceExistsException
                        ("ExclusiveCreate=true and the sequence record " +
                         "already exists.");
                }
            } else {
                if (useConfig.getAllowCreate()) {
                    if (!isWritableLocker) {
                        if (cursor != null) {
                            cursor.close();
                        }
                        locker.operationEnd(OperationStatus.SUCCESS);
                        locker = LockerFactory.getWritableLocker(
                            db.getEnvironment(), txn, db.isTransactional(),
                            false /*retainNonTxnLocks*/,
                            db.getDatabaseImpl().isReplicated(),
                            autoCommitConfig);
                        cursor = new Cursor(db, locker, null);
                    }

                    /* Get the persistent fields from the config. */
                    rangeMin = useConfig.getRangeMin();
                    rangeMax = useConfig.getRangeMax();
                    increment = !useConfig.getDecrement();
                    wrapAllowed = useConfig.getWrap();
                    storedValue = useConfig.getInitialValue();

                    /*
                     * To avoid dependence on SerializableIsolation, try
                     * putNoOverwrite first.  If it fails, then try to get
                     * an existing record.
                     */
                    status = cursor.putNoOverwrite(key, makeData());

                    if (!readData(cursor, null)) {
                        /* A retry loop should be performed here. */
                        throw new IllegalStateException
                            ("Sequence record removed during openSequence.");
                    }
                    status = OperationStatus.SUCCESS;
                } else {
                    throw new SequenceNotFoundException
                        ("AllowCreate=false and the sequence record " +
                         "does not exist.");
                }
            }
        } finally {
            if (cursor != null) {
                cursor.close();
            }
            if (locker != null) {
                locker.operationEnd(status);
            }
        }

        /*
         * cacheLast is initialized such that the cache will be considered
         * empty the first time get() is called.
         */
        cacheSize = useConfig.getCacheSize();
        cacheValue = storedValue;
        cacheLast = increment ? (storedValue - 1) : (storedValue + 1);
    }
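    /*
     * Illustrative usage sketch (an assumption for documentation purposes,
     * not part of this class): opening a sequence via Database.openSequence,
     * which invokes this constructor.  The settings below satisfy the
     * validation above: rangeMin < rangeMax, the initial value lies within
     * the range, and the cache size fits inside the range.  The variable
     * "db" and the key string "order-ids" are made up for the example.
     *
     *   SequenceConfig seqConfig = new SequenceConfig();
     *   seqConfig.setAllowCreate(true);
     *   seqConfig.setRange(1, 1000000);       // rangeMin < rangeMax
     *   seqConfig.setInitialValue(1);         // within [rangeMin, rangeMax]
     *   seqConfig.setCacheSize(100);          // smaller than the range
     *   seqConfig.setAutoCommitNoSync(true);  // selects COMMIT_NO_SYNC above
     *
     *   Sequence seq = db.openSequence(
     *       null, new DatabaseEntry("order-ids".getBytes()), seqConfig);
     */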
    /**
     * Returns the next available element in the sequence and changes the
     * sequence value by <code>delta</code>.  The value of
     * <code>delta</code> must be greater than zero.  If there are enough
     * cached values in the sequence handle then they will be returned.
     * Otherwise the next value will be fetched from the database and
     * incremented (decremented) by enough to cover the <code>delta</code>
     * and the next batch of cached values.
     *
     * <p>This method is synchronized to protect updating of the cached
     * value, since multiple threads may share a single handle.  Multiple
     * handles for the same database/key may be used to increase
     * concurrency.
     *
     * <p>The <code>txn</code> handle must be null if the sequence handle
     * was opened with a non-zero cache size.
     *
     * <p>For maximum concurrency, a non-zero cache size should be
     * specified prior to opening the sequence handle, the <code>txn</code>
     * handle should be <code>null</code>, and {@link
     * com.sleepycat.je.SequenceConfig#setAutoCommitNoSync
     * SequenceConfig.setAutoCommitNoSync} should be called to disable log
     * flushes.
     *
     * @param txn For a transactional database, an explicit transaction may
     * be specified, or null may be specified to use auto-commit.  For a
     * non-transactional database, null must be specified.
     *
     * @param delta the amount by which to increment or decrement the
     * sequence
     *
     * @return the next available element in the sequence
     *
     * @throws SequenceOverflowException if the end of the sequence is
     * reached and wrapping is not configured.
     *
     * @throws SequenceIntegrityException if the sequence record has been
     * deleted.
     *
     * @throws OperationFailureException if one of the <a
     * href="../je/OperationFailureException.html#writeFailures">Write
     * Operation Failures</a> occurs.
     *
     * @throws EnvironmentFailureException if an unexpected, internal or
     * environment-wide failure occurs.
     *
     * @throws IllegalArgumentException if the delta is less than or equal
     * to zero, or larger than the size of the sequence's range.
     */
    public synchronized long get(Transaction txn, int delta)
        throws DatabaseException {

        /* Check parameters, being careful of overflow. */
        if (delta <= 0) {
            throw new IllegalArgumentException
                ("Sequence delta must be greater than zero");
        }
        if (rangeMin > rangeMax - delta) {
            throw new IllegalArgumentException
                ("Sequence delta is larger than the range");
        }

        /* Status variables for tracing. */
        boolean cached = true;
        boolean wrapped = false;

        /*
         * Determine whether we have exceeded the cache.  The cache size is
         * always <= Integer.MAX_VALUE, so we don't have to worry about
         * overflow here as long as we subtract the two long values first.
         */
        if ((increment && delta > ((cacheLast - cacheValue) + 1)) ||
            (!increment && delta > ((cacheValue - cacheLast) + 1))) {

            cached = false;

            /*
             * We need to allocate delta or cacheSize values, whichever is
             * larger, by incrementing or decrementing the stored value by
             * adjust.
             */
            int adjust = (delta > cacheSize) ? delta : cacheSize;

            /* Perform an auto-commit transaction to update the sequence. */
            Locker locker = null;
            Cursor cursor = null;
            OperationStatus status = OperationStatus.NOTFOUND;
            try {
                locker = LockerFactory.getWritableLocker(
                    db.getEnvironment(),
                    txn,
                    db.isTransactional(),
                    false,                               // retainNonTxnLocks
                    db.getDatabaseImpl().isReplicated(), // autoTxnIsReplicated
                    autoCommitConfig);

                cursor = new Cursor(db, locker, null);

                /* Get the existing record. */
                readDataRequired(cursor, LockMode.RMW);

                /* If we would have wrapped when not allowed, overflow. */
                if (overflow) {
                    throw new SequenceOverflowException
                        ("Sequence overflow " + storedValue);
                }

                /*
                 * Handle wrapping.  The range size can be larger than a
                 * long can hold, so to avoid arithmetic overflow we use
                 * BigInteger arithmetic.  Since we are going to write, the
                 * BigInteger overhead is acceptable.
                 */
                BigInteger availBig;
                if (increment) {
                    /* Available amount: rangeMax - storedValue */
                    availBig = BigInteger.valueOf(rangeMax).
                        subtract(BigInteger.valueOf(storedValue));
                } else {
                    /* Available amount: storedValue - rangeMin */
                    availBig = BigInteger.valueOf(storedValue).
                        subtract(BigInteger.valueOf(rangeMin));
                }

                if (availBig.compareTo(BigInteger.valueOf(adjust)) < 0) {
                    /* If availBig < adjust then availBig fits in an int. */
                    int availInt = (int) availBig.longValue();
                    if (availInt < delta) {
                        if (wrapAllowed) {
                            /* Wrap to the opposite range end point. */
                            storedValue = increment ? rangeMin : rangeMax;
                            wrapped = true;
                        } else {
                            /* Signal an overflow next time. */
                            overflow = true;
                            adjust = 0;
                        }
                    } else {
                        /*
                         * If the delta fits in the cache available, don't
                         * wrap just to allocate the full cacheSize;
                         * instead, allocate as much as is available.
                         */
                        adjust = availInt;
                    }
                }

                /* Negate the adjustment for decrementing. */
                if (!increment) {
                    adjust = -adjust;
                }

                /* Set the stored value one past the cached amount. */
                storedValue += adjust;

                /* Write the new stored value. */
                cursor.put(key, makeData());

                status = OperationStatus.SUCCESS;
            } finally {
                if (cursor != null) {
                    cursor.close();
                }
                if (locker != null) {
                    locker.operationEnd(status);
                }
            }

            /* The cache now contains the range: [cacheValue, storedValue) */
            cacheValue = storedValue - adjust;
            cacheLast = storedValue + (increment ? (-1) : 1);
        }

        /* Return the current value and increment/decrement it by delta. */
        long retVal = cacheValue;
        if (increment) {
            cacheValue += delta;
        } else {
            cacheValue -= delta;
        }

        /* Increment stats. */
        nGets += 1;
        if (cached) {
            nCachedGets += 1;
        }

        /* Trace this method at the FINEST level. */
        if (logger.isLoggable(Level.FINEST)) {
            LoggerUtils.finest(logger,
                               db.getEnvironment().getEnvironmentImpl(),
                               "Sequence.get" +
                               " value=" + retVal +
                               " cached=" + cached +
                               " wrapped=" + wrapped);
        }

        return retVal;
    }
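    /*
     * Illustrative usage sketch (an assumption for documentation purposes,
     * not part of this class): allocating values with get().  With a
     * non-zero cache size and a null txn, most calls are served from the
     * handle's cache and only every cacheSize-th call updates the database
     * record.  The variable "seq" is assumed to be a handle opened as in
     * the constructor example above.
     *
     *   for (int i = 0; i < 10; i += 1) {
     *       long id = seq.get(null, 1);   // delta must be greater than zero
     *       System.out.println("allocated id " + id);
     *   }
     *
     *   // Reserve a batch of 50 values in one call; the return value is
     *   // the first value of the batch.
     *   long firstOfBatch = seq.get(null, 50);
     */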