private void putUpdateStoredBlock(StoredBlock storedBlock, boolean wasUndoable) throws SQLException {
    try {
        PreparedStatement s = conn.get().prepareStatement(
                "INSERT INTO headers(hash, chainWork, height, header, wasUndoable)"
                        + " VALUES(?, ?, ?, ?, ?)");
        // We skip the first 4 bytes because (on prodnet) the minimum target has 4 0-bytes
        byte[] hashBytes = new byte[28];
        System.arraycopy(storedBlock.getHeader().getHash().getBytes(), 4, hashBytes, 0, 28);
        s.setBytes(1, hashBytes);
        s.setBytes(2, storedBlock.getChainWork().toByteArray());
        s.setInt(3, storedBlock.getHeight());
        s.setBytes(4, storedBlock.getHeader().unsafeRimbitSerialize());
        s.setBoolean(5, wasUndoable);
        s.executeUpdate();
        s.close();
    } catch (SQLException e) {
        // It is possible we try to add a duplicate StoredBlock if we upgraded.
        // In that case, we just update the entry to mark it wasUndoable.
        if (!(e.getSQLState().equals(POSTGRES_DUPLICATE_KEY_ERROR_CODE)) || !wasUndoable)
            throw e;
        PreparedStatement s = conn.get().prepareStatement("UPDATE headers SET wasUndoable=? WHERE hash=?");
        s.setBoolean(1, true);
        // We skip the first 4 bytes because (on prodnet) the minimum target has 4 0-bytes
        byte[] hashBytes = new byte[28];
        System.arraycopy(storedBlock.getHeader().getHash().getBytes(), 4, hashBytes, 0, 28);
        s.setBytes(2, hashBytes);
        s.executeUpdate();
        s.close();
    }
}
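// The 28-byte truncation above relies on every valid block hash starting with four
// zero bytes (any hash meeting the minimum proof-of-work target does). A minimal
// sketch of a helper that could centralize this truncation — hypothetical, not part
// of the original source:
private static byte[] truncatedHash(Sha256Hash hash) {
    // Drop the 4 leading zero bytes of the 32-byte hash; store the remaining 28.
    byte[] hashBytes = new byte[28];
    System.arraycopy(hash.getBytes(), 4, hashBytes, 0, 28);
    return hashBytes;
}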
/** Gets the median timestamp of the last 11 blocks. */
private static long getMedianTimestampOfRecentBlocks(StoredBlock storedBlock, BlockStore store)
        throws BlockStoreException {
    long[] timestamps = new long[11];
    int unused = 9;
    timestamps[10] = storedBlock.getHeader().getTimeSeconds();
    // Walk backwards from the given block, filling the array from the end. If the chain is
    // shorter than 11 blocks, 'unused' is left pointing just below the first filled slot.
    while (unused >= 0 && (storedBlock = storedBlock.getPrev(store)) != null)
        timestamps[unused--] = storedBlock.getHeader().getTimeSeconds();
    // Sort only the slots that were actually filled and return their median.
    Arrays.sort(timestamps, unused + 1, 11);
    return timestamps[unused + (11 - unused) / 2];
}
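// Illustrative check (hypothetical, not in the original source) of the median index
// arithmetic above. With a full window 'unused' ends at -1; with only two ancestors
// (three filled slots, indices 8..10) it ends at 7.
public static void main(String[] args) {
    int unused = -1; // all 11 slots filled
    System.out.println(unused + (11 - unused) / 2); // prints 5, the middle of 0..10
    unused = 7;      // only slots 8..10 filled
    System.out.println(unused + (11 - unused) / 2); // prints 9, the middle of 8..10
}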
private synchronized boolean add(Block block, boolean tryConnecting)
        throws BlockStoreException, VerificationException, ScriptException {
    if (System.currentTimeMillis() - statsLastTime > 1000) {
        // More than a second passed since last stats logging.
        log.info("{} blocks per second", statsBlocksAdded);
        statsLastTime = System.currentTimeMillis();
        statsBlocksAdded = 0;
    }
    // We check only the chain head for double adds here to avoid potentially expensive block
    // chain misses.
    if (block.equals(chainHead.getHeader())) {
        // Duplicate add of the block at the top of the chain, can be a natural artifact of the
        // download process.
        return true;
    }

    // Prove the block is internally valid: hash is lower than target, merkle root is correct
    // and so on.
    try {
        block.verify();
    } catch (VerificationException e) {
        log.error("Failed to verify block:", e);
        log.error(block.toString());
        throw e;
    }

    // Try linking it to a place in the currently known blocks.
    StoredBlock storedPrev = blockStore.get(block.getPrevBlockHash());
    if (storedPrev == null) {
        // We can't find the previous block. Probably we are still in the process of downloading
        // the chain and a block was solved whilst we were doing it. We put it to one side and
        // try to connect it later when we have more blocks.
        log.warn("Block does not connect: {}", block.getHashAsString());
        unconnectedBlocks.add(block);
        return false;
    } else {
        // It connects to somewhere on the chain. Not necessarily the top of the best known chain.
        //
        // Create a new StoredBlock from this block. It will throw away the transaction data so
        // when the block goes out of scope we will reclaim the used memory.
        StoredBlock newStoredBlock = storedPrev.build(block);
        checkDifficultyTransitions(storedPrev, newStoredBlock);
        blockStore.put(newStoredBlock);
        // block.transactions may be null here if we received only a header and not a full block.
        // This does not happen currently but might in future if getheaders is implemented.
        connectBlock(newStoredBlock, storedPrev, block.transactions);
    }
    if (tryConnecting) tryConnectingUnconnected();
    statsBlocksAdded++;
    return true;
}
/**
 * Returns an estimate of when the given block will be reached, assuming a perfect 30 second
 * average for each block. This is useful for turning transaction lock times into human readable
 * times. Note that a height in the past will still be estimated, even though the time of solving
 * is actually known (we won't scan backwards through the chain to obtain the right answer).
 */
public Date estimateBlockTime(int height) {
    synchronized (chainHeadLock) {
        long offset = height - chainHead.getHeight();
        long headTime = chainHead.getHeader().getTimeSeconds();
        long estimated = (headTime * 1000) + (1000L * 30L * offset); // 2 blocks per minute
        return new Date(estimated);
    }
}
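// A minimal usage sketch (hypothetical wiring; 'chain' and getBestChainHeight() are
// assumed to be available as in the surrounding API): estimate the wall-clock time at
// which a lock time expressed as a block height, 1000 blocks ahead, will be reached.
Date eta = chain.estimateBlockTime(chain.getBestChainHeight() + 1000);
System.out.println("Lock time expires around: " + eta);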
/** Constructs a BlockChain connected to the given list of listeners (eg, wallets) and a store. */
public AbstractBlockChain(Context context, List<BlockChainListener> listeners, BlockStore blockStore)
        throws BlockStoreException {
    this.blockStore = blockStore;
    chainHead = blockStore.getChainHead();
    log.info("chain head is at height {}:\n{}", chainHead.getHeight(), chainHead.getHeader());
    this.params = context.getParams();
    this.listeners = new CopyOnWriteArrayList<ListenerRegistration<BlockChainListener>>();
    for (BlockChainListener l : listeners) addListener(l, Threading.SAME_THREAD);
}
/**
 * Constructs a BlockChain connected to the given wallet and store. To obtain a {@link Wallet} you
 * can construct one from scratch, or you can deserialize a saved wallet from disk using
 * {@link Wallet#loadFromFile(java.io.File)}.
 *
 * <p>For the store you can use a {@link MemoryBlockStore} if you don't care about saving the
 * downloaded data, or a {@link BoundedOverheadBlockStore} if you'd like to ensure fast startup
 * the next time you run the program.
 */
public BlockChain(NetworkParameters params, Wallet wallet, BlockStore blockStore) {
    try {
        this.blockStore = blockStore;
        chainHead = blockStore.getChainHead();
        log.info("chain head is:\n{}", chainHead.getHeader());
    } catch (BlockStoreException e) {
        throw new RuntimeException(e);
    }
    this.params = params;
    this.wallet = wallet;
}
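// A minimal construction sketch, assuming prodnet parameters and a fresh in-memory
// wallet rather than one loaded from disk (names follow the API referenced in the
// javadoc above; treat the exact factory calls as assumptions):
NetworkParameters params = NetworkParameters.prodNet();
Wallet wallet = new Wallet(params);
BlockStore store = new MemoryBlockStore(params); // nothing persisted between runs
BlockChain chain = new BlockChain(params, wallet, store);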
/**
 * Called as part of connecting a block when the new block results in a different chain having
 * higher total work.
 */
private void handleNewBestChain(StoredBlock newChainHead)
        throws BlockStoreException, VerificationException {
    // This chain has overtaken the one we currently believe is best. Reorganize is required.
    //
    // Firstly, calculate the block at which the chain diverged. We only need to examine the
    // chain from beyond this block to find differences.
    StoredBlock splitPoint = findSplit(newChainHead, chainHead);
    log.info("Re-organize after split at height {}", splitPoint.getHeight());
    log.info("Old chain head: {}", chainHead.getHeader().getHashAsString());
    log.info("New chain head: {}", newChainHead.getHeader().getHashAsString());
    log.info("Split at block: {}", splitPoint.getHeader().getHashAsString());
    // Then build a list of all blocks in the old part of the chain and the new part.
    List<StoredBlock> oldBlocks = getPartialChain(chainHead, splitPoint);
    List<StoredBlock> newBlocks = getPartialChain(newChainHead, splitPoint);
    // Now inform the wallet. This is necessary so the set of currently active transactions (that
    // we can spend) can be updated to take into account the re-organize. We might also have
    // received new coins we didn't have before and our previous spends might have been undone.
    wallet.reorganize(oldBlocks, newBlocks);
    // Update the pointer to the best known block.
    setChainHead(newChainHead);
}
@Override
public void setChainHead(StoredBlock chainHead) throws BlockStoreException {
    final MappedByteBuffer buffer = this.buffer;
    if (buffer == null) throw new BlockStoreException("Store closed");

    lock.lock();
    try {
        lastChainHead = chainHead;
        byte[] headHash = chainHead.getHeader().getHash().getBytes();
        buffer.position(8);
        buffer.put(headHash);
    } finally {
        lock.unlock();
    }
}
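// A hedged sketch of the matching read path: per the write above, the 32-byte chain
// head hash lives at byte offset 8 of the mapped file. Reconstructed for illustration;
// the Sha256Hash(byte[]) constructor is assumed from this codebase's API.
private Sha256Hash readChainHeadHash(MappedByteBuffer buffer) {
    byte[] headHash = new byte[32];
    buffer.position(8);
    buffer.get(headHash);
    return new Sha256Hash(headHash);
}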
public void setChainHead(StoredBlock chainHead) throws BlockStoreException {
    Sha256Hash hash = chainHead.getHeader().getHash();
    this.chainHeadHash = hash;
    this.chainHeadBlock = chainHead;
    maybeConnect();
    try {
        PreparedStatement s =
                conn.get().prepareStatement("UPDATE settings SET value = ? WHERE name = ?");
        s.setString(2, CHAIN_HEAD_SETTING);
        s.setBytes(1, hash.getBytes());
        s.executeUpdate();
        s.close();
    } catch (SQLException ex) {
        throw new BlockStoreException(ex);
    }
}
public void setVerifiedChainHead(StoredBlock chainHead) throws BlockStoreException {
    Sha256Hash hash = chainHead.getHeader().getHash();
    this.verifiedChainHeadHash = hash;
    this.verifiedChainHeadBlock = chainHead;
    maybeConnect();
    try {
        PreparedStatement s =
                conn.get().prepareStatement("UPDATE settings SET value = ? WHERE name = ?");
        s.setString(2, VERIFIED_CHAIN_HEAD_SETTING);
        s.setBytes(1, hash.getBytes());
        s.executeUpdate();
        s.close();
    } catch (SQLException ex) {
        throw new BlockStoreException(ex);
    }
    if (this.chainHeadBlock.getHeight() < chainHead.getHeight()) setChainHead(chainHead);
    removeUndoableBlocksWhereHeightIsLessThan(chainHead.getHeight() - fullStoreDepth);
}
private void connectBlock(StoredBlock newStoredBlock, StoredBlock storedPrev, List<Transaction> newTransactions)
        throws BlockStoreException, VerificationException {
    if (storedPrev.equals(chainHead)) {
        // This block connects to the best known block, it is a normal continuation of the system.
        setChainHead(newStoredBlock);
        log.trace("Chain is now {} blocks high", chainHead.getHeight());
        if (newTransactions != null)
            sendTransactionsToWallet(newStoredBlock, NewBlockType.BEST_CHAIN, newTransactions);
    } else {
        // This block connects to somewhere other than the top of the best known chain. We treat
        // these differently.
        //
        // Note that we send the transactions to the wallet FIRST, even if we're about to
        // re-organize this block to become the new best chain head. This simplifies handling of
        // the re-org in the Wallet class.
        boolean haveNewBestChain = newStoredBlock.moreWorkThan(chainHead);
        if (haveNewBestChain) {
            log.info("Block is causing a re-organize");
        } else {
            StoredBlock splitPoint = findSplit(newStoredBlock, chainHead);
            String splitPointHash =
                    splitPoint != null ? splitPoint.getHeader().getHashAsString() : "?";
            log.info("Block forks the chain at {}, but it did not cause a reorganize:\n{}",
                    splitPointHash, newStoredBlock);
        }

        // We may not have any transactions if we received only a header. That never happens today
        // but will in future when getheaders is used as an optimization.
        if (newTransactions != null) {
            sendTransactionsToWallet(newStoredBlock, NewBlockType.SIDE_CHAIN, newTransactions);
        }

        if (haveNewBestChain) handleNewBestChain(newStoredBlock);
    }
}
@Override
public void put(StoredBlock block) throws BlockStoreException {
    final MappedByteBuffer buffer = this.buffer;
    if (buffer == null) throw new BlockStoreException("Store closed");

    lock.lock();
    try {
        int cursor = getRingCursor(buffer);
        if (cursor == getFileSize()) {
            // Wrapped around.
            cursor = FILE_PROLOGUE_BYTES;
        }
        buffer.position(cursor);
        Sha256Hash hash = block.getHeader().getHash();
        notFoundCache.remove(hash);
        buffer.put(hash.getBytes());
        block.serializeCompact(buffer);
        setRingCursor(buffer, buffer.position());
        blockCache.put(hash, block);
    } finally {
        lock.unlock();
    }
}
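// A hedged sketch of the ring-cursor bookkeeping this method relies on. The cursor's
// offset within the file prologue (here, right after a 4-byte magic, consistent with
// the chain head hash living at offset 8 in setChainHead above) is an assumption for
// illustration, not taken from the original source.
private int getRingCursor(ByteBuffer buffer) {
    int c = buffer.getInt(4); // absolute read; does not disturb buffer.position()
    checkState(c >= FILE_PROLOGUE_BYTES, "Integer overflow");
    return c;
}

private void setRingCursor(ByteBuffer buffer, int newCursor) {
    checkArgument(newCursor >= 0);
    buffer.putInt(4, newCursor); // absolute write; does not disturb buffer.position()
}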
/**
 * Called as part of connecting a block when the new block results in a different chain having
 * higher total work.
 *
 * <p>if (shouldVerifyTransactions) Either newChainHead needs to be in the block store as a
 * FullStoredBlock, or (block != null && block.transactions != null)
 */
private void handleNewBestChain(StoredBlock storedPrev, StoredBlock newChainHead, Block block, boolean expensiveChecks)
        throws BlockStoreException, VerificationException, PrunedException {
    checkState(lock.isHeldByCurrentThread());
    // This chain has overtaken the one we currently believe is best. Reorganize is required.
    //
    // Firstly, calculate the block at which the chain diverged. We only need to examine the
    // chain from beyond this block to find differences.
    StoredBlock head = getChainHead();
    final StoredBlock splitPoint = findSplit(newChainHead, head, blockStore);
    log.info("Re-organize after split at height {}", splitPoint.getHeight());
    log.info("Old chain head: {}", head.getHeader().getHashAsString());
    log.info("New chain head: {}", newChainHead.getHeader().getHashAsString());
    log.info("Split at block: {}", splitPoint.getHeader().getHashAsString());
    // Then build a list of all blocks in the old part of the chain and the new part.
    final LinkedList<StoredBlock> oldBlocks = getPartialChain(head, splitPoint, blockStore);
    final LinkedList<StoredBlock> newBlocks = getPartialChain(newChainHead, splitPoint, blockStore);
    // Disconnect each transaction in the previous main chain that is no longer in the new main
    // chain.
    StoredBlock storedNewHead = splitPoint;
    if (shouldVerifyTransactions()) {
        for (StoredBlock oldBlock : oldBlocks) {
            try {
                disconnectTransactions(oldBlock);
            } catch (PrunedException e) {
                // We threw away the data we need to re-org this deep! We need to go back to a
                // peer with full block contents and ask them for the relevant data then rebuild
                // the indexes. Or we could just give up and ask the human operator to help get us
                // unstuck (eg, rescan from the genesis block).
                // TODO: Retry adding this block when we get a block with hash e.getHash()
                throw e;
            }
        }
        StoredBlock cursor;
        // Walk in ascending chronological order.
        for (Iterator<StoredBlock> it = newBlocks.descendingIterator(); it.hasNext(); ) {
            cursor = it.next();
            Block cursorBlock = cursor.getHeader();
            if (expensiveChecks
                    && cursorBlock.getTimeSeconds()
                            <= getMedianTimestampOfRecentBlocks(cursor.getPrev(blockStore), blockStore))
                throw new VerificationException("Block's timestamp is too early during reorg");
            TransactionOutputChanges txOutChanges;
            if (cursor != newChainHead || block == null)
                txOutChanges = connectTransactions(cursor);
            else
                txOutChanges = connectTransactions(newChainHead.getHeight(), block);
            storedNewHead = addToBlockStore(storedNewHead, cursorBlock.cloneAsHeader(), txOutChanges);
        }
    } else {
        // (Finally) write block to block store.
        storedNewHead = addToBlockStore(storedPrev, newChainHead.getHeader());
    }
    // Now inform the listeners. This is necessary so the set of currently active transactions
    // (that we can spend) can be updated to take into account the re-organize. We might also have
    // received new coins we didn't have before and our previous spends might have been undone.
    for (final ListenerRegistration<BlockChainListener> registration : listeners) {
        if (registration.executor == Threading.SAME_THREAD) {
            // Short circuit the executor so we can propagate any exceptions.
            // TODO: Do we really need to do this or should it be irrelevant?
            registration.listener.reorganize(splitPoint, oldBlocks, newBlocks);
        } else {
            registration.executor.execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        registration.listener.reorganize(splitPoint, oldBlocks, newBlocks);
                    } catch (VerificationException e) {
                        log.error("Block chain listener threw exception during reorg", e);
                    }
                }
            });
        }
    }
    // Update the pointer to the best known block.
    setChainHead(storedNewHead);
}
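// A hedged sketch of the findSplit() helper both re-org paths rely on: walk the two
// chains backwards, always stepping the cursor at greater height, until they meet at
// the last common ancestor. Reconstructed here for illustration, not copied from the
// original source.
private static StoredBlock findSplit(StoredBlock newChainHead, StoredBlock oldChainHead, BlockStore store)
        throws BlockStoreException {
    StoredBlock currentChainCursor = oldChainHead;
    StoredBlock newChainCursor = newChainHead;
    while (!currentChainCursor.equals(newChainCursor)) {
        if (currentChainCursor.getHeight() > newChainCursor.getHeight()) {
            currentChainCursor = currentChainCursor.getPrev(store);
            checkNotNull(currentChainCursor, "Attempt to follow an orphan chain");
        } else {
            newChainCursor = newChainCursor.getPrev(store);
            checkNotNull(newChainCursor, "Attempt to follow an orphan chain");
        }
    }
    return currentChainCursor;
}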
// expensiveChecks enables checks that require looking at blocks further back in the chain
// than the previous one when connecting (eg median timestamp check).
// It could be exposed, but for now we just set it to shouldVerifyTransactions().
private void connectBlock(final Block block, StoredBlock storedPrev, boolean expensiveChecks,
        @Nullable final List<Sha256Hash> filteredTxHashList,
        @Nullable final Map<Sha256Hash, Transaction> filteredTxn)
        throws BlockStoreException, VerificationException, PrunedException {
    checkState(lock.isHeldByCurrentThread());
    boolean filtered = filteredTxHashList != null && filteredTxn != null;
    // Check that we aren't connecting a block that fails a checkpoint check.
    if (!params.passesCheckpoint(storedPrev.getHeight() + 1, block.getHash()))
        throw new VerificationException("Block failed checkpoint lockin at " + (storedPrev.getHeight() + 1));
    if (shouldVerifyTransactions()) {
        checkNotNull(block.transactions);
        for (Transaction tx : block.transactions)
            if (!tx.isFinal(storedPrev.getHeight() + 1, block.getTimeSeconds()))
                throw new VerificationException("Block contains non-final transaction");
    }

    StoredBlock head = getChainHead();
    if (storedPrev.equals(head)) {
        if (filtered && filteredTxn.size() > 0) {
            log.debug("Block {} connects to top of best chain with {} transaction(s) of which we were sent {}",
                    block.getHashAsString(), filteredTxHashList.size(), filteredTxn.size());
            for (Sha256Hash hash : filteredTxHashList) log.debug("  matched tx {}", hash);
        }
        if (expensiveChecks
                && block.getTimeSeconds() <= getMedianTimestampOfRecentBlocks(head, blockStore))
            throw new VerificationException("Block's timestamp is too early");

        // This block connects to the best known block, it is a normal continuation of the system.
        TransactionOutputChanges txOutChanges = null;
        if (shouldVerifyTransactions())
            txOutChanges = connectTransactions(storedPrev.getHeight() + 1, block);
        StoredBlock newStoredBlock = addToBlockStore(storedPrev,
                block.transactions == null ? block : block.cloneAsHeader(), txOutChanges);
        setChainHead(newStoredBlock);
        log.debug("Chain is now {} blocks high, running listeners", newStoredBlock.getHeight());
        informListenersForNewBlock(block, NewBlockType.BEST_CHAIN, filteredTxHashList, filteredTxn, newStoredBlock);
    } else {
        // This block connects to somewhere other than the top of the best known chain. We treat
        // these differently.
        //
        // Note that we send the transactions to the wallet FIRST, even if we're about to
        // re-organize this block to become the new best chain head. This simplifies handling of
        // the re-org in the Wallet class.
        StoredBlock newBlock = storedPrev.build(block);
        boolean haveNewBestChain = newBlock.moreWorkThan(head);
        if (haveNewBestChain) {
            log.info("Block is causing a re-organize");
        } else {
            StoredBlock splitPoint = findSplit(newBlock, head, blockStore);
            if (splitPoint != null && splitPoint.equals(newBlock)) {
                // newBlock is a part of the same chain, there's no fork. This happens when we
                // receive a block that we already saw and linked into the chain previously, which
                // isn't the chain head. Re-processing it is confusing for the wallet so just skip.
                log.warn("Saw duplicated block in main chain at height {}: {}",
                        newBlock.getHeight(), newBlock.getHeader().getHash());
                return;
            }
            if (splitPoint == null) {
                // This should absolutely never happen.
                // (Let's not write the full block to disk to keep any bugs which allow this to
                // happen from writing unreasonable amounts of data to disk.)
                throw new VerificationException("Block forks the chain but splitPoint is null");
            } else {
                // We aren't actually spending any transactions (yet) because we are on a fork.
                addToBlockStore(storedPrev, block);
                int splitPointHeight = splitPoint.getHeight();
                String splitPointHash = splitPoint.getHeader().getHashAsString();
                log.info("Block forks the chain at height {}/block {}, but it did not cause a reorganize:\n{}",
                        splitPointHeight, splitPointHash, newBlock.getHeader().getHashAsString());
            }
        }

        // We may not have any transactions if we received only a header, which can happen during
        // fast catchup. If we do, send them to the wallet but state that they are on a side chain
        // so it knows not to try and spend them until they become activated.
        if (block.transactions != null || filtered) {
            informListenersForNewBlock(block, NewBlockType.SIDE_CHAIN, filteredTxHashList, filteredTxn, newBlock);
        }

        if (haveNewBestChain) handleNewBestChain(storedPrev, newBlock, block, expensiveChecks);
    }
}
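// A hedged sketch of StoredBlock.moreWorkThan(), the total-work comparison that decides
// whether a fork triggers a re-org above. Chain work is the cumulative proof-of-work of
// a block and all its ancestors; reconstructed here for illustration.
public boolean moreWorkThan(StoredBlock other) {
    return getChainWork().compareTo(other.getChainWork()) > 0;
}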
public void put(StoredBlock storedBlock, StoredUndoableBlock undoableBlock) throws BlockStoreException {
    maybeConnect();
    // We skip the first 4 bytes because (on prodnet) the minimum target has 4 0-bytes
    byte[] hashBytes = new byte[28];
    System.arraycopy(storedBlock.getHeader().getHash().getBytes(), 4, hashBytes, 0, 28);
    int height = storedBlock.getHeight();
    byte[] transactions = null;
    byte[] txOutChanges = null;
    try {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        if (undoableBlock.getTxOutChanges() != null) {
            undoableBlock.getTxOutChanges().serializeToStream(bos);
            txOutChanges = bos.toByteArray();
        } else {
            // Write the transaction count as a 4-byte little-endian integer, then the
            // transactions themselves.
            int numTxn = undoableBlock.getTransactions().size();
            bos.write(0xFF & (numTxn >> 0));
            bos.write(0xFF & (numTxn >> 8));
            bos.write(0xFF & (numTxn >> 16));
            bos.write(0xFF & (numTxn >> 24));
            for (Transaction tx : undoableBlock.getTransactions()) tx.rimbitSerialize(bos);
            transactions = bos.toByteArray();
        }
        bos.close();
    } catch (IOException e) {
        throw new BlockStoreException(e);
    }

    try {
        if (log.isDebugEnabled())
            log.debug("Looking for undoable block with hash: " + Utils.bytesToHexString(hashBytes));
        PreparedStatement findS = conn.get().prepareStatement("select 1 from undoableBlocks where hash = ?");
        findS.setBytes(1, hashBytes);
        ResultSet rs = findS.executeQuery();
        if (rs.next()) {
            // We already have this output, update it.
            findS.close();
            // Postgres insert-or-updates are very complex (and finicky). This level of
            // transaction isolation seems to work for rimbitj.
            PreparedStatement s = conn.get().prepareStatement(
                    "UPDATE undoableBlocks SET txOutChanges=?, transactions=?" + " WHERE hash = ?");
            s.setBytes(3, hashBytes);
            if (log.isDebugEnabled())
                log.debug("Updating undoable block with hash: " + Utils.bytesToHexString(hashBytes));
            if (transactions == null) {
                s.setBytes(1, txOutChanges);
                s.setNull(2, Types.BINARY);
            } else {
                s.setNull(1, Types.BINARY);
                s.setBytes(2, transactions);
            }
            s.executeUpdate();
            s.close();
            return;
        }

        PreparedStatement s = conn.get().prepareStatement(
                "INSERT INTO undoableBlocks(hash, height, txOutChanges, transactions)" + " VALUES(?, ?, ?, ?)");
        s.setBytes(1, hashBytes);
        s.setInt(2, height);
        if (log.isDebugEnabled())
            log.debug("Inserting undoable block with hash: " + Utils.bytesToHexString(hashBytes)
                    + " at height " + height);
        if (transactions == null) {
            s.setBytes(3, txOutChanges);
            s.setNull(4, Types.BINARY);
        } else {
            s.setNull(3, Types.BINARY);
            s.setBytes(4, transactions);
        }
        s.executeUpdate();
        s.close();
        try {
            putUpdateStoredBlock(storedBlock, true);
        } catch (SQLException e) {
            throw new BlockStoreException(e);
        }
    } catch (SQLException e) {
        if (!e.getSQLState().equals(POSTGRES_DUPLICATE_KEY_ERROR_CODE)) throw new BlockStoreException(e);
    }
}
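// A hedged sketch of the matching read side for the 4-byte little-endian transaction
// count written above. Reconstructed for illustration; the actual deserialization code
// lives elsewhere in the store.
private static int readTxnCountLE(byte[] bytes, int offset) {
    return (bytes[offset] & 0xFF)
            | ((bytes[offset + 1] & 0xFF) << 8)
            | ((bytes[offset + 2] & 0xFF) << 16)
            | ((bytes[offset + 3] & 0xFF) << 24);
}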
/** Throws an exception if the block's difficulty is not correct. */
private void checkDifficultyTransitions(StoredBlock storedPrev, StoredBlock storedNext)
        throws BlockStoreException, VerificationException {
    Block prev = storedPrev.getHeader();
    Block next = storedNext.getHeader();
    // Is this supposed to be a difficulty transition point?
    if ((storedPrev.getHeight() + 1) % params.interval != 0) {
        // No ... so check the difficulty didn't actually change.
        if (next.getDifficultyTarget() != prev.getDifficultyTarget())
            throw new VerificationException("Unexpected change in difficulty at height "
                    + storedPrev.getHeight() + ": " + Long.toHexString(next.getDifficultyTarget())
                    + " vs " + Long.toHexString(prev.getDifficultyTarget()));
        return;
    }

    // We need to find a block far back in the chain. It's OK that this is expensive because it
    // only occurs every two weeks after the initial block chain download.
    long now = System.currentTimeMillis();
    StoredBlock cursor = blockStore.get(prev.getHash());
    for (int i = 0; i < params.interval - 1; i++) {
        if (cursor == null) {
            // This should never happen. If it does, it means we are following an incorrect or
            // busted chain.
            throw new VerificationException(
                    "Difficulty transition point but we did not find a way back to the genesis block.");
        }
        cursor = blockStore.get(cursor.getHeader().getPrevBlockHash());
    }
    log.info("Difficulty transition traversal took {}msec", System.currentTimeMillis() - now);

    Block blockIntervalAgo = cursor.getHeader();
    int timespan = (int) (prev.getTime() - blockIntervalAgo.getTime());
    // Limit the adjustment step.
    if (timespan < params.targetTimespan / 4) timespan = params.targetTimespan / 4;
    if (timespan > params.targetTimespan * 4) timespan = params.targetTimespan * 4;

    BigInteger newDifficulty = Utils.decodeCompactBits(blockIntervalAgo.getDifficultyTarget());
    newDifficulty = newDifficulty.multiply(BigInteger.valueOf(timespan));
    newDifficulty = newDifficulty.divide(BigInteger.valueOf(params.targetTimespan));

    if (newDifficulty.compareTo(params.proofOfWorkLimit) > 0) {
        log.warn("Difficulty hit proof of work limit: {}", newDifficulty.toString(16));
        newDifficulty = params.proofOfWorkLimit;
    }

    int accuracyBytes = (int) (next.getDifficultyTarget() >>> 24) - 3;
    BigInteger receivedDifficulty = next.getDifficultyTargetAsInteger();

    // The calculated difficulty is to a higher precision than received, so reduce here.
    BigInteger mask = BigInteger.valueOf(0xFFFFFFL).shiftLeft(accuracyBytes * 8);
    newDifficulty = newDifficulty.and(mask);

    if (newDifficulty.compareTo(receivedDifficulty) != 0)
        throw new VerificationException("Network provided difficulty bits do not match what was calculated: "
                + receivedDifficulty.toString(16) + " vs " + newDifficulty.toString(16));
}
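// A hedged worked example of the compact "nBits" target encoding that the precision
// mask above compensates for, using the standard Bitcoin rule (decodeCompactBits is
// assumed to follow it). 0x1b0404cb encodes exponent 0x1b and mantissa 0x0404cb:
//   target = 0x0404cb * 2^(8 * (0x1b - 3))
BigInteger target = BigInteger.valueOf(0x0404cbL).shiftLeft(8 * (0x1b - 3));
// The freshly calculated difficulty carries full precision; masking keeps only the top
// three mantissa bytes so it can be compared with the coarser header bits:
int accuracyBytes = 0x1b - 3;
BigInteger mask = BigInteger.valueOf(0xFFFFFFL).shiftLeft(accuracyBytes * 8);
BigInteger rounded = target.and(mask); // equals target here: it already has 3-byte precision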