/**
  * Returns the hashes of all currently stored orphan blocks and then removes those blocks from
  * this object's storage. Used by Peer when a filter exhaustion event has occurred, since any
  * orphan blocks downloaded under the exhausted filter may be inaccurate or incomplete.
  */
 public Set<Sha256Hash> drainOrphanBlocks() {
   lock.lock();
   try {
     // Snapshot the keys before clearing, since clear() would empty the keySet view too.
     final Set<Sha256Hash> drained = new HashSet<Sha256Hash>();
     drained.addAll(orphanBlocks.keySet());
     orphanBlocks.clear();
     return drained;
   } finally {
     lock.unlock();
   }
 }
 /**
  * An orphan block is a block whose parent is unknown to us, so it cannot be attached to the
  * chain anywhere yet. This typically happens while we are still downloading the chain, or when
  * a block is solved mid-download. Orphans sometimes form small chains among themselves; this
  * method walks backwards through such a chain to locate its lowest member.
  *
  * @return the bottom-most orphan reachable from {@code from} (possibly {@code from} itself), or
  *     null if {@code from} does not identify a known orphan block
  */
 @Nullable
 public Block getOrphanRoot(Sha256Hash from) {
   lock.lock();
   try {
     OrphanBlock current = orphanBlocks.get(from);
     if (current == null) {
       // "from" is not an orphan we are tracking.
       return null;
     }
     // Follow parent links for as long as the parent is itself a stored orphan.
     while (true) {
       OrphanBlock parent = orphanBlocks.get(current.block.getPrevBlockHash());
       if (parent == null) break;
       current = parent;
     }
     return current.block;
   } finally {
     lock.unlock();
   }
 }
 /** Reports whether the given hash identifies a block currently held in the orphan set. */
 public boolean isOrphan(Sha256Hash block) {
   lock.lock();
   try {
     final boolean present = orphanBlocks.containsKey(block);
     return present;
   } finally {
     lock.unlock();
   }
 }
  /**
   * Looks up the {@link StoredBlock} with the given hash by scanning the memory-mapped ring
   * buffer backwards from the write cursor. Hits are cached in blockCache; definite misses are
   * remembered in notFoundCache so repeated lookups stay cheap.
   *
   * @return the stored block, or null if it is not present in the store
   * @throws BlockStoreException if the store has been closed
   */
  @Override
  @Nullable
  public StoredBlock get(Sha256Hash hash) throws BlockStoreException {
    // Local copy: presumably guards against a concurrent close() nulling this.buffer — TODO
    // confirm against the close() implementation.
    final MappedByteBuffer buffer = this.buffer;
    if (buffer == null) throw new BlockStoreException("Store closed");

    lock.lock();
    try {
      // Fast paths: positive cache first, then the negative ("known absent") cache.
      StoredBlock cacheHit = blockCache.get(hash);
      if (cacheHit != null) return cacheHit;
      if (notFoundCache.get(hash) != null) return null;

      // Starting from the current tip of the ring, work backwards until we have either found
      // the block or wrapped all the way around to the starting point.
      int cursor = getRingCursor(buffer);
      final int startingPoint = cursor;
      final int fileSize = getFileSize();
      final byte[] targetHashBytes = hash.getBytes();
      // Scratch buffer reused for each record's 32-byte hash prefix.
      byte[] scratch = new byte[32];
      do {
        cursor -= RECORD_SIZE;
        if (cursor < FILE_PROLOGUE_BYTES) {
          // We hit the start of the record area, so wrap around to the last record in the file.
          cursor = fileSize - RECORD_SIZE;
        }
        // Cursor is now at the start of the next record to check, so read the hash and compare it.
        buffer.position(cursor);
        buffer.get(scratch);
        if (Arrays.equals(scratch, targetHashBytes)) {
          // Found the target. The record payload follows the hash at the buffer's current
          // position, so deserialize from right here.
          StoredBlock storedBlock = StoredBlock.deserializeCompact(params, buffer);
          blockCache.put(hash, storedBlock);
          return storedBlock;
        }
      } while (cursor != startingPoint);
      // Not found anywhere in the ring; record the miss.
      notFoundCache.put(hash, notFoundMarker);
      return null;
    } catch (ProtocolException e) {
      throw new RuntimeException(e); // Cannot happen: the data was written by this store itself.
    } finally {
      lock.unlock();
    }
  }
  /**
   * Writes the block's record (32-byte hash followed by the compact StoredBlock serialization)
   * at the current ring cursor, wrapping to the start of the record area when the end of the
   * file is reached, then advances the cursor past the new record.
   *
   * @throws BlockStoreException if the store has been closed
   */
  @Override
  public void put(StoredBlock block) throws BlockStoreException {
    // Local copy: presumably guards against a concurrent close() nulling this.buffer — TODO
    // confirm against the close() implementation.
    final MappedByteBuffer buffer = this.buffer;
    if (buffer == null) throw new BlockStoreException("Store closed");

    lock.lock();
    try {
      int cursor = getRingCursor(buffer);
      if (cursor == getFileSize()) {
        // Cursor is past the last record slot: wrap around to just after the file prologue.
        cursor = FILE_PROLOGUE_BYTES;
      }
      buffer.position(cursor);
      Sha256Hash hash = block.getHeader().getHash();
      // The block exists now, so drop any stale negative-cache entry for it.
      notFoundCache.remove(hash);
      buffer.put(hash.getBytes());
      block.serializeCompact(buffer);
      // Persist the advanced cursor so the next put/get starts after this record.
      setRingCursor(buffer, buffer.position());
      blockCache.put(hash, block);
    } finally {
      lock.unlock();
    }
  }
 /**
  * For each stored orphan block, checks whether its parent has now become known and, if so,
  * connects the orphan to the chain. Repeats until a full pass over the orphan set connects
  * nothing further.
  */
 private void tryConnectingOrphans()
     throws VerificationException, BlockStoreException, PrunedException {
   checkState(lock.isHeldByCurrentThread());
   // Sweep the orphan map repeatedly: any orphan whose parent is now resolvable gets connected
   // and removed, and each successful connection may unlock more orphans on the next sweep.
   // A topological sort followed by in-order connection would be asymptotically better, but this
   // brute-force approach is fine for the small orphan counts seen in practice.
   while (true) {
     int connected = 0;
     Iterator<OrphanBlock> it = orphanBlocks.values().iterator();
     while (it.hasNext()) {
       OrphanBlock candidate = it.next();
       // Can we locate this orphan's parent yet?
       StoredBlock parent = getStoredBlockInCurrentScope(candidate.block.getPrevBlockHash());
       if (parent == null) {
         // Still disconnected; leave it in the map for a future sweep.
         log.debug("  but it is not connectable right now");
         continue;
       }
       // Parent is known, so connect the orphan now. Passing false for tryConnecting prevents
       // unbounded recursion when a long chain of orphans connects at once.
       log.info("Connected orphan {}", candidate.block.getHash());
       add(candidate.block, false, candidate.filteredTxHashes, candidate.filteredTxn);
       it.remove();
       connected++;
     }
     if (connected == 0) break;
     log.info("Connected {} orphan blocks.", connected);
   }
 }
  /**
   * Adds the given block to the chain if its parent can be located, otherwise stores it as an
   * orphan for later connection. filteredTxHashList contains the hashes of all transactions in
   * the block; filteredTxn holds just a subset of those transactions.
   *
   * @param tryConnecting if true, attempt to connect stored orphans afterwards; must be true
   *     whenever the block might itself be an orphan
   * @return true if the block was already known or was connected to the chain; false if it was
   *     put aside as an orphan (or was already pending as one)
   */
  private boolean add(
      Block block,
      boolean tryConnecting,
      @Nullable List<Sha256Hash> filteredTxHashList,
      @Nullable Map<Sha256Hash, Transaction> filteredTxn)
      throws BlockStoreException, VerificationException, PrunedException {
    // TODO: Use read/write locks to ensure that during chain download properties are still low
    // latency.
    lock.lock();
    try {
      // Quick check for duplicates to avoid an expensive check further down (in findSplit).
      // Duplicates happen a lot when connecting orphan transactions due to the dumb brute force
      // algorithm we use.
      if (block.equals(getChainHead().getHeader())) {
        return true;
      }
      if (tryConnecting && orphanBlocks.containsKey(block.getHash())) {
        return false;
      }

      // If we want to verify transactions (ie we are running with full blocks), verify that the
      // block actually carries transactions.
      if (shouldVerifyTransactions() && block.transactions == null)
        throw new VerificationException("Got a block header while running in full-block mode");

      // Check for already-seen block, but only for full pruned mode, where the DB is more likely
      // able to handle these queries quickly.
      if (shouldVerifyTransactions() && blockStore.get(block.getHash()) != null) {
        return true;
      }

      // Does this block contain any transactions we might care about? Check this up front before
      // verifying the block's validity so we can skip the merkle root verification if the
      // contents aren't interesting. This saves a lot of time for big blocks.
      boolean contentsImportant = shouldVerifyTransactions();
      if (block.transactions != null) {
        contentsImportant = contentsImportant || containsRelevantTransactions(block);
      }

      // Prove the block is internally valid: hash is lower than target, etc. This only checks
      // the block contents if there is a tx sending or receiving coins using an address in one
      // of our wallets. And those transactions are only lightly verified: presence in a valid
      // connecting block is taken as proof of validity. See the article here for more details:
      // http://code.google.com/p/bitcoinj/wiki/SecurityModel
      try {
        block.verifyHeader();
        if (contentsImportant) block.verifyTransactions();

      } catch (VerificationException e) {
        log.error("Failed to verify block: ", e);
        log.error(block.getHashAsString());
        throw e;
      }

      // Try linking it to a place in the currently known blocks.
      StoredBlock storedPrev = getStoredBlockInCurrentScope(block.getPrevBlockHash());
      if (storedPrev == null) {
        // We can't find the previous block. Probably we are still in the process of downloading
        // the chain and a block was solved whilst we were doing it. We put it to one side and
        // try to connect it later when we have more blocks.
        checkState(tryConnecting, "bug in tryConnectingOrphans");
        log.warn(
            "Block does not connect: {} prev {}",
            block.getHashAsString(),
            block.getPrevBlockHash());
        orphanBlocks.put(block.getHash(), new OrphanBlock(block, filteredTxHashList, filteredTxn));
        return false;
      } else {
        checkState(lock.isHeldByCurrentThread());
        // It connects to somewhere on the chain. Not necessarily the top of the best known chain.
        params.checkDifficultyTransitions(storedPrev, block, blockStore);
        connectBlock(
            block, storedPrev, shouldVerifyTransactions(), filteredTxHashList, filteredTxn);
      }
      if (tryConnecting) tryConnectingOrphans();
      return true;
    } finally {
      lock.unlock();
    }
  }