  // Initializes a brand new, empty store: writes the magic header bytes, positions the ring
  // cursor just past the file prologue, then stores the genesis block and makes it the chain head.
  private void initNewStore(NetworkParameters params) throws Exception {
    byte[] header = HEADER_MAGIC.getBytes("US-ASCII");
    buffer.put(header);
    // Insert the genesis block.
    lock.lock();
    try {
      setRingCursor(buffer, FILE_PROLOGUE_BYTES);
    } finally {
      lock.unlock();
    }
    Block genesis = params.getGenesisBlock().cloneAsHeader();
    StoredBlock storedGenesis = new StoredBlock(genesis, genesis.getWork(), 0);
    put(storedGenesis);
    setChainHead(storedGenesis);
  }
  /**
   * Processes a received block and tries to add it to the chain. If there's something wrong with
   * the block, an exception is thrown. If the block is OK but cannot be connected to the chain at
   * this time, returns false. If the block can be connected to the chain, returns true. Accessing
   * the block's transactions from another thread while this method runs may result in undefined
   * behavior.
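   *
   * <p>A minimal, illustrative call site might look like the following (the {@code chain}
   * variable here is hypothetical, not part of this class):
   *
   * <pre>{@code
   * try {
   *     boolean connected = chain.add(block);
   *     if (!connected) {
   *         // Orphan block: its predecessor is unknown yet, so it was put aside for later.
   *     }
   * } catch (VerificationException e) {
   *     // The block failed verification and should not be trusted.
   * }
   * }</pre>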
  */
 public boolean add(Block block) throws VerificationException, PrunedException {
   try {
     return add(block, true, null, null);
   } catch (BlockStoreException e) {
     // TODO: Figure out a better way to propagate this exception to the user.
     throw new RuntimeException(e);
   } catch (VerificationException e) {
     try {
       notSettingChainHead();
     } catch (BlockStoreException e1) {
       throw new RuntimeException(e1);
     }
     throw new VerificationException(
         "Could not verify block " + block.getHashAsString() + "\n" + block.toString(), e);
   }
 }
 OrphanBlock(
     Block block,
     @Nullable List<Sha256Hash> filteredTxHashes,
     @Nullable Map<Sha256Hash, Transaction> filteredTxn) {
    final boolean filtered = filteredTxHashes != null && filteredTxn != null;
    // An orphan must carry exactly one representation: either the full transactions embedded in
    // the block itself, or the filtered (matched) transaction data, never both and never neither.
    Preconditions.checkArgument(
        (block.transactions == null && filtered) || (block.transactions != null && !filtered));
   if (!shouldVerifyTransactions()) this.block = block.cloneAsHeader();
   else this.block = block;
   this.filteredTxHashes = filteredTxHashes;
   this.filteredTxn = filteredTxn;
 }
 /**
  * Called as part of connecting a block when the new block results in a different chain having
  * higher total work.
  *
   * <p>If shouldVerifyTransactions() is true, then either newChainHead must already be present in
   * the block store as a FullStoredBlock, or (block != null && block.transactions != null) must
   * hold.
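   *
   * <p>Stated as a boolean expression, purely for illustration ({@code isFullStoredBlockInStore}
   * is a hypothetical helper, not a method of this class):
   *
   * <pre>{@code
   * !shouldVerifyTransactions()
   *     || isFullStoredBlockInStore(newChainHead)
   *     || (block != null && block.transactions != null)
   * }</pre>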
  */
 private void handleNewBestChain(
     StoredBlock storedPrev, StoredBlock newChainHead, Block block, boolean expensiveChecks)
     throws BlockStoreException, VerificationException, PrunedException {
   checkState(lock.isHeldByCurrentThread());
    // This chain has overtaken the one we currently believe is best. A reorganize is required.
   //
   // Firstly, calculate the block at which the chain diverged. We only need to examine the
   // chain from beyond this block to find differences.
   StoredBlock head = getChainHead();
   final StoredBlock splitPoint = findSplit(newChainHead, head, blockStore);
   log.info("Re-organize after split at height {}", splitPoint.getHeight());
   log.info("Old chain head: {}", head.getHeader().getHashAsString());
   log.info("New chain head: {}", newChainHead.getHeader().getHashAsString());
   log.info("Split at block: {}", splitPoint.getHeader().getHashAsString());
   // Then build a list of all blocks in the old part of the chain and the new part.
   final LinkedList<StoredBlock> oldBlocks = getPartialChain(head, splitPoint, blockStore);
   final LinkedList<StoredBlock> newBlocks = getPartialChain(newChainHead, splitPoint, blockStore);
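    // Both lists run from the newest block down towards (but excluding) the split point itself,
    // assuming getPartialChain() walks from its first argument back to its second. For example,
    // with an old chain A -> B -> C (head C) and a new chain A -> B' -> C' -> D' (new head D'),
    // the split point is A, oldBlocks is [C, B] and newBlocks is [D', C', B'].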
   // Disconnect each transaction in the previous main chain that is no longer in the new main
   // chain
   StoredBlock storedNewHead = splitPoint;
   if (shouldVerifyTransactions()) {
     for (StoredBlock oldBlock : oldBlocks) {
       try {
         disconnectTransactions(oldBlock);
       } catch (PrunedException e) {
          // We threw away the data we need to re-org this deep! We need to go back to a peer with
          // full block contents and ask them for the relevant data, then rebuild the indexes. Or
          // we could just give up and ask the human operator to help get us unstuck (e.g. rescan
          // from the genesis block).
         // TODO: Retry adding this block when we get a block with hash e.getHash()
         throw e;
       }
     }
     StoredBlock cursor;
      // Walk in ascending chronological order: newBlocks is ordered newest-first, so iterate it
      // backwards.
     for (Iterator<StoredBlock> it = newBlocks.descendingIterator(); it.hasNext(); ) {
       cursor = it.next();
       Block cursorBlock = cursor.getHeader();
       if (expensiveChecks
           && cursorBlock.getTimeSeconds()
               <= getMedianTimestampOfRecentBlocks(cursor.getPrev(blockStore), blockStore))
         throw new VerificationException("Block's timestamp is too early during reorg");
       TransactionOutputChanges txOutChanges;
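        // The new chain head may have been handed to us directly as a full in-memory block (see
        // the precondition above); if so, connect it from memory, otherwise reconnect the block
        // from the block store.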
       if (cursor != newChainHead || block == null) txOutChanges = connectTransactions(cursor);
       else txOutChanges = connectTransactions(newChainHead.getHeight(), block);
       storedNewHead = addToBlockStore(storedNewHead, cursorBlock.cloneAsHeader(), txOutChanges);
     }
   } else {
     // (Finally) write block to block store
     storedNewHead = addToBlockStore(storedPrev, newChainHead.getHeader());
   }
    // Now inform the listeners. This is necessary so the set of currently active transactions
    // (that we can spend) can be updated to take into account the re-organize. We might also have
    // received new coins we didn't have before, and our previous spends might have been undone.
   for (final ListenerRegistration<BlockChainListener> registration : listeners) {
     if (registration.executor == Threading.SAME_THREAD) {
       // Short circuit the executor so we can propagate any exceptions.
       // TODO: Do we really need to do this or should it be irrelevant?
       registration.listener.reorganize(splitPoint, oldBlocks, newBlocks);
     } else {
       registration.executor.execute(
           new Runnable() {
             @Override
             public void run() {
               try {
                 registration.listener.reorganize(splitPoint, oldBlocks, newBlocks);
               } catch (VerificationException e) {
                 log.error("Block chain listener threw exception during reorg", e);
               }
             }
           });
     }
   }
   // Update the pointer to the best known block.
   setChainHead(storedNewHead);
 }
   // expensiveChecks enables checks that require looking at blocks further back in the chain
   // than the previous one when connecting (e.g. the median timestamp check).
   // It could be exposed, but for now we just set it to shouldVerifyTransactions().
  private void connectBlock(
      final Block block,
      StoredBlock storedPrev,
      boolean expensiveChecks,
      @Nullable final List<Sha256Hash> filteredTxHashList,
      @Nullable final Map<Sha256Hash, Transaction> filteredTxn)
      throws BlockStoreException, VerificationException, PrunedException {
    checkState(lock.isHeldByCurrentThread());
    boolean filtered = filteredTxHashList != null && filteredTxn != null;
    // Check that we aren't connecting a block that fails a checkpoint check
    if (!params.passesCheckpoint(storedPrev.getHeight() + 1, block.getHash()))
      throw new VerificationException(
          "Block failed checkpoint lockin at " + (storedPrev.getHeight() + 1));
    if (shouldVerifyTransactions()) {
      checkNotNull(block.transactions);
      for (Transaction tx : block.transactions)
        if (!tx.isFinal(storedPrev.getHeight() + 1, block.getTimeSeconds()))
          throw new VerificationException("Block contains non-final transaction");
    }

    StoredBlock head = getChainHead();
    if (storedPrev.equals(head)) {
      if (filtered && filteredTxn.size() > 0) {
        log.debug(
            "Block {} connects to top of best chain with {} transaction(s) of which we were sent {}",
            block.getHashAsString(),
            filteredTxHashList.size(),
            filteredTxn.size());
        for (Sha256Hash hash : filteredTxHashList) log.debug("  matched tx {}", hash);
      }
      if (expensiveChecks
          && block.getTimeSeconds() <= getMedianTimestampOfRecentBlocks(head, blockStore))
        throw new VerificationException("Block's timestamp is too early");

      // This block connects to the best known block, it is a normal continuation of the system.
      TransactionOutputChanges txOutChanges = null;
      if (shouldVerifyTransactions())
        txOutChanges = connectTransactions(storedPrev.getHeight() + 1, block);
      StoredBlock newStoredBlock =
          addToBlockStore(
              storedPrev, block.transactions == null ? block : block.cloneAsHeader(), txOutChanges);
      setChainHead(newStoredBlock);
      log.debug("Chain is now {} blocks high, running listeners", newStoredBlock.getHeight());
      informListenersForNewBlock(
          block, NewBlockType.BEST_CHAIN, filteredTxHashList, filteredTxn, newStoredBlock);
    } else {
       // This block connects to somewhere other than the top of the best known chain. We treat
       // these differently.
       //
       // Note that we send the transactions to the wallet FIRST, even if we're about to
       // re-organize this block to become the new best chain head. This simplifies handling of
       // the re-org in the Wallet class.
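       // newBlock extends storedPrev, so it carries the cumulative work of the whole chain ending
       // in this block; whether a re-organize happens is decided by total chain work
       // (moreWorkThan), not by height.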
      StoredBlock newBlock = storedPrev.build(block);
      boolean haveNewBestChain = newBlock.moreWorkThan(head);
      if (haveNewBestChain) {
        log.info("Block is causing a re-organize");
      } else {
        StoredBlock splitPoint = findSplit(newBlock, head, blockStore);
        if (splitPoint != null && splitPoint.equals(newBlock)) {
           // newBlock is part of the same chain, there's no fork. This happens when we receive a
           // block that we already saw and linked into the chain previously, which isn't the
           // chain head. Re-processing it is confusing for the wallet, so just skip it.
          log.warn(
              "Saw duplicated block in main chain at height {}: {}",
              newBlock.getHeight(),
              newBlock.getHeader().getHash());
          return;
        }
        if (splitPoint == null) {
           // This should absolutely never happen.
           // (Let's not write the full block to disk, to keep any bugs which allow this to happen
           // from writing unreasonable amounts of data to disk.)
          throw new VerificationException("Block forks the chain but splitPoint is null");
        } else {
          // We aren't actually spending any transactions (yet) because we are on a fork
          addToBlockStore(storedPrev, block);
          int splitPointHeight = splitPoint.getHeight();
          String splitPointHash = splitPoint.getHeader().getHashAsString();
          log.info(
              "Block forks the chain at height {}/block {}, but it did not cause a reorganize:\n{}",
              splitPointHeight,
              splitPointHash,
              newBlock.getHeader().getHashAsString());
        }
      }

       // We may not have any transactions if we received only a header, which can happen during
       // fast catchup. If we do, send them to the wallet but state that they are on a side chain
       // so it knows not to try and spend them until they become activated.
      if (block.transactions != null || filtered) {
        informListenersForNewBlock(
            block, NewBlockType.SIDE_CHAIN, filteredTxHashList, filteredTxn, newBlock);
      }

      if (haveNewBestChain) handleNewBestChain(storedPrev, newBlock, block, expensiveChecks);
    }
  }
   // filteredTxHashList contains the hashes of all transactions in the (filtered) block;
   // filteredTxn contains just the subset of those transactions that we were actually sent.
  private boolean add(
      Block block,
      boolean tryConnecting,
      @Nullable List<Sha256Hash> filteredTxHashList,
      @Nullable Map<Sha256Hash, Transaction> filteredTxn)
      throws BlockStoreException, VerificationException, PrunedException {
    // TODO: Use read/write locks to ensure that during chain download properties are still low
    // latency.
    lock.lock();
    try {
       // Quick check for duplicates to avoid an expensive check further down (in findSplit). This
       // can happen a lot when connecting orphan blocks due to the dumb brute force algorithm we
       // use.
      if (block.equals(getChainHead().getHeader())) {
        return true;
      }
      if (tryConnecting && orphanBlocks.containsKey(block.getHash())) {
        return false;
      }

       // If we want to verify transactions (i.e. we are running with full blocks), verify that
       // the block has transactions.
      if (shouldVerifyTransactions() && block.transactions == null)
        throw new VerificationException("Got a block header while running in full-block mode");

      // Check for already-seen block, but only for full pruned mode, where the DB is
      // more likely able to handle these queries quickly.
      if (shouldVerifyTransactions() && blockStore.get(block.getHash()) != null) {
        return true;
      }

       // Does this block contain any transactions we might care about? Check this up front before
       // verifying the block's validity so we can skip the merkle root verification if the
       // contents aren't interesting. This saves a lot of time for big blocks.
      boolean contentsImportant = shouldVerifyTransactions();
      if (block.transactions != null) {
        contentsImportant = contentsImportant || containsRelevantTransactions(block);
      }

       // Prove the block is internally valid: hash is lower than target, etc. This only checks
       // the block contents if there is a tx sending or receiving coins using an address in one
       // of our wallets. And those transactions are only lightly verified: presence in a valid
       // connecting block is taken as proof of validity. See the article here for more details:
       // http://code.google.com/p/bitcoinj/wiki/SecurityModel
      try {
        block.verifyHeader();
         if (contentsImportant) block.verifyTransactions();
       } catch (VerificationException e) {
        log.error("Failed to verify block: ", e);
        log.error(block.getHashAsString());
        throw e;
      }

      // Try linking it to a place in the currently known blocks.
      StoredBlock storedPrev = getStoredBlockInCurrentScope(block.getPrevBlockHash());
      if (storedPrev == null) {
         // We can't find the previous block. Probably we are still in the process of downloading
         // the chain and a block was solved whilst we were doing it. We put it to one side and
         // try to connect it later when we have more blocks.
        checkState(tryConnecting, "bug in tryConnectingOrphans");
        log.warn(
            "Block does not connect: {} prev {}",
            block.getHashAsString(),
            block.getPrevBlockHash());
        orphanBlocks.put(block.getHash(), new OrphanBlock(block, filteredTxHashList, filteredTxn));
        return false;
      } else {
        checkState(lock.isHeldByCurrentThread());
        // It connects to somewhere on the chain. Not necessarily the top of the best known chain.
        params.checkDifficultyTransitions(storedPrev, block, blockStore);
        connectBlock(
            block, storedPrev, shouldVerifyTransactions(), filteredTxHashList, filteredTxn);
      }
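       // Now that this block is linked in, see whether any blocks we previously put aside as
       // orphans can be connected on top of it.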
      if (tryConnecting) tryConnectingOrphans();
      return true;
    } finally {
      lock.unlock();
    }
  }