Example #1
File: Yarrow.java Project: saces/fred
  public void write_seed(File filename, boolean force) {
    if (!force)
      synchronized (this) {
        long now = System.currentTimeMillis();
        if (now - timeLastWroteSeed <= 60 * 60 * 1000 /* once per hour */) return;
        else timeLastWroteSeed = now;
      }

    FileOutputStream fos = null;
    BufferedOutputStream bos = null;
    DataOutputStream dos = null;
    try {
      fos = new FileOutputStream(filename);
      bos = new BufferedOutputStream(fos);
      dos = new DataOutputStream(bos);

      for (int i = 0; i < 32; i++) dos.writeLong(nextLong());

      dos.flush();
      dos.close();
    } catch (IOException e) {
      Logger.error(this, "IOE while saving the seed file! : " + e.getMessage());
    } finally {
      Closer.close(dos);
      Closer.close(bos);
      Closer.close(fos);
    }
  }
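
Every example on this page routes stream cleanup through Closer.close(), fred's null-safe close helper (freenet.support.io.Closer). A minimal sketch of that idea, assuming only java.io.Closeable; this is not the fred implementation itself:

import java.io.Closeable;
import java.io.IOException;

// Minimal sketch of a null-safe, exception-swallowing close helper in the
// style of freenet.support.io.Closer (not the actual fred implementation).
public final class Closer {
  private Closer() {}

  public static void close(Closeable closeable) {
    if (closeable == null) return; // safe for streams that were never opened
    try {
      closeable.close();
    } catch (IOException e) {
      // Swallowed: cleanup in a finally block must not mask the original exception.
    }
  }
}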
Example #2

  public void testDecompressException() throws IOException {
    // build 5k array
    byte[] uncompressedData = new byte[5 * 1024];
    for (int i = 0; i < uncompressedData.length; i++) {
      uncompressedData[i] = 1;
    }

    byte[] compressedData = doCompress(uncompressedData);

    Bucket inBucket = new ArrayBucket(compressedData);
    NullBucket outBucket = new NullBucket();
    InputStream decompressorInput = null;
    OutputStream decompressorOutput = null;
    try {
      decompressorInput = inBucket.getInputStream();
      decompressorOutput = outBucket.getOutputStream();
      Compressor.COMPRESSOR_TYPE.GZIP.decompress(
          decompressorInput, decompressorOutput, 4096 + 10, 4096 + 20);
      decompressorInput.close();
      decompressorOutput.close();
    } catch (CompressionOutputSizeException e) {
      // expect this
      return;
    } finally {
      Closer.close(decompressorInput);
      Closer.close(decompressorOutput);
      inBucket.free();
      outBucket.free();
    }
    fail("did not throw expected CompressionOutputSizeException");
  }
Example #3

  private byte[] doBucketDecompress(byte[] compressedData) throws IOException {
    ByteArrayInputStream decompressorInput = new ByteArrayInputStream(compressedData);
    ByteArrayOutputStream decompressorOutput = new ByteArrayOutputStream();

    Compressor.COMPRESSOR_TYPE.GZIP.decompress(
        decompressorInput, decompressorOutput, 32768, 32768 * 2);

    byte[] outBuf = decompressorOutput.toByteArray();
    try {
      decompressorInput.close();
      decompressorOutput.close();
    } finally {
      Closer.close(decompressorInput);
      Closer.close(decompressorOutput);
    }

    return outBuf;
  }
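
doCompress(), used by the tests above, is not shown on this page. A plausible minimal stand-in using plain java.util.zip (an assumption; the real tests presumably go through fred's Compressor.COMPRESSOR_TYPE.GZIP API):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.zip.GZIPOutputStream;

// Hypothetical stand-in for the doCompress() helper referenced by the tests
// above, written against java.util.zip rather than fred's Compressor API.
private static byte[] doCompress(byte[] uncompressedData) throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  GZIPOutputStream gzos = new GZIPOutputStream(baos);
  try {
    gzos.write(uncompressedData);
  } finally {
    gzos.close(); // close() finishes the stream and writes the GZIP trailer
  }
  return baos.toByteArray();
}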
Example #4

 @Override
 public synchronized void onSuccess(BaseClientPutter state, ObjectContainer container) {
   try {
     if (logDEBUG)
       Logger.debug(this, "Successfully inserted WoTOwnMessageList at " + state.getURI());
     mMessageManager.onMessageListInsertSucceeded(state.getURI());
   } catch (Exception e) {
     Logger.error(this, "WoTOwnMessageList insert succeeded but onSuccess() failed", e);
   } finally {
     removeInsert(state);
     Closer.close(((ClientPutter) state).getData());
   }
 }
Example #5
 /**
  * Reads the stream to the end and feeds every byte into the given digest. It won't reset the
  * MessageDigest for you!
  *
  * @param is the stream to hash; it is closed by this method
  * @param md the MessageDigest to update
  * @throws IOException if reading from the stream fails
  */
 public static void hash(InputStream is, MessageDigest md) throws IOException {
   try {
     byte[] buf = new byte[4096];
     int readBytes = is.read(buf);
     while (readBytes > -1) {
       md.update(buf, 0, readBytes);
       readBytes = is.read(buf);
     }
     is.close();
   } finally {
     Closer.close(is);
   }
 }
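
As the javadoc warns, hash() does not reset the digest for you. A small usage sketch (file names are hypothetical); note that MessageDigest.digest() returns the hash and resets the digest, which is why no explicit reset() is needed between the two calls:

import java.io.FileInputStream;
import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Usage sketch: hash two (hypothetical) files with one MessageDigest.
public static void hashTwoFiles() throws IOException, NoSuchAlgorithmException {
  MessageDigest md = MessageDigest.getInstance("SHA-256");
  hash(new FileInputStream("a.dat"), md); // hash() closes the stream
  byte[] hashA = md.digest(); // digest() also resets md
  hash(new FileInputStream("b.dat"), md);
  byte[] hashB = md.digest();
}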
Example #6
File: Yarrow.java Project: saces/fred
  /** Seed handling */
  private void read_seed(File filename) {
    FileInputStream fis = null;
    BufferedInputStream bis = null;
    DataInputStream dis = null;

    try {
      fis = new FileInputStream(filename);
      bis = new BufferedInputStream(fis);
      dis = new DataInputStream(bis);

      EntropySource seedFile = new EntropySource();
      for (int i = 0; i < 32; i++) acceptEntropy(seedFile, dis.readLong(), 64);
      dis.close();
    } catch (EOFException f) {
      // Okay.
    } catch (IOException e) {
      Logger.error(this, "IOE trying to read the seedfile from disk : " + e.getMessage());
    } finally {
      Closer.close(dis);
      Closer.close(bis);
      Closer.close(fis);
    }
    fast_pool_reseed();
  }
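
On Java 7 and later the same read-and-close chain can be collapsed with try-with-resources, which makes the Closer.close() calls unnecessary. A sketch of read_seed() in that style, assuming the same fields and helpers as above:

  /** Seed handling, rewritten with try-with-resources (Java 7+). */
  private void read_seed(File filename) {
    try (DataInputStream dis =
        new DataInputStream(new BufferedInputStream(new FileInputStream(filename)))) {
      EntropySource seedFile = new EntropySource();
      for (int i = 0; i < 32; i++) acceptEntropy(seedFile, dis.readLong(), 64);
    } catch (EOFException f) {
      // Okay: a short seed file simply contributes less entropy.
    } catch (IOException e) {
      Logger.error(this, "IOE trying to read the seedfile from disk : " + e.getMessage());
    }
    fast_pool_reseed();
  }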
Example #7

  @Override
  public synchronized void onFailure(
      InsertException e, BaseClientPutter state, ObjectContainer container) {
    try {
      if (e.getMode() == InsertException.COLLISION) {
        Logger.warning(
            this, "WoTOwnMessageList insert collided, trying to insert with higher index ...");
        try {
          synchronized (mMessageManager) {
            // We must call getOwnMessageList() before calling onMessageListInsertFailed(),
            // because the latter will increment the message list's index, changing the list's
            // ID - so getIDFromURI would fail with the old state.getURI() if we called it
            // after onMessageListInsertFailed().
            WoTOwnMessageList list =
                (WoTOwnMessageList)
                    mMessageManager.getOwnMessageList(
                        MessageListID.construct(state.getURI()).toString());
            mMessageManager.onMessageListInsertFailed(state.getURI(), true);
            insertMessageList(list);
          }
        } catch (Exception ex) {
          Logger.error(this, "Inserting WoTOwnMessageList with higher index failed", ex);
        }
      } else {
        if (e.isFatal()) Logger.error(this, "WoTOwnMessageList insert failed", e);
        else Logger.warning(this, "WoTOwnMessageList insert failed non-fatally", e);

        mMessageManager.onMessageListInsertFailed(state.getURI(), false);
      }
    } catch (Exception ex) {
      Logger.error(this, "WoTOwnMessageList insert failed and failure handling threw", ex);
    } finally {
      removeInsert(state);
      Closer.close(((ClientPutter) state).getData());
    }
  }
Example #8

  /**
   * You have to synchronize on this <code>WoTMessageListInserter</code> and then on the <code>
   * WoTMessageManager</code> when using this function.
   */
  private void insertMessageList(WoTOwnMessageList list)
      throws TransformerException, ParserConfigurationException, NoSuchMessageException,
          IOException, InsertException {
    Bucket tempB = mTBF.makeBucket(4096); /* TODO: set to a reasonable value */
    OutputStream os = null;

    try {
      os = tempB.getOutputStream();
      // This is what requires synchronization on the WoTMessageManager: while marked as
      // "being inserted", message lists cannot be modified anymore, so it must be guaranteed
      // that the "being inserted" mark does not change while we encode the XML etc.
      mMessageManager.onMessageListInsertStarted(list);

      mXML.encode(mMessageManager, list, os);
      os.close();
      os = null;
      tempB.setReadOnly();

      /* We do not specify a ClientMetaData with mimetype because that would result in the insertion of an additional CHK */
      InsertBlock ib = new InsertBlock(tempB, null, list.getInsertURI());
      InsertContext ictx = mClient.getInsertContext(true);

      ClientPutter pu =
          mClient.insert(
              ib, false, null, false, ictx, this, RequestStarter.INTERACTIVE_PRIORITY_CLASS);
      addInsert(pu);
      tempB = null;

      if (logDEBUG)
        Logger.debug(this, "Started insert of WoTOwnMessageList at request URI " + list.getURI());
    } finally {
      if (tempB != null) tempB.free();
      Closer.close(os);
    }
  }
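
Note how insertMessageList() nulls out os and tempB once ownership has been handed off, so the finally block only releases resources that are still its own. The same idiom in isolation (consume() is a hypothetical callee that takes over the stream):

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// The null-out-on-transfer idiom from insertMessageList(), in isolation.
static void transferExample() throws IOException {
  OutputStream os = new ByteArrayOutputStream();
  try {
    consume(os); // the callee now owns the stream...
    os = null;   // ...so the finally block must not close it
  } finally {
    if (os != null) os.close(); // runs only if consume() threw
  }
}

// Hypothetical consumer that takes ownership and eventually closes os itself.
static void consume(OutputStream os) throws IOException {
  os.write(42);
}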
Example #9

  /**
   * Returns the DDACheckJob we created and enqueued.
   *
   * @param path the directory to test
   * @param read is read access requested?
   * @param write is write access requested?
   * @return the enqueued DDACheckJob
   * @throws IllegalArgumentException if the path is not an accessible directory, or a check is
   *     already running for it
   *     <p>FIXME: Maybe we need to enqueue a PS job to delete the created file after something like
   *     ... 5 mins ?
   */
  protected DDACheckJob enqueueDDACheck(String path, boolean read, boolean write)
      throws IllegalArgumentException {
    File directory = FileUtil.getCanonicalFile(new File(path));
    if (!directory.exists() || !directory.isDirectory())
      throw new IllegalArgumentException(
          "The specified path doesn't exist, isn't a directory, or the node doesn't have access to it!");

    // See #1856
    DDACheckJob job = null;
    synchronized (inTestDirectories) {
      job = inTestDirectories.get(directory);
    }
    if (job != null)
      throw new IllegalArgumentException("There is already a TestDDA going on for that directory!");

    File writeFile =
        (write
            ? new File(path, "DDACheck-" + server.node.fastWeakRandom.nextInt() + ".tmp")
            : null);
    File readFile = null;
    if (read) {
      try {
        readFile = File.createTempFile("DDACheck-", ".tmp", directory);
        readFile.deleteOnExit();
      } catch (IOException e) {
        // Now we know it: we can't write there ;)
        readFile = null;
      }
    }

    DDACheckJob result =
        new DDACheckJob(server.node.fastWeakRandom, directory, readFile, writeFile);
    synchronized (inTestDirectories) {
      inTestDirectories.put(directory, result);
    }

    if (read && (readFile != null) && readFile.canWrite()) {
      // We don't want to attempt to write before: in case an IOException is raised, we want
      // to inform the client somehow that the node can't write there... And setting readFile
      // to null means we won't inform it on the status (as if it hadn't requested us to do
      // the test).
      FileOutputStream fos = null;
      BufferedOutputStream bos = null;
      try {
        fos = new FileOutputStream(result.readFilename);
        bos = new BufferedOutputStream(fos);
        bos.write(result.readContent.getBytes("UTF-8"));
        bos.flush();
      } catch (IOException e) {
        Logger.error(
            this,
            "Got an IOE while creating the file (" + readFile.toString() + ") : " + e.getMessage());
      } finally {
        Closer.close(bos);
        Closer.close(fos);
      }
    }

    return result;
  }
Example #10
File: Yarrow.java Project: saces/fred
  private void seedFromExternalStuff(boolean canBlock) {
    byte[] buf = new byte[32];
    if (File.separatorChar == '/') {
      DataInputStream dis = null;
      FileInputStream fis = null;
      File hwrng = new File("/dev/hwrng");
      if (hwrng.exists() && hwrng.canRead())
        try {
          fis = new FileInputStream(hwrng);
          dis = new DataInputStream(fis);
          dis.readFully(buf);
          consumeBytes(buf);
          dis.readFully(buf);
          consumeBytes(buf);
          dis.close();
        } catch (Throwable t) {
          Logger.normal(this, "Can't read /dev/hwrng even though it exists and is readable: " + t, t);
        } finally {
          Closer.close(dis);
          Closer.close(fis);
        }

      boolean isSystemEntropyAvailable = true;
      // Read some bits from /dev/urandom
      try {
        fis = new FileInputStream("/dev/urandom");
        dis = new DataInputStream(fis);
        dis.readFully(buf);
        consumeBytes(buf);
        dis.readFully(buf);
        consumeBytes(buf);
      } catch (Throwable t) {
        Logger.normal(this, "Can't read /dev/urandom: " + t, t);
        // We can't read it; let's skip /dev/random and seed from SecureRandom.generateSeed()
        canBlock = true;
        isSystemEntropyAvailable = false;
      } finally {
        Closer.close(dis);
        Closer.close(fis);
      }
      if (canBlock && isSystemEntropyAvailable)
        // Read some bits from /dev/random
        try {
          fis = new FileInputStream("/dev/random");
          dis = new DataInputStream(fis);
          dis.readFully(buf);
          consumeBytes(buf);
          dis.readFully(buf);
          consumeBytes(buf);
        } catch (Throwable t) {
          Logger.normal(this, "Can't read /dev/random: " + t, t);
        } finally {
          Closer.close(dis);
          Closer.close(fis);
        }
      fis = null;
    } else
      // Force generateSeed(), since we can't read random data from anywhere else.
      // Anyway, Windows's CAPI won't block.
      canBlock = true;
    if (canBlock) {
      // SecureRandom hopefully acts as a proxy for CAPI on Windows
      buf = sr.generateSeed(32);
      consumeBytes(buf);
      buf = sr.generateSeed(32);
      consumeBytes(buf);
    }
    // A few more bits
    consumeString(Long.toHexString(Runtime.getRuntime().freeMemory()));
    consumeString(Long.toHexString(Runtime.getRuntime().totalMemory()));
  }
Example #11
  private void handleTARArchive(
      ArchiveStoreContext ctx,
      FreenetURI key,
      InputStream data,
      String element,
      ArchiveExtractCallback callback,
      MutableBoolean gotElement,
      boolean throwAtExit,
      ClientContext context)
      throws ArchiveFailureException, ArchiveRestartException {
    if (logMINOR) Logger.minor(this, "Handling a TAR Archive");
    TarArchiveInputStream tarIS = null;
    try {
      tarIS = new TarArchiveInputStream(data);

      // MINOR: Assumes the first entry in the tarball is a directory.
      ArchiveEntry entry;

      byte[] buf = new byte[32768];
      HashSet<String> names = new HashSet<String>();
      boolean gotMetadata = false;

      outerTAR:
      while (true) {
        try {
          entry = tarIS.getNextEntry();
        } catch (IllegalArgumentException e) {
          // Annoyingly, it can throw this on some corruptions...
          throw new ArchiveFailureException("Error reading archive: " + e.getMessage(), e);
        }
        if (entry == null) break;
        if (entry.isDirectory()) continue;
        String name = stripLeadingSlashes(entry.getName());
        if (names.contains(name)) {
          Logger.error(this, "Duplicate key " + name + " in archive " + key);
          continue;
        }
        long size = entry.getSize();
        if (name.equals(".metadata")) gotMetadata = true;
        if (size > maxArchivedFileSize && !name.equals(element)) {
          addErrorElement(
              ctx,
              key,
              name,
              "File too big: "
                  + size
                  + " greater than current archived file size limit "
                  + maxArchivedFileSize,
              true);
        } else {
          // Read the element
          long realLen = 0;
          Bucket output = tempBucketFactory.makeBucket(size);
          OutputStream out = output.getOutputStream();

          try {
            int readBytes;
            while ((readBytes = tarIS.read(buf)) > 0) {
              out.write(buf, 0, readBytes);
              realLen += readBytes;
              if (realLen > maxArchivedFileSize) {
                addErrorElement(
                    ctx,
                    key,
                    name,
                    "File too big: "
                        + maxArchivedFileSize
                        + " greater than current archived file size limit "
                        + maxArchivedFileSize,
                    true);
                out.close();
                out = null;
                output.free();
                continue outerTAR;
              }
            }

          } finally {
            if (out != null) out.close();
          }
          if (size <= maxArchivedFileSize) {
            addStoreElement(ctx, key, name, output, gotElement, element, callback, context);
            names.add(name);
            trimStoredData();
          } else {
            // We are here because they asked for this file.
            callback.gotBucket(output, context);
            gotElement.value = true;
            addErrorElement(
                ctx,
                key,
                name,
                "File too big: "
                    + size
                    + " greater than current archived file size limit "
                    + maxArchivedFileSize,
                true);
          }
        }
      }

      // If no metadata, generate some
      if (!gotMetadata) {
        generateMetadata(ctx, key, names, gotElement, element, callback, context);
        trimStoredData();
      }
      if (throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");

      if (element != null && !gotElement.value) callback.notInArchive(context);

    } catch (IOException e) {
      throw new ArchiveFailureException("Error reading archive: " + e.getMessage(), e);
    } finally {
      Closer.close(tarIS);
    }
  }
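
The read loop above (with the accumulation fixed to realLen += readBytes) guards against entries whose actual content is larger than the size declared in the TAR header. The same bounded-copy guard as a standalone sketch; copyWithLimit is a hypothetical name using only java.io:

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// Hypothetical helper: copy at most maxLen bytes from in to out.
// Returns false (instead of writing past the limit) on overflow, so the
// caller can free the partial output, as handleTARArchive does.
static boolean copyWithLimit(InputStream in, OutputStream out, long maxLen)
    throws IOException {
  byte[] buf = new byte[32768];
  long realLen = 0;
  int readBytes;
  while ((readBytes = in.read(buf)) > 0) {
    realLen += readBytes;
    if (realLen > maxLen) return false;
    out.write(buf, 0, readBytes);
  }
  return true;
}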
Example #12
  /**
   * Extract data to cache. Call synchronized on ctx.
   *
   * @param key The key the data was fetched from.
   * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP | Metadata.ARCHIVE_TAR.
   * @param data The actual data fetched.
   * @param archiveContext The context for the whole fetch process.
   * @param ctx The ArchiveStoreContext for this key.
   * @param element A particular element that the caller is especially interested in, or null.
   * @param callback A callback to be called if we find that element, or if we don't.
   * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
   * @throws ArchiveRestartException If the request needs to be restarted because the archive
   *     changed.
   */
  public void extractToCache(
      FreenetURI key,
      ARCHIVE_TYPE archiveType,
      COMPRESSOR_TYPE ctype,
      final Bucket data,
      ArchiveContext archiveContext,
      ArchiveStoreContext ctx,
      String element,
      ArchiveExtractCallback callback,
      ClientContext context)
      throws ArchiveFailureException, ArchiveRestartException {
    logMINOR = Logger.shouldLog(LogLevel.MINOR, this);

    MutableBoolean gotElement = element != null ? new MutableBoolean() : null;

    if (logMINOR) Logger.minor(this, "Extracting " + key);
    ctx.removeAllCachedItems(this); // flush cache anyway
    final long expectedSize = ctx.getLastSize();
    final long archiveSize = data.size();
    /**
     * Set if we need to throw an ArchiveRestartException rather than returning success, after we
     * have unpacked everything.
     */
    boolean throwAtExit = false;
    if ((expectedSize != -1) && (archiveSize != expectedSize)) {
      throwAtExit = true;
      ctx.setLastSize(archiveSize);
    }
    byte[] expectedHash = ctx.getLastHash();
    if (expectedHash != null) {
      byte[] realHash;
      try {
        realHash = BucketTools.hash(data);
      } catch (IOException e) {
        throw new ArchiveFailureException("Error reading archive data: " + e, e);
      }
      if (!Arrays.equals(realHash, expectedHash)) throwAtExit = true;
      ctx.setLastHash(realHash);
    }

    if (archiveSize > archiveContext.maxArchiveSize)
      throw new ArchiveFailureException(
          "Archive too big (" + archiveSize + " > " + archiveContext.maxArchiveSize + ")!");
    else if (archiveSize <= 0)
      throw new ArchiveFailureException("Archive too small! (" + archiveSize + ')');
    else if (logMINOR)
      Logger.minor(this, "Container size (possibly compressed): " + archiveSize + " for " + data);

    InputStream is = null;
    try {
      final ExceptionWrapper wrapper;
      if ((ctype == null) || (ARCHIVE_TYPE.ZIP == archiveType)) {
        if (logMINOR) Logger.minor(this, "No compression");
        is = data.getInputStream();
        wrapper = null;
      } else if (ctype == COMPRESSOR_TYPE.BZIP2) {
        if (logMINOR) Logger.minor(this, "dealing with BZIP2");
        is = new BZip2CompressorInputStream(data.getInputStream());
        wrapper = null;
      } else if (ctype == COMPRESSOR_TYPE.GZIP) {
        if (logMINOR) Logger.minor(this, "dealing with GZIP");
        is = new GZIPInputStream(data.getInputStream());
        wrapper = null;
      } else if (ctype == COMPRESSOR_TYPE.LZMA_NEW) {
        // LZMA internally uses pipe streams, so we may as well do it here.
        // In fact we need to for LZMA_NEW, because of the properties bytes.
        PipedInputStream pis = new PipedInputStream();
        PipedOutputStream pos = new PipedOutputStream();
        pis.connect(pos);
        final OutputStream os = new BufferedOutputStream(pos);
        wrapper = new ExceptionWrapper();
        context.mainExecutor.execute(
            new Runnable() {

              @Override
              public void run() {
                InputStream is = null;
                try {
                  Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(
                      is = data.getInputStream(), os, data.size(), expectedSize);
                } catch (CompressionOutputSizeException e) {
                  Logger.error(this, "Failed to decompress archive: " + e, e);
                  wrapper.set(e);
                } catch (IOException e) {
                  Logger.error(this, "Failed to decompress archive: " + e, e);
                  wrapper.set(e);
                } finally {
                  try {
                    os.close();
                  } catch (IOException e) {
                    Logger.error(this, "Failed to close PipedOutputStream: " + e, e);
                  }
                  Closer.close(is);
                }
              }
            });
        is = pis;
      } else if (ctype == COMPRESSOR_TYPE.LZMA) {
        if (logMINOR) Logger.minor(this, "dealing with LZMA");
        is = new LzmaInputStream(data.getInputStream());
        wrapper = null;
      } else {
        wrapper = null;
      }

      if (ARCHIVE_TYPE.ZIP == archiveType)
        handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
      else if (ARCHIVE_TYPE.TAR == archiveType)
        handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
      else
        throw new ArchiveFailureException(
            "Unknown or unsupported archive algorithm " + archiveType);
      if (wrapper != null) {
        Exception e = wrapper.get();
        if (e != null)
          throw new ArchiveFailureException(
              "An exception occured decompressing: " + e.getMessage(), e);
      }
    } catch (IOException ioe) {
      throw new ArchiveFailureException("An IOE occured: " + ioe.getMessage(), ioe);
    } finally {
      Closer.close(is);
    }
  }
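
The LZMA_NEW branch above decompresses on a worker thread into a PipedOutputStream while the archive handler reads from the connected PipedInputStream, ferrying any worker-side exception back through ExceptionWrapper. The same pattern reduced to standard classes, with AtomicReference standing in for ExceptionWrapper and a byte-copy loop as a placeholder for the real decompressor:

import java.io.IOException;
import java.io.InputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.concurrent.atomic.AtomicReference;

// Sketch of the producer/consumer decompression pattern used above, with
// AtomicReference playing the ExceptionWrapper role.
public static InputStream decompressInBackground(final InputStream compressed,
    final AtomicReference<Exception> error) throws IOException {
  final PipedOutputStream pos = new PipedOutputStream();
  PipedInputStream pis = new PipedInputStream(pos);
  new Thread(new Runnable() {
    @Override
    public void run() {
      try {
        // Placeholder for the real decompressor (e.g. LZMA) writing to pos.
        int b;
        while ((b = compressed.read()) != -1) pos.write(b);
      } catch (IOException e) {
        error.set(e); // ferry the failure back to the reading thread
      } finally {
        try { pos.close(); } catch (IOException e) { error.set(e); }
      }
    }
  }).start();
  return pis; // caller reads the decompressed data and checks error afterwards
}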
Example #13

  public static void main(String[] args) {
    if (args.length < 1 || args.length > 2) {
      System.err.println(
          "Usage: java freenet.node.simulator.LongTermPushPullTest <unique identifier>");
      System.exit(1);
    }
    String uid = args[0];

    List<String> csvLine = new ArrayList<String>();
    System.out.println("DATE:" + dateFormat.format(today.getTime()));
    csvLine.add(dateFormat.format(today.getTime()));

    System.out.println("Version:" + Version.buildNumber());
    csvLine.add(String.valueOf(Version.buildNumber()));

    int exitCode = 0;
    Node node = null;
    Node node2 = null;
    FileInputStream fis = null;
    File file = new File("many-single-blocks-test-" + uid + ".csv");
    long t1, t2;

    try {

      // INSERT STUFF

      final File dir = new File("longterm-mhk-test-" + uid);
      FileUtil.removeAll(dir);
      RandomSource random =
          NodeStarter.globalTestInit(dir.getPath(), false, LogLevel.ERROR, "", false);
      File seednodes = new File("seednodes.fref");
      if (!seednodes.exists() || seednodes.length() == 0 || !seednodes.canRead()) {
        System.err.println("Unable to read seednodes.fref, it doesn't exist, or is empty");
        System.exit(EXIT_NO_SEEDNODES);
      }

      final File innerDir = new File(dir, Integer.toString(DARKNET_PORT1));
      innerDir.mkdir();
      fis = new FileInputStream(seednodes);
      FileUtil.writeTo(fis, new File(innerDir, "seednodes.fref"));
      fis.close();

      // Create one node
      node =
          NodeStarter.createTestNode(
              DARKNET_PORT1,
              OPENNET_PORT1,
              dir.getPath(),
              false,
              Node.DEFAULT_MAX_HTL,
              0,
              random,
              new PooledExecutor(),
              1000,
              4 * 1024 * 1024,
              true,
              true,
              true,
              true,
              true,
              true,
              true,
              12 * 1024,
              true,
              true,
              false,
              false,
              null);
      Logger.getChain().setThreshold(LogLevel.ERROR);

      // Start it
      node.start(true);
      t1 = System.currentTimeMillis();
      if (!TestUtil.waitForNodes(node)) {
        exitCode = EXIT_FAILED_TARGET;
        return;
      }

      t2 = System.currentTimeMillis();
      System.out.println("SEED-TIME:" + (t2 - t1));
      csvLine.add(String.valueOf(t2 - t1));

      HighLevelSimpleClient client = node.clientCore.makeClient((short) 0, false, false);

      int successes = 0;

      long startInsertsTime = System.currentTimeMillis();

      InsertBatch batch = new InsertBatch(client);

      // Inserts are sloooooow so do them in parallel.

      for (int i = 0; i < INSERTED_BLOCKS; i++) {

        System.err.println("Inserting block " + i);

        RandomAccessBucket single = randomData(node);

        InsertBlock block = new InsertBlock(single, new ClientMetadata(), FreenetURI.EMPTY_CHK_URI);

        batch.startInsert(block);
      }

      batch.waitUntilFinished();
      FreenetURI[] uris = batch.getURIs();
      long[] times = batch.getTimes();
      InsertException[] errors = batch.getErrors();

      for (int i = 0; i < INSERTED_BLOCKS; i++) {
        if (uris[i] != null) {
          csvLine.add(String.valueOf(times[i]));
          csvLine.add(uris[i].toASCIIString());
          System.out.println("Pushed block " + i + " : " + uris[i] + " in " + times[i]);
          successes++;
        } else {
          csvLine.add(InsertException.getShortMessage(errors[i].getMode()));
          csvLine.add("N/A");
          System.out.println("Failed to push block " + i + " : " + errors[i]);
        }
      }

      long endInsertsTime = System.currentTimeMillis();

      System.err.println(
          "Succeeded inserts: "
              + successes
              + " of "
              + INSERTED_BLOCKS
              + " in "
              + (endInsertsTime - startInsertsTime)
              + "ms");

      FetchContext fctx = client.getFetchContext();
      fctx.maxNonSplitfileRetries = 0;
      fctx.maxSplitfileBlockRetries = 0;
      RequestClient requestContext = new RequestClientBuilder().build();

      // PARSE FILE AND FETCH OLD STUFF IF APPROPRIATE

      FreenetURI[] mhkURIs = new FreenetURI[3];
      fis = new FileInputStream(file);
      BufferedReader br = new BufferedReader(new InputStreamReader(fis, ENCODING));
      String line = null;
      GregorianCalendar target = (GregorianCalendar) today.clone();
      target.set(Calendar.HOUR_OF_DAY, 0);
      target.set(Calendar.MINUTE, 0);
      target.set(Calendar.MILLISECOND, 0);
      target.set(Calendar.SECOND, 0);
      GregorianCalendar[] targets = new GregorianCalendar[MAX_N + 1];
      for (int i = 0; i < targets.length; i++) {
        targets[i] = ((GregorianCalendar) target.clone());
        targets[i].add(Calendar.DAY_OF_MONTH, -((1 << i) - 1));
        targets[i].getTime();
      }
      int[] totalFetchesByDelta = new int[MAX_N + 1];
      int[] totalSuccessfulFetchesByDelta = new int[MAX_N + 1];
      long[] totalFetchTimeByDelta = new long[MAX_N + 1];

      loopOverLines:
      while ((line = br.readLine()) != null) {

        for (int i = 0; i < mhkURIs.length; i++) mhkURIs[i] = null;
        // System.out.println("LINE: "+line);
        String[] split = line.split("!");
        Date date = dateFormat.parse(split[0]);
        GregorianCalendar calendar = new GregorianCalendar(TimeZone.getTimeZone("GMT"));
        calendar.setTime(date);
        System.out.println("Date: " + dateFormat.format(calendar.getTime()));
        calendar.set(Calendar.HOUR_OF_DAY, 0);
        calendar.set(Calendar.MINUTE, 0);
        calendar.set(Calendar.MILLISECOND, 0);
        calendar.set(Calendar.SECOND, 0);
        calendar.getTime();
        FreenetURI[] insertedURIs = new FreenetURI[INSERTED_BLOCKS];
        int[] insertTimes = new int[INSERTED_BLOCKS];
        if (split.length < 3) continue;
        int seedTime = Integer.parseInt(split[2]);
        System.out.println("Seed time: " + seedTime);
        if (split.length < 4) continue;

        int token = 3;

        if (split.length < token + INSERTED_BLOCKS * 2) continue;

        for (int i = 0; i < INSERTED_BLOCKS; i++) {
          try {
            insertTimes[i] = Integer.parseInt(split[token]);
          } catch (NumberFormatException e) {
            insertTimes[i] = -1;
          }
          token++;
          try {
            insertedURIs[i] = new FreenetURI(split[token]);
          } catch (MalformedURLException e) {
            insertedURIs[i] = null;
          }
          token++;
          System.out.println("Key insert " + i + " : " + insertedURIs[i] + " in " + insertTimes[i]);
        }
        for (int i = 0; i < targets.length; i++) {
          if (Math.abs(targets[i].getTimeInMillis() - calendar.getTimeInMillis())
              < HOURS.toMillis(12)) {
            System.out.println("Found row for target date " + ((1 << i) - 1) + " days ago.");
            System.out.println("Version: " + split[1]);
            csvLine.add(Integer.toString(i));
            int pulled = 0;
            int inserted = 0;
            for (int j = 0; j < INSERTED_BLOCKS; j++) {
              if (insertedURIs[j] == null) {
                csvLine.add("INSERT FAILED");
                continue;
              }
              inserted++;
              try {
                t1 = System.currentTimeMillis();
                FetchWaiter fw = new FetchWaiter(requestContext);
                client.fetch(insertedURIs[j], 32768, fw, fctx);
                fw.waitForCompletion();
                t2 = System.currentTimeMillis();

                System.out.println("PULL-TIME FOR BLOCK " + j + ": " + (t2 - t1));
                csvLine.add(String.valueOf(t2 - t1));
                pulled++;
              } catch (FetchException e) {
                if (e.getMode() != FetchExceptionMode.ALL_DATA_NOT_FOUND
                    && e.getMode() != FetchExceptionMode.DATA_NOT_FOUND) e.printStackTrace();
                csvLine.add(FetchException.getShortMessage(e.getMode()));
                System.err.println("FAILED PULL FOR BLOCK " + j + ": " + e);
              }
            }
            System.out.println(
                "Pulled "
                    + pulled
                    + " blocks of "
                    + inserted
                    + " from "
                    + ((1 << i) - 1)
                    + " days ago.");
          }
        }

        while (split.length > token + INSERTED_BLOCKS) {
          int delta;
          try {
            delta = Integer.parseInt(split[token]);
          } catch (NumberFormatException e) {
            System.err.println("Unable to parse token " + token + " = \"" + token + "\"");
            System.err.println("This is supposed to be a delta");
            System.err.println(
                "Skipping the rest of the line for date " + dateFormat.format(calendar.getTime()));
            continue loopOverLines;
          }
          System.out.println("Delta: " + ((1 << delta) - 1) + " days");
          token++;
          int totalFetchTime = 0;
          int totalSuccesses = 0;
          int totalFetches = 0;
          for (int i = 0; i < INSERTED_BLOCKS; i++) {
            if (split[token].equals("")) continue;
            int mhkFetchTime = -1;
            totalFetches++;
            try {
              mhkFetchTime = Integer.parseInt(split[token]);
              System.out.println(
                  "Fetched block #" + i + " on " + date + " in " + mhkFetchTime + "ms");
              totalSuccesses++;
              totalFetchTime += mhkFetchTime;
            } catch (NumberFormatException e) {
              System.out.println("Failed block #" + i + " on " + date + " : " + split[token]);
            }
            token++;
          }
          totalFetchesByDelta[delta] += totalFetches;
          totalSuccessfulFetchesByDelta[delta] += totalSuccesses;
          totalFetchTimeByDelta[delta] += totalFetchTime;
          System.err.println(
              "Succeeded: "
                  + totalSuccesses
                  + " of "
                  + totalFetches
                  + " average "
                  + ((double) totalFetchTime) / ((double) totalSuccesses)
                  + "ms for delta "
                  + delta
                  + " on "
                  + dateFormat.format(date));
        }
      }

      System.out.println();
      System.out.println();

      for (int i = 0; i < MAX_N + 1; i++) {
        System.out.println(
            "DELTA: "
                + i
                + " days: Total fetches: "
                + totalFetchesByDelta[i]
                + " total successes "
                + totalSuccessfulFetchesByDelta[i]
                + " = "
                + ((totalSuccessfulFetchesByDelta[i] * 100.0) / totalFetchesByDelta[i])
                + "% in "
                + (totalFetchTimeByDelta[i] * 1.0) / totalSuccessfulFetchesByDelta[i]
                + "ms");
      }

      fis.close();
      fis = null;

    } catch (Throwable t) {
      t.printStackTrace();
      exitCode = EXIT_THREW_SOMETHING;
    } finally {
      try {
        if (node != null) node.park();
      } catch (Throwable tt) {
        // Ignore: best-effort shutdown of the node.
      }
      try {
        if (node2 != null) node2.park();
      } catch (Throwable tt) {
        // Ignore: best-effort shutdown of the node.
      }
      Closer.close(fis);
      writeToStatusLog(file, csvLine);

      System.out.println("Exiting with status " + exitCode);
      System.exit(exitCode);
    }
  }