Example #1
 @Override
 public OutputStream getOutputStream() throws IOException {
   synchronized (this) {
     if (readOnly) throw new IOException("Read only");
   }
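   // Wrap the plaintext stream in authenticated AES encryption (AEAD), keyed with this
   // bucket's key and using the node's global secure RNG, so data is encrypted as it is written.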
   OutputStream os = underlying.getOutputStream();
   return AEADOutputStream.createAES(os, key, NodeStarter.getGlobalSecureRandom());
 }
  /**
   * Callers must synchronize on this <code>WoTMessageListInserter</code> and then on the
   * <code>WoTMessageManager</code>, in that order, when using this function.
   */
  private void insertMessageList(WoTOwnMessageList list)
      throws TransformerException, ParserConfigurationException, NoSuchMessageException,
          IOException, InsertException {
    Bucket tempB = mTBF.makeBucket(4096); /* TODO: set to a reasonable value */
    OutputStream os = null;

    try {
      os = tempB.getOutputStream();
      // This is what requires synchronization on the WoTMessageManager: while a message list is
      // marked as "being inserted" it cannot be modified anymore, so it must be guaranteed that
      // the "being inserted" mark does not change while we encode the XML etc.
      mMessageManager.onMessageListInsertStarted(list);

      mXML.encode(mMessageManager, list, os);
      os.close();
      os = null;
      tempB.setReadOnly();

      /* We do not specify a ClientMetadata with a MIME type because that would result in the insertion of an additional CHK */
      InsertBlock ib = new InsertBlock(tempB, null, list.getInsertURI());
      InsertContext ictx = mClient.getInsertContext(true);

      ClientPutter pu =
          mClient.insert(
              ib, false, null, false, ictx, this, RequestStarter.INTERACTIVE_PRIORITY_CLASS);
      addInsert(pu);
      tempB = null;

      if (logDEBUG)
        Logger.debug(this, "Started insert of WoTOwnMessageList at request URI " + list.getURI());
    } finally {
      if (tempB != null) tempB.free();
      Closer.close(os);
    }
  }
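  // Hedged usage sketch (caller names assumed for illustration, not taken from this file)
  // showing the lock order the Javadoc above requires:
  //   synchronized (inserter) {                    // the WoTMessageListInserter
  //     synchronized (inserter.mMessageManager) {  // then the WoTMessageManager
  //       inserter.insertMessageList(list);
  //     }
  //   }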
  private void handleZIPArchive(
      ArchiveStoreContext ctx,
      FreenetURI key,
      InputStream data,
      String element,
      ArchiveExtractCallback callback,
      MutableBoolean gotElement,
      boolean throwAtExit,
      ClientContext context)
      throws ArchiveFailureException, ArchiveRestartException {
    if (logMINOR) Logger.minor(this, "Handling a ZIP Archive");
    ZipInputStream zis = null;
    try {
      zis = new ZipInputStream(data);

      // MINOR: Assumes the first entry in the zip is a directory.
      ZipEntry entry;

      byte[] buf = new byte[32768];
      HashSet<String> names = new HashSet<String>();
      boolean gotMetadata = false;
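      // Walk every entry; duplicate names are skipped, and oversized entries are recorded as
      // error elements unless they are the explicitly requested element.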

      outerZIP:
      while (true) {
        entry = zis.getNextEntry();
        if (entry == null) break;
        if (entry.isDirectory()) continue;
        String name = stripLeadingSlashes(entry.getName());
        if (names.contains(name)) {
          Logger.error(this, "Duplicate key " + name + " in archive " + key);
          continue;
        }
        long size = entry.getSize();
        if (name.equals(".metadata")) gotMetadata = true;
        if (size > maxArchivedFileSize && !name.equals(element)) {
          addErrorElement(
              ctx,
              key,
              name,
              "File too big: "
                  + maxArchivedFileSize
                  + " greater than current archived file size limit "
                  + maxArchivedFileSize,
              true);
        } else {
          // Read the element
          long realLen = 0;
          Bucket output = tempBucketFactory.makeBucket(size);
          OutputStream out = output.getOutputStream();
          try {
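            // Copy the entry while tracking how many bytes were actually read (realLen),
            // since ZipEntry.getSize() can be unknown; bail out if the limit is exceeded.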

            int readBytes;
            while ((readBytes = zis.read(buf)) > 0) {
              out.write(buf, 0, readBytes);
              realLen += readBytes;
              if (realLen > maxArchivedFileSize) {
                addErrorElement(
                    ctx,
                    key,
                    name,
                    "File too big: "
                        + maxArchivedFileSize
                        + " greater than current archived file size limit "
                        + maxArchivedFileSize,
                    true);
                out.close();
                out = null;
                output.free();
                continue outerZIP;
              }
            }

          } finally {
            if (out != null) out.close();
          }
          if (size <= maxArchivedFileSize) {
            addStoreElement(ctx, key, name, output, gotElement, element, callback, context);
            names.add(name);
            trimStoredData();
          } else {
            // We are here because they asked for this file.
            callback.gotBucket(output, context);
            gotElement.value = true;
            addErrorElement(
                ctx,
                key,
                name,
                "File too big: "
                    + size
                    + " greater than current archived file size limit "
                    + maxArchivedFileSize,
                true);
          }
        }
      }

      // If no metadata, generate some
      if (!gotMetadata) {
        generateMetadata(ctx, key, names, gotElement, element, callback, context);
        trimStoredData();
      }
      if (throwAtExit) throw new ArchiveRestartException("Archive changed on re-fetch");

      if ((!gotElement.value) && element != null) callback.notInArchive(context);

    } catch (IOException e) {
      throw new ArchiveFailureException("Error reading archive: " + e.getMessage(), e);
    } finally {
      if (zis != null) {
        try {
          zis.close();
        } catch (IOException e) {
          Logger.error(this, "Failed to close stream: " + e, e);
        }
      }
    }
  }
    public void parseSubIndex() throws TaskAbortException {
      synchronized (parsingSubindex) {
        // Transfer all requests waiting on this subindex to the parsing list
        synchronized (waitingOnSubindex) {
          parsingSubindex.addAll(waitingOnSubindex);
          waitingOnSubindex.removeAll(parsingSubindex);
        }
        // Set status of all those about to be parsed to PARSE
        for (FindRequest r : parsingSubindex) r.setStage(FindRequest.Stages.PARSE);

        // Multi-stage parse to minimise memory usage.

        // Stage 1: Extract the declaration (first tag), copy everything before "<files " to one
        // bucket, plus everything after "</files>".
        // Copy the declaration, plus everything between the two (inclusive) to another bucket.

        Bucket mainBucket, filesBucket;

        try {
          InputStream is = bucket.getInputStream();
          mainBucket = pr.getNode().clientCore.tempBucketFactory.makeBucket(-1);
          filesBucket = pr.getNode().clientCore.tempBucketFactory.makeBucket(-1);
          OutputStream mainOS = new BufferedOutputStream(mainBucket.getOutputStream());
          OutputStream filesOS = new BufferedOutputStream(filesBucket.getOutputStream());
          // OutputStream mainOS = new BufferedOutputStream(new FileOutputStream("main.tmp"));
          // OutputStream filesOS = new BufferedOutputStream(new FileOutputStream("files.tmp"));

          BufferedInputStream bis = new BufferedInputStream(is);

          byte greaterThan = ">".getBytes("UTF-8")[0];
          byte[] filesPrefix = "<files ".getBytes("UTF-8");
          byte[] filesPrefixAlt = "<files>".getBytes("UTF-8");
          assert (filesPrefix.length == filesPrefixAlt.length);
          byte[] filesEnd = "</files>".getBytes("UTF-8");

          final int MODE_SEEKING_DECLARATION = 1;
          final int MODE_SEEKING_FILES = 2;
          final int MODE_COPYING_FILES = 3;
          final int MODE_COPYING_REST = 4;
          int mode = MODE_SEEKING_DECLARATION;
          int b;
          byte[] declarationBuf = new byte[100];
          int declarationPtr = 0;
          byte[] prefixBuffer = new byte[filesPrefix.length];
          int prefixPtr = 0;
          byte[] endBuffer = new byte[filesEnd.length];
          int endPtr = 0;
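          // Byte-level state machine: slide fixed-size windows over the stream to spot the
          // XML declaration, the opening <files ...> tag, and the closing </files> tag,
          // routing each byte to mainOS or filesOS accordingly.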
          while ((b = bis.read()) != -1) {
            if (mode == MODE_SEEKING_DECLARATION) {
              if (declarationPtr == declarationBuf.length)
                throw new TaskAbortException("Could not split up XML: declaration too long", null);
              declarationBuf[declarationPtr++] = (byte) b;
              mainOS.write(b);
              filesOS.write(b);
              if (b == greaterThan) {
                mode = MODE_SEEKING_FILES;
              }
            } else if (mode == MODE_SEEKING_FILES) {
              if (prefixPtr != prefixBuffer.length) {
                prefixBuffer[prefixPtr++] = (byte) b;
              } else {
                if (Fields.byteArrayEqual(filesPrefix, prefixBuffer)
                    || Fields.byteArrayEqual(filesPrefixAlt, prefixBuffer)) {
                  mode = MODE_COPYING_FILES;
                  filesOS.write(prefixBuffer);
                  filesOS.write(b);
                } else {
                  mainOS.write(prefixBuffer[0]);
                  System.arraycopy(prefixBuffer, 1, prefixBuffer, 0, prefixBuffer.length - 1);
                  prefixBuffer[prefixBuffer.length - 1] = (byte) b;
                }
              }
            } else if (mode == MODE_COPYING_FILES) {
              if (endPtr != endBuffer.length) {
                endBuffer[endPtr++] = (byte) b;
              } else {
                if (Fields.byteArrayEqual(filesEnd, endBuffer)) {
                  mode = MODE_COPYING_REST;
                  filesOS.write(endBuffer);
                  mainOS.write(b);
                } else {
                  filesOS.write(endBuffer[0]);
                  System.arraycopy(endBuffer, 1, endBuffer, 0, endBuffer.length - 1);
                  endBuffer[endBuffer.length - 1] = (byte) b;
                }
              }
            } else if (mode == MODE_COPYING_REST) {
              mainOS.write(b);
            }
          }

          if (mode != MODE_COPYING_REST)
            throw new TaskAbortException("Could not split up XML: Last mode was " + mode, null);

          bis.close();
          mainOS.close();
          filesOS.close();
        } catch (IOException e) {
          throw new TaskAbortException("Could not split XML: ", e);
        }

        if (logMINOR) Logger.minor(this, "Finished splitting XML");

        try {

          SAXParserFactory factory = SAXParserFactory.newInstance();
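          // Disallow DOCTYPE declarations so the parser rejects external entities (XXE).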
          factory.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
          SAXParser saxParser = factory.newSAXParser();

          // Stage 2: Parse the first bucket, find the keyword we want, find the file id's.

          InputStream is = mainBucket.getInputStream();
          StageTwoHandler stageTwoHandler = new StageTwoHandler();
          saxParser.parse(is, stageTwoHandler);
          if (logMINOR) Logger.minor(this, "Finished stage two XML parse");
          is.close();

          // Stage 3: Parse the second bucket, extract the <file>'s for the specific ID's.

          is = filesBucket.getInputStream();
          StageThreeHandler stageThreeHandler = new StageThreeHandler();
          saxParser.parse(is, stageThreeHandler);
          if (logMINOR) Logger.minor(this, "Finished stage three XML parse");
          is.close();

          if (logMINOR) Logger.minor(this, "parsing finished " + parsingSubindex.toString());
          for (FindRequest findRequest : parsingSubindex) {
            findRequest.setFinished();
          }
          parsingSubindex.clear();
        } catch (Exception err) {
          Logger.error(this, "Error parsing " + filename, err);
          throw new TaskAbortException("Could not parse XML: ", err);
        }
      }
    }
  public static void main(String[] args)
      throws InvalidThresholdException, IOException, NodeInitException, InterruptedException {
    Node node = null;
    Node secondNode = null;
    try {
      String ipOverride = null;
      if (args.length > 0) ipOverride = args[0];
      File dir = new File("bootstrap-push-pull-test");
      FileUtil.removeAll(dir);
      RandomSource random =
          NodeStarter.globalTestInit(dir.getPath(), false, LogLevel.ERROR, "", false);
      File seednodes = new File("seednodes.fref");
      if (!seednodes.exists() || seednodes.length() == 0 || !seednodes.canRead()) {
        System.err.println("Unable to read seednodes.fref, it doesn't exist, or is empty");
        System.exit(EXIT_NO_SEEDNODES);
      }
      File innerDir = new File(dir, Integer.toString(DARKNET_PORT1));
      innerDir.mkdir();
      FileInputStream fis = new FileInputStream(seednodes);
      FileUtil.writeTo(fis, new File(innerDir, "seednodes.fref"));
      fis.close();
      // Create one node
      Executor executor = new PooledExecutor();
      node =
          NodeStarter.createTestNode(
              DARKNET_PORT1,
              OPENNET_PORT1,
              dir.getPath(),
              false,
              Node.DEFAULT_MAX_HTL,
              0,
              random,
              executor,
              1000,
              5 * 1024 * 1024,
              true,
              true,
              true,
              true,
              true,
              true,
              true,
              12 * 1024,
              false,
              true,
              false,
              false,
              ipOverride);
      // NodeCrypto.DISABLE_GROUP_STRIP = true;
      // Logger.setupStdoutLogging(LogLevel.MINOR,
      // "freenet:NORMAL,freenet.node.NodeDispatcher:MINOR,freenet.node.FNPPacketMangler:MINOR");
      Logger.getChain().setThreshold(LogLevel.ERROR); // kill logging
      // Start it
      node.start(true);
      if (!TestUtil.waitForNodes(node)) {
        node.park();
        System.exit(EXIT_FAILED_TARGET);
      }
      System.err.println("Creating test data: " + TEST_SIZE + " bytes.");
      Bucket data = node.clientCore.tempBucketFactory.makeBucket(TEST_SIZE);
      OutputStream os = data.getOutputStream();
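      // Fill the bucket with TEST_SIZE bytes of fast pseudo-random data in 4 KiB chunks.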
      byte[] buf = new byte[4096];
      for (long written = 0; written < TEST_SIZE; ) {
        node.fastWeakRandom.nextBytes(buf);
        int toWrite = (int) Math.min(TEST_SIZE - written, buf.length);
        os.write(buf, 0, toWrite);
        written += toWrite;
      }
      os.close();
      System.err.println("Inserting test data.");
      HighLevelSimpleClient client = node.clientCore.makeClient((short) 0);
      InsertBlock block = new InsertBlock(data, new ClientMetadata(), FreenetURI.EMPTY_CHK_URI);
      long startInsertTime = System.currentTimeMillis();
      FreenetURI uri;
      try {
        uri = client.insert(block, false, null);
      } catch (InsertException e) {
        System.err.println("INSERT FAILED: " + e);
        e.printStackTrace();
        System.exit(EXIT_INSERT_FAILED);
        return;
      }
      long endInsertTime = System.currentTimeMillis();
      System.err.println(
          "RESULT: Insert took "
              + (endInsertTime - startInsertTime)
              + "ms ("
              + TimeUtil.formatTime(endInsertTime - startInsertTime)
              + ") to "
              + uri
              + " .");
      node.park();

      // Bootstrap a second node.
      File secondInnerDir = new File(dir, Integer.toString(DARKNET_PORT2));
      secondInnerDir.mkdir();
      fis = new FileInputStream(seednodes);
      FileUtil.writeTo(fis, new File(secondInnerDir, "seednodes.fref"));
      fis.close();
      executor = new PooledExecutor();
      secondNode =
          NodeStarter.createTestNode(
              DARKNET_PORT2,
              OPENNET_PORT2,
              dir.getPath(),
              false,
              Node.DEFAULT_MAX_HTL,
              0,
              random,
              executor,
              1000,
              5 * 1024 * 1024,
              true,
              true,
              true,
              true,
              true,
              true,
              true,
              12 * 1024,
              false,
              true,
              false,
              false,
              ipOverride);
      secondNode.start(true);
      if (!TestUtil.waitForNodes(secondNode)) {
        secondNode.park();
        System.exit(EXIT_FAILED_TARGET);
      }

      // Fetch the data
      long startFetchTime = System.currentTimeMillis();
      client = secondNode.clientCore.makeClient((short) 0);
      try {
        client.fetch(uri);
      } catch (FetchException e) {
        System.err.println("FETCH FAILED: " + e);
        e.printStackTrace();
        System.exit(EXIT_FETCH_FAILED);
        return;
      }
      long endFetchTime = System.currentTimeMillis();
      System.err.println(
          "RESULT: Fetch took "
              + (endFetchTime - startFetchTime)
              + "ms ("
              + TimeUtil.formatTime(endFetchTime - startFetchTime)
              + ") of "
              + uri
              + " .");
      secondNode.park();
      System.exit(0);
    } catch (Throwable t) {
      System.err.println("CAUGHT: " + t);
      t.printStackTrace();
      try {
        if (node != null) node.park();
      } catch (Throwable t1) {
        // Best-effort shutdown; ignore secondary failures.
      }
      try {
        if (secondNode != null) secondNode.park();
      } catch (Throwable t1) {
        // Best-effort shutdown; ignore secondary failures.
      }

      System.exit(EXIT_THREW_SOMETHING);
    }
  }
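 /**
  * Delegating wrapper: refuses to hand out a stream once the underlying bucket has been freed.
  */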
 public OutputStream getOutputStream() throws IOException {
   if (freed) throw new IOException("Already freed");
   return bucket.getOutputStream();
 }