/**
  * Convert a ClientKeyBlock to a Bucket. If an error occurs, report it via onFailure and return
  * null.
  */
 protected Bucket extract(
     ClientKeyBlock block, Object token, ObjectContainer container, ClientContext context) {
   Bucket data;
   try {
     data =
         block.decode(
             context.getBucketFactory(persistent),
             (int) (Math.min(ctx.maxOutputLength, Integer.MAX_VALUE)),
             false);
   } catch (KeyDecodeException e1) {
     if (Logger.shouldLog(Logger.MINOR, this)) Logger.minor(this, "Decode failure: " + e1, e1);
     onFailure(
         new FetchException(FetchException.BLOCK_DECODE_ERROR, e1.getMessage()),
         token,
         container,
         context);
     return null;
   } catch (TooBigException e) {
     onFailure(
         new FetchException(FetchException.TOO_BIG, e.getMessage()), token, container, context);
     return null;
   } catch (IOException e) {
     Logger.error(this, "Could not capture data - disk full?: " + e, e);
     onFailure(new FetchException(FetchException.BUCKET_ERROR, e), token, container, context);
     return null;
   }
   if (Logger.shouldLog(Logger.MINOR, this))
     Logger.minor(
         this, data == null ? "Could not decode: null" : ("Decoded " + data.size() + " bytes"));
   return data;
 }
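A note on the size cap passed to decode() above: the long limit is clamped to Integer.MAX_VALUE before the narrowing cast, because casting a large long straight to int wraps around. A minimal, self-contained illustration (not Freenet code):

final class ClampDemo {
  // Clamp a long limit to the int range before narrowing, as done in the decode() call above.
  static int clampToInt(long limit) {
    return (int) Math.min(limit, Integer.MAX_VALUE);
  }

  public static void main(String[] args) {
    long huge = 8L * 1024 * 1024 * 1024;   // 8 GiB
    System.out.println((int) huge);        // 0: the naive cast wraps around
    System.out.println(clampToInt(huge));  // 2147483647: the clamped cap
  }
}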
  private byte[] doCompress(byte[] uncompressedData) throws IOException {
    Bucket inBucket = new ArrayBucket(uncompressedData);
    BucketFactory factory = new ArrayBucketFactory();
    Bucket outBucket = Compressor.COMPRESSOR_TYPE.GZIP.compress(inBucket, factory, 32768, 32768);

    InputStream in = outBucket.getInputStream();
    try {
      long size = outBucket.size();
      byte[] outBuf = new byte[(int) size];
      // A single read() may return fewer bytes than requested; readFully() loops until the
      // buffer is filled.
      new DataInputStream(in).readFully(outBuf);
      return outBuf;
    } finally {
      in.close();
    }
  }
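For comparison, here is a minimal, self-contained sketch of the same GZIP round-trip using only java.util.zip rather than Freenet's Compressor/Bucket API; it also shows why a plain InputStream.read() into a buffer is not enough and readFully() is used instead:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

final class GzipRoundTrip {
  // Compress a byte array with the JDK's GZIP implementation.
  static byte[] gzip(byte[] uncompressed) throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    try (GZIPOutputStream gz = new GZIPOutputStream(baos)) {
      gz.write(uncompressed);
    }
    return baos.toByteArray();
  }

  // Decompress into a buffer of known length; readFully() keeps reading until it is full.
  static byte[] gunzip(byte[] compressed, int expectedLength) throws IOException {
    byte[] out = new byte[expectedLength];
    try (DataInputStream in =
        new DataInputStream(new GZIPInputStream(new ByteArrayInputStream(compressed)))) {
      in.readFully(out);
    }
    return out;
  }
}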
 public synchronized CacheFetchResult getShadowBucket(FreenetURI key, boolean noFilter) {
   Object[] downloads = downloadsByURI.getArray(key);
   if (downloads == null) return null;
   for (Object o : downloads) {
     DownloadRequestStatus download = (DownloadRequestStatus) o;
     Bucket data = download.getDataShadow();
     if (data == null) continue;
     if (data.size() == 0) continue;
     if (noFilter && download.filterData) continue;
      // FIXME it probably *is* worth the effort to allow this when the type is overridden on the
      // fetcher, since the user changed the type.
     if (download.overriddenDataType) continue;
     return new CacheFetchResult(
         new ClientMetadata(download.getMIMEType()), new NoFreeBucket(data), download.filterData);
   }
   return null;
 }
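The NoFreeBucket wrapper above hands callers a view they cannot free, so the cached data stays owned by the download. A hedged sketch of the same idea using plain JDK streams (NonClosingInputStream is an illustrative name, not Freenet API):

import java.io.FilterInputStream;
import java.io.InputStream;

final class NonClosingInputStream extends FilterInputStream {
  NonClosingInputStream(InputStream underlying) {
    super(underlying);
  }

  @Override
  public void close() {
    // Deliberately a no-op: the caller must not close the shared underlying stream,
    // just as NoFreeBucket prevents callers from freeing the shared bucket.
  }
}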
 private BlockItem getBlockItem(ObjectContainer container, ClientContext context) {
   try {
     synchronized (this) {
       if (finished) return null;
     }
     if (persistent) {
       if (sourceData == null) {
         Logger.error(
             this,
             "getBlockItem(): sourceData = null but active = " + container.ext().isActive(this));
         return null;
       }
     }
     boolean deactivateBucket = false;
     if (persistent) {
       container.activate(uri, 1);
       deactivateBucket = !container.ext().isActive(sourceData);
       if (deactivateBucket) container.activate(sourceData, 1);
     }
     Bucket data = sourceData.createShadow();
     FreenetURI u = uri;
     if (u.getKeyType().equals("CHK") && !persistent) u = FreenetURI.EMPTY_CHK_URI;
     else u = u.clone();
     if (data == null) {
       data = context.tempBucketFactory.makeBucket(sourceData.size());
       BucketTools.copy(sourceData, data);
     }
     if (persistent) {
       if (deactivateBucket) container.deactivate(sourceData, 1);
       container.deactivate(uri, 1);
     }
     return new BlockItem(
         this, data, isMetadata, compressionCodec, sourceLength, u, hashCode(), persistent);
   } catch (IOException e) {
     fail(new InsertException(InsertException.BUCKET_ERROR, e, null), container, context);
     return null;
   }
 }
Example #5
 @Override
 public long size() {
   return underlying.size() - OVERHEAD;
 }
  /**
   * Extract data to cache. The caller must synchronize on ctx.
   *
   * @param key The key the data was fetched from.
   * @param archiveType The archive type. Must be Metadata.ARCHIVE_ZIP or Metadata.ARCHIVE_TAR.
   * @param ctype The compression codec the container is compressed with, or null if it is not
   *     compressed.
   * @param data The actual data fetched.
   * @param archiveContext The context for the whole fetch process.
   * @param ctx The ArchiveStoreContext for this key.
   * @param element A particular element that the caller is especially interested in, or null.
   * @param callback A callback to be called if we find that element, or if we don't.
   * @param context The ClientContext, used here to schedule the background decompression job.
   * @throws ArchiveFailureException If we could not extract the data, or it was too big, etc.
   * @throws ArchiveRestartException If the request needs to be restarted because the archive
   *     changed.
   */
  public void extractToCache(
      FreenetURI key,
      ARCHIVE_TYPE archiveType,
      COMPRESSOR_TYPE ctype,
      final Bucket data,
      ArchiveContext archiveContext,
      ArchiveStoreContext ctx,
      String element,
      ArchiveExtractCallback callback,
      ClientContext context)
      throws ArchiveFailureException, ArchiveRestartException {
    logMINOR = Logger.shouldLog(LogLevel.MINOR, this);

    MutableBoolean gotElement = element != null ? new MutableBoolean() : null;

    if (logMINOR) Logger.minor(this, "Extracting " + key);
    ctx.removeAllCachedItems(this); // flush cache anyway
    final long expectedSize = ctx.getLastSize();
    final long archiveSize = data.size();
     /**
      * Set if we need to throw an ArchiveRestartException rather than returning success, after we
      * have unpacked everything.
      */
    boolean throwAtExit = false;
    if ((expectedSize != -1) && (archiveSize != expectedSize)) {
      throwAtExit = true;
      ctx.setLastSize(archiveSize);
    }
    byte[] expectedHash = ctx.getLastHash();
    if (expectedHash != null) {
      byte[] realHash;
      try {
        realHash = BucketTools.hash(data);
      } catch (IOException e) {
        throw new ArchiveFailureException("Error reading archive data: " + e, e);
      }
      if (!Arrays.equals(realHash, expectedHash)) throwAtExit = true;
      ctx.setLastHash(realHash);
    }

    if (archiveSize > archiveContext.maxArchiveSize)
      throw new ArchiveFailureException(
          "Archive too big (" + archiveSize + " > " + archiveContext.maxArchiveSize + ")!");
    else if (archiveSize <= 0)
      throw new ArchiveFailureException("Archive too small! (" + archiveSize + ')');
    else if (logMINOR)
      Logger.minor(this, "Container size (possibly compressed): " + archiveSize + " for " + data);

    InputStream is = null;
    try {
      final ExceptionWrapper wrapper;
      if ((ctype == null) || (ARCHIVE_TYPE.ZIP == archiveType)) {
        if (logMINOR) Logger.minor(this, "No compression");
        is = data.getInputStream();
        wrapper = null;
      } else if (ctype == COMPRESSOR_TYPE.BZIP2) {
        if (logMINOR) Logger.minor(this, "dealing with BZIP2");
        is = new BZip2CompressorInputStream(data.getInputStream());
        wrapper = null;
      } else if (ctype == COMPRESSOR_TYPE.GZIP) {
        if (logMINOR) Logger.minor(this, "dealing with GZIP");
        is = new GZIPInputStream(data.getInputStream());
        wrapper = null;
      } else if (ctype == COMPRESSOR_TYPE.LZMA_NEW) {
        // LZMA internally uses pipe streams, so we may as well do it here.
        // In fact we need to for LZMA_NEW, because of the properties bytes.
        PipedInputStream pis = new PipedInputStream();
        PipedOutputStream pos = new PipedOutputStream();
        pis.connect(pos);
        final OutputStream os = new BufferedOutputStream(pos);
        wrapper = new ExceptionWrapper();
        context.mainExecutor.execute(
            new Runnable() {

              @Override
              public void run() {
                InputStream is = null;
                try {
                  Compressor.COMPRESSOR_TYPE.LZMA_NEW.decompress(
                      is = data.getInputStream(), os, data.size(), expectedSize);
                } catch (CompressionOutputSizeException e) {
                  Logger.error(this, "Failed to decompress archive: " + e, e);
                  wrapper.set(e);
                } catch (IOException e) {
                  Logger.error(this, "Failed to decompress archive: " + e, e);
                  wrapper.set(e);
                } finally {
                  try {
                    os.close();
                  } catch (IOException e) {
                    Logger.error(this, "Failed to close PipedOutputStream: " + e, e);
                  }
                  Closer.close(is);
                }
              }
            });
        is = pis;
      } else if (ctype == COMPRESSOR_TYPE.LZMA) {
        if (logMINOR) Logger.minor(this, "dealing with LZMA");
        is = new LzmaInputStream(data.getInputStream());
        wrapper = null;
      } else {
        wrapper = null;
      }

      if (ARCHIVE_TYPE.ZIP == archiveType)
        handleZIPArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
      else if (ARCHIVE_TYPE.TAR == archiveType)
        handleTARArchive(ctx, key, is, element, callback, gotElement, throwAtExit, context);
      else
        throw new ArchiveFailureException(
            "Unknown or unsupported archive algorithm " + archiveType);
      if (wrapper != null) {
        Exception e = wrapper.get();
        if (e != null)
          throw new ArchiveFailureException(
              "An exception occured decompressing: " + e.getMessage(), e);
      }
    } catch (IOException ioe) {
      throw new ArchiveFailureException("An IOE occured: " + ioe.getMessage(), ioe);
    } finally {
      Closer.close(is);
    }
  }
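The LZMA_NEW branch above decompresses on a worker thread into a PipedOutputStream while the caller reads from the connected PipedInputStream. Below is a minimal, JDK-only sketch of that pattern, with GZIP standing in for Freenet's LZMA decompressor and an AtomicReference playing the role of ExceptionWrapper:

import java.io.IOException;
import java.io.InputStream;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;
import java.util.concurrent.atomic.AtomicReference;
import java.util.zip.GZIPInputStream;

final class PipedDecompression {
  /** Returns a stream of decompressed bytes, produced by a background worker thread. */
  static InputStream decompressInBackground(
      InputStream compressed, AtomicReference<Exception> error) throws IOException {
    PipedInputStream pis = new PipedInputStream();
    PipedOutputStream pos = new PipedOutputStream(pis); // connect the two ends
    Thread worker =
        new Thread(
            () -> {
              try (InputStream in = new GZIPInputStream(compressed)) {
                in.transferTo(pos); // stream decompressed bytes to the reader
              } catch (IOException e) {
                error.set(e); // surface the failure to the reader, like ExceptionWrapper above
              } finally {
                try {
                  pos.close(); // unblocks the reader with EOF
                } catch (IOException ignored) {
                }
              }
            },
            "decompressor");
    worker.start();
    return pis;
  }
}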
Example #7
  public void onSuccess(FetchResult result, ClientGetter state, ObjectContainer container) {
    Logger.minor(this, "Succeeded: " + identifier);
    Bucket data = result.asBucket();
    if (persistenceType == PERSIST_FOREVER) {
      if (data != null) container.activate(data, 5);
      if (returnBucket != null) container.activate(returnBucket, 5);
      container.activate(client, 1);
      if (tempFile != null) container.activate(tempFile, 5);
      if (targetFile != null) container.activate(targetFile, 5);
    }
    if (returnBucket != data && !binaryBlob) {
      boolean failed = true;
      synchronized (this) {
        if (finished) {
          Logger.error(
              this,
              "Already finished but onSuccess() for " + this + " data = " + data,
              new Exception("debug"));
          data.free();
          if (persistenceType == PERSIST_FOREVER) data.removeFrom(container);
          return; // Already failed - bucket error maybe??
        }
        if (returnType == ClientGetMessage.RETURN_TYPE_DIRECT && returnBucket == null) {
          // Lost bucket for some reason e.g. bucket error (caused by IOException) on previous try??
          // Recover...
          returnBucket = data;
          failed = false;
        }
      }
      if (failed && persistenceType == PERSIST_FOREVER) {
        if (container.ext().getID(returnBucket) == container.ext().getID(data)) {
          Logger.error(
              this,
              "DB4O BUG DETECTED WITHOUT ARRAY HANDLING! EVIL HORRIBLE BUG! UID(returnBucket)="
                  + container.ext().getID(returnBucket)
                  + " for "
                  + returnBucket
                  + " active="
                  + container.ext().isActive(returnBucket)
                  + " stored = "
                  + container.ext().isStored(returnBucket)
                  + " but UID(data)="
                  + container.ext().getID(data)
                  + " for "
                  + data
                  + " active = "
                  + container.ext().isActive(data)
                  + " stored = "
                  + container.ext().isStored(data));
          // Succeed anyway, hope that the returned bucket is consistent...
          returnBucket = data;
          failed = false;
        }
      }
      if (failed) {
        Logger.error(
            this,
            "returnBucket = " + returnBucket + " but onSuccess() data = " + data,
            new Exception("debug"));
        // Caller guarantees that data == returnBucket
        onFailure(
            new FetchException(FetchException.INTERNAL_ERROR, "Data != returnBucket"),
            null,
            container);
        return;
      }
    }
    boolean dontFree = false;
    // FIXME I don't think this is a problem in this case...? (Disk write while locked..)
    AllDataMessage adm = null;
    synchronized (this) {
      if (succeeded) {
        Logger.error(this, "onSuccess called twice for " + this + " (" + identifier + ')');
        return; // We might be called twice; ignore it if so.
      }
      started = true;
      if (!binaryBlob) this.foundDataMimeType = result.getMimeType();
      else this.foundDataMimeType = BinaryBlob.MIME_TYPE;

      if (returnType == ClientGetMessage.RETURN_TYPE_DIRECT) {
        // Send all the data at once
        // FIXME there should be other options
        // FIXME: CompletionTime is set on finish(); we need to give it the current time here,
        // but that means we won't always return the same value to clients... Does it matter?
        adm =
            new AllDataMessage(
                returnBucket,
                identifier,
                global,
                startupTime,
                System.currentTimeMillis(),
                this.foundDataMimeType);
        if (persistenceType == PERSIST_CONNECTION) adm.setFreeOnSent();
        dontFree = true;
        /*
         * } else if(returnType == ClientGetMessage.RETURN_TYPE_NONE) {
        // Do nothing
         */
      } else if (returnType == ClientGetMessage.RETURN_TYPE_DISK) {
        // Write to temp file, then rename over filename
        if (!FileUtil.renameTo(tempFile, targetFile)) {
          postFetchProtocolErrorMessage =
              new ProtocolErrorMessage(
                  ProtocolErrorMessage.COULD_NOT_RENAME_FILE, false, null, identifier, global);
          // Don't delete temp file, user might want it.
        }
        returnBucket = new FileBucket(targetFile, false, true, false, false, false);
      }
      if (persistenceType == PERSIST_FOREVER && progressPending != null) {
        container.activate(progressPending, 1);
        progressPending.removeFrom(container);
      }
      progressPending = null;
      this.foundDataLength = returnBucket.size();
      this.succeeded = true;
      finished = true;
    }
    trySendDataFoundOrGetFailed(null, container);

    if (adm != null) trySendAllDataMessage(adm, null, container);
    if (!dontFree) {
      data.free();
    }
    if (persistenceType == PERSIST_FOREVER) {
      returnBucket.storeTo(container);
      container.store(this);
    }
    finish(container);
    if (client != null) client.notifySuccess(this, container);
  }
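The RETURN_TYPE_DISK branch above writes to a temp file and then renames it over the target via FileUtil.renameTo. A hedged, JDK-only sketch of the same write-then-rename step using java.nio.file (illustrative, not the Freenet helper):

import java.io.IOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

final class RenameOver {
  /** Returns true if the temp file now sits at the target path. */
  static boolean renameOver(Path tempFile, Path targetFile) {
    try {
      // Prefer an atomic rename so readers never observe a half-written target file.
      Files.move(tempFile, targetFile, StandardCopyOption.ATOMIC_MOVE);
      return true;
    } catch (AtomicMoveNotSupportedException e) {
      try {
        // Fall back to a non-atomic replace where the filesystem cannot rename atomically.
        Files.move(tempFile, targetFile, StandardCopyOption.REPLACE_EXISTING);
        return true;
      } catch (IOException e2) {
        return false; // keep the temp file around, as the code above does
      }
    } catch (IOException e) {
      return false; // keep the temp file around, as the code above does
    }
  }
}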
Example #8
  /**
   * Create a ClientGet from a request serialized to a SimpleFieldSet. Can throw, and does minimal
   * verification, as it is dealing with data supposedly serialized out by the node.
   *
   * @throws IOException
   * @throws FetchException
   */
  public ClientGet(SimpleFieldSet fs, FCPClient client2, FCPServer server)
      throws IOException, FetchException {
    super(fs, client2);

    returnType = ClientGetMessage.parseValidReturnType(fs.get("ReturnType"));
    String f = fs.get("Filename");
    if (f != null) targetFile = new File(f);
    else targetFile = null;
    f = fs.get("TempFilename");
    if (f != null) tempFile = new File(f);
    else tempFile = null;
    boolean ignoreDS = Fields.stringToBool(fs.get("IgnoreDS"), false);
    boolean dsOnly = Fields.stringToBool(fs.get("DSOnly"), false);
    int maxRetries = Integer.parseInt(fs.get("MaxRetries"));
    fctx = new FetchContext(server.defaultFetchContext, FetchContext.IDENTICAL_MASK, false, null);
    fctx.eventProducer.addEventListener(this);
    // ignoreDS
    fctx.localRequestOnly = dsOnly;
    fctx.ignoreStore = ignoreDS;
    fctx.maxNonSplitfileRetries = maxRetries;
    fctx.maxSplitfileBlockRetries = maxRetries;
    binaryBlob = Fields.stringToBool(fs.get("BinaryBlob"), false);
    succeeded = Fields.stringToBool(fs.get("Succeeded"), false);
    if (finished) {
      if (succeeded) {
        foundDataLength = Long.parseLong(fs.get("FoundDataLength"));
        foundDataMimeType = fs.get("FoundDataMimeType");
        SimpleFieldSet fs1 = fs.subset("PostFetchProtocolError");
        if (fs1 != null) postFetchProtocolErrorMessage = new ProtocolErrorMessage(fs1);
      } else {
        getFailedMessage = new GetFailedMessage(fs.subset("GetFailed"), false);
      }
    }
    Bucket ret = null;
    if (returnType == ClientGetMessage.RETURN_TYPE_DISK) {
      if (succeeded) {
        ret = new FileBucket(targetFile, false, true, false, false, false);
      } else {
        ret = new FileBucket(tempFile, false, true, false, false, false);
      }
    } else if (returnType == ClientGetMessage.RETURN_TYPE_NONE) {
      ret = new NullBucket();
    } else if (returnType == ClientGetMessage.RETURN_TYPE_DIRECT) {
      try {
        ret =
            SerializableToFieldSetBucketUtil.create(
                fs.subset("ReturnBucket"),
                server.core.random,
                server.core.persistentTempBucketFactory);
        if (ret == null) throw new CannotCreateFromFieldSetException("ret == null");
      } catch (CannotCreateFromFieldSetException e) {
        Logger.error(this, "Cannot read: " + this + " : " + e, e);
        try {
          // Create a new temp bucket
          if (persistenceType == PERSIST_FOREVER)
            ret = server.core.persistentTempBucketFactory.makeBucket(fctx.maxOutputLength);
          else ret = server.core.tempBucketFactory.makeBucket(fctx.maxOutputLength);
        } catch (IOException e1) {
          Logger.error(this, "Cannot create bucket for temp storage: " + e, e);
          getter = null;
          returnBucket = null;
          throw new FetchException(FetchException.BUCKET_ERROR, e);
        }
      }
    } else {
      throw new IllegalArgumentException();
    }
    if (succeeded) {
      if (foundDataLength < ret.size()) {
        Logger.error(this, "Failing " + identifier + " because lost data");
        succeeded = false;
      }
    }
    if (ret == null)
      Logger.error(
          this, "Impossible: ret = null in SFS constructor for " + this, new Exception("debug"));
    returnBucket = ret;

    String[] allowed = fs.getAll("AllowedMIMETypes");
    if (allowed != null) {
      fctx.allowedMIMETypes = new HashSet<String>();
      for (String a : allowed) fctx.allowedMIMETypes.add(a);
    }

    getter =
        new ClientGetter(
            this,
            uri,
            fctx,
            priorityClass,
            lowLevelClient,
            binaryBlob ? new NullBucket() : returnBucket,
            binaryBlob ? returnBucket : null);

    if (finished && succeeded)
      allDataPending =
          new AllDataMessage(
              returnBucket,
              identifier,
              global,
              startupTime,
              completionTime,
              this.foundDataMimeType);
  }
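The constructor above parses fields tolerantly, e.g. Fields.stringToBool(fs.get("IgnoreDS"), false) falls back to a default when the field is missing. A minimal sketch of that parsing style using a plain Map<String,String> in place of SimpleFieldSet (illustrative only, not the Freenet API):

import java.util.Map;

final class FieldParsing {
  /** Returns the mapped boolean, or def when the key is missing or not "true"/"false". */
  static boolean parseBool(Map<String, String> fields, String key, boolean def) {
    String v = fields.get(key);
    if (v == null) return def;
    if (v.equalsIgnoreCase("true")) return true;
    if (v.equalsIgnoreCase("false")) return false;
    return def;
  }

  /** Returns the mapped int, or def when the key is missing or malformed. */
  static int parseInt(Map<String, String> fields, String key, int def) {
    try {
      return Integer.parseInt(fields.get(key)); // parseInt(null) also throws, so def is returned
    } catch (NumberFormatException e) {
      return def;
    }
  }
}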
Example #9
 public long size() {
   return bucket.size();
 }