Code example #1
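An InputChecker-style readChunk override: it reads the expected checksum for the chunk from the sums stream, reads the corresponding data bytes from the raw stream, and reports a ChecksumException when data bytes remain after the checksum file has been exhausted.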
 @Override
 protected int readChunk(long pos, byte[] buf, int offset, int len, byte[] checksum)
     throws IOException {
   boolean eof = false;
   if (needChecksum()) {
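     // Seek the checksum stream to the entry for this chunk and read the
     // expected checksum; EOF here means the sums file has no entry for pos.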
     try {
       long checksumPos = getChecksumFilePos(pos);
       if (checksumPos != sums.getPos()) {
         sums.seek(checksumPos);
       }
       sums.readFully(checksum);
     } catch (EOFException e) {
       eof = true;
     }
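     // Each checksum covers exactly bytesPerSum bytes of data.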
     len = bytesPerSum;
   }
   if (pos != datas.getPos()) {
     datas.seek(pos);
   }
   int nread = readFully(datas, buf, offset, len);
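   // Data bytes remain past the end of the checksum file: the pair is inconsistent.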
   if (eof && nread > 0) {
     throw new ChecksumException("Checksum error: " + file + " at " + pos, pos);
   }
   return nread;
 }
Code example #2
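The matching constructor for the checker above: it opens the raw data stream and the companion checksum file, validates the checksum file's version header, and falls back to unverified reads when the checksum file is missing (quietly) or unreadable (with a warning).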
    public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file, int bufferSize)
        throws IOException {
      super(file, fs.getFileStatus(file).getReplication());
      this.datas = fs.getRawFileSystem().open(file, bufferSize);
      this.fs = fs;
      Path sumFile = fs.getChecksumFile(file);
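      // Open the companion checksum file; any failure below downgrades this
      // stream to unverified reads instead of failing the open.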
      try {
        int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(), bufferSize);
        sums = fs.getRawFileSystem().open(sumFile, sumBufferSize);

        byte[] version = new byte[CHECKSUM_VERSION.length];
        sums.readFully(version);
        if (!Arrays.equals(version, CHECKSUM_VERSION))
          throw new IOException("Not a checksum file: " + sumFile);
        this.bytesPerSum = sums.readInt();
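        // Verify with CRC32: one 4-byte checksum per bytesPerSum data bytes.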
        set(fs.verifyChecksum, new CRC32(), bytesPerSum, 4);
      } catch (FileNotFoundException e) { // quietly ignore
        set(fs.verifyChecksum, null, 1, 0);
      } catch (IOException e) { // loudly ignore
        LOG.warn(
            "Problem opening checksum file: "
                + file
                + ".  Ignoring exception: "
                + StringUtils.stringifyException(e));
        set(fs.verifyChecksum, null, 1, 0);
      }
    }
Code example #3
File: HarFileSystem.java  Project: aixuebo/had2.6.0
 /** Positioned read into the enclosing archive; the entry's start offset is added to {@code pos}. */
 @Override
 public void readFully(long pos, byte[] b, int offset, int length) throws IOException {
   if (start + length + pos > end) {
     throw new IOException("Not enough bytes to read.");
   }
   underLyingStream.readFully(pos + start, b, offset, length);
 }
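For reference, a minimal usage sketch of the positioned readFully contract this override implements; the helper name and the 16-byte header size are hypothetical, and the org.apache.hadoop.fs types (FileSystem, FSDataInputStream, Path) plus java.io.IOException are assumed to be imported:

 // Hypothetical helper: read a fixed-size header at an absolute offset.
 // readFully(pos, buf, off, len) either fills the requested range completely
 // or throws EOFException, and it does not move the stream's read position.
 static byte[] readHeader(FileSystem fs, Path file) throws IOException {
   byte[] header = new byte[16];
   try (FSDataInputStream in = fs.open(file)) {
     in.readFully(0L, header, 0, header.length);
   }
   return header;
 }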
Code example #4
File: PersistHdfs.java  Project: jayfans3/h2o
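Recursively walks an HDFS directory and registers every file in the distributed key-value store, recording per-file success or failure in the two JSON arrays.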
 private static void addFolder(FileSystem fs, Path p, JsonArray succeeded, JsonArray failed) {
   try {
     if (fs == null) return;
     for (FileStatus file : fs.listStatus(p)) {
       Path pfs = file.getPath();
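       // Recurse into subdirectories; import each regular file under its full path.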
       if (file.isDir()) {
         addFolder(fs, pfs, succeeded, failed);
       } else {
         Key k = Key.make(pfs.toString());
         long size = file.getLen();
         Value val = null;
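         // Dispatch on extension: JSON models, HEX value arrays, large files
         // wrapped lazily, or plain values read from disk.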
         if (pfs.getName().endsWith(Extensions.JSON)) {
           JsonParser parser = new JsonParser();
           JsonObject json = parser.parse(new InputStreamReader(fs.open(pfs))).getAsJsonObject();
           JsonElement v = json.get(Constants.VERSION);
           if (v == null) throw new RuntimeException("Missing version");
           JsonElement type = json.get(Constants.TYPE);
           if (type == null) throw new RuntimeException("Missing type");
            Class<?> c = Class.forName(type.getAsString());
           OldModel model = (OldModel) c.newInstance();
           model.fromJson(json);
         } else if (pfs.getName().endsWith(Extensions.HEX)) { // Hex file?
           FSDataInputStream s = fs.open(pfs);
           int sz = (int) Math.min(1L << 20, size); // Read up to the 1st meg
           byte[] mem = MemoryManager.malloc1(sz);
           s.readFully(mem);
           // Convert to a ValueArray (hope it fits in 1Meg!)
           ValueArray ary = new ValueArray(k, 0).read(new AutoBuffer(mem));
           val = new Value(k, ary, Value.HDFS);
         } else if (size >= 2 * ValueArray.CHUNK_SZ) {
           val =
               new Value(
                   k,
                   new ValueArray(k, size),
                   Value.HDFS); // ValueArray byte wrapper over a large file
         } else {
           val = new Value(k, (int) size, Value.HDFS); // Plain Value
           val.setdsk();
         }
         DKV.put(k, val);
         Log.info("PersistHdfs: DKV.put(" + k + ")");
         JsonObject o = new JsonObject();
         o.addProperty(Constants.KEY, k.toString());
         o.addProperty(Constants.FILE, pfs.toString());
         o.addProperty(Constants.VALUE_SIZE, file.getLen());
         succeeded.add(o);
       }
     }
   } catch (Exception e) {
     Log.err(e);
     JsonObject o = new JsonObject();
     o.addProperty(Constants.FILE, p.toString());
     o.addProperty(Constants.ERROR, e.getMessage());
     failed.add(o);
   }
 }
Code example #5
  /**
   * Checks that the "sequential reads before prefetch" property can be overridden
   * when opening a file.
   *
   * @throws Exception If failed.
   */
  public void testOpenPrefetchOverride() throws Exception {
    create(igfsSecondary, paths(DIR, SUBDIR), paths(FILE));

    // Write enough data to the secondary file system.
    final int blockSize = IGFS_BLOCK_SIZE;

    IgfsOutputStream out = igfsSecondary.append(FILE, false);

    int totalWritten = 0;

    while (totalWritten < blockSize * 2 + chunk.length) {
      out.write(chunk);

      totalWritten += chunk.length;
    }

    out.close();

    awaitFileClose(igfsSecondary.asSecondary(), FILE);

    // Instantiate file system with overridden "seq reads before prefetch" property.
    Configuration cfg = new Configuration();

    cfg.addResource(U.resolveIgniteUrl(PRIMARY_CFG));

    int seqReads = SEQ_READS_BEFORE_PREFETCH + 1;

    cfg.setInt(String.format(PARAM_IGFS_SEQ_READS_BEFORE_PREFETCH, "igfs:grid@"), seqReads);

    FileSystem fs = FileSystem.get(new URI(PRIMARY_URI), cfg);

    // Read the first two blocks.
    Path fsHome = new Path(PRIMARY_URI);
    Path dir = new Path(fsHome, DIR.name());
    Path subdir = new Path(dir, SUBDIR.name());
    Path file = new Path(subdir, FILE.name());

    FSDataInputStream fsIn = fs.open(file);

    final byte[] readBuf = new byte[blockSize * 2];

    fsIn.readFully(0, readBuf, 0, readBuf.length);

    // Wait for a while for prefetch to finish (if any).
    IgfsMetaManager meta = igfs.context().meta();

    IgfsFileInfo info = meta.info(meta.fileId(FILE));

    IgfsBlockKey key = new IgfsBlockKey(info.id(), info.affinityKey(), info.evictExclude(), 2);

    IgniteCache<IgfsBlockKey, byte[]> dataCache =
        igfs.context().kernalContext().cache().jcache(igfs.configuration().getDataCacheName());

    for (int i = 0; i < 10; i++) {
      if (dataCache.containsKey(key)) break;
      else U.sleep(100);
    }

    fsIn.close();

    // Remove the file from the secondary file system.
    igfsSecondary.delete(FILE, false);

    // Try reading the third block. Should fail.
    GridTestUtils.assertThrows(
        log,
        new Callable<Object>() {
          @Override
          public Object call() throws Exception {
            IgfsInputStream in0 = igfs.open(FILE);

            in0.seek(blockSize * 2);

            try {
              in0.read(readBuf);
            } finally {
              U.closeQuiet(in0);
            }

            return null;
          }
        },
        IOException.class,
        "Failed to read data due to secondary file system exception: /dir/subdir/file");
  }
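Common to all five examples is the readFully contract: the call either fills the requested range completely or throws, never returning a short read. A minimal sketch of the stream-advancing readFully(byte[]) variant used in examples #2 and #4, with a hypothetical helper name and only java.io types (DataInputStream, FileInputStream, EOFException, IOException) assumed:

// Hypothetical helper: read exactly n bytes from the start of a local file.
// DataInput.readFully fills the buffer completely or throws EOFException.
static byte[] readExactly(String path, int n) throws IOException {
  byte[] buf = new byte[n];
  try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
    in.readFully(buf);
  }
  return buf;
}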