Example #1
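 /**
  * Copies an extent described by an existing HashLocPair into this buffer.
  * If raw chunk data is buffered, or the pair list has reached its maximum
  * size, the extent's bytes are fetched and written directly; otherwise the
  * pair is recorded in the sparse pair list for later reconstruction.
  */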
 public void copyExtent(HashLocPair p)
     throws IOException, BufferClosedException, DataArchivedException {
   if (this.closed) throw new BufferClosedException("Buffer Closed while writing");
   if (this.flushing) throw new BufferClosedException("Buffer Flushing");
   this.lock.lock();
   try {
     if (!this.isDirty() && this.buf != null) {
       this.buf = null;
     }
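      // Fall back to writing raw bytes when the buffer already holds data or
      // the pair list is full; otherwise just record the pair below.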
     if (this.buf != null || this.ar.size() >= LongByteArrayMap.MAX_ELEMENTS_PER_AR) {
       if (this.ar.size() >= LongByteArrayMap.MAX_ELEMENTS_PER_AR)
         SDFSLogger.getLog()
             .debug(
                 "copy extent Chuck Array Size greater than "
                     + LongByteArrayMap.MAX_ELEMENTS_PER_AR
                     + " at "
                     + (this.getFilePosition() + p.pos)
                     + " for file "
                     + this.df.mf.getPath());
       byte[] b = HCServiceProxy.fetchChunk(p.hash, p.hashloc);
       ByteBuffer bf = ByteBuffer.wrap(b);
       byte[] z = new byte[p.nlen];
       bf.position(p.offset);
       bf.get(z);
       this.writeBlock(z, p.pos);
     } else {
       try {
         this.reconstructed = true;
         this.hlAdded = true;
         SparseDataChunk.insertHashLocPair(ar, p);
       } catch (Throwable e) {
         df.errOccured = true;
         throw new IOException(e);
       }
     }
   } finally {
     this.lock.unlock();
   }
 }
Example #2
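  /**
   * Writes a buffer using variable-length chunking. The data is split into
   * Fingers by a VariableHashEngine, each Finger is written asynchronously,
   * and the method waits (with an optional timeout) for all writes to finish
   * before recording the resulting HashLocPairs and updating dedup statistics.
   */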
  private void wm(byte[] b, int pos) throws IOException {
    VariableHashEngine hc = (VariableHashEngine) SparseDedupFile.hashPool.borrowObject();

    try {
      List<Finger> fs = hc.getChunks(b);
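      // Listener counts completed and failed chunk writes and wakes the
      // waiting thread below.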
      AsyncChunkWriteActionListener l =
          new AsyncChunkWriteActionListener() {

            @Override
            public void commandException(Finger result, Throwable e) {
              this.incrementAndGetDNEX();
              SDFSLogger.getLog().error("Error while getting hash", e);
              this.incrementandGetDN();

              synchronized (this) {
                this.notifyAll();
              }
            }

            @Override
            public void commandResponse(Finger result) {
              int _dn = this.incrementandGetDN();
              if (_dn >= this.getMaxSz()) {
                synchronized (this) {
                  this.notifyAll();
                }
              }
            }

            @Override
            public void commandArchiveException(DataArchivedException e) {
              this.incrementAndGetDNEX();
              this.dar = e;
              SDFSLogger.getLog().error("Data has been archived", e);
              this.incrementandGetDN();

              synchronized (this) {
                this.notifyAll();
              }
            }
          };
      l.setMaxSize(fs.size());
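      // Hand each chunk (Finger) to SparseDedupFile's executor for
      // asynchronous writing.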
      for (Finger f : fs) {
        f.l = l;
        f.dedup = df.mf.isDedup();
        SparseDedupFile.executor.execute(f);
      }
      int wl = 0;
      int tm = 1000;

      int al = 0;
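      // Wait for all chunk writes to complete, warning about slow IO roughly
      // every 60 seconds and enforcing the configured write timeout.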
      while (l.getDN() < fs.size() && l.getDNEX() == 0) {
        if (al == 60) {
          int nt = wl / 1000;
          SDFSLogger.getLog()
              .warn("Slow io, waited [" + nt + "] seconds for all writes to complete.");
          al = 0;
        }
        if (Main.writeTimeoutSeconds > 0 && wl > (Main.writeTimeoutSeconds * tm)) {
          // wl accumulates milliseconds waited; convert to seconds for the message.
          int nt = wl / 1000;
          df.toOccured = true;
          throw new IOException(
              "Write Timed Out after ["
                  + nt
                  + "] seconds. Expected ["
                  + fs.size()
                  + "] block writes but only ["
                  + l.getDN()
                  + "] were completed");
        }
        if (l.dar != null) throw l.dar;
        if (l.getDNEX() > 0) {
          throw new IOException("Unable to read shard");
        }
        synchronized (l) {
          l.wait(tm);
        }
        al++;
        wl += tm;
      }
      if (l.dar != null) throw l.dar;
      if (l.getDN() < fs.size()) {
        df.toOccured = true;
        throw new IOException(
            "Write Timed Out expected [" + fs.size() + "] but got [" + l.getDN() + "]");
      }
      if (l.getDNEX() > 0) throw new IOException("Write Failed because unable to read shard");
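      // Record a HashLocPair for each written chunk and update the file's
      // dedup IO statistics.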
      for (Finger f : fs) {
        HashLocPair p = new HashLocPair();
        try {
          p.hash = f.hash;
          p.hashloc = f.hl;
          p.len = f.len;
          p.offset = 0;
          p.nlen = f.len;
          p.pos = pos;
          pos += f.len;
          int dups = 0;
          if (p.hashloc[0] == 1) dups = f.len;
          df.mf.getIOMonitor().addVirtualBytesWritten(f.len, true);
          df.mf.getIOMonitor().addActualBytesWritten(f.len - dups, true);
          df.mf.getIOMonitor().addDulicateData(dups, true);
          this.prevDoop += dups;
          SparseDataChunk.insertHashLocPair(ar, p);
        } catch (Throwable e) {
          SDFSLogger.getLog().warn("unable to write object finger", e);
          throw e;
          // SDFSLogger.getLog().info("this chunk size is "
          // + f.chunk.length);
        }
      }
      this.hlAdded = true;
    } catch (Throwable e) {
      df.errOccured = true;
      throw new IOException(e);
    } finally {
      SparseDedupFile.hashPool.returnObject(hc);
    }
  }
Example #3
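  /**
   * Writes data into this buffer at the given position. A full, chunk-aligned
   * write replaces the backing buffer directly. When no raw data is buffered
   * and the buffer is being reconstructed, small writes are hashed inline as a
   * single HashLocPair and larger writes are chunked through wm(); all other
   * writes go through writeBlock().
   */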
  @Override
  public void write(byte[] b, int pos)
      throws BufferClosedException, IOException, DataArchivedException {
    if (SDFSLogger.isDebug()) {
      SDFSLogger.getLog()
          .debug(
              "writing "
                  + df.getMetaFile().getPath()
                  + "df="
                  + df.getGUID()
                  + "fpos="
                  + this.position
                  + " pos="
                  + pos
                  + " len="
                  + b.length);
      if (df.getMetaFile().getPath().endsWith(".vmx")
          || df.getMetaFile().getPath().endsWith(".vmx~")) {
        SDFSLogger.getLog()
            .debug(
                "###### In wb Text of VMX="
                    + df.getMetaFile().getPath()
                    + "="
                    + new String(b, "UTF-8"));
      }
    }
    this.lock.lock();
    try {
      if (this.closed) throw new BufferClosedException("Buffer Closed while writing");
      if (this.flushing) throw new BufferClosedException("Buffer Flushing");

      /*
       * if(pos != 0) SDFSLogger.getLog().info("start at " + pos);
       * if(b.length != this.capacity())
       * SDFSLogger.getLog().info("!capacity " + b.length);
       */
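      // A full, aligned chunk write replaces the backing buffer in place.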
      if (pos == 0 && b.length == Main.CHUNK_LENGTH) {
        this.buf = ByteBuffer.wrap(b);

        this.setDirty(true);
      } else {

        if (this.ar.size() >= LongByteArrayMap.MAX_ELEMENTS_PER_AR) {

          this.writeBlock(b, pos);
          this.ar = new ArrayList<HashLocPair>();
        } else if (this.buf == null
            && this.reconstructed
            && HashFunctionPool.max_hash_cluster > 1) {
          // SDFSLogger.getLog().info("poop " + b.length + " pos=" +
          // pos + "_spos=" + _spos + " bpos=" +bpos );
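          // Writes smaller than the minimum variable-chunk size are hashed as
          // a single pair; larger writes are split by wm().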
          if (b.length < VariableHashEngine.minLen) {
            HashLocPair p = new HashLocPair();
            AbstractHashEngine eng = SparseDedupFile.hashPool.borrowObject();
            try {
              p.hash = eng.getHash(b);
              p.hashloc = HCServiceProxy.writeChunk(p.hash, b, true);
              p.len = b.length;
              p.nlen = b.length;
              p.offset = 0;
              p.pos = pos;
              int dups = 0;
              if (p.hashloc[0] == 1) dups = b.length;
              df.mf.getIOMonitor().addVirtualBytesWritten(b.length, true);
              df.mf.getIOMonitor().addActualBytesWritten(b.length - dups, true);
              df.mf.getIOMonitor().addDulicateData(dups, true);
              this.prevDoop += dups;
              SparseDataChunk.insertHashLocPair(ar, p);
              this.hlAdded = true;

              /*
               * HashLocPair _h =null;
               *
               * for(HashLocPair h : ar) { if(_h!=null && h.pos !=
               * (_h.pos + _h.nlen)) {
               * SDFSLogger.getLog().info("data mismatch");
               * SDFSLogger.getLog().info(_h);
               * SDFSLogger.getLog().info(h); } _h=h; }
               */

            } catch (HashtableFullException e) {
              SDFSLogger.getLog().error("unable to write with accelerator", e);
              throw new IOException(e);
            } finally {
              SparseDedupFile.hashPool.returnObject(eng);
            }
          } else {
            this.wm(b, pos);
            /*
             * HashLocPair _h =null;
             *
             * for(HashLocPair h : ar) { if(_h!=null && h.pos !=
             * (_h.pos + _h.nlen)) {
             * SDFSLogger.getLog().info("data mismatch");
             * SDFSLogger.getLog().info(_h);
             * SDFSLogger.getLog().info(h); } _h=h; }
             */
          }
        } else {
          // SDFSLogger.getLog().info("writing at " + pos + " recon="
          // + this.reconstructed + " sz=" + this.ar.size());
          this.writeBlock(b, pos);
        }
      }

      this.bytesWritten = this.bytesWritten + b.length;
    } finally {
      this.lock.unlock();
    }
  }