Example 1
  public long writeCommit(CommitUpdateCommand cmd, int flags) {
    LogCodec codec = new LogCodec(resolver);
    synchronized (this) {
      try {
        long pos = fos.size(); // if we had flushed, this should be equal to channel.position()

        if (pos == 0) {
          writeLogHeader(codec);
          pos = fos.size();
        }
        codec.init(fos);
        codec.writeTag(JavaBinCodec.ARR, 3);
        codec.writeInt(UpdateLog.COMMIT | flags); // should just take one byte
        codec.writeLong(cmd.getVersion());
        codec.writeStr(END_MESSAGE); // ensure these bytes are (almost) last in the file

        endRecord(pos);

        fos.flush(); // flush since this will be the last record in a log file
        assert fos.size() == channel.size();

        return pos;
      } catch (IOException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
      }
    }
  }
Example 2
  private void checkWriteHeader(LogCodec codec, SolrInputDocument optional) throws IOException {

    // Unsynchronized access.  We can get away with an unsynchronized access here
    // since we will never get a false non-zero when the position is in fact 0.
    // rollback() is the only function that can reset to zero, and it blocks updates.
    if (fos.size() != 0) return;

    synchronized (this) {
      if (fos.size() != 0) return; // check again while synchronized
      if (optional != null) {
        addGlobalStrings(optional.getFieldNames());
      }
      writeLogHeader(codec);
    }
  }
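
The pattern in checkWriteHeader is a double-checked test: a cheap unsynchronized read skips the common case where the header already exists, and the check is repeated under the lock before the one-time header write. The code can rely on the unsynchronized fast path because, as the comment notes, fos.size() never reads as non-zero while the log is actually empty. A minimal standalone sketch of the same check-then-lock-then-recheck idiom (not Solr's API; the AtomicLong here is a hypothetical stand-in for the stream position):

import java.util.concurrent.atomic.AtomicLong;

// Minimal sketch of the write-header-once idiom used above.
class HeaderOnce {
  private final AtomicLong bytesWritten = new AtomicLong(0);

  void ensureHeader(Runnable writeHeader) {
    if (bytesWritten.get() != 0) return;   // unsynchronized fast path
    synchronized (this) {
      if (bytesWritten.get() != 0) return; // re-check while holding the lock
      writeHeader.run();                   // runs at most once
      bytesWritten.set(1);
    }
  }
}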
Example 3
  public long writeDelete(DeleteUpdateCommand cmd, int flags) {
    LogCodec codec = new LogCodec(resolver);

    try {
      checkWriteHeader(codec, null);

      BytesRef br = cmd.getIndexedId();

      MemOutputStream out = new MemOutputStream(new byte[20 + br.length]);
      codec.init(out);
      codec.writeTag(JavaBinCodec.ARR, 3);
      codec.writeInt(UpdateLog.DELETE | flags); // should just take one byte
      codec.writeLong(cmd.getVersion());
      codec.writeByteArray(br.bytes, br.offset, br.length);

      synchronized (this) {
        long pos = fos.size(); // if we had flushed, this should be equal to channel.position()
        assert pos != 0;
        out.writeAll(fos);
        endRecord(pos);
        // fos.flushBuffer();  // flush later
        return pos;
      }

    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    }
  }
Example 4
 /**
  * takes a snapshot of the current position and number of records for later possible rollback, and
  * returns the position
  */
 public long snapshot() {
   synchronized (this) {
     snapshot_size = fos.size();
     snapshot_numRecords = numRecords;
     return snapshot_size;
   }
 }
Example 5
 // This could mess with any readers or reverse readers that are open, or anything that might try
 // to do a log lookup.
 // This should only be used to roll back buffered updates, not actually applied updates.
 public void rollback(long pos) throws IOException {
   synchronized (this) {
     assert snapshot_size == pos;
     fos.flush();
     raf.setLength(pos);
     fos.setWritten(pos);
     assert fos.size() == pos;
     numRecords = snapshot_numRecords;
   }
 }
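
snapshot() records the current file size and record count; rollback(pos) then discards buffered writes by flushing, truncating the underlying file back to that position, and resetting the stream's written counter so fos.size() agrees with the file again. A minimal standalone sketch of the same truncate-to-snapshot idea against a bare RandomAccessFile (illustrative only; the temp file and byte counts are made up):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class TruncateToSnapshotDemo {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("tlog-demo", ".bin");
    try (RandomAccessFile raf = new RandomAccessFile(f, "rw")) {
      raf.write(new byte[100]);       // data we want to keep
      long snapshot = raf.length();   // analogous to snapshot(): remember the position

      raf.write(new byte[40]);        // buffered writes we may need to undo
      System.out.println("before rollback: " + raf.length());  // 140

      raf.setLength(snapshot);        // analogous to rollback(pos): truncate to the snapshot
      System.out.println("after rollback:  " + raf.length());  // 100
    } finally {
      f.delete();
    }
  }
}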
Example 6
  protected void writeLogHeader(LogCodec codec) throws IOException {
    long pos = fos.size();
    assert pos == 0;

    Map<String, Object> header = new LinkedHashMap<>();
    header.put("SOLR_TLOG", 1); // a magic string + version number
    header.put("strings", globalStringList);
    codec.marshal(header, fos);

    endRecord(pos);
  }
Example 7
 public long writeData(Object o) {
   LogCodec codec = new LogCodec(resolver);
   try {
     long pos = fos.size(); // if we had flushed, this should be equal to channel.position()
     codec.init(fos);
     codec.writeVal(o);
     return pos;
   } catch (IOException e) {
     throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
   }
 }
Example 8
  public boolean endsWithCommit() throws IOException {
    long size;
    synchronized (this) {
      fos.flush();
      size = fos.size();
    }

    // the end of the file should have the end message (added during a commit) plus a 4 byte size
    byte[] buf = new byte[END_MESSAGE.length()];
    long pos = size - END_MESSAGE.length() - 4;
    if (pos < 0) return false;
    ChannelFastInputStream is = new ChannelFastInputStream(channel, pos);
    is.read(buf);
    for (int i = 0; i < buf.length; i++) {
      if (buf[i] != END_MESSAGE.charAt(i)) return false;
    }
    return true;
  }
Example 9
  public long write(AddUpdateCommand cmd, int flags) {
    LogCodec codec = new LogCodec(resolver);
    SolrInputDocument sdoc = cmd.getSolrInputDocument();

    try {
      checkWriteHeader(codec, sdoc);

      // adaptive buffer sizing
      int bufSize = lastAddSize; // unsynchronized access of lastAddSize should be fine
      bufSize = Math.min(1024 * 1024, bufSize + (bufSize >> 3) + 256);

      MemOutputStream out = new MemOutputStream(new byte[bufSize]);
      codec.init(out);
      codec.writeTag(JavaBinCodec.ARR, 3);
      codec.writeInt(UpdateLog.ADD | flags); // should just take one byte
      codec.writeLong(cmd.getVersion());
      codec.writeSolrInputDocument(cmd.getSolrInputDocument());
      lastAddSize = (int) out.size();

      synchronized (this) {
        long pos = fos.size(); // if we had flushed, this should be equal to channel.position()
        assert pos != 0;

        // debug check (disabled):
        // System.out.println("###writing at " + pos + " fos.size()=" + fos.size() + " raf.length()=" + raf.length());
        // if (pos != fos.size()) {
        //   throw new RuntimeException("ERROR ###writing at " + pos + " fos.size()=" + fos.size() + " raf.length()=" + raf.length());
        // }
        out.writeAll(fos);
        endRecord(pos);
        // fos.flushBuffer();  // flush later
        return pos;
      }

    } catch (IOException e) {
      // TODO: reset our file pointer back to "pos", the start of this record.
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error logging add", e);
    }
  }
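
The adaptive buffer sizing above starts each add's scratch buffer a little larger than the previous add's serialized size: the previous size plus one eighth of it plus 256 bytes, capped at 1 MB, so the MemOutputStream usually does not need to grow mid-write. A quick standalone check of that formula (the sample inputs are illustrative):

public class BufSizeDemo {
  // Same growth rule as in write(): lastAddSize + lastAddSize/8 + 256, capped at 1 MB.
  static int nextBufSize(int lastAddSize) {
    return Math.min(1024 * 1024, lastAddSize + (lastAddSize >> 3) + 256);
  }

  public static void main(String[] args) {
    System.out.println(nextBufSize(0));         // 256      (first add)
    System.out.println(nextBufSize(4096));      // 4864     (4096 + 512 + 256)
    System.out.println(nextBufSize(2_000_000)); // 1048576  (capped at 1 MB)
  }
}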
Example 10
  public long writeDeleteByQuery(DeleteUpdateCommand cmd, int flags) {
    LogCodec codec = new LogCodec(resolver);
    try {
      checkWriteHeader(codec, null);

      MemOutputStream out = new MemOutputStream(new byte[20 + (cmd.query.length())]);
      codec.init(out);
      codec.writeTag(JavaBinCodec.ARR, 3);
      codec.writeInt(UpdateLog.DELETE_BY_QUERY | flags); // should just take one byte
      codec.writeLong(cmd.getVersion());
      codec.writeStr(cmd.query);

      synchronized (this) {
        long pos = fos.size(); // if we had flushed, this should be equal to channel.position()
        out.writeAll(fos);
        endRecord(pos);
        // fos.flushBuffer();  // flush later
        return pos;
      }
    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    }
  }
Example 11
 /** returns the current position in the log file */
 public long position() {
   synchronized (this) {
     return fos.size();
   }
 }
Example 12
 protected void endRecord(long startRecordPosition) throws IOException {
   fos.writeInt((int) (fos.size() - startRecordPosition));
   numRecords++;
 }
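
endRecord() closes every record with a 4-byte suffix holding the record's length from its start position up to (but not including) the suffix itself; endsWithCommit() relies on that suffix when it seeks back past END_MESSAGE, and it is what lets a reverse reader walk the log backwards without an index. A minimal standalone sketch of that backward walk (the file name is hypothetical, the length is assumed to be big-endian as RandomAccessFile.readInt expects, and the file is assumed to end exactly on a record boundary):

import java.io.IOException;
import java.io.RandomAccessFile;

public class ReverseScanDemo {
  public static void main(String[] args) throws IOException {
    try (RandomAccessFile raf = new RandomAccessFile("demo.tlog", "r")) {
      long end = raf.length();            // just past the last record's 4-byte length suffix
      while (end >= 4) {
        raf.seek(end - 4);
        int recordLen = raf.readInt();    // bytes from the record start up to the suffix
        long start = end - 4 - recordLen;
        System.out.println("record at " + start + ", length " + recordLen);
        end = start;                      // step to the previous record
      }
    }
  }
}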
Example 13
  TransactionLog(File tlogFile, Collection<String> globalStrings, boolean openExisting) {
    boolean success = false;
    try {
      if (debug) {
        log.debug(
            "New TransactionLog file="
                + tlogFile
                + ", exists="
                + tlogFile.exists()
                + ", size="
                + tlogFile.length()
                + ", openExisting="
                + openExisting);
      }

      this.tlogFile = tlogFile;
      raf = new RandomAccessFile(this.tlogFile, "rw");
      long start = raf.length();
      channel = raf.getChannel();
      os = Channels.newOutputStream(channel);
      fos = new FastOutputStream(os, new byte[65536], 0);
      // fos = FastOutputStream.wrap(os);

      if (openExisting) {
        if (start > 0) {
          readHeader(null);
          raf.seek(start);
          assert channel.position() == start;
          fos.setWritten(start); // reflect that we aren't starting at the beginning
          assert fos.size() == channel.size();
        } else {
          addGlobalStrings(globalStrings);
        }
      } else {
        if (start > 0) {
          log.warn("New transaction log already exists: " + tlogFile + " size=" + raf.length());
          return;
        }
        addGlobalStrings(globalStrings);
      }

      success = true;

      assert ObjectReleaseTracker.track(this);

    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    } finally {
      if (!success && raf != null) {
        try {
          raf.close();
        } catch (Exception e) {
          log.error("Error closing tlog file (after error opening)", e);
        }
      }
    }
  }