Example #1
 @Test
 public void testPosition() throws IOException {
   FileChannel channel = channel(regularFile(10), READ);
   assertEquals(0, channel.position());
   assertSame(channel, channel.position(100));
   assertEquals(100, channel.position());
 }
Example #2
 private void insertRecord(int recordPosition, long value) throws IOException {
   try {
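      // Shift technique: read everything from the insertion point onward into a temporary buffer,
      // write the new record at the insertion point, append the saved tail after it, and finally
      // restore the channel to its previous position so the caller's cursor is unaffected.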
     FileChannel channel = getFileChannel();
     long previousPosition = channel.position();
     channel.position(RECORD_SIZE * recordPosition);
     int trail = (int) (channel.size() - channel.position());
     ByteBuffer trailBuffer = null;
     if (trail > 0) {
       trailBuffer = ByteBuffer.allocate(trail);
       channel.read(trailBuffer);
       trailBuffer.flip();
     }
     ByteBuffer buffer = ByteBuffer.allocate(RECORD_SIZE);
     buffer.put(Record.IN_USE.byteValue());
     buffer.putLong(value);
     buffer.flip();
     channel.position(RECORD_SIZE * recordPosition);
     channel.write(buffer);
     if (trail > 0) {
       channel.write(trailBuffer);
     }
     channel.position(previousPosition);
   } catch (IOException e) {
     throw new RuntimeException(e);
   }
 }
Example #3
  /**
   * @param compressed_size the compressed size in bytes, or null if not known
   * @param uncompressed_size the uncompressed size in bytes, or null if not known
   * @return uncompressed ByteArrayOutputStream
   * @throws IOException
   * @throws DataFormatException
   */
  private ByteArrayOutputStream unCompress(Integer compressed_size, Integer uncompressed_size)
      throws IOException, DataFormatException {
    byte[] uncompressed_data = null;
    byte[] input_data = null;
    ByteArrayOutputStream ret = new ByteArrayOutputStream();
    Inflater decompresser = new Inflater(false);
    long first_seek = fileChannel.position();
     boolean uncompressing = true;
    while (uncompressing) {
      if (decompresser.needsInput()) {
        input_data = new byte[(compressed_size != null) ? compressed_size.intValue() : 1024];
        int bytesRead = fileChannel.read(ByteBuffer.wrap(input_data));
        // only hand the inflater the bytes that were actually read this pass
        decompresser.setInput(input_data, 0, Math.max(bytesRead, 0));
      }
      uncompressed_data =
          new byte
              [(uncompressed_size != null)
                  ? uncompressed_size.intValue()
                  : (input_data.length * 4)];
      decompresser.inflate(uncompressed_data);
      int op = (int) (decompresser.getBytesWritten() - (long) ret.size());
      if (op > 0) ret.write(uncompressed_data, 0, op);

      if (decompresser.finished()) uncompressing = false;
    }
    fileChannel.position(
        (first_seek + decompresser.getBytesRead())); // move file pointer to start of next stream
    decompresser.end();
    return ret;
  }
Example #4
  public static void copyFile(File srcFile, File destFile) throws Exception {
    int bufferSize = 2048;

    FileInputStream in = new FileInputStream(srcFile);
    FileOutputStream out = new FileOutputStream(destFile);
    FileChannel inChannel = in.getChannel();
    FileChannel outChannel = out.getChannel();

    ByteBuffer buffer = null;
    int length = -1;
    try {
      while (true) {
        if (inChannel.position() == inChannel.size()) {
          // finish copying
          break;
        } else if (inChannel.size() - inChannel.position() < length) {
          // copy last chunk of data
          length = (int) (inChannel.size() - inChannel.position());
        } else {
          length = bufferSize;
        }

        buffer = ByteBuffer.allocateDirect(length);
        inChannel.read(buffer);
        buffer.flip();
        outChannel.write(buffer);
        outChannel.force(false);
      }
    } finally {
      _close(inChannel);
      _close(in);
      _close(outChannel);
      _close(out);
    }
  }
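Example #4 sizes each chunk by hand from position() and size(). For a plain file-to-file copy, FileChannel.transferTo can replace that bookkeeping entirely; the sketch below is not taken from any of the projects above (class and method names are illustrative) and simply loops because transferTo may move fewer bytes than requested.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public final class TransferToCopy {
  /** Copies src to dst; transferTo tracks the offset for us, no manual position() arithmetic. */
  public static void copy(Path src, Path dst) throws IOException {
    try (FileChannel in = FileChannel.open(src, StandardOpenOption.READ);
        FileChannel out = FileChannel.open(dst, StandardOpenOption.CREATE,
            StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING)) {
      long position = 0;
      long size = in.size();
      while (position < size) {
        // transferTo may move fewer bytes than requested, so keep looping until done
        position += in.transferTo(position, size - position, out);
      }
    }
  }
}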
Example #5
 public void writeRequest(final OutputStream out) throws IOException {
    int readCount = 0;
    Iterator<OnDatatransferProgressListener> it = null;

    try {
      mChannel.position(mOffset);
      long size = mFile.length();
      if (size == 0) size = -1;
      long maxCount = Math.min(mOffset + mChunkSize, mChannel.size());
      while (mChannel.position() < maxCount) {
        readCount = mChannel.read(mBuffer);
        out.write(mBuffer.array(), 0, readCount);
        mBuffer.clear();
        if (mTransferred < maxCount) { // condition to avoid accumulating progress for repeated chunks
          mTransferred += readCount;
        }
        synchronized (mDataTransferListeners) {
          it = mDataTransferListeners.iterator();
          while (it.hasNext()) {
            it.next().onTransferProgress(readCount, mTransferred, size, mFile.getName());
          }
        }
      }

    } catch (IOException io) {
      Log.e(TAG, io.getMessage());
      throw new RuntimeException(
          "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
          io);
    }
  }
Example #6
  private void upgradeRatings(BinaryFormat newFormat) throws IOException {
    Preconditions.checkArgument(
        newFormat.getRatingSize() > format.getRatingSize(), "new format is not wider than old");
    logger.info("upgrading {} ratings from {} to {}", index, format, newFormat);

    ByteBuffer oldBuffer = ByteBuffer.allocateDirect(format.getRatingSize());
    ByteBuffer newBuffer = ByteBuffer.allocateDirect(newFormat.getRatingSize());
    MutableRating scratch = new MutableRating();

    long oldPos = BinaryHeader.HEADER_SIZE + index * format.getRatingSize();
    Preconditions.checkState(channel.position() == oldPos, "channel is at the wrong position");
    long newPos = BinaryHeader.HEADER_SIZE + index * newFormat.getRatingSize();
    channel.position(newPos);
    // loop backwards, copying each rating to later in the file
    for (int i = index - 1; i >= 0; i--) {
      oldPos -= format.getRatingSize();
      newPos -= newFormat.getRatingSize();

      // read the old rating
      BinaryUtils.readBuffer(channel, oldBuffer, oldPos);
      oldBuffer.flip();
      format.readRating(oldBuffer, scratch);
      oldBuffer.clear();

      // write the new rating
      newFormat.renderRating(scratch, newBuffer);
      newBuffer.flip();
      BinaryUtils.writeBuffer(channel, newBuffer, newPos);
      newBuffer.clear();
    }
    assert oldPos == BinaryHeader.HEADER_SIZE;
    assert newPos == BinaryHeader.HEADER_SIZE;
    format = newFormat;
    ratingBuffer = ByteBuffer.allocateDirect(newFormat.getRatingSize());
  }
Example #7
  public boolean copyFromChannel(FileChannel channel, TableStats stats) throws IOException {
    if (channel.position() < channel.size()) {
      clear();

      buffer.clear();
      channel.read(buffer);
      memorySize = buffer.position();

      while (position < memorySize) {
        long recordPtr = address + position;

        if (remain() < SizeOf.SIZE_OF_INT) {
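          // Fewer bytes remain in the in-memory buffer than a record-length int; push the unread
          // tail back onto the channel so the next call re-reads it, and stop here.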
          channel.position(channel.position() - remain());
          memorySize = (int) (memorySize - remain());
          return true;
        }

        int recordSize = UNSAFE.getInt(recordPtr);

        if (remain() < recordSize) {
          channel.position(channel.position() - remain());
          memorySize = (int) (memorySize - remain());
          return true;
        }

        position += recordSize;
        rowNum++;
      }

      return true;
    } else {
      return false;
    }
  }
Example #8
 /**
  * Sets the version for the given neostore file in {@code storeDir}.
  *
  * @param storeDir the store dir to locate the neostore file in.
  * @param version the version to set.
  * @return the previous version before writing.
  */
 public static long setVersion(String storeDir, long version) {
   RandomAccessFile file = null;
   try {
     file = new RandomAccessFile(new File(storeDir, NeoStore.DEFAULT_NAME), "rw");
     FileChannel channel = file.getChannel();
     channel.position(RECORD_SIZE * 2 + 1 /*inUse*/);
     ByteBuffer buffer = ByteBuffer.allocate(8);
     channel.read(buffer);
     buffer.flip();
     long previous = buffer.getLong();
     channel.position(RECORD_SIZE * 2 + 1 /*inUse*/);
     buffer.clear();
     buffer.putLong(version).flip();
     channel.write(buffer);
     return previous;
   } catch (IOException e) {
     throw new RuntimeException(e);
   } finally {
     try {
       if (file != null) file.close();
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
   }
 }
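Example #8 moves the channel position twice to read and then overwrite the same record. When the offset is known up front, FileChannel's absolute read(ByteBuffer, long) and write(ByteBuffer, long) overloads do the same work without disturbing the channel position; a minimal sketch under that assumption follows (the class name, the Path parameter, and the RECORD_SIZE constant are illustrative, not Neo4j's actual values).

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public final class VersionField {
  private static final int RECORD_SIZE = 9; // illustrative value

  /** Swaps the stored version for a new one and returns the previous value. */
  public static long setVersion(Path neostoreFile, long version) throws IOException {
    try (FileChannel channel = FileChannel.open(neostoreFile,
        StandardOpenOption.READ, StandardOpenOption.WRITE)) {
      long offset = RECORD_SIZE * 2 + 1 /* inUse */;
      ByteBuffer buffer = ByteBuffer.allocate(8);
      channel.read(buffer, offset); // absolute read: the channel position is untouched
      buffer.flip();
      long previous = buffer.getLong();
      buffer.clear();
      buffer.putLong(version).flip();
      channel.write(buffer, offset); // absolute write at the same offset
      return previous;
    }
  }
}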
Example #9
  @Test
  public void testAppend() throws IOException {
    RegularFile file = regularFile(0);
    FileChannel channel = channel(file, WRITE, APPEND);
    assertEquals(0, channel.position());

    ByteBuffer buf = buffer("1234567890");
    ByteBuffer buf2 = buffer("1234567890");

    assertEquals(10, channel.write(buf));
    assertEquals(10, channel.position());

    buf.flip();
    channel.position(0);
    assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2}));
    assertEquals(30, channel.position());

    buf.flip();
    buf2.flip();
    channel.position(0);
    assertEquals(20, channel.write(new ByteBuffer[] {buf, buf2}, 0, 2));
    assertEquals(50, channel.position());

    buf.flip();
    channel.position(0);
    assertEquals(10, channel.write(buf, 5));
    assertEquals(60, channel.position());

    buf.flip();
    channel.position(0);
    assertEquals(10, channel.transferFrom(new ByteBufferChannel(buf), 0, 10));
    assertEquals(70, channel.position());
  }
Example #10
 /**
  * Closes this store. This will cause all buffers and channels to be closed. Requesting an
  * operation after this method has been invoked is illegal and an exception will be thrown.
  *
  * <p>This method will start by invoking the {@link #closeStorage} method, giving the implementing
  * store a way to do anything that it needs to do before the fileChannel is closed.
  */
 public void close() {
   if (fileChannel == null) {
     return;
   }
   closeStorage();
   if (windowPool != null) {
     windowPool.close();
     windowPool = null;
   }
   if ((isReadOnly() && !isBackupSlave()) || idGenerator == null || !storeOk) {
     releaseFileLockAndCloseFileChannel();
     return;
   }
   long highId = idGenerator.getHighId();
   int recordSize = -1;
   if (this instanceof AbstractDynamicStore) {
     recordSize = ((AbstractDynamicStore) this).getBlockSize();
   } else if (this instanceof AbstractStore) {
     recordSize = ((AbstractStore) this).getRecordSize();
   }
   idGenerator.close();
   boolean success = false;
   IOException storedIoe = null;
   // hack for WINBLOWS
   if (!readOnly || backupSlave) {
     for (int i = 0; i < 10; i++) {
       try {
         fileChannel.position(highId * recordSize);
         ByteBuffer buffer = ByteBuffer.wrap(UTF8.encode(getTypeAndVersionDescriptor()));
         fileChannel.write(buffer);
         stringLogger.debug(
             "Closing "
                 + storageFileName
                 + ", truncating at "
                 + fileChannel.position()
                 + " vs file size "
                 + fileChannel.size());
         fileChannel.truncate(fileChannel.position());
         fileChannel.force(false);
         releaseFileLockAndCloseFileChannel();
         success = true;
         break;
       } catch (IOException e) {
         storedIoe = e;
         System.gc();
       }
     }
   } else {
     releaseFileLockAndCloseFileChannel();
     success = true;
   }
   if (!success) {
     throw new UnderlyingStorageException(
         "Unable to close store " + getStorageFileName(), storedIoe);
   }
 }
Example #11
  private void storeSerializedRecord(File file, byte[] serialized) throws IOException {
    RandomAccessFile recordFile = new RandomAccessFile(file, "rw");
    FileChannel out = recordFile.getChannel();

    out.position(0);
    writeInteger(CURRENT_VERSION_MARKER, out);
    writeBlob(serialized, out);
    out.truncate(out.position());
    recordFile.close();
  }
Example #12
 /**
  * Invalidate the buffer before a position change (e.g. a seek) occurs, flushing pending writes if
  * required and correcting the file position if reading.
  *
  * @throws IOException
  */
 private void invalidateBuffer() throws IOException, BadDescriptorException {
   if (!reading) flushWrite();
   int posOverrun = buffer.remaining(); // how far ahead we are when reading
   buffer.clear();
   if (reading) {
     buffer.flip();
     // if the read buffer is ahead, back up
     FileChannel fileChannel = (FileChannel) descriptor.getChannel();
     if (posOverrun != 0) fileChannel.position(fileChannel.position() - posOverrun);
   }
 }
Example #13
 private void resetForWrite() throws IOException {
   if (descriptor.isSeekable()) {
     FileChannel fileChannel = (FileChannel) descriptor.getChannel();
     if (buffer.hasRemaining()) { // we have read ahead, and need to back up
       fileChannel.position(fileChannel.position() - buffer.remaining());
     }
   }
   // FIXME: Clearing read buffer here...is this appropriate?
   buffer.clear();
   reading = false;
 }
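Examples #12 and #13 share one idiom: when the read buffer holds data the caller has not consumed yet, the channel is rewound by buffer.remaining() so its position matches the logical read position before switching modes. A stripped-down sketch of just that idiom, with made-up class and field names, assuming a plain java.nio ByteBuffer:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

final class BufferedChannelReader {
  private final FileChannel channel;
  private final ByteBuffer buffer = ByteBuffer.allocate(8192);

  BufferedChannelReader(FileChannel channel) {
    this.channel = channel;
    buffer.limit(0); // start out with an empty (fully consumed) buffer
  }

  /** Discards read-ahead data and moves the channel back to the logical read position. */
  void invalidateReadAhead() throws IOException {
    int unread = buffer.remaining(); // bytes fetched from the file but not yet handed to the caller
    if (unread > 0) {
      channel.position(channel.position() - unread);
    }
    buffer.clear();
    buffer.limit(0); // nothing is buffered any more
  }
}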
Example #14
  /**
   * Append a message and return its id (position in the file plus the firstMessageId of the file).
   * Returns -1 if this file is too full for the message.
   */
  public long append(
      long timestamp, String routingKey, ReadableByteChannel payload, int payloadSize)
      throws IOException {
    int n = routingKey.length();
    if (n > 255)
      throw new IllegalArgumentException("Routing key length " + n + " > 255 characters");

    byte[] routingKeyBytes = routingKey.getBytes(UTF8);

    synchronized (channel) {
      if (length + MESSAGE_HEADER_SIZE + routingKeyBytes.length + payloadSize > maxFileSize)
        return -1;

      header.clear();
      channel.position(length);
      header.put(TYPE_MESSAGE);
      header.putLong(timestamp);
      header.putShort((short) routingKeyBytes.length);
      header.putInt(payloadSize);
      header.put(routingKeyBytes);
      header.flip();

      int id = length - FILE_HEADER_SIZE;
      channel.write(header);
      long sz = channel.transferFrom(payload, channel.position(), payloadSize);
      if (sz != payloadSize) {
        throw new IOException(
            "Only read " + sz + " bytes from payload channel instead of " + payloadSize);
      }
      length =
          (int) channel.position()
              + payloadSize; // update after write so a partial write won't corrupt file

      // see if we need to start a new histogram bucket
      if (bucketIndex < 0
          || ((id - bucketMessageId >= bytesPerBucket) && bucketIndex < MAX_BUCKETS - 1)) {
        if (bucketIndex >= 0) {
          putBucketDataInFileHeader();
          ++bucketIndex;
        } else {
          bucketIndex = 0;
        }
        bucketMessageId = id;
        bucketTimestamp = timestamp;
        bucketCount = 1;
      } else {
        ++bucketCount;
      }

      mostRecentTimestamp = timestamp;
      return firstMessageId + id;
    }
  }
Example #15
  /**
   * Load enough bytes from the channel into the buffer. After loading, the buffer holds 'amount'
   * bytes unless the end of the channel has been reached.
   *
   * @param amount the number of bytes in the buffer after returning, no larger than bufferSize
   * @param reload whether to reload (clear the buffer) or append (compact it)
   */
  private void fillBuffer(long amount, boolean reload) {
    try {
      if (amount > bufferSize) {
        amount = bufferSize;
      }
      // Read all remaining bytes if the requested amount reaches the end
      // of the channel.
      if (channelSize - channel.position() < amount) {
        amount = channelSize - channel.position();
      }

      if (in == null) {
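        // Note: 'in' here appears to be Apache MINA's ByteBuffer (which provides
        // allocate(capacity, direct) and buf()), not java.nio.ByteBuffer.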
        switch (bufferType) {
          case HEAP:
            in = ByteBuffer.allocate(bufferSize, false);
            break;
          case DIRECT:
            in = ByteBuffer.allocate(bufferSize, true);
            break;
          case AUTO:
            in = ByteBuffer.allocate(bufferSize);
            break;
          default:
            in = ByteBuffer.allocate(bufferSize);
        }
        channel.read(in.buf());
        in.flip();
        useLoadBuf = true;
      }

      if (!useLoadBuf) {
        return;
      }

      if (reload || in.remaining() < amount) {
        if (!reload) {
          in.compact();
        } else {
          in.clear();
        }
        channel.read(in.buf());
        in.flip();
      }

    } catch (Exception e) {
      log.error("Error fillBuffer", e);
    }
  }
Example #16
 public int read() throws IOException {
   if (channel.position() >= channel.size()) {
     return -1;
   }
   FileUtils.readFully(channel, ByteBuffer.wrap(buffer));
   return buffer[0] & 0xff;
 }
Example #17
 public long getContentLength() {
   try {
     return Math.min(mChunkSize, mChannel.size() - mChannel.position());
   } catch (IOException e) {
     return mChunkSize;
   }
 }
Example #18
  /**
   * Moves the reader to the specified byte offset in the file. Mind that:
   *
   * <ul>
   *   <li>it's your responsibility to ensure the offset corresponds to the actual beginning of a
   *       shape struct
   *   <li>once you call this, reading with hasNext/next on sparse shapefiles will be broken (we
   *       no longer know which shape we are at)
   * </ul>
   *
   * @param offset the byte offset to seek to
   * @throws IOException
   * @throws UnsupportedOperationException
   */
  public void goTo(int offset) throws IOException, UnsupportedOperationException {
    disableShxUsage();
    if (randomAccessEnabled) {
      if (this.useMemoryMappedBuffer) {
        buffer.position(offset);
      } else {
        /*
         * Check to see if requested offset is already loaded; ensure
         * that record header is in the buffer
         */
        if (this.currentOffset <= offset && this.currentOffset + buffer.limit() >= offset + 8) {
          buffer.position(this.toBufferOffset(offset));
        } else {
          FileChannel fc = (FileChannel) this.channel;
          fc.position(offset);
          this.currentOffset = offset;
          buffer.position(0);
          buffer.limit(buffer.capacity());
          fill(buffer, fc);
          buffer.position(0);
        }
      }

      int oldRecordOffset = record.end;
      record.end = offset;
      try {
        hasNext(false); // don't check for next logical record equality
      } catch (IOException ioe) {
        record.end = oldRecordOffset;
        throw ioe;
      }
    } else {
      throw new UnsupportedOperationException("Random Access not enabled");
    }
  }
Example #19
 @Override
 public void close() throws IOException, InterruptedException {
   try {
     m_outstandingWriteTasksLock.lock();
     try {
       while (m_outstandingWriteTasks.get() > 0) {
         m_noMoreOutstandingWriteTasksCondition.await();
       }
     } finally {
       m_outstandingWriteTasksLock.unlock();
     }
     m_syncTask.cancel(false);
     m_channel.force(false);
   } finally {
     m_bytesAllowedBeforeSync.release(m_bytesWrittenSinceLastSync.getAndSet(0));
   }
   m_channel.position(8);
   ByteBuffer completed = ByteBuffer.allocate(1);
   if (m_writeFailed) {
     completed.put((byte) 0).flip();
   } else {
     completed.put((byte) 1).flip();
   }
   m_channel.write(completed);
   m_channel.force(false);
   m_channel.close();
   if (m_onCloseHandler != null) {
     m_onCloseHandler.run();
   }
 }
Example #20
  private ByteList bufferedRead(int number) throws IOException, BadDescriptorException {
    checkReadable();
    ensureRead();

    int resultSize = 0;

    // 128K seems to be the minimum at which the stat+seek is faster than reallocation
    final int BULK_THRESHOLD = 128 * 1024;
    if (number >= BULK_THRESHOLD
        && descriptor.isSeekable()
        && descriptor.getChannel() instanceof FileChannel) {
      //
      // If it is a file channel, then we can pre-allocate the output buffer
      // to the total size of buffered + remaining bytes in file
      //
      FileChannel fileChannel = (FileChannel) descriptor.getChannel();
      resultSize =
          (int)
              Math.min(
                  fileChannel.size() - fileChannel.position() + bufferedInputBytesRemaining(),
                  number);
    } else {
      //
      // Cannot discern the total read length - allocate at least enough for the buffered data
      //
      resultSize = Math.min(bufferedInputBytesRemaining(), number);
    }

    ByteList result = new ByteList(resultSize);
    bufferedRead(result, number);
    return result;
  }
Example #21
 @Override
 public synchronized void reset() throws IOException {
   if (mark == -1) {
     throw new IOException("not marked");
   }
   myFileChannel.position(mark);
 }
Example #22
 public int write(ByteBuffer src) throws IOException {
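   // Appears to write into a logical file split across fixed-size (maxLength) backing segments:
   // the offset is taken modulo maxLength and getFileChannel() presumably resolves the segment
   // holding the current file pointer; a single write never crosses a segment boundary.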
   if (filePointer >= length && filePointer > maxLength) {
     // may need to extend and create files
     long oldFilePointer = filePointer;
     long x = length - (length % maxLength) + maxLength;
     for (; x < filePointer; x += maxLength) {
       if (x > length) {
         // expand the file size
         position(x - 1);
         write(ByteBuffer.wrap(new byte[1]));
       }
       filePointer = oldFilePointer;
     }
   }
   long offset = filePointer % maxLength;
   int len = src.remaining();
   FileChannel channel = getFileChannel();
   channel.position(offset);
   int l = (int) Math.min(len, maxLength - offset);
   if (l == len) {
     l = channel.write(src);
   } else {
     int oldLimit = src.limit();
     src.limit(src.position() + l);
     l = channel.write(src);
     src.limit(oldLimit);
   }
   filePointer += l;
   length = Math.max(length, filePointer);
   return l;
 }
Example #23
 // Read up to 'len' bytes of Value. Value should already be persisted to
 // disk.  A racing delete can trigger a failure where we get a null return,
 // but no crash (although one could argue that a racing load&delete is a bug
 // no matter what).
 @Override
 public byte[] load(Value v) {
   long skip = 0;
   Key k = v._key;
   // Convert an arraylet chunk into a long-offset from the base file.
   if (k._kb[0] == Key.ARRAYLET_CHUNK) {
     skip = ValueArray.getChunkOffset(k); // The offset
     k = ValueArray.getArrayKey(k); // From the base file key
   }
   if (k._kb[0] == Key.DVEC) {
     skip = water.fvec.NFSFileVec.chunkOffset(k); // The offset
   }
   try {
     FileInputStream s = null;
     try {
       s = new FileInputStream(getFileForKey(k));
       FileChannel fc = s.getChannel();
       fc.position(skip);
       AutoBuffer ab = new AutoBuffer(fc, true, Value.NFS);
       byte[] b = ab.getA1(v._max);
       ab.close();
       assert v.isPersisted();
       return b;
     } finally {
       if (s != null) s.close();
     }
   } catch (IOException e) { // Broken disk / short-file???
     H2O.ignore(e);
     return null;
   }
 }
Example #24
 // from https://gist.github.com/889747
 // TODO use Jakarta Commons IO..! This implementation needs to be improved.
 private static void copyFile(File sourceFile, File destFile) throws IOException {
   if (!destFile.exists()) {
     destFile.createNewFile();
   }
   FileInputStream fIn = null;
   FileOutputStream fOut = null;
   FileChannel source = null;
   FileChannel destination = null;
   try {
     fIn = new FileInputStream(sourceFile);
     source = fIn.getChannel();
     fOut = new FileOutputStream(destFile);
     destination = fOut.getChannel();
     long transfered = 0;
     long bytes = source.size();
     while (transfered < bytes) {
        // transferFrom writes at the explicit position argument, so advance it by the running
        // total; otherwise a partial transfer would restart at offset 0 and overwrite data
        transfered += destination.transferFrom(source, transfered, bytes - transfered);
        destination.position(transfered);
     }
   } finally {
     if (source != null) {
       source.close();
     } else if (fIn != null) {
       fIn.close();
     }
     if (destination != null) {
       destination.close();
     } else if (fOut != null) {
       fOut.close();
     }
   }
 }
Example #25
  /**
   * @param startByte the start of the file portion to check
   * @param endByte the end (exclusive) of the file portion to check
   * @return true if all the bytes in the file between startByte and endByte are null, false
   *     otherwise
   * @throws IOException
   */
  private boolean isFilePortionNull(int startByte, int endByte) throws IOException {
    logger.config("Checking file portion:" + Hex.asHex(startByte) + ":" + Hex.asHex(endByte));
    FileInputStream fis = null;
    FileChannel fc = null;
    try {
      fis = new FileInputStream(file);
      fc = fis.getChannel();
      fc.position(startByte);
      ByteBuffer bb = ByteBuffer.allocateDirect(endByte - startByte);
      fc.read(bb);
      while (bb.hasRemaining()) {
        if (bb.get() != 0) {
          return false;
        }
      }
    } finally {
      if (fc != null) {
        fc.close();
      }

      if (fis != null) {
        fis.close();
      }
    }
    return true;
  }
Example #26
  public void test_getChannel() {
    // Test for method FileChannel FileInputStream.getChannel()
    FileChannel channel;
    byte[] buffer = new byte[100];
    byte[] stringBytes;
    final int offset = 5;
    boolean equal = true;

    try {
      FileInputStream fis = new FileInputStream(fileName);
      channel = fis.getChannel();
      assertNotNull(channel);
      assertTrue("Channel is closed.", channel.isOpen());

      // Check that the channel is associated with the input stream.
      channel.position(offset);
      fis.read(buffer, 0, 10);
      stringBytes = fileString.getBytes();
      for (int i = 0; i < 10; i++) {
        equal &= (buffer[i] == stringBytes[i + offset]);
      }
      assertTrue("Channel is not associated with this stream.", equal);

      fis.close();
      assertFalse("Channel has not been closed.", channel.isOpen());
    } catch (FileNotFoundException e) {
      fail("Could not find : " + fileName);
    } catch (IOException e) {
      fail("Exception during test : " + e.getMessage());
    }
  }
Example #27
 /**
  * Write properties to disk.
  *
  * @throws IOException
  */
 private void write() throws IOException {
   if (channel != null) {
     channel.position(0);
   }
   om.writeValue(fos, properties);
   fos.flush();
 }
Example #28
  /**
   * Picks a block with probability samplingRate. Used in the first phase of up-front partitioning.
   *
   * @param filename
   * @param samplingRate
   */
  public void scanWithBlockSampling(String filename, double samplingRate, OutputStream out) {
    initScan(blockSampleSize);
    FileChannel ch = IOUtils.openFileChannel(filename);
    try {
      for (long position = 0; ; position += blockSampleSize) {
        while (!sampleSucceed(samplingRate)) {
          position += blockSampleSize;
        }
        ch.position(position);
        if ((nRead = ch.read(bb)) == -1) {
          break;
        }

        byteArrayIdx = previous = 0;
        if (position != 0) {
          // skip the first tuple if not starting at the beginning of the file.
          while (byteArrayIdx < nRead && byteArray[byteArrayIdx] != newLine) {
            byteArrayIdx++;
          }
          previous = ++byteArrayIdx;
        }

        processByteBuffer(null, out);
        bb.clear();
        out.flush(); // It only helps get an exact profiling?
      }

    } catch (IOException e) {
      e.printStackTrace();
    }
    IOUtils.closeFileChannel(ch);
    firstPass = false;
  }
Example #29
 private static long getRecord(String storeDir, long recordPosition) {
   RandomAccessFile file = null;
   try {
     file = new RandomAccessFile(new File(storeDir), "rw");
     FileChannel channel = file.getChannel();
     /*
      * We have to check size, because the store version
      * field was introduced with 1.5, so if there is a non-clean
      * shutdown we may have a buffer underflow.
      */
     if (recordPosition > 3 && channel.size() < RECORD_SIZE * 5) {
       return -1;
     }
     channel.position(RECORD_SIZE * recordPosition + 1 /*inUse*/);
     ByteBuffer buffer = ByteBuffer.allocate(8);
     channel.read(buffer);
     buffer.flip();
     long previous = buffer.getLong();
     return previous;
   } catch (IOException e) {
     throw new RuntimeException(e);
   } finally {
     try {
       if (file != null) file.close();
     } catch (IOException e) {
       throw new RuntimeException(e);
     }
   }
 }
Example #30
 public int read(byte[] b, int off, int len) throws IOException {
   if (channel.position() + len < channel.size()) {
     FileUtils.readFully(channel, ByteBuffer.wrap(b, off, len));
     return len;
   }
   return super.read(b, off, len);
 }