@Override
  public void writeTo(GatheringByteChannel channel) throws IOException {
    ByteBuffer[] buffers;
    ByteBuffer currentBuffer = null;
    BytesRef ref = new BytesRef();
    int pos = 0;

    // are we a slice?
    if (offset != 0) {
      // remaining size of page fragment at offset
      int fragmentSize = Math.min(length, PAGE_SIZE - (offset % PAGE_SIZE));
      bytearray.get(offset, fragmentSize, ref);
      currentBuffer = ByteBuffer.wrap(ref.bytes, ref.offset, fragmentSize);
      pos += fragmentSize;
    }

    // we only have a single page
    if (pos == length && currentBuffer != null) {
      channel.write(currentBuffer);
      return;
    }

    // a slice > pagesize will likely require extra buffers for initial/trailing fragments
    int numBuffers = countRequiredBuffers((currentBuffer != null ? 1 : 0), length - pos);

    buffers = new ByteBuffer[numBuffers];
    int bufferSlot = 0;

    if (currentBuffer != null) {
      buffers[bufferSlot] = currentBuffer;
      bufferSlot++;
    }

    // handle remainder of pages + trailing fragment
    while (pos < length) {
      int remaining = length - pos;
      int bulkSize = (remaining > PAGE_SIZE) ? PAGE_SIZE : remaining;
      bytearray.get(offset + pos, bulkSize, ref);
      currentBuffer = ByteBuffer.wrap(ref.bytes, ref.offset, bulkSize);
      buffers[bufferSlot] = currentBuffer;
      bufferSlot++;
      pos += bulkSize;
    }

    // a failing assertion here would indicate that our numBuffers calculation is off by one.
    assert (numBuffers == bufferSlot);

    // finally write all buffers
    channel.write(buffers);
  }
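The countRequiredBuffers helper is not shown in this example. A minimal sketch of one way such a count could be computed, assuming it is simply a ceiling division over PAGE_SIZE (the real helper may differ):

  // Hypothetical sketch only: one buffer per remaining whole or partial page,
  // plus any buffer already prepared for the leading fragment of a slice.
  private int countRequiredBuffers(int initialCount, int numBytes) {
    int wholeOrPartialPages = (numBytes + PAGE_SIZE - 1) / PAGE_SIZE; // ceiling division
    return initialCount + wholeOrPartialPages;
  }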
 @Override
 public int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
   if (PlatformDependent.javaVersion() < 7) {
     // XXX Gathering write is not supported because of a known issue.
     //     See http://bugs.sun.com/view_bug.do?bug_id=6210541
     return out.write(copiedNioBuffer(index, length));
   } else {
     long writtenBytes = out.write(nioBuffers(index, length));
     if (writtenBytes > Integer.MAX_VALUE) {
       return Integer.MAX_VALUE;
     } else {
       return (int) writtenBytes;
     }
   }
 }
  /** {@inheritDoc} */
  @Override
  public void write(GatheringByteChannel outputchannel) {

    try {
      int numimages = numImages();

      // Encode each image
      ByteBuffer[] images = new ByteBuffer[numimages];
      for (int i = 0; i < images.length; i++) {
        ByteBuffer image =
            com.mikeduvall.redhorizon.util.ByteBufferFactory.createLittleEndianByteBuffer(
                shpimages[i].capacity());
        CodecUtility.encodeFormat80(shpimages[i], image);
        images[i] = image;
      }

      // Construct image offset headers for each image
      ByteBuffer[] offsets = new ByteBuffer[numimages + 2];
      int offsettotal =
          ShpFileHeaderCNC.HEADER_SIZE + (ShpImageOffsetCNC.OFFSET_SIZE * offsets.length);
      for (int i = 0; i < numImages(); i++) {
        offsets[i] = new ShpImageOffsetCNC(offsettotal, FORMAT80, 0, (byte) 0).toByteBuffer();
        offsettotal += images[i].limit();
      }

      // The 2 special image offsets at the end of the offset array
      offsets[numimages] = new ShpImageOffsetCNC(offsettotal, (byte) 0, 0, (byte) 0).toByteBuffer();
      offsets[numimages + 1] = new ShpImageOffsetCNC(0, (byte) 0, 0, (byte) 0).toByteBuffer();

      // Build header
      ByteBuffer header = shpfileheader.toByteBuffer();

      // Write file
      try {
        outputchannel.write(header);
        outputchannel.write(offsets);
        outputchannel.write(images);
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    } finally {
      try {
        outputchannel.close();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
Example 4
 @Override
 public int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
   checkIndex(index, length);
   index = idx(index);
   return out.write(
       (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length));
 }
Example 5
 @Override
 public int readBytes(GatheringByteChannel out, int length) throws IOException {
   byte[] array = new byte[length];
   try {
     delegate.get(array, length);
     return out.write(ByteBuffer.wrap(array));
   } catch (ReadPastEndException e) {
     throw outOfBounds(e);
   }
 }
  @Override
  public int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
    ensureAccessible();
    if (length == 0) {
      return 0;
    }

    ByteBuffer tmpBuf = internalNioBuffer();
    tmpBuf.clear().position(index).limit(index + length);
    return out.write(tmpBuf);
  }
Example 7
  public static void main(String[] args) throws Exception {

    int reps = 10;
    if (args.length > 0) {
      reps = Integer.parseInt(args[0]);
    }

    FileOutputStream fos = new FileOutputStream(DEMOGRAPHIC);
    GatheringByteChannel gatherChannel = fos.getChannel();

    ByteBuffer[] bs = utterBS(reps);

    while (gatherChannel.write(bs) > 0) {}

    System.out.println("Mindshare paradigms synergized to " + DEMOGRAPHIC);
    fos.close();
  }
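Note the while (gatherChannel.write(bs) > 0) loop: a single gathering write may not drain every buffer, so the call is repeated until no further bytes are transferred. A self-contained sketch of the same pattern (file name, class name and payload are placeholders):

import java.io.FileOutputStream;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;

public class GatherLoopSketch {
  public static void main(String[] args) throws Exception {
    ByteBuffer[] parts = {
      ByteBuffer.wrap("header ".getBytes()),
      ByteBuffer.wrap("body ".getBytes()),
      ByteBuffer.wrap("trailer\n".getBytes())
    };
    try (FileOutputStream fos = new FileOutputStream("gather-demo.txt")) {
      GatheringByteChannel channel = fos.getChannel();
      // Keep writing until the channel reports that no bytes were transferred.
      while (channel.write(parts) > 0) {
        // intentionally empty: write() advances each buffer's position
      }
    }
  }
}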
  private int getBytes(int index, GatheringByteChannel out, int length, boolean internal)
      throws IOException {
    this.checkIndex(index, length);
    if (length == 0) {
      return 0;
    } else {
      ByteBuffer tmpBuf;
      if (internal) {
        tmpBuf = this.internalNioBuffer();
      } else {
        tmpBuf = ((ByteBuffer) this.memory).duplicate();
      }

      index = this.idx(index);
      tmpBuf.clear().position(index).limit(index + length);
      return out.write(tmpBuf);
    }
  }
Example 9
  /**
   * Write out our chain of buffers in chunks
   *
   * @param channel Where to write
   * @param chunkSize Size of chunks to write.
   * @return Amount written.
   * @throws IOException
   */
  long write(GatheringByteChannel channel, int chunkSize) throws IOException {
    int chunkRemaining = chunkSize;
    ByteBuffer lastBuffer = null;
    int bufCount = 0;
    int restoreLimit = -1;

    while (chunkRemaining > 0 && bufferOffset + bufCount < buffers.length) {
      lastBuffer = buffers[bufferOffset + bufCount];
      if (!lastBuffer.hasRemaining()) {
        bufferOffset++;
        continue;
      }
      bufCount++;
      if (lastBuffer.remaining() > chunkRemaining) {
        restoreLimit = lastBuffer.limit();
        lastBuffer.limit(lastBuffer.position() + chunkRemaining);
        chunkRemaining = 0;
        break;
      } else {
        chunkRemaining -= lastBuffer.remaining();
      }
    }
    assert lastBuffer != null;
    if (chunkRemaining == chunkSize) {
      assert !hasRemaining();
      // no data left to write
      return 0;
    }
    try {
      long ret = channel.write(buffers, bufferOffset, bufCount);
      if (ret > 0) {
        remaining -= ret;
      }
      return ret;
    } finally {
      if (restoreLimit >= 0) {
        lastBuffer.limit(restoreLimit);
      }
    }
  }
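A hypothetical caller for the chunked write above could drain the whole chain like this; BufferChain stands in for whatever class owns write(channel, chunkSize) and hasRemaining() in this example, and the 8 KiB chunk size is an arbitrary choice:

  static long drainInChunks(GatheringByteChannel channel, BufferChain chain) throws IOException {
    long total = 0;
    while (chain.hasRemaining()) {
      long written = chain.write(channel, 8192); // 8 KiB per call, arbitrary chunk size
      if (written == 0) {
        break; // the channel accepted no data for now (e.g. a non-blocking socket)
      }
      total += written;
    }
    return total;
  }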
Example 10
    public void getBox(WritableByteChannel writableByteChannel) throws IOException {
      ByteBuffer bb = ByteBuffer.allocate(16);
      long size = getSize();
      if (isSmallBox(size)) {
        IsoTypeWriter.writeUInt32(bb, size);
      } else {
        IsoTypeWriter.writeUInt32(bb, 1);
      }
      bb.put(IsoFile.fourCCtoBytes("mdat"));
      if (isSmallBox(size)) {
        bb.put(new byte[8]);
      } else {
        IsoTypeWriter.writeUInt64(bb, size);
      }
      bb.rewind();
      writableByteChannel.write(bb);
      if (writableByteChannel instanceof GatheringByteChannel) {
        List<ByteBuffer> nuSamples = unifyAdjacentBuffers(samples);

        int STEPSIZE = 1024;
        for (int i = 0; i < Math.ceil((double) nuSamples.size() / STEPSIZE); i++) {
          List<ByteBuffer> sublist =
              nuSamples.subList(
                  i * STEPSIZE, // start
                  (i + 1) * STEPSIZE < nuSamples.size()
                      ? (i + 1) * STEPSIZE
                      : nuSamples.size()); // end
          ByteBuffer[] sampleArray = sublist.toArray(new ByteBuffer[sublist.size()]);
          do {
            ((GatheringByteChannel) writableByteChannel).write(sampleArray);
          } while (sampleArray[sampleArray.length - 1].remaining() > 0);
        }
        // System.err.println(bytesWritten);
      } else {
        for (ByteBuffer sample : samples) {
          sample.rewind();
          writableByteChannel.write(sample);
        }
      }
    }
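The STEPSIZE loop above batches the sample buffers so that each gathering write sees at most 1024 of them, presumably to keep the per-call buffer count within typical operating-system iovec limits. A self-contained sketch of the same batching idea, using the total remaining byte count as the termination condition (class and method names are illustrative):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.GatheringByteChannel;
import java.util.List;

final class BatchedGatherWriter {
  static void writeAll(GatheringByteChannel channel, List<ByteBuffer> buffers) throws IOException {
    final int batchSize = 1024; // keep each gathering write well under common iovec limits
    for (int from = 0; from < buffers.size(); from += batchSize) {
      int to = Math.min(from + batchSize, buffers.size());
      ByteBuffer[] batch = buffers.subList(from, to).toArray(new ByteBuffer[0]);
      long pending = 0;
      for (ByteBuffer b : batch) {
        pending += b.remaining();
      }
      // Repeat the gathering write until every buffer in this batch is drained.
      while (pending > 0) {
        pending -= channel.write(batch);
      }
    }
  }
}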
Example 11
  /**
   * Drain pending buffers one at a time into the socket.
   *
   * @param channel the channel to write the pending buffers to
   * @return the number of bytes written to the channel
   * @throws IOException if writing to the channel fails
   */
  @Override
  int drainTo(final GatheringByteChannel channel) throws IOException {
    int bytesWritten = 0;
    long rc = 0;
    do {
      /*
       * Nothing to write
       */
      if (m_currentWriteBuffer == null && m_queuedBuffers.isEmpty()) {
        break;
      }

      ByteBuffer buffer = null;
      if (m_currentWriteBuffer == null) {
        m_currentWriteBuffer = m_queuedBuffers.poll();
        buffer = m_currentWriteBuffer.b();
        buffer.flip();
      } else {
        buffer = m_currentWriteBuffer.b();
      }

      rc = channel.write(buffer);

      // Discard the buffer back to a pool if no data remains
      if (!buffer.hasRemaining()) {
        m_currentWriteBuffer.discard();
        m_currentWriteBuffer = null;
        m_messagesWritten++;
      }
      bytesWritten += rc;

    } while (rc > 0);

    m_bytesWritten += bytesWritten;
    return bytesWritten;
  }
Example 12
 @Override
 public int getBytes(int index, GatheringByteChannel out, int length) throws IOException {
   ensureAccessible();
   return out.write(
       (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length));
 }
Example 13
  /** {@inheritDoc} */
  @Override
  public void write(GatheringByteChannel outputchannel) {

    int numimages = numImages();

    // Build header
    ByteBuffer header = wsaheader.toByteBuffer();

    // Build palette
    ByteBuffer palette = wsapalette.toByteBuffer();

    // Encode each frame, construct matching offsets
    ByteBuffer[] frames = new ByteBuffer[isLooping() ? numimages + 1 : numimages];
    ByteBuffer lastbytes =
        com.mikeduvall.redhorizon.util.ByteBufferFactory.createLittleEndianByteBuffer(
            width() * height());

    ByteBuffer frameoffsets =
        com.mikeduvall.redhorizon.util.ByteBufferFactory.createLittleEndianByteBuffer(
            (numimages + 2) * 4);
    int offsettotal = WsaFileHeaderCNC.HEADER_SIZE + ((numimages + 2) * 4);

    for (int i = 0; i < frames.length; i++) {
      ByteBuffer framebytes = wsaframes[i];
      ByteBuffer frameint =
          com.mikeduvall.redhorizon.util.ByteBufferFactory.createLittleEndianByteBuffer(
              (int) (framebytes.capacity() * 1.5));
      ByteBuffer frame =
          com.mikeduvall.redhorizon.util.ByteBufferFactory.createLittleEndianByteBuffer(
              (int) (framebytes.capacity() * 1.5));

      // First encode in Format40, then Format80
      CodecUtility.encodeFormat40(framebytes, frameint, lastbytes);
      CodecUtility.encodeFormat80(frameint, frame);

      frames[i] = frame;
      lastbytes = framebytes;

      frameoffsets.putInt(offsettotal);
      offsettotal += frame.limit();
    }

    // Last offset for EOF
    frameoffsets.putInt(offsettotal);
    frameoffsets.rewind();

    // Write file to disk
    try {
      outputchannel.write(header);
      outputchannel.write(frameoffsets);
      outputchannel.write(palette);
      outputchannel.write(frames);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }

    // Generate high-res colour lookup table
    if (!srcnohires) {

      // Figure-out the appropriate file name
      String lookupname =
          filename.contains(".")
              ? filename.substring(0, filename.lastIndexOf('.')) + ".pal"
              : filename + ".pal";

      // Write the index of the closest interpolated palette colour
      // TODO: Perform proper colour interpolation
      ByteBuffer lookup =
          com.mikeduvall.redhorizon.util.ByteBufferFactory.createLittleEndianByteBuffer(256);
      for (int i = 0; i < 256; i++) {
        lookup.put((byte) i);
      }
      lookup.rewind();

      try (FileChannel lookupfile = FileChannel.open(Paths.get(lookupname), WRITE)) {
        for (int i = 0; i < 256; i++) {
          lookupfile.write(lookup);
        }
      }
      // TODO: Should be able to soften the auto-close without needing this
      catch (IOException ex) {
        throw new RuntimeException(ex);
      }
    }
  }
  @Override
  protected void doWrite(ChannelOutboundBuffer in) throws Exception {
    int writeSpinCount = -1;

    GatheringByteChannel sink = connection().getSinkChannel();
    for (; ; ) {
      // Do gathering write for a non-single buffer case.
      final int msgCount = in.size();
      if (msgCount > 0) {
        // Ensure the pending writes are made of ByteBufs only.
        ByteBuffer[] nioBuffers = in.nioBuffers();
        if (nioBuffers != null) {

          int nioBufferCnt = in.nioBufferCount();
          long expectedWrittenBytes = in.nioBufferSize();

          long writtenBytes = 0;
          boolean done = false;
          boolean setOpWrite = false;
          for (int i = config().getWriteSpinCount() - 1; i >= 0; i--) {
            final long localWrittenBytes = sink.write(nioBuffers, 0, nioBufferCnt);
            if (localWrittenBytes == 0) {
              setOpWrite = true;
              break;
            }
            expectedWrittenBytes -= localWrittenBytes;
            writtenBytes += localWrittenBytes;
            if (expectedWrittenBytes == 0) {
              done = true;
              break;
            }
          }

          if (done) {
            // Release all buffers
            for (int i = msgCount; i > 0; i--) {
              in.remove();
            }

            // Finish the write loop if no new messages were flushed by in.remove().
            if (in.isEmpty()) {
              connection().getSinkChannel().suspendWrites();
              break;
            }
          } else {
            // Did not write all buffers completely.
            // Release the fully written buffers and update the indexes of the partially written
            // buffer.

            for (int i = msgCount; i > 0; i--) {
              final ByteBuf buf = (ByteBuf) in.current();
              final int readerIndex = buf.readerIndex();
              final int readableBytes = buf.writerIndex() - readerIndex;

              if (readableBytes < writtenBytes) {
                in.progress(readableBytes);
                in.remove();
                writtenBytes -= readableBytes;
              } else if (readableBytes > writtenBytes) {
                buf.readerIndex(readerIndex + (int) writtenBytes);
                in.progress(writtenBytes);
                break;
              } else { // readableBytes == writtenBytes
                in.progress(readableBytes);
                in.remove();
                break;
              }
            }

            incompleteWrite(setOpWrite);
            break;
          }
          continue;
        }
      }

      Object msg = in.current();
      if (msg == null) {
        // Wrote all messages.
        connection().getSinkChannel().suspendWrites();
        break;
      }

      if (msg instanceof ByteBuf) {
        ByteBuf buf = (ByteBuf) msg;
        int readableBytes = buf.readableBytes();
        if (readableBytes == 0) {
          in.remove();
          continue;
        }

        if (!buf.isDirect()) {
          ByteBufAllocator alloc = alloc();
          if (alloc.isDirectBufferPooled()) {
            // Non-direct buffers are copied into JDK's own internal direct buffer on every I/O.
            // We can do a better job by using our pooled allocator. If the current allocator
            // does not pool a direct buffer, we rely on JDK's direct buffer pool.
            buf = alloc.directBuffer(readableBytes).writeBytes(buf);
            in.current(buf);
          }
        }

        boolean setOpWrite = false;
        boolean done = false;
        long flushedAmount = 0;
        if (writeSpinCount == -1) {
          writeSpinCount = config().getWriteSpinCount();
        }
        for (int i = writeSpinCount - 1; i >= 0; i--) {
          int localFlushedAmount = buf.readBytes(sink, buf.readableBytes());
          if (localFlushedAmount == 0) {
            setOpWrite = true;
            break;
          }

          flushedAmount += localFlushedAmount;
          if (!buf.isReadable()) {
            done = true;
            break;
          }
        }

        in.progress(flushedAmount);

        if (done) {
          in.remove();
        } else {
          incompleteWrite(setOpWrite);
          break;
        }
      } else if (msg instanceof FileRegion) {
        FileRegion region = (FileRegion) msg;
        boolean setOpWrite = false;
        boolean done = false;
        long flushedAmount = 0;
        if (writeSpinCount == -1) {
          writeSpinCount = config().getWriteSpinCount();
        }
        for (int i = writeSpinCount - 1; i >= 0; i--) {
          long localFlushedAmount = region.transferTo(sink, region.transfered());
          if (localFlushedAmount == 0) {
            setOpWrite = true;
            break;
          }

          flushedAmount += localFlushedAmount;
          if (region.transfered() >= region.count()) {
            done = true;
            break;
          }
        }

        in.progress(flushedAmount);

        if (done) {
          in.remove();
        } else {
          incompleteWrite(setOpWrite);
          break;
        }
      } else {
        throw new UnsupportedOperationException(
            "unsupported message type: " + StringUtil.simpleClassName(msg));
      }
    }
  }
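For reference, the gathering branch of doWrite follows the usual spin-write pattern: retry the gathering write up to the configured spin count, give up and request a writability notification when a write makes no progress, and stop once the expected number of bytes has been flushed. A condensed, hypothetical sketch of that inner loop (names are illustrative, not part of the API above):

  // Returns true when all expected bytes were flushed; a false return corresponds
  // to the setOpWrite/incompleteWrite path in the example above.
  static boolean spinWrite(GatheringByteChannel sink, ByteBuffer[] bufs, long expectedBytes,
      int spinCount) throws IOException {
    for (int i = spinCount - 1; i >= 0; i--) {
      long written = sink.write(bufs, 0, bufs.length);
      if (written == 0) {
        return false; // the kernel send buffer is full; wait for writability
      }
      expectedBytes -= written;
      if (expectedBytes == 0) {
        return true; // everything flushed
      }
    }
    return false; // spin budget exhausted before flushing everything
  }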