Example no. 1
 /**
  * Constructor.
  *
  * @param socketChannel the SocketChannel underlying this SocketCommChannel
  * @param location the location for this channel
  * @param protocol the CommProtocol to use to send and receive messages
  * @throws java.io.IOException if an I/O error occurs while configuring the underlying socket
  * @see CommProtocol
  * @see SocketChannel
  */
 public SocketCommChannel(SocketChannel socketChannel, URI location, CommProtocol protocol)
     throws IOException {
   super(location, protocol);
   this.socketChannel = socketChannel;
   socketChannel.socket().setSoLinger(true, SO_LINGER);
   this.istream =
       new PreBufferedInputStream(new BufferedInputStream(Channels.newInputStream(socketChannel)));
   this.ostream = new BufferedOutputStream(Channels.newOutputStream(socketChannel));
   setToBeClosed(false); // Socket connections are kept open by default
 }
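
The wrapping pattern the Javadoc describes can be reproduced with only standard java.nio classes; a minimal, self-contained sketch (host, port, linger value, and the payload are illustrative, and the project-specific PreBufferedInputStream is left out):

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.channels.Channels;
import java.nio.channels.SocketChannel;

public class BufferedSocketStreams {
  public static void main(String[] args) throws IOException {
    // Open a blocking SocketChannel and wrap it in buffered streams, as in the constructor above.
    try (SocketChannel channel = SocketChannel.open(new InetSocketAddress("localhost", 9000))) {
      channel.socket().setSoLinger(true, 10); // linger seconds are illustrative
      InputStream in = new BufferedInputStream(Channels.newInputStream(channel));
      OutputStream out = new BufferedOutputStream(Channels.newOutputStream(channel));
      out.write("ping\n".getBytes());
      out.flush(); // push the buffered bytes onto the channel
      System.out.println("first reply byte: " + in.read()); // blocking read through the channel
    }
  }
}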
Example no. 2
  public InputStream newInputStream(Path path, OpenOption[] options) throws IOException {
    // Only the READ option (or no options at all) is accepted when opening an input stream.
    if (options.length > 0) {
      for (OpenOption option : options) {
        if (option != StandardOpenOption.READ) {
          throw new UnsupportedOperationException("'" + option + "' not allowed");
        }
      }
    }
    return Channels.newInputStream(Files.newByteChannel(path));
  }
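
For regular files on the default file system, a roughly equivalent stream can be obtained through Files.newInputStream, which defaults to read-only access when no options are given; a minimal sketch (the path is illustrative):

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class ReadFileAsStream {
  public static void main(String[] args) throws IOException {
    Path path = Paths.get("data.bin"); // illustrative path
    // With no options, Files.newInputStream opens the file for reading only,
    // matching the READ-only case handled above.
    try (InputStream in = Files.newInputStream(path)) {
      System.out.println("first byte: " + in.read());
    }
  }
}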
Example no. 3
  public static ByteBuffer readChannelToBuffer(ReadableByteChannel channel) throws NetIOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();

    try {
      logger.debug(
          "bytes read from channel: " + IOUtils.copy(Channels.newInputStream(channel), bos));
    } catch (IOException e) {
      throw new NetIOException(e);
    }

    return ByteBuffer.wrap(ArrayUtils.subarray(bos.toByteArray(), 0, bos.size()));
  }
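
For comparison, the same drain-a-channel-into-memory idea can be written with plain NIO and without the IOUtils/ArrayUtils helpers; a sketch that assumes a blocking channel and omits the NetIOException wrapping:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;

public final class ChannelBuffers {
  /** Drains a (blocking) channel into a single ByteBuffer, growing the buffer as needed. */
  public static ByteBuffer readChannelToBuffer(ReadableByteChannel channel) throws IOException {
    ByteBuffer buffer = ByteBuffer.allocate(8192);
    while (channel.read(buffer) >= 0) {
      if (!buffer.hasRemaining()) { // full: copy into a buffer twice the size
        ByteBuffer bigger = ByteBuffer.allocate(buffer.capacity() * 2);
        buffer.flip();
        bigger.put(buffer);
        buffer = bigger;
      }
    }
    buffer.flip(); // position 0, limit = number of bytes read
    return buffer;
  }
}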
Example no. 4
 /** @deprecated Use {@code read(ByteBuffer)} / {@code write(ByteBuffer)} instead. */
 @Deprecated
 public GnutellaInputStream getInputStream() throws IOException {
   if (inputStream == null) {
     if (socket == null) {
       throw new ConnectionClosedException("Connection already closed");
     }
     initBandwidthByteChannel();
     InputStream inStream = Channels.newInputStream(bandwidthByteChannel);
     inputStream = new GnutellaInputStream(inStream);
   }
   return inputStream;
 }
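
The deprecation note above points callers at channel-style I/O; a minimal sketch of what a read(ByteBuffer) loop looks like, using a FileChannel as an illustrative data source:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;

public class ReadWithByteBuffer {
  public static void main(String[] args) throws IOException {
    // FileChannel stands in for any ReadableByteChannel; the path is illustrative.
    try (FileChannel channel = FileChannel.open(Paths.get("data.bin"))) {
      ByteBuffer buffer = ByteBuffer.allocate(4096);
      int read;
      long total = 0;
      while ((read = channel.read(buffer)) != -1) {
        total += read;
        buffer.flip();  // switch to draining mode
        // ... consume buffer.remaining() bytes here ...
        buffer.clear(); // reuse the buffer for the next read
      }
      System.out.println("bytes read: " + total);
    }
  }
}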
Example no. 5
  public AnalysisStatusUpdater(final String host, final int port) throws IOException {
    EvaluatorLoggingHandler.logger.info("Listening for server status on port " + port);
    this.socketConnection = SocketChannel.open(new InetSocketAddress(host, port));

    this.socketConnection.socket().setSoTimeout(250);
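    // Note: the 250 ms SO_TIMEOUT set above generally applies only to streams obtained from the
    // adaptor socket itself; reads made below via Channels.newInputStream(this.socketConnection)
    // go through the blocking channel and may not honor this timeout.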

    this.out = new PrintWriter(Channels.newOutputStream(this.socketConnection));
    this.in =
        new BufferedReader(new InputStreamReader(Channels.newInputStream(this.socketConnection)));

    this.completedState = QueryConstants.failedComplete;
    this.finished = false;
  }
Example no. 6
  @FixFor("MODE-1358")
  @Test
  public void shouldCopyFilesUsingStreams() throws Exception {
    // Copy a large file into a temporary file ...
    File tempFile = File.createTempFile("copytest", "pdf");
    RandomAccessFile destinationRaf = null;
    RandomAccessFile originalRaf = null;
    try {
      URL sourceUrl = getClass().getResource("/docs/postgresql-8.4.1-US.pdf");
      assertThat(sourceUrl, is(notNullValue()));
      File sourceFile = new File(sourceUrl.toURI());
      assertThat(sourceFile.exists(), is(true));
      assertThat(sourceFile.canRead(), is(true));
      assertThat(sourceFile.isFile(), is(true));

      boolean useBufferedStream = true;
      final int bufferSize = AbstractBinaryStore.bestBufferSize(sourceFile.length());

      destinationRaf = new RandomAccessFile(tempFile, "rw");
      originalRaf = new RandomAccessFile(sourceFile, "r");

      FileChannel destinationChannel = destinationRaf.getChannel();
      OutputStream output = Channels.newOutputStream(destinationChannel);
      if (useBufferedStream) output = new BufferedOutputStream(output, bufferSize);

      // Create an input stream to the original file ...
      FileChannel originalChannel = originalRaf.getChannel();
      InputStream input = Channels.newInputStream(originalChannel);
      if (useBufferedStream) input = new BufferedInputStream(input, bufferSize);

      // Copy the content ...
      Stopwatch sw = new Stopwatch();
      sw.start();
      IoUtil.write(input, output, bufferSize);
      sw.stop();
      System.out.println(
          "Time to copy \""
              + sourceFile.getName()
              + "\" ("
              + sourceFile.length()
              + " bytes): "
              + sw.getTotalDuration());
    } finally {
      // Close both files before deleting the temporary copy (deleting an open file can fail).
      if (destinationRaf != null) destinationRaf.close();
      if (originalRaf != null) originalRaf.close();
      tempFile.delete();
    }
  }
Example no. 7
 public void testReadLoop() throws Exception {
   WritableGridFileChannel wgfc = fs.getWritableChannel("/readTest.txt", false, 100);
   try {
     assertTrue(wgfc.isOpen());
     wgfc.write(ByteBuffer.wrap("This tests read loop.".getBytes()));
   } finally {
     wgfc.close();
   }
   ReadableGridFileChannel rgfc = fs.getReadableChannel("/readTest.txt");
   try {
     assertTrue(
         "This tests read loop.".equals(new String(toBytes(Channels.newInputStream(rgfc)))));
   } finally {
     rgfc.close();
   }
 }
Example no. 8
  @Override
  public void download(String id, WritableByteChannel target) throws IOException {
    Path tmpFile = Files.createTempFile(tmpDir, id, ".unzipped");

    try (FileChannel tmpChannel = FileChannel.open(tmpFile, FileUtils.TMP_FILE_OPEN_OPTIONS)) {
      underlyingStore.download(id, tmpChannel);

      // Reset channel for reading
      tmpChannel.position(0);

      GZIPInputStream in = new GZIPInputStream(Channels.newInputStream(tmpChannel));
      OutputStream out = Channels.newOutputStream(target);

      IOUtils.copy(in, out);

      logger.debug("Data '{}' successfully unzipped", id);
    }
  }
Example no. 9
  @Override
  public void upload(String id, ReadableByteChannel src, long length) throws IOException {
    Path tmpFile = Files.createTempFile(tmpDir, id, ".zipped");

    try (FileChannel tmpChannel = FileChannel.open(tmpFile, FileUtils.TMP_FILE_OPEN_OPTIONS)) {
      InputStream in = Channels.newInputStream(src);
      GZIPOutputStream out = new GZIPOutputStream(Channels.newOutputStream(tmpChannel));

      IOUtils.copy(in, out);

      out.finish();

      logger.debug("Data '{}' successfully zipped", id);

      // Reset channel for reading
      tmpChannel.position(0);

      underlyingStore.upload(id, tmpChannel, tmpChannel.size());
    }
  }
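
Example no. 8 and example no. 9 mirror each other; the sketch below round-trips the same gzip-over-channels idea with temporary files only, dropping the underlyingStore indirection (all file names are illustrative; transferTo and readAllBytes require JDK 9+):

import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipChannelRoundTrip {
  public static void main(String[] args) throws IOException {
    Path plain = Files.createTempFile("payload", ".txt");
    Files.write(plain, "hello, gzip over channels".getBytes());
    Path zipped = Files.createTempFile("payload", ".gz");

    // Compress: read the plain file through one channel, write gzip through another.
    try (FileChannel src = FileChannel.open(plain, StandardOpenOption.READ);
         FileChannel dst = FileChannel.open(zipped, StandardOpenOption.WRITE);
         InputStream in = Channels.newInputStream(src);
         GZIPOutputStream out = new GZIPOutputStream(Channels.newOutputStream(dst))) {
      in.transferTo(out); // JDK 9+; use a manual copy loop on older JDKs
      out.finish();       // flush the trailing gzip block, as in example no. 9
    }

    // Decompress: wrap the zipped file's channel in a GZIPInputStream.
    try (FileChannel src = FileChannel.open(zipped, StandardOpenOption.READ);
         GZIPInputStream in = new GZIPInputStream(Channels.newInputStream(src))) {
      System.out.println(new String(in.readAllBytes())); // JDK 9+
    }
  }
}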
Example no. 10
  @Override
  public void finishResponse() throws Exception {
    super.finishResponse();
    if (_errorHandled) {
      return;
    }

    final String sourcesResponseError = "/sources response error: ";
    try {
      String exceptionName = RemoteExceptionHandler.getExceptionName(_decorated);
      if (null != exceptionName) {
        LOG.error(sourcesResponseError + RemoteExceptionHandler.getExceptionMessage(_decorated));
        _stateReuse.switchToSourcesResponseError();
      } else {
        String hostHdr = DbusConstants.UNKNOWN_HOST;
        String svcHdr = DbusConstants.UNKNOWN_SERVICE_ID;
        if (null != getParent()) {
          hostHdr = getParent().getRemoteHost();
          svcHdr = getParent().getRemoteService();
          LOG.info("initiated session to host " + hostHdr + " service " + svcHdr);
        }

        InputStream bodyStream = Channels.newInputStream(_decorated);
        ObjectMapper mapper = new ObjectMapper();

        List<IdNamePair> sources =
            mapper.readValue(bodyStream, new TypeReference<List<IdNamePair>>() {});
        _stateReuse.switchToSourcesSuccess(sources, hostHdr, svcHdr);
      }
    } catch (IOException ex) {
      LOG.error(sourcesResponseError, ex);
      _stateReuse.switchToSourcesResponseError();
    } catch (RuntimeException ex) {
      LOG.error(sourcesResponseError, ex);
      _stateReuse.switchToSourcesResponseError();
    }

    _callback.enqueueMessage(_stateReuse);
  }
Example no. 11
  @SuppressWarnings("resource")
  protected void openFile() throws IOException {
    synchronized (locked) {
      while (locked.containsKey(filename) && locked.get(filename)) {
        try {
          locked.wait();
        } catch (InterruptedException e) {
        }
      }
      locked.put(filename, true);

      File file = new File(this.filename);
      if (!file.exists()) {
        locked.put(filename, false);
        locked.notifyAll();
        throw new IllegalStateException(
            "Warning: File doesn't exist (anymore):'" + this.filename + "'");
      }

      channel = new RandomAccessFile(file, "rw").getChannel();
      try {
        // TODO: add support for shared locks, allowing parallel reading
        // operations.
        lock = channel.lock();

      } catch (Exception e) {
        channel.close();
        channel = null;
        lock = null;
        locked.put(filename, false);
        locked.notifyAll();
        throw new IllegalStateException("error, couldn't obtain file lock on:" + filename, e);
      }
      fis = new BufferedInputStream(Channels.newInputStream(channel));
      fos = new BufferedOutputStream(Channels.newOutputStream(channel));
    }
  }
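
The TODO in example no. 11 mentions shared locks for parallel readers; a minimal sketch of taking a shared (read) lock with try-with-resources instead of the exclusive channel.lock() used above (the file name is illustrative):

import java.io.BufferedInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class SharedLockRead {
  public static void main(String[] args) throws IOException {
    try (FileChannel channel = FileChannel.open(Paths.get("data.db"), StandardOpenOption.READ);
         // shared = true lets other readers lock the same region concurrently
         FileLock lock = channel.lock(0, Long.MAX_VALUE, true);
         InputStream in = new BufferedInputStream(Channels.newInputStream(channel))) {
      System.out.println("first byte: " + in.read());
    } // lock and channel are released automatically
  }
}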
Example no. 12
  /**
   * Given a file, return an ImmutableTranslogReader based on an optionally-existing header in the
   * file. If the file does not exist, or has zero length, returns the latest version. If the header
   * does not exist, assumes Version 0 of the translog file format.
   */
  public static ImmutableTranslogReader open(
      ChannelReference channelReference, Checkpoint checkpoint, String translogUUID)
      throws IOException {
    final FileChannel channel = channelReference.getChannel();
    final Path path = channelReference.getPath();
    assert channelReference.getGeneration() == checkpoint.generation
        : "expected generation: "
            + channelReference.getGeneration()
            + " but got: "
            + checkpoint.generation;

    try {
      if (checkpoint.offset == 0
          && checkpoint.numOps == TranslogReader.UNKNOWN_OP_COUNT) { // only old files can be empty
        return new LegacyTranslogReader(channelReference.getGeneration(), channelReference, 0);
      }

      InputStreamStreamInput headerStream =
          new InputStreamStreamInput(Channels.newInputStream(channel)); // don't close
      // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the
      // header, in binary this looks like:
      //
      // binary: 0011 1111 1101 0111 0110 1100 0001 0111
      // hex   :    3    f    d    7    6    c    1    7
      //
      // With version 0 of the translog, the first byte is the
      // Operation.Type, which will always be between 0-4, so we know if
      // we grab the first byte, it can be:
      // 0x3f => Lucene's magic number, so we can assume it's version 1 or later
      // 0x00 => version 0 of the translog
      //
      // otherwise the first byte of the translog is corrupted and we
      // should bail
      byte b1 = headerStream.readByte();
      if (b1 == LUCENE_CODEC_HEADER_BYTE) {
        // Read 3 more bytes, meaning a whole integer has been read
        byte b2 = headerStream.readByte();
        byte b3 = headerStream.readByte();
        byte b4 = headerStream.readByte();
        // Convert the 4 bytes that were read into an integer
        int header =
            ((b1 & 0xFF) << 24) + ((b2 & 0xFF) << 16) + ((b3 & 0xFF) << 8) + ((b4 & 0xFF) << 0);
        // We confirm CodecUtil's CODEC_MAGIC number (0x3FD76C17)
        // ourselves here, because it allows us to read the first
        // byte separately
        if (header != CodecUtil.CODEC_MAGIC) {
          throw new TranslogCorruptedException(
              "translog looks like version 1 or later, but has corrupted header");
        }
        // Confirm the rest of the header using CodecUtil, extracting
        // the translog version
        int version =
            CodecUtil.checkHeaderNoMagic(
                new InputStreamDataInput(headerStream),
                TranslogWriter.TRANSLOG_CODEC,
                1,
                Integer.MAX_VALUE);
        switch (version) {
          case TranslogWriter.VERSION_CHECKSUMS:
            assert checkpoint.numOps == TranslogReader.UNKNOWN_OP_COUNT
                : "expected unknown op count but got: " + checkpoint.numOps;
            assert checkpoint.offset == Files.size(path)
                : "offset("
                    + checkpoint.offset
                    + ") != file_size("
                    + Files.size(path)
                    + ") for: "
                    + path;
            // legacy - we still have to support it somehow
            return new LegacyTranslogReaderBase(
                channelReference.getGeneration(),
                channelReference,
                CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC),
                checkpoint.offset);
          case TranslogWriter.VERSION_CHECKPOINTS:
            assert path.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX)
                : "new file ends with old suffix: " + path;
            assert checkpoint.numOps > TranslogReader.UNKNOWN_OP_COUNT
                : "expected at least 0 operations but got: " + checkpoint.numOps;
            assert checkpoint.offset <= channel.size()
                : "checkpoint is inconsistent with channel length: "
                    + channel.size()
                    + " "
                    + checkpoint;
            int len = headerStream.readInt();
            if (len > channel.size()) {
              throw new TranslogCorruptedException("uuid length can't be larger than the translog");
            }
            BytesRef ref = new BytesRef(len);
            ref.length = len;
            headerStream.read(ref.bytes, ref.offset, ref.length);
            BytesRef uuidBytes = new BytesRef(translogUUID);
            if (uuidBytes.bytesEquals(ref) == false) {
              throw new TranslogCorruptedException(
                  "expected shard UUID ["
                      + uuidBytes
                      + "] but got: ["
                      + ref
                      + "] this translog file belongs to a different translog");
            }
            return new ImmutableTranslogReader(
                channelReference.getGeneration(),
                channelReference,
                ref.length
                    + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC)
                    + RamUsageEstimator.NUM_BYTES_INT,
                checkpoint.offset,
                checkpoint.numOps);
          default:
            throw new TranslogCorruptedException(
                "No known translog stream version: " + version + " path:" + path);
        }
      } else if (b1 == UNVERSIONED_TRANSLOG_HEADER_BYTE) {
        assert checkpoint.numOps == TranslogReader.UNKNOWN_OP_COUNT
            : "expected unknown op count but got: " + checkpoint.numOps;
        assert checkpoint.offset == Files.size(path)
            : "offset("
                + checkpoint.offset
                + ") != file_size("
                + Files.size(path)
                + ") for: "
                + path;
        return new LegacyTranslogReader(
            channelReference.getGeneration(), channelReference, checkpoint.offset);
      } else {
        throw new TranslogCorruptedException(
            "Invalid first byte in translog file, got: "
                + Long.toHexString(b1)
                + ", expected 0x00 or 0x3f");
      }
    } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {
      throw new TranslogCorruptedException("Translog header corrupted", e);
    }
  }
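
A small self-contained sketch of the magic-number check walked through in the comments of example no. 12: the first byte is read separately, then the remaining three are folded into the full header word and compared with the 0x3FD76C17 value quoted above:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

public class HeaderMagicCheck {
  static final int CODEC_MAGIC = 0x3FD76C17; // Lucene's CodecUtil.CODEC_MAGIC, per the comment above

  public static void main(String[] args) throws IOException {
    byte[] header = {0x3F, (byte) 0xD7, 0x6C, 0x17, /* rest of file... */ 0x00};
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(header));

    int b1 = in.readUnsignedByte(); // 0x3F => versioned translog, 0x00 => version 0
    int b2 = in.readUnsignedByte();
    int b3 = in.readUnsignedByte();
    int b4 = in.readUnsignedByte();
    int magic = (b1 << 24) | (b2 << 16) | (b3 << 8) | b4;

    System.out.println(magic == CODEC_MAGIC); // prints true for this header
  }
}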
Example no. 13
 /**
  * A convenience method for the following code:
  *
  * <pre>
  * {@link InputStream} tempInputStream = {@link Channels#newInputStream(ReadableByteChannel) Channels.newInputStream(input)};
  * tempInputStream = new {@link BufferedInputStream#BufferedInputStream(InputStream) java.io.BufferedInputStream(tempInputStream)};
  * return new {@link ObjectInputStream#ObjectInputStream(InputStream) java.io.ObjectInputStream(tempInputStream)};
  * </pre>
  *
  * @throws IOException pass-through for any {@link IOException} that occurs in the above code
  */
 public static ObjectInputStream wrapChannelInObjectInputStream(ReadableByteChannel input)
     throws IOException {
   InputStream tempInputStream = Channels.newInputStream(input);
   tempInputStream = new BufferedInputStream(tempInputStream);
   return new ObjectInputStream(tempInputStream);
 }
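
A possible round-trip use of the helper documented in example no. 13; since the helper's enclosing class is not shown, the read side repeats the same wrapping sequence inline (file name and payload are illustrative):

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ObjectOverChannel {
  public static void main(String[] args) throws IOException, ClassNotFoundException {
    Path file = Files.createTempFile("objects", ".bin");

    // Write side: the mirror image of the documented helper.
    try (FileChannel out = FileChannel.open(file, StandardOpenOption.WRITE);
         ObjectOutputStream oos =
             new ObjectOutputStream(new BufferedOutputStream(Channels.newOutputStream(out)))) {
      oos.writeObject("serialized through a channel");
    }

    // Read side: the wrapping sequence shown in the Javadoc above.
    try (FileChannel in = FileChannel.open(file, StandardOpenOption.READ);
         ObjectInputStream ois =
             new ObjectInputStream(new BufferedInputStream(Channels.newInputStream(in)))) {
      System.out.println(ois.readObject());
    }
  }
}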
Example no. 14
 @Override
 public InputStream getInputStream() throws IOException {
   return Channels.newInputStream(inner);
 }
Example no. 15
  @Override
  public void finishResponse() throws Exception {
    super.finishResponse();
    if (_errorHandled) {
      return;
    }

    final String registerResponseError = "/register response error: ";
    try {
      String exceptionName = RemoteExceptionHandler.getExceptionName(_decorated);
      if (null != exceptionName) {
        LOG.error(registerResponseError + RemoteExceptionHandler.getExceptionMessage(_decorated));
        _stateReuse.switchToRegisterResponseError();
      } else {
        InputStream bodyStream = Channels.newInputStream(_decorated);
        ObjectMapper mapper = new ObjectMapper();
        int registerResponseVersion = 3; // either 2 or 3 would suffice here; we care only about 4

        if (_registerResponseVersionHdr != null) {
          try {
            registerResponseVersion = Integer.parseInt(_registerResponseVersionHdr);
          } catch (NumberFormatException e) {
            throw new RuntimeException(
                "Could not parse /register response protocol version: "
                    + _registerResponseVersionHdr);
          }
          if (registerResponseVersion < 2 || registerResponseVersion > 4) {
            throw new RuntimeException(
                "Out-of-range /register response protocol version: " + _registerResponseVersionHdr);
          }
        }

        if (registerResponseVersion == 4) // DDSDBUS-2009
        {
          HashMap<String, List<Object>> responseMap =
              mapper.readValue(bodyStream, new TypeReference<HashMap<String, List<Object>>>() {});

          // Look for mandatory SOURCE_SCHEMAS_KEY.
          Map<Long, List<RegisterResponseEntry>> sourcesSchemasMap =
              RegisterResponseEntry.createFromResponse(
                  responseMap, RegisterResponseEntry.SOURCE_SCHEMAS_KEY, false);
          // Look for optional KEY_SCHEMAS_KEY
          // Key schemas, if they exist, should correspond to source schemas, but it's not
          // a one-to-one mapping.  The same version of a key schema may be used for several
          // versions of a source schema, or vice versa.  (The IDs must correspond.)
          //
          // TODO (DDSDBUS-xxx):  support key schemas on the relay side, too
          Map<Long, List<RegisterResponseEntry>> keysSchemasMap =
              RegisterResponseEntry.createFromResponse(
                  responseMap, RegisterResponseEntry.KEY_SCHEMAS_KEY, true);

          // Look for optional METADATA_SCHEMAS_KEY
          List<RegisterResponseMetadataEntry> metadataSchemasList =
              RegisterResponseMetadataEntry.createFromResponse(
                  responseMap, RegisterResponseMetadataEntry.METADATA_SCHEMAS_KEY, true);

          _stateReuse.switchToRegisterSuccess(
              sourcesSchemasMap, keysSchemasMap, metadataSchemasList);
        } else // version 2 or 3
        {
          List<RegisterResponseEntry> schemasList =
              mapper.readValue(bodyStream, new TypeReference<List<RegisterResponseEntry>>() {});

          Map<Long, List<RegisterResponseEntry>> sourcesSchemasMap =
              RegisterResponseEntry.convertSchemaListToMap(schemasList);

          _stateReuse.switchToRegisterSuccess(sourcesSchemasMap, null, null);
        }
      }
    } catch (IOException ex) {
      LOG.error(registerResponseError, ex);
      _stateReuse.switchToRegisterResponseError();
    } catch (RuntimeException ex) {
      LOG.error(registerResponseError, ex);
      _stateReuse.switchToRegisterResponseError();
    }

    _callback.enqueueMessage(_stateReuse);
  }
Example no. 16
 public DataInputStream openDataInputStream() throws IOException {
   if (this.dis == null) this.dis = new DataInputStream(Channels.newInputStream(this.channel));
   return this.dis;
 }