/** @throws Exception If failed. */
  public void testMapField() throws Exception {
    BinaryObjectBuilder builder = builder("Class");

    builder.hashCode(100);

    builder.setField("mapField", F.asMap(new Key(1), new Value(1), new Key(2), new Value(2)));
    builder.setField(
        "mapField2", F.asMap(new Key(1), new Value(1), new Key(2), new Value(2)), Map.class);

    BinaryObject po = builder.build();

    assertEquals(expectedHashCode("Class"), po.type().typeId());
    assertEquals(100, po.hashCode());

    // Test non-standard map.
    Map<Key, Value> map = po.field("mapField");

    assertEquals(2, map.size());

    for (Map.Entry<Key, Value> e : map.entrySet()) assertEquals(e.getKey().i, e.getValue().i);

    // Test binary map.
    Map<BinaryObject, BinaryObject> map2 = po.field("mapField2");

    assertEquals(2, map2.size());

    for (Map.Entry<BinaryObject, BinaryObject> e : map2.entrySet())
      assertEquals(e.getKey().<Key>deserialize().i, e.getValue().<Value>deserialize().i);
  }
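For reference, a minimal standalone sketch of the F.asMap usage this test relies on (the class name AsMapSketch is hypothetical; F is Ignite's org.apache.ignite.internal.util.typedef.F typedef):

import java.util.Map;

import org.apache.ignite.internal.util.typedef.F;

public class AsMapSketch {
  public static void main(String[] args) {
    // F.asMap builds a small map inline from alternating key/value pairs,
    // avoiding explicit HashMap boilerplate at the call site.
    Map<Integer, String> m = F.asMap(1, "one", 2, "two");

    assert m.size() == 2;
    assert "one".equals(m.get(1));
  }
}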
Example #2
  /** {@inheritDoc} */
  @Override
  public void setOwner(Path p, String usr, String grp) throws IOException {
    A.notNull(p, "p");
    A.notNull(usr, "username");
    A.notNull(grp, "grpName");

    enterBusy();

    try {
      if (mode(p) == PROXY) secondaryFileSystem().setOwner(toSecondary(p), usr, grp);
      else if (rmtClient.update(
              convert(p), F.asMap(IgfsUtils.PROP_USER_NAME, usr, IgfsUtils.PROP_GROUP_NAME, grp))
          == null) {
        throw new IOException(
            "Failed to set file permission (file not found?)"
                + " [path="
                + p
                + ", username="******", grpName="
                + grp
                + ']');
      }
    } finally {
      leaveBusy();
    }
  }
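The enterBusy()/leaveBusy() pair above is the standard Ignite busy-guard that blocks shutdown while an operation is in flight. A minimal sketch of the pattern, assuming a GridSpinBusyLock field (class and method names are illustrative):

import org.apache.ignite.internal.util.GridSpinBusyLock;

class BusyGuardSketch {
  private final GridSpinBusyLock busyLock = new GridSpinBusyLock();

  void doWork() {
    // enterBusy() returns false once the component has started stopping.
    if (!busyLock.enterBusy())
      throw new IllegalStateException("Component is stopping.");

    try {
      // ... perform the operation while stop is blocked ...
    } finally {
      // Always release in 'finally' so that stop can proceed.
      busyLock.leaveBusy();
    }
  }
}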
Example #3
  /** {@inheritDoc} */
  @SuppressWarnings("deprecation")
  @Override
  public FSDataOutputStream createInternal(
      Path f,
      EnumSet<CreateFlag> flag,
      FsPermission perm,
      int bufSize,
      short replication,
      long blockSize,
      Progressable progress,
      Options.ChecksumOpt checksumOpt,
      boolean createParent)
      throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
      IgfsPath path = convert(f);
      IgfsMode mode = modeRslvr.resolveMode(path);

      if (LOG.isDebugEnabled())
        LOG.debug(
            "Opening output stream in create [thread="
                + Thread.currentThread().getName()
                + "path="
                + path
                + ", overwrite="
                + overwrite
                + ", bufSize="
                + bufSize
                + ']');

      if (mode == PROXY) {
        FSDataOutputStream os =
            secondaryFileSystem()
                .create(toSecondary(f), perm, flag, bufSize, replication, blockSize, progress);

        if (clientLog.isLogEnabled()) {
          long logId = IgfsLogger.nextId();

          if (append) clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
          else clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

          return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
        } else return os;
      } else {
        Map<String, String> permMap =
            F.asMap(
                IgfsUtils.PROP_PERMISSION,
                toString(perm),
                IgfsUtils.PROP_PREFER_LOCAL_WRITES,
                Boolean.toString(preferLocFileWrites));

        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream;

        long logId = -1;

        if (append) {
          stream = rmtClient.append(path, create, permMap);

          if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logAppend(logId, path, mode, bufSize);
          }

          if (LOG.isDebugEnabled())
            LOG.debug(
                "Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
        } else {
          stream =
              rmtClient.create(
                  path, overwrite, colocateFileWrites, replication, blockSize, permMap);

          if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
          }

          if (LOG.isDebugEnabled())
            LOG.debug(
                "Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
        }

        assert stream != null;

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        bufSize = Math.max(64 * 1024, bufSize);

        out = new BufferedOutputStream(igfsOut, bufSize);

        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

        // Mark stream created successfully.
        out = null;

        return res;
      }
    } finally {
      // Close if failed during stream creation.
      if (out != null) U.closeQuiet(out);

      leaveBusy();
    }
  }
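The 'out = null' assignment just before the return is what keeps the finally block from closing a stream that was created successfully. A self-contained sketch of that cleanup-on-failure idiom (the class and file name are illustrative):

import java.io.BufferedOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

class CleanupOnFailureSketch {
  static OutputStream open(String file) throws IOException {
    OutputStream out = new BufferedOutputStream(new FileOutputStream(file));

    try {
      // ... any follow-up step that may throw ...

      OutputStream res = out;

      // Mark success: prevents the 'finally' block from closing the
      // stream we are about to hand to the caller.
      out = null;

      return res;
    } finally {
      // Runs only if an exception occurred before 'out' was cleared.
      if (out != null) out.close();
    }
  }
}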
Example #4
  /**
   * Converts a Hadoop permission into an IGFS file attribute map.
   *
   * @param perm Hadoop permission.
   * @return IGFS attributes.
   */
  private Map<String, String> permission(FsPermission perm) {
    if (perm == null) perm = FsPermission.getDefault();

    return F.asMap(IgfsUtils.PROP_PERMISSION, toString(perm));
  }
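A hedged usage sketch of the Hadoop FsPermission type this helper serializes (the octal value is arbitrary):

import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionSketch {
  public static void main(String[] args) {
    // FsPermission can be built from an octal short and exposes both a
    // symbolic form (toString) and the raw bits (toShort).
    FsPermission perm = new FsPermission((short) 0644);

    System.out.println(perm); // rw-r--r--
    System.out.println(perm.toShort()); // 420 (decimal for octal 0644)
  }
}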
  /**
   * Establishes a TCP connection to the remote Hadoop process and returns a client.
   *
   * @param desc Process descriptor.
   * @return Client.
   * @throws IgniteCheckedException If failed.
   */
  protected HadoopCommunicationClient createTcpClient(HadoopProcessDescriptor desc)
      throws IgniteCheckedException {
    String addr = desc.address();

    int port = desc.tcpPort();

    if (log.isDebugEnabled())
      log.debug(
          "Trying to connect to remote process [locProcDesc="
              + locProcDesc
              + ", desc="
              + desc
              + ']');

    boolean conn = false;
    HadoopTcpNioCommunicationClient client = null;
    IgniteCheckedException errs = null;

    int connectAttempts = 1;

    long connTimeout0 = connTimeout;

    int attempt = 1;

    while (!conn) { // Reconnection on handshake timeout.
      try {
        SocketChannel ch = SocketChannel.open();

        ch.configureBlocking(true);

        ch.socket().setTcpNoDelay(tcpNoDelay);
        ch.socket().setKeepAlive(true);

        if (sockRcvBuf > 0) ch.socket().setReceiveBufferSize(sockRcvBuf);

        if (sockSndBuf > 0) ch.socket().setSendBufferSize(sockSndBuf);

        ch.socket().connect(new InetSocketAddress(addr, port), (int) connTimeout);

        HandshakeFinish fin = new HandshakeFinish();

        GridNioSession ses = nioSrvr.createSession(ch, F.asMap(HANDSHAKE_FINISH_META, fin)).get();

        client = new HadoopTcpNioCommunicationClient(ses);

        if (log.isDebugEnabled()) log.debug("Waiting for handshake finish for client: " + client);

        fin.await(connTimeout0);

        conn = true;
      } catch (HadoopHandshakeTimeoutException e) {
        if (client != null) {
          client.forceClose();

          client = null;
        }

        if (log.isDebugEnabled())
          log.debug(
              "Handshake timedout (will retry with increased timeout) [timeout="
                  + connTimeout0
                  + ", desc="
                  + desc
                  + ", port="
                  + port
                  + ", err="
                  + e
                  + ']');

        if (attempt == reconCnt || connTimeout0 > maxConnTimeout) {
          if (log.isDebugEnabled())
            log.debug(
                "Handshake timed out (will stop attempts to perform the handshake) "
                    + "[timeout="
                    + connTimeout0
                    + ", maxConnTimeout="
                    + maxConnTimeout
                    + ", attempt="
                    + attempt
                    + ", reconCnt="
                    + reconCnt
                    + ", err="
                    + e.getMessage()
                    + ", addr="
                    + addr
                    + ']');

          if (errs == null)
            errs =
                new IgniteCheckedException(
                    "Failed to connect to remote Hadoop process "
                        + "(is process still running?) [desc="
                        + desc
                        + ", addrs="
                        + addr
                        + ']');

          errs.addSuppressed(e);

          break;
        } else {
          attempt++;

          connTimeout0 *= 2;

          // Continue loop.
        }
      } catch (Exception e) {
        if (client != null) {
          client.forceClose();

          client = null;
        }

        if (log.isDebugEnabled())
          log.debug("Client creation failed [addr=" + addr + ", port=" + port + ", err=" + e + ']');

        if (X.hasCause(e, SocketTimeoutException.class))
          LT.warn(
              log,
              null,
              "Connect timed out (consider increasing 'connTimeout' "
                  + "configuration property) [addr="
                  + addr
                  + ", port="
                  + port
                  + ']');

        if (errs == null)
          errs =
              new IgniteCheckedException(
                  "Failed to connect to remote Hadoop process (is process still running?) "
                      + "[desc="
                      + desc
                      + ", addrs="
                      + addr
                      + ']');

        errs.addSuppressed(e);

        // Retry the connection a second time if it was not established.
        if (connectAttempts < 2
            && (e instanceof ConnectException || X.hasCause(e, ConnectException.class))) {
          connectAttempts++;

          continue;
        }

        break;
      }
    }

    if (client == null) {
      assert errs != null;

      if (X.hasCause(errs, ConnectException.class))
        LT.warn(
            log,
            null,
            "Failed to connect to a remote Hadoop process (is process still running?). "
                + "Make sure operating system firewall is disabled on local and remote host) "
                + "[addrs="
                + addr
                + ", port="
                + port
                + ']');

      throw errs;
    }

    if (log.isDebugEnabled()) log.debug("Created client: " + client);

    return client;
  }
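The retry loop above doubles the handshake timeout after each failed attempt until either the attempt limit or the timeout cap is reached. A stripped-down sketch of just that backoff policy (names mirror the fields used above; tryHandshake is a stand-in):

class BackoffSketch {
  /** Doubles the timeout after each failed handshake, as in the method above. */
  static void connectWithBackoff(long connTimeout, long maxConnTimeout, int reconCnt) {
    long timeout = connTimeout;

    for (int attempt = 1; ; attempt++) {
      if (tryHandshake(timeout))
        return; // Connected.

      if (attempt == reconCnt || timeout > maxConnTimeout)
        throw new IllegalStateException("Gave up after " + attempt + " attempts.");

      timeout *= 2; // Exponential backoff for the next attempt.
    }
  }

  /** Stand-in for the real handshake; returns false on timeout. */
  static boolean tryHandshake(long timeout) {
    return false;
  }
}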