/** {@inheritDoc} */
  @Override
  public void onClose() throws IgniteCheckedException {
    if (data == null)
      // Nothing to close.
      return;

    try {
      // getStatement() may itself throw SQLException; closeQuiet never does.
      U.closeQuiet(data.getStatement());
    } catch (SQLException e) {
      throw new IgniteCheckedException(e);
    }

    U.closeQuiet(data);
  }
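For reference, U.closeQuiet suppresses any exception thrown while closing, so cleanup can never mask the original failure. A minimal sketch of such a helper (illustrative only, not Ignite's actual implementation):

final class CloseUtils {
  private CloseUtils() {}

  /** Closes the given resource, ignoring any exception it throws. */
  static void closeQuiet(AutoCloseable c) {
    if (c == null) return;

    try {
      c.close();
    } catch (Exception ignored) {
      // Intentionally swallowed: used only on cleanup paths.
    }
  }
}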
  /** @throws IOException If failed. */
  private void initFavicon() throws IOException {
    assert favicon == null;

    InputStream in = getClass().getResourceAsStream("favicon.ico");

    if (in != null) {
      BufferedInputStream bis = new BufferedInputStream(in);

      ByteArrayOutputStream bos = new ByteArrayOutputStream();

      try {
        byte[] buf = new byte[2048];

        while (true) {
          int n = bis.read(buf);

          if (n == -1) break;

          bos.write(buf, 0, n);
        }

        favicon = bos.toByteArray();
      } finally {
        U.closeQuiet(bis);
      }
    }
  }
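On Java 9 and later, the manual copy loop can be collapsed to InputStream#readAllBytes() with try-with-resources; a sketch of the same favicon read under that assumption:

  /** @throws IOException If failed. */
  private void initFavicon() throws IOException {
    assert favicon == null;

    // try-with-resources skips close() when the resource is null.
    try (InputStream in = getClass().getResourceAsStream("favicon.ico")) {
      if (in != null) favicon = in.readAllBytes(); // Java 9+.
    }
  }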
    /** @param workTokDir Token directory (common for multiple nodes). */
    private void cleanupResources(File workTokDir) {
      RandomAccessFile lockFile = null;

      FileLock lock = null;

      try {
        lockFile = new RandomAccessFile(new File(workTokDir, LOCK_FILE_NAME), "rw");

        lock = lockFile.getChannel().lock();

        if (lock != null) processTokenDirectory(workTokDir);
        else if (log.isDebugEnabled())
          log.debug(
              "Token directory is being processed concurrently: " + workTokDir.getAbsolutePath());
      } catch (OverlappingFileLockException ignored) {
        if (log.isDebugEnabled())
          log.debug(
              "Token directory is being processed concurrently: " + workTokDir.getAbsolutePath());
      } catch (FileLockInterruptionException ignored) {
        Thread.currentThread().interrupt();
      } catch (IOException e) {
        U.error(log, "Failed to process directory: " + workTokDir.getAbsolutePath(), e);
      } finally {
        U.releaseQuiet(lock);
        U.closeQuiet(lockFile);
      }
    }
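The same guard can be expressed with try-with-resources and the non-blocking FileChannel#tryLock(), which returns null when another process already holds the lock, mapping directly onto the "processed concurrently" branch above. A sketch under those assumptions:

    private void cleanupGuarded(File workTokDir) throws IOException {
      try (RandomAccessFile lockFile =
              new RandomAccessFile(new File(workTokDir, LOCK_FILE_NAME), "rw");
          FileLock lock = lockFile.getChannel().tryLock()) {
        if (lock != null) processTokenDirectory(workTokDir);
        // else: another process is handling the directory concurrently.
      } catch (OverlappingFileLockException ignored) {
        // Another thread in this JVM holds the lock: same concurrent case.
      }
    }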
Example #4
    /** Final resource cleanup. */
    private void onFinished() {
      // Second close is no-op, if closed manually.
      U.closeQuiet(out);

      endpoint.close();

      // Finally, remove from queue.
      if (clientWorkers.unlinkx(node)) hnd.onClosed(ses);
    }
Example #5
    /** @param e Optional exception that occurred while stopping this reader. */
    private void shutdown0(@Nullable Throwable e) {
      if (!isCancelled()) {
        if (e != null) U.error(log, "Stopping client reader due to exception: " + endpoint, e);
      }

      U.closeQuiet(out);

      endpoint.close();
    }
  /** {@inheritDoc} */
  @Override
  public void start() throws IgniteCheckedException {
    IpcSharedMemoryNativeLoader.load(log);

    pid = IpcSharedMemoryUtils.pid();

    if (pid == -1) throw new IpcEndpointBindException("Failed to get PID of the current process.");

    if (size <= 0) throw new IpcEndpointBindException("Space size should be positive: " + size);

    String tokDirPath = this.tokDirPath;

    if (F.isEmpty(tokDirPath)) throw new IpcEndpointBindException("Token directory path is empty.");

    tokDirPath = tokDirPath + '/' + locNodeId.toString() + '-' + IpcSharedMemoryUtils.pid();

    tokDir = U.resolveWorkDirectory(tokDirPath, false);

    if (port <= 0 || port >= 0xffff)
      throw new IpcEndpointBindException("Port value is illegal: " + port);

    try {
      srvSock = new ServerSocket();

      // Always bind to loopback.
      srvSock.bind(new InetSocketAddress("127.0.0.1", port));
    } catch (IOException e) {
      // Although empty socket constructor never throws exception, close it just in case.
      U.closeQuiet(srvSock);

      throw new IpcEndpointBindException(
          "Failed to bind shared memory IPC endpoint (is port already in use?): " + port, e);
    }

    gcWorker = new GcWorker(gridName, "ipc-shmem-gc", log);

    new IgniteThread(gcWorker).start();

    if (log.isInfoEnabled())
      log.info(
          "IPC shared memory server endpoint started [port="
              + port
              + ", tokDir="
              + tokDir.getAbsolutePath()
              + ']');
  }
Example #7
  /**
   * @param conn Connection.
   * @param qry Query.
   * @param explain Explain.
   * @return Table.
   * @throws IgniteCheckedException If failed.
   */
  private GridMergeTable createMergeTable(
      JdbcConnection conn, GridCacheSqlQuery qry, boolean explain) throws IgniteCheckedException {
    try {
      Session ses = (Session) conn.getSession();

      CreateTableData data = new CreateTableData();

      data.tableName = "T___";
      data.schema = ses.getDatabase().getSchema(ses.getCurrentSchemaName());
      data.create = true;

      if (!explain) {
        LinkedHashMap<String, ?> colsMap = qry.columns();

        assert colsMap != null;

        ArrayList<Column> cols = new ArrayList<>(colsMap.size());

        for (Map.Entry<String, ?> e : colsMap.entrySet()) {
          String alias = e.getKey();
          GridSqlType t = (GridSqlType) e.getValue();

          assert !F.isEmpty(alias);

          Column c = new Column(alias, t.type(), t.precision(), t.scale(), t.displaySize());

          cols.add(c);
        }

        data.columns = cols;
      } else data.columns = planColumns();

      return new GridMergeTable(data, ctx);
    } catch (Exception e) {
      U.closeQuiet(conn);

      throw new IgniteCheckedException(e);
    }
  }
  /** {@inheritDoc} */
  @Override
  public void close() {
    closed = true;

    U.closeQuiet(srvSock);

    if (gcWorker != null) {
      U.cancel(gcWorker);

      // This method may be called from already interrupted thread.
      // Need to ensure cleaning on close.
      boolean interrupted = Thread.interrupted();

      try {
        U.join(gcWorker);
      } catch (IgniteInterruptedCheckedException e) {
        U.warn(log, "Interrupted when stopping GC worker.", e);
      } finally {
        if (interrupted) Thread.currentThread().interrupt();
      }
    }
  }
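The interrupt handling above is a general pattern: clear the flag so the blocking join can complete, then restore it so callers still observe the interruption. A generic sketch:

  /** Joins a worker thread while preserving this thread's interrupt status. */
  static void joinPreservingInterrupt(Thread worker) {
    boolean interrupted = Thread.interrupted(); // Clears the current flag.

    try {
      while (true) {
        try {
          worker.join();

          break;
        } catch (InterruptedException e) {
          interrupted = true; // Remember the interruption, keep waiting.
        }
      }
    } finally {
      if (interrupted) Thread.currentThread().interrupt(); // Restore for the caller.
    }
  }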
  /** @throws IOException If failed. */
  private void initDefaultPage() throws IOException {
    assert dfltPage == null;

    InputStream in = getClass().getResourceAsStream("rest.html");

    if (in != null) {
      LineNumberReader rdr = new LineNumberReader(new InputStreamReader(in, CHARSET));

      try {
        StringBuilder buf = new StringBuilder(2048);

        for (String line = rdr.readLine(); line != null; line = rdr.readLine()) {
          buf.append(line);

          if (!line.endsWith(" ")) buf.append(' ');
        }

        dfltPage = buf.toString();
      } finally {
        U.closeQuiet(rdr);
      }
    }
  }
Example #10
  /** {@inheritDoc} */
  @SuppressWarnings("deprecation")
  @Override
  public FSDataOutputStream createInternal(
      Path f,
      EnumSet<CreateFlag> flag,
      FsPermission perm,
      int bufSize,
      short replication,
      long blockSize,
      Progressable progress,
      Options.ChecksumOpt checksumOpt,
      boolean createParent)
      throws IOException {
    A.notNull(f, "f");

    enterBusy();

    boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
    boolean append = flag.contains(CreateFlag.APPEND);
    boolean create = flag.contains(CreateFlag.CREATE);

    OutputStream out = null;

    try {
      IgfsPath path = convert(f);
      IgfsMode mode = modeRslvr.resolveMode(path);

      if (LOG.isDebugEnabled())
        LOG.debug(
            "Opening output stream in create [thread="
                + Thread.currentThread().getName()
                + "path="
                + path
                + ", overwrite="
                + overwrite
                + ", bufSize="
                + bufSize
                + ']');

      if (mode == PROXY) {
        FSDataOutputStream os =
            secondaryFileSystem()
                .create(toSecondary(f), perm, flag, bufSize, replication, blockSize, progress);

        if (clientLog.isLogEnabled()) {
          long logId = IgfsLogger.nextId();

          if (append) clientLog.logAppend(logId, path, PROXY, bufSize); // Don't have stream ID.
          else clientLog.logCreate(logId, path, PROXY, overwrite, bufSize, replication, blockSize);

          return new FSDataOutputStream(new HadoopIgfsProxyOutputStream(os, clientLog, logId));
        } else return os;
      } else {
        Map<String, String> permMap =
            F.asMap(
                IgfsUtils.PROP_PERMISSION,
                toString(perm),
                IgfsUtils.PROP_PREFER_LOCAL_WRITES,
                Boolean.toString(preferLocFileWrites));

        // Create stream and close it in the 'finally' section if any sequential operation failed.
        HadoopIgfsStreamDelegate stream;

        long logId = -1;

        if (append) {
          stream = rmtClient.append(path, create, permMap);

          if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logAppend(logId, path, mode, bufSize);
          }

          if (LOG.isDebugEnabled())
            LOG.debug(
                "Opened output stream in append [path=" + path + ", delegate=" + stream + ']');
        } else {
          stream =
              rmtClient.create(
                  path, overwrite, colocateFileWrites, replication, blockSize, permMap);

          if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logCreate(logId, path, mode, overwrite, bufSize, replication, blockSize);
          }

          if (LOG.isDebugEnabled())
            LOG.debug(
                "Opened output stream in create [path=" + path + ", delegate=" + stream + ']');
        }

        assert stream != null;

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        bufSize = Math.max(64 * 1024, bufSize);

        out = new BufferedOutputStream(igfsOut, bufSize);

        FSDataOutputStream res = new FSDataOutputStream(out, null, 0);

        // Mark stream created successfully.
        out = null;

        return res;
      }
    } finally {
      // Close if failed during stream creation.
      if (out != null) U.closeQuiet(out);

      leaveBusy();
    }
  }
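The 'out = null' assignment before returning is an ownership-transfer idiom: the finally block closes the stream only if construction failed part-way, never on the success path. Distilled into a standalone sketch (file-based, names illustrative):

  static OutputStream openPipeline(String file) throws IOException {
    OutputStream out = null;

    try {
      out = new BufferedOutputStream(new FileOutputStream(file));

      // ... further wrapping or validation that may throw ...

      OutputStream res = out;

      // Mark stream created successfully; ownership passes to the caller.
      out = null;

      return res;
    } finally {
      // Only non-null here when an exception is in flight.
      if (out != null) U.closeQuiet(out);
    }
  }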
Example #11
  /**
   * @param cctx Cache context.
   * @param qry Query.
   * @param keepPortable Keep portable.
   * @return Cursor.
   */
  public Iterator<List<?>> query(
      GridCacheContext<?, ?> cctx, GridCacheTwoStepQuery qry, boolean keepPortable) {
    for (int attempt = 0; ; attempt++) {
      if (attempt != 0) {
        try {
          Thread.sleep(attempt * 10); // Wait for exchange.
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();

          throw new CacheException("Query was interrupted.", e);
        }
      }

      long qryReqId = reqIdGen.incrementAndGet();

      QueryRun r = new QueryRun();

      r.pageSize = qry.pageSize() <= 0 ? GridCacheTwoStepQuery.DFLT_PAGE_SIZE : qry.pageSize();

      r.idxs = new ArrayList<>(qry.mapQueries().size());

      String space = cctx.name();

      r.conn = (JdbcConnection) h2.connectionForSpace(space);

      AffinityTopologyVersion topVer = h2.readyTopologyVersion();

      List<String> extraSpaces = extraSpaces(space, qry.spaces());

      Collection<ClusterNode> nodes;

      // Explicit partition mapping for unstable topology.
      Map<ClusterNode, IntArray> partsMap = null;

      if (isPreloadingActive(cctx, extraSpaces)) {
        if (cctx.isReplicated()) nodes = replicatedUnstableDataNodes(cctx, extraSpaces);
        else {
          partsMap = partitionedUnstableDataNodes(cctx, extraSpaces);

          nodes = partsMap == null ? null : partsMap.keySet();
        }
      } else nodes = stableDataNodes(topVer, cctx, extraSpaces);

      if (nodes == null) continue; // Retry.

      assert !nodes.isEmpty();

      if (cctx.isReplicated() || qry.explain()) {
        assert qry.explain() || !nodes.contains(ctx.discovery().localNode())
            : "We must be on a client node.";

        // Select random data node to run query on a replicated data or get EXPLAIN PLAN from a
        // single node.
        nodes = Collections.singleton(F.rand(nodes));
      }

      int tblIdx = 0;

      final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();

      for (GridCacheSqlQuery mapQry : qry.mapQueries()) {
        GridMergeIndex idx;

        if (!skipMergeTbl) {
          GridMergeTable tbl;

          try {
            tbl = createMergeTable(r.conn, mapQry, qry.explain());
          } catch (IgniteCheckedException e) {
            throw new IgniteException(e);
          }

          idx = tbl.getScanIndex(null);

          fakeTable(r.conn, tblIdx++).setInnerTable(tbl);
        } else idx = GridMergeIndexUnsorted.createDummy();

        for (ClusterNode node : nodes) idx.addSource(node.id());

        r.idxs.add(idx);
      }

      r.latch = new CountDownLatch(r.idxs.size() * nodes.size());

      runs.put(qryReqId, r);

      try {
        if (ctx.clientDisconnected()) {
          throw new CacheException(
              "Query was cancelled, client node disconnected.",
              new IgniteClientDisconnectedException(
                  ctx.cluster().clientReconnectFuture(), "Client node disconnected."));
        }

        Collection<GridCacheSqlQuery> mapQrys = qry.mapQueries();

        if (qry.explain()) {
          mapQrys = new ArrayList<>(qry.mapQueries().size());

          for (GridCacheSqlQuery mapQry : qry.mapQueries())
            mapQrys.add(new GridCacheSqlQuery("EXPLAIN " + mapQry.query(), mapQry.parameters()));
        }

        if (nodes.size() != 1 || !F.first(nodes).isLocal()) { // Marshall params for remotes.
          Marshaller m = ctx.config().getMarshaller();

          for (GridCacheSqlQuery mapQry : mapQrys) mapQry.marshallParams(m);
        }

        boolean retry = false;

        if (send(
            nodes,
            new GridQueryRequest(qryReqId, r.pageSize, space, mapQrys, topVer, extraSpaces, null),
            partsMap)) {
          awaitAllReplies(r, nodes);

          Object state = r.state.get();

          if (state != null) {
            if (state instanceof CacheException) {
              CacheException err = (CacheException) state;

              if (err.getCause() instanceof IgniteClientDisconnectedException) throw err;

              throw new CacheException("Failed to run map query remotely.", err);
            }

            if (state instanceof AffinityTopologyVersion) {
              retry = true;

              // If remote node asks us to retry then we have outdated full partition map.
              h2.awaitForReadyTopologyVersion((AffinityTopologyVersion) state);
            }
          }
        } else // Send failed.
          retry = true;

        Iterator<List<?>> resIter = null;

        if (!retry) {
          if (qry.explain()) return explainPlan(r.conn, space, qry);

          if (skipMergeTbl) {
            List<List<?>> res = new ArrayList<>();

            assert r.idxs.size() == 1 : r.idxs;

            GridMergeIndex idx = r.idxs.get(0);

            Cursor cur = idx.findInStream(null, null);

            while (cur.next()) {
              Row row = cur.get();

              int cols = row.getColumnCount();

              List<Object> resRow = new ArrayList<>(cols);

              for (int c = 0; c < cols; c++) resRow.add(row.getValue(c).getObject());

              res.add(resRow);
            }

            resIter = res.iterator();
          } else {
            GridCacheSqlQuery rdc = qry.reduceQuery();

            // Statement caching is prohibited here because we can't guarantee correct merge index
            // reuse.
            ResultSet res =
                h2.executeSqlQueryWithTimer(
                    space, r.conn, rdc.query(), F.asList(rdc.parameters()), false);

            resIter = new Iter(res);
          }
        }

        for (GridMergeIndex idx : r.idxs) {
          if (!idx.fetchedAll()) // We have to explicitly cancel queries on remote nodes.
            send(nodes, new GridQueryCancelRequest(qryReqId), null);
        }

        if (retry) {
          if (Thread.currentThread().isInterrupted())
            throw new IgniteInterruptedCheckedException("Query was interrupted.");

          continue;
        }

        return new GridQueryCacheObjectsIterator(resIter, cctx, keepPortable);
      } catch (IgniteCheckedException | RuntimeException e) {
        U.closeQuiet(r.conn);

        if (e instanceof CacheException) throw (CacheException) e;

        Throwable cause = e;

        if (e instanceof IgniteCheckedException) {
          Throwable disconnectedErr =
              ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);

          if (disconnectedErr != null) cause = disconnectedErr;
        }

        throw new CacheException("Failed to run reduce query locally.", cause);
      } finally {
        if (!runs.remove(qryReqId, r)) U.warn(log, "Query run was already removed: " + qryReqId);

        if (!skipMergeTbl) {
          for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
            fakeTable(null, i).setInnerTable(null); // Drop all merge tables.
        }
      }
    }
  }
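The enclosing for-loop is a retry loop with linear backoff: sleep attempt * 10 ms before each retry, restore the interrupt flag and abort if the sleep is interrupted, and 'continue' whenever the topology proved unstable. A skeleton of that control flow (attemptQuery is a hypothetical stand-in for one map/reduce pass that returns null to request a retry):

  static <T> T retryWithBackoff(Supplier<T> attemptQuery) {
    for (int attempt = 0; ; attempt++) {
      if (attempt != 0) {
        try {
          Thread.sleep(attempt * 10L); // Wait for exchange to settle.
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();

          throw new CacheException("Query was interrupted.", e);
        }
      }

      T res = attemptQuery.get();

      if (res != null) return res; // null means "topology changed, retry".
    }
  }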
  /** {@inheritDoc} */
  @SuppressWarnings("ErrorNotRethrown")
  @Override
  public IpcEndpoint accept() throws IgniteCheckedException {
    while (!Thread.currentThread().isInterrupted()) {
      Socket sock = null;

      boolean accepted = false;

      try {
        sock = srvSock.accept();

        accepted = true;

        InputStream inputStream = sock.getInputStream();
        ObjectInputStream in = new ObjectInputStream(inputStream);

        ObjectOutputStream out = new ObjectOutputStream(sock.getOutputStream());

        IpcSharedMemorySpace inSpace = null;

        IpcSharedMemorySpace outSpace = null;

        boolean err = true;

        try {
          IpcSharedMemoryInitRequest req = (IpcSharedMemoryInitRequest) in.readObject();

          if (log.isDebugEnabled()) log.debug("Processing request: " + req);

          IgnitePair<String> p = inOutToken(req.pid(), size);

          String file1 = p.get1();
          String file2 = p.get2();

          assert file1 != null;
          assert file2 != null;

          // Create tokens.
          new File(file1).createNewFile();
          new File(file2).createNewFile();

          if (log.isDebugEnabled()) log.debug("Created token files: " + p);

          inSpace = new IpcSharedMemorySpace(file1, req.pid(), pid, size, true, log);

          outSpace = new IpcSharedMemorySpace(file2, pid, req.pid(), size, false, log);

          IpcSharedMemoryClientEndpoint ret =
              new IpcSharedMemoryClientEndpoint(inSpace, outSpace, log);

          out.writeObject(
              new IpcSharedMemoryInitResponse(
                  file2, outSpace.sharedMemoryId(), file1, inSpace.sharedMemoryId(), pid, size));

          err = !in.readBoolean();

          endpoints.add(ret);

          return ret;
        } catch (UnsatisfiedLinkError e) {
          throw IpcSharedMemoryUtils.linkError(e);
        } catch (IOException e) {
          if (log.isDebugEnabled())
            log.debug(
                "Failed to process incoming connection "
                    + "(was connection closed by another party):"
                    + e.getMessage());
        } catch (ClassNotFoundException e) {
          U.error(log, "Failed to process incoming connection.", e);
        } catch (ClassCastException e) {
          String msg =
              "Failed to process incoming connection (most probably, shared memory "
                  + "rest endpoint has been configured by mistake).";

          LT.warn(log, null, msg);

          sendErrorResponse(out, e);
        } catch (IpcOutOfSystemResourcesException e) {
          if (!omitOutOfResourcesWarn) LT.warn(log, null, OUT_OF_RESOURCES_MSG);

          sendErrorResponse(out, e);
        } catch (IgniteCheckedException e) {
          LT.error(log, e, "Failed to process incoming shared memory connection.");

          sendErrorResponse(out, e);
        } finally {
          // Exception has been thrown, need to free system resources.
          if (err) {
            if (inSpace != null) inSpace.forceClose();

            // Safety.
            if (outSpace != null) outSpace.forceClose();
          }
        }
      } catch (IOException e) {
        if (!Thread.currentThread().isInterrupted() && !accepted)
          throw new IgniteCheckedException("Failed to accept incoming connection.", e);

        if (!closed)
          LT.error(
              log, null, "Failed to process incoming shared memory connection: " + e.getMessage());
      } finally {
        U.closeQuiet(sock);
      }
    } // while

    throw new IgniteInterruptedCheckedException("Socket accept was interrupted.");
  }
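Stripped of the shared-memory handshake, accept() follows a standard server-loop shape: run until interrupted, close each socket quietly per iteration, and propagate only failures of accept() itself. A minimal sketch (handle is a hypothetical per-connection processor):

  static void acceptLoop(ServerSocket srvSock) throws IOException {
    while (!Thread.currentThread().isInterrupted()) {
      Socket sock = null;

      boolean accepted = false;

      try {
        sock = srvSock.accept();

        accepted = true;

        handle(sock); // Hypothetical per-connection processing.
      } catch (IOException e) {
        // Errors on an already-accepted connection end the iteration only.
        if (!accepted) throw e;
      } finally {
        U.closeQuiet(sock); // Quiet per-iteration close, as above.
      }
    }
  }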