Example #1
  /**********************************************
   *
   * Query APIs
   *
   **********************************************/
  private MetadataClient.QueryResult queryPlus(
      int[] mapIds,
      String cacheId,
      String query,
      ArrayList attributes,
      Cookie _cookie,
      int maxResults,
      int timeout,
      boolean forceResults,
      boolean abortOnFailure,
      Object[] boundParameters,
      MDOutputStream outputStream)
      throws EMDException {

    if (LOG.isLoggable(Level.FINE)) {
      LOG.fine("EMDClient queryPlus called [" + query + "]");
    }

    // Sanity checks
    if ((_cookie != null) && (maxResults == -1)) {
      // A cookie implies an offset, so the caller must also specify a row count
      throw new EMDException(
          "Invalid argument: when using cookies, you have to "
              + "specify the number of entries to return");
    }

    ConnectionFactory.DiskConnection[] connections =
        ConnectionFactory.getConnections(mapIds, cacheId);

    if (LOG.isLoggable(Level.FINE)) {
      StringBuffer buffer = new StringBuffer();
      buffer.append("The query [" + query + "] is sent to : ");
      for (int i = 0; i < connections.length; i++) {
        connections[i].toString(buffer);
        buffer.append(" <> ");
      }
      LOG.fine(buffer.toString());
    }

    Socket[] sockets = new Socket[connections.length];
    ObjectBroker[] brokers = new ObjectBroker[connections.length];
    StreamHead[] heads = new StreamHead[connections.length];

    try {
      EMDCookie cookie = (EMDCookie) _cookie;
      NewObjectIdentifier oid = null;
      int toBeSkipped = 0;

      // A non-null cookie resumes a previous query: it carries the original
      // query string, the last OID returned, the number of hits to skip,
      // and the bound parameters.
      if (cookie != null) {
        query = cookie.getQuery();
        oid = cookie.getLastOid();
        toBeSkipped = cookie.getToBeSkipped();
        boundParameters = cookie.getBoundParameters();
      }

      // Construct the brokers and launch the queries
      for (int i = 0; i < sockets.length; i++) {
        sockets[i] = null;

        // Connect to the node
        try {
          sockets[i] = ConnectionFactory.connect(connections[i]);
        } catch (IOException e) {
          LOG.warning(
              "Failed to connect to node "
                  + connections[i].getNodeAddress()
                  + ". Skipping this node ["
                  + e.getMessage()
                  + "]");

          String str = BundleAccess.getInstance().getBundle().getString("warn.emd.query.io");
          Object[] args = {connections[i].toString()};
          LOG.log(ExtLevel.EXT_WARNING, MessageFormat.format(str, args));

          // Defensive cleanup: if connect() threw, sockets[i] is still null
          // and this close is a no-op.
          try {
            if (sockets[i] != null) {
              sockets[i].close();
            }
          } catch (IOException ignored) {
          }
          sockets[i] = null;

          // Note: despite the "Skipping this node" message, this exception
          // aborts the whole query rather than skipping the failed node.
          throw new EMDException(
              "Failed to connect to node "
                  + connections[i].getNodeAddress()
                  + ". Skipping this node.",
              e);
        }

        // Launch the query
        if (sockets[i] == null) {
          brokers[i] = null;
          heads[i] = null;
        } else {
          brokers[i] = new ObjectBroker(sockets[i]);
          // Ask each node for maxResults + toBeSkipped hits so that enough
          // remain after the merge discards the skipped prefix.
          brokers[i].launchQueryClient(
              cacheId,
              connections[i].getDisks(),
              query,
              attributes,
              cookie,
              maxResults + toBeSkipped,
              timeout,
              forceResults,
              boundParameters);
          heads[i] = new StreamHead(brokers[i]);
        }
      }

      // Merge the result and compute the output
      ArrayList array = null;
      MDHit lastHit = null;

      if (outputStream == null) {
        array = StreamHead.mergeStreams(heads, toBeSkipped, maxResults, abortOnFailure);
        if (array != null && !array.isEmpty()) {
          lastHit = (MDHit) array.get(array.size() - 1);
        }
      } else {
        StreamHead.mergeStreams(heads, outputStream, toBeSkipped, maxResults);
        lastHit = (MDHit) outputStream.getLastObject();
      }

      MetadataClient.QueryResult result = new MetadataClient.QueryResult();
      if (lastHit != null) {
        long atime = -1;
        if (lastHit instanceof MDHitByATime) {
          atime = ((MDHitByATime) lastHit).getATime();
        }

        result.cookie =
            new EMDCookie(lastHit.constructOid(), query, 0, boundParameters, attributes, atime);
        result.results = array;
      } else {
        result.cookie = null;
        result.results = new ArrayList();
      }
      return result;

    } catch (IOException e) {
      LOG.log(Level.SEVERE, "Failed to run the distributed query", e);

      String str = BundleAccess.getInstance().getBundle().getString("err.emd.query.io");
      Object[] args = {query};
      LOG.log(ExtLevel.EXT_SEVERE, MessageFormat.format(str, args));

      // On I/O failure the error is logged and null is returned instead of
      // propagating the exception; callers must be prepared for a null result.
      return null;
    } finally {
      // Close the connections
      for (int i = 0; i < sockets.length; i++) {
        if (sockets[i] != null) {
          try {
            sockets[i].close();
            sockets[i] = null;
          } catch (IOException ignored) {
          }
        }
      }
    }
  }
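
A minimal usage sketch for the method above, assuming it is called from inside the same class (queryPlus is private) and that MetadataClient.QueryResult exposes the cookie and results fields shown above. The map IDs, cache ID, query string, page size, and timeout are placeholders, and whether a null attributes list is accepted by launchQueryClient is an assumption of this sketch.

  // Hypothetical paging helper, for illustration only: repeatedly calls
  // queryPlus, feeding back the cookie from each page until a page comes
  // back empty (which is when result.cookie is null, per the code above).
  private void pageThroughQuery(int[] mapIds, String cacheId, String query)
      throws EMDException {
    final int pageSize = 100;    // placeholder page size
    final int timeoutMs = 30000; // placeholder timeout
    Cookie cookie = null;

    do {
      MetadataClient.QueryResult page =
          queryPlus(mapIds, cacheId, query, null /* attributes */, cookie,
                    pageSize, timeoutMs, false /* forceResults */,
                    false /* abortOnFailure */, null /* boundParameters */,
                    null /* outputStream */);
      if (page == null) {
        break; // queryPlus returns null when the distributed query hit an I/O failure
      }
      for (int i = 0; i < page.results.size(); i++) {
        MDHit hit = (MDHit) page.results.get(i);
        // process hit here ...
      }
      cookie = page.cookie; // null once no more hits are returned
    } while (cookie != null);
  }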
Example #2
  /**********************************************
   *
   * selectUnique APIs
   *
   **********************************************/
  public MetadataClient.SelectUniqueResult selectUnique(
      String cacheId,
      String query,
      String attribute,
      Cookie _cookie,
      int maxResults,
      int timeout,
      boolean forceResults,
      Object[] boundParameters,
      MDOutputStream outputStream)
      throws EMDException {
    // Sanity checks
    if ((_cookie != null) && (maxResults == -1)) {
      // A cookie implies an offset, so the caller must also specify a row count
      throw new EMDException(
          "Invalid argument: when using cookies, you have to "
              + "specify the number of entries to return");
    }

    ConnectionFactory.DiskConnection[] connections = ConnectionFactory.getConnections();
    Socket socket = null;
    ArrayList sockets = new ArrayList();
    ObjectBroker[] brokers = null;
    StreamHead[] heads = null;

    try {

      EMDCookie cookie = (EMDCookie) _cookie;
      String lastAttribute = null;
      int toBeSkipped = 0;

      // A non-null cookie resumes a previous selectUnique call: it carries the
      // original query, the attribute, the last value returned, the number of
      // hits to skip, and the bound parameters.
      if (cookie != null) {
        query = cookie.getQuery();
        attribute = cookie.getAttribute();
        lastAttribute = cookie.getLastAttribute();
        toBeSkipped = cookie.getToBeSkipped();
        boundParameters = cookie.getBoundParameters();
      }

      // Construct the brokers and launch the queries
      for (int i = 0; i < connections.length; i++) {
        try {
          socket = ConnectionFactory.connect(connections[i]);
          sockets.add(socket);
        } catch (IOException e) {
          LOG.warning(
              "Failed to connect to node "
                  + connections[i].getNodeAddress()
                  + ". Skipping this node ["
                  + e.getMessage()
                  + "]");

          String str = BundleAccess.getInstance().getBundle().getString("warn.emd.selectUnique.io");
          Object[] args = {connections[i].toString()};
          LOG.log(ExtLevel.EXT_WARNING, MessageFormat.format(str, args));

          // As in queryPlus, this aborts the whole call rather than actually
          // skipping the failed node.
          throw new EMDException(
              "Failed to connect to node "
                  + connections[i].getNodeAddress()
                  + ". Skipping this node.",
              e);
        }
      }

      brokers = new ObjectBroker[sockets.size()];
      heads = new StreamHead[sockets.size()];
      Iterator iter = sockets.iterator();

      for (int i = 0; i < brokers.length; i++) {
        brokers[i] = new ObjectBroker((Socket) iter.next());
        brokers[i].launchSelectUniqueClient(
            cacheId,
            query,
            attribute,
            lastAttribute,
            maxResults + toBeSkipped,
            timeout,
            forceResults,
            boundParameters);
        heads[i] = new StreamHead(brokers[i]);
      }

      // Merge the result and compute the output
      ArrayList array = null;
      Object lastHit = null;

      if (outputStream == null) {
        array = StreamHead.mergeStreams(heads, toBeSkipped, maxResults, false);
        if (array != null && !array.isEmpty()) {
          lastHit = array.get(array.size() - 1);
        }
      } else {
        StreamHead.mergeStreams(heads, outputStream, toBeSkipped, maxResults);
        lastHit = outputStream.getLastObject();
      }

      MetadataClient.SelectUniqueResult result = new MetadataClient.SelectUniqueResult();
      if (lastHit != null) {

        if (array != null) {
          ArrayList stringList = new ArrayList();
          for (int i = 0; i < array.size(); i++) {
            stringList.add(array.get(i).toString());
          }
          result.results = new StringList(stringList);
        }

        result.cookie = new EMDCookie(lastHit.toString(), query, attribute, 0, boundParameters);
      } else {
        result.cookie = null;
        result.results = StringList.EMPTY_LIST;
      }

      return result;

    } catch (IOException e) {
      EMDException newe =
          new EMDException("Failed to run the distributed select unique [" + e.getMessage() + "]");
      newe.initCause(e);
      throw newe;
    } finally {
      // Close the connections
      Iterator iter = sockets.iterator();
      while (iter.hasNext()) {
        try {
          ((Socket) iter.next()).close();
        } catch (IOException ignored) {
        }
      }
    }
  }
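
A comparable sketch for the selectUnique path, again only illustrative: it pages through the distinct values of one attribute, feeding the returned cookie back in. The cache ID, query, attribute name, page size, and timeout are placeholders, and since the accessors of StringList are not shown above, consuming page.results is left as a comment.

  // Hypothetical caller, for illustration only: walks the distinct values of a
  // single attribute one page at a time using the cookie returned above.
  public void listUniqueValues(String cacheId, String query, String attribute)
      throws EMDException {
    final int pageSize = 50;     // placeholder page size
    final int timeoutMs = 30000; // placeholder timeout
    Cookie cookie = null;

    do {
      MetadataClient.SelectUniqueResult page =
          selectUnique(cacheId, query, attribute, cookie, pageSize,
                       timeoutMs, false /* forceResults */,
                       null /* boundParameters */, null /* outputStream */);
      // page.results is a StringList of the distinct attribute values in this
      // page; process it here with whatever accessors StringList provides.
      cookie = page.cookie; // null once a page comes back empty
    } while (cookie != null);
  }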