Example #1
 // Remove the legal hold
 public void removeLegalHold(NewObjectIdentifier oid, String legalHold) {
   ConnectionFactory.DiskConnection[] connections = ConnectionFactory.getConnections();
   removeLegalHold(oid, legalHold, connections);
   if (LOG.isLoggable(Level.FINE)) {
     LOG.fine("Removed legal hold [" + legalHold + "] for OID " + oid);
   }
 }
Example #2
  /** Add a legal hold given a cache id. */
  public void addLegalHold(NewObjectIdentifier oid, String legalHold) {

    if (LOG.isLoggable(Level.FINE)) {
      LOG.fine("Adding (" + oid + ", " + legalHold + ")");
    }

    String sysCacheId = CacheClientInterface.SYSTEM_CACHE;
    ConnectionFactory.DiskConnection[] connections =
        ConnectionFactory.getConnections(oid.getLayoutMapId(), sysCacheId);

    addLegalHold(connections, oid, legalHold);
  }
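
Examples #1 and #2 form the legal-hold API: both resolve disk connections through ConnectionFactory and delegate to an overload that takes the connections explicitly. Below is a minimal caller sketch; the instance name emdClient, the helper name, and the hold label are assumptions, not names from the examples above.

  // Hypothetical caller placing and later releasing a hold.
  void holdAndRelease(EMDClient emdClient, NewObjectIdentifier oid) {
    emdClient.addLegalHold(oid, "case-1234");    // routed via oid.getLayoutMapId()
    // ... retention period handled elsewhere ...
    emdClient.removeLegalHold(oid, "case-1234"); // uses connections to all nodes
  }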
Example #3
  public void removeMetadata(NewObjectIdentifier oid, String cacheId) throws EMDException {
    // Remove the system Metadata
    ConnectionFactory.DiskConnection[] connections = ConnectionFactory.getConnections();
    removeMetadata(oid, CacheClientInterface.SYSTEM_CACHE, connections);

    LOG.info("Removed the system metadata for " + oid);

    // Clean the other cache
    if (cacheId != null) {
      removeMetadata(oid, cacheId, connections);
      if (LOG.isLoggable(Level.FINE)) {
        StringBuffer log = new StringBuffer();
        for (int i = 0; i < connections.length; i++) {
          log.append(" - ");
          connections[i].toString(log);
        }
        LOG.fine("Removed the " + cacheId + " metadata for " + oid + "[" + log.toString() + "]");
      }
    }
  }
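
removeMetadata always clears the system-cache record and, when a second cache id is supplied, clears that cache's record in the same pass. A hedged usage sketch follows; the caller name, client variable, and the "extended" cache id are assumptions.

  // Hypothetical caller; client and the "extended" cache id are assumptions.
  void purge(EMDClient client, NewObjectIdentifier oid) throws EMDException {
    // Removes the system metadata first, then the record in the named cache;
    // passing null as cacheId would remove only the system metadata.
    client.removeMetadata(oid, "extended");
  }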
Example #4
  /*
   * **************************************
   * Query APIs
   * **************************************
   */
  private MetadataClient.QueryResult queryPlus(
      int[] mapIds,
      String cacheId,
      String query,
      ArrayList attributes,
      Cookie _cookie,
      int maxResults,
      int timeout,
      boolean forceResults,
      boolean abortOnFailure,
      Object[] boundParameters,
      MDOutputStream outputStream)
      throws EMDException {

    if (LOG.isLoggable(Level.FINE)) {
      LOG.fine("EMDClient queryPlus called [" + query + "]");
    }

    // Sanity checks
    if ((_cookie != null) && (maxResults == -1)) {
      // Has to specify an offset AND a row count
      throw new EMDException(
          "Invalid argument: when using cookies, you have to "
              + "specify the number of entries to return");
    }

    ConnectionFactory.DiskConnection[] connections =
        ConnectionFactory.getConnections(mapIds, cacheId);

    if (LOG.isLoggable(Level.FINE)) {
      StringBuffer buffer = new StringBuffer();
      buffer.append("The query [" + query + "] is sent to : ");
      for (int i = 0; i < connections.length; i++) {
        connections[i].toString(buffer);
        buffer.append(" <> ");
      }
      LOG.fine(buffer.toString());
    }

    Socket[] sockets = new Socket[connections.length];
    ObjectBroker[] brokers = new ObjectBroker[connections.length];
    StreamHead[] heads = new StreamHead[connections.length];

    try {
      EMDCookie cookie = (EMDCookie) _cookie;
      NewObjectIdentifier oid = null;
      int toBeSkipped = 0;

      if (cookie != null) {
        query = cookie.getQuery();
        oid = cookie.getLastOid();
        toBeSkipped = cookie.getToBeSkipped();
        boundParameters = cookie.getBoundParameters();
      }

      // Construct the brokers and launch the queries
      for (int i = 0; i < sockets.length; i++) {
        sockets[i] = null;

        // Connect to the node
        try {
          sockets[i] = ConnectionFactory.connect(connections[i]);
        } catch (IOException e) {
          LOG.warning(
              "Failed to connect to node "
                  + connections[i].getNodeAddress()
                  + ". Skipping this node ["
                  + e.getMessage()
                  + "]");

          String str = BundleAccess.getInstance().getBundle().getString("warn.emd.query.io");
          Object[] args = {connections[i].toString()};
          LOG.log(ExtLevel.EXT_WARNING, MessageFormat.format(str, args));

          try {
            if (sockets[i] != null) {
              sockets[i].close();
            }
          } catch (IOException ignored) {
          }
          sockets[i] = null;

          throw new EMDException(
              "Failed to connect to node " + connections[i].getNodeAddress(), e);
        }

        // Launch the query
        if (sockets[i] == null) {
          brokers[i] = null;
          heads[i] = null;
        } else {
          brokers[i] = new ObjectBroker(sockets[i]);
          brokers[i].launchQueryClient(
              cacheId,
              connections[i].getDisks(),
              query,
              attributes,
              cookie,
              maxResults + toBeSkipped,
              timeout,
              forceResults,
              boundParameters);
          heads[i] = new StreamHead(brokers[i]);
        }
      }

      // Merge the result and compute the output
      ArrayList array = null;
      MDHit lastHit = null;

      if (outputStream == null) {
        array = StreamHead.mergeStreams(heads, toBeSkipped, maxResults, abortOnFailure);
        if (array != null && array.size() != 0) lastHit = (MDHit) array.get(array.size() - 1);
      } else {
        StreamHead.mergeStreams(heads, outputStream, toBeSkipped, maxResults);
        lastHit = (MDHit) outputStream.getLastObject();
      }

      MetadataClient.QueryResult result = new MetadataClient.QueryResult();
      if (lastHit != null) {
        long atime = -1;
        if (lastHit instanceof MDHitByATime) atime = ((MDHitByATime) lastHit).getATime();

        result.cookie =
            new EMDCookie(lastHit.constructOid(), query, 0, boundParameters, attributes, atime);
        result.results = array;
      } else {
        result.cookie = null;
        result.results = new ArrayList();
      }
      return (result);

    } catch (IOException e) {
      LOG.log(Level.SEVERE, "Failed to run the distributed query", e);

      String str = BundleAccess.getInstance().getBundle().getString("err.emd.query.io");
      Object[] args = {query};
      LOG.log(ExtLevel.EXT_SEVERE, MessageFormat.format(str, args));

      return (null);
    } finally {
      // Close the connections
      for (int i = 0; i < sockets.length; i++) {
        if (sockets[i] != null) {
          try {
            sockets[i].close();
            sockets[i] = null;
          } catch (IOException ignored) {
          }
        }
      }
    }
  }
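
The cookie returned in QueryResult is what makes the query resumable: it captures the query string, bound parameters, and the last OID seen, so the next call can skip what has already been delivered. Because queryPlus is private, the paging sketch below is written as if it lived inside the same class; the helper name, page size, timeout, and null arguments are assumptions.

  // Hypothetical paging helper inside the same class (queryPlus is private).
  private void pageThroughQuery(int[] mapIds, String cacheId, String query)
      throws EMDException {
    Cookie cookie = null;
    do {
      MetadataClient.QueryResult page =
          queryPlus(mapIds, cacheId, query, null /* attributes */, cookie,
              100 /* maxResults */, 0 /* timeout */, false /* forceResults */,
              false /* abortOnFailure */, null /* boundParameters */,
              null /* outputStream */);
      if (page == null || page.results.isEmpty()) {
        break; // query failed or no more hits
      }
      // ... consume page.results (a list of MDHit objects) here ...
      cookie = page.cookie; // resume from the last hit on the next pass
    } while (cookie != null);
  }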
Example #5
  /**
   * Stores the metadata of an object in the cache. This method might not store the metadata
   * if it already exists in the cache or if it does not belong to that cache. The oid's layout
   * map id is used to determine which cache servers are contacted.
   *
   * <p>For the system cache, {@code argument} is a SystemMetadata object; for the extended
   * cache, {@code argument} is either a Map containing the info or null.
   *
   * @param cacheId the cache to store the metadata in
   * @param oid the identifier of the object the metadata describes
   * @param argument what allows the cache to do its job (see above)
   * @return true if the metadata was stored successfully or was not supposed to be stored,
   *     false if there was a failure
   */
  public boolean setMetadata(String cacheId, NewObjectIdentifier oid, Object argument) {
    if (LOG.isLoggable(Level.FINE)) {
      LOG.fine("setMetadata has been called [" + cacheId + " - " + oid + " - " + argument + "]");
    }

    ConnectionFactory.DiskConnection[] connections =
        ConnectionFactory.getConnections(oid.getLayoutMapId(), cacheId);

    int inserts = setMetadata(cacheId, connections, oid, argument);

    // The number of required caches is defined as the number of
    // redundant fragments, plus one, since if we lose that number of
    // fragments we've lost data.
    int nbOfNeededInserts = OAClient.getInstance().getReliability().getRedundantFragCount() + 1;

    // Normally we only insert into the first three fragments' system
    // caches. If we didn't succeed in all of these, we attempt to
    // insert into every fragment's system cache. This helps prevent
    // missing entries at query time, since a query hits all caches,
    // including those for fragments 3-6. The extra entries are
    // cleaned up by DataDoctor as it heals.
    if (cacheId.equals(CacheInterface.SYSTEM_CACHE) && inserts < nbOfNeededInserts) {

      // Re-acquire connects, forcing all caches to be used.
      connections = ConnectionFactory.getConnections(oid.getLayoutMapId(), cacheId, true);
      int retriedInserts = setMetadata(cacheId, connections, oid, argument);

      if (LOG.isLoggable(Level.FINE)) {
        LOG.fine(
            "Inserted "
                + retriedInserts
                + " records into "
                + "system caches after initial attempts inserted "
                + inserts
                + " for oid "
                + oid);
      }

      // If we failed to get the required number of cache inserts after
      // retrying all fragments' system caches, alert the System Cache
      // state machine (via ClusterProperties via MgmtServer)
      if (retriedInserts < nbOfNeededInserts) {
        if (LOG.isLoggable(Level.INFO)) {
          LOG.info(
              "Alerting SysCache state machine of "
                  + "failure to insert records for "
                  + oid
                  + ". "
                  + retriedInserts
                  + " of "
                  + nbOfNeededInserts
                  + " inserts performed after initial attempts"
                  + " inserted "
                  + inserts);
        }
        try {
          getMgmtProxy().setSyscacheInsertFailureTime(System.currentTimeMillis());
        } catch (Exception e) {
          String error =
              "Failing to store oid "
                  + oid.toString()
                  + " due to syscache insert failures (needed "
                  + nbOfNeededInserts
                  + ", inserts "
                  + inserts
                  + ", second inserts "
                  + retriedInserts
                  + ") and failure to notify state machine: "
                  + e.getMessage();
          LOG.log(Level.SEVERE, error, e);
          throw new SyscacheInsertionFailureException(error, e);
        }
      }
    }

    if (LOG.isLoggable(Level.FINE)) {
      LOG.fine("The metadata have been set for object " + oid);
    }
    return false;
  }
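
Per the javadoc, the argument type depends on the cache being targeted: a SystemMetadata instance for the system cache, a Map (or null) for an extended cache. A minimal caller sketch follows; every name except setMetadata and CacheClientInterface.SYSTEM_CACHE is an assumption, including the "extended" cache id.

  // Hypothetical caller; client, systemMD and extendedMD are assumptions.
  void storeBothCaches(EMDClient client, NewObjectIdentifier oid,
      SystemMetadata systemMD, Map extendedMD) {
    // System cache: argument is a SystemMetadata object.
    client.setMetadata(CacheClientInterface.SYSTEM_CACHE, oid, systemMD);
    // Extended cache: argument is a Map with the metadata, or null.
    client.setMetadata("extended" /* hypothetical cache id */, oid, extendedMD);
  }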
Example #6
  /*
   * **************************************
   * selectUnique APIs
   * **************************************
   */
  public MetadataClient.SelectUniqueResult selectUnique(
      String cacheId,
      String query,
      String attribute,
      Cookie _cookie,
      int maxResults,
      int timeout,
      boolean forceResults,
      Object[] boundParameters,
      MDOutputStream outputStream)
      throws EMDException {
    // Sanity checks
    if ((_cookie != null) && (maxResults == -1)) {
      // Has to specify an offset AND a row count
      throw new EMDException(
          "Invalid argument: when using cookies, you have to "
              + "specify the number of entries to return");
    }

    ConnectionFactory.DiskConnection[] connections = ConnectionFactory.getConnections();
    Socket socket = null;
    ArrayList sockets = new ArrayList();
    ObjectBroker[] brokers = null;
    StreamHead[] heads = null;

    try {

      EMDCookie cookie = (EMDCookie) _cookie;
      String lastAttribute = null;
      int toBeSkipped = 0;

      if (cookie != null) {
        query = cookie.getQuery();
        attribute = cookie.getAttribute();
        lastAttribute = cookie.getLastAttribute();
        toBeSkipped = cookie.getToBeSkipped();
        boundParameters = cookie.getBoundParameters();
      }

      // Construct the brokers and launch the queries
      for (int i = 0; i < connections.length; i++) {
        try {
          socket = ConnectionFactory.connect(connections[i]);
          sockets.add(socket);
        } catch (IOException e) {
          LOG.warning(
              "Failed to connect to node "
                  + connections[i].getNodeAddress()
                  + ". Skipping this node ["
                  + e.getMessage()
                  + "]");

          String str = BundleAccess.getInstance().getBundle().getString("warn.emd.selectUnique.io");
          Object[] args = {connections[i].toString()};
          LOG.log(ExtLevel.EXT_WARNING, MessageFormat.format(str, args));

          throw new EMDException(
              "Failed to connect to node " + connections[i].getNodeAddress(), e);
        }
      }

      brokers = new ObjectBroker[sockets.size()];
      heads = new StreamHead[sockets.size()];
      Iterator iter = sockets.iterator();

      for (int i = 0; i < brokers.length; i++) {
        brokers[i] = new ObjectBroker((Socket) iter.next());
        brokers[i].launchSelectUniqueClient(
            cacheId,
            query,
            attribute,
            lastAttribute,
            maxResults + toBeSkipped,
            timeout,
            forceResults,
            boundParameters);
        heads[i] = new StreamHead(brokers[i]);
      }

      // Merge the result and compute the output
      ArrayList array = null;
      Object lastHit = null;

      if (outputStream == null) {
        array = StreamHead.mergeStreams(heads, toBeSkipped, maxResults, false);
        if (array != null && array.size() != 0) lastHit = array.get(array.size() - 1);
      } else {
        StreamHead.mergeStreams(heads, outputStream, toBeSkipped, maxResults);
        lastHit = outputStream.getLastObject();
      }

      MetadataClient.SelectUniqueResult result = new MetadataClient.SelectUniqueResult();
      if (lastHit != null) {

        if (array != null) {
          ArrayList stringList = new ArrayList();
          for (int i = 0; i < array.size(); i++) {
            stringList.add(array.get(i).toString());
          }
          result.results = new StringList(stringList);
        }

        result.cookie = new EMDCookie(lastHit.toString(), query, attribute, 0, boundParameters);
      } else {
        result.cookie = null;
        result.results = StringList.EMPTY_LIST;
      }

      return (result);

    } catch (IOException e) {
      EMDException newe =
          new EMDException("Failed to run the distributed select unique [" + e.getMessage() + "]");
      newe.initCause(e);
      throw newe;
    } finally {
      // Close the connections
      Iterator iter = sockets.iterator();
      while (iter.hasNext()) {
        try {
          ((Socket) iter.next()).close();
        } catch (IOException ignored) {
        }
      }
    }
  }
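
selectUnique follows the same fan-out and merge pattern as queryPlus, but returns the distinct values of a single attribute as a StringList, with the same cookie mechanism available for paging. A hedged call sketch; the client variable, cache id, query, and attribute name below are assumptions.

  // Hypothetical caller; client and all literal values are assumptions.
  void listDistinctValues(EMDClient client) throws EMDException {
    MetadataClient.SelectUniqueResult distinct =
        client.selectUnique(
            "extended" /* cacheId */,
            "size > 0" /* query */,
            "mimetype" /* attribute */,
            null /* cookie: first page */,
            50 /* maxResults */,
            0 /* timeout */,
            false /* forceResults */,
            null /* boundParameters */,
            null /* outputStream */);
    // distinct.results is a StringList of unique attribute values;
    // distinct.cookie can be passed back in to fetch the next page.
    System.out.println("distinct values: " + distinct.results);
  }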