Example #1
  /**
   * Cleans up any edge peers when trying to forward an SRDI query, so that to the best of our
   * knowledge the remaining peers are rendezvous peers. This is not perfect, as it may take time
   * for the peerview to converge, but at least we can remove any peer that is not a rendezvous.
   *
   * @param src the source of the query, so we do not send the query back to its sender
   * @param results the candidate peers to filter
   * @return the filtered list of candidate rendezvous peers
   */
  protected Vector cleanupAnyEdges(String src, Vector results) {
    Vector clean = new Vector(results.size());
    PeerID pid = null;
    // get the peerview as a vector of PIDs
    Vector rpvId = srdi.getGlobalPeerView();

    // remove any peers not in the current peerview
    // these peers may be gone or have become edges
    for (int i = 0; i < results.size(); i++) {
      pid = (PeerID) results.elementAt(i);
      // eliminate the src of the query so we don't resend
      // the query to whoever sent it to us
      if (src.equals(pid.toString())) {
        continue;
      }

      // skip the local peer as well, so we don't send to ourselves
      if (localPeerId.equals(pid)) {
        continue;
      }

      if (rpvId.contains(pid)) { // ok, that's a good RDV to the best of our knowledge
        if (LOG.isEnabledFor(Level.DEBUG)) {
          LOG.debug("valid rdv for SRDI forward " + pid);
        }
        clean.add(pid);
      } else {
        // cleanup our SRDI cache for that peer
        srdiIndex.remove(pid);
      }
    }
    return clean;
  }
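// A minimal, self-contained sketch of the same filtering rules as cleanupAnyEdges() above,
// using plain Strings in place of PeerIDs. The names used here (EdgeCleanupSketch, peerView,
// srcPeer, localPeer) are illustrative assumptions and not part of the original API.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class EdgeCleanupSketch {

  // Keep only candidates that are still in the peerview, skipping the query source
  // and the local peer; everything else is assumed stale (gone or demoted to edge).
  static List<String> cleanupAnyEdges(
      String srcPeer, String localPeer, List<String> candidates, Set<String> peerView) {
    List<String> clean = new ArrayList<String>(candidates.size());
    for (String pid : candidates) {
      if (pid.equals(srcPeer)) {
        continue; // never send the query back to its sender
      }
      if (pid.equals(localPeer)) {
        continue; // never send the query to ourselves
      }
      if (peerView.contains(pid)) {
        clean.add(pid); // still a rendezvous to the best of our knowledge
      }
      // else: the caller would also purge this pid from its SRDI index here
    }
    return clean;
  }

  public static void main(String[] args) {
    Set<String> peerView = new HashSet<String>(List.of("rdv-A", "rdv-B"));
    List<String> candidates = List.of("rdv-A", "rdv-B", "edge-C", "local", "src");
    System.out.println(cleanupAnyEdges("src", "local", candidates, peerView));
    // prints [rdv-A, rdv-B]
  }
}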
Example #2
  /**
   * {@inheritDoc}
   *
   * <p>Careful: stopApp() could in theory be called before startApp().
   */
  public void stopApp() {

    resolver.unregisterHandler(routerSName);

    // unregister SRDI
    resolver.unregisterSrdiHandler(routerSName);
    srdiIndex.stop();

    resolver = null;
    srdi = null;
    membership = null;
  }
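  // Since the Javadoc above warns that stopApp() may run before startApp() has initialized
  // these fields, a more defensive variant (a sketch under that assumption, not the original
  // implementation) would guard each reference before using it:
  public void stopApp() {
    if (resolver != null) {
      resolver.unregisterHandler(routerSName);
      // unregister SRDI
      resolver.unregisterSrdiHandler(routerSName);
      resolver = null;
    }
    if (srdiIndex != null) {
      srdiIndex.stop();
    }
    srdi = null;
    membership = null;
  }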
Example #3
  /**
   * Processes the query and generates a response.
   *
   * @param query the query to process
   * @return {@code ResolverService.OK} if no further action is needed, or {@code
   *     ResolverService.Repropagate} to force a walk
   */
  public int processQuery(ResolverQueryMsg query) {

    if (!useRouteResolver) { // Route resolver disabled
      return ResolverService.OK;
    }

    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("processQuery starts");
    }

    RouteQuery routeQuery = null;
    Reader ip = null;

    try {
      ip = new StringReader(query.getQuery());

      StructuredTextDocument asDoc =
          (StructuredTextDocument)
              StructuredDocumentFactory.newStructuredDocument(MimeMediaType.XMLUTF8, ip);

      routeQuery = new RouteQuery(asDoc);

    } catch (Throwable e) {
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("Malformed Route query ", e);
      }
      return ResolverService.OK;
    } finally {
      try {
        if (null != ip) {
          ip.close();
          ip = null;
        }
      } catch (Throwable ignored) {
      }
    }

    PeerID pId = routeQuery.getDestPeerID();

    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("processQuery: looking for route to " + pId);
    }

    RouteAdvertisement srcRoute = routeQuery.getSrcRoute();
    List badHops = routeQuery.getBadHops();

    if (LOG.isEnabledFor(Level.DEBUG)) {
      if (badHops != null) {
        LOG.debug("processQuery: bad Hops");
        for (int i = 0; i < badHops.size(); i++) {
          LOG.debug("processQuery:   :" + ((PeerID) badHops.get(i)).toString());
        }
      }
    }
    // if our source route is not null, then publish it
    if (srcRoute != null) {
      if (!(srcRoute.getDestPeerID()).equals(localPeerId)) {
        // This is not our own peer adv so we must not keep it
        // longer than its expiration time.
        try {
          if (LOG.isEnabledFor(Level.DEBUG)) {
            LOG.debug("processQuery: publishing sender route info " + srcRoute.getDestPeerID());
          }

          // we only need to publish this route if
          // we don't know about it yet
          // XXX: here is where we could be more conservative and use isNormallyReachable() instead,
          // thus excluding
          // incoming messengers.
          if ((!router.isLocalRoute(router.pid2addr(srcRoute.getDestPeerID())))
              && (!router.isRoutedRoute(router.pid2addr(srcRoute.getDestPeerID())))) {
            routeCM.publishRoute(srcRoute);
          }
        } catch (Exception e) {
          if (LOG.isEnabledFor(Level.DEBUG)) {
            LOG.debug("Could not publish Route Adv from query - discard", e);
          }
          return ResolverService.OK;
        }
      }
    } else {
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("No src Route in route query - discard ");
      }
      return ResolverService.OK;
    }

    if (pId == null) {
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("Malformed route query request, no PeerId - discard");
      }
      return ResolverService.OK;
    }

    // We have better luck with this one because, since it is part of OUR
    // message, and not part of the resolver protocol, it is in OUR
    // format.
    EndpointAddress qReqAddr = router.pid2addr(pId);

    RouteAdvertisement route = null;

    // check if this peer has a route to the destination
    // requested
    boolean found = false;

    if (qReqAddr.equals(localPeerAddr)) {
      found = true;
      // return the route that is my local route
      route = router.getMyLocalRoute();
    } else {
      // only rendezvous peers can respond to route requests;
      // otherwise we would generate too much traffic
      // XXX: here is where we could be more conservative and use isNormallyReachable() instead,
      // thus excluding
      // incoming messengers.
      if (router.isLocalRoute(qReqAddr)) {
        if (LOG.isEnabledFor(Level.DEBUG)) {
          LOG.debug("processQuery: peer has direct route to destination ");
        }
        // we should set the route to something  :-)

        found = true;

        // this peer has a direct route to the destination
        // return the short route advertisement we know for this peer
        // (For us it is zero hops, and we advertise ourselves as the routing
        // peer in the response. The stitching is done by whoever gets that
        // response.) Maybe there is more than one hop advertised in there...
        // alternate routing peers... should we leave them?
        // For now, we keep the full dest, but whack the hops.

        route =
            (RouteAdvertisement)
                AdvertisementFactory.newAdvertisement(RouteAdvertisement.getAdvertisementType());

        AccessPointAdvertisement ap =
            (AccessPointAdvertisement)
                AdvertisementFactory.newAdvertisement(
                    AccessPointAdvertisement.getAdvertisementType());

        ap.setPeerID(pId);
        route.setDest(ap);

      } else {

        route = router.getRoute(qReqAddr, false);
        if (route != null) {
          found = true;
          // check if we were given some bad hops info
          // and see if the found route contains
          // any of these bad hops. In that case, we need
          // to mark this route as bad
          for (int i = 0; badHops != null && i < badHops.size(); i++) {

            // destination is known to be bad
            if (router.addr2pid(qReqAddr).equals(((PeerID) badHops.get(i)))) {
              processBadRoute((PeerID) badHops.get(i), route);
              found = false;
              break;
            }

            if (route.containsHop((PeerID) badHops.get(i))) {
              if (LOG.isEnabledFor(Level.DEBUG)) {
                LOG.debug(
                    "processQuery: peer has bad route due to "
                        + ((PeerID) badHops.get(i)).toString());
              }
              processBadRoute((PeerID) badHops.get(i), route);
              found = false;
              break;
            }
          }
        }
      }
    }

    if (!found) {
      // discard the request if we are not a rendezvous
      // else forward to the next peers
      if (!group.isRendezvous()) {
        if (LOG.isEnabledFor(Level.DEBUG)) {
          LOG.debug("discard query forwarding as not a rendezvous");
        }
        return ResolverService.OK;
      }

      // did not find a route, check our srdi cache
      // make sure we protect against out of sync
      // SRDI index

      // srdi forwarding is only involved once the Index entry has
      // been found and we forwarded the resolver query. Afterward a
      // normal walk proceeds from the initial SRDI index pointing
      // rdv. This is done to protect against potential loopback
      // entries in the SRDI cache index due to out of sync peerview
      // and index.
      if (query.getHopCount() < 2) {

        // check local SRDI cache to see if we have the entry
        // we look for 10 entries, will pickup one randomly
        Vector results = srdiIndex.query("route", "DstPID", pId.toString(), 10);

        if (results.size() > 0) {
          if (LOG.isEnabledFor(Level.DEBUG)) {
            LOG.debug("processQuery srdiIndex lookup match :" + results.size());
          }

          // remove any non-rdv peers to avoid sending
          // to non-rdv peers and garbage collect the SRDI
          // index in the process
          Vector clean = cleanupAnyEdges(query.getSrc(), results);

          if (clean.size() > 0) {

            if (LOG.isEnabledFor(Level.DEBUG)) {
              LOG.debug("found an srdi entry forwarding query to SRDI peer");
            }

            // The purpose of incrementing the hopcount
            // when an SRDI index match is found (we got a
            // pointer to a rdv that should have the route) is to
            // restrict any further forwarding. The increment
            // count is only done when a matching SRDI index is
            // found. Not when the replica is selected as we
            // still need to forward the query.  This restriction
            // is purposely done to avoid too many long jumps
            // within a walk.
            query.incrementHopCount();

            // Note: this forwards the query to 1 peer randomly
            // selected from the result
            srdi.forwardQuery(clean, query, 1);

            // tell the resolver no further action is needed.
            return ResolverService.OK;
          }
        }
      }

      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("did not find a route or SRDI index");
      }

      // force a walk
      return ResolverService.Repropagate;
    }

    // we found a route send the response
    try {
      if (route == null) {
        if (LOG.isEnabledFor(Level.DEBUG)) {
          LOG.debug("we should have had a route at this point");
        }
        return ResolverService.OK;
      }

      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("processQuery: we have a route build route response" + route.display());
      }

      RouteAdvertisement myRoute = router.getMyLocalRoute();

      // make sure we initialized our local
      // route info as we will need it to respond. We may
      // not have our route if we are still
      // waiting for a relay connection.
      if (myRoute == null) {
        return ResolverService.OK;
      }

      RouteResponse routeResponse = new RouteResponse();

      routeResponse.setDestRoute(route);
      routeResponse.setSrcRoute(myRoute);

      // construct a response from the query
      ResolverResponseMsg res = query.makeResponse();

      res.setCredential(credentialDoc);
      res.setResponse(routeResponse.toString());

      resolver.sendResponse(query.getSrc(), res);
      return ResolverService.OK;

    } catch (Exception ee) {
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("processQuery: error while processing query ", ee);
      }
      return ResolverService.OK;
    }
  }
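// A compact, self-contained sketch of the decision flow in processQuery() above. The
// RouteDecision enum and the boolean inputs are hypothetical stand-ins that only model which
// branch the method takes; they are not the real JXTA types or API.
class RouteQueryDecisionSketch {

  enum RouteDecision { ANSWER, DROP, FORWARD_VIA_SRDI, REPROPAGATE }

  static RouteDecision decide(
      boolean destinationIsUs,
      boolean haveDirectRoute,
      boolean haveRoutedRouteWithoutBadHops,
      boolean weAreRendezvous,
      int hopCount,
      boolean srdiHasCandidate) {

    // a route we can answer with, in order of preference
    if (destinationIsUs || haveDirectRoute || haveRoutedRouteWithoutBadHops) {
      return RouteDecision.ANSWER;
    }
    // edges never forward route queries; only rendezvous peers do
    if (!weAreRendezvous) {
      return RouteDecision.DROP;
    }
    // SRDI forwarding is only attempted on the first two hops, to guard against
    // loops caused by an out-of-sync peerview and index
    if (hopCount < 2 && srdiHasCandidate) {
      return RouteDecision.FORWARD_VIA_SRDI;
    }
    // nothing known locally: let the resolver walk the query further
    return RouteDecision.REPROPAGATE;
  }

  public static void main(String[] args) {
    System.out.println(decide(false, false, false, true, 0, true));  // FORWARD_VIA_SRDI
    System.out.println(decide(false, false, false, false, 0, true)); // DROP
    System.out.println(decide(false, true, false, false, 3, false)); // ANSWER
  }
}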
Example #4
  /**
   * Issues a new route discovery resolver request.
   *
   * @param peer the destination as a logical endpoint address
   */
  protected void findRoute(EndpointAddress peer) {

    RouteAdvertisement myRoute = router.getMyLocalRoute();

    // No need to pursue this further if we haven't
    // initialized our own route, as responding
    // peers will not be able to respond to us.
    if (myRoute == null) {
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("Cannot issue a find route if we don't know our own route");
      }
      return;
    }

    if (LOG.isEnabledFor(Level.DEBUG)) {
      LOG.debug("Find route for peer = " + peer);
    }

    try {
      // create a new RouteQuery message
      RouteQuery doc = null;

      // check if we have some bad route information
      // for that peer, in that case pass the bad hop count
      BadRoute badRoute;

      badRoute = (BadRoute) router.getBadRoute(peer);

      if (badRoute != null) {
        // ok we have a bad route
        // pass the bad hops info as part of the query
        if (LOG.isEnabledFor(Level.DEBUG)) {
          LOG.debug("findRoute sends query: known bad Hops" + badRoute.display());
        }
        doc = new RouteQuery(router.addr2pid(peer), myRoute, badRoute.getHops());
      } else {
        doc = new RouteQuery(router.addr2pid(peer), myRoute, null);
      }

      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("Sending query for peer : " + peer);
      }

      ResolverQuery query =
          new ResolverQuery(
              routerSName, credentialDoc, localPeerId.toString(), doc.toString(), qid++);

      // only run SRDI if we are a rendezvous
      if (group.isRendezvous()) {

        // check where to send the query via SRDI
        Vector results = null;

        if (srdiIndex != null) {
          // try to find at least 10 entries; we will pick one
          // randomly. This protects against retries. It is
          // likely that a number of RDVs will know about the route
          results = srdiIndex.query("route", "DstPID", router.addr2pid(peer).toString(), 10);

          if (results != null && results.size() > 0) {
            // use SRDI to send the query
            // remove any non rdv peers from the candidate list
            // and garbage collect the index in the process
            Vector clean = cleanupAnyEdges(query.getSrc(), results);

            if (clean.size() > 0) {
              // The purpose of incrementing the hopcount
              // when an SRDI index match is found (we got a
              // pointer to a rdv that should have the route) is to
              // restrict any further forwarding. The increment
              // count is only done when a matching SRDI index is
              // found. Not when the replica is selected as we
              // still need to forward the query.  This restriction
              // is purposely done to avoid too many long jumps
              // within a walk.
              query.incrementHopCount();

              srdi.forwardQuery(clean, query, 1);
              if (LOG.isEnabledFor(Level.DEBUG)) {
                LOG.debug("found an srdi entry forwarding query to SRDI peer");
              }
              return;
            }
          } else {
            // it is not in our cache, look for the replica peer
            // we need to send the query
            PeerID destPeer = srdi.getReplicaPeer(router.addr2pid(peer).toString());

            if (destPeer != null && !destPeer.equals(localPeerId)) {
              // don't push anywhere if we do not have a replica
              // or we are trying to push to ourselves
              if (LOG.isEnabledFor(Level.DEBUG)) {
                LOG.debug("processQuery srdiIndex DHT forward :" + destPeer);
              }

              srdi.forwardQuery(destPeer.toString(), query);
              return;
            }
          }
        }
      }

      // if we reach that point then we just use the resolver walk
      resolver = group.getResolverService();
      if (resolver != null) {
        resolver.sendQuery(null, query);
        if (LOG.isEnabledFor(Level.DEBUG)) {
          LOG.debug("find route query sent");
        }
      } else {
        if (LOG.isEnabledFor(Level.WARN)) {
          LOG.warn("cannot get the resolver service");
        }
      }

    } catch (Exception ee) {
      if (LOG.isEnabledFor(Level.WARN)) {
        LOG.warn("Exception in findRoute", ee);
      }
    }
  }
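// A self-contained sketch of the forwarding preference in findRoute(): an SRDI index hit is
// tried first, then the DHT replica peer, then a plain resolver walk as the fallback. The
// names used here (chooseForwardingStrategy, srdiCandidates, replicaPeer) are hypothetical
// stand-ins, not JXTA API.
import java.util.List;
import java.util.Optional;

class FindRouteForwardingSketch {

  static String chooseForwardingStrategy(
      boolean weAreRendezvous,
      List<String> srdiCandidates,  // cleaned-up rendezvous peers from the SRDI index
      Optional<String> replicaPeer, // DHT replica responsible for the destination key
      String localPeer) {

    if (weAreRendezvous) {
      if (!srdiCandidates.isEmpty()) {
        // forward to one randomly selected rendezvous that indexed the route
        return "srdi:" + srdiCandidates.get(0);
      }
      if (replicaPeer.isPresent() && !replicaPeer.get().equals(localPeer)) {
        // no index hit: ask the replica peer that should hold the entry
        return "replica:" + replicaPeer.get();
      }
    }
    // edges, or rendezvous with no better option, fall back to a resolver walk
    return "resolver-walk";
  }

  public static void main(String[] args) {
    System.out.println(chooseForwardingStrategy(true, List.of("rdv-A"), Optional.empty(), "me"));
    System.out.println(chooseForwardingStrategy(true, List.of(), Optional.of("rdv-B"), "me"));
    System.out.println(chooseForwardingStrategy(false, List.of(), Optional.empty(), "me"));
  }
}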
Example #5
  /**
   * Removes the SRDI index entries for the given peer.
   *
   * @param pid the peer ID whose index entries should be removed
   */
  protected void removeSrdiIndex(PeerID pid) {
    srdiIndex.remove(pid);
  }
Example #6
  /**
   * Processes an SRDI message request.
   *
   * @param message the SRDI resolver message
   * @return {@code true} if the message was processed successfully, {@code false} otherwise
   */
  public boolean processSrdi(ResolverSrdiMsg message) {

    String value;
    SrdiMessage srdiMsg;

    try {
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug("Received a SRDI messsage in group" + group.getPeerGroupName());
      }

      XMLDocument asDoc =
          (XMLDocument)
              StructuredDocumentFactory.newStructuredDocument(
                  MimeMediaType.XMLUTF8, new StringReader(message.getPayload()));

      srdiMsg = new SrdiMessageImpl(asDoc);
    } catch (Exception e) {
      // we don't understand this msg, let's skip it
      if (LOG.isEnabledFor(Level.WARN)) {
        LOG.warn("corrupted SRDI message", e);
      }

      return false;
    }

    PeerID pid = srdiMsg.getPeerID();

    // filter out messages about the local peer,
    // so we don't create self-references
    if (pid.equals(localPeerId)) {
      return false;
    }

    Iterator eachEntry = srdiMsg.getEntries().iterator();

    while (eachEntry.hasNext()) {
      SrdiMessage.Entry entry = (SrdiMessage.Entry) eachEntry.next();

      // drop any information about ourselves
      if (entry.key.equals(localPeerId.toString())) {
        continue;
      }
      value = entry.value;
      if (value == null) {
        value = "";
      }

      // Expiration of entries is taken care of by SrdiIndex, so we always add.
      // FIXME hamada 03142003
      // All routes are added under the secondary key 'DstPID'. It would be more correct to
      // specify it in the message, but since versioning is not yet supported the following is
      // acceptable, since it is localized.
      srdiIndex.add(srdiMsg.getPrimaryKey(), "DstPID", entry.key, pid, entry.expiration);
      if (LOG.isEnabledFor(Level.DEBUG)) {
        LOG.debug(
            "Primary Key ["
                + srdiMsg.getPrimaryKey()
                + "] key [DstPID]"
                + " value ["
                + entry.key
                + "] exp ["
                + entry.expiration
                + "]");
      }
    }

    return true;
  }
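// A minimal sketch of the indexing step in processSrdi() above, with a plain in-memory map
// standing in for the SrdiIndex. RouteIndexSketch and its structures are illustrative
// assumptions, not the JXTA SrdiIndex API; expiration handling is omitted because the real
// index performs it internally.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class RouteIndexSketch {

  // destination PeerID string -> list of rendezvous peers advertising a route to it
  private final Map<String, List<String>> dstPidIndex = new HashMap<String, List<String>>();

  // roughly mirrors srdiIndex.add(primaryKey, "DstPID", entry.key, pid, entry.expiration),
  // filtering out anything about the local peer before indexing
  void addEntry(String localPeer, String publisherPid, String dstPid) {
    if (publisherPid.equals(localPeer) || dstPid.equals(localPeer)) {
      return; // never index information about ourselves
    }
    dstPidIndex.computeIfAbsent(dstPid, k -> new ArrayList<String>()).add(publisherPid);
  }

  List<String> query(String dstPid) {
    return dstPidIndex.getOrDefault(dstPid, List.of());
  }

  public static void main(String[] args) {
    RouteIndexSketch index = new RouteIndexSketch();
    index.addEntry("local", "rdv-A", "peer-X");
    index.addEntry("local", "local", "peer-X"); // filtered out
    System.out.println(index.query("peer-X"));  // [rdv-A]
  }
}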