Example #1
  // Need to ask for the objects in order, starting with the first segment.
  @Test
  public void testGetAllSegmentsFromPipeline() {
    Log.info(Log.FAC_TEST, "Starting testGetAllSegmentsFromPipeline");

    long received = 0;
    byte[] bytes = new byte[1024];

    try {
      istream = new CCNInputStream(testName, readHandle);
    } catch (IOException e1) {
      Log.warning(Log.FAC_TEST, "failed to open stream for pipeline test: " + e1.getMessage());
      Assert.fail();
    }

    while (!istream.eof()) {
      try {
        received += istream.read(bytes);
      } catch (IOException e) {
        Log.warning(Log.FAC_TEST, "failed to read segments: " + e.getMessage());
        Assert.fail();
      }
    }
    Log.info(Log.FAC_TEST, "read " + received + " from stream");
    Assert.assertEquals(bytesWritten, received);

    Log.info(Log.FAC_TEST, "Completed testGetAllSegmentsFromPipeline");
  }
Example #2
  /**
   * Send out a start write request to any listening repositories and wait for a response.
   *
   * @param name the basename of the stream to start
   * @param shape currently ignored - can only be Shape.STREAM
   * @throws IOException if there is no response from a repository
   */
  @Override
  public void startWrite(ContentName name, Shape shape) throws IOException {

    if (Log.isLoggable(Log.FAC_REPO, Level.INFO))
      Log.info(
          Log.FAC_REPO,
          "RepositoryFlowControl.startWrite called for name {0}, shape {1}",
          name,
          shape);
    Client client = new Client(name, shape);
    _clients.add(client);

    // A nonce is used because if we tried to write data with the same name more than once, we
    // could retrieve the previous answer from the cache, and the repo would never be informed of
    // our start write.
    ContentName repoWriteName = new ContentName(name, COMMAND_MARKER_REPO_START_WRITE, NONCE);
    Interest writeInterest = new Interest(repoWriteName);
    if (localRepo || SystemConfiguration.FC_LOCALREPOSITORY) {
      // this is meant to be written to a local repository, not any/multiple connected repos
      writeInterest.scope(1);
    }

    _handle.expressInterest(writeInterest, this);

    synchronized (this) {
      _writeInterests.add(writeInterest);
    }

    // Wait for information to be returned from a repo
    try {
      new Waiter(getTimeout()) {
        @Override
        protected boolean check(Object o, Object check) throws Exception {
          return ((Client) check)._initialized;
        }
      }.wait(this, client);
    } catch (Exception e) {
      Log.warning(Log.FAC_REPO, e.getClass() + " : " + e.getMessage());
    }

    synchronized (this) {
      if (!client._initialized) {
        _clients.remove();
        Log.warning(Log.FAC_REPO, "No response from a repository, cannot add name space : " + name);
        throw new IOException("No response from a repository for " + name);
      }
    }
  }
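A minimal usage sketch (not from the original source) of how a client might drive startWrite directly; the RepositoryFlowControl constructor, the package locations in the imports, and the /example/data name are assumptions for illustration only.

import java.io.IOException;

import org.ccnx.ccn.CCNHandle;
import org.ccnx.ccn.impl.CCNFlowControl.Shape; // assumed location of Shape
import org.ccnx.ccn.impl.repo.RepositoryFlowControl; // assumed location of RepositoryFlowControl
import org.ccnx.ccn.protocol.ContentName;

public class StartWriteSketch {
  public static void main(String[] args) throws Exception {
    CCNHandle handle = CCNHandle.open();
    ContentName name = ContentName.fromNative("/example/data");
    // Assumed constructor: a flow controller bound to the name we intend to write.
    RepositoryFlowControl rfc = new RepositoryFlowControl(name, handle);
    try {
      // Announce the stream to listening repositories and block until one responds,
      // or until the timeout elapses, in which case an IOException is thrown.
      rfc.startWrite(name, Shape.STREAM);
    } catch (IOException e) {
      System.err.println("No repository answered for " + name + ": " + e.getMessage());
    } finally {
      handle.close();
    }
  }
}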
Example #3
  // Test skipping over part of the stream while reading it through the pipeline.
  @Test
  public void testSkipWithPipeline() {
    Log.info(Log.FAC_TEST, "Starting testSkipWithPipeline");

    long received = 0;
    byte[] bytes = new byte[100];

    boolean skipDone = false;

    try {
      istream = new CCNInputStream(testName, readHandle);
    } catch (IOException e1) {
      Log.warning(Log.FAC_TEST, "Failed to get new stream: " + e1.getMessage());
      Assert.fail();
    }

    while (!istream.eof()) {
      try {
        Log.info(Log.FAC_TEST, "Read so far: " + received);
        if (received > 0 && received < 250 && !skipDone) {
          // want to skip some segments
          istream.skip(100);
          skipDone = true;
        }
        received += istream.read(bytes);
      } catch (IOException e) {
        Log.info(Log.FAC_TEST, "failed to read segments: " + e.getMessage());
        Assert.fail();
      }
    }
    Log.info(Log.FAC_TEST, "read " + received + " from stream");
    Assert.assertEquals(bytesWritten - 100, received);

    Log.info(Log.FAC_TEST, "Completed testSkipWithPipeline");
  }
Example #4
  /**
   * Applies policy changes.
   *
   * @param pxml policy data
   * @param fromNet true if the update arrived over the network; a network update must match the
   *     repository's existing local name and global prefix
   * @throws RepositoryException if the policy data is invalid or does not match this repository
   */
  public void update(PolicyXML pxml, boolean fromNet) throws RepositoryException {
    Log.info(Log.FAC_REPO, "Updating policy");
    if (pxml._version == null) throw new RepositoryException("No version in policy file");
    if (!pxml._version.equals(POLICY_VERSION)) {
      Log.warning(Log.FAC_REPO, "Bad version in policy file: {0}", pxml._version);
      throw new RepositoryException("Bad version in policy file");
    }

    if (null == pxml._localName) throw new RepositoryException("No local name in policy file");
    if (fromNet) {
      if (!pxml._localName.equals(_pxml.getLocalName())) {
        Log.warning(
            Log.FAC_REPO, "Repository local name doesn't match: request = {0}", pxml._localName);
        throw new RepositoryException("Repository local name doesn't match policy file");
      }
    } else {
      try {
        setLocalName(pxml._localName);
      } catch (MalformedContentNameStringException e) {
        throw new RepositoryException(e.getMessage());
      }
    }

    if (null == pxml._globalPrefix) throw new RepositoryException("No globalPrefix in policy file");

    if (fromNet) {
      if (!pxml.getGlobalPrefix().equals(_pxml.getGlobalPrefix())) {
        Log.warning("Repository globalPrefix doesn't match: request = {0}", pxml._globalPrefix);
        throw new RepositoryException("Repository global prefix doesn't match policy file");
      }
    } else {
      _pxml.setGlobalPrefixOnly(pxml._globalPrefix);
    }

    _pxml.setNamespace(pxml.getNamespace());
    if (null != pxml.getNamespace()) {
      StringBuilder message = new StringBuilder();
      for (ContentName name : pxml.getNamespace()) {
        message.append(name).append(':');
      }
      Log.info(Log.FAC_REPO, "Policy has been updated. New namespace is: " + message);
    }
  }
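A small, hypothetical caller sketch to illustrate the intent of the fromNet flag; the two wrapper methods below are invented names and are assumed to live in the same policy-handling class, so update(), PolicyXML, and RepositoryException are in scope.

  void applyLocalPolicy(PolicyXML localPolicy) throws RepositoryException {
    // Local policy file read at startup: its names become this repository's names.
    update(localPolicy, false);
  }

  void applyNetworkPolicy(PolicyXML remotePolicy) throws RepositoryException {
    // Policy update received over the network: its names must match the repository's
    // existing configuration, otherwise update() throws RepositoryException.
    update(remotePolicy, true);
  }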
Example #5
  /**
   * Handle responses from CCNNameEnumerator that give us a list of single-component child names.
   * Filter the returned names down to those that are new to us, add them to our list of known
   * children, postprocess them with processNewChildren(SortedSet<ContentName>), and signal waiters
   * if we have new data.
   *
   * @param prefix Prefix used for name enumeration.
   * @param names The list of names returned in this name enumeration response.
   * @return 0 in all cases
   */
  public int handleNameEnumerator(ContentName prefix, ArrayList<ContentName> names) {

    if (Log.isLoggable(Level.INFO)) {
      if (!_enumerating) {
        // Right now, just log if we get data out of enumeration, don't drop it on the floor;
        // don't want to miss results in case we are started again.
        Log.info(
            "ENUMERATION STOPPED: but {0} new name enumeration results: our prefix: {1} returned prefix: {2}",
            names.size(), _namePrefix, prefix);
      } else {
        Log.info(
            "{0} new name enumeration results: our prefix: {1} returned prefix: {2}",
            names.size(), _namePrefix, prefix);
      }
    }
    if (!prefix.equals(_namePrefix)) {
      Log.warning("Returned data doesn't match requested prefix!");
    }
    Log.info("Handling Name Iteration {0}", prefix);
    // the name enumerator hands off names to us, we own it now
    // DKS -- want to keep listed as new children we previously had
    synchronized (_childLock) {
      TreeSet<ContentName> thisRoundNew = new TreeSet<ContentName>();
      thisRoundNew.addAll(names);
      Iterator<ContentName> it = thisRoundNew.iterator();
      while (it.hasNext()) {
        ContentName name = it.next();
        if (_children.contains(name)) {
          it.remove();
        }
      }
      if (!thisRoundNew.isEmpty()) {
        if (null != _newChildren) {
          _newChildren.addAll(thisRoundNew);
        } else {
          _newChildren = thisRoundNew;
        }
        _children.addAll(thisRoundNew);
        _lastUpdate = new CCNTime();
        if (Log.isLoggable(Level.INFO)) {
          Log.info(
              "New children found at {0}: {1} new, {2} total children",
              _lastUpdate, thisRoundNew.size(), _children.size());
        }
        processNewChildren(thisRoundNew);
        _childLock.notifyAll();
      }
    }
    return 0;
  }
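To illustrate the "signal waiters" side of this callback, here is a minimal consumer sketch; the method name is invented, it is assumed to live in the same class (so _childLock, _newChildren, ContentName, and java.util.SortedSet are in scope), and it is not the library's own waiting API.

  protected SortedSet<ContentName> takeNewChildren(long timeoutMillis) throws InterruptedException {
    synchronized (_childLock) {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      // handleNameEnumerator() calls _childLock.notifyAll() after recording new names.
      while (null == _newChildren && System.currentTimeMillis() < deadline) {
        _childLock.wait(Math.max(1, deadline - System.currentTimeMillis()));
      }
      SortedSet<ContentName> batch = _newChildren;
      _newChildren = null; // hand this batch to the caller; the next round starts empty
      return batch; // null if the timeout expired with no new children
    }
  }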
Example #6
  @Test
  public void testGetFirstDigest() {
    Log.info(Log.FAC_TEST, "Starting testGetFirstDigest");

    long received = 0;
    byte[] bytes = new byte[1024];

    try {
      istream = new CCNInputStream(testName, readHandle);
    } catch (IOException e1) {
      Log.warning(Log.FAC_TEST, "failed to open stream for pipeline test: " + e1.getMessage());
      Assert.fail();
    }

    try {
      Assert.assertTrue(DataUtils.arrayEquals(firstDigest, istream.getFirstDigest()));
    } catch (IOException e3) {
      Log.warning(Log.FAC_TEST, "failed to get first digest for pipeline test:");
      Assert.fail();
    }
    try {
      istream.close();
    } catch (IOException e2) {
      Log.warning(Log.FAC_TEST, "failed to close stream for pipeline test: " + e2.getMessage());
      Assert.fail();
    }
    Log.info(Log.FAC_TEST, "start first segment digest " + DataUtils.printBytes(firstDigest));

    try {
      istream = new CCNInputStream(testName, readHandle);
    } catch (IOException e1) {
      Log.warning(Log.FAC_TEST, "failed to open stream for pipeline test: " + e1.getMessage());
      Assert.fail();
    }

    while (!istream.eof()) {
      try {
        received += istream.read(bytes);
      } catch (IOException e) {
        Log.warning(Log.FAC_TEST, "failed to read segments: " + e.getMessage());
        Assert.fail();
      }
    }
    Assert.assertEquals(bytesWritten, received);
    try {
      Assert.assertTrue(DataUtils.arrayEquals(firstDigest, istream.getFirstDigest()));
    } catch (IOException e) {
      Log.warning(Log.FAC_TEST, "failed to get first digest after reading in pipeline test:");
      Assert.fail();
    }
    Log.info(Log.FAC_TEST, "end first segment digest " + DataUtils.printBytes(firstDigest));

    Log.info(Log.FAC_TEST, "Completed testGetFirstDigest");
  }
Example #7
 /** @throws java.lang.Exception */
 @BeforeClass
 public static void setUpBeforeClass() throws Exception {
   Log.warning(
       "Warning! This tests bakes in low-level knowledge of versioning/segmentation conventions, and must be updated when they change!");
 }
Example #8
  public Interest handleContent(ContentObject result, Interest interest) {
    Log.finest(
        Log.FAC_TEST, "Interests registered: " + _interests.size() + " content object returned");
    // Parameterized behavior that subclasses can override.
    ContentName interestName = null;
    if (_processedObjects.contains(result)) {
      Log.fine(
          Log.FAC_TEST,
          "FLOSSER: Got repeated content for interest: {0} content: {1}",
          interest,
          result.name());
    } else {
      Log.finest(
          Log.FAC_TEST,
          "FLOSSER: Got new content for interest {0} content name: {1}",
          interest,
          result.name());
      processContent(result);
      // Update the interest, following the process used by ccnslurp:
      // exclude the next component of this object, and set up a
      // separate interest to explore its children.
      // First, remove the interest from our list, as we aren't going to
      // re-express it in exactly the same way. (A simplified sketch of the
      // exclusion step appears after this method.)
      synchronized (_interests) {
        for (Entry<ContentName, Interest> entry : _interests.entrySet()) {
          if (entry.getValue().equals(interest)) {
            interestName = entry.getKey();
            _interests.remove(interestName);
            break;
          }
        }
      }

      int prefixCount = interest.name().count();
      // DKS TODO should the count above be count()-1 and this just prefixCount?
      if (prefixCount == result.name().count()) {
        if (null == interest.exclude()) {
          ArrayList<Exclude.Element> excludes = new ArrayList<Exclude.Element>();
          excludes.add(new ExcludeComponent(result.digest()));
          interest.exclude(new Exclude(excludes));
          Log.finest(Log.FAC_TEST, "Creating new exclude filter for interest {0}", interest.name());
        } else {
          if (interest.exclude().match(result.digest())) {
            Log.fine(
                Log.FAC_TEST,
                "We should have already excluded content digest: "
                    + DataUtils.printBytes(result.digest()));
          } else {
            // Has to be in order...
            Log.finest(Log.FAC_TEST, "Adding child component to exclude.");
            interest.exclude().add(new byte[][] {result.digest()});
          }
        }
        Log.finer(
            Log.FAC_TEST,
            "Excluding content digest: "
                + DataUtils.printBytes(result.digest())
                + " onto interest {0} total excluded: "
                + interest.exclude().size(),
            interest.name());
      } else {
        // Add an exclude for the content we just got
        // DKS TODO might need to split to matchedComponents like ccnslurp
        if (null == interest.exclude()) {
          ArrayList<Exclude.Element> excludes = new ArrayList<Exclude.Element>();
          excludes.add(new ExcludeComponent(result.name().component(prefixCount)));
          interest.exclude(new Exclude(excludes));
          Log.finest(Log.FAC_TEST, "Creating new exclude filter for interest {0}", interest.name());
        } else {
          if (interest.exclude().match(result.name().component(prefixCount))) {
            Log.fine(
                Log.FAC_TEST,
                "We should have already excluded child component: {0}",
                ContentName.componentPrintURI(result.name().component(prefixCount)));
          } else {
            // Has to be in order...
            Log.finest(Log.FAC_TEST, "Adding child component to exclude.");
            interest.exclude().add(new byte[][] {result.name().component(prefixCount)});
          }
        }
        Log.finer(
            Log.FAC_TEST,
            "Excluding child "
                + ContentName.componentPrintURI(result.name().component(prefixCount))
                + " total excluded: "
                + interest.exclude().size());

        if (_flossSubNamespaces
            || SegmentationProfile.isNotSegmentMarker(result.name().component(prefixCount))) {
          ContentName newNamespace = null;
          try {
            if (interest.name().count() == result.name().count()) {
              newNamespace = new ContentName(interest.name(), result.digest());
              Log.info(Log.FAC_TEST, "Not adding content exclusion namespace: {0}", newNamespace);
            } else {
              newNamespace =
                  new ContentName(
                      interest.name(), result.name().component(interest.name().count()));
              Log.info(Log.FAC_TEST, "Adding new namespace: {0}", newNamespace);
              handleNamespace(newNamespace, interest.name());
            }
          } catch (IOException ioex) {
            Log.warning("IOException picking up namespace: {0}", newNamespace);
          }
        }
      }
    }
    if (null != interest)
      synchronized (_interests) {
        _interests.put(interest.name(), interest);
      }
    return interest;
  }
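The ccnslurp-style process described in the comments above reduces to growing an interest's Exclude with each first-level component already seen, so each re-expression can only be answered by content we have not seen yet. The sketch below isolates that step using only calls that appear in handleContent; the class name and the package paths in the imports are assumptions.

import java.util.ArrayList;

import org.ccnx.ccn.protocol.Exclude;
import org.ccnx.ccn.protocol.ExcludeComponent;
import org.ccnx.ccn.protocol.Interest;

class ExclusionSketch {
  /** Add one already-seen child component to the interest's exclude filter. */
  static void excludeSeenChild(Interest interest, byte[] childComponent) {
    if (null == interest.exclude()) {
      // No exclude filter yet: start one containing just this component.
      ArrayList<Exclude.Element> excludes = new ArrayList<Exclude.Element>();
      excludes.add(new ExcludeComponent(childComponent));
      interest.exclude(new Exclude(excludes));
    } else if (!interest.exclude().match(childComponent)) {
      // Mirrors the "has to be in order" handling in handleContent above.
      interest.exclude().add(new byte[][] {childComponent});
    }
  }
}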
Example #9
 protected static void handleException(String message, Exception e) {
   Log.warning(message + " Exception: " + e.getClass().getName() + ": " + e.getMessage());
   Log.warningStackTrace(e);
 }