@Override
  public void doRequest(ServerData serverData) {
    CentralizedServiceWorker<I, ?, ?> serviceWorker = serverData.getServiceWorker();
    // Compute the initial size of each per-partition
    // ByteArrayVertexIdMessages on this worker. To make sure every
    // ByteArrayVertexIdMessages has enough space to store its messages,
    // we divide the original one-to-all message size by the number of
    // partitions and then double it. (The dominant component of a
    // one-to-all message is the id list; after reformatting, each target
    // id carries its own copy of the message, hence the doubling.)
    int initialSize =
        oneToAllMsgs.getSize() / serverData.getPartitionStore().getNumPartitions() * 2;
    // Create one ByteArrayVertexIdMessages per partition for message
    // reformatting.
    Int2ObjectOpenHashMap<ByteArrayVertexIdMessages<I, M>> partitionIdMsgs =
        new Int2ObjectOpenHashMap<ByteArrayVertexIdMessages<I, M>>();

    // Put data from ByteArrayOneToAllMessages to ByteArrayVertexIdMessages
    ExtendedDataInput reader = oneToAllMsgs.getOneToAllMessagesReader();
    I vertexId = getConf().createVertexId();
    M msg = oneToAllMsgs.createMessage();
    int idCount = 0;
    int partitionId = 0;
    try {
      while (reader.available() != 0) {
        msg.readFields(reader);
        idCount = reader.readInt();
        for (int i = 0; i < idCount; i++) {
          vertexId.readFields(reader);
          PartitionOwner owner = serviceWorker.getVertexPartitionOwner(vertexId);
          partitionId = owner.getPartitionId();
          ByteArrayVertexIdMessages<I, M> idMsgs = partitionIdMsgs.get(partitionId);
          if (idMsgs == null) {
            idMsgs =
                new ByteArrayVertexIdMessages<I, M>(getConf().<M>getOutgoingMessageValueFactory());
            idMsgs.setConf(getConf());
            idMsgs.initialize(initialSize);
            partitionIdMsgs.put(partitionId, idMsgs);
          }
          idMsgs.add(vertexId, msg);
        }
      }
    } catch (IOException e) {
      throw new RuntimeException("doRequest: Got IOException ", e);
    }
    // Read ByteArrayVertexIdMessages and write to message store
    try {
      for (Entry<Integer, ByteArrayVertexIdMessages> idMsgs : partitionIdMsgs.entrySet()) {
        if (!idMsgs.getValue().isEmpty()) {
          serverData
              .getIncomingMessageStore()
              .addPartitionMessages(idMsgs.getKey(), idMsgs.getValue());
        }
      }
    } catch (IOException e) {
      throw new RuntimeException("doRequest: Got IOException.", e);
    }
  }
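For readers new to fastutil, here is a minimal, self-contained sketch of the lazy get-or-create pattern used above. StringBuilder stands in for ByteArrayVertexIdMessages, and the partition ids are made up:

import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;

public class PerPartitionBuffers {
  public static void main(String[] args) {
    Int2ObjectOpenHashMap<StringBuilder> buffers = new Int2ObjectOpenHashMap<>();
    int[] partitionIds = {0, 1, 0, 2, 1};
    for (int partitionId : partitionIds) {
      StringBuilder buf = buffers.get(partitionId);
      if (buf == null) {
        // Create the per-partition buffer lazily on first use.
        buf = new StringBuilder();
        buffers.put(partitionId, buf);
      }
      buf.append('x'); // stand-in for idMsgs.add(vertexId, msg)
    }
    System.out.println(buffers.size()); // 3 distinct partitions were touched
  }
}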
 @Override
 public int test() {
   final Int2ObjectOpenHashMap<Integer> m_map =
       new Int2ObjectOpenHashMap<>(m_keys.length, m_fillFactor);
   // Insert every key twice: the second pass only overwrites existing
   // mappings, so the final size equals the number of distinct keys.
   for (int i = 0; i < m_keys.length; ++i) m_map.put(m_keys[i], null);
   for (int i = 0; i < m_keys.length; ++i) m_map.put(m_keys[i], null);
   return m_map.size();
 }
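A quick standalone check of the behavior this benchmark relies on (not part of the benchmark itself): put() on an existing key overwrites and returns the previous value, so duplicates never grow the map.

import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;

public class DuplicatePutDemo {
  public static void main(String[] args) {
    Int2ObjectOpenHashMap<Integer> map = new Int2ObjectOpenHashMap<>();
    System.out.println(map.put(42, 1)); // null: key was absent
    System.out.println(map.put(42, 2)); // 1: previous value is returned
    System.out.println(map.size());     // 1: the duplicate put did not grow the map
  }
}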
 @Override
 public int test() {
   // Capacity for half the keys suffices: each iteration does two puts
   // and one remove, so the map peaks at m_keys.length / 2 + 1 entries
   // (an even-length m_keys is assumed).
   final Int2ObjectOpenHashMap<Integer> m_map =
       new Int2ObjectOpenHashMap<>(m_keys.length / 2 + 1, m_fillFactor);
   final Integer value = 1;
   int add = 0, remove = 0;
   while (add < m_keys.length) {
     m_map.put(m_keys[add], value);
     ++add;
     m_map.put(m_keys[add], value);
     ++add;
     m_map.remove(m_keys[remove++]);
   }
   return m_map.size();
 }
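A standalone trace of the steady state this benchmark produces (made-up keys, even-length array): two puts followed by one remove grow the map by one entry per iteration, ending at half the key count.

import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;

public class AddRemoveDemo {
  public static void main(String[] args) {
    int[] keys = {1, 2, 3, 4, 5, 6};
    Int2ObjectOpenHashMap<Integer> map =
        new Int2ObjectOpenHashMap<>(keys.length / 2 + 1);
    int add = 0, remove = 0;
    while (add < keys.length) {
      map.put(keys[add++], 1);
      map.put(keys[add++], 1);
      map.remove(keys[remove++]); // net growth: one entry per iteration
    }
    System.out.println(map.size()); // 3 == keys.length / 2
  }
}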
Example #4
 protected void populateCandidateParents(
     Int2ObjectOpenHashMap<IntOpenHashSet> candidateParentsPerNode, IntOpenHashSet chunk) {
    for (int v : chunk) {
      // Union, over all actions in C+(v), of the nodes that can act as parents of v.
      IntOpenHashSet candidateParents = new IntOpenHashSet();
     Int2ObjectOpenHashMap<IntArrayList> cPlusV = auxiliary.getCplusOnline(v);
     if (cPlusV != null) {
       for (int action : cPlusV.keySet()) {
         for (int u : cPlusV.get(action)) {
           candidateParents.add(u);
         }
       }
     }
     candidateParentsPerNode.put(v, candidateParents);
   }
 }
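auxiliary.getCplusOnline is project-specific, but the flattening pattern above is generic. A minimal standalone sketch with illustrative data: union all values of an Int2ObjectOpenHashMap<IntArrayList> into one IntOpenHashSet.

import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.ints.IntArrayList;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;

public class FlattenDemo {
  public static void main(String[] args) {
    Int2ObjectOpenHashMap<IntArrayList> byAction = new Int2ObjectOpenHashMap<>();
    byAction.put(1, IntArrayList.wrap(new int[] {10, 20}));
    byAction.put(2, IntArrayList.wrap(new int[] {20, 30}));
    IntOpenHashSet union = new IntOpenHashSet();
    for (int action : byAction.keySet()) {
      union.addAll(byAction.get(action)); // duplicates collapse in the set
    }
    System.out.println(union.size()); // 3 distinct candidate parents
  }
}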
Example #5
  /**
   * For each block (child node), find a 'base' subset of parents for which the block's
   * logLikelihood is not -Infinity.
   *
   * @param candidateParentsPerNode candidate parent set for each child node
   * @param chosenArcsPerNode output map, filled with the arcs chosen for each child node
   * @return the total logLikelihood over all blocks
   */
 protected double getOutOfMinusInfinity(
     Int2ObjectOpenHashMap<IntOpenHashSet> candidateParentsPerNode,
     Int2ObjectOpenHashMap<ObjectOpenHashSet<Arc>> chosenArcsPerNode) {
   return getOutOfMinusInfinity(
       candidateParentsPerNode,
       chosenArcsPerNode,
       new IntOpenHashSet(candidateParentsPerNode.keySet()),
       null);
 }
  @Override
  public int test() {
    int res = 0;
    // Flip a bit on every successful lookup so the result depends on the
    // whole loop and it cannot be optimized away.
    for (int i = 0; i < m_keys.length; ++i) if (m_map.get(m_keys[i]) != null) res ^= 1;
    return res;
  }
  @Override
  public void setup(int[] keys, float fillFactor, int oneFailOutOf) {
    super.setup(keys, fillFactor, oneFailOutOf);
    m_map = new Int2ObjectOpenHashMap<>(keys.length, fillFactor);
    // Store every oneFailOutOf-th key shifted by one, so that roughly
    // 1 in oneFailOutOf later lookups is a guaranteed miss.
    for (int key : keys) m_map.put(key % oneFailOutOf == 0 ? key + 1 : key, Integer.valueOf(key));
  }
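A standalone illustration of the miss trick above, with made-up keys and oneFailOutOf = 3: keys divisible by oneFailOutOf are stored under a shifted key, so looking them up by their original value misses.

import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;

public class MissDemo {
  public static void main(String[] args) {
    int oneFailOutOf = 3;
    int[] keys = {1, 2, 3};
    Int2ObjectOpenHashMap<Integer> map = new Int2ObjectOpenHashMap<>();
    for (int key : keys) {
      map.put(key % oneFailOutOf == 0 ? key + 1 : key, key);
    }
    System.out.println(map.get(1)); // 1: hit
    System.out.println(map.get(3)); // null: 3 was stored under 4, a guaranteed miss
  }
}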
Example #8
   /**
    * For a specific sub-set of blocks (child nodes), find a 'base' subset of parents for which
    * the block's logLikelihood is not -Infinity.
    *
    * @param candidateParentsPerNode candidate parent set for each child node
    * @param chosenArcsPerNode output map, filled with the arcs chosen for each child node
    * @param setOfBlocks the blocks (child nodes) to process
    * @param logLPerNode if non-null, filled with the logLikelihood of each processed node
    * @return the total logLikelihood over the processed blocks
    */
  protected double getOutOfMinusInfinity(
      Int2ObjectOpenHashMap<IntOpenHashSet> candidateParentsPerNode,
      Int2ObjectOpenHashMap<ObjectOpenHashSet<Arc>> chosenArcsPerNode,
      IntOpenHashSet setOfBlocks,
      TIntDoubleHashMap logLPerNode) {
    double totalLogL = 0;

    ProgressLogger pl = new ProgressLogger(LOGGER, ProgressLogger.TEN_SECONDS, "blocks");
     pl.start("Begin initialization, using a set-cover heuristic to avoid zero likelihood");
    pl.expectedUpdates = setOfBlocks.size();
    int nArcs = 0;
    for (int v : setOfBlocks) {
      pl.update();

      IntOpenHashSet vParents = candidateParentsPerNode.get(v);

      Int2ObjectOpenHashMap<IntOpenHashSet> parentActions =
          new Int2ObjectOpenHashMap<IntOpenHashSet>();

      Int2ObjectOpenHashMap<IntArrayList> cPlusV = auxiliary.getCplusOnline(v);
      Int2ObjectOpenHashMap<IntArrayList> cMinusV = auxiliary.getCminusOnline(v);

      if (cPlusV != null) {
        IntSet actions = cPlusV.keySet();
         // Heuristic: first add the parents that participate in A+ for
         // the most actions
        for (int action : actions) {
          for (int u : cPlusV.get(action)) {
            if (!parentActions.containsKey(u)) {
              parentActions.put(u, new IntOpenHashSet());
            }
            parentActions.get(u).add(action);
          }
        }
      }

      KeepMaximum km = new KeepMaximum();
      km.addAllKey2Listsize(parentActions);

       IntOpenHashSet baseSetOfParents = new IntOpenHashSet();
       double logL = Double.NEGATIVE_INFINITY;
       // Greedy set-cover: keep adding the parent that covers the most
       // remaining actions until the block's logLikelihood becomes finite
       // or no candidate is left (getMaximumKey() == -1).
       while (logL == Double.NEGATIVE_INFINITY && (km.getMaximumKey() != -1)) {
         int u = km.getMaximumKey();
         if (baseSetOfParents.contains(u)) {
           throw new IllegalStateException("Attempted to add the same parent twice");
         }
         baseSetOfParents.add(u);
         logL = blockLogLikelihood(v, cPlusV, cMinusV, baseSetOfParents);
         // Discount the actions now covered by u from every remaining
         // candidate, then recompute the maximum. Skip u itself so we do
         // not remove elements of uActions while iterating over them.
         IntOpenHashSet uActions = parentActions.get(u);
         for (int parent : vParents) {
           if (parent != u) {
             parentActions.get(parent).removeAll(uActions);
           }
         }
         vParents.remove(u);
         parentActions.remove(u);
         km.reset();
         km.addAllKey2Listsize(parentActions);
       }

      // keep track of the likelihood
      totalLogL += logL;
      if (logLPerNode != null) {
        logLPerNode.put(v, logL);
      }

      chosenArcsPerNode.put(v, new ObjectOpenHashSet<Arc>());
      for (int u : baseSetOfParents) {
        nArcs++;
        chosenArcsPerNode.get(v).add(new Arc(u, v));
      }
    }
    pl.stop("Done initialization. Added " + nArcs + " arcs, logLikelihood=" + totalLogL);
    return totalLogL;
  }
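For context, the loop above is a greedy maximum-coverage heuristic. A minimal standalone sketch with illustrative data, where the project-specific KeepMaximum helper is replaced by an explicit scan for the largest remaining action set:

import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;

public class GreedyCoverDemo {
  public static void main(String[] args) {
    // Candidate parent -> set of actions it participates in.
    Int2ObjectOpenHashMap<IntOpenHashSet> parentActions = new Int2ObjectOpenHashMap<>();
    parentActions.put(1, new IntOpenHashSet(new int[] {10, 11, 12}));
    parentActions.put(2, new IntOpenHashSet(new int[] {11}));
    parentActions.put(3, new IntOpenHashSet(new int[] {12, 13}));

    IntOpenHashSet chosen = new IntOpenHashSet();
    while (!parentActions.isEmpty()) {
      // Pick the parent covering the most remaining actions.
      int best = -1;
      for (int u : parentActions.keySet()) {
        if (best == -1 || parentActions.get(u).size() > parentActions.get(best).size()) {
          best = u;
        }
      }
      if (parentActions.get(best).isEmpty()) break; // nothing left to cover
      chosen.add(best);
      IntOpenHashSet covered = parentActions.remove(best);
      for (int u : parentActions.keySet()) {
        parentActions.get(u).removeAll(covered); // discount covered actions
      }
    }
    System.out.println(chosen.size()); // 2: parents 1 and 3 cover all actions
  }
}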