Example no. 1
  /** @param aProperties the updated properties. */
  @SuppressWarnings("rawtypes")
  final void setProperties(final Dictionary aProperties) {
    final Map<String, String> newProps = new HashMap<String, String>();

    Enumeration keys = aProperties.keys();
    while (keys.hasMoreElements()) {
      final String key = (String) keys.nextElement();
      if (!KNOWN_KEYS.contains(key) && !IGNORED_KEYS.contains(key)) {
        LOG.log(Level.WARNING, "Unknown/unsupported profile key: " + key);
        continue;
      }

      final String value = aProperties.get(key).toString();
      newProps.put(key, value.trim());
    }

    // Verify whether all known keys are defined...
    final List<String> checkedKeys = new ArrayList<String>(KNOWN_KEYS);
    checkedKeys.removeAll(newProps.keySet());
    if (!checkedKeys.isEmpty()) {
      throw new IllegalArgumentException(
          "Profile settings not complete! Missing keys are: " + checkedKeys.toString());
    }

    this.properties.putAll(newProps);

    LOG.log(
        Level.INFO,
        "New device profile settings applied for {1} ({0}) ...", //
        new Object[] {getType(), getDescription()});
  }
Example no. 2
 private InboundTransferTask addTransfer(Address source, Set<Integer> segmentsFromSource) {
   synchronized (this) {
     segmentsFromSource.removeAll(
         transfersBySegment.keySet()); // segments already in progress are excluded
     if (!segmentsFromSource.isEmpty()) {
       InboundTransferTask inboundTransfer =
           new InboundTransferTask(
               segmentsFromSource,
               source,
               cacheTopology.getTopologyId(),
               this,
               rpcManager,
               commandsFactory,
               timeout,
               cacheName);
       for (int segmentId : segmentsFromSource) {
         transfersBySegment.put(segmentId, inboundTransfer);
       }
       List<InboundTransferTask> inboundTransfers =
           transfersBySource.get(inboundTransfer.getSource());
       if (inboundTransfers == null) {
         inboundTransfers = new ArrayList<InboundTransferTask>();
         transfersBySource.put(inboundTransfer.getSource(), inboundTransfers);
       }
       inboundTransfers.add(inboundTransfer);
       taskQueue.add(inboundTransfer);
       return inboundTransfer;
     } else {
       return null;
     }
   }
 }
Example no. 3
  private Map<Integer, Double> computeOptimizationScores(
      int expMinCluster, int expMaxCluster, int hop, Optimizer optimizer) {

    List<Integer> searchSpace = computeSearchSpace(expMinCluster, expMaxCluster, hop);

    List<Integer> newSearchSpace = new ArrayList<>(searchSpace);
    newSearchSpace.removeAll(scores.keySet());

    OptimizationManager_old optimizationManager =
        new OptimizationManager_old(parallelWorkers, maxJobsPerWorker);
    Map<Integer, Double> scores = optimizationManager.process(optimizer, newSearchSpace);
    this.scores.putAll(scores);

    for (int s : searchSpace) scores.put(s, this.scores.get(s));

    System.out.println("[]Tried search space:" + scores);
    // are the scores valid? if not, halve the hop, recompute the search space, re-optimize
    if (invalid(scores)) {
      if (hop != 1) {
        hop = reduceHop(hop);
        return computeOptimizationScores(expMinCluster, expMaxCluster, hop, optimizer);
      } else {
        return null;
      }
    }
    return scores;
  }
Example no. 4
 public void verify() {
   assertEquals(numberOfRequests.size(), requestsNum);
   assertEquals(numberOfJobsMap.keySet().size(), requestsNum);
   for (int num : numberOfJobsMap.values()) {
     assertEquals(num, jobsNumPerRequest);
   }
 }
Example no. 5
  /**
   * The gossip digest is built based on randomization rather than just looping through the
   * collection of live endpoints.
   *
   * @param gDigests list of Gossip Digests.
   */
  void makeRandomGossipDigest(List<GossipDigest> gDigests) {
    /* Add the local endpoint state */
    EndpointState epState = endpointStateMap_.get(localEndpoint_);
    int generation = epState.getHeartBeatState().getGeneration();
    int maxVersion = getMaxEndpointStateVersion(epState);
    gDigests.add(new GossipDigest(localEndpoint_, generation, maxVersion));

    List<InetAddress> endpoints = new ArrayList<InetAddress>(endpointStateMap_.keySet());
    Collections.shuffle(endpoints, random_);
    for (InetAddress endpoint : endpoints) {
      epState = endpointStateMap_.get(endpoint);
      if (epState != null) {
        generation = epState.getHeartBeatState().getGeneration();
        maxVersion = getMaxEndpointStateVersion(epState);
        gDigests.add(new GossipDigest(endpoint, generation, maxVersion));
      } else {
        gDigests.add(new GossipDigest(endpoint, 0, 0));
      }
    }

    /* FOR DEBUG ONLY - remove later */
    StringBuilder sb = new StringBuilder();
    for (GossipDigest gDigest : gDigests) {
      sb.append(gDigest);
      sb.append(" ");
    }
    if (logger_.isTraceEnabled()) logger_.trace("Gossip Digests are : " + sb.toString());
  }
Example no. 6
  /**
   * Remove records telling what entity caps node a contact has.
   *
   * @param contact the contact
   */
  public void removeContactCapsNode(Contact contact) {
    Caps caps = null;
    String lastRemovedJid = null;

    Iterator<String> iter = userCaps.keySet().iterator();
    while (iter.hasNext()) {
      String jid = iter.next();

      if (StringUtils.parseBareAddress(jid).equals(contact.getAddress())) {
        caps = userCaps.get(jid);
        lastRemovedJid = jid;
        iter.remove();
      }
    }

    // fire the event only for the last removed JID, so that a single
    // event per contact goes out of the protocol
    if (caps != null) {
      UserCapsNodeListener[] listeners;
      synchronized (userCapsNodeListeners) {
        listeners = userCapsNodeListeners.toArray(NO_USER_CAPS_NODE_LISTENERS);
      }
      if (listeners.length != 0) {
        String nodeVer = caps.getNodeVer();

        for (UserCapsNodeListener listener : listeners)
          listener.userCapsNodeRemoved(lastRemovedJid, nodeVer, false);
      }
    }
  }
Example no. 7
 public List<QName> getProcesses() {
   _rw.readLock().lock();
   try {
     return new ArrayList<QName>(_processes.keySet());
   } finally {
     _rw.readLock().unlock();
   }
 }
Example no. 8
 public Collection<String> getPackages() {
   _rw.readLock().lock();
   try {
     return new ArrayList<String>(_deploymentUnits.keySet());
   } finally {
     _rw.readLock().unlock();
   }
 }
Example no. 9
 /* Sends a Gossip message to an unreachable member */
 void doGossipToUnreachableMember(Message message) {
   double liveEndpoints = liveEndpoints_.size();
   double unreachableEndpoints = unreachableEndpoints_.size();
   if (unreachableEndpoints > 0) {
     /* based on some probability */
     double prob = unreachableEndpoints / (liveEndpoints + 1);
     double randDbl = random_.nextDouble();
     if (randDbl < prob) sendGossip(message, unreachableEndpoints_.keySet());
   }
 }
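For example, with three live endpoints and one unreachable endpoint, prob = 1 / (3 + 1) = 0.25, so the unreachable set receives this gossip message about a quarter of the time; as the live set grows, unreachable members are probed proportionally less often.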
Example no. 10
  /** @throws Exception If failed. */
  public void testAffinityPut() throws Exception {
    Thread.sleep(2 * TOP_REFRESH_FREQ);

    assertEquals(NODES_CNT, client.compute().refreshTopology(false, false).size());

    Map<UUID, Grid> gridsByLocNode = new HashMap<>(NODES_CNT);

    GridClientData partitioned = client.data(PARTITIONED_CACHE_NAME);

    GridClientCompute compute = client.compute();

    for (int i = 0; i < NODES_CNT; i++) gridsByLocNode.put(grid(i).localNode().id(), grid(i));

    for (int i = 0; i < 100; i++) {
      String key = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, key).id();

      assertEquals("Affinity mismatch for key: " + key, primaryNodeId, partitioned.affinity(key));

      assertEquals(primaryNodeId, partitioned.affinity(key));

      // Must go to primary node only. Since backup count is 0, the value must be present on
      // the primary node only.
      partitioned.put(key, "val" + key);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(key);

        if (primaryNodeId.equals(entry.getKey())) assertEquals("val" + key, val);
        else assertNull(val);
      }
    }

    // Now check that we will see the value in the near cache in pinned mode.
    for (int i = 100; i < 200; i++) {
      String pinnedKey = "key" + i;

      UUID primaryNodeId = grid(0).mapKeyToNode(PARTITIONED_CACHE_NAME, pinnedKey).id();

      UUID pinnedNodeId = F.first(F.view(gridsByLocNode.keySet(), F.notEqualTo(primaryNodeId)));

      GridClientNode node = compute.node(pinnedNodeId);

      partitioned.pinNodes(node).put(pinnedKey, "val" + pinnedKey);

      for (Map.Entry<UUID, Grid> entry : gridsByLocNode.entrySet()) {
        Object val = entry.getValue().cache(PARTITIONED_CACHE_NAME).peek(pinnedKey);

        if (primaryNodeId.equals(entry.getKey()) || pinnedNodeId.equals(entry.getKey()))
          assertEquals("val" + pinnedKey, val);
        else assertNull(val);
      }
    }
  }
Example no. 11
  public void deregisterAllVerbHandlers(EndPoint localEndPoint) {
    Iterator<String> keys = verbHandlers_.keySet().iterator();
    String key = null;

    /*
     * endpoint specific verbhandlers can be distinguished because
     * their keys contain the name of the endpoint.
     */
    while (keys.hasNext()) {
      key = keys.next();
      if (key.contains(localEndPoint.toString())) keys.remove();
    }
  }
Example no. 12
  void doStatusCheck() {
    long now = System.currentTimeMillis();

    Set<InetAddress> eps = endpointStateMap_.keySet();
    for (InetAddress endpoint : eps) {
      if (endpoint.equals(localEndpoint_)) continue;

      FailureDetector.instance.interpret(endpoint);
      EndpointState epState = endpointStateMap_.get(endpoint);
      if (epState != null) {
        long duration = now - epState.getUpdateTimestamp();

        if (StorageService.instance.getTokenMetadata().isMember(endpoint))
          epState.setHasToken(true);
        // check if this is a fat client. fat clients are removed automatically from
        // gossip after FatClientTimeout
        if (!epState.getHasToken()
            && !epState.isAlive()
            && !justRemovedEndpoints_.containsKey(endpoint)
            && (duration > FatClientTimeout_)) {
          logger_.info(
              "FatClient "
                  + endpoint
                  + " has been silent for "
                  + FatClientTimeout_
                  + "ms, removing from gossip");
          removeEndpoint(
              endpoint); // will put it in justRemovedEndpoints to respect quarantine delay
          evictFromMembership(endpoint); // can get rid of the state immediately
        }

        if (!epState.isAlive() && (duration > aVeryLongTime_)) {
          evictFromMembership(endpoint);
        }
      }
    }

    if (!justRemovedEndpoints_.isEmpty()) {
      Map<InetAddress, Long> copy = new HashMap<InetAddress, Long>(justRemovedEndpoints_);
      for (Map.Entry<InetAddress, Long> entry : copy.entrySet()) {
        if ((now - entry.getValue()) > QUARANTINE_DELAY) {
          if (logger_.isDebugEnabled())
            logger_.debug(
                QUARANTINE_DELAY + " elapsed, " + entry.getKey() + " gossip quarantine over");
          justRemovedEndpoints_.remove(entry.getKey());
        }
      }
    }
  }
Example no. 13
 private boolean removeTransfer(InboundTransferTask inboundTransfer) {
   synchronized (this) {
     taskQueue.remove(inboundTransfer);
     List<InboundTransferTask> transfers = transfersBySource.get(inboundTransfer.getSource());
     if (transfers != null) {
       if (transfers.remove(inboundTransfer)) {
         if (transfers.isEmpty()) {
           transfersBySource.remove(inboundTransfer.getSource());
         }
         transfersBySegment.keySet().removeAll(inboundTransfer.getSegments());
         return true;
       }
     }
   }
   return false;
 }
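The transfersBySegment.keySet().removeAll(...) call above relies on keySet() returning a live view of the map: removing keys from the view also removes the corresponding entries from the backing map. A minimal self-contained sketch of this idiom (map contents are made up for illustration):

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class KeySetViewDemo {
  public static void main(String[] args) {
    Map<Integer, String> transfersBySegment = new HashMap<>();
    transfersBySegment.put(1, "task-a");
    transfersBySegment.put(2, "task-a");
    transfersBySegment.put(3, "task-b");
    // removing through the keySet() view mutates the backing map
    transfersBySegment.keySet().removeAll(Arrays.asList(1, 2));
    System.out.println(transfersBySegment); // prints {3=task-b}
  }
}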
Example no. 14
  /**
   * Retire all the other versions of the same DU: first take the DU name and insert a version
   * regexp, then try to match this string against the names of already deployed DUs. For instance,
   * if we are deploying DU "AbsenceRequest-2/AbsenceRequest.ode" and there is already a version 2,
   * then the regexp "AbsenceRequest([-\\.](\d)+)?/AbsenceRequest.ode" will be matched against
   * "AbsenceRequest-2/AbsenceRequest.ode" and setRetiredPackage() will be called accordingly.
   */
  private void retirePreviousPackageVersions(DeploymentUnitDir du) {
    // retire all the other versions of the same DU
    String[] nameParts = du.getName().split("/");
    /* Replace the version number (if any) with regexp to match any version number */
    nameParts[0] = nameParts[0].replaceAll("([-\\Q.\\E](\\d)+)?\\z", "");
    nameParts[0] += "([-\\Q.\\E](\\d)+)?";
    StringBuilder duNameRegExp = new StringBuilder(du.getName().length() * 2);
    for (int i = 0, n = nameParts.length; i < n; i++) {
      if (i > 0) duNameRegExp.append("/");
      duNameRegExp.append(nameParts[i]);
    }

    Pattern duNamePattern = Pattern.compile(duNameRegExp.toString());
    for (String deployedDUname : _deploymentUnits.keySet()) {
      Matcher matcher = duNamePattern.matcher(deployedDUname);
      if (matcher.matches()) {
        setRetiredPackage(deployedDUname, true);
      }
    }
  }
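A minimal sketch of the pattern this method builds for the javadoc's example (the class name DuNamePatternDemo is made up for illustration):

import java.util.regex.Pattern;

public class DuNamePatternDemo {
  public static void main(String[] args) {
    // pattern produced while deploying "AbsenceRequest-2/AbsenceRequest.ode": the version
    // suffix "-2" is stripped and replaced by an optional-version regexp
    Pattern duNamePattern =
        Pattern.compile("AbsenceRequest([-\\Q.\\E](\\d)+)?/AbsenceRequest.ode");
    System.out.println(duNamePattern.matcher("AbsenceRequest/AbsenceRequest.ode").matches());    // true
    System.out.println(duNamePattern.matcher("AbsenceRequest-2/AbsenceRequest.ode").matches());  // true
    System.out.println(duNamePattern.matcher("AbsenceRequest.13/AbsenceRequest.ode").matches()); // true
    System.out.println(duNamePattern.matcher("OtherPackage-2/AbsenceRequest.ode").matches());    // false
  }
}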
Example no. 15
  private void doNotify(
      Jedis jedis, Collection<String> keys, Collection<NotifyListener> listeners) {
    if (CollectionUtils.isEmpty(keys) && CollectionUtils.isEmpty(listeners)) {
      return;
    }

    for (String key : keys) {

      Map<String, String> values = jedis.hgetAll(key);
      List<String> currentChildren =
          values == null ? new ArrayList<String>(0) : new ArrayList<String>(values.keySet());
      List<String> oldChildren = cachedNodeMap.get(key);

      // 1. find the nodes that were added
      List<String> addChildren = CollectionUtils.getLeftDiff(currentChildren, oldChildren);
      // 2. find the nodes that were removed
      List<String> decChildren = CollectionUtils.getLeftDiff(oldChildren, currentChildren);

      if (CollectionUtils.isNotEmpty(addChildren)) {
        List<Node> nodes = new ArrayList<Node>(addChildren.size());
        for (String child : addChildren) {
          Node node = NodeRegistryUtils.parse(child);
          nodes.add(node);
        }
        for (NotifyListener listener : listeners) {
          notify(NotifyEvent.ADD, nodes, listener);
        }
      }
      if (CollectionUtils.isNotEmpty(decChildren)) {
        List<Node> nodes = new ArrayList<Node>(decChildren.size());
        for (String child : decChildren) {
          Node node = NodeRegistryUtils.parse(child);
          nodes.add(node);
        }
        for (NotifyListener listener : listeners) {
          notify(NotifyEvent.REMOVE, nodes, listener);
        }
      }
      cachedNodeMap.put(key, currentChildren);
    }
  }
Example no. 16
 @Test
 public void iterateOverMapKeys() {
   HazelcastClient hClient = getHazelcastClient();
   Map<String, String> map = hClient.getMap("iterateOverMapKeys");
   map.put("1", "A");
   map.put("2", "B");
   map.put("3", "C");
   Set<String> keySet = map.keySet();
   assertEquals(3, keySet.size());
   Set<String> s = new HashSet<String>();
   for (String string : keySet) {
     s.add(string);
     assertTrue(Arrays.asList("1", "2", "3").contains(string));
   }
   assertEquals(3, s.size());
   Iterator<String> iterator = keySet.iterator();
   while (iterator.hasNext()) {
     iterator.next();
     iterator.remove();
   }
   assertEquals(0, map.size());
 }
Example no. 17
  public Collection<QName> undeploy(final String duName) {
    try {
      exec(
          new Callable<Collection<QName>>() {
            public Collection<QName> call(ConfStoreConnection conn) {
              DeploymentUnitDAO dudao = conn.getDeploymentUnit(duName);
              if (dudao != null) dudao.delete();
              return null;
            }
          });
    } catch (Exception ex) {
      __log.error(
          "Error synchronizing with data store; " + duName + " may reappear after restart!");
    }

    Collection<QName> undeployed = Collections.emptyList();
    DeploymentUnitDir du;
    _rw.writeLock().lock();
    try {
      du = _deploymentUnits.remove(duName);
      if (du != null) {
        undeployed = toPids(du.getProcessNames(), du.getVersion());
      }

      for (QName pn : undeployed) {
        fireEvent(new ProcessStoreEvent(ProcessStoreEvent.Type.UNDEPLOYED, pn, du.getName()));
        __log.info(__msgs.msgProcessUndeployed(pn));
      }

      _processes.keySet().removeAll(undeployed);
    } finally {
      _rw.writeLock().unlock();
    }

    return undeployed;
  }
Example no. 18
 void addHeaders(HttpURLConnection conn)
 {
   for (String key : headers.keySet())
     conn.setRequestProperty(key, headers.get(key));
 }
Example no. 19
 @ManagedAttribute
 public Set<String> getLogAllMessagesFor() {
   return logAllMessagesForUsers.keySet();
 }
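Unlike Examples no. 7 and no. 8, which copy the keys into a fresh list while holding a read lock, this managed attribute returns the live keySet() view, so later changes to logAllMessagesForUsers remain visible to the caller (and removals through the returned set would, for most map implementations, write through to the map).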
Example no. 20
  @Override
  public void onTopologyUpdate(CacheTopology cacheTopology, boolean isRebalance) {
    if (trace)
      log.tracef(
          "Received new CH %s for cache %s", cacheTopology.getWriteConsistentHash(), cacheName);

    int numStartedTopologyUpdates = activeTopologyUpdates.incrementAndGet();
    if (isRebalance) {
      rebalanceInProgress.set(true);
    }
    final ConsistentHash previousCh =
        this.cacheTopology != null ? this.cacheTopology.getWriteConsistentHash() : null;
    // Ensures writes to the data container use the right consistent hash
    // No need for a try/finally block, since it's just an assignment
    stateTransferLock.acquireExclusiveTopologyLock();
    this.cacheTopology = cacheTopology;
    if (numStartedTopologyUpdates == 1) {
      updatedKeys = new ConcurrentHashSet<Object>();
    }
    stateTransferLock.releaseExclusiveTopologyLock();
    stateTransferLock.notifyTopologyInstalled(cacheTopology.getTopologyId());

    try {
      // fetch transactions and data segments from other owners if this is enabled
      if (isTransactional || isFetchEnabled) {
        Set<Integer> addedSegments;
        if (previousCh == null) {
          // we start fresh, without any data, so we need to pull everything we own according to
          // writeCh

          addedSegments = getOwnedSegments(cacheTopology.getWriteConsistentHash());

          if (trace) {
            log.tracef("On cache %s we have: added segments: %s", cacheName, addedSegments);
          }
        } else {
          Set<Integer> previousSegments = getOwnedSegments(previousCh);
          Set<Integer> newSegments = getOwnedSegments(cacheTopology.getWriteConsistentHash());

          Set<Integer> removedSegments = new HashSet<Integer>(previousSegments);
          removedSegments.removeAll(newSegments);

          // This is a rebalance, we need to request the segments we own in the new CH.
          addedSegments = new HashSet<Integer>(newSegments);
          addedSegments.removeAll(previousSegments);

          if (trace) {
            log.tracef(
                "On cache %s we have: removed segments: %s; new segments: %s; old segments: %s; added segments: %s",
                cacheName, removedSegments, newSegments, previousSegments, addedSegments);
          }

          // remove inbound transfers and any data for segments we no longer own
          cancelTransfers(removedSegments);

          // If L1.onRehash is enabled, "removed" segments are actually moved to L1. The new
          // (and old) owners will automatically add the nodes that no longer own a key to that
          // key's requestors list.
          invalidateSegments(newSegments, removedSegments);

          // check if any of the existing transfers should be restarted from a different source
          // because the initial source is no longer a member
          Set<Address> members =
              new HashSet<Address>(cacheTopology.getReadConsistentHash().getMembers());
          synchronized (this) {
            for (Iterator<Address> it = transfersBySource.keySet().iterator(); it.hasNext(); ) {
              Address source = it.next();
              if (!members.contains(source)) {
                if (trace) {
                  log.tracef(
                      "Removing inbound transfers from source %s for cache %s", source, cacheName);
                }
                List<InboundTransferTask> inboundTransfers = transfersBySource.get(source);
                it.remove();
                for (InboundTransferTask inboundTransfer : inboundTransfers) {
                  // these segments will be restarted if they are still in new write CH
                  if (trace) {
                    log.tracef(
                        "Removing inbound transfers for segments %s from source %s for cache %s",
                        inboundTransfer.getSegments(), source, cacheName);
                  }
                  transfersBySegment.keySet().removeAll(inboundTransfer.getSegments());
                  addedSegments.addAll(inboundTransfer.getUnfinishedSegments());
                }
              }
            }

            // exclude those that are already in progress from a valid source
            addedSegments.removeAll(transfersBySegment.keySet());
          }
        }

        if (!addedSegments.isEmpty()) {
          addTransfers(addedSegments); // add transfers for new or restarted segments
        }
      }
    } finally {
      stateTransferLock.notifyTransactionDataReceived(cacheTopology.getTopologyId());

      if (activeTopologyUpdates.decrementAndGet() == 0) {
        notifyEndOfTopologyUpdate(cacheTopology.getTopologyId());
      }
    }
  }
Example no. 21
 public Set<InetAddress> getUnreachableMembers() {
   return unreachableEndpoints_.keySet();
 }
Example no. 22
  public int reduce(Optimizer optimizer, int expMinCluster, int expMaxCluster)
      throws ExecutionException, InterruptedException {
    // 1. start with the min and max provided; what is the order of magnitude (oom)?
    int mag = computeMagnitude(expMaxCluster - expMinCluster + 1);
    // 2. set the search hop; if the oom is 0, use an initial hop of 1
    int hop = mag == 0 ? 1 : (int) Math.pow(10.0, mag);
    // 3. find the meaningful range, i.e., the real min and max cluster numbers for which a
    // real-numbered optimisation score can be computed
    int[] range = computeClusterNumberRange(expMinCluster, expMaxCluster, hop, optimizer);
    if (range[0] == -1 && range[1] != -1) {
      System.err.println(
          "[!]No meaningful lower range. Only 1 possible cluster number:" + range[1]);
      return range[1];
    } else if (range[1] == -1 && range[0] != -1) {
      System.err.println(
          "[!]No meaningful upper range. Only 1 possible cluster number:" + range[0]);
      return range[0];
    } else if (range[0] == -1 && range[1] == -1) {
      System.err.println("[!]No meaningful cluster number, cannot cluster");
      return -1;
    }
    System.out.println(
        "[]Input range:"
            + expMinCluster
            + "-"
            + expMaxCluster
            + ", Real range:"
            + range[0]
            + "-"
            + range[1]);
    expMinCluster = Math.min(range[0], range[1]);
    expMaxCluster = Math.max(range[0], range[1]);
    // 4. reset hop based on new range
    mag = computeMagnitude(expMaxCluster - expMinCluster + 1);
    hop = mag == 0 ? 1 : (int) Math.pow(10.0, mag);
    double currentMaxOptimizationScore = 0;
    // todo: for location, why min > max; select range based on best interval, is it correct?
    int current_iteration = 0;
    while (current_iteration < maxIteration) {
      current_iteration++;
      currentMaxOptimizationScore = 0;
      // 5. compute optimization scores based on the search space defined by expMinCluster,
      // expMaxCluster, and range
      Map<Integer, Double> triedSplitsAndScores =
          computeOptimizationScores(expMinCluster, expMaxCluster, hop, optimizer);
      if (triedSplitsAndScores != null) {
        // a null result means we were already using the minimum hop, but no meaningful
        // optimisation score could be computed within the range (TODO can this really happen?)
        // what is the real hop, max score?
        List<Integer> intervals = new ArrayList<>(triedSplitsAndScores.keySet());
        Collections.sort(intervals);
        int realHop = -1, lowerInterval = -1;
        for (int i : intervals) {
          if (lowerInterval != -1 && realHop == -1) realHop = Math.abs(i - lowerInterval);
          lowerInterval = i;
          Double score = triedSplitsAndScores.get(i);
          if (!Double.isInfinite(score)
              && !Double.isNaN(score)
              && score != Double.MAX_VALUE
              && score > currentMaxOptimizationScore) currentMaxOptimizationScore = score;
        }

        double global_max = 0.0;
        for (Double d : scores.values()) {
          if (!Double.isInfinite(d) && !Double.isNaN(d) && d > global_max) global_max = d;
        }

        if (stop(realHop, currentMaxOptimizationScore, global_max)) break;
        // found a new max score, reset the iterations to try
        if (currentMaxOptimizationScore > global_max) current_iteration = 0;

        int newHop = reduceHop(realHop);
        hop = newHop;
      } else break;
    }

    int bestSplit = -1;
    double maxScore = 0;
    for (Map.Entry<Integer, Double> entry : scores.entrySet()) {
      Double score = entry.getValue();
      if (!score.isNaN() && !score.isInfinite() && score > maxScore) {
        maxScore = score;
        bestSplit = entry.getKey();
      }
    }
    System.out.println("[]Final Best=" + bestSplit + ", footprint:" + scores);
    return bestSplit;
  }
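As a worked example (assuming computeMagnitude returns the decimal order of magnitude of its argument): for expMinCluster = 2 and expMaxCluster = 75 the range width is 74, so mag = 1 and the initial hop is 10^1 = 10; each time no valid score is found at the current hop, reduceHop shrinks it, until the search bottoms out at hop = 1.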