Example 1
 public Set<String> addOgcCrsPrefix(Collection<Integer> crses) {
   HashSet<String> withPrefix = Sets.newHashSetWithExpectedSize(crses.size());
   for (Integer crs : crses) {
     withPrefix.add(addOgcCrsPrefix(crs));
   }
   return withPrefix;
 }
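Every snippet on this page uses Guava's Sets.newHashSetWithExpectedSize(int). Per its contract, it returns a HashSet whose initial table is the smallest that can hold the given number of elements without resizing; the argument is an element count, unlike new HashSet<>(n), where n is an initial table capacity. A minimal self-contained sketch (the class name ExpectedSizeDemo is ours, not taken from any example here):

 import com.google.common.collect.Sets;
 import java.util.Set;

 public class ExpectedSizeDemo {
   public static void main(String[] args) {
     // Sized for 100 elements: all 100 adds complete without an internal rehash.
     Set<Integer> set = Sets.newHashSetWithExpectedSize(100);
     for (int i = 0; i < 100; i++) {
       set.add(i);
     }
     System.out.println(set.size()); // prints 100
   }
 }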
Example 2
    /**
     * Compiles the projection by:
     * 1) adding a RowCount aggregate function, if not already present, when limiting rows; we
     *    need it to track how many rows have been scanned; and
     * 2) reordering the aggregation functions (fixed-length aggregates first) to optimize
     *    positional access of the aggregated values.
     */
    private void compile() throws SQLException {
      final Set<SingleAggregateFunction> aggFuncSet =
          Sets.newHashSetWithExpectedSize(context.getExpressionManager().getExpressionCount());

      Iterator<Expression> expressions = context.getExpressionManager().getExpressions();
      while (expressions.hasNext()) {
        Expression expression = expressions.next();
        expression.accept(
            new SingleAggregateFunctionVisitor() {
              @Override
              public Iterator<Expression> visitEnter(SingleAggregateFunction function) {
                aggFuncSet.add(function);
                return Iterators.emptyIterator();
              }
            });
      }
      if (aggFuncSet.isEmpty() && groupBy.isEmpty()) {
        return;
      }
      List<SingleAggregateFunction> aggFuncs = new ArrayList<SingleAggregateFunction>(aggFuncSet);
      Collections.sort(aggFuncs, SingleAggregateFunction.SCHEMA_COMPARATOR);

      int minNullableIndex = getMinNullableIndex(aggFuncs, groupBy.isEmpty());
      context
          .getScan()
          .setAttribute(
              GroupedAggregateRegionObserver.AGGREGATORS,
              ServerAggregators.serialize(aggFuncs, minNullableIndex));
      ClientAggregators clientAggregators = new ClientAggregators(aggFuncs, minNullableIndex);
      context.getAggregationManager().setAggregators(clientAggregators);
    }
Example 3
  /**
   * Returns the set of all applicable filters for this dimension.
   *
   * <p>The returned set can contain null, indicating that the no-filter option must also be used.
   *
   * @param allFilters the available filters, excluding the no-filter option.
   * @return the filters to use.
   */
  @NonNull
  public Set<String> getApplicableFilters(@NonNull Set<String> allFilters) {
    if (!enable) {
      return Collections.singleton(null);
    }

    Set<String> results =
        reset
            ? Sets.<String>newHashSetWithExpectedSize(allFilters.size() + 1)
            : Sets.newHashSet(allFilters);

    if (exclude != null) {
      results.removeAll(exclude);
    }

    if (include != null) {
      // Only include entries that are present in the full list.
      for (String inc : include) {
        if (allFilters.contains(inc)) {
          results.add(inc);
        }
      }
    }

    return results;
  }
Example 4
 @Override
 public Set<ValueSpecification> getResults(
     final FunctionCompilationContext context,
     final ComputationTarget target,
     final Map<ValueSpecification, ValueRequirement> inputs) {
   String curveName = null;
   String curveCalculationConfig = null;
   final ComputationTargetSpecification targetSpec = target.toSpecification();
   final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
   for (final Map.Entry<ValueSpecification, ValueRequirement> input : inputs.entrySet()) {
     if (ValueRequirementNames.YIELD_CURVE.equals(input.getKey().getValueName())) {
       assert curveName == null;
       assert curveCalculationConfig == null;
       curveName = input.getKey().getProperty(ValuePropertyNames.CURVE);
       curveCalculationConfig =
           input.getKey().getProperty(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
       assert curveName != null;
       assert curveCalculationConfig != null;
       final ValueProperties.Builder properties = createValueProperties(target);
       properties.with(ValuePropertyNames.CURVE, curveName);
       properties.with(ValuePropertyNames.CURVE_CALCULATION_CONFIG, curveCalculationConfig);
       results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
     }
   }
   s_logger.debug("getResults(2) returning " + results);
   return results;
 }
Example 5
  @Override
  public Map<String, FudgeMsg> doSnapshot(Collection<String> uniqueIds) {
    ArgumentChecker.notNull(uniqueIds, "Unique IDs");
    if (uniqueIds.isEmpty()) {
      return Collections.emptyMap();
    }

    Set<String> buids = Sets.newHashSetWithExpectedSize(uniqueIds.size());
    for (String uniqueId : uniqueIds) {
      String buid = "/buid/" + uniqueId;
      buids.add(buid);
    }

    // caching ref data provider must not be used here
    Map<String, FudgeMsg> snapshotValues =
        getReferenceDataProvider()
            .getReferenceDataIgnoreCache(buids, BloombergDataUtils.STANDARD_FIELDS_SET);
    Map<String, FudgeMsg> returnValue = Maps.newHashMap();
    for (String buid : buids) {
      FudgeMsg fieldData = snapshotValues.get(buid);
      if (fieldData == null) {
        Exception e = new Exception("Stack Trace");
        e.fillInStackTrace();
        s_logger.error("Could not find result for {} in data snapshot, skipping", buid, e);
        // throw new OpenGammaRuntimeException("Result for " + buid + " was not found");
      } else {
        String securityUniqueId = buid.substring("/buid/".length());
        returnValue.put(securityUniqueId, fieldData);
      }
    }
    return returnValue;
  }
Example 6
 // Produces a set of edge-to-edge paths using the set of infrastructure
 // paths and the given edge links.
 private Set<Path> edgeToEdgePaths(EdgeLink srcLink, EdgeLink dstLink, Set<Path> paths) {
   Set<Path> endToEndPaths = Sets.newHashSetWithExpectedSize(paths.size());
   for (Path path : paths) {
     endToEndPaths.add(edgeToEdgePath(srcLink, dstLink, path));
   }
   return endToEndPaths;
 }
Example 7
 /**
  * Adds 'value' to either the predecessor or successor set, updating the appropriate field as
  * necessary.
  *
  * @return {@code true} if the set was modified; {@code false} if the set was not modified
  */
 private boolean add(boolean predecessorSet, Node<T> value) {
   final Collection<Node<T>> set = predecessorSet ? preds : succs;
   if (set == null) {
     // null -> SingletonList
     return updateField(predecessorSet, Collections.singletonList(value));
   }
   if (set.contains(value)) {
     // already exists in this set
     return false;
   }
   int previousSize = set.size();
   if (previousSize == 1) {
     // SingletonList -> ArrayList
     Collection<Node<T>> newSet = new ArrayList<>(ARRAYLIST_THRESHOLD);
     newSet.addAll(set);
     newSet.add(value);
     return updateField(predecessorSet, newSet);
   } else if (previousSize < ARRAYLIST_THRESHOLD) {
     // ArrayList
     set.add(value);
     return true;
   } else if (previousSize == ARRAYLIST_THRESHOLD) {
     // ArrayList -> HashSet
     Collection<Node<T>> newSet = Sets.newHashSetWithExpectedSize(INITIAL_HASHSET_CAPACITY);
     newSet.addAll(set);
     newSet.add(value);
     return updateField(predecessorSet, newSet);
   } else {
     // HashSet
     set.add(value);
     return true;
   }
 }
Example 8
  private Iterable<ChangeData> byCommitsOnBranchNotMergedFromDatabase(
      Repository repo, ReviewDb db, Branch.NameKey branch, List<String> hashes)
      throws OrmException, IOException {
    Set<Change.Id> changeIds = Sets.newHashSetWithExpectedSize(hashes.size());
    String lastPrefix = null;
    for (Ref ref : repo.getRefDatabase().getRefs(RefNames.REFS_CHANGES).values()) {
      String r = ref.getName();
      if ((lastPrefix != null && r.startsWith(lastPrefix))
          || !hashes.contains(ref.getObjectId().name())) {
        continue;
      }
      Change.Id id = Change.Id.fromRef(r);
      if (id == null) {
        continue;
      }
      if (changeIds.add(id)) {
        lastPrefix = r.substring(0, r.lastIndexOf('/'));
      }
    }

    List<ChangeData> cds = new ArrayList<>(hashes.size());
    for (Change c : db.changes().get(changeIds)) {
      if (c.getDest().equals(branch) && c.getStatus() != Change.Status.MERGED) {
        cds.add(changeDataFactory.create(db, c));
      }
    }
    return cds;
  }
Example 9
  private static class UnificationResultImpl implements UnificationResult {
    private boolean success = true;
    private final Map<TypeConstructor, TypeProjection> substitution =
        Maps.newHashMapWithExpectedSize(1);
    private final Set<TypeConstructor> failedVariables = Sets.newHashSetWithExpectedSize(0);

    @Override
    public boolean isSuccess() {
      return success;
    }

    public void fail() {
      success = false;
    }

    @Override
    @NotNull
    public Map<TypeConstructor, TypeProjection> getSubstitution() {
      return substitution;
    }

    public void put(TypeConstructor key, TypeProjection value) {
      if (failedVariables.contains(key)) return;

      TypeProjection oldValue = substitution.put(key, value);
      if (oldValue != null && !oldValue.equals(value)) {
        substitution.remove(key);
        failedVariables.add(key);
        fail();
      }
    }
  }
Example 10
    private Set<AttributeReference> combineAttributeReferences(AttributeReference attRef) {
      Set<AttributeReference> result = Sets.newHashSetWithExpectedSize(attRef.getKeys().size());

      combineAttributeReferencesRec(
          attRef.getKeys(), Lists.<AttributeReference.Key>newArrayList(), result);

      return result;
    }
Example 11
 private Set<String> stripProtocol(Set<String> values) {
   final Set<String> newValues = Sets.newHashSetWithExpectedSize(values.size());
   final int index = DocumentConstants.CHANGE_HISTORY_PROTOCOL.length();
   for (String value : values) {
     newValues.add(value.substring(index));
   }
   return newValues;
 }
Example 12
 @NonNull
 public NdkOptions abiFilter(String filter) {
   if (abiFilters == null) {
     abiFilters = Sets.newHashSetWithExpectedSize(2);
   }
   abiFilters.add(filter);
   return this;
 }
Example 13
 @NonNull
 public NdkOptions abiFilters(String... filters) {
   if (abiFilters == null) {
     abiFilters = Sets.newHashSetWithExpectedSize(2);
   }
   Collections.addAll(abiFilters, filters);
   return this;
 }
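Examples 12 and 13 are builder-style mutators that create the backing abiFilters set lazily on first use and return this for chaining. A hedged usage sketch (assuming NdkOptions has an accessible no-arg constructor; the variable name is ours):

 NdkOptions options = new NdkOptions();
 options
     .abiFilter("armeabi-v7a") // first call allocates the set
     .abiFilters("arm64-v8a", "x86"); // subsequent calls reuse it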
Example 14
 private static ClauseContext createContextForProjects(int... projects) {
   Set<ProjectIssueTypeContext> ctxs = Sets.newHashSetWithExpectedSize(projects.length);
   for (int project : projects) {
     ctxs.add(
         new ProjectIssueTypeContextImpl(
             new ProjectContextImpl((long) project), AllIssueTypesContext.getInstance()));
   }
   return new ClauseContextImpl(ctxs);
 }
Example 15
 public FunctionInputsImpl(
     final ComputationTargetSpecificationResolver.AtVersionCorrection resolver,
     final Collection<? extends ComputedValue> values,
     final Collection<ValueSpecification> missingValues) {
   _resolver = resolver;
   _missingValues = missingValues;
   _values = Sets.newHashSetWithExpectedSize(values.size());
   for (final ComputedValue value : values) {
     addValue(value);
   }
 }
Example 16
    private void populateNeighbors(double edgeDensityLower, double edgeDensityUpper) {
      for (Node node : nodes) {
        int edgeCount = determineEdgeCount(edgeDensityLower, edgeDensityUpper);
        node.neighbors = Sets.newHashSetWithExpectedSize(edgeCount);

        while (node.neighbors.size() < edgeCount) {
          Node neighbor = nodes.get(random.nextInt(nodeCount));
          node.neighbors.add(neighbor);
        }
      }
    }
Example 17
 static ImmutableSet<Parameter> forParameterList(List<? extends VariableElement> variables) {
   ImmutableSet.Builder<Parameter> builder = ImmutableSet.builder();
   Set<String> names = Sets.newHashSetWithExpectedSize(variables.size());
   for (VariableElement variable : variables) {
     Parameter parameter = forVariableElement(variable);
     checkArgument(names.add(parameter.name));
     builder.add(parameter);
   }
   ImmutableSet<Parameter> parameters = builder.build();
   checkArgument(variables.size() == parameters.size());
   return parameters;
 }
Example 18
 @Override
 public Set<ValueSpecification> getResults(
     final FunctionCompilationContext context, final ComputationTarget target) {
   final ValueProperties.Builder properties = createValueProperties(target);
   properties.withAny(ValuePropertyNames.CURVE);
   properties.withAny(ValuePropertyNames.CURVE_CALCULATION_CONFIG);
   final Set<ValueSpecification> results = Sets.newHashSetWithExpectedSize(2);
   final ComputationTargetSpecification targetSpec = target.toSpecification();
   results.add(new ValueSpecification(YCNS_REQUIREMENT, targetSpec, properties.get()));
   s_logger.debug("getResults(1) = " + results);
   return results;
 }
Example 19
  private List<SingularityTaskRequest> checkForStaleScheduledTasks(
      List<SingularityPendingTask> pendingTasks, List<SingularityTaskRequest> taskRequests) {
    final Set<String> foundPendingTaskId = Sets.newHashSetWithExpectedSize(taskRequests.size());
    final Set<String> requestIds = Sets.newHashSetWithExpectedSize(taskRequests.size());

    for (SingularityTaskRequest taskRequest : taskRequests) {
      foundPendingTaskId.add(taskRequest.getPendingTask().getPendingTaskId().getId());
      requestIds.add(taskRequest.getRequest().getId());
    }

    for (SingularityPendingTask pendingTask : pendingTasks) {
      if (!foundPendingTaskId.contains(pendingTask.getPendingTaskId().getId())) {
        LOG.info("Removing stale pending task {}", pendingTask.getPendingTaskId());
        taskManager.deletePendingTask(pendingTask.getPendingTaskId());
      }
    }

    // TODO this check isn't necessary if we keep track better during deploys
    final Map<String, SingularityRequestDeployState> deployStates =
        deployManager.getRequestDeployStatesByRequestIds(requestIds);
    final List<SingularityTaskRequest> taskRequestsWithValidDeploys =
        Lists.newArrayListWithCapacity(taskRequests.size());

    for (SingularityTaskRequest taskRequest : taskRequests) {
      SingularityRequestDeployState requestDeployState =
          deployStates.get(taskRequest.getRequest().getId());

      if (!matchesDeploy(requestDeployState, taskRequest)) {
        LOG.info(
            "Removing stale pending task {} because the deployId did not match active/pending deploys {}",
            taskRequest.getPendingTask().getPendingTaskId(),
            requestDeployState);
        taskManager.deletePendingTask(taskRequest.getPendingTask().getPendingTaskId());
      } else {
        taskRequestsWithValidDeploys.add(taskRequest);
      }
    }

    return taskRequestsWithValidDeploys;
  }
Example 20
  private List<SingularityPendingTask> getScheduledTaskIds(
      int numMissingInstances,
      List<SingularityTaskId> matchingTaskIds,
      SingularityRequest request,
      RequestState state,
      SingularityDeployStatistics deployStatistics,
      String deployId,
      SingularityPendingRequest pendingRequest) {
    final Optional<Long> nextRunAt =
        getNextRunAt(request, state, deployStatistics, pendingRequest.getPendingType());

    if (!nextRunAt.isPresent()) {
      return Collections.emptyList();
    }

    final Set<Integer> inuseInstanceNumbers =
        Sets.newHashSetWithExpectedSize(matchingTaskIds.size());

    for (SingularityTaskId matchingTaskId : matchingTaskIds) {
      inuseInstanceNumbers.add(matchingTaskId.getInstanceNo());
    }

    final List<SingularityPendingTask> newTasks =
        Lists.newArrayListWithCapacity(numMissingInstances);

    int nextInstanceNumber = 1;

    for (int i = 0; i < numMissingInstances; i++) {
      while (inuseInstanceNumbers.contains(nextInstanceNumber)) {
        nextInstanceNumber++;
      }

      newTasks.add(
          new SingularityPendingTask(
              new SingularityPendingTaskId(
                  request.getId(),
                  deployId,
                  nextRunAt.get(),
                  nextInstanceNumber,
                  pendingRequest.getPendingType(),
                  pendingRequest.getTimestamp()),
              pendingRequest.getCmdLineArgsList(),
              pendingRequest.getUser()));

      nextInstanceNumber++;
    }

    return newTasks;
  }
Example 21
 /** {@inheritDoc} */
 @Override
 public Set<Long> readGraphs(Result res) {
   byte[] graphBytes = res.getValue(CF_META_BYTES, COL_GRAPHS_BYTES);
   Set<Long> result = null;
   if (graphBytes != null) {
     int graphCount = Bytes.toInt(graphBytes);
     result = Sets.newHashSetWithExpectedSize(graphCount);
     int offset = Bytes.SIZEOF_INT;
     for (int i = 0; i < graphCount; i++) {
       result.add(Bytes.toLong(graphBytes, offset));
       offset += Bytes.SIZEOF_LONG;
     }
   }
   return result;
 }
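The loop above implies a flat layout: a 4-byte element count followed by that many 8-byte longs. A hedged sketch of the matching encoder (writeGraphs is a hypothetical name, not part of the class above; Bytes is HBase's org.apache.hadoop.hbase.util.Bytes, whose put methods return the next write offset):

 // Hypothetical inverse of readGraphs: [int count][long] * count.
 static byte[] writeGraphs(java.util.Set<Long> graphs) {
   byte[] bytes = new byte[Bytes.SIZEOF_INT + graphs.size() * Bytes.SIZEOF_LONG];
   int offset = Bytes.putInt(bytes, 0, graphs.size());
   for (long graph : graphs) {
     offset = Bytes.putLong(bytes, offset, graph);
   }
   return bytes;
 }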
Example 22
 @Override
 public void readFrom(StreamInput in) throws IOException {
   super.readFrom(in);
   recoveryId = in.readLong();
   shardId = ShardId.readShardId(in);
   if (in.getVersion().onOrAfter(Version.V_1_5_0)) {
     snapshotFiles = Store.MetadataSnapshot.read(in);
   } else {
     int size = in.readVInt();
     legacySnapshotFiles = Sets.newHashSetWithExpectedSize(size);
     for (int i = 0; i < size; i++) {
       legacySnapshotFiles.add(in.readString());
     }
   }
 }
Example 23
 @Override
 public Set<ValueRequirement> getRequirements(
     final FunctionCompilationContext context,
     final ComputationTarget target,
     final ValueRequirement desiredValue) {
   final ValueProperties constraints = desiredValue.getConstraints();
   final Set<String> forwardCurveNames =
       constraints.getValues(YieldCurveFunction.PROPERTY_FORWARD_CURVE);
   if (forwardCurveNames == null || forwardCurveNames.size() != 1) {
     return null;
   }
   final Set<String> fundingCurveNames =
       constraints.getValues(YieldCurveFunction.PROPERTY_FUNDING_CURVE);
   if (fundingCurveNames == null || fundingCurveNames.size() != 1) {
     return null;
   }
   final Set<String> surfaceNames = constraints.getValues(ValuePropertyNames.SURFACE);
   if (surfaceNames == null || surfaceNames.size() != 1) {
     return null;
   }
   final Set<String> curveCalculationMethods =
       constraints.getValues(ValuePropertyNames.CURVE_CALCULATION_METHOD);
   if (curveCalculationMethods == null || curveCalculationMethods.size() != 1) {
     return null;
   }
   final String forwardCurveName = forwardCurveNames.iterator().next();
   final String fundingCurveName = fundingCurveNames.iterator().next();
   final String surfaceName = surfaceNames.iterator().next();
   final String curveCalculationMethod = curveCalculationMethods.iterator().next();
   final Set<ValueRequirement> requirements = Sets.newHashSetWithExpectedSize(4);
   final Currency currency = FinancialSecurityUtils.getCurrency(target.getSecurity());
   requirements.add(
       getCurveRequirement(
           forwardCurveName,
           forwardCurveName,
           fundingCurveName,
           curveCalculationMethod,
           currency));
   requirements.add(
       getCurveRequirement(
           fundingCurveName,
           forwardCurveName,
           fundingCurveName,
           curveCalculationMethod,
           currency));
   requirements.add(getVolatilityRequirement(surfaceName, currency));
   return requirements;
 }
Example 24
 protected Set<ComputationTargetSpecification> decodeComputationTargets(
     final FudgeDeserializer deserializer, final FudgeMsg msg) {
   final FudgeMsg submsg = msg.getMessage(COMPUTATION_TARGETS_FIELD);
   if (submsg == null) {
     return Collections.emptySet();
   }
   final Set<ComputationTargetSpecification> result =
       Sets.newHashSetWithExpectedSize(submsg.getNumFields());
   for (final FudgeField field : submsg) {
     result.add(
         deserializer
             .fieldValueToObject(ComputationTargetReference.class, field)
             .getSpecification());
   }
   return result;
 }
Example 25
  @NotNull
  private <D extends DeclarationDescriptor> Collection<D> substitute(
      @NotNull Collection<D> descriptors) {
    if (substitutor.isEmpty()) return descriptors;
    if (descriptors.isEmpty()) return descriptors;

    Set<D> result = Sets.newHashSetWithExpectedSize(descriptors.size());
    for (D descriptor : descriptors) {
      D substitute = substitute(descriptor);
      if (substitute != null) {
        result.add(substitute);
      }
    }

    return result;
  }
Example 26
 @NonNull
 public NdkOptions setAbiFilters(Collection<String> filters) {
   if (filters != null) {
     if (abiFilters == null) {
       abiFilters = Sets.newHashSetWithExpectedSize(filters.size());
     } else {
       abiFilters.clear();
     }
     for (String filter : filters) {
       abiFilters.add(filter);
     }
   } else {
     abiFilters = null;
   }
   return this;
 }
Example 27
  @Override
  public Collection<User> loadAllForRole(Role role) {
    final String roleId = role.getId();
    final DBObject query = BasicDBObjectBuilder.start(UserImpl.ROLES, new ObjectId(roleId)).get();

    final List<DBObject> result = query(UserImpl.class, query);
    if (result == null || result.isEmpty()) {
      return Collections.emptySet();
    }
    final Set<User> users = Sets.newHashSetWithExpectedSize(result.size());
    for (DBObject dbObject : result) {
      //noinspection unchecked
      users.add(new UserImpl((ObjectId) dbObject.get("_id"), dbObject.toMap()));
    }
    return users;
  }
Example 28
  public void process(ProcessingAssistant processingAssistant) throws Exception {
    final Set<String> xmlRootElements = Sets.newHashSetWithExpectedSize(jaxbClasses.size());
    final Map<String, Class<?>> entityClasses = Maps.newHashMapWithExpectedSize(jaxbClasses.size());

    LOGGER.info("Analyze JAXB Classes");
    analyzeJaxbClasses(xmlRootElements, entityClasses);

    process(Mode.SAVE, xmlRootElements, entityClasses);
    processingAssistant.createConstraints();
    process(
        Mode.DELETE,
        xmlRootElements,
        entityClasses); // update archives contain not only new objects but also objects to be
                        // deleted
    processingAssistant.finish();
  }
Example 29
  private void onTokenTreeChanged(ZooKeeperTreeNode tokenNode) {

    if (tokenNode != null) {
      Collection<ZooKeeperTreeNode> childNodes = tokenNode.getChildren().values();
      Set<UrlTokenDto> tokenDtos = Sets.newHashSetWithExpectedSize(childNodes.size());
      for (ZooKeeperTreeNode node : childNodes) {
        try {
          UrlTokenDto dto = objectSerializer.readValue(node.getData(), UrlTokenDto.class);
          tokenDtos.add(dto);
        } catch (IOException e) {
          logger.error(String.format("Unable to deserialize UrlToken node: %s", node.getPath()));
        }
      }
      processTokenDtos(tokenDtos);
    }
  }
Example 30
  @SuppressWarnings("UnnecessaryQualifiedReference")
  static void broadcastNetworkInfo() {
    try (final SemaphoreResource semaphore = SemaphoreResource.acquire(activeBroadcastSemaphore)) {
      // populate with info directly from configuration
      final Optional<NetworkConfiguration> networkConfiguration =
          NetworkConfigurations.getNetworkConfiguration();
      final List<com.eucalyptus.cluster.Cluster> clusters = Clusters.getInstance().listValues();

      final NetworkInfoSource source = cacheSource();
      final Set<String> dirtyPublicAddresses = PublicAddresses.dirtySnapshot();
      final Set<RouteKey> invalidStateRoutes = Sets.newHashSetWithExpectedSize(50);
      final int sourceFingerprint =
          fingerprint(source, clusters, dirtyPublicAddresses, NetworkGroups.NETWORK_CONFIGURATION);
      final NetworkInfo info =
          NetworkInfoBroadcasts.buildNetworkConfiguration(
              networkConfiguration,
              source,
              Suppliers.ofInstance(clusters),
              new Supplier<String>() {
                @Override
                public String get() {
                  return Topology.lookup(Eucalyptus.class).getInetAddress().getHostAddress();
                }
              },
              new Function<List<String>, List<String>>() {
                @Nullable
                @Override
                public List<String> apply(final List<String> defaultServers) {
                  return NetworkConfigurations.loadSystemNameservers(defaultServers);
                }
              },
              dirtyPublicAddresses,
              invalidStateRoutes);
      info.setVersion(
          BaseEncoding.base16().lowerCase().encode(Ints.toByteArray(sourceFingerprint)));

      if (!invalidStateRoutes.isEmpty()) {
        vpcRouteStateInvalidator.accept(invalidStateRoutes);
      }

      Applicators.apply(clusters, info);

    } catch (ApplicatorException e) {
      logger.error("Error during network broadcast", e);
    }
  }