/* package */ void updateDepGraphCells(Set<WebGridCell> newCells) {
    Set<WebGridCell> currentCells = _depGraphGrids.keySet();
    Set<WebGridCell> cellsToRemove = Sets.difference(currentCells, newCells);
    Set<WebGridCell> cellsToAdd = Sets.difference(newCells, currentCells);

    for (WebGridCell cell : cellsToRemove) {
      _depGraphGrids.remove(cell);
    }
    for (WebGridCell cell : cellsToAdd) {
      String gridName = getName() + ".depgraph-" + cell.getRowId() + "-" + cell.getColumnId();
      OperationTimer timer = new OperationTimer(s_logger, "depgraph");
      Pair<String, ValueSpecification> columnMappingPair =
          getGridStructure()
              .findCellSpecification(cell, getViewClient().getLatestCompiledViewDefinition());
      s_logger.debug("includeDepGraph took {}", timer.finished());
      // TODO should this ever happen?
      if (columnMappingPair != null) {
        PushWebViewDepGraphGrid grid =
            new PushWebViewDepGraphGrid(
                gridName,
                getViewClient(),
                getConverterCache(),
                cell,
                columnMappingPair.getFirst(),
                columnMappingPair.getSecond());
        _depGraphGrids.put(cell, grid);
      }
    }
  }
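Guava's Sets.difference returns a lazy, unmodifiable view backed by both arguments, so the loop above iterates a view of _depGraphGrids.keySet() while removing entries from that same map; with a plain HashMap that can throw ConcurrentModificationException. A minimal, self-contained sketch of the defensive-copy variant (class and variable names are invented for illustration):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class DifferenceViewDemo {
  public static void main(String[] args) {
    Map<String, String> grids = new HashMap<>();
    grids.put("a", "gridA");
    grids.put("b", "gridB");
    Set<String> newCells = Sets.newHashSet("b", "c");

    // Sets.difference is a live view over grids.keySet(); copying it first makes
    // the removal loop safe even for a plain HashMap.
    Set<String> cellsToRemove = ImmutableSet.copyOf(Sets.difference(grids.keySet(), newCells));
    for (String cell : cellsToRemove) {
      grids.remove(cell);
    }
    System.out.println(grids); // {b=gridB}
  }
}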
Example #2
  private void updateGeneratedViews(
      AbstractBuild<?, ?> build, BuildListener listener, Set<GeneratedView> freshViews)
      throws IOException {
    Set<GeneratedView> generatedViews =
        extractGeneratedObjects(build.getProject(), GeneratedViewsAction.class);
    Set<GeneratedView> added = Sets.difference(freshViews, generatedViews);
    Set<GeneratedView> existing = Sets.intersection(generatedViews, freshViews);
    Set<GeneratedView> removed = Sets.difference(generatedViews, freshViews);

    logItems(listener, "Adding views", added);
    logItems(listener, "Existing views", existing);
    logItems(listener, "Removing views", removed);

    // Delete views
    if (removedViewAction == RemovedViewAction.DELETE) {
      for (GeneratedView removedView : removed) {
        String viewName = removedView.getName();
        ItemGroup parent = getLookupStrategy().getParent(build.getProject(), viewName);
        if (parent instanceof ViewGroup) {
          View view = ((ViewGroup) parent).getView(FilenameUtils.getName(viewName));
          if (view != null) {
            ((ViewGroup) parent).deleteView(view);
          }
        } else if (parent == null) {
          LOGGER.log(Level.FINE, "Parent ViewGroup seems to have been already deleted");
        } else {
          LOGGER.log(Level.WARNING, format("Could not delete view within %s", parent.getClass()));
        }
      }
    }
  }
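The added/existing/removed triple above reappears in several later examples (config files, user content); a hedged sketch of the same bookkeeping factored into a standalone helper, with an invented class name:

import com.google.common.collect.Sets;
import java.util.Set;

final class GeneratedItemDiff<T> {
  final Set<T> added;
  final Set<T> existing;
  final Set<T> removed;

  GeneratedItemDiff(Set<T> fresh, Set<T> current) {
    // All three are lazy views; copy them if fresh/current will be mutated afterwards.
    added = Sets.difference(fresh, current);
    existing = Sets.intersection(current, fresh);
    removed = Sets.difference(current, fresh);
  }
}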
Example #3
  /**
   * Helper to add srcs and deps Soy files to a SoyFileSet builder. Also does sanity checks.
   *
   * @param sfsBuilder The SoyFileSet builder to add to.
   * @param inputPrefix The input path prefix to prepend to all the file paths.
   * @param srcs The srcs from the --srcs flag. Exactly one of 'srcs' and 'args' must be nonempty.
   * @param args The old-style srcs from the command line (that's how they were specified before we
   *     added the --srcs flag). Exactly one of 'srcs' and 'args' must be nonempty.
   * @param deps The deps from the --deps flag, or empty list if not applicable.
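   * @param indirectDeps The indirect deps (files added as {@code SoyFileKind.INDIRECT_DEP}), or
   *     empty list if not applicable.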
   * @param exitWithErrorFn A function that exits with an error message followed by a usage message.
   */
  static void addSoyFilesToBuilder(
      Builder sfsBuilder,
      String inputPrefix,
      Collection<String> srcs,
      Collection<String> args,
      Collection<String> deps,
      Collection<String> indirectDeps,
      Function<String, Void> exitWithErrorFn) {
    if (srcs.isEmpty() && args.isEmpty()) {
      exitWithErrorFn.apply("Must provide list of source Soy files (--srcs).");
    }
    if (!srcs.isEmpty() && !args.isEmpty()) {
      exitWithErrorFn.apply(
          "Found source Soy files from --srcs and from args (please use --srcs only).");
    }

    // Create Set versions of each of the arguments, and de-dupe. If something is included as
    // multiple file kinds, we'll keep the strongest one; a file in both srcs and deps will be a
    // src, and one in both deps and indirect_deps will be a dep.
    // TODO(gboyer): Maybe stop supporting old style (srcs from command line args) at some point.
    Set<String> srcsSet = ImmutableSet.<String>builder().addAll(srcs).addAll(args).build();
    Set<String> depsSet = Sets.difference(ImmutableSet.copyOf(deps), srcsSet);
    Set<String> indirectDepsSet =
        Sets.difference(ImmutableSet.copyOf(indirectDeps), Sets.union(srcsSet, depsSet));

    for (String src : srcsSet) {
      sfsBuilder.addWithKind(new File(inputPrefix + src), SoyFileKind.SRC);
    }
    for (String dep : depsSet) {
      sfsBuilder.addWithKind(new File(inputPrefix + dep), SoyFileKind.DEP);
    }
    for (String dep : indirectDepsSet) {
      sfsBuilder.addWithKind(new File(inputPrefix + dep), SoyFileKind.INDIRECT_DEP);
    }
  }
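The strongest-kind rule from the comment above (a file in both srcs and deps stays a src, one in both deps and indirect deps stays a dep) is easy to see on toy data; a standalone sketch with made-up file names:

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import java.util.Set;

class SoyKindPrecedenceDemo {
  public static void main(String[] args) {
    Set<String> srcs = ImmutableSet.of("a.soy", "b.soy");
    Set<String> deps = ImmutableSet.of("b.soy", "c.soy");
    Set<String> indirectDeps = ImmutableSet.of("c.soy", "d.soy");

    // b.soy stays a src; c.soy stays a dep.
    Set<String> depsSet = Sets.difference(deps, srcs);
    Set<String> indirectSet = Sets.difference(indirectDeps, Sets.union(srcs, depsSet));
    System.out.println(depsSet);     // [c.soy]
    System.out.println(indirectSet); // [d.soy]
  }
}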
  private void refresh() throws KeeperException, InterruptedException {
    List<RepositoryModelEvent> events = new ArrayList<RepositoryModelEvent>();

    synchronized (reposLock) {
      Map<String, RepositoryDefinition> newRepos = loadRepositories(true);

      // Find out changes in repositories
      Set<String> removedRepos = Sets.difference(repos.keySet(), newRepos.keySet());
      for (String id : removedRepos) {
        events.add(new RepositoryModelEvent(RepositoryModelEventType.REPOSITORY_REMOVED, id));
      }

      Set<String> addedRepos = Sets.difference(newRepos.keySet(), repos.keySet());
      for (String id : addedRepos) {
        events.add(new RepositoryModelEvent(RepositoryModelEventType.REPOSITORY_ADDED, id));
      }

      for (RepositoryDefinition repoDef : newRepos.values()) {
        if (repos.containsKey(repoDef.getName()) && !repos.get(repoDef.getName()).equals(repoDef)) {
          events.add(
              new RepositoryModelEvent(
                  RepositoryModelEventType.REPOSITORY_UPDATED, repoDef.getName()));
        }
      }

      repos = newRepos;
    }

    notifyListeners(events);
  }
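refresh() derives removed and added ids with Sets.difference and detects updated entries with an explicit loop; Guava's Maps.difference expresses all three in one call. A hedged alternative sketch (not how this class does it, and with plain String values standing in for RepositoryDefinition):

import com.google.common.collect.MapDifference;
import com.google.common.collect.Maps;
import java.util.Map;

class RepositoryDiffDemo {
  static void printDiff(Map<String, String> current, Map<String, String> refreshed) {
    MapDifference<String, String> diff = Maps.difference(current, refreshed);
    System.out.println("removed: " + diff.entriesOnlyOnLeft().keySet());
    System.out.println("added: " + diff.entriesOnlyOnRight().keySet());
    System.out.println("updated: " + diff.entriesDiffering().keySet());
  }
}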
Example #5
  /**
   * Verify that a plan meets the basic sanity checks. E.g., every producer should have a consumer.
   * Only producers that support multiple consumers (LocalMultiwayProducer, EOSController) can have
   * multiple consumers.
   *
   * @see #assignWorkersToFragments(List, ConstructArgs)
   * @param fragments the fragments of the plan
   */
  public static void sanityCheckEdges(final List<PlanFragmentEncoding> fragments) {
    /* These maps connect each channel id to the fragment that produces or consumes it. */
    // producers must be unique
    Map<Integer, PlanFragmentEncoding> producerMap = Maps.newHashMap();
    // consumers can be repeated, as long as the producer is a LocalMultiwayProducer
    Multimap<Integer, PlanFragmentEncoding> consumerMap = ArrayListMultimap.create();
    final Set<Integer> soleConsumer = Sets.newHashSet();

    for (PlanFragmentEncoding fragment : fragments) {
      for (OperatorEncoding<?> operator : fragment.operators) {
        /* Build the producer/consumer map. */
        if (operator instanceof AbstractConsumerEncoding) {
          AbstractConsumerEncoding<?> consumer = (AbstractConsumerEncoding<?>) operator;
          consumerMap.put(consumer.argOperatorId, fragment);
        } else if (operator instanceof AbstractProducerEncoding
            || operator instanceof IDBControllerEncoding) {
          Integer opId = operator.opId;
          PlanFragmentEncoding oldFragment = producerMap.put(opId, fragment);
          if (oldFragment != null) {
            Preconditions.checkArgument(
                false,
                "Two different operators cannot produce the same opId %s. Fragments: %s %s",
                opId,
                fragment.fragmentIndex,
                oldFragment.fragmentIndex);
          }
          if (!(operator instanceof LocalMultiwayProducerEncoding
              || operator instanceof EOSControllerEncoding)) {
            soleConsumer.add(opId);
          }
        }
      }
    }

    /* Sanity check 1: Producer must have corresponding consumers, and vice versa. */
    Set<Integer> consumedNotProduced = Sets.difference(consumerMap.keySet(), producerMap.keySet());
    Preconditions.checkArgument(
        consumedNotProduced.isEmpty(),
        "Missing producer(s) for consumer(s): %s",
        consumedNotProduced);
    Set<Integer> producedNotConsumed = Sets.difference(producerMap.keySet(), consumerMap.keySet());
    Preconditions.checkArgument(
        producedNotConsumed.isEmpty(),
        "Missing consumer(s) for producer(s): %s",
        producedNotConsumed);

    /* Sanity check 2: Operators that only admit a single consumer should have exactly one consumer. */
    for (Integer opId : soleConsumer) {
      Collection<PlanFragmentEncoding> consumers = consumerMap.get(opId);
      Preconditions.checkArgument(
          consumers.size() == 1,
          "Producer %s only supports a single consumer, not %s",
          opId,
          consumers.size());
    }
  }
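Sanity check 1 above is really an equality test on the two key sets; Sets.symmetricDifference collapses it into a single check, at the cost of one less specific error message. A sketch of that alternative (hypothetical helper, not the original code):

import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import java.util.Set;

class EdgeIdCheckDemo {
  static void checkSameIds(Set<Integer> producerIds, Set<Integer> consumerIds) {
    // Empty symmetric difference <=> both difference checks above pass.
    Set<Integer> unmatched = Sets.symmetricDifference(producerIds, consumerIds);
    Preconditions.checkArgument(unmatched.isEmpty(), "Unmatched channel ids: %s", unmatched);
  }
}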
Example #6
 public static void assertSuitesEqual(Test suite1, Test suite2) {
   Set<Class<?>> caseClasses1 = Sets.newLinkedHashSet(collectCaseClasses(suite1));
   Set<Class<?>> caseClasses2 = Sets.newLinkedHashSet(collectCaseClasses(suite2));
   assertTrue(
       "New tests not found in old suite:\n" + Sets.difference(caseClasses2, caseClasses1),
       caseClasses1.containsAll(caseClasses2));
   assertTrue(
       "Old tests not found in new suite:\n" + Sets.difference(caseClasses1, caseClasses2),
       caseClasses2.containsAll(caseClasses1));
 }
Example #7
  private Iterable<Object> saveAndDeleteFiles(
      JSONArray oldFiles, JSONArray newFiles, String entityPath) {
    Iterable<Object> toDelete =
        Sets.difference(Sets.newHashSet(oldFiles), Sets.newHashSet(newFiles));
    Iterable<Object> toSave = Sets.difference(Sets.newHashSet(newFiles), Sets.newHashSet(oldFiles));

    toDelete.forEach(file -> fileStoreService.delete(((LinkedHashMap) file).get("id").toString()));
    saveFiles(toSave, entityPath);
    return toDelete;
  }
Example #8
  @Override
  public void process(TransactionBase tx, Bytes row, Column col) throws Exception {

    TypedTransactionBase ttx = FluoConstants.TYPEL.wrap(tx);
    String nextJson = ttx.get().row(row).col(FluoConstants.PAGE_NEW_COL).toString("");
    if (nextJson.isEmpty()) {
      log.error("An empty page was set at row {} col {}", row.toString(), col.toString());
      return;
    }

    Gson gson = new Gson();
    Page nextPage;
    if (nextJson.equals("delete")) {
      ttx.mutate().row(row).col(FluoConstants.PAGE_CUR_COL).delete();
      nextPage = Page.EMPTY;
    } else {
      ttx.mutate().row(row).col(FluoConstants.PAGE_CUR_COL).set(nextJson);
      nextPage = gson.fromJson(nextJson, Page.class);
    }

    String curJson = ttx.get().row(row).col(FluoConstants.PAGE_CUR_COL).toString("");
    Set<Page.Link> curLinks = Collections.emptySet();
    if (!curJson.isEmpty()) {
      Page curPage = gson.fromJson(curJson, Page.class);
      curLinks = curPage.getOutboundLinks();
    } else {
      Long score = ttx.get().row(row).col(FluoConstants.PAGE_SCORE_COL).toLong(0);
      ttx.mutate().row(row).col(FluoConstants.PAGE_SCORE_COL).set(score + 1);
    }

    Set<Page.Link> nextLinks = nextPage.getOutboundLinks();
    String pageUri = row.toString().substring(2);

    Sets.SetView<Page.Link> addLinks = Sets.difference(nextLinks, curLinks);
    for (Page.Link link : addLinks) {
      String r = "p:" + link.getUri();
      ttx.mutate()
          .row(r)
          .fam(FluoConstants.INLINKS_UPDATE)
          .qual(pageUri)
          .set("add," + link.getAnchorText());
      ttx.mutate().row(r).col(FluoConstants.INLINKS_CHG_NTFY).weaklyNotify();
    }

    Sets.SetView<Page.Link> delLinks = Sets.difference(curLinks, nextLinks);
    for (Page.Link link : delLinks) {
      String r = "p:" + link.getUri();
      ttx.mutate().row(r).fam(FluoConstants.INLINKS_UPDATE).qual(pageUri).set("del");
      ttx.mutate().row(r).col(FluoConstants.INLINKS_CHG_NTFY).weaklyNotify();
    }

    // clean up
    ttx.mutate().row(row).col(FluoConstants.PAGE_NEW_COL).delete();
  }
Example #9
  private void updateGeneratedConfigFiles(
      AbstractBuild<?, ?> build,
      BuildListener listener,
      Set<GeneratedConfigFile> freshConfigFiles) {
    Set<GeneratedConfigFile> generatedConfigFiles =
        extractGeneratedObjects(build.getProject(), GeneratedConfigFilesAction.class);
    Set<GeneratedConfigFile> added = Sets.difference(freshConfigFiles, generatedConfigFiles);
    Set<GeneratedConfigFile> existing = Sets.intersection(generatedConfigFiles, freshConfigFiles);
    Set<GeneratedConfigFile> removed = Sets.difference(generatedConfigFiles, freshConfigFiles);

    logItems(listener, "Adding config files", added);
    logItems(listener, "Existing config files", existing);
    logItems(listener, "Removing config files", removed);
  }
Example #10
  private void updateGeneratedUserContents(
      AbstractBuild<?, ?> build,
      BuildListener listener,
      Set<GeneratedUserContent> freshUserContents) {
    Set<GeneratedUserContent> generatedUserContents =
        extractGeneratedObjects(build.getProject(), GeneratedUserContentsAction.class);
    Set<GeneratedUserContent> added = Sets.difference(freshUserContents, generatedUserContents);
    Set<GeneratedUserContent> existing =
        Sets.intersection(generatedUserContents, freshUserContents);
    Set<GeneratedUserContent> removed = Sets.difference(generatedUserContents, freshUserContents);

    logItems(listener, "Adding user content", added);
    logItems(listener, "Existing user content", existing);
    logItems(listener, "Removing user content", removed);
  }
  /**
   * Flushes the rest of the UnManagedProtectionSet changes to the database and cleans up (i.e.,
   * removes) any UnManagedProtectionSets that no longer exist on the RecoverPoint device, but are
   * still in the database.
   *
   * @param protectionSystem the ProtectionSystem to clean up
   * @param dbClient a reference to the database client
   */
  private void cleanUp(ProtectionSystem protectionSystem, DbClient dbClient) {

    // flush all remaining changes to the database
    handlePersistence(dbClient, true);

    // remove any UnManagedProtectionSets found in the database
    // but no longer found on the RecoverPoint device
    Set<URI> umpsetsFoundInDbForProtectionSystem =
        DiscoveryUtils.getAllUnManagedProtectionSetsForSystem(
            dbClient, protectionSystem.getId().toString());

    SetView<URI> onlyFoundInDb =
        Sets.difference(umpsetsFoundInDbForProtectionSystem, unManagedCGsReturnedFromProvider);

    if (onlyFoundInDb != null && !onlyFoundInDb.isEmpty()) {
      Iterator<UnManagedProtectionSet> umpsesToDelete =
          dbClient.queryIterativeObjects(UnManagedProtectionSet.class, onlyFoundInDb, true);
      while (umpsesToDelete.hasNext()) {
        UnManagedProtectionSet umps = umpsesToDelete.next();
        log.info(
            "Deleting orphaned UnManagedProtectionSet {} no longer found on RecoverPoint device.",
            umps.getNativeGuid());
        dbClient.markForDeletion(umps);
      }
    }

    // reset all tracking collections
    unManagedCGsInsert = null;
    unManagedCGsUpdate = null;
    unManagedVolumesToDelete = null;
    unManagedVolumesToUpdateByWwn = null;
    unManagedCGsReturnedFromProvider = null;
  }
Example #12
  private static void mergeConfigMaps(
      final Map<String, String> source, final HashMap<String, String> destination) {
    checkNotNull(source, "source");
    checkNotNull(destination, "destination");

    if (source.isEmpty()) {
      return;
    }

    final Sets.SetView<String> sharedKeys =
        Sets.intersection(source.keySet(), destination.keySet());
    final Sets.SetView<String> newKeys = Sets.difference(source.keySet(), destination.keySet());

    // skip empty values in the source map
    // if they are already in the destination in order to
    // prevent overwrites of populated keys with empty ones
    sharedKeys
        .stream()
        .filter(key -> !source.get(key).trim().isEmpty() && !key.startsWith("#"))
        .forEach(key -> destination.put(key, source.get(key)));

    // Add new keys regardless of whether or not they're empty
    newKeys
        .stream()
        .filter(key -> !key.startsWith("#"))
        .forEach(key -> destination.put(key, source.get(key).trim()));
  }
    /**
     * @return Pair of dep-file rule key and the members of possibleDepFileSourcePaths that actually
     *     appeared in the dep file
     * @throws IOException
     */
    public Optional<Pair<RuleKey, ImmutableSet<SourcePath>>> build(
        Optional<ImmutableSet<SourcePath>> possibleDepFileSourcePaths) throws IOException {
      ImmutableSet<SourcePath> inputs = builder.getInputsSoFar();

      ImmutableSet<SourcePath> depFileInputs = inputs;

      if (possibleDepFileSourcePaths.isPresent()) {
        // possibleDepFileSourcePaths is an ImmutableSortedSet which implements contains() via
        // binary search rather than via hashing. Thus taking the intersection/difference
        // is O(n*log(n)). Here, we make a hash-based copy of the set, so that intersection
        // will be reduced to O(N).
        ImmutableSet<SourcePath> possibleDepFileSourcePathsUnsorted =
            ImmutableSet.copyOf(possibleDepFileSourcePaths.get());
        Sets.SetView<SourcePath> nonDepFileInputs =
            Sets.difference(inputs, possibleDepFileSourcePathsUnsorted);

        builder.addToRuleKey(nonDepFileInputs);

        depFileInputs =
            ImmutableSet.copyOf(Sets.intersection(inputs, possibleDepFileSourcePathsUnsorted));
      }

      Optional<RuleKey> ruleKey = builder.build();
      if (ruleKey.isPresent()) {
        return Optional.of(new Pair<>(ruleKey.get(), depFileInputs));
      } else {
        return Optional.empty();
      }
    }
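The copy-before-diff trick in the comment above generalizes: Sets.difference and Sets.intersection probe their second argument with contains(), so that argument's lookup cost dominates. A minimal sketch of the pattern in isolation (hypothetical helper name):

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSortedSet;
import com.google.common.collect.Sets;
import java.util.Set;

class HashCopyBeforeDiffDemo {
  static <T> Set<T> difference(Set<T> inputs, ImmutableSortedSet<T> sorted) {
    // A hash-based copy turns each contains() lookup from O(log n) into expected O(1).
    ImmutableSet<T> hashed = ImmutableSet.copyOf(sorted);
    return Sets.difference(inputs, hashed);
  }
}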
Example #14
  /**
   * Verifies that the set of windows that have any state stored is exactly {@code
   * expectedWindows} and that each of these windows has only tags from {@code allowedTags}.
   */
  private void assertHasOnlyGlobalAndAllowedTags(
      Set<W> expectedWindows, Set<StateTag<?>> allowedTags) {
    runner.persist();

    Set<StateNamespace> expectedWindowsSet = new HashSet<>();
    for (W expectedWindow : expectedWindows) {
      expectedWindowsSet.add(windowNamespace(expectedWindow));
    }
    Set<StateNamespace> actualWindows = new HashSet<>();

    for (StateNamespace namespace : stubContexts.state.getNamespacesInUse()) {
      if (namespace instanceof StateNamespaces.GlobalNamespace) {
        continue;
      } else if (namespace instanceof StateNamespaces.WindowNamespace) {
        Set<StateTag<?>> tagsInUse = stubContexts.state.getTagsInUse(namespace);
        if (tagsInUse.isEmpty()) {
          continue;
        }
        actualWindows.add(namespace);
        Set<StateTag<?>> unexpected = Sets.difference(tagsInUse, allowedTags);
        if (unexpected.isEmpty()) {
          continue;
        } else {
          fail(namespace + " has unexpected states: " + unexpected);
        }
      } else if (namespace instanceof StateNamespaces.WindowAndTriggerNamespace) {
        Set<StateTag<?>> tagsInUse = stubContexts.state.getTagsInUse(namespace);
        assertTrue(namespace + " contains " + tagsInUse, tagsInUse.isEmpty());
      } else {
        fail("Unrecognized namespace " + namespace);
      }
    }

    assertEquals(expectedWindowsSet, actualWindows);
  }
  /**
   * Cleans up UnManagedVolumes in the DB that have been deleted manually from the array:
   *
   * <p>1. Get all UnManagedVolumes from the DB.
   *
   * <p>2. Store the URIs of the unmanaged volumes returned from the provider in
   * unManagedVolumesBookKeepingList.
   *
   * <p>3. If an unmanaged volume is found only in the DB, but not in
   * unManagedVolumesBookKeepingList, set it to inactive.
   *
   * <p>DB | Provider
   *
   * <p>x,y,z | y,z,a [a --> a new entry has been added, but its indexes haven't been written to
   * the DB yet]
   *
   * <p>x --> will be set to inactive
   *
   * @param storagePoolUri
   * @throws IOException
   */
  private void performStorageUnManagedVolumeBookKeeping(URI storagePoolUri) throws IOException {
    @SuppressWarnings("deprecation")
    List<URI> unManagedVolumesInDB =
        _dbClient.queryByConstraint(
            ContainmentConstraint.Factory.getPoolUnManagedVolumeConstraint(storagePoolUri));

    Set<URI> unManagedVolumesInDBSet = new HashSet<URI>(unManagedVolumesInDB);
    SetView<URI> onlyAvailableinDB =
        Sets.difference(unManagedVolumesInDBSet, unManagedVolumesReturnedFromProvider);

    _logger.info("Diff :" + Joiner.on("\t").join(onlyAvailableinDB));
    if (onlyAvailableinDB.size() > 0) {
      List<UnManagedVolume> unManagedVolumeTobeDeleted = new ArrayList<UnManagedVolume>();
      Iterator<UnManagedVolume> unManagedVolumes =
          _dbClient.queryIterativeObjects(
              UnManagedVolume.class, new ArrayList<URI>(onlyAvailableinDB));

      while (unManagedVolumes.hasNext()) {
        UnManagedVolume volume = unManagedVolumes.next();
        if (null == volume || volume.getInactive()) {
          continue;
        }

        _logger.info("Setting unManagedVolume {} inactive", volume.getId());
        volume.setStoragePoolUri(NullColumnValueGetter.getNullURI());
        volume.setStorageSystemUri(NullColumnValueGetter.getNullURI());
        volume.setInactive(true);
        unManagedVolumeTobeDeleted.add(volume);
      }
      if (unManagedVolumeTobeDeleted.size() > 0) {
        _partitionManager.updateAndReIndexInBatches(
            unManagedVolumeTobeDeleted, 1000, _dbClient, "UnManagedVolume");
      }
    }
  }
Example #16
 private void backupFlowEntries(NodeId nodeId, Set<DeviceId> deviceIds) {
   if (deviceIds.isEmpty()) {
     return;
   }
   log.debug("Sending flowEntries for devices {} to {} as backup.", deviceIds, nodeId);
   Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>> deviceFlowEntries = Maps.newConcurrentMap();
   deviceIds.forEach(id -> deviceFlowEntries.put(id, ImmutableMap.copyOf(getFlowTable(id))));
   clusterCommunicator
       .<Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>>, Set<DeviceId>>sendAndReceive(
           deviceFlowEntries, FLOW_TABLE_BACKUP, SERIALIZER::encode, SERIALIZER::decode, nodeId)
       .whenComplete(
           (backedupDevices, error) -> {
             Set<DeviceId> devicesNotBackedup =
                 error != null
                     ? deviceFlowEntries.keySet()
                     : Sets.difference(deviceFlowEntries.keySet(), backedupDevices);
             if (devicesNotBackedup.size() > 0) {
               log.warn(
                   "Failed to backup devices: {}. Reason: {}",
                   devicesNotBackedup,
                   error.getMessage());
             }
             if (backedupDevices != null) {
               backedupDevices.forEach(
                   id -> {
                     lastBackupTimes.put(id, System.currentTimeMillis());
                     lastBackupNodes.put(id, nodeId);
                   });
             }
           });
 }
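One hedged observation on the completion handler above: if error is null but the reply omits some devices, the warning would call getMessage() on a null error. A self-contained sketch of a null-safe variant (invented names, not the project's actual fix):

import com.google.common.collect.Sets;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class BackupWarningDemo {
  private static final Logger log = LoggerFactory.getLogger(BackupWarningDemo.class);

  static void warnIfIncomplete(Set<String> requested, Set<String> backedUp, Throwable error) {
    Set<String> notBackedUp = error != null ? requested : Sets.difference(requested, backedUp);
    if (!notBackedUp.isEmpty()) {
      // Guard the reason: error may be null when the reply simply omits some devices.
      String reason = error != null ? error.getMessage() : "incomplete backup response";
      log.warn("Failed to backup devices: {}. Reason: {}", notBackedUp, reason);
    }
  }
}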
Example #17
 private void addNewTags(Set<Tag> newTags, Set<Tag> originalTags) {
   SetView<Tag> addedTags = Sets.difference(newTags, originalTags);
   logger.debug("addedTags size : {}", addedTags.size());
   for (Tag tag : addedTags) {
     tag.tagged();
   }
 }
 private void setMarketDataSubscriptions(final Set<ValueRequirement> requiredSubscriptions) {
   final Set<ValueRequirement> currentSubscriptions = _marketDataSubscriptions;
   final Set<ValueRequirement> unusedMarketData =
       Sets.difference(currentSubscriptions, requiredSubscriptions);
   if (!unusedMarketData.isEmpty()) {
     s_logger.debug(
         "{} unused market data subscriptions: {}", unusedMarketData.size(), unusedMarketData);
     removeMarketDataSubscriptions(new ArrayList<ValueRequirement>(unusedMarketData));
   }
   final Set<ValueRequirement> newMarketData =
       Sets.difference(requiredSubscriptions, currentSubscriptions);
   if (!newMarketData.isEmpty()) {
     s_logger.debug("{} new market data requirements: {}", newMarketData.size(), newMarketData);
     addMarketDataSubscriptions(new HashSet<ValueRequirement>(newMarketData));
   }
 }
Example #19
  private ImmutableSet<AspectWithParameters> requiredAspects(
      AspectDefinition aspectDefinition,
      AspectParameters aspectParameters,
      Attribute attribute,
      Target target,
      Rule originalRule) {
    if (!(target instanceof Rule)) {
      return ImmutableSet.of();
    }

    Set<AspectWithParameters> aspectCandidates =
        extractAspectCandidates(aspectDefinition, aspectParameters, attribute, originalRule);
    RuleClass ruleClass = ((Rule) target).getRuleClassObject();
    ImmutableSet.Builder<AspectWithParameters> result = ImmutableSet.builder();
    for (AspectWithParameters candidateClass : aspectCandidates) {
      ConfiguredAspectFactory candidate =
          (ConfiguredAspectFactory) AspectFactory.Util.create(candidateClass.getAspectFactory());
      if (Sets.difference(
              candidate.getDefinition().getRequiredProviders(), ruleClass.getAdvertisedProviders())
          .isEmpty()) {
        result.add(candidateClass);
      }
    }
    return result.build();
  }
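The difference(...).isEmpty() test above is the usual Guava idiom for "is a subset of"; a tiny sketch of the same check factored out (hypothetical helper, not Bazel code):

import com.google.common.collect.Sets;
import java.util.Set;

class AdvertisedProviderCheckDemo {
  static <T> boolean providesAll(Set<T> required, Set<T> advertised) {
    // Equivalent to advertised.containsAll(required), but the view also exposes
    // exactly which providers are missing if an error message is needed.
    return Sets.difference(required, advertised).isEmpty();
  }
}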
Example #20
    private void installSecondaryDexFiles() throws Exception {
      final ImmutableMap<String, Path> hashToSources = getRequiredDexFiles();
      final ImmutableSet<String> requiredHashes = hashToSources.keySet();
      final ImmutableSet<String> presentHashes = prepareSecondaryDexDir(requiredHashes);
      final Set<String> hashesToInstall = Sets.difference(requiredHashes, presentHashes);

      Map<String, Path> filesToInstallByHash =
          Maps.filterKeys(hashToSources, Predicates.in(hashesToInstall));

      // This is a bit gross.  It was a late addition.  Ideally, we could eliminate this, but
      // it wouldn't be terrible if we don't.  We store the dexed jars on the device
      // with the full SHA-1 hashes in their names.  This is the format that the loader uses
      // internally, so ideally we would just load them in place.  However, the code currently
      // expects to be able to copy the jars from a directory that matches the name in the
      // metadata file, like "secondary-1.dex.jar".  We don't want to give up putting the
      // hashes in the file names (because we use that to skip re-uploads), so just hack
      // the metadata file to have hash-like names.
      String metadataContents =
          com.google.common.io.Files.toString(
                  projectFilesystem
                      .resolve(exopackageInfo.getDexInfo().get().getMetadata())
                      .toFile(),
                  Charsets.UTF_8)
              .replaceAll(
                  "secondary-(\\d+)\\.dex\\.jar (\\p{XDigit}{40}) ", "secondary-$2.dex.jar $2 ");

      installFiles(
          "secondary_dex",
          ImmutableMap.copyOf(filesToInstallByHash),
          metadataContents,
          "secondary-%s.dex.jar",
          SECONDARY_DEX_DIR);
    }
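installSecondaryDexFiles combines a difference view with Maps.filterKeys; both are lazy, which is why the filtered map is wrapped in ImmutableMap.copyOf at the installFiles call. A self-contained sketch of that combination (hashes and file names invented):

import com.google.common.base.Predicates;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import java.util.Map;
import java.util.Set;

class FilterByDifferenceDemo {
  public static void main(String[] args) {
    Map<String, String> hashToSource = ImmutableMap.of("hash1", "a.dex.jar", "hash2", "b.dex.jar");
    Set<String> alreadyPresent = ImmutableSet.of("hash2");

    Set<String> toInstall = Sets.difference(hashToSource.keySet(), alreadyPresent);
    // Both the difference view and filterKeys are lazy; copyOf forces evaluation once.
    Map<String, String> filesToInstall =
        ImmutableMap.copyOf(Maps.filterKeys(hashToSource, Predicates.in(toInstall)));
    System.out.println(filesToInstall); // {hash1=a.dex.jar}
  }
}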
Example #21
  protected Set<Plugin> loadPlugins(String pluginPath) {
    final File pluginDir = new File(pluginPath);
    final Set<Plugin> plugins = new HashSet<>();

    final PluginLoader pluginLoader = new PluginLoader(pluginDir);
    for (Plugin plugin : pluginLoader.loadPlugins()) {
      final PluginMetaData metadata = plugin.metadata();
      if (capabilities().containsAll(metadata.getRequiredCapabilities())) {
        if (version.sameOrHigher(metadata.getRequiredVersion())) {
          plugins.add(plugin);
        } else {
          LOG.error(
              "Plugin \""
                  + metadata.getName()
                  + "\" requires version "
                  + metadata.getRequiredVersion()
                  + " - not loading!");
        }
      } else {
        LOG.debug(
            "Skipping plugin \"{}\" because some capabilities are missing ({}).",
            metadata.getName(),
            Sets.difference(plugin.metadata().getRequiredCapabilities(), capabilities()));
      }
    }

    LOG.info("Loaded plugins: " + plugins);
    return plugins;
  }
Example #22
 public static void assertOldTestsInNewSuite(Test oldSuite, Test newSuite) {
   Set<Class<?>> caseClasses1 = Sets.newLinkedHashSet(collectCaseClasses(oldSuite));
   Set<Class<?>> caseClasses2 = Sets.newLinkedHashSet(collectCaseClasses(newSuite));
   assertTrue(
       "Old tests not found in new suite:\n" + Sets.difference(caseClasses1, caseClasses2),
       caseClasses2.containsAll(caseClasses1));
 }
Example #23
 private void removeTags(Set<Tag> newTags, Set<Tag> originalTags) {
   SetView<Tag> removedTags = Sets.difference(originalTags, newTags);
   logger.debug("removedTags size : {}", removedTags.size());
   for (Tag tag : removedTags) {
     tag.deTagged();
   }
 }
Example #24
 public static String translateImplication(
     SymbolicConstraint leftHandSide, SymbolicConstraint rightHandSide) {
   KILtoSMTLib leftTransformer = new KILtoSMTLib(true);
   KILtoSMTLib rightTransformer = new KILtoSMTLib(false);
   String leftExpression = ((SMTLibTerm) leftHandSide.accept(leftTransformer)).expression();
   String rightExpression = ((SMTLibTerm) rightHandSide.accept(rightTransformer)).expression();
   StringBuilder sb = new StringBuilder();
   sb.append(
       getSortAndFunctionDeclarations(
           leftHandSide.termContext().definition(),
           Sets.union(leftTransformer.variables(), rightTransformer.variables())));
   sb.append(getAxioms(leftHandSide.termContext().definition()));
   sb.append(getConstantDeclarations(leftTransformer.variables()));
   sb.append("(assert (and ");
   sb.append(leftExpression);
   sb.append(" (not ");
   Set<Variable> rightHandSideOnlyVariables =
       Sets.difference(rightTransformer.variables(), leftTransformer.variables());
   if (!rightHandSideOnlyVariables.isEmpty()) {
     sb.append("(exists (");
     sb.append(getQuantifiedVariables(rightHandSideOnlyVariables));
     sb.append(") ");
   }
   sb.append(rightExpression);
   if (!rightHandSideOnlyVariables.isEmpty()) {
     sb.append(")");
   }
   sb.append(")))");
   return sb.toString();
 }
  @Test
  public void ensureRecordsTest() {
    int empId = 11303;
    List<PayPeriod> payPeriods =
        periodService.getOpenPayPeriods(PayPeriodType.AF, empId, SortOrder.ASC);
    // Print existing records
    Set<TimeRecord> existingRecords =
        timeRecordService
            .getTimeRecords(Collections.singleton(empId), payPeriods, TimeRecordStatus.getAll())
            .stream()
            .map(TimeRecord::new)
            .collect(Collectors.toSet());
    logger.info("-------- EXISTING RECORDS --------");
    printRecords(existingRecords);

    Stopwatch sw = Stopwatch.createStarted();
    // Generate records
    manager.ensureRecords(empId);
    logger.info("generation took {} ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));

    // Print difference
    Set<TimeRecord> newRecords =
        new TreeSet<>(
            timeRecordService.getTimeRecords(
                Collections.singleton(empId), payPeriods, TimeRecordStatus.getAll()));
    logger.info("-------- NEW RECORDS --------");
    printRecords(Sets.difference(newRecords, existingRecords));
  }
 // CHECKSTYLE:OFF
 private void updateDTableTreeViewer() {
   if (!toExpands.isEmpty()
       || !toCollapses.isEmpty()
       || !toRefreshInViewerWithUpdateLabels.isEmpty()
       || launchGlobalRefreshWithoutUpdateLabels
       || launchGlobalRefreshWithUpdateLabels
       || !toUpdateInViewer.isEmpty()
       || !dColumnsToUpdateDirectly.isEmpty()
       || !dColumnsWidthToUpdate.isEmpty()
       || !dColumnsToRemove.isEmpty()
       || !dColumnsToAdd.isEmpty()
       || !dColumnsToVisibilityChanged.isEmpty()
       || updateHeaderColumnWidth) {
     Object[] objectsToUpdateInViewer =
         Sets.difference(toUpdateInViewer, toRefreshInViewerWithUpdateLabels)
             .toArray(new Object[0]);
     Runnable tableUIUpdaterRunnable =
         new TableUIUpdaterRunnable(
             dTableViewerManager,
             dTableTreeViewer,
             toExpands,
             toCollapses,
             toRefreshInViewerWithUpdateLabels,
             launchGlobalRefreshWithoutUpdateLabels,
             launchGlobalRefreshWithUpdateLabels,
             objectsToUpdateInViewer,
             dColumnsToUpdateDirectly,
             dColumnsWidthToUpdate,
             dColumnsToRemove,
             dColumnsToAdd,
             dColumnsToVisibilityChanged,
             updateHeaderColumnWidth);
     EclipseUIUtil.displayAsyncExec(tableUIUpdaterRunnable);
   }
 }
Example #27
    @Override
    public boolean apply(Allocation allocInfo) throws MetadataException {
      Context ctx = allocInfo.getContext();
      NetworkGroups.lookup(
          ctx.getUserFullName().asAccountFullName(), NetworkGroups.defaultNetworkName());

      Set<String> networkNames = Sets.newHashSet(allocInfo.getRequest().getGroupSet());
      if (networkNames.isEmpty()) {
        networkNames.add(NetworkGroups.defaultNetworkName());
      }

      Map<String, NetworkGroup> networkRuleGroups = Maps.newHashMap();
      for (String groupName : networkNames) {
        NetworkGroup group =
            NetworkGroups.lookup(ctx.getUserFullName().asAccountFullName(), groupName);
        if (!ctx.hasAdministrativePrivileges()
            && !RestrictedTypes.filterPrivileged().apply(group)) {
          throw new IllegalMetadataAccessException(
              "Not authorized to use network group "
                  + groupName
                  + " for "
                  + ctx.getUser().getName());
        }
        networkRuleGroups.put(groupName, group);
      }
      Set<String> missingNets = Sets.difference(networkNames, networkRuleGroups.keySet());
      if (!missingNets.isEmpty()) {
        throw new NoSuchMetadataException("Failed to find security group info for: " + missingNets);
      } else {
        allocInfo.setNetworkRules(networkRuleGroups);
      }
      return true;
    }
Example #28
  public void setSelectedEvents(Set<Event> newEvents) {
    Set<Event> oldEvents = Sets.newHashSet(selectedEvents);

    if (newEvents.equals(oldEvents)) return;

    // set of deselected events
    Set<Event> deselectedEvents = Sets.difference(oldEvents, newEvents).immutableCopy();
    // set of newly selected events
    Set<Event> newlySelectedEvents = Sets.difference(newEvents, oldEvents).immutableCopy();

    // XXX these two operations should really happen atomically
    // (we want the end result to be that selectedEvents contains everything in newEvents)
    selectedEvents.retainAll(newEvents);
    selectedEvents.addAll(newEvents);

    notifyEventsSelected(newlySelectedEvents, deselectedEvents);
  }
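setSelectedEvents calls immutableCopy() on both difference views before mutating selectedEvents; the copy pins down the result so later changes to newEvents (or re-iteration of the lazy view) cannot change what listeners receive. A toy sketch of the difference between the live view and the copy:

import com.google.common.collect.Sets;
import java.util.Set;

class ViewVersusCopyDemo {
  public static void main(String[] args) {
    Set<String> oldEvents = Sets.newHashSet("a", "b");
    Set<String> newEvents = Sets.newHashSet("b", "c");

    Sets.SetView<String> liveView = Sets.difference(oldEvents, newEvents);
    Set<String> snapshot = liveView.immutableCopy();

    // Mutating a backing set changes the live view, not the materialized copy.
    newEvents.add("a");
    System.out.println(liveView); // []
    System.out.println(snapshot); // [a]
  }
}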
Example #29
            @Override
            public ImmutableSet<String> get() {
              ImmutableSet.Builder<String> result = validateLabels(excludedSet.get());
              result.addAll(getBuckConfig().getDefaultExcludedLabels());
              ImmutableSet<String> allExcluded = result.build();

              // If someone has included a test, then we should really run it.
              return Sets.difference(allExcluded, getIncludedLabels()).immutableCopy();
            }
Example #30
  public static <V, E> void customTraverse(
      final Graph<V, E> graph, final V startNode, final TraversalCallback<V, E> handler) {
    Set<V> traversedNodes = new HashSet<V>();
    Queue<VertexHolder<V>> traversalQueue = new PriorityQueue<VertexHolder<V>>(16);
    int counter = 0;
    traversalQueue.add(new VertexHolder<V>(startNode, counter++, 0));
    boolean done = false;
    do {
      boolean first = true;
      while (!traversalQueue.isEmpty()) {
        V node = traversalQueue.remove().getVertex();
        VertexEdges<V, E> edges = graph.getEdges(node);
        if (traversedNodes.contains(node)) {
          continue;
        }
        Map<E, V> incomming = edges.getIncomming();
        boolean hasIncomingBeenTraversed = true;
        for (V val : incomming.values()) {
          if (!traversedNodes.contains(val)) {
            hasIncomingBeenTraversed = false;
            break;
          }
        }
        if (!first && !hasIncomingBeenTraversed) {
          continue;
        }
        handler.handle(node, incomming);
        traversedNodes.add(node);
        first = false;
        Map<E, V> outgoing = edges.getOutgoing();
        for (V next : outgoing.values()) {
          traversalQueue.add(
              new VertexHolder<V>(next, counter++, graph.getEdges(next).getIncomming().size()));
        }
      }

      Set<V> leftNodes = Sets.difference(graph.getVertices(), traversedNodes);
      if (leftNodes.isEmpty()) {
        done = true;
      } else {
        boolean added = false;
        for (V node : leftNodes) {
          Collection<V> incomingNodes = graph.getEdges(node).getIncomming().values();
          for (V incoming : incomingNodes) {
            if (traversedNodes.contains(incoming)) {
              traversalQueue.add(new VertexHolder<V>(node, counter++, 0));
              added = true;
              break;
            }
          }
          if (added) {
            break;
          }
        }
      }
    } while (!done);
  }