String ownerPermissionError(Set<String> current, Set<String> allowed) throws IOException {
   LOG.info(
       "Cannot change permissions. Connection owners {}. Allowed owners {}",
       current.toString(),
       allowed.toString());
   return "Insufficient privileges to change permissions.\n\n"
       + "Allowed owners: "
       + allowed.toString()
       + "\n\n"
       + "User belongs to: "
       + current.toString();
 }
  private static void assertFiles(
      String dirPath, Set<String> expectedFiles, boolean excludeNonExistingFiles)
      throws IOException {
    LogTransaction.waitForDeletions();

    File dir = new File(dirPath).getCanonicalFile();
    File[] files = dir.listFiles();
    if (files != null) {
      for (File file : files) {
        if (file.isDirectory()) continue;

        String filePath = file.getPath();
        assertTrue(
            String.format("%s not in [%s]", filePath, expectedFiles),
            expectedFiles.contains(filePath));
        expectedFiles.remove(filePath);
      }
    }

    if (excludeNonExistingFiles) {
      // Removing inside a for-each over the same set risks a ConcurrentModificationException;
      // removeIf filters safely in place.
      expectedFiles.removeIf(filePath -> !new File(filePath).exists());
    }

    assertTrue(expectedFiles.toString(), expectedFiles.isEmpty());
  }
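The filtering step above uses Collection.removeIf, since calling remove on a set while iterating it with a for-each loop risks a ConcurrentModificationException. A minimal standalone sketch of the same idiom (the file paths are invented):

import java.io.File;
import java.util.HashSet;
import java.util.Set;

public class RemoveIfSketch {
  public static void main(String[] args) {
    Set<String> expectedFiles = new HashSet<>();
    expectedFiles.add("/tmp/does-not-exist-1.db");
    expectedFiles.add("/tmp/does-not-exist-2.db");

    // Calling expectedFiles.remove(path) inside a for-each over expectedFiles would
    // typically throw ConcurrentModificationException; removeIf filters in place.
    expectedFiles.removeIf(path -> !new File(path).exists());

    System.out.println(expectedFiles); // prints [] if neither path exists
  }
}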
Example n. 3
 public static void main(String[] args) {
   Set<String> set = new HashSet<String>();
   set.add("Mary");
   set.add("Bob");
   set.remove("Mary");
   System.out.println(set.toString());
 }
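The example above prints [Bob], since "Mary" was removed before printing. As a hedged aside (the class and variable names below are invented), with more than one element a HashSet's iteration order, and therefore its toString() output, is unspecified; a LinkedHashSet gives a stable rendering:

import java.util.LinkedHashSet;
import java.util.Set;

public class SetToStringDemo {
  public static void main(String[] args) {
    // LinkedHashSet keeps insertion order, so the printed form is deterministic.
    Set<String> names = new LinkedHashSet<>();
    names.add("Mary");
    names.add("Bob");
    System.out.println(names);                    // [Mary, Bob]
    System.out.println(String.join(", ", names)); // Mary, Bob  (no brackets)
  }
}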
Example n. 4
 private static Iterator<String> listCorruptTopologies() {
   Set<String> blobStoreTopologyIds =
       nimbusBlobStore.filterAndListKeys(
           new KeyFilter<String>() {
             @Override
             public String filter(String key) {
               return ConfigUtils.getIdFromBlobKey(key);
             }
           });
   Set<String> activeTopologyIds = new HashSet<>(stormClusterState.activeStorms());
   Sets.SetView<String> diffTopology = Sets.difference(activeTopologyIds, blobStoreTopologyIds);
   LOG.info(
       "active-topology-ids [{}] blob-topology-ids [{}] diff-topology [{}]",
       activeTopologyIds.toString(),
       blobStoreTopologyIds.toString(),
       diffTopology.toString());
   return diffTopology.iterator();
 }
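For context on the Guava call used above: Sets.difference(a, b) returns an unmodifiable, live view of the elements of a that are not in b, and its toString() is what gets logged. A self-contained sketch with invented topology ids:

import com.google.common.collect.Sets;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class SetDifferenceSketch {
  public static void main(String[] args) {
    Set<String> activeTopologyIds = new LinkedHashSet<>(List.of("topo-1", "topo-2", "topo-3"));
    Set<String> blobStoreTopologyIds = new LinkedHashSet<>(List.of("topo-1", "topo-3"));

    // Elements of the first set that are absent from the second: here, topo-2.
    Sets.SetView<String> diff = Sets.difference(activeTopologyIds, blobStoreTopologyIds);
    System.out.println(diff); // [topo-2]

    // The view is unmodifiable but live: it reflects later changes to the backing sets.
    activeTopologyIds.add("topo-4");
    System.out.println(diff); // [topo-2, topo-4]
  }
}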
  @Override
  public long updateEnvClouds(long envId, List<CmsCIRelation> cloudRels, String userId) {
    // for now we will handle just new clouds
    List<CmsCIRelation> existingCloudRels =
        cmProcessor.getFromCIRelationsNaked(envId, BASE_CONSUMES, ACCOUNT_CLOUD);
    Set<Long> existingCloudIds = new HashSet<Long>();
    for (CmsCIRelation rel : existingCloudRels) {
      existingCloudIds.add(rel.getToCiId());
    }

    boolean needUpdate = false;
    for (CmsCIRelation requestRel : cloudRels) {
      if (!existingCloudIds.contains(requestRel.getToCiId())) {
        // this is a new cloud, let's add the env->cloud rel
        cmProcessor.createRelation(requestRel);
        needUpdate = true;
      } else {
        cmProcessor.updateRelation(requestRel);
        existingCloudIds.remove(requestRel.getToCiId());
      }
    }
    if (!existingCloudIds.isEmpty()) {
      // looks like we need to delete some clouds;
      // first let's see if we have any open releases
      processCloudDeletions(envId, existingCloudIds);
    }

    if (needUpdate) {
      CmsCI env = getEnv(envId);
      String nsPath = env.getNsPath() + "/" + env.getCiName() + "/manifest";
      List<CmsRfcRelation> compOfRels =
          cmRfcMrgProcessor.getFromCIRelations(
              envId, MANIFEST_COMPOSED_OF, MANIFEST_PLATFORM, "dj");
      for (CmsRfcRelation compOfRel : compOfRels) {
        CmsRfcCI platform = compOfRel.getToRfcCi();
        String platNs = platform.getNsPath();
        manifestRfcProcessor.processClouds(env, platform, platNs, nsPath, userId, null, null, null);
        Set<String> missingSrvs = cloudUtil.getMissingServices(platform.getCiId());
        if (missingSrvs.size() > 0) {
          logger.info(
              ">>>>> Not all services available for platform: "
                  + platform.getCiName()
                  + ", the missing services: "
                  + missingSrvs.toString());
          manifestRfcProcessor.disablePlatform(platform.getCiId(), userId);
        }
        logger.info("Done working on platform " + platform.getCiName());
      }
      return populateParentRelease(env, nsPath);
    } else {
      return 0;
    }
  }
Example n. 6
 @Override
 public String toString() {
   Set<Integer> set = new TreeSet<Integer>();
   for (int i = 0; i < a.length; i++) {
     for (int j = 0; j < BITS; j++) {
       if (((a[i] >>> j) & 1) == 1) {
         set.add((i << LOG) | j);
       }
     }
   }
   return set.toString();
 }
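The toString() above collects the indices of all set bits into a TreeSet, so they come out sorted. A hedged, self-contained version of the same pattern, assuming a long[] backing array with BITS = 64 and LOG = 6 (the excerpt does not show those declarations):

import java.util.Set;
import java.util.TreeSet;

public class BitWordsToString {
  private static final int BITS = 64;
  private static final int LOG = 6;
  private final long[] a;

  BitWordsToString(long[] words) {
    this.a = words;
  }

  @Override
  public String toString() {
    Set<Integer> set = new TreeSet<>();
    for (int i = 0; i < a.length; i++) {
      for (int j = 0; j < BITS; j++) {
        if (((a[i] >>> j) & 1) == 1) {
          set.add((i << LOG) | j); // absolute bit index: word index * 64 + bit offset
        }
      }
    }
    return set.toString(); // TreeSet keeps the indices sorted
  }

  public static void main(String[] args) {
    // bits 0 and 3 set in word 0, bit 0 set in word 1 -> prints [0, 3, 64]
    System.out.println(new BitWordsToString(new long[] {0b1001L, 1L}));
  }
}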
  // Check that each temporary file either is expected to exist (appears in existingFiles)
  // or no longer exists (on Windows we need to check File.exists() because a directory
  // listing might report a file as existing even when it does not)
  private static void assertFiles(Iterable<String> existingFiles, Set<File> temporaryFiles) {
    for (String filePath : existingFiles) {
      File file = new File(filePath);
      assertTrue(filePath, temporaryFiles.contains(file));
      temporaryFiles.remove(file);
    }

    // removeIf avoids the ConcurrentModificationException that removing inside a for-each
    // over temporaryFiles could trigger.
    temporaryFiles.removeIf(file -> !file.exists());

    assertTrue(temporaryFiles.toString(), temporaryFiles.isEmpty());
  }
Example n. 8
 private String getColumnName(TJoinList joins) {
   int j = joins.size();
   Set<String> columnName = new HashSet<String>();
   for (int i = 0; i < j; i++) {
     Iterator<Table> it = DBSystem.tableList.iterator();
     Table table = null;
     while (it.hasNext()) {
       table = it.next();
       if (table.getName().equalsIgnoreCase(joins.getJoin(i).toString())) {
         Iterator it1 = table.getColumnData().entrySet().iterator();
         while (it1.hasNext()) {
           Map.Entry pairs = (Map.Entry) it1.next();
           columnName.add((String) pairs.getKey());
         }
         break;
       }
     }
   }
   return columnName
       .toString()
       .substring(1, columnName.toString().length() - 1)
       .replaceAll(" ", "")
       .toLowerCase();
 }
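The bracket-stripping at the end of getColumnName (toString() followed by substring and replaceAll) can also be expressed with String.join, which avoids the substring edge case on an empty set. A small sketch with invented column names:

import java.util.LinkedHashSet;
import java.util.Set;

public class JoinColumnNamesSketch {
  public static void main(String[] args) {
    Set<String> columnName = new LinkedHashSet<>();
    columnName.add("Order_ID");
    columnName.add("Customer_Name");

    // Roughly what the toString()/substring/replaceAll chain produces, built directly.
    String joined = String.join(",", columnName).toLowerCase();
    System.out.println(joined); // order_id,customer_name
  }
}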
  public void testDelayedTasksReusePooledThreadIfExecuteAtDifferentTimes()
      throws InterruptedException, ExecutionException {
    final AppScheduledExecutorService service = new AppScheduledExecutorService(getName());
    final List<LogInfo> log = Collections.synchronizedList(new ArrayList<>());
    // pre-start one thread
    Future<?> future = service.submit(EmptyRunnable.getInstance());
    future.get();
    service.setBackendPoolCorePoolSize(1);
    assertEquals(1, service.getBackendPoolExecutorSize());

    int delay = 500;

    ScheduledFuture<?> f1 =
        service.schedule((Runnable) () -> log.add(new LogInfo(1)), delay, TimeUnit.MILLISECONDS);
    ScheduledFuture<?> f2 =
        service.schedule(
            (Runnable) () -> log.add(new LogInfo(2)), delay + 100, TimeUnit.MILLISECONDS);
    ScheduledFuture<?> f3 =
        service.schedule(
            (Runnable) () -> log.add(new LogInfo(3)), delay + 200, TimeUnit.MILLISECONDS);

    assertEquals(1, service.getBackendPoolExecutorSize());

    assertFalse(f1.isDone());
    assertFalse(f2.isDone());
    assertFalse(f3.isDone());

    TimeoutUtil.sleep(delay + 200 + 300);
    assertTrue(f1.isDone());
    assertTrue(f2.isDone());
    assertTrue(f3.isDone());
    assertEquals(1, service.getBackendPoolExecutorSize());

    assertEquals(3, log.size());
    Set<Thread> usedThreads =
        new HashSet<>(
            Arrays.asList(
                log.get(0).currentThread, log.get(1).currentThread, log.get(2).currentThread));
    if (usedThreads.size() != 1) {
      System.err.println(ThreadDumper.dumpThreadsToString());
    }
    assertEquals(usedThreads.toString(), 1, usedThreads.size()); // must be executed in same thread

    service.shutdownAppScheduledExecutorService();
    assertTrue(service.awaitTermination(10, TimeUnit.SECONDS));
  }
  @Test
  public void testLanes() throws Exception {

    List<RouteSegmentResult> routeSegments = fe.searchRoute(ctx, startPoint, endPoint, null);
    Set<Long> reachedSegments = new TreeSet<Long>();
    Assert.assertNotNull(routeSegments);
    int prevSegment = -1;
    for (int i = 0; i <= routeSegments.size(); i++) {
      if (i == routeSegments.size() || routeSegments.get(i).getTurnType() != null) {
        if (prevSegment >= 0) {
          String lanes = getLanesString(routeSegments.get(prevSegment));
          String turn = routeSegments.get(prevSegment).getTurnType().toXmlString();
          String turnLanes = turn + ":" + lanes;
          String name = routeSegments.get(prevSegment).getDescription();

          long segmentId = routeSegments.get(prevSegment).getObject().getId();
          String expectedResult = expectedResults.get(segmentId);
          if (expectedResult != null) {
            if (!Algorithms.objectEquals(expectedResult, turnLanes)
                && !Algorithms.objectEquals(expectedResult, lanes)
                && !Algorithms.objectEquals(expectedResult, turn)) {
              Assert.assertEquals("Segment " + segmentId, expectedResult, turnLanes);
            }
          }

          System.out.println("segmentId: " + segmentId + " description: " + name);
        }
        prevSegment = i;
      }

      if (i < routeSegments.size()) {
        reachedSegments.add(routeSegments.get(i).getObject().getId());
      }
    }

    Set<Long> expectedSegments = expectedResults.keySet();
    for (Long expSegId : expectedSegments) {
      Assert.assertTrue(
          "Expected segment "
              + expSegId
              + " weren't reached in route segments "
              + reachedSegments.toString(),
          reachedSegments.contains(expSegId));
    }
  }
  private Optional<T> doAuthenticate(AdCredentials credentials) throws AuthenticationException {

    DirContext boundContext = bindUser(credentials);
    if (boundContext != null) {
      AdPrincipal principal = getAdPrincipal(boundContext, credentials);
      if (authorized(principal)) {
        return Optional.fromNullable(mapper.map(principal));
      } else {
        // Copy so removeAll does not mutate the configuration's own required-group set.
        Set<String> missingGroups = new HashSet<>(configuration.getRequiredGroups());
        missingGroups.removeAll(principal.getGroupNames());
        LOG.warn(
            String.format(
                "%s authenticated successfully but did not have authority. Missing Groups: %s",
                credentials.getUsername(), missingGroups.toString()));
      }
    }
    return Optional.absent();
  }
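The missing-group computation above copies the required groups before calling removeAll, so the configuration's own set is never mutated. A minimal sketch of that idiom with invented group names:

import java.util.HashSet;
import java.util.Set;

public class MissingGroupsSketch {
  public static void main(String[] args) {
    Set<String> requiredGroups = Set.of("vpn-users", "admins");
    Set<String> userGroups = Set.of("vpn-users");

    // Copy before removeAll so the required-group set itself stays untouched.
    Set<String> missingGroups = new HashSet<>(requiredGroups);
    missingGroups.removeAll(userGroups);

    System.out.println("Missing groups: " + missingGroups); // Missing groups: [admins]
  }
}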
Example n. 12
  @Override
  public void parse() throws IOException {
    if (conservedRegionPath == null
        || !Files.exists(conservedRegionPath)
        || !Files.isDirectory(conservedRegionPath)) {
      throw new IOException(
          "Conservation directory either does not exist, is not a directory, or cannot be read");
    }

    Map<String, Path> files = new HashMap<>();
    String chromosome;
    Set<String> chromosomes = new HashSet<>();

    // Reading all files in phastCons folder
    DirectoryStream<Path> directoryStream =
        Files.newDirectoryStream(conservedRegionPath.resolve("phastCons"));
    for (Path path : directoryStream) {
      chromosome = path.getFileName().toString().split("\\.")[0].replace("chr", "");
      chromosomes.add(chromosome);
      files.put(chromosome + "phastCons", path);
    }

    // Reading all files in phylop folder
    directoryStream = Files.newDirectoryStream(conservedRegionPath.resolve("phylop"));
    for (Path path : directoryStream) {
      chromosome = path.getFileName().toString().split("\\.")[0].replace("chr", "");
      chromosomes.add(chromosome);
      files.put(chromosome + "phylop", path);
    }

    // Now we can iterate over all the chromosomes found and process the files
    logger.debug("Chromosomes found {}", chromosomes.toString());
    for (String chr : chromosomes) {
      logger.debug("Processing chromosome {}, file {}", chr, files.get(chr + "phastCons"));
      processFile(files.get(chr + "phastCons"), "phastCons");

      logger.debug("Processing chromosome {}, file {}", chr, files.get(chr + "phylop"));
      processFile(files.get(chr + "phylop"), "phylop");
    }
  }
 public WeightMatrix getRepresentative(Cluster<WeightMatrix> cluster) {
   Set<WeightMatrix> matrices = cluster.getElements();
   WeightMatrix bestwm = null;
   double bestdist = Double.MAX_VALUE;
   for (WeightMatrix i : matrices) {
     double sum = 0;
     for (WeightMatrix j : matrices) {
       sum += Math.pow(comp.compare(i, j), 2);
     }
     //            System.err.println("  " + i + " : " + sum + " <? " + bestdist);
     sum = sum / matrices.size();
     if (sum < bestdist) {
       bestwm = i;
       bestdist = sum;
     }
   }
   if (bestwm == null) {
     System.err.println("OOPS!" + bestdist);
     System.err.println(matrices.toString());
   }
   return bestwm;
 }
 public String toString() {
   return originals.toString();
 }
  private void discoverAndRunProcs(
      Context context,
      Set<TypeElement> annotationsPresent,
      List<ClassSymbol> topLevelClasses,
      List<PackageSymbol> packageInfoFiles) {
    Map<String, TypeElement> unmatchedAnnotations =
        new HashMap<String, TypeElement>(annotationsPresent.size());

    for (TypeElement a : annotationsPresent) {
      unmatchedAnnotations.put(a.getQualifiedName().toString(), a);
    }

    // Give "*" processors a chance to match
    if (unmatchedAnnotations.size() == 0) unmatchedAnnotations.put("", null);

    DiscoveredProcessors.ProcessorStateIterator psi = discoveredProcs.iterator();
    // TODO: Create proper argument values; need past round information to fill in this
    // constructor. Note that the first round of processing could be the last round if
    // there were parse errors on the initial source files; however, we are not doing
    // processing in that case.

    Set<Element> rootElements = new LinkedHashSet<Element>();
    rootElements.addAll(topLevelClasses);
    rootElements.addAll(packageInfoFiles);
    rootElements = Collections.unmodifiableSet(rootElements);

    RoundEnvironment renv =
        new JavacRoundEnvironment(false, false, rootElements, JavacProcessingEnvironment.this);

    while (unmatchedAnnotations.size() > 0 && psi.hasNext()) {
      ProcessorState ps = psi.next();
      Set<String> matchedNames = new HashSet<String>();
      Set<TypeElement> typeElements = new LinkedHashSet<TypeElement>();

      for (Map.Entry<String, TypeElement> entry : unmatchedAnnotations.entrySet()) {
        String unmatchedAnnotationName = entry.getKey();
        if (ps.annotationSupported(unmatchedAnnotationName)) {
          matchedNames.add(unmatchedAnnotationName);
          TypeElement te = entry.getValue();
          if (te != null) typeElements.add(te);
        }
      }

      if (matchedNames.size() > 0 || ps.contributed) {
        boolean processingResult = callProcessor(ps.processor, typeElements, renv);
        ps.contributed = true;
        ps.removeSupportedOptions(unmatchedProcessorOptions);

        if (printProcessorInfo || verbose) {
          log.printNoteLines(
              "x.print.processor.info",
              ps.processor.getClass().getName(),
              matchedNames.toString(),
              processingResult);
        }

        if (processingResult) {
          unmatchedAnnotations.keySet().removeAll(matchedNames);
        }
      }
    }
    unmatchedAnnotations.remove("");

    if (lint && unmatchedAnnotations.size() > 0) {
      // Remove annotations processed by javac
      unmatchedAnnotations.keySet().removeAll(platformAnnotations);
      if (unmatchedAnnotations.size() > 0) {
        log = Log.instance(context);
        log.warning("proc.annotations.without.processors", unmatchedAnnotations.keySet());
      }
    }

    // Run contributing processors that haven't run yet
    psi.runContributingProcs(renv);

    // Debugging
    if (options.isSet("displayFilerState")) filer.displayState();
  }
 private void warnIfUnmatchedOptions() {
   if (!unmatchedProcessorOptions.isEmpty()) {
     log.warning("proc.unmatched.processor.options", unmatchedProcessorOptions.toString());
   }
 }
  /**
   * Create a sample-contamination map from a file.
   *
   * @param ContaminationFractionFile file containing two tab-delimited columns: SampleID and
   *     Contamination
   * @param defaultContaminationFraction contamination fraction used for samples not listed in the
   *     file
   * @param AvailableSampleIDs Set of Samples of interest (no reason to include every sample in the
   *     file) or null to turn off checking
   * @param logger for logging output
   * @return sample-contamination Map
   */
  public static DefaultHashMap<String, Double> loadContaminationFile(
      File ContaminationFractionFile,
      final Double defaultContaminationFraction,
      final Set<String> AvailableSampleIDs,
      Logger logger)
      throws GATKException {
    DefaultHashMap<String, Double> sampleContamination =
        new DefaultHashMap<String, Double>(defaultContaminationFraction);
    Set<String> nonSamplesInContaminationFile = new HashSet<String>(sampleContamination.keySet());
    try {

      XReadLines reader = new XReadLines(ContaminationFractionFile, true);
      for (String line : reader) {

        if (line.length() == 0) {
          continue;
        }

        StringTokenizer st = new StringTokenizer(line, "\t");

        String[] fields = new String[2];
        try {
          fields[0] = st.nextToken();
          fields[1] = st.nextToken();
        } catch (NoSuchElementException e) {
          throw new UserException.MalformedFile(
              "Contamination file must have exactly two, tab-delimited columns. Offending line:\n"
                  + line);
        }
        if (st.hasMoreTokens()) {
          throw new UserException.MalformedFile(
              "Contamination file must have exactly two, tab-delimited columns. Offending line:\n"
                  + line);
        }

        if (fields[0].length() == 0 || fields[1].length() == 0) {
          throw new UserException.MalformedFile(
              "Contamination file can not have empty strings in either column. Offending line:\n"
                  + line);
        }

        if (sampleContamination.containsKey(fields[0])) {
          throw new UserException.MalformedFile(
              "Contamination file contains duplicate entries for input name " + fields[0]);
        }

        try {
          final Double contamination = Double.valueOf(fields[1]);
          if (contamination < 0 || contamination > 1) {
            throw new UserException.MalformedFile(
                "Contamination file contains unacceptable contamination value (must be 0<=x<=1): "
                    + line);
          }
          if (AvailableSampleIDs == null
              || AvailableSampleIDs.contains(
                  fields[0])) { // only add samples if they are in the sampleSet (or if it is null)
            sampleContamination.put(fields[0], contamination);
          } else {
            nonSamplesInContaminationFile.add(fields[0]);
          }
        } catch (NumberFormatException e) {
          throw new UserException.MalformedFile(
              "Contamination file contains unparsable double in the second field. Offending line: "
                  + line);
        }
      }

      // output to the user info lines telling which samples are in the Contamination File
      if (sampleContamination.size() > 0) {
        logger.info(
            String.format(
                "The following samples were found in the Contamination file and will be processed at the contamination level therein: %s",
                sampleContamination.keySet().toString()));

        // output to the user info lines telling which samples are NOT in the Contamination File
        if (AvailableSampleIDs != null) {
          Set<String> samplesNotInContaminationFile = new HashSet<String>(AvailableSampleIDs);
          samplesNotInContaminationFile.removeAll(sampleContamination.keySet());
          if (samplesNotInContaminationFile.size() > 0)
            logger.info(
                String.format(
                    "The following samples were NOT found in the Contamination file and will be processed at the default contamination level: %s",
                    samplesNotInContaminationFile.toString()));
        }
      }

      // tell the user which entries in the Contamination File were not sample IDs and are ignored
      if (nonSamplesInContaminationFile.size() > 0) {
        logger.info(
            String.format(
                "The following entries were found in the Contamination file but were not SAMPLEIDs. They will be ignored: %s",
                nonSamplesInContaminationFile.toString()));
      }

      return sampleContamination;

    } catch (IOException e) {
      throw new GATKException(
          "I/O Error while reading sample-contamination file "
              + ContaminationFractionFile.getName()
              + ": "
              + e.getMessage());
    }
  }
Example n. 18
 public String toUsageString(String prepend) {
   return prepend + String.format("Accepted values: %s", choices.toString());
 }
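toUsageString relies on Set.toString() for the bracketed list of accepted values. A tiny standalone sketch of the resulting format (the prefix and choice values are invented, and choices is passed as a parameter because the original field is not shown):

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class UsageStringSketch {
  static String toUsageString(String prepend, Set<String> choices) {
    return prepend + String.format("Accepted values: %s", choices);
  }

  public static void main(String[] args) {
    Set<String> choices = new LinkedHashSet<>(List.of("small", "medium", "large"));
    // Prints: --size  Accepted values: [small, medium, large]
    System.out.println(toUsageString("--size  ", choices));
  }
}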
  @Override
  public long generateEnvManifest(long envId, String userId, Map<String, String> platModes) {
    long t1 = System.currentTimeMillis();
    String oldThreadName = Thread.currentThread().getName();
    Thread.currentThread().setName(getProcessingThreadName(oldThreadName, envId));
    List<CmsCIRelation> assemblyRels =
        cmProcessor.getToCIRelations(envId, BASE_REALIZED_IN, null, ACCOUNT_ASSEMBLY);
    CmsCI assembly = null;
    if (assemblyRels.size() > 0) {
      assembly = assemblyRels.get(0).getFromCi();
    } else {
      String error = "Can not get assembly for envId = " + envId;
      logger.error(error);
      throw new TransistorException(CmsError.TRANSISTOR_CANNOT_GET_ASSEMBLY, error);
    }

    CmsCI env = getEnv(envId);

    String nsPath = env.getNsPath() + "/" + env.getCiName() + "/manifest";

    if (hasOpenManifestRelease(nsPath)) {
      String message =
          "This environment has an open release. It needs to be discarded or committed before the design pull: "
              + env.getNsPath()
              + "/"
              + env.getCiName();
      logger.info(message);
      throw new TransistorException(CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE, message);
    }
    Long nsId = trUtil.verifyAndCreateNS(nsPath);
    logger.info("Created nsId " + nsId);
    // Long releaseId = createManifestRelease(nsPath,userId);

    List<CmsCIRelation> designPlatRels =
        cmProcessor.getFromCIRelations(assembly.getCiId(), null, "ComposedOf", CATALOG_PLATFORM);

    // we need to reset all pending-deletion CIs just in case one was added back
    cmProcessor.resetDeletionsByNs(nsPath);

    // check for the edge case where a new design platform has the same name as an old one
    // but a different pack
    long releaseId = checkPlatformPackCompliance(designPlatRels, env, nsPath, userId);
    if (releaseId > 0) {
      // stop any processing and return new release id
      return releaseId;
    }

    final CountDownLatch latch = new CountDownLatch(designPlatRels.size());
    List<Future<DesignCIManifestRfcTouple>> submittedFutureTasks =
        new ArrayList<Future<DesignCIManifestRfcTouple>>();

    Map<Long, CmsRfcCI> design2manifestPlatMap = new HashMap<Long, CmsRfcCI>();
    for (CmsCIRelation platRelation : designPlatRels) {
      String availMode = null;
      if (platModes != null) {
        availMode = platModes.get(String.valueOf(platRelation.getToCiId()));
        if (availMode != null && availMode.length() == 0) {
          availMode = "default";
        }
      }

      Future<DesignCIManifestRfcTouple> future =
          executorService.submit(
              new ManifestRfcProcessorTask(env, nsPath, userId, availMode, latch, platRelation));
      submittedFutureTasks.add(future);
    }

    boolean allPlatsProcessed = false;
    try {
      // wait for all platform processing threads to finish, up to timeoutInMilliSeconds
      allPlatsProcessed = latch.await(timeoutInMilliSeconds, TimeUnit.MILLISECONDS);
      if (!allPlatsProcessed) {
        logger.error(
            "All platforms not processed within timeout duration of " + timeoutInMilliSeconds);
        throw new TransistorException(
            CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE,
            "Failed to pull latest design for all platform within timeout duration of "
                + timeoutInMilliSeconds
                + " millis");
      }
    } catch (InterruptedException ie) {
      for (Future<DesignCIManifestRfcTouple> job : submittedFutureTasks) {
        job.cancel(true);
      }
      throw new TransistorException(
          CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE, "Design pull process interrupted. ");
    }

    for (Future<DesignCIManifestRfcTouple> task : submittedFutureTasks) {

      DesignCIManifestRfcTouple touple;
      try {
        touple = task.get();
        processPlatformRfcs(touple.manifestPlatformRfcs, userId);

        CmsRfcCI manifestPlatformRfc = touple.manifestPlatformRfcs.getManifestPlatformRfc();
        Set<String> missingSrvs = cloudUtil.getMissingServices(manifestPlatformRfc.getCiId());
        if (missingSrvs.size() > 0) {
          logger.info(
              ">>>>> Not all services available for platform: "
                  + manifestPlatformRfc.getCiName()
                  + ", the missing services: "
                  + missingSrvs.toString());
          disablePlatform(manifestPlatformRfc.getCiId(), userId);
        }
        logger.info("New release id = " + manifestPlatformRfc.getReleaseId());
        logger.info("Done working on platform " + manifestPlatformRfc.getNsPath());

        design2manifestPlatMap.put(touple.designPlatCI, manifestPlatformRfc);
      } catch (Exception e) {
        logger.error("Error in pulling latest design for all platforms ", e);
        throw new TransistorException(
            CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE,
            "Error in pulling latest design for all platforms ");
      }
    }

    // now we need to process linkedTo relations
    manifestRfcProcessor.processLinkedTo(design2manifestPlatMap, nsPath, userId);

    // now let's delete old existing platforms that do not exist in the new manifest
    manifestRfcProcessor.processDeletedPlatforms(
        design2manifestPlatMap.values(), env, nsPath, userId);

    // process global variables from design
    manifestRfcProcessor.processGlobalVars(assembly.getCiId(), env, nsPath, userId);
    long t2 = System.currentTimeMillis();
    long envReleaseId = populateParentRelease(env, nsPath);
    logger.info(
        "Pull design for  "
            + nsPath
            + " completed in  "
            + (t2 - t1)
            + " millis (releaseId "
            + envReleaseId
            + ")");
    return envReleaseId;
  }