/**
   * Query all purchases made at a specific store for 3 specific products. This query uses
   * cross-cache joins between {@link DimStore} and {@link DimProduct} objects stored in the
   * {@code 'replicated'} cache and {@link FactPurchase} objects stored in the {@code 'partitioned'} cache.
   *
   * @throws IgniteException If failed.
   */
  private static void queryProductPurchases() {
    IgniteCache<Integer, FactPurchase> factCache = Ignition.ignite().cache(PARTITIONED_CACHE_NAME);

    // All purchases for certain products made at store2.
    // =================================================

    DimProduct p1 = rand(dataProduct.values());
    DimProduct p2 = rand(dataProduct.values());
    DimProduct p3 = rand(dataProduct.values());

    System.out.println(
        "IDs of products [p1=" + p1.getId() + ", p2=" + p2.getId() + ", p3=" + p3.getId() + ']');

    // Create a cross-cache query to get all purchases made at store2
    // for the specified products.
    QueryCursor<Cache.Entry<Integer, FactPurchase>> prodPurchases =
        factCache.query(
            new SqlQuery<Integer, FactPurchase>(
                    FactPurchase.class,
                    "from \""
                        + REPLICATED_CACHE_NAME
                        + "\".DimStore, \""
                        + REPLICATED_CACHE_NAME
                        + "\".DimProduct, "
                        + "\""
                        + PARTITIONED_CACHE_NAME
                        + "\".FactPurchase "
                        + "where DimStore.id=FactPurchase.storeId and DimProduct.id=FactPurchase.productId "
                        + "and DimStore.name=? and DimProduct.id in(?, ?, ?)")
                .setArgs("Store2", p1.getId(), p2.getId(), p3.getId()));

    printQueryResults(
        "All purchases made at store2 for 3 specific products:", prodPurchases.getAll());
  }
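
The rand(...) helper used by the snippets above is not shown here. A minimal sketch, assuming it simply returns a uniformly random element of a non-empty collection (the signature is inferred from the calls above; the body is an assumption):

  // Requires java.util.Collection and java.util.concurrent.ThreadLocalRandom.
  /** Returns a uniformly random element of the given non-empty collection (assumed behavior). */
  private static <T> T rand(Collection<? extends T> c) {
    if (c.isEmpty()) throw new IllegalArgumentException("Collection is empty.");

    int idx = ThreadLocalRandom.current().nextInt(c.size());

    int i = 0;

    for (T t : c) {
      if (i++ == idx) return t;
    }

    throw new IllegalStateException("Unreachable for a stable collection.");
  }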
Example #2
  public void stop() {
    for (Manageable manageable : consumers.values()) {
      manageable.stop();
    }

    for (Manageable manageable : producers.values()) {
      manageable.stop();
    }
  }
  /**
   * Populate the cache with {@code 'facts'}, which in our case are {@link FactPurchase} objects.
   *
   * @param factCache Cache to populate.
   * @throws IgniteException If failed.
   */
  private static void populateFacts(Cache<Integer, FactPurchase> factCache) throws IgniteException {
    for (int i = 0; i < 100; i++) {
      int id = idGen++;

      DimStore store = rand(dataStore.values());
      DimProduct prod = rand(dataProduct.values());

      factCache.put(id, new FactPurchase(id, prod.getId(), store.getId(), (i + 1)));
    }
  }
Example #4
 public void verify() {
   assertEquals(numberOfRequests.size(), requestsNum);
   assertEquals(numberOfJobsMap.keySet().size(), requestsNum);
   for (int num : numberOfJobsMap.values()) {
     assertEquals(num, jobsNumPerRequest);
   }
 }
    /** {@inheritDoc} */
    @Nullable
    @Override
    public Map<String, Collection<?>> run(GridStreamerContext ctx, Collection<Quote> quotes)
        throws GridException {
      GridStreamerWindow win = ctx.window("stage1");

      // Add numbers to window.
      win.enqueueAll(quotes);

      Collection<Quote> polled = win.pollEvictedBatch();

      if (!polled.isEmpty()) {
        Map<String, Bar> map = new HashMap<>();

        for (Quote quote : polled) {
          String symbol = quote.symbol();

          Bar bar = map.get(symbol);

          if (bar == null) map.put(symbol, bar = new Bar(symbol));

          bar.update(quote.price());
        }

        return Collections.<String, Collection<?>>singletonMap(ctx.nextStageName(), map.values());
      }

      return null;
    }
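
The Bar aggregate built in the stage above is not shown. A minimal sketch, assuming it is a simple per-symbol OHLC (open/high/low/close) accumulator and that quote.price() returns a double (both are assumptions):

  /** Minimal OHLC bar for a single symbol (sketch; the exact field set is an assumption). */
  class Bar {
    private final String symbol;
    private double open = Double.NaN;
    private double high = Double.NEGATIVE_INFINITY;
    private double low = Double.POSITIVE_INFINITY;
    private double close;

    Bar(String symbol) {
      this.symbol = symbol;
    }

    /** Folds one quote price into the bar. */
    void update(double price) {
      if (Double.isNaN(open)) open = price;

      high = Math.max(high, price);
      low = Math.min(low, price);
      close = price;
    }

    String symbol() {
      return symbol;
    }
  }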
  public static void shutdown() {
    logger_.info("Shutting down ...");
    synchronized (MessagingService.class) {
      /* Stop listening on any socket */
      for (SelectionKey skey : listenSockets_.values()) {
        SelectorManager.getSelectorManager().cancel(skey);
      }
      listenSockets_.clear();

      /* Shutdown the threads in the EventQueue's */
      messageDeserializationExecutor_.shutdownNow();
      messageSerializerExecutor_.shutdownNow();
      messageDeserializerExecutor_.shutdownNow();
      streamExecutor_.shutdownNow();

      /* shut down the cachetables */
      taskCompletionMap_.shutdown();
      callbackMap_.shutdown();

      /* Interrupt the selector manager thread */
      SelectorManager.getSelectorManager().interrupt();

      poolTable_.clear();
      verbHandlers_.clear();
      bShutdown_ = true;
    }
    logger_.debug("Shutdown invocation complete.");
  }
Example #7
  public void addDiscoveryListener(final DiscoveryListener listener) {
    // get the listener caught up
    for (final URI service : services.values()) {
      executor.execute(new ServiceAddedTask(listener, service));
    }

    listeners.add(listener);
  }
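
ServiceAddedTask is referenced above but not shown. A minimal sketch, assuming a DiscoveryListener/DiscoveryEvent pair in the style of ActiveMQ's discovery API (the event construction is an assumption about this codebase):

  /** Replays an already-discovered service to a newly registered listener (sketch). */
  private static class ServiceAddedTask implements Runnable {
    private final DiscoveryListener listener;
    private final URI service;

    ServiceAddedTask(DiscoveryListener listener, URI service) {
      this.listener = listener;
      this.service = service;
    }

    @Override
    public void run() {
      // Assumption: the listener is notified with a DiscoveryEvent built from the service URI.
      listener.onServiceAdd(new DiscoveryEvent(service.toString()));
    }
  }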
Example #8
 public void addDiscoveryAgent(final DiscoveryAgent agent) {
   agents.add(agent);
   agent.setDiscoveryListener(this);
   for (final URI uri : registered.values()) {
     try {
       agent.registerService(uri);
     } catch (Exception e) {
       // Ignore
     }
   }
 }
Example #9
 /** Stop the executors and clean up memory on registered CUObjects. */
 public static void stop() {
   synchronized (cudaEngines) {
     cuCtxSynchronizeAll();
     for (Iterator<CudaEngine> iterator = cudaEngines.values().iterator(); iterator.hasNext(); ) {
       iterator.next().shutdown();
       iterator.remove();
     }
     //		for (CudaEngine ce : cudaEngines.values()) {
     //			ce.shutdown();
     //		}
   }
 }
  @Override
  public void close() throws IOException {
    // close all the clients before throwing anything
    IOException clientException = null;
    for (RangeClient client : clients.values()) {
      try {
        client.close();
      } catch (IOException e) {
        clientException = e;
      }
    }

    if (clientException != null) throw clientException;
  }
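
The close() above remembers only the last IOException and rethrows it after every client has been closed, so earlier failures are lost. A common alternative keeps the first exception and attaches later ones via Throwable.addSuppressed; a sketch over the same clients map (an alternative pattern, not the code above):

  @Override
  public void close() throws IOException {
    IOException first = null;

    for (RangeClient client : clients.values()) {
      try {
        client.close();
      } catch (IOException e) {
        if (first == null) first = e;
        else first.addSuppressed(e); // Keep later failures attached to the first one.
      }
    }

    if (first != null) throw first;
  }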
 public static ConnectionStatistics[] getPoolStatistics() {
   Set<ConnectionStatistics> stats = new HashSet<ConnectionStatistics>();
   Iterator<TcpConnectionManager> it = poolTable_.values().iterator();
   while (it.hasNext()) {
     TcpConnectionManager cp = it.next();
     ConnectionStatistics cs =
         new ConnectionStatistics(
             cp.getLocalEndPoint(),
             cp.getRemoteEndPoint(),
             cp.getPoolSize(),
             cp.getConnectionsInUse());
     stats.add(cs);
   }
   return stats.toArray(new ConnectionStatistics[0]);
 }
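
A hedged usage sketch for getPoolStatistics(), assuming the method lives on the MessagingService class shown earlier (it reads the same poolTable_ that shutdown() clears); no particular ConnectionStatistics output format is assumed:

  // Print a snapshot of the TCP connection pools.
  for (ConnectionStatistics cs : MessagingService.getPoolStatistics()) {
    System.out.println(cs);
  }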
Example #12
  /**
   * Starts a new grid node.
   *
   * @param gridName Name of the new node.
   * @param springCfg File with the Spring configuration to use for this node.
   * @return Grid instance local to the new node (see {@link GridGain#start(GridConfiguration)}).
   * @throws Exception If the node failed to start.
   */
  protected Grid startNode(String gridName, File springCfg) throws Exception {
    assert springCfg != null;

    ListableBeanFactory springCtx =
        new FileSystemXmlApplicationContext("file:///" + springCfg.getAbsolutePath());

    Map cfgMap = springCtx.getBeansOfType(GridConfiguration.class);

    assert cfgMap != null;
    assert !cfgMap.isEmpty();

    GridConfiguration cfg = (GridConfiguration) cfgMap.values().iterator().next();

    cfg.setGridName(gridName + "-" + getNextNodeNum());

    return G.start(cfg);
  }
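
A usage sketch for startNode(); the grid name and configuration file path are hypothetical:

  // Start a node from a Spring XML file (the path below is made up for illustration).
  Grid grid = startNode("example-grid", new File("config/example-node.xml"));

  System.out.println("Started node: " + grid.name());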
Example #13
 public static synchronized void cuCtxSynchronizeAll() {
   for (CudaEngine ce : cudaEngines.values()) {
     ce.cuCtxSynchronize();
   }
   // List<Future<Void>> futures = new ArrayList<Future<Void>>();
   // for (CudaEngine ce : executors) {
   // futures.add(ce.exe.submit(new Callable<Void>() {
   // @Override
   // public Void call() throws Exception {
   // JCudaDriver.cuCtxSynchronize();
   // return null;
   // }
   //
   // }));
   // }
   // for (Future<Void> future : futures) {
   // try {
   // future.get();
   // } catch (InterruptedException | ExecutionException e) {
   // // TODO Auto-generated catch block
   // e.printStackTrace();
   // }
   // }
 }
Example #14
 public Collection<LoopEx> loops() {
   return lirLoopToEx.values();
 }
 /**
  * Gets the collection of data configurations specified by the user.
  *
  * @return Collection of data configurations (possibly empty).
  */
 public Collection<GridClientDataConfiguration> getDataConfigurations() {
   return dataCfgs.values();
 }
  /**
   * Remove a particular entry from the trash directory or a subdirectory.
   *
   * @param parentId Parent ID.
   * @param id Entry id.
   * @throws IgniteCheckedException If delete failed for some reason.
   */
  private void deleteDirectory(IgniteUuid parentId, IgniteUuid id) throws IgniteCheckedException {
    assert parentId != null;
    assert id != null;

    while (true) {
      IgfsFileInfo info = meta.info(id);

      if (info != null) {
        assert info.isDirectory();

        Map<String, IgfsListingEntry> listing = info.listing();

        if (listing.isEmpty()) return; // Directory is empty.

        Map<String, IgfsListingEntry> delListing;

        if (listing.size() <= MAX_DELETE_BATCH) delListing = listing;
        else {
          delListing = new HashMap<>(MAX_DELETE_BATCH, 1.0f);

          int i = 0;

          for (Map.Entry<String, IgfsListingEntry> entry : listing.entrySet()) {
            delListing.put(entry.getKey(), entry.getValue());

            if (++i == MAX_DELETE_BATCH) break;
          }
        }

        GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<>();

        // Delegate to child folders.
        for (IgfsListingEntry entry : delListing.values()) {
          if (!cancelled) {
            if (entry.isDirectory()) deleteDirectory(id, entry.fileId());
            else {
              IgfsFileInfo fileInfo = meta.info(entry.fileId());

              if (fileInfo != null) {
                assert fileInfo.isFile();

                fut.add(data.delete(fileInfo));
              }
            }
          } else return;
        }

        fut.markInitialized();

        // Wait for data cache to delete values before clearing meta cache.
        try {
          fut.get();
        } catch (IgniteFutureCancelledCheckedException ignore) {
          // This future can be cancelled only due to IGFS shutdown.
          cancelled = true;

          return;
        }

        // Actual delete of folder content.
        Collection<IgniteUuid> delIds = meta.delete(id, delListing);

        if (delListing == listing && delListing.size() == delIds.size())
          break; // All entries were deleted.
      } else break; // Entry was deleted concurrently.
    }
  }
  @Override
  public long generateEnvManifest(long envId, String userId, Map<String, String> platModes) {
    long t1 = System.currentTimeMillis();
    String oldThreadName = Thread.currentThread().getName();
    Thread.currentThread().setName(getProcessingThreadName(oldThreadName, envId));
    List<CmsCIRelation> assemblyRels =
        cmProcessor.getToCIRelations(envId, BASE_REALIZED_IN, null, ACCOUNT_ASSEMBLY);
    CmsCI assembly = getAssembly(envId, assemblyRels);
    CmsCI env = getEnv(envId);
    String nsPath = getManifestNsPath(env);
    // check for openRelease
    check4OpenRelease(env, nsPath);

    Long nsId = trUtil.verifyAndCreateNS(nsPath);
    logger.info("Created nsId " + nsId);

    List<CmsCIRelation> designPlatRels =
        cmProcessor.getFromCIRelations(assembly.getCiId(), null, "ComposedOf", CATALOG_PLATFORM);

    // We need to reset all pending-deletion CIs, just in case one was added back.
    cmProcessor.resetDeletionsByNs(nsPath);

    // Check for the edge case where a new design platform has the same name as an old one but a
    // different pack.
    long releaseId = checkPlatformPackCompliance(designPlatRels, env, nsPath, userId);
    if (releaseId > 0) {
      // stop any processing and return new release id
      return releaseId;
    }

    final CountDownLatch latch = new CountDownLatch(designPlatRels.size());
    List<Future<DesignCIManifestRfcTouple>> submittedFutureTasks = new ArrayList<>();

    for (CmsCIRelation platRelation : designPlatRels) {
      String availMode = getPlatformAvailabiltyMode(platModes, platRelation);
      Future<DesignCIManifestRfcTouple> future =
          executorService.submit(
              new ManifestRfcProcessorTask(env, nsPath, userId, availMode, latch, platRelation));
      submittedFutureTasks.add(future);
    }

    boolean allPlatsProcessed;
    try {
      // Wait for all platform processing threads to finish, with a timeout of 10 mins.
      allPlatsProcessed = latch.await(timeoutInMilliSeconds, TimeUnit.MILLISECONDS);
      if (!allPlatsProcessed) {
        logger.error(
            "All platforms not processed within timeout duration of " + timeoutInMilliSeconds);
        throw new TransistorException(
            CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE,
            "Failed to pull latest design for all platform within timeout duration of "
                + timeoutInMilliSeconds
                + " millis");
      }
    } catch (InterruptedException ie) {
      for (Future<DesignCIManifestRfcTouple> job : submittedFutureTasks) {
        job.cancel(true);
      }
      throw new TransistorException(
          CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE, "Design pull process interrupted. ");
    }
    Map<Long, CmsRfcCI> design2manifestPlatMap = new HashMap<>();
    for (Future<DesignCIManifestRfcTouple> task : submittedFutureTasks) {
      DesignCIManifestRfcTouple touple;
      try {
        touple = task.get();
        processPlatformRfcs(touple.manifestPlatformRfcs, userId);
        CmsRfcCI manifestPlatformRfc = touple.manifestPlatformRfcs.getManifestPlatformRfc();
        logger.info(
            "Finished working on ="
                + manifestPlatformRfc.getNsPath()
                + " release id = "
                + manifestPlatformRfc.getReleaseId());
        design2manifestPlatMap.put(touple.designPlatCI, manifestPlatformRfc);
      } catch (Exception e) {
        logger.error("Error in pulling latest design for all platforms ", e);
        throw new TransistorException(
            CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE,
            "Error in pulling latest design for all platforms ");
      }
    }

    check4MissingServices(design2manifestPlatMap);

    // now we need to process linkedTo relations
    manifestRfcProcessor.processLinkedTo(design2manifestPlatMap, nsPath, userId);

    // Now let's delete old existing platforms that do not exist in the new manifest.
    manifestRfcProcessor.processDeletedPlatforms(
        design2manifestPlatMap.values(), env, nsPath, userId);

    // process global variables from design
    manifestRfcProcessor.processGlobalVars(assembly.getCiId(), env, nsPath, userId);
    long t2 = System.currentTimeMillis();
    long envReleaseId = populateParentRelease(env, nsPath);
    logger.info(
        "Pull design for  "
            + nsPath
            + " completed in  "
            + (t2 - t1)
            + " millis (releaseId "
            + envReleaseId
            + ")");
    return envReleaseId;
  }
Example #18
 /** Free the memory of registered CUObjects on all CUDA engines. */
 public static synchronized void freeMemory() {
   for (CudaEngine ce : cudaEngines.values()) {
     ce.freeCUObjectsMemory();
   }
 }
  public int reduce(Optimizer optimizer, int expMinCluster, int expMaxCluster)
      throws ExecutionException, InterruptedException {
    // 1. Start from the provided min/max and compute the order of magnitude (oom) of the range size.
    int mag = computeMagnitude(expMaxCluster - expMinCluster + 1);
    // 2. Set the search hop: if oom is 0, start with a hop of 1; otherwise 10^oom.
    int hop = mag == 0 ? 1 : (int) Math.pow(10.0, mag);
    // 3. Find the meaningful range, i.e., the real min and max cluster numbers for which a
    // real-valued optimisation score can be computed.
    int[] range = computeClusterNumberRange(expMinCluster, expMaxCluster, hop, optimizer);
    if (range[0] == -1 && range[1] != -1) {
      System.err.println(
          "[!]No meaningful lower range. Only 1 possible cluster number:" + range[1]);
      return range[1];
    } else if (range[1] == -1 && range[0] != -1) {
      System.err.println(
          "[!]No meaningful upper range. Only 1 possible cluster number:" + range[0]);
      return range[0];
    } else if (range[0] == -1 && range[1] == -1) {
      System.err.println("[!]No meaningful cluster number, cannot cluster");
      return -1;
    }
    System.out.println(
        "[]Input range:"
            + expMinCluster
            + "-"
            + expMaxCluster
            + ", Real range:"
            + range[0]
            + "-"
            + range[1]);
    expMinCluster = range[0] < range[1] ? range[0] : range[1];
    expMaxCluster = range[1] > range[0] ? range[1] : range[0];
    // 4. reset hop based on new range
    mag = computeMagnitude(expMaxCluster - expMinCluster + 1);
    hop = mag == 0 ? 1 : (int) Math.pow(10.0, mag);
    double currentMaxOptimizationScore = 0;
    // TODO: for location, why min > max; select range based on best interval, is it correct?
    int current_iteration = 0;
    while (current_iteration < maxIteration) {
      current_iteration++;
      currentMaxOptimizationScore = 0;
      // 5. compute optimization scores based on the search space defined by expMinCluster,
      // expMaxCluster, and range
      Map<Integer, Double> triedSplitsAndScores =
          computeOptimizationScores(expMinCluster, expMaxCluster, hop, optimizer);
      if (triedSplitsAndScores != null) {
        // A null result means we are already using the minimum hop but no meaningful optimisation
        // score could be computed within the range (TODO can this really happen?); in that case
        // the loop breaks below. Otherwise, work out the real hop and the max score.
        List<Integer> intervals = new ArrayList<>(triedSplitsAndScores.keySet());
        Collections.sort(intervals);
        int realHop = -1, lowerInterval = -1;
        for (int i : intervals) {
          if (lowerInterval != -1 && realHop == -1) realHop = Math.abs(i - lowerInterval);
          lowerInterval = i;
          Double score = triedSplitsAndScores.get(i);
          if (!Double.isInfinite(score)
              && !Double.isNaN(score)
              && score != Double.MAX_VALUE
              && score > currentMaxOptimizationScore) currentMaxOptimizationScore = score;
        }

        double global_max = 0.0;
        for (Double d : scores.values()) {
          if (!Double.isInfinite(d) && !Double.isNaN(d) && d > global_max) global_max = d;
        }

        if (stop(realHop, currentMaxOptimizationScore, global_max)) break;
        // Found a new max score; reset the iterations to try.
        if (currentMaxOptimizationScore > global_max) current_iteration = 0;

        int newHop = reduceHop(realHop);
        hop = newHop;
      } else break;
    }

    int bestSplit = -1;
    double maxScore = 0;
    for (Map.Entry<Integer, Double> entry : scores.entrySet()) {
      Double score = entry.getValue();
      if (!score.isNaN() && !score.isInfinite() && score > maxScore) {
        maxScore = score;
        bestSplit = entry.getKey();
      }
    }
    System.out.println("[]Final Best=" + bestSplit + ", footprint:" + scores);
    return bestSplit;
  }
 private boolean invalid(Map<Integer, Double> scores) {
   for (Double d : scores.values()) {
     if (!d.isInfinite() && !d.isNaN()) return false;
   }
   return true;
 }
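
computeMagnitude(...) sizes the initial hop above but is not shown. One plausible reading, assuming it returns the order of magnitude of the range size (floor of log10), so a range of 1-9 gives 0, 10-99 gives 1, and so on; the body below is an assumption:

  /** Order of magnitude of a positive count: 1-9 -> 0, 10-99 -> 1, 100-999 -> 2 (sketch). */
  private static int computeMagnitude(int n) {
    if (n <= 0) return 0; // Defensive default for degenerate ranges.

    return (int) Math.floor(Math.log10(n));
  }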
Example #21
 public Set<URI> getServices() {
   return new HashSet<URI>(services.values());
 }
  @Override
  public long generateEnvManifest(long envId, String userId, Map<String, String> platModes) {
    long t1 = System.currentTimeMillis();
    String oldThreadName = Thread.currentThread().getName();
    Thread.currentThread().setName(getProcessingThreadName(oldThreadName, envId));
    List<CmsCIRelation> assemblyRels =
        cmProcessor.getToCIRelations(envId, BASE_REALIZED_IN, null, ACCOUNT_ASSEMBLY);
    CmsCI assembly = null;
    if (assemblyRels.size() > 0) {
      assembly = assemblyRels.get(0).getFromCi();
    } else {
      String error = "Can not get assembly for envId = " + envId;
      logger.error(error);
      throw new TransistorException(CmsError.TRANSISTOR_CANNOT_GET_ASSEMBLY, error);
    }

    CmsCI env = getEnv(envId);

    String nsPath = env.getNsPath() + "/" + env.getCiName() + "/manifest";

    if (hasOpenManifestRelease(nsPath)) {
      String message =
          "This environment has an open release. It needs to be discarded or committed before the design pull: "
              + env.getNsPath()
              + "/"
              + env.getCiName();
      logger.info(message);
      throw new TransistorException(CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE, message);
    }
    Long nsId = trUtil.verifyAndCreateNS(nsPath);
    logger.info("Created nsId " + nsId);
    // Long releaseId = createManifestRelease(nsPath,userId);

    List<CmsCIRelation> designPlatRels =
        cmProcessor.getFromCIRelations(assembly.getCiId(), null, "ComposedOf", CATALOG_PLATFORM);

    // We need to reset all pending-deletion CIs, just in case one was added back.
    cmProcessor.resetDeletionsByNs(nsPath);

    // Check for the edge case where a new design platform has the same name as an old one but a
    // different pack.
    long releaseId = checkPlatformPackCompliance(designPlatRels, env, nsPath, userId);
    if (releaseId > 0) {
      // stop any processing and return new release id
      return releaseId;
    }

    final CountDownLatch latch = new CountDownLatch(designPlatRels.size());
    List<Future<DesignCIManifestRfcTouple>> submittedFutureTasks =
        new ArrayList<Future<DesignCIManifestRfcTouple>>();

    Map<Long, CmsRfcCI> design2manifestPlatMap = new HashMap<Long, CmsRfcCI>();
    for (CmsCIRelation platRelation : designPlatRels) {
      String availMode = null;
      if (platModes != null) {
        availMode = platModes.get(String.valueOf(platRelation.getToCiId()));
        if (availMode != null && availMode.length() == 0) {
          availMode = "default";
        }
      }

      Future<DesignCIManifestRfcTouple> future =
          executorService.submit(
              new ManifestRfcProcessorTask(env, nsPath, userId, availMode, latch, platRelation));
      submittedFutureTasks.add(future);
    }

    boolean allPlatsProcessed = false;
    try {
      // Wait for all platform processing threads to finish, with a timeout of 10 mins.
      allPlatsProcessed = latch.await(timeoutInMilliSeconds, TimeUnit.MILLISECONDS);
      if (!allPlatsProcessed) {
        logger.error(
            "All platforms not processed within timeout duration of " + timeoutInMilliSeconds);
        throw new TransistorException(
            CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE,
            "Failed to pull latest design for all platform within timeout duration of "
                + timeoutInMilliSeconds
                + " millis");
      }
    } catch (InterruptedException ie) {
      for (Future<DesignCIManifestRfcTouple> job : submittedFutureTasks) {
        job.cancel(true);
      }
      throw new TransistorException(
          CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE, "Design pull process interrupted. ");
    }

    for (Future<DesignCIManifestRfcTouple> task : submittedFutureTasks) {

      DesignCIManifestRfcTouple touple;
      try {
        touple = task.get();
        processPlatformRfcs(touple.manifestPlatformRfcs, userId);

        CmsRfcCI manifestPlatformRfc = touple.manifestPlatformRfcs.getManifestPlatformRfc();
        Set<String> missingSrvs = cloudUtil.getMissingServices(manifestPlatformRfc.getCiId());
        if (missingSrvs.size() > 0) {
          logger.info(
              ">>>>> Not all services available for platform: "
                  + manifestPlatformRfc.getCiName()
                  + ", the missing services: "
                  + missingSrvs.toString());
          disablePlatform(manifestPlatformRfc.getCiId(), userId);
        }
        logger.info("New release id = " + manifestPlatformRfc.getReleaseId());
        logger.info("Done working on platform " + manifestPlatformRfc.getNsPath());

        design2manifestPlatMap.put(touple.designPlatCI, manifestPlatformRfc);
      } catch (Exception e) {
        logger.error("Error in pulling latest design for all platforms ", e);
        throw new TransistorException(
            CmsError.TRANSISTOR_OPEN_MANIFEST_RELEASE,
            "Error in pulling latest design for all platforms ");
      }
    }

    // now we need to process linkedTo relations
    manifestRfcProcessor.processLinkedTo(design2manifestPlatMap, nsPath, userId);

    // Now let's delete old existing platforms that do not exist in the new manifest.
    manifestRfcProcessor.processDeletedPlatforms(
        design2manifestPlatMap.values(), env, nsPath, userId);

    // process global variables from design
    manifestRfcProcessor.processGlobalVars(assembly.getCiId(), env, nsPath, userId);
    long t2 = System.currentTimeMillis();
    long envReleaseId = populateParentRelease(env, nsPath);
    logger.info(
        "Pull design for  "
            + nsPath
            + " completed in  "
            + (t2 - t1)
            + " millis (releaseId "
            + envReleaseId
            + ")");
    return envReleaseId;
  }
  /**
   * See <a href="http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html">
   * http://e-docs.bea.com/wls/docs100/javadocs/weblogic/common/T3StartupDef.html</a> for more
   * information.
   *
   * @param str Virtual name by which the class is registered as a {@code startupClass} in the
   *     {@code config.xml} file
   * @param params A hashtable that is made up of the name-value pairs supplied from the {@code
   *     startupArgs} property
   * @return Result string (log message).
   * @throws Exception Thrown if error occurred.
   */
  @SuppressWarnings({"unchecked", "CatchGenericClass"})
  @Override
  public String startup(String str, Hashtable params) throws Exception {
    GridLogger log = new GridJavaLogger(LoggingHelper.getServerLogger());

    cfgFile = (String) params.get(cfgFilePathParam);

    if (cfgFile == null) {
      throw new IllegalArgumentException("Failed to read property: " + cfgFilePathParam);
    }

    String workMgrName = (String) params.get(workMgrParam);

    URL cfgUrl = U.resolveGridGainUrl(cfgFile);

    if (cfgUrl == null)
      throw new ServerLifecycleException(
          "Failed to find Spring configuration file (path provided should be "
              + "either absolute, relative to GRIDGAIN_HOME, or relative to META-INF folder): "
              + cfgFile);

    GenericApplicationContext springCtx;

    try {
      springCtx = new GenericApplicationContext();

      XmlBeanDefinitionReader xmlReader = new XmlBeanDefinitionReader(springCtx);

      xmlReader.loadBeanDefinitions(new UrlResource(cfgUrl));

      springCtx.refresh();
    } catch (BeansException e) {
      throw new ServerLifecycleException(
          "Failed to instantiate Spring XML application context: " + e.getMessage(), e);
    }

    Map cfgMap;

    try {
      // Note: Spring is not generics-friendly.
      cfgMap = springCtx.getBeansOfType(GridConfiguration.class);
    } catch (BeansException e) {
      throw new ServerLifecycleException(
          "Failed to instantiate bean [type="
              + GridConfiguration.class
              + ", err="
              + e.getMessage()
              + ']',
          e);
    }

    if (cfgMap == null)
      throw new ServerLifecycleException(
          "Failed to find a single grid factory configuration in: " + cfgUrl);

    if (cfgMap.isEmpty())
      throw new ServerLifecycleException("Can't find grid factory configuration in: " + cfgUrl);

    try {
      ExecutorService execSvc = null;

      MBeanServer mbeanSrv = null;

      for (GridConfiguration cfg : (Collection<GridConfiguration>) cfgMap.values()) {
        assert cfg != null;

        GridConfigurationAdapter adapter = new GridConfigurationAdapter(cfg);

        // Set logger.
        if (cfg.getGridLogger() == null) adapter.setGridLogger(log);

        if (cfg.getExecutorService() == null) {
          if (execSvc == null)
            execSvc =
                workMgrName != null
                    ? new GridThreadWorkManagerExecutor(workMgrName)
                    : new GridThreadWorkManagerExecutor(J2EEWorkManager.getDefault());

          adapter.setExecutorService(execSvc);
        }

        if (cfg.getMBeanServer() == null) {
          if (mbeanSrv == null) {
            InitialContext ctx = null;

            try {
              ctx = new InitialContext();

              mbeanSrv = (MBeanServer) ctx.lookup("java:comp/jmx/runtime");
            } catch (Exception e) {
              throw new IllegalArgumentException(
                  "MBean server was not provided and failed to obtain " + "Weblogic MBean server.",
                  e);
            } finally {
              if (ctx != null) ctx.close();
            }
          }

          adapter.setMBeanServer(mbeanSrv);
        }

        Grid grid = G.start(adapter, springCtx);

        // Test if grid is not null - started properly.
        if (grid != null) gridNames.add(grid.name());
      }

      return getClass().getSimpleName() + " started successfully.";
    } catch (GridException e) {
      // Stop started grids only.
      for (String name : gridNames) G.stop(name, true);

      throw new ServerLifecycleException("Failed to start GridGain.", e);
    }
  }