Code example #1
 @NotNull
 Set<VirtualFile> getAllRoots() {
   LinkedHashSet<VirtualFile> result = ContainerUtil.newLinkedHashSet();
   result.addAll(classAndSourceRoots);
   result.addAll(contentRootOf.keySet());
   result.addAll(excludedFromLibraries.keySet());
   result.addAll(excludedFromModule.keySet());
   result.addAll(excludedFromProject);
   return result;
 }
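
This works because LinkedHashSet drops duplicates across the merged collections while keeping first-insertion order, so the roots come out in a stable, predictable order. A minimal standalone sketch of that property, using plain JDK collections and made-up root names instead of ContainerUtil and VirtualFile:

  import java.util.Arrays;
  import java.util.LinkedHashSet;
  import java.util.List;
  import java.util.Set;

  public class UnionOrderDemo {
    public static void main(String[] args) {
      List<String> classRoots = Arrays.asList("lib.jar", "src");
      List<String> contentRoots = Arrays.asList("src", "resources");

      // "src" appears in both lists but is kept only once, at its
      // first-seen position; iteration order is insertion order.
      Set<String> all = new LinkedHashSet<>();
      all.addAll(classRoots);
      all.addAll(contentRoots);

      System.out.println(all); // [lib.jar, src, resources]
    }
  }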
Code example #2
File: StramClient.java Project: andyperlitch/Apex
  public static LinkedHashSet<String> findJars(LogicalPlan dag, Class<?>[] defaultClasses) {
    List<Class<?>> jarClasses = new ArrayList<Class<?>>();

    for (String className : dag.getClassNames()) {
      try {
        Class<?> clazz = Thread.currentThread().getContextClassLoader().loadClass(className);
        jarClasses.add(clazz);
      } catch (ClassNotFoundException e) {
        throw new IllegalArgumentException("Failed to load class " + className, e);
      }
    }

    for (Class<?> clazz : Lists.newArrayList(jarClasses)) {
      // process class and super classes (super does not require deploy annotation)
      for (Class<?> c = clazz; c != Object.class && c != null; c = c.getSuperclass()) {
        // LOG.debug("checking " + c);
        jarClasses.add(c);
        jarClasses.addAll(Arrays.asList(c.getInterfaces()));
      }
    }

    jarClasses.addAll(Arrays.asList(defaultClasses));

    if (dag.isDebug()) {
      LOG.debug("Deploy dependencies: {}", jarClasses);
    }

    LinkedHashSet<String> localJarFiles = new LinkedHashSet<String>(); // avoid duplicates
    HashMap<String, String> sourceToJar = new HashMap<String, String>();

    for (Class<?> jarClass : jarClasses) {
      if (jarClass.getProtectionDomain().getCodeSource() == null) {
        // system class
        continue;
      }
      String sourceLocation =
          jarClass.getProtectionDomain().getCodeSource().getLocation().toString();
      String jar = sourceToJar.get(sourceLocation);
      if (jar == null) {
        // don't create jar file from folders multiple times
        jar = JarFinder.getJar(jarClass);
        sourceToJar.put(sourceLocation, jar);
        LOG.debug("added sourceLocation {} as {}", sourceLocation, jar);
      }
      if (jar == null) {
        throw new AssertionError("Cannot resolve jar file for " + jarClass);
      }
      localJarFiles.add(jar);
    }

    String libJarsPath = dag.getValue(LogicalPlan.LIBRARY_JARS);
    if (!StringUtils.isEmpty(libJarsPath)) {
      String[] libJars = StringUtils.splitByWholeSeparator(libJarsPath, LIB_JARS_SEP);
      localJarFiles.addAll(Arrays.asList(libJars));
    }

    LOG.info("Local jar file dependencies: " + localJarFiles);

    return localJarFiles;
  }
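
The dedup logic above keys on each class's code source, which is null for classes on the boot classpath and otherwise points at the jar or directory the class was loaded from. A quick demo of what that lookup returns (not part of the project, just plain JDK calls):

  import java.security.CodeSource;

  public class CodeSourceDemo {
    public static void main(String[] args) {
      // Application classes report their jar/directory; bootstrap
      // classes (e.g. java.lang.String) have no CodeSource.
      CodeSource own = CodeSourceDemo.class.getProtectionDomain().getCodeSource();
      CodeSource system = String.class.getProtectionDomain().getCodeSource();
      System.out.println(own == null ? "system class" : own.getLocation());
      System.out.println(system); // null
    }
  }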
Code example #3
File: Main.java Project: akalinkov/JavaPVT
  public static void main(String[] args) {

    Scanner input = new Scanner(System.in);
    List<String> list = new ArrayList<>();

    System.out.print("Enter text to add to ArrayList: ");
    String str = input.nextLine();
    // adding text to list:
    while (true) {
      if (str.equals("exit")) break;
      list.add(str);
      System.out.print("Enter text to add to ArrayList: ");
      str = input.nextLine();
    }

    // delete duplicates
    LinkedHashSet<String> set = new LinkedHashSet<>();
    set.addAll(list);
    list.clear();
    list.addAll(set);

    // print the resulting list
    System.out.println("All duplicates are deleted:");
    for (String elem : list) {
      System.out.println(elem);
    }
  }
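
The clear-and-refill round trip through the set can be collapsed into the LinkedHashSet copy constructor, which removes duplicates while keeping the first occurrence of each element. A hedged one-method equivalent (not from the project):

  import java.util.ArrayList;
  import java.util.LinkedHashSet;
  import java.util.List;

  // Equivalent to the set.addAll / list.clear / list.addAll sequence above.
  static List<String> dedupePreservingOrder(List<String> input) {
    return new ArrayList<>(new LinkedHashSet<>(input));
  }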
Code example #4
  public static LinkedHashSet<String> sortMatching(
      final PrefixMatcher matcher, Collection<String> _names) {
    ProgressManager.checkCanceled();

    List<String> sorted = new ArrayList<String>();
    for (String name : _names) {
      if (matcher.prefixMatches(name)) {
        sorted.add(name);
      }
    }

    ProgressManager.checkCanceled();
    Collections.sort(sorted, String.CASE_INSENSITIVE_ORDER);
    ProgressManager.checkCanceled();

    LinkedHashSet<String> result = new LinkedHashSet<String>();
    for (String name : sorted) {
      if (matcher.isStartMatch(name)) {
        result.add(name);
      }
    }

    ProgressManager.checkCanceled();

    result.addAll(sorted);
    return result;
  }
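
The trailing result.addAll(sorted) is not redundant: the start matches were inserted first, and re-adding an element that is already in a LinkedHashSet does not move it, so all start matches end up ahead of the remaining prefix matches. A small sketch of that re-insertion rule:

  import java.util.Arrays;
  import java.util.LinkedHashSet;

  public class ReinsertDemo {
    public static void main(String[] args) {
      LinkedHashSet<String> set = new LinkedHashSet<>(Arrays.asList("apple", "apricot"));
      // Re-adding "apple" leaves it in place; only "banana" is appended.
      set.addAll(Arrays.asList("banana", "apple"));
      System.out.println(set); // [apple, apricot, banana]
    }
  }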
Code example #5
File: SerDeserTest.java Project: Paytm-Sam/cassandra
  public void udtSerDeserTest(int version) throws Exception {
    ListType<?> lt = ListType.getInstance(Int32Type.instance, true);
    SetType<?> st = SetType.getInstance(UTF8Type.instance, true);
    MapType<?, ?> mt = MapType.getInstance(UTF8Type.instance, LongType.instance, true);

    UserType udt =
        new UserType(
            "ks",
            bb("myType"),
            Arrays.asList(bb("f1"), bb("f2"), bb("f3"), bb("f4")),
            Arrays.asList(LongType.instance, lt, st, mt));

    Map<ColumnIdentifier, Term.Raw> value = new HashMap<>();
    value.put(ci("f1"), lit(42));
    value.put(ci("f2"), new Lists.Literal(Arrays.<Term.Raw>asList(lit(3), lit(1))));
    value.put(ci("f3"), new Sets.Literal(Arrays.<Term.Raw>asList(lit("foo"), lit("bar"))));
    value.put(
        ci("f4"),
        new Maps.Literal(
            Arrays.<Pair<Term.Raw, Term.Raw>>asList(
                Pair.<Term.Raw, Term.Raw>create(lit("foo"), lit(24)),
                Pair.<Term.Raw, Term.Raw>create(lit("bar"), lit(12)))));

    UserTypes.Literal u = new UserTypes.Literal(value);
    Term t = u.prepare("ks", columnSpec("myValue", udt));

    QueryOptions options = QueryOptions.DEFAULT;
    if (version == 2)
      options =
          QueryOptions.fromProtocolV2(ConsistencyLevel.ONE, Collections.<ByteBuffer>emptyList());
    else if (version != 3) throw new AssertionError("Invalid protocol version for test");

    ByteBuffer serialized = t.bindAndGet(options);

    ByteBuffer[] fields = udt.split(serialized);

    assertEquals(4, fields.length);

    assertEquals(bytes(42L), fields[0]);

    // Note that no matter what protocol version was used in bindAndGet above,
    // the collections inside a UDT should always be serialized with version 3
    // of the protocol, which is why we deliberately don't use 'version' below.

    assertEquals(
        Arrays.asList(3, 1), lt.getSerializer().deserializeForNativeProtocol(fields[1], 3));

    LinkedHashSet<String> s = new LinkedHashSet<>();
    s.addAll(Arrays.asList("bar", "foo"));
    assertEquals(s, st.getSerializer().deserializeForNativeProtocol(fields[2], 3));

    LinkedHashMap<String, Long> m = new LinkedHashMap<>();
    m.put("bar", 12L);
    m.put("foo", 24L);
    assertEquals(m, mt.getSerializer().deserializeForNativeProtocol(fields[3], 3));
  }
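
Worth noting when reading the assertions: Set.equals is defined by element membership only, so insertion order never affects the comparison, even between two LinkedHashSets. A one-class demonstration:

  import java.util.Arrays;
  import java.util.LinkedHashSet;

  public class SetEqualityDemo {
    public static void main(String[] args) {
      LinkedHashSet<String> a = new LinkedHashSet<>(Arrays.asList("bar", "foo"));
      LinkedHashSet<String> b = new LinkedHashSet<>(Arrays.asList("foo", "bar"));
      // Iteration orders differ, but Set.equals ignores order.
      System.out.println(a.equals(b)); // true
    }
  }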
Code example #6
  /**
   * Begins the in-place refactoring operation.
   *
   * @return true if the in-place refactoring was successfully started, false if it failed to start
   *     and a dialog should be shown instead.
   */
  public boolean startInplaceIntroduceTemplate() {
    final boolean replaceAllOccurrences = isReplaceAllOccurrences();
    final Ref<Boolean> result = new Ref<>();
    CommandProcessor.getInstance()
        .executeCommand(
            myProject,
            () -> {
              final String[] names = suggestNames(replaceAllOccurrences, getLocalVariable());
              final V variable = createFieldToStartTemplateOn(replaceAllOccurrences, names);
              boolean started = false;
              if (variable != null) {
                int caretOffset = getCaretOffset();
                myEditor.getCaretModel().moveToOffset(caretOffset);
                myEditor.getScrollingModel().scrollToCaret(ScrollType.MAKE_VISIBLE);

                final LinkedHashSet<String> nameSuggestions = new LinkedHashSet<>();
                nameSuggestions.add(variable.getName());
                nameSuggestions.addAll(Arrays.asList(names));
                initOccurrencesMarkers();
                setElementToRename(variable);
                updateTitle(getVariable());
                started = super.performInplaceRefactoring(nameSuggestions);
                if (started) {
                  onRenameTemplateStarted();
                  myDocumentAdapter =
                      new DocumentAdapter() {
                        @Override
                        public void documentChanged(DocumentEvent e) {
                          if (myPreview == null) return;
                          final TemplateState templateState =
                              TemplateManagerImpl.getTemplateState(myEditor);
                          if (templateState != null) {
                            final TextResult value =
                                templateState.getVariableValue(
                                    InplaceRefactoring.PRIMARY_VARIABLE_NAME);
                            if (value != null) {
                              updateTitle(getVariable(), value.getText());
                            }
                          }
                        }
                      };
                  myEditor.getDocument().addDocumentListener(myDocumentAdapter);
                  updateTitle(getVariable());
                  if (TemplateManagerImpl.getTemplateState(myEditor) != null) {
                    myEditor.putUserData(ACTIVE_INTRODUCE, this);
                  }
                }
              }
              result.set(started);
              if (!started) {
                finish(true);
              }
            },
            getCommandName(),
            getCommandName());
    return result.get();
  }
Code example #7
 @NotNull
 private LinkedHashSet<OrderEntry> getLibraryOrderEntries(
     @NotNull List<VirtualFile> hierarchy,
     @Nullable VirtualFile libraryClassRoot,
     @Nullable VirtualFile librarySourceRoot,
     @NotNull MultiMap<VirtualFile, OrderEntry> libClassRootEntries,
     @NotNull MultiMap<VirtualFile, OrderEntry> libSourceRootEntries) {
   LinkedHashSet<OrderEntry> orderEntries = ContainerUtil.newLinkedHashSet();
   for (VirtualFile root : hierarchy) {
     if (root.equals(libraryClassRoot) && !sourceRootOf.containsKey(root)) {
       orderEntries.addAll(libClassRootEntries.get(root));
     }
     if (root.equals(librarySourceRoot) && libraryClassRoot == null) {
       orderEntries.addAll(libSourceRootEntries.get(root));
     }
     if (libClassRootEntries.containsKey(root)
         || sourceRootOf.containsKey(root) && librarySourceRoot == null) {
       break;
     }
   }
   return orderEntries;
 }
Code example #8
  @NotNull
  private static Set<String> sortMatching(
      @NotNull PrefixMatcher matcher, @NotNull Collection<String> names, @NotNull GoFile file) {
    ProgressManager.checkCanceled();
    String prefix = matcher.getPrefix();
    if (prefix.isEmpty()) return ContainerUtil.newLinkedHashSet(names);

    Set<String> packagesWithAliases = ContainerUtil.newHashSet();
    for (Map.Entry<String, Collection<GoImportSpec>> entry : file.getImportMap().entrySet()) {
      for (GoImportSpec spec : entry.getValue()) {
        String alias = spec.getAlias();
        if (spec.isDot() || alias != null) {
          packagesWithAliases.add(entry.getKey());
          break;
        }
      }
    }

    List<String> sorted = ContainerUtil.newArrayList();
    for (String name : names) {
      if (matcher.prefixMatches(name) || packagesWithAliases.contains(substringBefore(name, '.'))) {
        sorted.add(name);
      }
    }

    ProgressManager.checkCanceled();
    Collections.sort(sorted, String.CASE_INSENSITIVE_ORDER);
    ProgressManager.checkCanceled();

    LinkedHashSet<String> result = new LinkedHashSet<String>();
    for (String name : sorted) {
      if (matcher.isStartMatch(name)) {
        result.add(name);
      }
    }

    ProgressManager.checkCanceled();

    result.addAll(sorted);
    return result;
  }
Code example #9
  private OWLTheory loadTheory(OWLOntology ontology, String ontologyDir, String o1, String o2) {
    OWLTheory theory = getExtendTheory(ontology, false);

    LinkedHashSet<OWLLogicalAxiom> bx = new LinkedHashSet<OWLLogicalAxiom>();
    OWLOntology ontology1 = getOntologySimple(ontologyDir, o1 + ".owl");
    OWLOntology ontology2 = getOntologySimple(ontologyDir, o2 + ".owl");
    Set<OWLLogicalAxiom> onto1Axioms =
        createIntersection(ontology.getLogicalAxioms(), ontology1.getLogicalAxioms());
    Set<OWLLogicalAxiom> onto2Axioms =
        createIntersection(ontology.getLogicalAxioms(), ontology2.getLogicalAxioms());
    int modOnto1Size = onto1Axioms.size();
    int modOnto2Size = onto2Axioms.size();
    int modMappingSize = ontology.getLogicalAxioms().size() - modOnto1Size - modOnto2Size;
    bx.addAll(onto1Axioms);
    bx.addAll(onto2Axioms);
    theory.getKnowledgeBase().addBackgroundFormulas(bx);

    logger.info(
        "ontology axiom sizes: " + modOnto1Size + "," + modOnto2Size + "," + modMappingSize);
    return theory;
  }
Code example #10
File: VCF3Codec.java Project: JianWang2016/Test
  /**
   * parse the filter string, first checking to see if we already have parsed it in a previous
   * attempt
   *
   * @param filterString the string to parse
   * @return a set of the filters applied
   */
  protected Set<String> parseFilters(String filterString) {

    // null for unfiltered
    if (filterString.equals(VCFConstants.UNFILTERED)) return null;

    // empty set for passes filters
    LinkedHashSet<String> fFields = new LinkedHashSet<String>();

    if (filterString.equals(VCFConstants.PASSES_FILTERS_v3)) return fFields;

    if (filterString.length() == 0)
      generateException("The VCF specification requires a valid filter status");

    // do we have the filter string cached?
    if (filterHash.containsKey(filterString)) return filterHash.get(filterString);

    // otherwise we have to parse and cache the value
    if (filterString.indexOf(VCFConstants.FILTER_CODE_SEPARATOR) == -1) fFields.add(filterString);
    else fFields.addAll(Arrays.asList(filterString.split(VCFConstants.FILTER_CODE_SEPARATOR)));

    filterHash.put(filterString, fFields);

    return fFields;
  }
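
The containsKey/put caching pattern here can also be written with Map.computeIfAbsent (Java 8+), which collapses the lookup and insert into one call. A sketch under the assumption of a plain Map<String, Set<String>> cache and a literal ";" separator (the real code uses VCFConstants.FILTER_CODE_SEPARATOR and handles the unfiltered/passing special cases before touching the cache):

  import java.util.Arrays;
  import java.util.HashMap;
  import java.util.LinkedHashSet;
  import java.util.Map;
  import java.util.Set;

  class FilterCacheSketch {
    private final Map<String, Set<String>> cache = new HashMap<>();

    Set<String> parse(String filterString) {
      // Compute and cache on first sight, reuse afterwards.
      return cache.computeIfAbsent(filterString,
          s -> new LinkedHashSet<>(Arrays.asList(s.split(";"))));
    }
  }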
Code example #11
File: StramClient.java Project: andyperlitch/Apex
  /**
   * Launch application for the dag represented by this client.
   *
   * @throws YarnException
   * @throws IOException
   */
  public void startApplication() throws YarnException, IOException {
    Class<?>[] defaultClasses;

    if (applicationType.equals(YARN_APPLICATION_TYPE)) {
      // TODO restrict the security check to only check if security is enabled for webservices.
      if (UserGroupInformation.isSecurityEnabled()) {
        defaultClasses = DATATORRENT_SECURITY_CLASSES;
      } else {
        defaultClasses = DATATORRENT_CLASSES;
      }
    } else {
      throw new IllegalStateException(applicationType + " is not a valid application type.");
    }

    LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);

    if (resources != null) {
      localJarFiles.addAll(resources);
    }

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info(
        "Got Cluster metric info from ASM"
            + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    // GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
    // GetClusterNodesResponse clusterNodesResp =
    // rmClient.clientRM.getClusterNodes(clusterNodesReq);
    // LOG.info("Got Cluster node info from ASM");
    // for (NodeReport node : clusterNodesResp.getNodeReports()) {
    //  LOG.info("Got node report from ASM for"
    //           + ", nodeId=" + node.getNodeId()
    //           + ", nodeAddress" + node.getHttpAddress()
    //           + ", nodeRackName" + node.getRackName()
    //           + ", nodeNumContainers" + node.getNumContainers()
    //           + ", nodeHealthStatus" + node.getHealthReport());
    // }
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
      for (QueueACL userAcl : aclInfo.getUserAcls()) {
        LOG.info(
            "User ACL Info for Queue"
                + ", queueName="
                + aclInfo.getQueueName()
                + ", userAcl="
                + userAcl.name());
      }
    }

    // Get a new application id
    YarnClientApplication newApp = yarnClient.createApplication();
    appId = newApp.getNewApplicationResponse().getApplicationId();

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
    int amMemory = dag.getMasterMemoryMB();
    if (amMemory > maxMem) {
      LOG.info(
          "AM memory specified above max threshold of cluster. Using max value."
              + ", specified="
              + amMemory
              + ", max="
              + maxMem);
      amMemory = maxMem;
    }

    if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
      dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
    }

    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
    appContext.setApplicationType(this.applicationType);
    if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
      // appContext.setMaxAppAttempts(1); // no retries until Stram is HA
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Setup security tokens
    // If security is enabled get ResourceManager and NameNode delegation tokens.
    // Set these tokens on the container so that they are sent as part of application submission.
    // This also sets them up for renewal by ResourceManager. The NameNode delegation rmToken
    // is also used by ResourceManager to fetch the jars from HDFS and set them up for the
    // application master launch.
    if (UserGroupInformation.isSecurityEnabled()) {
      Credentials credentials = new Credentials();
      String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
      if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
      }

      // For now, only getting tokens for the default file-system.
      FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
      try {
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
          for (Token<?> token : tokens) {
            LOG.info("Got dt for " + fs.getUri() + "; " + token);
          }
        }
      } finally {
        fs.close();
      }

      addRMDelegationToken(tokenRenewer, credentials);

      DataOutputBuffer dob = new DataOutputBuffer();
      credentials.writeTokenStorageToStream(dob);
      ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
      amContainer.setTokens(fsTokens);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // copy required jar files to dfs, to be localized for containers
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
      Path appsBasePath =
          new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
      Path appPath = new Path(appsBasePath, appId.toString());

      String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));

      LOG.info("libjars: {}", libJarsCsv);
      dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
      LaunchContainerRunnable.addFilesToLocalResources(
          LocalResourceType.FILE, libJarsCsv, localResources, fs);

      if (archives != null) {
        String[] localFiles = archives.split(",");
        String archivesCsv = copyFromLocal(fs, appPath, localFiles);
        LOG.info("archives: {}", archivesCsv);
        dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
        LaunchContainerRunnable.addFilesToLocalResources(
            LocalResourceType.ARCHIVE, archivesCsv, localResources, fs);
      }

      if (files != null) {
        String[] localFiles = files.split(",");
        String filesCsv = copyFromLocal(fs, appPath, localFiles);
        LOG.info("files: {}", filesCsv);
        dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
        LaunchContainerRunnable.addFilesToLocalResources(
            LocalResourceType.FILE, filesCsv, localResources, fs);
      }

      dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());
      if (dag.getAttributes().get(OperatorContext.STORAGE_AGENT) == null) {
        /* which would be the most likely case */
        Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
        // use conf client side to pickup any proxy settings from dt-site.xml
        dag.setAttribute(
            OperatorContext.STORAGE_AGENT, new FSStorageAgent(checkpointPath.toString(), conf));
      }
      if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
        dag.setAttribute(
            LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
      }

      // Set the log4j properties if needed
      if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(appPath, "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
      }

      if (originalAppId != null) {
        Path origAppPath = new Path(appsBasePath, this.originalAppId);
        LOG.info("Restart from {}", origAppPath);
        copyInitialState(origAppPath);
      }

      // push logical plan to DFS location
      Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
      FSDataOutputStream outStream = fs.create(cfgDst, true);
      LogicalPlan.write(this.dag, outStream);
      outStream.close();

      Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
      outStream = fs.create(launchConfigDst, true);
      conf.writeXml(outStream);
      outStream.close();

      FileStatus topologyFileStatus = fs.getFileStatus(cfgDst);
      LocalResource topologyRsrc = Records.newRecord(LocalResource.class);
      topologyRsrc.setType(LocalResourceType.FILE);
      topologyRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
      topologyRsrc.setResource(ConverterUtils.getYarnUrlFromURI(cfgDst.toUri()));
      topologyRsrc.setTimestamp(topologyFileStatus.getModificationTime());
      topologyRsrc.setSize(topologyFileStatus.getLen());
      localResources.put(LogicalPlan.SER_FILE_NAME, topologyRsrc);

      // Set local resource info into app master container launch context
      amContainer.setLocalResources(localResources);

      // Set the necessary security tokens as needed
      // amContainer.setContainerTokens(containerToken);
      // Set the env variables to be setup in the env where the application master will be run
      LOG.info("Set the environment for the application master");
      Map<String, String> env = new HashMap<String, String>();

      // Add application jar(s) location to classpath
      // At some point we should not be required to add
      // the hadoop specific classpaths to the env.
      // It should be provided out of the box.
      // For now setting all required classpaths including
      // the classpath to "." for the application jar(s)
      // including ${CLASSPATH} will duplicate the class path in app master, removing it for now
      // StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
      StringBuilder classPathEnv = new StringBuilder("./*");
      String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
      for (String c :
          StringUtils.isBlank(classpath)
              ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
              : classpath.split(",")) {
        if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
          // SPOI-2501
          continue;
        }
        classPathEnv.append(':');
        classPathEnv.append(c.trim());
      }
      env.put("CLASSPATH", classPathEnv.toString());
      // propagate to replace node managers user name (effective in non-secure mode)
      env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());

      amContainer.setEnvironment(env);

      // Set the necessary command to execute the application master
      ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);

      // Set java executable command
      LOG.info("Setting up app master command");
      vargs.add(javaCmd);
      if (dag.isDebug()) {
        vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
      }
      // Set Xmx based on am memory size
      // default heap size 75% of total memory
      vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
      vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
      vargs.add("-XX:HeapDumpPath=/tmp/dt-heap-" + appId.getId() + ".bin");
      vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
      vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
      vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
      if (dag.isDebug()) {
        vargs.add("-Dlog4j.debug=true");
      }

      String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
      if (loggersLevel != null) {
        vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
      }
      vargs.add(StreamingAppMaster.class.getName());
      vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
      vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

      // Get final command
      StringBuilder command = new StringBuilder(9 * vargs.size());
      for (CharSequence str : vargs) {
        command.append(str).append(" ");
      }

      LOG.info("Completed setting up app master command " + command.toString());
      List<String> commands = new ArrayList<String>();
      commands.add(command.toString());
      amContainer.setCommands(commands);

      // Set up resource type requirements
      // For now, only memory is supported so we set memory requirements
      Resource capability = Records.newRecord(Resource.class);
      capability.setMemory(amMemory);
      appContext.setResource(capability);

      // Service data is a binary blob that can be passed to the application
      // Not needed in this scenario
      // amContainer.setServiceData(serviceData);
      appContext.setAMContainerSpec(amContainer);

      // Set the priority for the application master
      Priority pri = Records.newRecord(Priority.class);
      pri.setPriority(amPriority);
      appContext.setPriority(pri);
      // Set the queue to which this application is to be submitted in the RM
      appContext.setQueue(queueName);

      // Submit the application to the applications manager
      // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
      // Ignore the response as either a valid response object is returned on success
      // or an exception thrown to denote some form of a failure
      String specStr =
          Objects.toStringHelper("Submitting application: ")
              .add("name", appContext.getApplicationName())
              .add("queue", appContext.getQueue())
              .add("user", UserGroupInformation.getLoginUser())
              .add("resource", appContext.getResource())
              .toString();
      LOG.info(specStr);
      if (dag.isDebug()) {
        // LOG.info("Full submission context: " + appContext);
      }
      yarnClient.submitApplication(appContext);
    } finally {
      fs.close();
    }
  }
Code example #12
 public Set<String> getAllKeys() {
   LinkedHashSet<String> result = new LinkedHashSet<>();
   result.addAll(keys);
   result.addAll(predicate.getDimensions());
   return result;
 }
Code example #13
 private void addFrozenItems(List<LookupElement> items, LinkedHashSet<LookupElement> model) {
   myFrozenItems.retainAll(items);
   model.addAll(myFrozenItems);
 }
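
The retainAll call keeps only the frozen items that still exist in the fresh item list before they are merged into the model; because a LinkedHashSet ignores re-insertions, whichever copy arrives first fixes an element's position. A self-contained sketch of the two calls, with hypothetical data and the frozen items merged first so they lead:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.LinkedHashSet;
  import java.util.List;

  public class FrozenItemsDemo {
    public static void main(String[] args) {
      List<String> frozen = new ArrayList<>(Arrays.asList("b", "stale"));
      List<String> current = Arrays.asList("a", "b", "c");

      frozen.retainAll(current);                       // drops "stale"
      LinkedHashSet<String> model = new LinkedHashSet<>(frozen);
      model.addAll(current);                           // "b" keeps its front slot
      System.out.println(model);                       // [b, a, c]
    }
  }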