/**
  * Collects the differentially expressed segments across every segmentation. Because the
  * segmentations are disjoint, the overall result is simply the union of the per-segmentation
  * results.
  *
  * @param c1 index of the first channel to compare
  * @param c2 index of the second channel to compare
  * @return the sorted union of the differential segments found in each segmentation
  */
 public SortedSet<DifferentialKey> differentialRegions(int c1, int c2) {
   SortedSet<DifferentialKey> union = new TreeSet<DifferentialKey>();
   for (InputSegmentation segmentation : segmentations) {
     union.addAll(differentialRegions(segmentation, c1, c2));
   }
   return union;
 }
Exemple #2
0
  /**
   * Project Euler #79: reconstructs the shortest passcode consistent with the keylog attempts in
   * {@code p079_keylog.txt} and prints it with the elapsed wall-clock time.
   *
   * @param args ignored
   * @throws Exception if the keylog file cannot be read (assumes the file exists and is non-empty)
   */
  public static void main(String[] args) throws Exception {
    Date startTime = new Date();
    String fname = "p079_keylog.txt";
    SortedSet<String> slines = new TreeSet<String>();

    // try-with-resources guarantees the reader is closed even if readLine() throws
    // (the original leaked both the FileReader and the BufferedReader).
    try (BufferedReader br = new BufferedReader(new FileReader(fname))) {
      String line;
      while ((line = br.readLine()) != null) {
        slines.add(line);
      }
    }

    // Start from the first character of the lexicographically smallest attempt, then repeatedly
    // demote it to the first character of any attempt that shows it at a later position.
    // Running one relaxation pass per attempt (as the original's unused-variable loop did)
    // bounds the number of passes needed for the choice to stabilize.
    String start = slines.first().split("")[0];
    for (int pass = 0; pass < slines.size(); pass++) {
      for (String l2 : slines) {
        if (l2.contains(start) && l2.indexOf(start) > 0) start = l2.split("")[0];
      }
    }

    StringBuilder ans = new StringBuilder(start);
    // Follow successor characters until no attempt constrains the next one (at most 10 digits).
    for (int i = 0; i < 10; i++) {
      start = getNext(slines, start);
      if (start == null) break;
      ans.append(start);
    }
    Date endTime = new Date();
    System.out.println(ans + " in " + (endTime.getTime() - startTime.getTime()) + " ms.");
  }
  /**
   * Appends the caps-string representation of the given form field values to {@code capsBldr}:
   * the values are emitted in sorted order, each followed by a {@code '<'} delimiter.
   *
   * @param ffValuesIter the {@link Iterator} containing the form field values.
   * @param capsBldr a <tt>StringBuilder</tt> to which the caps string representing the form field
   *     values is to be appended
   */
  private static void formFieldValuesToCaps(Iterator<String> ffValuesIter, StringBuilder capsBldr) {
    SortedSet<String> sortedValues = new TreeSet<String>();
    while (ffValuesIter.hasNext()) {
      sortedValues.add(ffValuesIter.next());
    }
    for (String value : sortedValues) {
      capsBldr.append(value).append('<');
    }
  }
 /**
  * Gets the unique read groups in the table.
  *
  * @param report the GATKReport containing the table with RecalUtils.READGROUP_REPORT_TABLE_TITLE
  * @return the unique read group names, sorted
  */
 private static SortedSet<String> getReadGroups(final GATKReport report) {
   final GATKReportTable table = report.getTable(RecalUtils.READGROUP_REPORT_TABLE_TITLE);
   final SortedSet<String> groups = new TreeSet<String>();
   final int rowCount = table.getNumRows();
   for (int row = 0; row < rowCount; row++) {
     groups.add(table.get(row, RecalUtils.READGROUP_COLUMN_NAME).toString());
   }
   return groups;
 }
Exemple #5
0
  /**
   * Returns all supported sample rates.
   *
   * @return an array of sample rates, in Hertz, never <code>null</code>.
   */
  public Integer[] getSampleRates() {
    final String[] rawRates = this.properties.get(DEVICE_SAMPLERATES).split(",\\s*");
    // Ordered by the project-defined number comparator (aSortAscending = false).
    final SortedSet<Integer> sampleRates =
        new TreeSet<Integer>(
            NumberUtils.<Integer>createNumberComparator(false /* aSortAscending */));
    for (String rawRate : rawRates) {
      sampleRates.add(Integer.valueOf(rawRate.trim()));
    }
    return sampleRates.toArray(new Integer[sampleRates.size()]);
  }
Exemple #6
0
 /**
  * Lists the contents of {@code dir} with subdirectories first, then regular files, each group in
  * natural (sorted) order.
  *
  * @param dir the directory to list
  * @return the sorted listing; an empty array if {@code dir} is not a readable directory
  */
 public static File[] getDirectoryListing(File dir) {
   SortedSet<File> dirSet = new TreeSet<File>();
   SortedSet<File> fileSet = new TreeSet<File>();
   File[] files = dir.listFiles();
   // listFiles() returns null (not an empty array) when 'dir' is not a directory or an I/O
   // error occurs; the original dereferenced it unconditionally and could throw NPE.
   if (files == null) {
     return new File[0];
   }
   for (File file : files) {
     if (file.isDirectory()) dirSet.add(file);
     else fileSet.add(file);
   }
   List<File> listing = new ArrayList<File>(dirSet.size() + fileSet.size());
   listing.addAll(dirSet);
   listing.addAll(fileSet);
   return listing.toArray(new File[0]);
 }
Exemple #7
0
  /**
   * Prints one Unicode property value per line, along with its aliases, if any, for the given
   * unicodeVersion.
   *
   * @param unicodeVersion The Unicode version to print property values and aliases for
   * @throws UnicodeProperties.UnsupportedUnicodeVersionException if unicodeVersion is not supported
   */
  private static void printUnicodePropertyValuesAndAliases(String unicodeVersion)
      throws UnicodeProperties.UnsupportedUnicodeVersionException {
    // Accept "major", "major.minor", or "major.minor.patch"; only major/minor are significant.
    Matcher versionMatcher =
        Pattern.compile("(\\d+)(?:\\.(\\d+))?(?:\\.\\d+)?").matcher(unicodeVersion);
    if (!versionMatcher.matches()) {
      throw new UnicodeProperties.UnsupportedUnicodeVersionException();
    }
    String minor = versionMatcher.group(2);
    String underscoreVersion = versionMatcher.group(1) + (null == minor ? "_0" : "_" + minor);

    String[] propertyValues;
    String[] propertyValueAliases;
    try {
      // The per-version data classes expose their tables as public static fields.
      Class<?> dataClass = Class.forName("jflex.unicode.data.Unicode_" + underscoreVersion);
      propertyValues = (String[]) dataClass.getField("propertyValues").get(null);
      propertyValueAliases = (String[]) dataClass.getField("propertyValueAliases").get(null);
    } catch (Exception e) {
      throw new UnicodeProperties.UnsupportedUnicodeVersionException();
    }

    // Map every known property value to its (possibly empty) sorted alias set.
    SortedMap<String, SortedSet<String>> aliasesByValue = new TreeMap<String, SortedSet<String>>();
    for (String value : propertyValues) {
      aliasesByValue.put(value, new TreeSet<String>());
    }
    // The alias table is a flat array of (alias, value) pairs.
    for (int i = 0; i < propertyValueAliases.length; i += 2) {
      String alias = propertyValueAliases[i];
      String value = propertyValueAliases[i + 1];
      SortedSet<String> aliases = aliasesByValue.get(value);
      if (null == aliases) {
        aliases = new TreeSet<String>();
        aliasesByValue.put(value, aliases);
      }
      aliases.add(alias);
    }

    // Emit "value, alias1, alias2, ..." one line per property value.
    for (Map.Entry<String, SortedSet<String>> entry : aliasesByValue.entrySet()) {
      Out.print(entry.getKey());
      for (String alias : entry.getValue()) {
        Out.print(", " + alias);
      }
      Out.println("");
    }
  }
 /**
  * Resets all accumulated state so this object can be reused for a fresh run: empties every
  * collection, drops cached per-thread data, and zeroes the counters and time bounds.
  */
 void clear() {
   pending_entries.clear();
   output_set.clear();
   object_tasks.clear();
   active_threads.clear();
   time_marks.clear();
   thread_entries = null;
   next_time = 0;
   end_time = 0;
   current_thread = null;
   thread_map.clear();
   cpu_time = null;
   thread_counter = 0;
   task_counter = 0;
   // NOTE(review): reset to 1, not 0 — presumably used as a divisor or scale factor; confirm.
   max_delta = 1;
 }
Exemple #9
0
  /**
   * Given the sorted keylog attempts, returns the character (as a one-char string) that follows
   * {@code num} in the passcode: among all characters observed directly after the first occurrence
   * of {@code num} in some attempt, picks the one that no attempt orders after another candidate.
   *
   * @param slines the keylog attempts
   * @param num the current character (one-char string) to find a successor for
   * @return the successor character, or {@code null} if no attempt provides one
   */
  public static String getNext(SortedSet<String> slines, String num) {
    // Collect every character that immediately follows the first occurrence of 'num'.
    SortedSet<String> candidates = new TreeSet<String>();
    for (String s : slines) {
      int pos = s.indexOf(num);
      if (pos >= 0 && pos < s.length() - 1) {
        // charAt replaces the original s.split("")[pos + 1] — same character, no array churn.
        candidates.add(String.valueOf(s.charAt(pos + 1)));
      }
    }
    if (candidates.isEmpty()) return null;

    // Start from the smallest candidate and demote it whenever some attempt shows another
    // candidate occurring earlier in that attempt.
    String nxt = candidates.first();
    for (String s : slines) {
      for (String c : candidates) {
        if (s.contains(c) && s.contains(nxt) && s.indexOf(c) < s.indexOf(nxt)) nxt = c;
      }
    }
    return nxt;
  }
  /** Create a new set of shared job state counters for all jobs in the given group. */
  public void initCounters(TaskTimer timer, QueueJobGroup group) {
    long groupID = group.getGroupID();
    LogMgr logger = LogMgr.getInstance();

    if (logger.isLoggable(LogMgr.Kind.Ops, LogMgr.Level.Finest))
      logger.log(
          LogMgr.Kind.Ops, LogMgr.Level.Finest, "Init Job Counts for Group (" + groupID + ")");

    SortedSet<Long> jobIDs = group.getJobIDs();
    Counters counters = new Counters(jobIDs.size());

    // Register the shared counters under the group ID.
    timer.acquire();
    synchronized (pCountersByGroup) {
      timer.resume();

      if (pCountersByGroup.put(groupID, counters) != null)
        logger.logAndFlush(
            LogMgr.Kind.Ops,
            LogMgr.Level.Warning,
            "Somehow the job group (" + groupID + ") was already in the state " + "counts table!");
    }

    // Register the same counters instance under every individual job ID.
    timer.acquire();
    synchronized (pCountersByJob) {
      timer.resume();

      for (Long jobID : jobIDs) {
        if (pCountersByJob.put(jobID, counters) != null)
          logger.logAndFlush(
              LogMgr.Kind.Ops,
              LogMgr.Level.Warning,
              "Somehow the job (" + jobID + ") was already in the state counts table!");
      }
    }
  }
  /**
   * Reconstructs an in-memory recalibration report from a GATK recalibration file: loads the
   * argument and quantization tables, rebuilds the required and optional covariates, and re-parses
   * the read-group, quality-score, and all-covariates tables into the recalibration tables.
   *
   * @param recalFile the on-disk GATK recalibration report file to load
   * @param allReadGroups every read group the recalibration tables must be sized for
   */
  public RecalibrationReport(final File recalFile, final SortedSet<String> allReadGroups) {
    final GATKReport report = new GATKReport(recalFile);

    // Recover the argument collection the recalibration was originally run with.
    argumentTable = report.getTable(RecalUtils.ARGUMENT_REPORT_TABLE_TITLE);
    RAC = initializeArgumentCollectionTable(argumentTable);

    GATKReportTable quantizedTable = report.getTable(RecalUtils.QUANTIZED_REPORT_TABLE_TITLE);
    quantizationInfo = initializeQuantizationTable(quantizedTable);

    Pair<ArrayList<Covariate>, ArrayList<Covariate>> covariates =
        RecalUtils.initializeCovariates(RAC); // initialize the required and optional covariates
    ArrayList<Covariate> requiredCovariates = covariates.getFirst();
    ArrayList<Covariate> optionalCovariates = covariates.getSecond();
    // Required covariates come first in the flattened array, then the optional ones.
    requestedCovariates = new Covariate[requiredCovariates.size() + optionalCovariates.size()];
    optionalCovariateIndexes = new HashMap<String, Integer>(optionalCovariates.size());
    int covariateIndex = 0;
    for (final Covariate covariate : requiredCovariates)
      requestedCovariates[covariateIndex++] = covariate;
    for (final Covariate covariate : optionalCovariates) {
      requestedCovariates[covariateIndex] = covariate;
      final String covariateName =
          covariate
              .getClass()
              .getSimpleName()
              .split("Covariate")[
              0]; // get the name of the covariate (without the "covariate" part of it) so we can
                  // match with the GATKReport
      // NOTE(review): the "- 2" offset presumably converts the absolute covariate index into an
      // index relative to the optional covariates, i.e. it assumes exactly two required
      // covariates — confirm against RecalUtils.initializeCovariates.
      optionalCovariateIndexes.put(covariateName, covariateIndex - 2);
      covariateIndex++;
    }

    for (Covariate cov : requestedCovariates)
      cov.initialize(
          RAC); // initialize any covariate member variables using the shared argument collection

    recalibrationTables = new RecalibrationTables(requestedCovariates, allReadGroups.size());

    initializeReadGroupCovariates(allReadGroups);

    // Re-parse each persisted table back into its in-memory counterpart.
    parseReadGroupTable(
        report.getTable(RecalUtils.READGROUP_REPORT_TABLE_TITLE),
        recalibrationTables.getReadGroupTable());

    parseQualityScoreTable(
        report.getTable(RecalUtils.QUALITY_SCORE_REPORT_TABLE_TITLE),
        recalibrationTables.getQualityScoreTable());

    parseAllCovariatesTable(
        report.getTable(RecalUtils.ALL_COVARIATES_REPORT_TABLE_TITLE), recalibrationTables);
  }
  /**
   * Adds to {@code rslt} every output entry overlapping the window [t0, t1], scanning forward from
   * {@code start} and grouping the matches by their thread. The result set is allocated lazily.
   *
   * @param start earliest start time to begin scanning from
   * @param t0 window lower bound
   * @param t1 window upper bound
   * @param rslt an existing result set to extend, or {@code null}
   * @return {@code rslt} (possibly freshly allocated), or {@code null} if nothing matched
   */
  private BdynRangeSet addToRange(long start, long t0, long t1, BdynRangeSet rslt) {
    OutputEntry probe = new OutputEntry(start);
    for (OutputEntry entry : output_set.tailSet(probe)) {
      // Entries are ordered by start time, so nothing after this can overlap the window.
      if (entry.getStartTime() > t1) break;
      // Skip entries that end before the window begins.
      if (entry.getEndTime(t1) < t0) continue;
      ThreadData thread = entry.getThread();
      if (rslt == null) rslt = new BdynRangeSet();
      Set<BdynEntry> perThread = rslt.get(thread);
      if (perThread == null) {
        perThread = new HashSet<BdynEntry>();
        rslt.put(thread, perThread);
      }
      perThread.add(entry);
    }
    return rslt;
  }
  /**
   * Disassembles the program held in the code store into {@code asmFileName}: collects every
   * address that may be a branch target, assigns each a label, and writes one labeled instruction
   * per line. On I/O failure, sets {@code error} and returns without throwing.
   *
   * @param asmFileName path of the assembly output file to create
   */
  void disassembleProgram(String asmFileName) {

    try {
      asmOut = new FileWriter(asmFileName);
    } catch (IOException e) {
      // BUG FIX: report the file we actually tried to create (the 'asmFileName' parameter),
      // not the unrelated 'asmName' field.
      System.out.println("Disassembler: can not create asm output file " + asmFileName);
      error = true;
      return;
    }

    // collect all addresses that may be the target of a jump instruction
    SortedSet<Integer> targets = new TreeSet<Integer>();
    for (int addr = Machine.CB; addr < Machine.CT; addr++) {
      Instruction inst = Machine.code[addr];
      Machine.Op op = Machine.intToOp[inst.op];
      switch (op) {
        case CALL:
          // only consider calls (branches) within code memory (i.e. not primitives)
          if (inst.r == Machine.Reg.CB.ordinal()) targets.add(inst.d);
          break;
        case JUMP:
          // address following an unconditional branch is an implicit target
          targets.add(addr + 1);
          /* FALL THROUGH! */
        case JUMPIF:
          // a jump of any sort creates a branch target
          targets.add(inst.d);
      }
    }

    // map branch target addresses to unique labels (L10, L11, ...)
    addrToLabel = new HashMap<Integer, String>();
    int labelCounter = 10;
    for (Integer addr : targets) {
      addrToLabel.put(addr, "L" + labelCounter++);
    }

    // disassemble each instruction
    for (int addr = Machine.CB; addr < Machine.CT; addr++) {

      // generate instruction address
      asmWrite(String.format("%3d  ", addr));

      // if this addr is a branch target, output label
      if (addrToLabel.containsKey(addr))
        asmWrite(String.format("%-7s", addrToLabel.get(addr) + ":"));
      else asmWrite("       ");

      // instruction
      writeInstruction(Machine.code[addr]);

      // newline
      asmWrite("\n");
    }

    // close output file
    try {
      asmOut.close();
    } catch (IOException e) {
      error = true;
    }
  }
 /** Returns an iterator over the recorded time marks. */
 Iterator<Long> getTimeMarkIterator() {
   return time_marks.iterator();
 }
 /** Records a time mark at the given timestamp. */
 void addTimeMark(long when) {
   time_marks.add(when);
 }
 /** Returns the number of currently active threads being tracked. */
 int getActiveThreadCount() {
   return active_threads.size();
 }
 /** Returns the start time of the earliest output entry, or 0 if there are none. */
 long getStartTime() {
   if (output_set.isEmpty()) return 0;
   return output_set.first().getStartTime();
 }
Exemple #18
0
 /**
  * Builds a names query filter selecting the given cells of the given partition key, stamped with
  * the current time.
  *
  * @param cfs the column family store whose comparator orders the cell names
  * @param key the decorated partition key to filter
  * @param names the cell names to select
  * @return the names query filter
  */
 public static QueryFilter namesQueryFilter(
     ColumnFamilyStore cfs, DecoratedKey key, CellName... names) {
   SortedSet<CellName> sortedNames = new TreeSet<CellName>(cfs.getComparator());
   for (CellName name : names) {
     sortedNames.add(name);
   }
   return QueryFilter.getNamesFilter(key, cfs.name, sortedNames, System.currentTimeMillis());
 }
Exemple #19
0
 /**
  * Builds a {@code NamesQueryFilter} selecting the given column names, ordered by the store's
  * comparator.
  *
  * @param cfs the column family store whose comparator orders the cell names
  * @param names the raw column names to select
  * @return the names filter
  */
 public static NamesQueryFilter namesFilter(ColumnFamilyStore cfs, String... names) {
   SortedSet<CellName> cells = new TreeSet<CellName>(cfs.getComparator());
   for (String name : names) {
     cells.add(cellname(name));
   }
   return new NamesQueryFilter(cells);
 }
  /**
   * Calculates the <tt>String</tt> for a specific <tt>DiscoverInfo</tt> which is to be hashed in
   * order to compute the ver string for that <tt>DiscoverInfo</tt>.
   *
   * <p>NOTE(review): the shape of this computation (sorted identities emitted as
   * "category/type//name&lt;", then sorted feature vars, then sorted extended-form fields) appears
   * to follow the XEP-0115 Entity Capabilities ver-string algorithm — confirm against the spec.
   *
   * @param discoverInfo the <tt>DiscoverInfo</tt> for which the <tt>String</tt> to be hashed in
   *     order to compute its ver string is to be calculated
   * @return the <tt>String</tt> for <tt>discoverInfo</tt> which is to be hashed in order to compute
   *     its ver string
   */
  private static String calculateEntityCapsString(DiscoverInfo discoverInfo) {
    StringBuilder bldr = new StringBuilder();

    // Add identities, sorted by category then type, each as "category/type//name<".
    {
      Iterator<DiscoverInfo.Identity> identities = discoverInfo.getIdentities();
      SortedSet<DiscoverInfo.Identity> is =
          new TreeSet<DiscoverInfo.Identity>(
              new Comparator<DiscoverInfo.Identity>() {
                public int compare(DiscoverInfo.Identity i1, DiscoverInfo.Identity i2) {
                  int category = i1.getCategory().compareTo(i2.getCategory());

                  if (category != 0) return category;

                  int type = i1.getType().compareTo(i2.getType());

                  if (type != 0) return type;

                  /*
                   * TODO Sort by xml:lang.
                   *
                   * Since sort by xml:lang is currently missing,
                   * use the last supported sort criterion i.e.
                   * type.
                   */
                  return type;
                }
              });

      if (identities != null) while (identities.hasNext()) is.add(identities.next());

      for (DiscoverInfo.Identity i : is) {
        bldr.append(i.getCategory())
            .append('/')
            .append(i.getType())
            .append("//")
            .append(i.getName())
            .append('<');
      }
    }

    // Add features, sorted by var, each terminated by '<'.
    {
      Iterator<DiscoverInfo.Feature> features = getDiscoverInfoFeatures(discoverInfo);
      SortedSet<String> fs = new TreeSet<String>();

      if (features != null) while (features.hasNext()) fs.add(features.next().getVar());

      for (String f : fs) bldr.append(f).append('<');
    }

    // Extended service discovery information carried in a "jabber:x:data" form, if present.
    DataForm extendedInfo = (DataForm) discoverInfo.getExtension("x", "jabber:x:data");

    if (extendedInfo != null) {
      // Guard against concurrent mutation of the form while its fields are iterated.
      synchronized (extendedInfo) {
        // Non-FORM_TYPE fields sorted by variable name; FORM_TYPE is pulled out and emitted first.
        SortedSet<FormField> fs =
            new TreeSet<FormField>(
                new Comparator<FormField>() {
                  public int compare(FormField f1, FormField f2) {
                    return f1.getVariable().compareTo(f2.getVariable());
                  }
                });

        FormField formType = null;

        for (Iterator<FormField> fieldsIter = extendedInfo.getFields(); fieldsIter.hasNext(); ) {
          FormField f = fieldsIter.next();
          if (!f.getVariable().equals("FORM_TYPE")) fs.add(f);
          else formType = f;
        }

        // Add FORM_TYPE values
        if (formType != null) formFieldValuesToCaps(formType.getValues(), bldr);

        // Add the other values
        for (FormField f : fs) {
          bldr.append(f.getVariable()).append('<');
          formFieldValuesToCaps(f.getValues(), bldr);
        }
      }
    }

    return bldr.toString();
  }
 /**
  * Copies the given sorted set into an immutable sorted set, preserving its comparator and falling
  * back to natural ordering when the set has none.
  *
  * @param sortedset the source set to copy
  * @return an immutable copy with the same ordering
  */
 public static ImmutableSortedSet copyOfSorted(SortedSet sortedset) {
   Comparator comparator = sortedset.comparator();
   return copyOfInternal((comparator == null) ? NATURAL_ORDER : comparator, sortedset);
 }
  /**
   * Translates a StatValue into a PerfStatValue with the specified filter, with the specified
   * operations, and the specified trim. For the latter, if start is -1, uses the beginning of the
   * statistic's existence, and if end is -1, goes to the end of the statistic's existence.
   *
   * @param statspec selects which aggregates (min/max/mean/stddev/...) to compute and the filter
   * @param trimspec the time window the statistic is trimmed to before aggregation
   * @param sv the archived statistic value to translate
   * @return the populated PerfStatValue
   */
  protected static PerfStatValue getPerfStatValue(
      StatSpec statspec, TrimSpec trimspec, StatArchiveReader.StatValue sv) {
    long start = trimspec.getStart();
    long end = trimspec.getEnd();
    sv = sv.createTrimmed(start, end);
    if (Log.getLogWriter().finestEnabled()) {
      Log.getLogWriter()
          .finest(
              "PerfStatReader: Trimmed from "
                  + trimspec.getStartStr()
                  + " ("
                  + start
                  + ") to "
                  + trimspec.getEndStr()
                  + " ("
                  + end
                  + ")");
    }

    int filter = statspec.getFilter();
    double mean = -1;
    // For per-second means: temporarily disable the filter, take (max - min) of the raw
    // snapshots, and divide by the wall-clock seconds spanned by the (possibly defaulted)
    // trim window.
    if (filter == StatArchiveReader.StatValue.FILTER_PERSEC && statspec.getMean()) {
      if (start == -1) {
        start = sv.getRawAbsoluteTimeStamps()[0];
      }
      if (end == -1) {
        long[] rats = sv.getRawAbsoluteTimeStamps();
        end = rats[rats.length - 1];
      }
      long elapsedSec = (end - start) / 1000;
      sv.setFilter(StatArchiveReader.StatValue.FILTER_NONE);
      double del = sv.getSnapshotsMaximum() - sv.getSnapshotsMinimum();
      // NOTE(review): if the window spans less than one second, elapsedSec is 0 and 'mean'
      // becomes Infinity/NaN — confirm callers tolerate this.
      mean = del / elapsedSec;
    }
    // Restore the requested filter before taking the snapshot-based aggregates below.
    sv.setFilter(filter);

    // @todo lises see if psv really needs to hang onto specs
    PerfStatValue psv = new PerfStatValue(statspec, trimspec);
    psv.setIsLargerBetter(sv.getDescriptor().isLargerBetter());
    psv.setSamples(sv.getSnapshotsSize());

    if (statspec.getMin()) psv.setMin(sv.getSnapshotsMinimum());
    if (statspec.getMax()) psv.setMax(sv.getSnapshotsMaximum());
    if (statspec.getMaxMinusMin())
      psv.setMaxMinusMin(sv.getSnapshotsMaximum() - sv.getSnapshotsMinimum());
    if (statspec.getMean()) {
      if (filter == StatArchiveReader.StatValue.FILTER_PERSEC) {
        psv.setMean(mean);
      } else {
        psv.setMean(sv.getSnapshotsAverage());
      }
    }
    if (statspec.getStddev()) psv.setStddev(sv.getSnapshotsStandardDeviation());

    // Record the distinct archive directory names and the product version of the first archive
    // seen. (The contains() check is redundant for a Set but harmless.)
    SortedSet archives = new TreeSet();
    StatArchiveReader.ResourceInst[] resources = sv.getResources();
    String productVersion = null;
    for (int i = 0; i < resources.length; i++) {
      String archive = resources[i].getArchive().getFile().getParentFile().getName();
      if (productVersion == null) {
        productVersion = resources[i].getArchive().getArchiveInfo().getProductVersion();
      }
      if (!archives.contains(archive)) {
        archives.add(archive);
      }
    }
    psv.setArchives(archives);
    psv.setProductVersion(productVersion);

    return psv;
  }
  /**
   * Collects the differentially expressed segments (segment pairs with the same start and end, but
   * one member of the pair has the LINE type while the other does not) from a given segmentation.
   *
   * @param seg the segmentation whose segments are scanned
   * @param c1 index of the first channel to compare
   * @param c2 index of the second channel to compare
   * @return the differential keys for all segment pairs judged differentially expressed
   */
  private Collection<DifferentialKey> differentialRegions(InputSegmentation seg, int c1, int c2) {
    ArrayList<DifferentialKey> keys = new ArrayList<DifferentialKey>();

    // Get the ground-truth data, so we can assemble the region identifiers when
    // we've found the differentially expressed segments.
    InputData data = seg.input;
    Integer[] locations = data.locations();
    String strand = data.strand();
    String chrom = data.chrom();

    // Most of the setup of this method is filtering and sorting the Segment objects,
    // so that we *only* consider the segments from the two given channels (c1, and c2)
    // and so that we consider them in order, so that we're not comparing segments
    // different parts of the input chunk.
    SortedSet<Segment> c1Segs = new TreeSet<Segment>();
    SortedSet<Segment> c2Segs = new TreeSet<Segment>();

    for (Segment s : seg.segments) {
      if (s.channel == c1) {
        c1Segs.add(s);
      } else if (s.channel == c2) {
        c2Segs.add(s);
      }
    }

    // These should now be in sorted order, given that we built them using the
    // SortedSet objects.
    Segment[] c1array = c1Segs.toArray(new Segment[0]);
    Segment[] c2array = c2Segs.toArray(new Segment[0]);

    // sanity check: the two channels must segment the chunk identically.
    if (c1array.length != c2array.length) {
      throw new IllegalArgumentException();
    }

    for (int i = 0; i < c1array.length; i++) {
      Segment s1 = c1array[i], s2 = c2array[i];

      // sanity check: paired segments must cover the same interval.
      if (!s1.start.equals(s2.start) || !s1.end.equals(s2.end)) {
        throw new IllegalArgumentException(
            String.format("%d,%d doesn't match %d,%d", s1.start, s1.end, s2.start, s2.end));
      }

      // There are three conditions here:
      // (1) Neither of the segments is 'shared'
      // (2) The segments don't have the same type
      // (3) At least one of them is a line.
      if (!s1.shared && !s2.shared) {
        if (s1.segmentType.equals(Segment.LINE) || s2.segmentType.equals(Segment.LINE)) {

          boolean differential = false;

          // Remember: for any segment 's', s.start and s.end are *indices*
          // into the 'locations' array of the corresponding data chunk.
          DifferentialKey key =
              new DifferentialKey(
                  new RegionKey(chrom, locations[s1.start], locations[s1.end], strand), s1, s2);

          if (s1.segmentType.equals(Segment.FLAT)) {
            differential = key.s2Expr() > key.s1Expr();
          } else if (s2.segmentType.equals(Segment.FLAT)) {
            differential = key.s1Expr() > key.s2Expr();
          } else {
            // NOTE(review): 0.5 is a hard-coded differential-expression threshold for the
            // line-vs-line case; consider making it a named constant or parameter.
            differential = Math.abs(key.diffExpr()) >= 0.5;
          }

          if (differential) {
            // If all conditions have been satisfied, then we build
            // the region identifier and save it in the list to be returned.
            keys.add(key);
          }
        }
      }
    }

    return keys;
  }