Example #1
1
 public boolean intersects(
     List<ByteBuffer> minClusteringValues, List<ByteBuffer> maxClusteringValues) {
   for (Slice slice : this) {
     if (slice.intersects(comparator, minClusteringValues, maxClusteringValues)) return true;
   }
   return false;
 }
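The method is the standard any-match scan with early exit over the slice list. For illustration, a minimal self-contained sketch of the same pattern over plain int ranges; Range, IntersectsDemo, and the closed-interval overlap test are stand-ins, not part of the Cassandra API.

  import java.util.List;

  // Illustrative stand-ins for Slice and the clustering comparator.
  record Range(int lo, int hi) {
    boolean intersects(int min, int max) {
      return lo <= max && hi >= min; // closed-interval overlap
    }
  }

  class IntersectsDemo {
    static boolean anyIntersects(List<Range> ranges, int min, int max) {
      for (Range r : ranges) {
        if (r.intersects(min, max)) return true; // early exit on first hit
      }
      return false;
    }

    public static void main(String[] args) {
      List<Range> slices = List.of(new Range(0, 3), new Range(10, 12));
      System.out.println(anyIntersects(slices, 4, 9));   // false
      System.out.println(anyIntersects(slices, 11, 20)); // true
    }
  }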
Example #2
0
 public Builder add(Slice slice) {
   assert comparator.compare(slice.start(), slice.end()) <= 0;
   if (slices.size() > 0
       && comparator.compare(slices.get(slices.size() - 1).end(), slice.start()) > 0)
     needsNormalizing = true;
   slices.add(slice);
   return this;
 }
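The builder does not normalize eagerly; it only flags that a later sort/merge pass is needed when a newly added slice starts before the previous one ends. A sketch of the same deferred-normalization flag over int pairs (RangeBuilder and the int[] encoding are illustrative assumptions):

  import java.util.ArrayList;
  import java.util.List;

  class RangeBuilder {
    private final List<int[]> ranges = new ArrayList<>(); // {lo, hi} pairs
    private boolean needsNormalizing = false;

    RangeBuilder add(int lo, int hi) {
      assert lo <= hi;
      // New range starts before the previous one ends: out of order or
      // overlapping, so a normalization pass will be needed at build time.
      if (!ranges.isEmpty() && ranges.get(ranges.size() - 1)[1] > lo)
        needsNormalizing = true;
      ranges.add(new int[] {lo, hi});
      return this;
    }

    boolean needsNormalizing() {
      return needsNormalizing;
    }
  }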
Example #3
0
    public boolean selects(Clustering clustering) {
      for (int i = 0; i < slices.length; i++) {
        Slice slice = slices[i];
        if (comparator.compare(clustering, slice.start()) < 0) return false;

        if (comparator.compare(clustering, slice.end()) <= 0) return true;
      }
      return false;
    }
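selects depends on the slices being sorted and non-overlapping: once the clustering sorts before the current slice's start, it cannot fall inside any later slice, so the scan can fail fast. The same invariant-driven scan over ints (a minimal sketch; SelectsDemo and the int[][] encoding are illustrative):

  class SelectsDemo {
    // Assumes ranges are sorted by lower bound and non-overlapping.
    static boolean selects(int[][] ranges, int value) {
      for (int[] r : ranges) {
        if (value < r[0]) return false; // before this range => before all later ones
        if (value <= r[1]) return true; // inside this range
      }
      return false;
    }
  }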
Example #4
0
    private Slices forReversePaging(
        ClusteringComparator comparator, Clustering lastReturned, boolean inclusive) {
      for (int i = slices.length - 1; i >= 0; i--) {
        Slice slice = slices[i];
        Slice newSlice = slice.forPaging(comparator, lastReturned, inclusive, true);
        if (newSlice == null) continue;

        if (slice == newSlice && i == slices.length - 1) return this;

        ArrayBackedSlices newSlices =
            new ArrayBackedSlices(comparator, Arrays.copyOfRange(slices, 0, i + 1));
        newSlices.slices[i] = newSlice;
        return newSlices;
      }
      return Slices.NONE;
    }
Example #5
0
    private Slices forForwardPaging(
        ClusteringComparator comparator, Clustering lastReturned, boolean inclusive) {
      for (int i = 0; i < slices.length; i++) {
        Slice slice = slices[i];
        Slice newSlice = slice.forPaging(comparator, lastReturned, inclusive, false);
        if (newSlice == null) continue;

        if (slice == newSlice && i == 0) return this;

        ArrayBackedSlices newSlices =
            new ArrayBackedSlices(comparator, Arrays.copyOfRange(slices, i, slices.length));
        newSlices.slices[0] = newSlice;
        return newSlices;
      }
      return Slices.NONE;
    }
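Both paging variants share one shape: skip slices wholly consumed by the paging position, return this unchanged when the boundary slice is untouched and nothing was dropped, and otherwise copy the surviving portion of the array and swap in the clipped boundary slice. A forward-direction sketch over int ranges (clip stands in for Slice.forPaging; all names are illustrative assumptions):

  import java.util.Arrays;

  class PagingDemo {
    // Clips r to values after 'last' (or at it, when inclusive); returns null
    // when nothing of r remains, and the same array when r is untouched.
    static int[] clip(int[] r, int last, boolean inclusive) {
      int lo = inclusive ? last : last + 1;
      if (r[1] < lo) return null;  // range entirely consumed by paging
      if (r[0] >= lo) return r;    // range untouched: keep identity
      return new int[] {lo, r[1]}; // clip the start
    }

    static int[][] forForwardPaging(int[][] ranges, int last, boolean inclusive) {
      for (int i = 0; i < ranges.length; i++) {
        int[] clipped = clip(ranges[i], last, inclusive);
        if (clipped == null) continue;                     // drop consumed range
        if (clipped == ranges[i] && i == 0) return ranges; // nothing changed
        int[][] out = Arrays.copyOfRange(ranges, i, ranges.length);
        out[0] = clipped;                                  // swap in boundary
        return out;
      }
      return new int[0][]; // everything consumed; analogous to Slices.NONE
    }
  }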
Example #6
0
      public static ComponentOfSlice fromSlice(int component, Slice slice) {
        Slice.Bound start = slice.start();
        Slice.Bound end = slice.end();

        if (component >= start.size() && component >= end.size()) return null;

        boolean startInclusive = true, endInclusive = true;
        ByteBuffer startValue = null, endValue = null;
        if (component < start.size()) {
          startInclusive = start.isInclusive();
          startValue = start.get(component);
        }
        if (component < end.size()) {
          endInclusive = end.isInclusive();
          endValue = end.get(component);
        }
        return new ComponentOfSlice(startInclusive, startValue, endInclusive, endValue);
      }
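fromSlice projects a single clustering component out of two multi-component bounds, and a bound too short to constrain that component degrades to unbounded and inclusive. A sketch with String[] bounds (ComponentRange and its factory are illustrative stand-ins, not the Cassandra types):

  // One component of a multi-component range; a null value means that side
  // is unbounded for this component.
  record ComponentRange(boolean startInclusive, String startValue,
                        boolean endInclusive, String endValue) {
    static ComponentRange of(int component, String[] start, boolean startIncl,
                             String[] end, boolean endIncl) {
      if (component >= start.length && component >= end.length) return null;
      boolean si = true, ei = true;
      String sv = null, ev = null;
      if (component < start.length) { si = startIncl; sv = start[component]; }
      if (component < end.length)   { ei = endIncl;   ev = end[component]; }
      return new ComponentRange(si, sv, ei, ev);
    }
  }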
Example #7
 /**
  * See if a structure is "whole", meaning that none of its fields is missing from the
  * constraint, all of its fields use the default (non-constrained) dimensions, and all of its
  * fields are themselves whole. The test is applied recursively.
  *
  * @param dstruct to test
  * @return true if this structure is whole.
  */
 protected boolean isWholeCompound(DapStructure dstruct) {
   int processed = 0;
   List<DapVariable> fields = dstruct.getFields();
   for (DapVariable field : fields) {
     // not contractable if this field has non-original dimensions
     Segment seg = findSegment(field);
     if (seg == null) break; // this compound is not whole
      List<Slice> slices = seg.slices;
      boolean constrained = false;
      if (slices != null) {
        for (Slice slice : slices) {
          if (slice.isConstrained()) {
            constrained = true;
            break;
          }
        }
      }
      if (constrained) break; // a constrained slice means this compound is not whole
     if (field.getSort() == DapSort.STRUCTURE || field.getSort() == DapSort.SEQUENCE) {
       if (!isWholeCompound((DapStructure) field)) break; // this compound is not whole
     }
     processed++;
   }
   return (processed == fields.size());
 }
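For illustration, the same recursive all-fields-present-and-unconstrained test over a toy schema; Node, present, and constrained are stand-ins for DapVariable, the findSegment lookup, and Slice.isConstrained:

  import java.util.List;

  // A node is "whole" if it is present in the constraint, unconstrained,
  // and (for compounds) all of its children are recursively whole.
  record Node(String name, boolean present, boolean constrained, List<Node> children) {
    static boolean isWhole(Node n) {
      if (!n.present() || n.constrained()) return false;
      for (Node child : n.children()) {
        if (!isWhole(child)) return false;
      }
      return true;
    }
  }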
Example #8
 /**
  * Recursive helper for toString/toConstraintString.
  *
  * @param seg the segment (variable plus slices) to dump
  * @param buf the buffer to append to
  * @param forconstraint if true, render slices in constraint notation
  */
 protected void dumpvar(Segment seg, StringBuilder buf, boolean forconstraint) {
   if (seg.var.isTopLevel()) buf.append(seg.var.getFQN());
   else buf.append(seg.var.getShortName());
   List<DapDimension> dimset = seg.var.getDimensions();
   // Add any slices
   List<Slice> slices = seg.slices;
   if (slices == null) dimset = new ArrayList<DapDimension>();
   else assert dimset.size() == slices.size();
    for (int i = 0; i < dimset.size(); i++) {
      Slice slice = slices.get(i);
      try {
        buf.append(forconstraint ? slice.toConstraintString() : slice.toString());
      } catch (DapException de) {
        // ignore: an unrenderable slice contributes nothing to the dump
      }
    }
   // if the var is atomic, then we are done
   if (seg.var.getSort() == DapSort.ATOMICVARIABLE) return;
   // If structure and all fields are in the view, then done
   if (seg.var.getSort() == DapSort.STRUCTURE || seg.var.getSort() == DapSort.SEQUENCE) {
     if (!isWholeCompound((DapStructure) seg.var)) {
       // Need to insert {...} and recurse
       buf.append(LBRACE);
       DapStructure struct = (DapStructure) seg.var;
       boolean first = true;
       for (DapVariable field : struct.getFields()) {
         if (!first) buf.append(";");
         first = false;
         Segment fseg = findSegment(field);
         dumpvar(fseg, buf, forconstraint);
       }
       buf.append(RBRACE);
     }
     if (seg.var.getSort() == DapSort.SEQUENCE && seg.filter != null) {
       buf.append("|");
       buf.append(seg.filter.toString());
     }
   }
 }
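Reading the body back: a top-level variable renders as its FQN followed by one slice string per dimension; a structure or sequence that is not whole additionally gets its fields wrapped in braces and separated by semicolons, and a filtered sequence is suffixed with "|" plus the filter. Purely for illustration (the exact slice syntax comes from Slice.toConstraintString), a constrained structure s with fields x and y might render roughly as s[0:9]{x[0:4];y}.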
Example #9
0
    /**
      * Given a list of slices (potentially overlapping and in any order), returns an equivalent
      * list of non-overlapping slices in clustering order.
      *
      * @param slices a list of slices; it may be sorted in place by this method.
      * @return the smallest possible list of non-overlapping slices in clustering order. If the
      *     input is already normalized, the provided list may be returned directly.
     */
    private List<Slice> normalize(List<Slice> slices) {
      if (slices.size() <= 1) return slices;

      Collections.sort(
          slices,
          new Comparator<Slice>() {
            @Override
            public int compare(Slice s1, Slice s2) {
              int c = comparator.compare(s1.start(), s2.start());
              if (c != 0) return c;

              return comparator.compare(s1.end(), s2.end());
            }
          });

      List<Slice> slicesCopy = new ArrayList<>(slices.size());

      Slice last = slices.get(0);

      for (int i = 1; i < slices.size(); i++) {
        Slice s2 = slices.get(i);

        boolean includesStart = last.includes(comparator, s2.start());
        boolean includesFinish = last.includes(comparator, s2.end());

        if (includesStart && includesFinish) continue;

        if (!includesStart && !includesFinish) {
          slicesCopy.add(last);
          last = s2;
          continue;
        }

        if (includesStart) {
          last = Slice.make(last.start(), s2.end());
          continue;
        }

        assert !includesFinish;
      }

      slicesCopy.add(last);
      return slicesCopy;
    }
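A self-contained sketch of the same normalization over closed int intervals: sort by (start, end), then fold left, dropping intervals contained in the running one, emitting it when the next is disjoint, and extending it on partial overlap (NormalizeDemo and the int[] encoding are illustrative):

  import java.util.ArrayList;
  import java.util.Comparator;
  import java.util.List;

  class NormalizeDemo {
    // Merges possibly-overlapping closed intervals into sorted, disjoint ones.
    static List<int[]> normalize(List<int[]> in) {
      if (in.size() <= 1) return in;
      in.sort(Comparator.<int[]>comparingInt(r -> r[0]).thenComparingInt(r -> r[1]));
      List<int[]> out = new ArrayList<>();
      int[] last = in.get(0);
      for (int i = 1; i < in.size(); i++) {
        int[] cur = in.get(i);
        if (cur[1] <= last[1]) continue; // contained in 'last': drop
        if (cur[0] > last[1]) {          // disjoint: emit 'last', move on
          out.add(last);
          last = cur;
        } else {                         // partial overlap: extend 'last'
          last = new int[] {last[0], cur[1]};
        }
      }
      out.add(last);
      return out;
    }

    public static void main(String[] args) {
      List<int[]> merged = normalize(new ArrayList<>(List.of(
          new int[] {5, 9}, new int[] {1, 3}, new int[] {2, 6})));
      for (int[] r : merged) System.out.println(r[0] + ".." + r[1]); // prints "1..9"
    }
  }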
Example #10
  /**
   * Compute dimension-related information using slicing and redef info. In effect, this is where
   * projection constraints are applied.
   *
   * <p>Assume that the constraint compiler has given us the following info:
   *
   * <ol>
   *   <li>A list of the variables to include.
   *   <li>A pair (DapDimension,Slice) for each redef
   *   <li>For each variable in #1, a list of slices taken from the constraint expression
   * </ol>
   *
   * <p>Two products will be produced.
   *
   * <ol>
   *   <li>The variables map will be modified so that the slices properly reflect any original or
   *       redef dimensions.
   *   <li>A set, dimrefs, of all referenced original dimensions.
   * </ol>
   *
   * <p>The processing is as follows:
   *
   * <ol>
   *   <li>For each redef create a new redef dimension
   *   <li>For each variable:
   *       <ol>
   *         <li>if the variable is scalar, do nothing.
   *         <li>if the variable has no associated slices, then make its new dimensions be the
   *             original dimensions.
   *         <li>otherwise, walk the slices and create new dimensions from them; use redefs where
   *             indicated.
   *       </ol>
   * </ol>
   */
  protected void computedimensions() throws DapException {
    // Build the redefmap
    for (DapDimension key : redefslice.keySet()) {
      Slice slice = redefslice.get(key);
      DapDimension newdim = (DapDimension) key.clone();
      newdim.setSize(slice.getCount());
      redef.put(key, newdim);
    }

    // Process each variable
    for (int i = 0; i < segments.size(); i++) {
      Segment seg = segments.get(i);
      if (seg.var.getRank() == 0) continue;
      List<Slice> slices = seg.slices;
      List<DapDimension> orig = seg.var.getDimensions();
      List<DapDimension> newdims = new ArrayList<>();
      // If the slice list is short then pad it with
      // default slices
      if (slices == null) slices = new ArrayList<Slice>();
      while (slices.size() < orig.size()) { // pad with default slices
        slices.add(new Slice().setConstrained(false));
      }
      assert slices.size() == orig.size();
      for (int j = 0; j < slices.size(); j++) {
        Slice slice = slices.get(j);
        DapDimension dim0 = orig.get(j);
        DapDimension newdim = redef.get(dim0);
        if (newdim == null) newdim = dim0;
        // fill in the undefined last value
        slice.setMaxSize(newdim.getSize());
        slice.finish();

        if (slice.isConstrained()) {
          // Construct an anonymous dimension for this slice
          newdim = new DapDimension(slice.getCount());
        } else { // replace with a new slice from the dim
          Slice newslice = new Slice(newdim);
          // track set of referenced non-anonymous dimensions
          if (!dimrefs.contains(dim0)) dimrefs.add(dim0);
          slices.set(j, newslice);
        }
        // record the dimension per variable
        newdims.add(newdim);
      }
      seg.setDimset(newdims);
    }
  }
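A loose, self-contained analogue of just the padding-and-redef step described above: a short slice list is padded to the variable's rank, and each dimension's size is taken from its redef when one exists (String dimension names, int[] {first,last} slices, and both maps are assumptions for illustration):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.Map;

  class DimDemo {
    static List<int[]> applyDims(List<int[]> slices, List<String> dims,
        Map<String, Integer> sizes, Map<String, Integer> redefs) {
      List<int[]> out = slices == null ? new ArrayList<>() : new ArrayList<>(slices);
      while (out.size() < dims.size()) out.add(null); // pad missing slices
      for (int j = 0; j < dims.size(); j++) {
        // A redef overrides the original dimension size.
        int size = redefs.getOrDefault(dims.get(j), sizes.get(dims.get(j)));
        if (out.get(j) == null) out.set(j, new int[] {0, size - 1}); // default slice
        else out.get(j)[1] = Math.min(out.get(j)[1], size - 1);      // clamp to dim
      }
      return out;
    }
  }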
Example #11
 public long getCardinality(Slice slice, BindingSet bindings) {
   long card = getCardinality(slice.getArg(), bindings);
   return Math.min(card, slice.getLimit());
 }
Example #12
 void setSlices(List<Slice> slices) throws DapException {
   this.slices = slices;
   // Make sure they are finished
   for (Slice sl : slices) sl.finish();
 }
Example #13
0
  public static void run(
      SIRStream str,
      JInterfaceDeclaration[] interfaces,
      SIRInterfaceTable[] interfaceTables,
      SIRStructure[] structs,
      SIRHelper[] helpers,
      SIRGlobal global) {
    System.out.println("Entry to SMP Backend...");

    checkArguments();
    setScheduler();

    if (KjcOptions.smp > 16) {
      setupLargeConfig();
    }

    // create cores in desired amount and order
    int[] cores = new int[KjcOptions.smp];
    for (int x = 0; x < KjcOptions.smp; x++) cores[x] = coreOrder[x];
    chip = new SMPMachine(cores);

    // create a new structs.h file for typedefs etc.
    structs_h = new Structs_h(structs);

    // The usual optimizations and transformation to slice graph
    CommonPasses commonPasses = new CommonPasses();
    // perform standard optimizations, use the number of cores the user wants to target
    commonPasses.run(str, interfaces, interfaceTables, structs, helpers, global, chip.size());
    // perform some standard cleanup on the slice graph.
    commonPasses.simplifySlices();
    // dump slice graph to dot file
    commonPasses.getSlicer().dumpGraph("traces.dot", null);

    // partition the slice graph based on the scheduling policy
    SpaceTimeScheduleAndSlicer graphSchedule =
        new SpaceTimeScheduleAndSlicer(commonPasses.getSlicer());
    scheduler.setGraphSchedule(graphSchedule);
    scheduler.run(chip.size());
    FilterInfo.reset();

    // generate schedules for initialization, primepump and steady-state
    scheduleSlices(graphSchedule);

    // generate layout for filters
    scheduler.runLayout();

    // dump final slice graph to dot file
    graphSchedule.getSlicer().dumpGraph("after_slice_partition.dot", scheduler);
    graphSchedule.getSlicer().dumpGraph("slice_graph.dot", scheduler, false);

    // if load balancing, find candidate fission groups to load balance
    if (KjcOptions.loadbalance) {
      LoadBalancer.findCandidates();
      LoadBalancer.instrumentMainMethods();
    }

    // create all buffers and set the rotation lengths
    RotatingBuffer.createBuffers(graphSchedule);

    // now convert to Kopi code plus communication commands
    backEndBits = new SMPBackEndFactory(chip, scheduler);
    backEndBits.getBackEndMain().run(graphSchedule, backEndBits);

    // generate code for file writer
    CoreCodeStore.generatePrintOutputCode();

    if (KjcOptions.numbers > 0) chip.getNthComputeNode(0).getComputeCode().generateNumbersCode();

    // emit c code for all cores
    EmitSMPCode.doit(backEndBits);

    // dump structs.h file
    structs_h.writeToFile();

    // display final assignment of filters to cores
    System.out.println("Final filter assignments:");
    System.out.println("========================================");
    for (int x = 0; x < KjcOptions.smp; x++) {
      Core core = chip.getNthComputeNode(x);
      Set<FilterSliceNode> filters = core.getComputeCode().getFilters();
      long totalWork = 0;

      System.out.println("Core " + core.getCoreID() + ": ");
      for (FilterSliceNode filter : filters) {
        long work = SliceWorkEstimate.getWork(filter.getParent());
        System.out.format("%16d | %s%n", work, filter);
        totalWork += work;
      }
      System.out.format("%16d | Total\n", totalWork);
    }

    // calculate computation to communication ratio
    if (KjcOptions.sharedbufs) {
      LinkedList<Slice> slices =
          DataFlowOrder.getTraversal(graphSchedule.getSlicer().getTopSlices());
      HashSet<Slice> compProcessed = new HashSet<Slice>();
      HashSet<Slice> commProcessed = new HashSet<Slice>();

      long comp = 0;
      long comm = 0;

      for (Slice slice : slices) {
        if (compProcessed.contains(slice)) continue;

        comp += SliceWorkEstimate.getWork(slice);
        compProcessed.add(slice);
      }

      /*
                  for(Slice slice : slices) {
                      if(commProcessed.contains(slice))
                          continue;

                      FilterInfo info = FilterInfo.getFilterInfo(slice.getFirstFilter());
                      int totalItemsReceived = info.totalItemsReceived(SchedulingPhase.STEADY);

                      if(totalItemsReceived == 0)
                          continue;

                      InputSliceNode input = slice.getHead();
                      Set<InterSliceEdge> sources = input.getSourceSet(SchedulingPhase.STEADY);
                      int numInputRots = totalItemsReceived / input.totalWeights(SchedulingPhase.STEADY);

                      if(!FissionGroupStore.isFizzed(slice)) {
                          for(InterSliceEdge source : sources) {
                              Slice srcSlice = source.getSrc().getParent();

      //                         if(srcSlice.getFirstFilter().isFileInput())
      //                             continue;

                              if(FissionGroupStore.isFizzed(srcSlice)) {
                                  // Filter is not fizzed, source is fizzed
                                  // Filter must receive (N-1)/N of inputs from different cores
                                  comm += numInputRots *
                                      input.getWeight(source, SchedulingPhase.STEADY) /
                                      KjcOptions.smp * (KjcOptions.smp - 1);
                              }
                              else {
                                  // Filter is not fizzed, source is not fizzed
                                  // Check to see if on same core
                                  // If not, must communicate all elements
                                  if(!scheduler.getComputeNode(slice.getFirstFilter()).equals(
                                         scheduler.getComputeNode(srcSlice.getFirstFilter()))) {
                                      comm += numInputRots *
                                          input.getWeight(source, SchedulingPhase.STEADY);
                                  }
                              }
                          }
                      }
                      else {
                          for(InterSliceEdge source : sources) {
                              Slice srcSlice = source.getSrc().getParent();

      //                         if(srcSlice.getFirstFilter().isFileInput())
      //                             continue;

                              if(FissionGroupStore.isFizzed(srcSlice)) {
                                  // Filter is fizzed, source is also fizzed
                                  int totalItemsReceivedPerFizzed = totalItemsReceived /
                                      FissionGroupStore.getFissionGroup(slice).fizzedSlices.length;
                                  int numInputRotsPerFizzed = numInputRots /
                                      FissionGroupStore.getFissionGroup(slice).fizzedSlices.length;

                                  System.out.println("totalItemsReceivedPerFizzed: " + totalItemsReceivedPerFizzed);
                                  System.out.println("numInputRotsPerFizzed: " + numInputRotsPerFizzed);

                                  int inputWeightBeforeSrc =
                                      input.weightBefore(source, SchedulingPhase.STEADY);
                                  int inputWeightSrc = input.getWeight(source, SchedulingPhase.STEADY);
                                  int inputTotalWeight = input.totalWeights(SchedulingPhase.STEADY);

                                  System.out.println("inputWeightBeforeSrc: " + inputWeightBeforeSrc);
                                  System.out.println("inputWeightSrc: " + inputWeightSrc);
                                  System.out.println("copyDown: " + info.copyDown);

                                  int numXmit = 0;

                                  for(int rot = 0 ; rot < numInputRotsPerFizzed ; rot++) {
                                      numXmit += Math.min(inputWeightSrc,
                                                          Math.max(0,
                                                                   info.copyDown +
                                                                   rot * inputTotalWeight +
                                                                   inputWeightBeforeSrc + inputWeightSrc -
                                                                   totalItemsReceivedPerFizzed));
                                  }

                                  System.out.println("numXmit: " + numXmit);

                                  comm += KjcOptions.smp * numXmit;
                              }
                              else {
                                  // Filter is fizzed, source is not fizzed
                                  // Source must send (N-1)/N of outputs to different cores
                                  comm += numInputRots *
                                      input.getWeight(source, SchedulingPhase.STEADY) /
                                      KjcOptions.smp * (KjcOptions.smp - 1);
                              }
                          }
                      }

                      commProcessed.add(slice);
                  }
                  */

      // Simple communication estimation
      for (Slice slice : slices) {
        if (commProcessed.contains(slice)) continue;

        FilterInfo info = FilterInfo.getFilterInfo(slice.getFirstFilter());
        int totalItemsReceived = info.totalItemsReceived(SchedulingPhase.STEADY);

        if (totalItemsReceived == 0) continue;

        comm += totalItemsReceived;

        if (FissionGroupStore.isFizzed(slice)) {
          assert info.peek >= info.pop;
          comm += (info.peek - info.pop) * KjcOptions.smp;
        }

        commProcessed.add(slice);
      }

      // Simple communication estimation 2
      /*
      for(Slice slice : slices) {
          if(commProcessed.contains(slice))
              continue;

          FilterInfo info = FilterInfo.getFilterInfo(slice.getFirstFilter());
          int totalItemsReceived = info.totalItemsReceived(SchedulingPhase.STEADY);
          int totalItemsSent = info.totalItemsSent(SchedulingPhase.STEADY);

          comm += totalItemsReceived;
          comm += totalItemsSent;

          if(totalItemsReceived == 0)
              continue;

          if(FissionGroupStore.isFizzed(slice)) {
              assert info.peek >= info.pop;
              comm += (info.peek - info.pop) * KjcOptions.smp;
          }

          commProcessed.add(slice);
      }
      */

      System.out.println("Final Computation: " + comp);
      System.out.println("Final Communication: " + comm);
      System.out.println("Final Comp/Comm Ratio: " + (float) comp / (float) comm);
    }

    System.exit(0);
  }
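The live estimate in the sharedbufs branch reduces to simple arithmetic: every steady-state item received counts once, and a fizzed slice additionally pays its sliding-window overlap (peek - pop) once per core. A toy sketch of that arithmetic (Filter and estimateComm are illustrative, not part of the StreamIt backend):

  import java.util.List;

  record Filter(int itemsReceived, int peek, int pop, boolean fizzed) {
    static long estimateComm(List<Filter> filters, int cores) {
      long comm = 0;
      for (Filter f : filters) {
        if (f.itemsReceived() == 0) continue; // nothing arrives, nothing moves
        comm += f.itemsReceived();            // every received item moves once
        if (f.fizzed()) {                     // fizzed: window overlap per core
          assert f.peek() >= f.pop();
          comm += (long) (f.peek() - f.pop()) * cores;
        }
      }
      return comm;
    }
  }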
Example #14
0
  /**
   * Creates a {@code Slices} object that contains a single slice.
   *
   * @param comparator the comparator for the table that {@code slice} is a slice of.
   * @param slice the single slice that the returned object should contain.
   * @return the newly created {@code Slices} object.
   */
  public static Slices with(ClusteringComparator comparator, Slice slice) {
    if (slice.start() == Slice.Bound.BOTTOM && slice.end() == Slice.Bound.TOP) return Slices.ALL;

    assert comparator.compare(slice.start(), slice.end()) <= 0;
    return new ArrayBackedSlices(comparator, new Slice[] {slice});
  }
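The factory short-circuits the fully unbounded slice to the shared Slices.ALL instance instead of allocating a one-element array. A generic sketch of that pattern over int pairs (SlicesFactoryDemo and the MIN/MAX sentinel are illustrative):

  class SlicesFactoryDemo {
    // Shared singleton standing in for Slices.ALL.
    static final int[][] ALL = {{Integer.MIN_VALUE, Integer.MAX_VALUE}};

    static int[][] with(int lo, int hi) {
      if (lo == Integer.MIN_VALUE && hi == Integer.MAX_VALUE) return ALL; // unbounded
      assert lo <= hi;
      return new int[][] {{lo, hi}};
    }
  }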
Example #15
0
 public Builder add(Slice.Bound start, Slice.Bound end) {
   return add(Slice.make(start, end));
 }