Example #1
  public test2O(String[] args) throws Exception {

    int numtask, taskid;
    test outmsg[] = new test[1];
    test inmsg[] = new test[1];
    int dest = 0, type = 1;
    int source, rtype;
    Status status;
    Request req;

    MPI.Init(args);
    taskid = MPI.COMM_WORLD.Rank();
    numtask = MPI.COMM_WORLD.Size();

    if (numtask != 2) {
      if (taskid == 0) {
        System.out.println("test2O must run with exactly 2 tasks");
      }
      }
      MPI.Finalize();
      return;
    }

    if (taskid == 1) {
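      /* Rank 1: wait at the barrier so rank 0 can probe the still-empty
       * request first, then send a single object with tag 1. */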
      MPI.COMM_WORLD.Barrier();
      outmsg[0] = new test();
      outmsg[0].a = 5;
      type = 1;
      MPI.COMM_WORLD.Send(outmsg, 0, 1, MPI.OBJECT, dest, type);
    }

    if (taskid == 0) {
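      /* Rank 0: post a nonblocking receive with wildcard source and tag.
       * Before the barrier the matching send cannot have been posted, so
       * Test() must return null; after the barrier, Wait() completes it. */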
      source = MPI.ANY_SOURCE;
      rtype = MPI.ANY_TAG;
      req = MPI.COMM_WORLD.Irecv(inmsg, 0, 1, MPI.OBJECT, source, rtype);

      status = req.Test();
      if (status != null) System.out.println("ERROR(1)");
      MPI.COMM_WORLD.Barrier();

      status = req.Wait();
      if (inmsg[0].a != 5 || status.source != 1 || status.tag != 1) {
        System.out.println("ERROR(2)");
        System.out.println(" inmsg[0].a " + inmsg[0].a);
        System.out.println(" status.source " + status.source);
        System.out.println(" status.tag " + status.source);
      }
    }

    MPI.COMM_WORLD.Barrier();
    if (taskid == 1) System.out.println("Test2O TEST COMPLETE <" + taskid + ">");
    MPI.Finalize();
  }
Example #2
  /** Each subclass can implement the receive method accordingly. */
  protected List<MigrationObject> doReceive() {
    List<MigrationObject> migrants = new ArrayList<MigrationObject>();

    /* Check whether any delayed migrants (from a previous receive) have
     * arrived and collect them. */
    migrants.addAll(testRecvRequests());

    /* Try to receive a migrant from each sender. */
    for (int i = 0; i < getSenders().length; i++) {

      /* Use a fresh buffer for every request: a pending receive keeps its
       * buffer registered in the pending map, so it must not be reused for
       * the next Irecv. */
      MigrationObject[] recvBuf = new MigrationObject[1];

      Request request =
          getCommunicator().Irecv(recvBuf, 0, recvBuf.length, MPI.OBJECT, getSenders()[i], TAG);

      if (request.Test() != null) {
        /* If received. */
        if (recvBuf[0] != null) {
          migrants.add(recvBuf[0]);
        }
      } else {
        addRecvRequest(request, recvBuf); /* Request pending. */
      }
    }

    return migrants;
  }
Example #3
  /** Each subclass can implement the send method accordingly. */
  protected void doSend(MigrationObject migrant) {

    MigrationObject[] sendBuf = new MigrationObject[1];
    sendBuf[0] = migrant;

    /* Complete previous sends. */
    testSendRequests();

    for (int i = 0; i < getReceivers().length; i++) {
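      /* Post a nonblocking send to every receiver; only sends that are
       * still pending after a quick Test() need to be tracked. */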
      Request sendRequest =
          getCommunicator().Isend(sendBuf, 0, sendBuf.length, MPI.OBJECT, getReceivers()[i], TAG);
      if (sendRequest.Test() == null) {
        /* Request pending. */
        addSendRequest(sendRequest, sendBuf);
      }
      }
    }
  }
Example #4
  /**
   * Cleans up this topology by finalizing all pending communications. This method must be called
   * when the topology is no longer needed.
   */
  public void finish() {

    /* Make sure all send requests have finished before exit. */
    if ((pendingSendRequest != null) && !pendingSendRequest.isEmpty()) {
      Request[] rS = new Request[pendingSendRequest.size()];
      rS = pendingSendRequest.keySet().toArray(rS);
      Request.Waitall(rS);
      pendingSendRequest.clear();
    }

    /* Make sure all recv requests have finished before exit. The received
     * objects are ignored and get lost. */
    if ((pendingRecvRequest != null) && !pendingRecvRequest.isEmpty()) {
      Request[] rR = new Request[pendingRecvRequest.size()];
      rR = pendingRecvRequest.keySet().toArray(rR);
      Request.Waitall(rR);
      pendingRecvRequest.clear();
    }
  }
Example #5
  /* Looks for finished send communications and updates the pending list. */
  private void testSendRequests() {
    if ((pendingSendRequest != null) && !pendingSendRequest.isEmpty()) {
      Request[] rS = new Request[pendingSendRequest.size()];
      rS = pendingSendRequest.keySet().toArray(rS);

      Status[] status = Request.Testsome(rS);
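      /* Testsome returns one Status per completed request; drop each
       * completed request from the pending map. */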
      for (int i = 0; i < status.length; i++) {
        pendingSendRequest.remove(rS[status[i].index]);
      }
    }
  }
Example #6
  public static void main(String args[]) throws MPIException {
    int myself, tasks;
    IntBuffer in = MPI.newIntBuffer(MAXLEN);
    Request request;

    MPI.Init(args);
    myself = MPI.COMM_WORLD.getRank();
    tasks = MPI.COMM_WORLD.getSize();

    for (int j = 1; j <= MAXLEN; j *= 10) {
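      /* Fill the first j elements with their indices, reduce them across
       * all ranks, and check that element k ends up as k * tasks. */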
      for (int i = 0; i < j; i++) {
        in.put(i, i);
      }

      request = MPI.COMM_WORLD.iAllReduce(in, j, MPI.INT, MPI.SUM);
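      /* Block until the nonblocking reduction completes, then release the
       * underlying native request. */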
      request.waitFor();
      request.free();

      for (int k = 0; k < j; k++) {
        if (in.get(k) != k * tasks) {
          OmpitestError.ompitestError(
              OmpitestError.getFileName(),
              OmpitestError.getLineNumber(),
              " bad answer ("
                  + in.get(k)
                  + ") at index "
                  + k
                  + " of "
                  + j
                  + " (should be "
                  + (k * tasks)
                  + ")\n");
          break;
        }
      }
    }

    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
  }
Example #7
  /* Returns a list with the delayed migrants and updates the pending list. */
  private List<MigrationObject> testRecvRequests() {

    List<MigrationObject> delayedMigrants = new ArrayList<MigrationObject>();

    if (pendingRecvRequest != null) {
      Request[] rR = new Request[pendingRecvRequest.size()];
      rR = pendingRecvRequest.keySet().toArray(rR);

      Status[] status = Request.Testsome(rR);
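      /* Each completed receive delivers at most one migrant through the
       * buffer that was registered together with the request. */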

      for (int i = 0; i < status.length; i++) {
        Request r = rR[status[i].index];
        MigrationObject delayedMigrant = ((MigrationObject[]) pendingRecvRequest.get(r))[0];
        if (delayedMigrant != null) {
          delayedMigrants.add(delayedMigrant);
        }
        pendingRecvRequest.remove(r);
      }
    }

    return delayedMigrants;
  }
Example #8
  public testany(String[] args) throws Exception {

    int me, tasks, i, done;
    int mebuf[] = new int[1];

    MPI.Init(args);
    me = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();

    int data[] = new int[tasks];
    Request req[] = new Request[tasks];
    Status status;

    mebuf[0] = me;
    if (me > 0) MPI.COMM_WORLD.Send(mebuf, 0, 1, MPI.INT, 0, 1);
    else if (me == 0) {
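      /* Rank 0 posts one nonblocking receive per sender and polls
       * Testany until all of them have completed. */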
      req[0] = MPI.REQUEST_NULL;
      for (i = 1; i < tasks; i++) req[i] = MPI.COMM_WORLD.Irecv(data, i, 1, MPI.INT, i, 1);

      done = 0;
      while (done < tasks - 1) {
        status = Request.Testany(req);
        if (status != null) {
          done++;
          if (!req[status.index].Is_null())
            System.out.println("ERROR in MPI_Testany: reqest not set to null");
          if (data[status.index] != status.index)
            System.out.println("ERROR in MPI.Testany: wrong data");
        }
      }
    }

    // MPI.COMM_WORLD.Barrier();
    // if(me == 1)
    System.out.println("Testany TEST COMPLETE <" + me + ">");
    MPI.Finalize();
  }
Example #9
  public static void main(String[] args) throws MPIException {
    Comm comm;
    IntBuffer sBuf, rBuf;
    int rank, size, extent;
    int[] sendCounts, recvCounts, rDispls, sDispls;
    Datatype[] sDTypes, rDTypes;
    Request req;

    MPI.Init(args);

    comm = MPI.COMM_WORLD;

    /* Create the buffer */
    size = comm.getSize();
    rank = comm.getRank();

    sBuf = MPI.newIntBuffer(size * size);
    rBuf = MPI.newIntBuffer(size * size);

    /* Load up the buffers */
    for (int i = 0; i < (size * size); i++) {
      sBuf.put(i, (i + 100 * rank));
      rBuf.put(i, -i);
    }

    /* Create and load the arguments to alltoallw */
    sendCounts = new int[size];
    recvCounts = new int[size];
    rDispls = new int[size];
    sDispls = new int[size];
    sDTypes = new Datatype[size];
    rDTypes = new Datatype[size];

    /* MPI.INT.getExtent() reports the extent in elements (1), but the
     * displacement calculations below need bytes, so use 4 bytes per int. */
    extent = 4;

    for (int i = 0; i < size; i++) {
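      /* Each process sends i ints to rank i (packed at byte offset
       * i*(i+1)/2 * extent) and receives `rank` ints from every peer
       * (at byte offset i*rank*extent); alltoallw displacements are in
       * bytes. */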
      sendCounts[i] = i;
      recvCounts[i] = rank;
      rDispls[i] = (i * rank * extent);
      sDispls[i] = (((i * (i + 1)) / 2) * extent);
      sDTypes[i] = MPI.INT;
      rDTypes[i] = MPI.INT;
    }

    req = comm.iAllToAllw(sBuf, sendCounts, sDispls, sDTypes, rBuf, recvCounts, rDispls, rDTypes);
    req.waitFor();
    req.free();

    /* Check rBuf: the block received from rank i starts at rDispls[i]
     * and must hold i*100 + rank*(rank+1)/2 + j for j = 0..rank-1. */
    for (int i = 0; i < size; i++) {
      int p = rDispls[i] / extent;
      for (int j = 0; j < rank; j++) {
        if (rBuf.get(p + j) != (i * 100 + (rank * (rank + 1)) / 2 + j)) {
          System.out.println(i + " " + j + " " + size + " " + rank + " " + extent);
          OmpitestError.ompitestError(
              OmpitestError.getFileName(),
              OmpitestError.getLineNumber(),
              "bad answer "
                  + rBuf.get(p + j)
                  + " (should be "
                  + (i * 100 + (rank * (rank + 1)) / 2 + j)
                  + ")\n");
        }
      }
    }

    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
    if (rank == 0) {
      System.out.println("Test completed.");
    }
  }