Пример #1
1
  /**
   * Exercises a non-blocking receive (Irecv/Test/Wait) against a blocking object send
   * between exactly two ranks.
   *
   * <p>Rank 1 waits at a barrier, then sends one {@code test} object with tag 1. Rank 0
   * posts a wildcard Irecv first, checks via {@code Test()} that the message has not yet
   * arrived (the sender is still blocked in the barrier), passes the barrier, then waits
   * and validates payload, source and tag.
   *
   * @param args command-line arguments forwarded to MPI.Init
   * @throws Exception if any MPI call fails
   */
  public test2O(String[] args) throws Exception {

    int numtask, taskid;
    test outmsg[] = new test[1];
    test inmsg[] = new test[1];
    int dest = 0, type = 1;
    int source, rtype;
    Status status;
    Request req;

    MPI.Init(args);
    taskid = MPI.COMM_WORLD.Rank();
    numtask = MPI.COMM_WORLD.Size();

    // The protocol below is written for exactly ranks 0 and 1; bail out otherwise.
    if (numtask > 2) {
      if (taskid == 0) {
        System.out.println("test2O must run with at most 2 tasks");
      }
      MPI.Finalize();
      return;
    }

    if (taskid == 1) {
      // Hold the send back until rank 0 has verified the Irecv is still pending.
      MPI.COMM_WORLD.Barrier();
      outmsg[0] = new test();
      outmsg[0].a = 5;
      type = 1;
      MPI.COMM_WORLD.Send(outmsg, 0, 1, MPI.OBJECT, dest, type);
    }

    if (taskid == 0) {
      source = MPI.ANY_SOURCE;
      rtype = MPI.ANY_TAG;
      req = MPI.COMM_WORLD.Irecv(inmsg, 0, 1, MPI.OBJECT, source, rtype);

      // No message can have completed yet: rank 1 only sends after the barrier.
      status = req.Test();
      if (status != null) System.out.println("ERROR(1)");
      MPI.COMM_WORLD.Barrier();

      status = req.Wait();
      if (inmsg[0].a != 5 || status.source != 1 || status.tag != 1) {
        System.out.println("ERROR(2)");
        System.out.println(" inmsg[0].a " + inmsg[0].a);
        System.out.println(" status.source " + status.source);
        // Fixed: previously printed status.source under the "status.tag" label.
        System.out.println(" status.tag " + status.tag);
      }
    }

    MPI.COMM_WORLD.Barrier();
    if (taskid == 1) System.out.println("Test2O TEST COMPLETE <" + taskid + ">");
    MPI.Finalize();
  }
Пример #2
0
  @AdviseWith(adviceClasses = {PropsUtilAdvice.class})
  @Test
  public void testClassInitializationOnSPI() throws Exception {
    // Configure the intraband layer the way an SPI process would see it.
    System.setProperty(PropsKeys.INTRABAND_IMPL, SelectorIntraband.class.getName());
    System.setProperty(PropsKeys.INTRABAND_TIMEOUT_DEFAULT, "10000");
    System.setProperty(PropsKeys.INTRABAND_WELDER_IMPL, SocketWelder.class.getName());

    MPI mpiImplementation = _getMPIImpl();

    Assert.assertNotNull(mpiImplementation);
    Assert.assertTrue(mpiImplementation.isAlive());

    // The published MPI must be the RMI stub for the local implementation.
    MPI mpiStub = MPIHelperUtil.getMPI();

    Assert.assertSame(mpiStub, UnicastRemoteObject.toStub(mpiImplementation));
    Assert.assertTrue(mpiStub.isAlive());

    // The selector-based intraband configured above must have been picked up.
    Intraband intrabandInstance = MPIHelperUtil.getIntraband();

    Assert.assertSame(SelectorIntraband.class, intrabandInstance.getClass());

    DatagramReceiveHandler[] handlers = intrabandInstance.getDatagramReceiveHandlers();

    Assert.assertSame(
        BootstrapRPCDatagramReceiveHandler.class, handlers[SystemDataType.RPC.getValue()].getClass());
  }
Пример #3
0
  /**
   * Validates MPI_Reduce_scatter with {@code MPI.SUM} over int data.
   *
   * <p>Every rank contributes the vector 0..j*tasks-1; after the reduce-scatter each
   * rank owns j summed elements, where element k on rank r must equal
   * tasks * (r * j + k).
   *
   * @param args command-line arguments forwarded to MPI.Init
   * @throws Exception if any MPI call fails
   */
  public reduce_scatter(String[] args) throws Exception {

    final int MAXLEN = 10000;

    int out[] = new int[MAXLEN * 100];
    int in[] = new int[MAXLEN * 100];
    int i, j, k;
    int myself, tasks;
    int recvcounts[] = new int[128];

    MPI.Init(args);
    myself = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();

    // The test is sized for at most 8 ranks (the guard is <= 8, not exactly 8).
    if (tasks > 8) {
      if (myself == 0) {
        System.out.println("reduce_scatter must run with at most 8 tasks!");
      }
      MPI.Finalize();
      return;
    }
    // Each rank receives j elements of the reduced vector.
    j = 10;
    // for(j=1;j<=MAXLEN*tasks;j*=10) {
    for (i = 0; i < tasks; i++) recvcounts[i] = j;
    for (i = 0; i < j * tasks; i++) out[i] = i;

    MPI.COMM_WORLD.Reduce_scatter(out, 0, in, 0, recvcounts, MPI.INT, MPI.SUM);

    // Element k of this rank's slice must be the global index summed over all ranks.
    for (k = 0; k < j; k++) {
      if (in[k] != tasks * (myself * j + k)) {
        System.out.println(
            "bad answer ("
                + in[k]
                + ") at index "
                + k
                + " of "
                + j
                + "(should be "
                + tasks * (myself * j + k)
                + ")");
        break;
      }
    }
    // }

    MPI.COMM_WORLD.Barrier();
    if (myself == 0) System.out.println("Reduce_scatter TEST COMPLETE");
    MPI.Finalize();
  }
Пример #4
0
 /**
  * Queries the true lower bound and true extent of this datatype from the native
  * layer and caches them, converted from bytes into units of the base type.
  *
  * @throws MPIException if MPI has not been initialized
  */
 private void getTrueLbExtent() throws MPIException {
   MPI.check();
   // result[0] = true lower bound in bytes, result[1] = true extent in bytes.
   int[] result = new int[2];
   getTrueLbExtent(handle, result);
   trueLb = result[0] / baseSize;
   trueExtent = result[1] / baseSize;
 }
Пример #5
0
  /**
   * Validates MPI_Scan over an array of {@code complexNum} objects using the
   * user-defined {@code complexAdd} operation.
   *
   * <p>Every rank contributes realPart = imaginPart = index, so after the inclusive
   * prefix scan, rank r's element k must have realPart == k * (r + 1).
   *
   * @param args command-line arguments forwarded to MPI.Init
   * @throws Exception if any MPI call fails
   */
  public scanO(String[] args) throws Exception {

    final int MAXLEN = 10000;

    int i, k;
    complexNum out[] = new complexNum[MAXLEN];
    complexNum in[] = new complexNum[MAXLEN];
    int myself, tasks;
    boolean bool = false;

    MPI.Init(args);
    myself = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();

    for (i = 0; i < MAXLEN; i++) {
      in[i] = new complexNum();
      out[i] = new complexNum();
      out[i].realPart = i;
      out[i].imaginPart = i;
    }

    // User-defined reduction (commute flag = false) over serialized objects.
    complexAdd cadd = new complexAdd();
    Op op = new Op(cadd, bool);
    MPI.COMM_WORLD.Scan(out, 0, in, 0, MAXLEN, MPI.OBJECT, op);

    // Inclusive scan over ranks 0..myself: element k sums to k * (myself + 1).
    for (k = 0; k < MAXLEN; k++) {
      if (in[k].realPart != k * (myself + 1)) {
        System.out.println(
            "bad answer ("
                + (in[k].realPart)
                + ") at index "
                + k
                + "(should be "
                + (k * (myself + 1))
                + ")");
        break;
      }
    }

    MPI.COMM_WORLD.Barrier();
    if (myself == 0) System.out.println("ScanO TEST COMPLETE");
    MPI.Finalize();
  }
Пример #6
0
  public static void main(String args[]) throws MPIException {
    MPI.Init(args);

    int rank = MPI.COMM_WORLD.getRank();
    int numTasks = MPI.COMM_WORLD.getSize();
    IntBuffer buffer = MPI.newIntBuffer(MAXLEN);

    // Exercise message lengths 1, 10, 100, ... up to MAXLEN.
    for (int len = 1; len <= MAXLEN; len *= 10) {
      for (int idx = 0; idx < len; idx++) {
        buffer.put(idx, idx);
      }

      // Non-blocking in-place all-reduce; wait for completion, then release the request.
      Request request = MPI.COMM_WORLD.iAllReduce(buffer, len, MPI.INT, MPI.SUM);
      request.waitFor();
      request.free();

      // Every rank contributed idx at slot idx, so each slot must hold idx * numTasks.
      for (int idx = 0; idx < len; idx++) {
        if (buffer.get(idx) != idx * numTasks) {
          OmpitestError.ompitestError(
              OmpitestError.getFileName(),
              OmpitestError.getLineNumber(),
              " bad answer ("
                  + buffer.get(idx)
                  + ") at index "
                  + idx
                  + " of "
                  + len
                  + " (should be "
                  + (idx * numTasks)
                  + ")\n");
          break;
        }
      }
    }

    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
  }
Пример #7
0
  public static void main(String args[]) throws MPIException {
    MPI.Init(args);

    int rank = MPI.COMM_WORLD.getRank();
    int numTasks = MPI.COMM_WORLD.getSize();
    int[] sendBuf = new int[MAXLEN];
    int[] recvBuf = new int[MAXLEN];

    // Use a middle rank as the reduction root.
    int root = numTasks / 2;

    // Exercise message lengths 1, 10, 100, ... up to MAXLEN.
    for (int len = 1; len <= MAXLEN; len *= 10) {
      for (int idx = 0; idx < len; idx++) {
        sendBuf[idx] = idx;
      }

      MPI.COMM_WORLD.reduce(sendBuf, recvBuf, len, MPI.INT, MPI.SUM, root);

      // Only the root holds the reduced vector; each slot must hold idx * numTasks.
      if (rank == root) {
        for (int idx = 0; idx < len; idx++) {
          if (recvBuf[idx] != idx * numTasks) {
            OmpitestError.ompitestError(
                OmpitestError.getFileName(),
                OmpitestError.getLineNumber(),
                " bad answer ("
                    + recvBuf[idx]
                    + ") at index "
                    + idx
                    + " of "
                    + len
                    + " (should be "
                    + (idx * numTasks)
                    + ")\n");
            break;
          }
        }
      }
    }
    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
  }
Пример #8
0
  /**
   * Exercises Cartcomm.Sub: builds a non-periodic 2x3 cartesian topology on 8 tasks
   * and extracts sub-communicators that keep only the second dimension.
   *
   * @param args command-line arguments forwarded to MPI.Init
   * @throws Exception if any MPI call fails
   */
  public sub2(String[] args) throws Exception {

    int dims[] = new int[2];
    dims[0] = 2;
    dims[1] = 3;
    boolean periods[] = new boolean[2];
    int size, rank;
    MPI.Init(args);
    rank = MPI.COMM_WORLD.Rank();
    size = MPI.COMM_WORLD.Size();

    if (size != 8) {
      if (rank == 0) System.out.println("topo->sub2: MUST RUN WITH 8 TASKS");

      MPI.COMM_WORLD.Barrier();
      MPI.Finalize();
      return;
    }

    // The 2x3 grid uses only 6 of the 8 ranks; the rest get a null communicator.
    Cartcomm comm = MPI.COMM_WORLD.Create_cart(dims, periods, false);

    if (comm != null) {
      // Keep only the second dimension: each sub-communicator spans 3 ranks.
      boolean remain[] = new boolean[2];
      remain[0] = false;
      remain[1] = true;
      Cartcomm subcomm = comm.Sub(remain);
      int nsize = subcomm.Size();
      int nrank = subcomm.Rank();

      // Fixed: closing ">" delimiters were missing in the original output strings.
      System.out.println(
          "rank <" + rank + ">,nrank<" + nrank + ">,size<" + size + ">,nsize <" + nsize + ">");
    } else {
      System.out.println("rank <" + rank + ">,size<" + size + ">");
    }

    MPI.COMM_WORLD.Barrier();
    MPI.Finalize();
  }
Пример #9
0
  /**
   * Exercises Request.Testany: rank 0 posts one Irecv per peer and polls Testany until
   * every message has completed, checking that a completed request is reset to
   * REQUEST_NULL and that the payload matches the sender's rank.
   *
   * @param args command-line arguments forwarded to MPI.Init
   * @throws Exception if any MPI call fails
   */
  public testany(String[] args) throws Exception {

    int me, tasks, i, done;
    int mebuf[] = new int[1];

    MPI.Init(args);
    me = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();

    int data[] = new int[tasks];
    Request req[] = new Request[tasks];
    Status status;

    mebuf[0] = me;
    if (me > 0) MPI.COMM_WORLD.Send(mebuf, 0, 1, MPI.INT, 0, 1);
    else if (me == 0) {
      // Slot 0 stays REQUEST_NULL so Testany must skip it; slot i receives from rank i.
      req[0] = MPI.REQUEST_NULL;
      for (i = 1; i < tasks; i++) req[i] = MPI.COMM_WORLD.Irecv(data, i, 1, MPI.INT, i, 1);

      done = 0;
      while (done < tasks - 1) {
        status = Request.Testany(req);
        if (status != null) {
          done++;
          if (!req[status.index].Is_null())
            System.out.println("ERROR in MPI_Testany: request not set to null");
          if (data[status.index] != status.index)
            System.out.println("ERROR in MPI.Testany: wrong data");
        }
      }
    }

    // MPI.COMM_WORLD.Barrier();
    // if(me == 1)
    System.out.println("Testany TEST COMPLETE <" + me + ">");
    MPI.Finalize();
  }
Пример #10
0
  @AdviseWith(adviceClasses = {PropsUtilAdvice.class})
  @Test
  public void testClassInitializationOnMPI() throws Exception {
    // Install a Props proxy that rejects every call: class initialization on the
    // MPI side must not depend on reading any portal property.
    InvocationHandler rejectAll =
        new InvocationHandler() {

          @Override
          public Object invoke(Object proxy, Method method, Object[] arguments) {

            throw new UnsupportedOperationException();
          }
        };

    PropsUtil.setProps(
        (Props)
            ProxyUtil.newProxyInstance(
                MPIHelperUtilTest.class.getClassLoader(), new Class<?>[] {Props.class}, rejectAll));

    MPI mpiImplementation = _getMPIImpl();

    Assert.assertNotNull(mpiImplementation);
    Assert.assertTrue(mpiImplementation.isAlive());

    // The published MPI must be the RMI stub for the local implementation.
    MPI mpiStub = MPIHelperUtil.getMPI();

    Assert.assertSame(mpiStub, UnicastRemoteObject.toStub(mpiImplementation));
    Assert.assertTrue(mpiStub.isAlive());

    // Without property overrides the executor-based intraband is the default.
    Intraband intrabandInstance = MPIHelperUtil.getIntraband();

    Assert.assertSame(ExecutorIntraband.class, intrabandInstance.getClass());

    DatagramReceiveHandler[] handlers = intrabandInstance.getDatagramReceiveHandlers();

    Assert.assertSame(
        BootstrapRPCDatagramReceiveHandler.class, handlers[SystemDataType.RPC.getValue()].getClass());
  }
Пример #11
0
  /**
   * Decodes a byte array containing serialized, concatenated MPIs into an array of
   * MPIs. The buffer layout is a 4-byte unsigned count followed by one {size, data}
   * pair per MPI.
   *
   * @param buffer the serialized MPI array
   * @return the decoded MPIs, in buffer order
   * @throws OTRException if the count field is zero or negative
   */
  public static MPI[] unserializeMPIArray(byte[] buffer) throws OTRException {
    InBuf input = new InBuf(buffer);

    int length = (int) input.readUInt();
    if (length <= 0) {
      throw new OTRException("Invalid count");
    }

    MPI[] result = new MPI[length];
    for (int index = 0; index < length; index++) {
      result[index] = MPI.readMPI(input);
    }
    return result;
  }
Пример #12
0
  /** Hash one or two MPIs. To hash only one MPI, b may be set to NULL. */
  public static MPI hash(int version, MPI a, MPI b, Provider prov) throws OTRException {
    // Layout: 1 version byte, then a 4-byte length prefix before each MPI body.
    int size = 1 + 4 + a.getLength();
    if (b != null) {
      size += 4 + b.getLength();
    }

    OutBuf serialized = new OutBuf(new byte[size]);
    serialized.writeByte((byte) version);
    serialized.writeUInt(a.getLength());
    a.writeRaw(serialized);

    if (b != null) {
      serialized.writeUInt(b.getLength());
      b.writeRaw(serialized);
    }

    // Digest the serialized form with the provider's SHA-256 implementation.
    SHA256 sha = prov.getSHA256();
    byte[] digest = sha.hash(serialized.getBytes());
    return new MPI(digest);
  }
Пример #13
0
 /**
  * Duplicates this datatype.
  *
  * <p>Java binding of {@code MPI_TYPE_DUP}.
  *
  * @return a new datatype with the same content as this one
  * @throws MPIException if MPI has not been initialized
  */
 public Datatype dup() throws MPIException {
   MPI.check();
   long duplicate = dup(handle);
   return new Datatype(this, duplicate);
 }
Пример #14
0
  /**
   * Point-to-point smoke test: rank 0 Isends an int array and a byte array to rank 1,
   * rank 1 Irecvs them and verifies the int payload round-tripped intact. The large
   * commented-out sections are an earlier variant that exercised all primitive types
   * and Request.Waitall; only the int/byte pair with explicit Wait() is active.
   *
   * @param args command-line arguments forwarded to MPI.Init
   * @throws Exception if any MPI call fails
   */
  public waitall2(String args[]) throws Exception {
    MPI.Init(args);
    int me = MPI.COMM_WORLD.Rank();

    // Slots 1..2 hold the active requests; 0 and 9 are deliberately REQUEST_NULL.
    // NOTE(review): the status array `s` is only used by the commented-out
    // Waitall variant below.
    mpi.Request r[] = new Request[10];
    mpi.Status s[] = new Status[10];
    r[0] = MPI.REQUEST_NULL;
    r[9] = MPI.REQUEST_NULL;

    // Send-side buffers, one per primitive type (only int/byte are exercised).
    int intArray[] = new int[100];
    float floatArray[] = new float[100];
    double doubleArray[] = new double[100];
    long longArray[] = new long[100];
    boolean booleanArray[] = new boolean[100];
    short shortArray[] = new short[100];
    char charArray[] = new char[100];
    byte byteArray[] = new byte[100];

    // Receive-side buffers, pre-filled with values that differ from the send side
    // so a missing transfer is detected.
    int intReadArray[] = new int[100];
    float floatReadArray[] = new float[100];
    double doubleReadArray[] = new double[100];
    long longReadArray[] = new long[100];
    boolean booleanReadArray[] = new boolean[100];
    short shortReadArray[] = new short[100];
    char charReadArray[] = new char[100];
    byte byteReadArray[] = new byte[100];

    for (int i = 0; i < intArray.length; i++) {
      intArray[i] = i + 1;
      floatArray[i] = i + 11;
      doubleArray[i] = i + 11.11;
      longArray[i] = i + 11;
      booleanArray[i] = true;
      shortArray[i] = 1;
      charArray[i] = 's';
      byteArray[i] = 's';

      intReadArray[i] = 3;
      floatReadArray[i] = i + 19;
      doubleReadArray[i] = i + 99.11;
      longReadArray[i] = i + 9;
      shortReadArray[i] = 2;
      booleanReadArray[i] = false;
      charReadArray[i] = 'x';
      byteReadArray[i] = 'x';
    }

    if (MPI.COMM_WORLD.Rank() == 0) {

      // Sender: post two non-blocking sends (tags 1 and 2), then wait on each.
      r[1] = MPI.COMM_WORLD.Isend(intArray, 0, 100, MPI.INT, 1, 1);
      r[2] = MPI.COMM_WORLD.Isend(byteArray, 0, 100, MPI.BYTE, 1, 2);
      /*
      	r[3]=MPI.COMM_WORLD.Isend(charArray,0,100,MPI.CHAR,1,3);
      	r[4]=MPI.COMM_WORLD.Isend(doubleArray,0,100,MPI.DOUBLE,1,4);
      	r[5]=MPI.COMM_WORLD.Isend(longArray,0,100,MPI.LONG,1,5);
      	r[6]=MPI.COMM_WORLD.Isend(booleanArray,0,100,MPI.BOOLEAN,1,6);
      	r[7]=MPI.COMM_WORLD.Isend(shortArray,0,100,MPI.SHORT,1,7);
      	r[8]=MPI.COMM_WORLD.Isend(floatArray,0,100,MPI.FLOAT,1,8);
      */
      r[1].Wait();
      r[2].Wait();
      /*	s = Request.Waitall(r);

      	for(int i=0 ; i<s.length ; i++) {

      	  if(s[i].equals(MPI.EMPTY_STATUS) ) {
                  continue;
      	  }

                if( s[i].source != 1 && s[i].tag != i) {
                  System.out.println("Error in status objects (sender)");
      	  }

              }
      */

    } else if (MPI.COMM_WORLD.Rank() == 1) {
      // Receiver: mirror the two sends, wait, then verify the int payload.
      r[1] = MPI.COMM_WORLD.Irecv(intReadArray, 0, 100, MPI.INT, 0, 1);
      r[2] = MPI.COMM_WORLD.Irecv(byteReadArray, 0, 100, MPI.BYTE, 0, 2);
      r[1].Wait();
      r[2].Wait();
      /*
      	r[3]=MPI.COMM_WORLD.Irecv(charReadArray,0,100,MPI.CHAR,0,3);
      	r[4]=MPI.COMM_WORLD.Irecv(doubleReadArray,0,100,MPI.DOUBLE,0,4);
      	r[5]=MPI.COMM_WORLD.Irecv(longReadArray,0,100,MPI.LONG,0,5);
      	r[6]=MPI.COMM_WORLD.Irecv(booleanReadArray,0,100,MPI.BOOLEAN,0,6);
      	r[7]=MPI.COMM_WORLD.Irecv(shortReadArray,0,100,MPI.SHORT,0,7);
      	r[8]=MPI.COMM_WORLD.Irecv(floatReadArray,0,100,MPI.FLOAT,0,8);

      	s = Request.Waitall(r);

      	for(int i=0 ; i<s.length ; i++) {

      	  if(s[i].equals(MPI.EMPTY_STATUS) ) {
                  continue;
      	  }

                if( s[i].source != 0 && s[i].tag != i) {
                  System.out.println("Error in status objects (Receiver)");
      	  }

      	}
      */
      if (Arrays.equals(intArray, intReadArray)) {
          /* &&
          Arrays.equals(floatArray,floatReadArray) &&
          Arrays.equals(doubleArray,doubleReadArray) &&
          Arrays.equals(longArray,longReadArray) &&
          Arrays.equals(shortArray,shortReadArray) &&
          Arrays.equals(charArray,charReadArray) &&
          Arrays.equals(byteArray,byteReadArray) &&
          Arrays.equals(booleanArray,booleanReadArray)) { */

        System.out.println("waitall2 TEST Completed");
        /*
        System.out.println("\n#################"+
        		"\n <<<<PASSED>>>> "+
        		"\n################");
        */
      } else {
        System.out.println("\n#################" + "\n <<<<FAILED>>>> " + "\n################");
      }
    }

    MPI.COMM_WORLD.Barrier();
    MPI.Finalize();
  }
Пример #15
0
 /**
  * Builds a datatype that replicates {@code oldType} into {@code count} contiguous
  * locations.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_CONTIGUOUS}.
  *
  * <p>The base type of the result is the same as the base type of {@code oldType}.
  *
  * @param count replication count
  * @param oldType old datatype
  * @return new datatype
  * @throws MPIException if MPI has not been initialized
  */
 public static Datatype createContiguous(int count, Datatype oldType) throws MPIException {
   MPI.check();
   long contiguous = getContiguous(count, oldType.handle);
   return new Datatype(oldType, contiguous);
 }
Пример #16
0
 /**
  * Like {@code createVector}, except that the stride is expressed directly in bytes
  * rather than in units of the old type.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_HVECTOR}.
  *
  * @param count number of blocks
  * @param blockLength number of elements in each block
  * @param stride number of bytes between the start of consecutive blocks
  * @param oldType old datatype
  * @return new datatype
  * @throws MPIException if MPI has not been initialized
  */
 public static Datatype createHVector(int count, int blockLength, int stride, Datatype oldType)
     throws MPIException {
   MPI.check();
   return new Datatype(oldType, getHVector(count, blockLength, stride, oldType.handle));
 }
Пример #17
0
  /**
   * Validates MPI_Scatterv with object data and non-uniform counts: the root scatters
   * strided slices of a {@code test[]} array (10 elements to rank 0, 5 to everyone
   * else). Ranks 1 and 2 report their results back to rank 0 as strings so output is
   * printed in a deterministic order.
   *
   * @param args command-line arguments forwarded to MPI.Init
   * @throws Exception if any MPI call fails
   */
  public scattervO(String[] args) throws Exception {

    final int MAXLEN = 10;

    int myself, tasks;
    MPI.Init(args);
    myself = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();

    if (tasks > 8) {
      if (myself == 0) System.out.println("scattervO must run with fewer than 8 tasks!");
      MPI.Finalize();
      return;
    }

    // Each rank's slice starts at i*stride in the send buffer.
    // NOTE(review): root, j and k are declared but never used.
    int root, i = 0, j, k, stride = 15;
    test out[] = new test[tasks * stride];
    test in[] = new test[MAXLEN];
    int dis[] = new int[tasks];
    int scount[] = new int[tasks];

    // Pre-fill the receive buffer with zeros so unsent slots are visible.
    for (i = 0; i < MAXLEN; i++) {
      in[i] = new test();
      in[i].a = 0;
    }
    for (i = 0; i < tasks; i++) {
      dis[i] = i * stride;
      scount[i] = 5;
    }

    // Rank 0 receives 10 elements; all other ranks receive 5.
    scount[0] = 10;

    for (i = 0; i < tasks * stride; i++) {
      out[i] = new test();
      out[i].a = i;
    }

    MPI.COMM_WORLD.Scatterv(out, 0, scount, dis, MPI.OBJECT, in, 0, scount[myself], MPI.OBJECT, 0);

    String[] messbuf = new String[1];

    if (myself == 0) {
      System.out.println("Original array on root...");
      for (i = 0; i < tasks * stride; i++) System.out.print(out[i].a + " ");
      System.out.println();
      System.out.println();

      System.out.println("Result on proc 0...");
      System.out.println("Stride = 15 " + "Count = " + scount[0]);
      for (i = 0; i < MAXLEN; i++) System.out.print(in[i].a + " ");
      System.out.println();
      System.out.println();

      // Reproduces output of original test case, but deterministically

      // Collect the pre-formatted reports from ranks 1 and 2 (if they exist) in order.
      int nmess = tasks < 3 ? tasks : 3;
      for (int t = 1; t < nmess; t++) {
        MPI.COMM_WORLD.Recv(messbuf, 0, 1, MPI.OBJECT, t, 0);

        System.out.print(messbuf[0]);
      }
    }

    if (myself == 1) {
      // Format the local result as one string and ship it to rank 0 for printing.
      StringBuffer mess = new StringBuffer();

      mess.append("Result on proc 1...\n");
      mess.append("Stride = 15 " + "Count = " + scount[1] + "\n");
      for (i = 0; i < MAXLEN; i++) mess.append(in[i].a + " ");
      mess.append("\n");
      mess.append("\n");

      messbuf[0] = mess.toString();
      MPI.COMM_WORLD.Send(messbuf, 0, 1, MPI.OBJECT, 0, 0);
    }

    if (myself == 2) {
      // Same reporting scheme as rank 1.
      StringBuffer mess = new StringBuffer();

      mess.append("Result on proc 2...\n");
      mess.append("Stride = 15 " + "Count = " + scount[2] + "\n");
      for (i = 0; i < MAXLEN; i++) mess.append(in[i].a + " ");
      mess.append("\n");

      messbuf[0] = mess.toString();
      MPI.COMM_WORLD.Send(messbuf, 0, 1, MPI.OBJECT, 0, 0);
    }

    if (myself == 0) System.out.println("ScattervO TEST COMPLETE");
    MPI.Finalize();
  }
Пример #18
0
 /**
  * Sets the print name for the datatype.
  *
  * @param name name for the datatype
  * @throws MPIException if MPI has not been initialized
  */
 public void setName(String name) throws MPIException {
   MPI.check();
   // Delegates to the native binding, which attaches the name to this handle.
   setName(handle, name);
 }
Пример #19
0
 /**
  * Releases the native resources held by this datatype.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_FREE}.
  *
  * @throws MPIException if MPI has not been initialized
  */
 @Override
 public void free() throws MPIException {
   MPI.check();
   // The native call invalidates the old handle and returns its replacement.
   long freedHandle = free(handle);
   handle = freedHandle;
 }
Пример #20
0
 /**
  * Deletes an attribute value associated with a key.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_DELETE_ATTR}.
  *
  * @param keyval attribute key, as returned by {@code createKeyval}
  * @throws MPIException if MPI has not been initialized
  */
 public void deleteAttr(int keyval) throws MPIException {
   MPI.check();
   deleteAttr(handle, keyval);
 }
Пример #21
0
 /**
  * Retrieves the attribute value stored under a key.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_GET_ATTR}.
  *
  * @param keyval attribute key
  * @return attribute value, or null if no attribute is associated with the key
  * @throws MPIException if MPI has not been initialized
  */
 public Object getAttr(int keyval) throws MPIException {
   MPI.check();
   Object obj = getAttr(handle, keyval);
   if (obj instanceof byte[]) {
     // Values come back from the native layer serialized; decode them first.
     return MPI.attrGet((byte[]) obj);
   }
   return obj;
 }
Пример #22
0
 /**
  * Stores attribute value associated with a key.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_SET_ATTR}.
  *
  * @param keyval attribute key
  * @param value attribute value
  * @throws MPIException if MPI has not been initialized
  */
 public void setAttr(int keyval, Object value) throws MPIException {
   MPI.check();
   // The value is serialized via MPI.attrSet before being handed to native code,
   // mirroring the decode in getAttr.
   setAttr(handle, keyval, MPI.attrSet(value));
 }
Пример #23
0
 /**
  * Create a new attribute key.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_CREATE_KEYVAL}.
  *
  * <p>Keys created here are used with {@code setAttr}, {@code getAttr} and
  * {@code deleteAttr}.
  *
  * @return attribute key for future access
  * @throws MPIException if MPI has not been initialized
  */
 public static int createKeyval() throws MPIException {
   MPI.check();
   return createKeyval_jni();
 }
Пример #24
0
 /**
  * Return the print name from the datatype.
  *
  * @return name of the datatype
  * @throws MPIException if MPI has not been initialized
  */
 public String getName() throws MPIException {
   MPI.check();
   return getName(handle);
 }
Пример #25
0
  /**
   * Driver for a parallel Lennard-Jones MD run with Integrated Tempering Sampling
   * (ITS) over a head/slave MPI domain decomposition.
   *
   * <p>Rank 0 integrates the full system, applies the ITS bias and thermostat, and
   * writes trajectory/energy output; every other rank owns one spatial domain and
   * computes the non-bonded forces and partial energies for it each step.
   *
   * <p>NOTE(review): args[0..2] are skipped — presumably consumed by the MPI
   * launcher or wrapper script; confirm against the invocation.
   *
   * @param args command-line arguments; indices 3..8 are input/output file paths
   */
  public static void main(String[] args) {

    String inputPrmFile = args[3];
    String inputConfFile = args[4];
    String inputTopFile = args[5];
    String outputTrajFile = args[6];
    String outputEnergyFile = args[7];
    String ITSout = args[8];

    MdSystem<LJParticle> system =
        GromacsImporter.buildLJParticleSystem(
            "JOB_NAME", inputPrmFile, inputConfFile, inputTopFile);

    MdParameter prm = system.getParam();

    final double dt = prm.getDt();
    final int nsteps = prm.getNsteps();
    final int nstlist = prm.getNstlist();
    final int nstxout = prm.getNstxout();
    final int nstvout = prm.getNstvout();
    final int nstenergy = prm.getNstenergy();
    final int nstlog = 10; // prm.getNstlog();
    final double T0 = prm.getT0();
    final double TRef = prm.getRefT();
    final double tauT = prm.getTauT();
    final boolean convertHbonds = prm.convertHbonds();
    if (convertHbonds) {
      system.convertHBondsToConstraints();
    }

    /** ************ ITS setup *************** */
    final int ksize = 12;
    final double Tstep = 25.0; // T increment
    final int ITSEnergyFreq = nstxout; // frequency of energy storing for ITS

    final double[] Temps = new double[ksize];
    final double[] p0 = new double[ksize];

    // Temperature ladder T0, T0+25, ... with uniform initial weights.
    for (int i = 0; i < ksize; i++) {
      Temps[i] = T0 + i * Tstep;
      p0[i] = 1.0 / ksize;
    }
    /** ************************************* */
    // Pre-computed ITS normalization factors; magnitudes overflow double, hence
    // BigDecimal with 5 significant digits.
    final BigDecimal[] n0 = new BigDecimal[ksize];
    MathContext mathset = new MathContext(5);
    n0[0] = new BigDecimal("5.99880e-03", mathset);
    n0[1] = new BigDecimal("3.64660e+209", mathset);
    n0[2] = new BigDecimal("1.23850e+391", mathset);
    n0[3] = new BigDecimal("2.59790e+548", mathset);
    n0[4] = new BigDecimal("1.21530e+686", mathset);
    n0[5] = new BigDecimal("2.85080e+807", mathset);
    n0[6] = new BigDecimal("2.93170e+915", mathset);
    n0[7] = new BigDecimal("1.33400e+1012", mathset);
    n0[8] = new BigDecimal("1.18050e+1099", mathset);
    n0[9] = new BigDecimal("4.27230e+1177", mathset);
    n0[10] = new BigDecimal("1.70190e+1249", mathset);
    n0[11] = new BigDecimal("2.87210e+1314", mathset);

    // Generate initial velocities at the starting temperature.
    system.genRandomVelocities(T0);

    /** MPI preparation * */
    MPI.Init(args);
    final int rank = MPI.COMM_WORLD.Rank();
    final int np = MPI.COMM_WORLD.Size();

    Integrator<MdSystem<LJParticle>> integrator = new VelocityVerlet<MdSystem<LJParticle>>(dt);

    FastLJC<MdSystem<LJParticle>> nonbond = new FastLJC<MdSystem<LJParticle>>(system);

    DomainDecomposition<MdSystem<LJParticle>> decomposition =
        new DomainDecomposition<MdSystem<LJParticle>>(system, np);

    Thermostat<MdSystem<LJParticle>> thermostat =
        new BerendsenThermostat<MdSystem<LJParticle>>(TRef, tauT);

    // push initial positions to the new trajectory
    system.forwardPosition(integrator);

    // get partitions using new positions
    system.partition(decomposition);

    Domain domain = decomposition.getDomain(rank);

    DomainNeighborList<MdSystem<LJParticle>> nblist =
        new DomainNeighborList<MdSystem<LJParticle>>(system, domain);

    int SUB_CAPACITY = domain.getCapacity();

    // head node
    if (rank == 0) {

      try {
        PrintStream ps = new PrintStream(outputTrajFile);
        PrintStream psEnergy = new PrintStream(outputEnergyFile);
        PrintStream psITS = new PrintStream(ITSout);

        ITS<MdSystem<LJParticle>> its = new ITS<MdSystem<LJParticle>>(T0, ksize, Temps, p0, n0);
        double Uorg = 0.0;

        for (int tstep = 0; tstep < nsteps; tstep++) {

          if (tstep % nstlog == 0) {
            System.out.println(String.format("Computing t = %5.3f ps", tstep * dt));
          }

          // integrate forward (apply position constraints if applicable)
          if (tstep != 0) {
            system.forwardPosition(integrator);
            system.applyPositionConstraint();
          }

          // (I) update domains and send them to slave nodes
          if (tstep % nstlist == 0) {
            // updates partitions using new positions
            system.partition(decomposition);

            // send updated partition info to slave nodes
            for (int proc = 1; proc < np; proc++) {
              int[] partition = decomposition.exportPartition(proc);
              MPI.COMM_WORLD.Send(partition, 0, SUB_CAPACITY, MPI.INT, proc, 99);
            }
            // update local neighbor list
            nblist.update(system);
          }

          // (II) export new positions to slave nodes
          for (int proc = 1; proc < np; proc++) {
            Domain domainEach = decomposition.getDomain(proc);
            double[] positionArray = system.exportNewPositions(domainEach);

            MPI.COMM_WORLD.Send(positionArray, 0, 3 * SUB_CAPACITY, MPI.DOUBLE, proc, 99);
          }

          // (ITS Step 0) compute the unbiased potential energy Uorg
          {
            double nonbondEnergy = 0.0;
            nonbondEnergy = system.getNonBondEnergy(nonbond, nblist);
            // receive partial nonbond energies from slave-nodes and add
            for (int proc = 1; proc < np; proc++) {
              double[] partialEnergy = new double[1];

              MPI.COMM_WORLD.Recv(partialEnergy, 0, 1, MPI.DOUBLE, proc, 99);
              nonbondEnergy += partialEnergy[0];
            }
            double bond = system.getBondEnergy();
            double angle = system.getAngleEnergy();
            double dihedral = system.getDihedralEnergy();
            Uorg = nonbondEnergy + bond + angle + dihedral;

            if (tstep % ITSEnergyFreq == 0) {
              its.printEnergy(psITS, system, Uorg);
            }
          }

          // update non-bonded forces
          system.updateNonBondForce(nonbond, nblist);

          // update long-ranged forces

          // update bonded forces
          system.updateBondForce();

          // update angle forces
          system.updateAngleForce();

          // update dihedral forces
          system.updateDihedralForce();

          // (III) receive computed forces from slaves
          for (int proc = 1; proc < np; proc++) {
            double[] forceArray = new double[3 * SUB_CAPACITY];

            MPI.COMM_WORLD.Recv(forceArray, 0, 3 * SUB_CAPACITY, MPI.DOUBLE, proc, 99);

            Domain domainEach = decomposition.getDomain(proc);
            system.importNewForces(domainEach, forceArray);
          }

          // (ITS step 1) apply biasing forces derived from Uorg
          its.applyBiasForce(system, Uorg);

          // forward velocities
          system.forwardVelocity(integrator);
          // apply velocity constraints
          system.correctConstraintVelocity();

          // apply temperature coupling
          thermostat.apply(system);

          // print energy (using information in newTraj )
          if (tstep % nstenergy == 0) {
            double nonbondEnergy = 0.0;
            nonbondEnergy = system.getNonBondEnergy(nonbond, nblist);
            // receive partial nonbond energies from slave-nodes and add
            for (int proc = 1; proc < np; proc++) {
              double[] partialEnergy = new double[1];

              MPI.COMM_WORLD.Recv(partialEnergy, 0, 1, MPI.DOUBLE, proc, 99);
              nonbondEnergy += partialEnergy[0];
            }

            double coulombEnergy = 0.0; // temporary.
            mymd.MdIO.printEnergy(system, nonbondEnergy, coulombEnergy, psEnergy);
          }

          // update current trajectories from new trajectories
          system.update();

          if (tstep % nstxout == 0) {
            mymd.MdIO.writeGro(system, ps);
          }
        }

        ps.close();
        psEnergy.close();
        // Fixed: psITS was never closed, leaking the ITS output stream.
        psITS.close();
      } catch (java.io.IOException ex) {
        // Fixed: errors were silently swallowed; surface them so a failed run
        // is not mistaken for a successful one.
        ex.printStackTrace();
      }
    }
    // slave nodes
    else {

      decomposition = null;
      String slaveName = String.format("slave-%d", rank);

      /**
       * * change subSystem constructor so it accepts a MdSystem object as an input parameter.
       * SubSystem is needed only if MdSystem object exists. *
       */
      // create a sub-system for slave node
      Trajectory subTraj = new Trajectory(system.getSize());
      subTraj.setBox(system.getBox());
      SubSystem<LJParticle> subsystem =
          new SubSystem.Builder<LJParticle>()
              .name(slaveName)
              .particles(system.getParticles())
              .parameters(system.getParam())
              .topology(system.getTopology())
              .subTrajectory(subTraj)
              .build();

      // mother system is freed. no longer needed for slave nodes
      system = null;

      for (int tstep = 0; tstep < nsteps; tstep++) {

        // (I) receive updated partition info from head node
        if (tstep % nstlist == 0) {
          int[] partition = new int[SUB_CAPACITY];
          MPI.COMM_WORLD.Recv(partition, 0, SUB_CAPACITY, MPI.INT, 0, 99);

          // import received array to its local domain
          domain.importArray(partition);
        }

        // (II) receives new positions
        double[] positionArray = new double[3 * SUB_CAPACITY];
        MPI.COMM_WORLD.Recv(positionArray, 0, SUB_CAPACITY * 3, MPI.DOUBLE, 0, 99);

        // import new positions into the subsystem
        subsystem.importNewPositions(domain, positionArray);

        if (tstep % nstlist == 0) {
          // update local neighbor list
          nblist.update(subsystem);
        }

        // (ITS 0 step) send this domain's partial non-bonded energy to the head node
        double[] partialEnergy = new double[1];
        partialEnergy[0] = subsystem.getNonBondEnergy(nonbond, nblist);

        MPI.COMM_WORLD.Send(partialEnergy, 0, 1, MPI.DOUBLE, 0, 99);

        // compute non-bonded forces
        subsystem.updateNonBondForce(nonbond, nblist);

        // PME

        // (III) export forces and send them to head-node
        double[] forceArray = subsystem.exportNewForces(domain);
        MPI.COMM_WORLD.Send(forceArray, 0, SUB_CAPACITY * 3, MPI.DOUBLE, 0, 99);

        if (tstep % nstenergy == 0) {
          partialEnergy = new double[1];
          partialEnergy[0] = subsystem.getNonBondEnergy(nonbond, nblist);

          MPI.COMM_WORLD.Send(partialEnergy, 0, 1, MPI.DOUBLE, 0, 99);
        }

        // reset force components
        subsystem.update();
      }
    }

    MPI.Finalize();
  }
Пример #26
0
 /**
  * Builds the most general derived datatype from heterogeneous blocks.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_STRUCT}.
  *
  * <p>The block count is inferred from the length of {@code blockLengths}; the
  * {@code displacements} and {@code types} arrays must have that same length.
  *
  * @param blockLengths number of elements in each block
  * @param displacements byte displacement of each block
  * @param types type of elements in each block
  * @return new datatype
  * @throws MPIException if an MPI error occurs
  */
 public static Datatype createStruct(int[] blockLengths, int[] displacements, Datatype[] types)
     throws MPIException {
   MPI.check();
   // Native call creates the struct type; wrap its handle with a BYTE base type.
   return new Datatype(MPI.BYTE, getStruct(blockLengths, displacements, types));
 }
Пример #27
0
  public static void main(String[] args) throws MPIException {
    MPI.Init(args);

    // Exercise the non-blocking all-to-all-w collective on COMM_WORLD.
    Comm comm = MPI.COMM_WORLD;
    int size = comm.getSize();
    int rank = comm.getRank();

    // Allocate the send/receive buffers (size*size ints each).
    IntBuffer sendBuf = MPI.newIntBuffer(size * size);
    IntBuffer recvBuf = MPI.newIntBuffer(size * size);

    // Seed the send buffer with rank-tagged values; poison the receive buffer.
    for (int idx = 0; idx < size * size; idx++) {
      sendBuf.put(idx, idx + 100 * rank);
      recvBuf.put(idx, -idx);
    }

    // Per-peer counts, byte displacements, and element datatypes for alltoallw.
    int[] sendCounts = new int[size];
    int[] recvCounts = new int[size];
    int[] recvDispls = new int[size];
    int[] sendDispls = new int[size];
    Datatype[] sendTypes = new Datatype[size];
    Datatype[] recvTypes = new Datatype[size];

    // MPI.INT.getExtent() reports 1, but these byte displacements need a 4.
    int extent = 4;

    for (int peer = 0; peer < size; peer++) {
      sendCounts[peer] = peer;
      recvCounts[peer] = rank;
      recvDispls[peer] = peer * rank * extent;
      sendDispls[peer] = ((peer * (peer + 1)) / 2) * extent;
      sendTypes[peer] = MPI.INT;
      recvTypes[peer] = MPI.INT;
    }

    // Start the collective, wait for completion, then release the request.
    Request req =
        comm.iAllToAllw(
            sendBuf, sendCounts, sendDispls, sendTypes,
            recvBuf, recvCounts, recvDispls, recvTypes);
    req.waitFor();
    req.free();

    // Verify each received block against the expected sender pattern.
    for (int peer = 0; peer < size; peer++) {
      int base = recvDispls[peer] / extent;
      for (int elem = 0; elem < rank; elem++) {
        int expected = peer * 100 + (rank * (rank + 1)) / 2 + elem;
        if (recvBuf.get(base + elem) != expected) {
          System.out.println(peer + " " + elem + " " + size + " " + rank + " " + extent);
          OmpitestError.ompitestError(
              OmpitestError.getFileName(),
              OmpitestError.getLineNumber(),
              "bad answer " + recvBuf.get(base + elem) + " (should be " + expected + ")\n");
        }
      }
    }

    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
    if (rank == 0) {
      System.out.println("Test completed.");
    }
  }
Пример #28
0
 /**
  * Like {@code createIndexed}, except each displacement is expressed directly
  * in bytes within the buffer rather than in units of the old datatype.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_HINDEXED}.
  *
  * @param blockLengths number of elements per block
  * @param displacements byte displacement in buffer for each block
  * @param oldType old datatype
  * @return new datatype
  * @throws MPIException if an MPI error occurs
  */
 public static Datatype createHIndexed(int[] blockLengths, int[] displacements, Datatype oldType)
     throws MPIException {
   MPI.check();
   // Wrap the native handle, keeping oldType as the base element type.
   return new Datatype(oldType, getHIndexed(blockLengths, displacements, oldType.handle));
 }
Пример #29
0
 /**
  * Frees an attribute key so its value may be reused.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_FREE_KEYVAL}.
  *
  * @param keyval attribute key to free
  * @throws MPIException if an MPI error occurs
  */
 public static void freeKeyval(int keyval) throws MPIException {
   MPI.check();
   freeKeyval_jni(keyval);
 }
Пример #30
0
 /**
  * Derives a datatype from an existing one with its lower bound and extent
  * overridden.
  *
  * <p>Java binding of the MPI operation {@code MPI_TYPE_CREATE_RESIZED}.
  *
  * @param oldType input datatype
  * @param lb new lower bound of datatype (address integer)
  * @param extent new extent of datatype (address integer)
  * @return new datatype
  * @throws MPIException if an MPI error occurs
  */
 public static Datatype createResized(Datatype oldType, int lb, int extent) throws MPIException {
   MPI.check();
   // Wrap the resized native handle, keeping oldType as the base element type.
   return new Datatype(oldType, getResized(oldType.handle, lb, extent));
 }