public test2O(String[] args) throws Exception {
    int numtask, taskid;
    test outmsg[] = new test[1];
    test inmsg[] = new test[1];
    int dest = 0, type = 1;
    int source, rtype;
    Status status;
    Request req;

    MPI.Init(args);
    taskid = MPI.COMM_WORLD.Rank();
    numtask = MPI.COMM_WORLD.Size();
    if (numtask > 2) {
        if (taskid == 0) {
            System.out.println("test2O must run with 2 tasks");
        }
        MPI.Finalize();
        return;
    }

    if (taskid == 1) {
        MPI.COMM_WORLD.Barrier();
        outmsg[0] = new test();
        outmsg[0].a = 5;
        MPI.COMM_WORLD.Send(outmsg, 0, 1, MPI.OBJECT, dest, type);
    }

    if (taskid == 0) {
        source = MPI.ANY_SOURCE;
        rtype = MPI.ANY_TAG;
        req = MPI.COMM_WORLD.Irecv(inmsg, 0, 1, MPI.OBJECT, source, rtype);
        // the send cannot have started before the sender's barrier, so Test() must fail
        status = req.Test();
        if (status != null) System.out.println("ERROR(1)");
        MPI.COMM_WORLD.Barrier();
        status = req.Wait();
        if (inmsg[0].a != 5 || status.source != 1 || status.tag != 1) {
            System.out.println("ERROR(2)");
            System.out.println(" inmsg[0].a    " + inmsg[0].a);
            System.out.println(" status.source " + status.source);
            System.out.println(" status.tag    " + status.tag);
        }
    }

    MPI.COMM_WORLD.Barrier();
    if (taskid == 1) System.out.println("Test2O TEST COMPLETE <" + taskid + ">");
    MPI.Finalize();
}
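// The "test" message type used with MPI.OBJECT above (and in scattervO below) is
// not defined in this file. Anything sent as MPI.OBJECT must be serializable, so
// a minimal sketch (an assumption, not the original class) would be:
class test implements java.io.Serializable {
    public int a; // the only field the calling code relies on
}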
public void JGFrun(int size) throws MPIException {
    if (rank == 0) {
        JGFInstrumentor.addTimer("Section3:MonteCarlo:Total", "Solutions", size);
        JGFInstrumentor.addTimer("Section3:MonteCarlo:Run", "Samples", size);
    }
    JGFsetsize(size);

    MPI.COMM_WORLD.Barrier();
    if (rank == 0) {
        JGFInstrumentor.startTimer("Section3:MonteCarlo:Total");
    }

    JGFinitialise();
    JGFapplication();
    if (rank == 0) {
        JGFvalidate();
    }
    JGFtidyup();

    MPI.COMM_WORLD.Barrier();
    if (rank == 0) {
        JGFInstrumentor.stopTimer("Section3:MonteCarlo:Total");
        JGFInstrumentor.addOpsToTimer("Section3:MonteCarlo:Run", (double) input[1]);
        JGFInstrumentor.addOpsToTimer("Section3:MonteCarlo:Total", 1);
        JGFInstrumentor.printTimer("Section3:MonteCarlo:Run");
        JGFInstrumentor.printTimer("Section3:MonteCarlo:Total");
    }
}
public reduce_scatter(String[] args) throws Exception {
    final int MAXLEN = 10000;
    int out[] = new int[MAXLEN * 100];
    int in[] = new int[MAXLEN * 100];
    int i, j, k;
    int myself, tasks;
    int recvcounts[] = new int[128];

    MPI.Init(args);
    myself = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();
    if (tasks > 8) {
        if (myself == 0) {
            System.out.println("reduce_scatter must run with at most 8 tasks!");
        }
        MPI.Finalize();
        return;
    }

    j = 10;
    // for (j = 1; j <= MAXLEN * tasks; j *= 10) {
    for (i = 0; i < tasks; i++) recvcounts[i] = j;
    for (i = 0; i < j * tasks; i++) out[i] = i;

    MPI.COMM_WORLD.Reduce_scatter(out, 0, in, 0, recvcounts, MPI.INT, MPI.SUM);

    for (k = 0; k < j; k++) {
        if (in[k] != tasks * (myself * j + k)) {
            System.out.println("bad answer (" + in[k] + ") at index " + k + " of " + j
                + " (should be " + tasks * (myself * j + k) + ")");
            break;
        }
    }
    // }

    MPI.COMM_WORLD.Barrier();
    if (myself == 0) System.out.println("Reduce_scatter TEST COMPLETE");
    MPI.Finalize();
}
// ISEND is not supported yet for threaded runs: in threaded mode this falls back
// to a blocking Send. Note that in MPI mode the Request returned by Isend is
// discarded, so completion of the send is never explicitly waited on here.
public static void Isend(Object buf, int offset, int count, int type, int dest, int tag)
        throws MPIException, InterruptedException {
    if (mpiRun) {
        Datatype dtype = getDatatype(type);
        MPI.COMM_WORLD.Isend(buf, offset, count, dtype, dest, tag);
    } else {
        threadEle.get(Thread.currentThread()).Send(buf, offset, count, type, dest, tag);
    }
}
public static Object Iprobe(int source, int tag) throws MPIException, InterruptedException {
    if (mpiRun) {
        // -1 is this wrapper's wildcard for both source and tag
        if (source == -1) source = MPI.ANY_SOURCE;
        if (tag == -1) tag = MPI.ANY_TAG;
        return MPI.COMM_WORLD.Iprobe(source, tag);
    } else {
        return threadEle.get(Thread.currentThread()).Iprobe(source, tag);
    }
}
public void JGFapplication() throws MPIException {
    MPI.COMM_WORLD.Barrier();
    if (rank == 0) {
        JGFInstrumentor.startTimer("Section3:MonteCarlo:Run");
    }

    runiters();

    MPI.COMM_WORLD.Barrier();
    if (rank == 0) {
        JGFInstrumentor.stopTimer("Section3:MonteCarlo:Run");
    }
    if (rank == 0) {
        presults();
    }
}
// Returns either an mpi.Status (MPI mode) or a ThreadStatus (threaded mode).
public static Object Recv(Object buf, int offset, int count, int type, int source, int tag)
        throws MPIException, InterruptedException {
    if (mpiRun) {
        // -1 is this wrapper's wildcard for both source and tag
        if (source == -1) source = MPI.ANY_SOURCE;
        if (tag == -1) tag = MPI.ANY_TAG;
        Datatype dtype = getDatatype(type);
        return MPI.COMM_WORLD.Recv(buf, offset, count, dtype, source, tag);
    } else {
        return threadEle.get(Thread.currentThread()).Recv(buf, offset, count, type, source, tag);
    }
}
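// A minimal sketch of how the Isend/Recv wrappers above compose. "Comms" as the
// enclosing class name, the INT_CODE type code, and a Rank() wrapper analogous to
// Size() are assumptions made for illustration; none of them appear in this file.
public static void wrapperUsageSketch() throws MPIException, InterruptedException {
    final int INT_CODE = 0; // hypothetical code understood by getDatatype()
    int[] sendBuf = { 42 };
    int[] recvBuf = new int[1];
    if (Comms.Rank() == 0) {
        // fire-and-forget: in MPI mode the Request from Isend is discarded
        Comms.Isend(sendBuf, 0, 1, INT_CODE, /* dest */ 1, /* tag */ 5);
    } else if (Comms.Rank() == 1) {
        // -1 wildcards become MPI.ANY_SOURCE / MPI.ANY_TAG inside Recv
        Object status = Comms.Recv(recvBuf, 0, 1, INT_CODE, -1, -1);
    }
}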
public static void main(String args[]) throws MPIException {
    int myself, tasks;
    IntBuffer in = MPI.newIntBuffer(MAXLEN);
    Request request;

    MPI.Init(args);
    myself = MPI.COMM_WORLD.getRank();
    tasks = MPI.COMM_WORLD.getSize();

    for (int j = 1; j <= MAXLEN; j *= 10) {
        for (int i = 0; i < j; i++) {
            in.put(i, i);
        }
        // in-place nonblocking allreduce on the first j elements
        request = MPI.COMM_WORLD.iAllReduce(in, j, MPI.INT, MPI.SUM);
        request.waitFor();
        request.free();
        for (int k = 0; k < j; k++) {
            if (in.get(k) != k * tasks) {
                OmpitestError.ompitestError(OmpitestError.getFileName(),
                        OmpitestError.getLineNumber(),
                        " bad answer (" + in.get(k) + ") at index " + k + " of " + j
                        + " (should be " + (k * tasks) + ")\n");
                break;
            }
        }
    }

    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
}
public static void main(String args[]) throws MPIException {
    int root, myself, tasks;
    int out[] = new int[MAXLEN], in[] = new int[MAXLEN];

    MPI.Init(args);
    myself = MPI.COMM_WORLD.getRank();
    tasks = MPI.COMM_WORLD.getSize();
    root = tasks / 2;

    for (int j = 1; j <= MAXLEN; j *= 10) {
        for (int i = 0; i < j; i++) {
            out[i] = i;
        }
        MPI.COMM_WORLD.reduce(out, in, j, MPI.INT, MPI.SUM, root);
        if (myself == root) {
            for (int k = 0; k < j; k++) {
                if (in[k] != k * tasks) {
                    OmpitestError.ompitestError(OmpitestError.getFileName(),
                            OmpitestError.getLineNumber(),
                            " bad answer (" + in[k] + ") at index " + k + " of " + j
                            + " (should be " + (k * tasks) + ")\n");
                    break;
                }
            }
        }
    }

    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
}
public sub2(String[] args) throws Exception {
    int dims[] = new int[2];
    dims[0] = 2;
    dims[1] = 3;
    boolean periods[] = new boolean[2];
    int size, rank;

    MPI.Init(args);
    rank = MPI.COMM_WORLD.Rank();
    size = MPI.COMM_WORLD.Size();
    if (size != 8) {
        if (rank == 0) System.out.println("topo->sub2: MUST RUN WITH 8 TASKS");
        MPI.COMM_WORLD.Barrier();
        MPI.Finalize();
        return;
    }

    Cartcomm comm = MPI.COMM_WORLD.Create_cart(dims, periods, false);
    if (comm != null) {
        int[] dims2 = comm.Get().dims;
        boolean remain[] = new boolean[2];
        remain[0] = false;
        remain[1] = true;
        Cartcomm subcomm = comm.Sub(remain);
        int nsize = subcomm.Size();
        int nrank = subcomm.Rank();
        System.out.println("rank <" + rank + ">,nrank<" + nrank + ">,size<" + size
            + ">,nsize<" + nsize + ">");
    } else {
        System.out.println("rank <" + rank + ">,size<" + size + ">");
    }

    MPI.COMM_WORLD.Barrier();
    MPI.Finalize();
}
public testany(String[] args) throws Exception {
    int me, tasks, i, done;
    int mebuf[] = new int[1];

    MPI.Init(args);
    me = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();
    int data[] = new int[tasks];
    Request req[] = new Request[tasks];
    Status status;

    mebuf[0] = me;
    if (me > 0) {
        MPI.COMM_WORLD.Send(mebuf, 0, 1, MPI.INT, 0, 1);
    } else {
        req[0] = MPI.REQUEST_NULL;
        for (i = 1; i < tasks; i++) req[i] = MPI.COMM_WORLD.Irecv(data, i, 1, MPI.INT, i, 1);
        done = 0;
        while (done < tasks - 1) {
            status = Request.Testany(req);
            if (status != null) {
                done++;
                if (!req[status.index].Is_null())
                    System.out.println("ERROR in MPI_Testany: request not set to null");
                if (data[status.index] != status.index)
                    System.out.println("ERROR in MPI.Testany: wrong data");
            }
        }
    }
    // MPI.COMM_WORLD.Barrier();
    // if (me == 1) System.out.println("Testany TEST COMPLETE <" + me + ">");
    MPI.Finalize();
}
public scanO(String[] args) throws Exception {
    final int MAXLEN = 10000;
    int k;
    complexNum out[] = new complexNum[MAXLEN];
    complexNum in[] = new complexNum[MAXLEN];
    int myself, tasks;
    boolean commute = false;

    MPI.Init(args);
    myself = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();

    for (int i = 0; i < MAXLEN; i++) {
        in[i] = new complexNum();
        out[i] = new complexNum();
        out[i].realPart = i;
        out[i].imaginPart = i;
    }

    complexAdd cadd = new complexAdd();
    Op op = new Op(cadd, commute);
    MPI.COMM_WORLD.Scan(out, 0, in, 0, MAXLEN, MPI.OBJECT, op);

    // rank r receives the prefix sum over ranks 0..r, i.e. (r + 1) * k at index k
    for (k = 0; k < MAXLEN; k++) {
        if (in[k].realPart != k * (myself + 1)) {
            System.out.println("bad answer (" + in[k].realPart + ") at index " + k
                + " (should be " + (k * (myself + 1)) + ")");
            break;
        }
    }

    MPI.COMM_WORLD.Barrier();
    if (myself == 0) System.out.println("ScanO TEST COMPLETE");
    MPI.Finalize();
}
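// The complexAdd reduction passed to the Op constructor in scanO is not shown.
// In mpiJava, user-defined operations extend User_function; a minimal sketch,
// assuming complexNum is serializable with the public realPart/imaginPart
// fields used above:
class complexAdd extends User_function {
    public void Call(Object invec, int inoffset, Object inoutvec, int inoutoffset,
            int count, Datatype datatype) {
        complexNum[] in = (complexNum[]) invec;
        complexNum[] inout = (complexNum[]) inoutvec;
        for (int i = 0; i < count; i++) {
            // element-wise complex addition, accumulated into the in/out vector
            inout[inoutoffset + i].realPart += in[inoffset + i].realPart;
            inout[inoutoffset + i].imaginPart += in[inoffset + i].imaginPart;
        }
    }
}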
public static int Size() throws MPIException {
    if (mpiRun) return MPI.COMM_WORLD.Size();
    else return threadEle.get(Thread.currentThread()).getSize();
}
public static void main(String[] args) throws MPIException {
    Comm comm;
    IntBuffer sBuf, rBuf;
    int rank, size, extent;
    int[] sendCounts, recvCounts, rDispls, sDispls;
    Datatype[] sDTypes, rDTypes;
    Request req;

    MPI.Init(args);
    comm = MPI.COMM_WORLD;

    /* Create the buffers */
    size = comm.getSize();
    rank = comm.getRank();
    sBuf = MPI.newIntBuffer(size * size);
    rBuf = MPI.newIntBuffer(size * size);

    /* Load up the buffers */
    for (int i = 0; i < (size * size); i++) {
        sBuf.put(i, (i + 100 * rank));
        rBuf.put(i, -i);
    }

    /* Create and load the arguments to alltoallw */
    sendCounts = new int[size];
    recvCounts = new int[size];
    rDispls = new int[size];
    sDispls = new int[size];
    sDTypes = new Datatype[size];
    rDTypes = new Datatype[size];
    // MPI.INT.getExtent() returns 1 (the extent in units of the base type),
    // but these displacement calculations need the size in bytes, i.e. 4.
    extent = 4;
    for (int i = 0; i < size; i++) {
        sendCounts[i] = i;
        recvCounts[i] = rank;
        rDispls[i] = (i * rank * extent);
        sDispls[i] = (((i * (i + 1)) / 2) * extent);
        sDTypes[i] = MPI.INT;
        rDTypes[i] = MPI.INT;
    }

    req = comm.iAllToAllw(sBuf, sendCounts, sDispls, sDTypes,
                          rBuf, recvCounts, rDispls, rDTypes);
    req.waitFor();
    req.free();

    /* Check rBuf */
    for (int i = 0; i < size; i++) {
        int p = rDispls[i] / extent;
        for (int j = 0; j < rank; j++) {
            if (rBuf.get(p + j) != (i * 100 + (rank * (rank + 1)) / 2 + j)) {
                System.out.println(i + " " + j + " " + size + " " + rank + " " + extent);
                OmpitestError.ompitestError(OmpitestError.getFileName(),
                        OmpitestError.getLineNumber(),
                        "bad answer " + rBuf.get(p + j) + " (should be "
                        + (i * 100 + (rank * (rank + 1)) / 2 + j) + ")\n");
            }
        }
    }

    MPI.COMM_WORLD.barrier();
    MPI.Finalize();
    if (rank == 0) {
        System.out.println("Test completed.");
    }
}
private void mpiInit(String args[]) throws MPIException {
    MPI.Init(args);
    mpiRank = MPI.COMM_WORLD.getRank();
    mpiRanksTotal = MPI.COMM_WORLD.getSize();
}
public static void main(String[] args) {
    String inputPrmFile = args[3];
    String inputConfFile = args[4];
    String inputTopFile = args[5];
    String outputTrajFile = args[6];
    String outputEnergyFile = args[7];
    String ITSout = args[8];

    MdSystem<LJParticle> system = GromacsImporter.buildLJParticleSystem(
            "JOB_NAME", inputPrmFile, inputConfFile, inputTopFile);
    MdParameter prm = system.getParam();
    final double dt = prm.getDt();
    final int nsteps = prm.getNsteps();
    final int nstlist = prm.getNstlist();
    final int nstxout = prm.getNstxout();
    final int nstvout = prm.getNstvout();
    final int nstenergy = prm.getNstenergy();
    final int nstlog = 10; // prm.getNstlog();
    final double T0 = prm.getT0();
    final double TRef = prm.getRefT();
    final double tauT = prm.getTauT();
    final boolean convertHbonds = prm.convertHbonds();
    if (convertHbonds) {
        system.convertHBondsToConstraints();
    }

    /* ************ ITS setup *************** */
    final int ksize = 12;
    final double Tstep = 25.0; // T increment
    final int ITSEnergyFreq = nstxout; // frequency of energy storing for ITS
    final double[] Temps = new double[ksize];
    final double[] p0 = new double[ksize];
    for (int i = 0; i < ksize; i++) {
        Temps[i] = T0 + i * Tstep;
        p0[i] = 1.0 / ksize;
    }
    /* ************************************** */

    final BigDecimal[] n0 = new BigDecimal[ksize];
    MathContext mathset = new MathContext(5);
    n0[0] = new BigDecimal("5.99880e-03", mathset);
    n0[1] = new BigDecimal("3.64660e+209", mathset);
    n0[2] = new BigDecimal("1.23850e+391", mathset);
    n0[3] = new BigDecimal("2.59790e+548", mathset);
    n0[4] = new BigDecimal("1.21530e+686", mathset);
    n0[5] = new BigDecimal("2.85080e+807", mathset);
    n0[6] = new BigDecimal("2.93170e+915", mathset);
    n0[7] = new BigDecimal("1.33400e+1012", mathset);
    n0[8] = new BigDecimal("1.18050e+1099", mathset);
    n0[9] = new BigDecimal("4.27230e+1177", mathset);
    n0[10] = new BigDecimal("1.70190e+1249", mathset);
    n0[11] = new BigDecimal("2.87210e+1314", mathset);

    // generate random velocities
    system.genRandomVelocities(T0);

    /* MPI preparation */
    MPI.Init(args);
    final int rank = MPI.COMM_WORLD.Rank();
    final int np = MPI.COMM_WORLD.Size();

    Integrator<MdSystem<LJParticle>> integrator = new VelocityVerlet<MdSystem<LJParticle>>(dt);
    FastLJC<MdSystem<LJParticle>> nonbond = new FastLJC<MdSystem<LJParticle>>(system);
    DomainDecomposition<MdSystem<LJParticle>> decomposition =
            new DomainDecomposition<MdSystem<LJParticle>>(system, np);
    Thermostat<MdSystem<LJParticle>> thermostat =
            new BerendsenThermostat<MdSystem<LJParticle>>(TRef, tauT);

    // push initial positions to the new trajectory
    system.forwardPosition(integrator);
    // get partitions using new positions
    system.partition(decomposition);
    Domain domain = decomposition.getDomain(rank);
    DomainNeighborList<MdSystem<LJParticle>> nblist =
            new DomainNeighborList<MdSystem<LJParticle>>(system, domain);
    int SUB_CAPACITY = domain.getCapacity();

    // head node
    if (rank == 0) {
        try {
            PrintStream ps = new PrintStream(outputTrajFile);
            PrintStream psEnergy = new PrintStream(outputEnergyFile);
            PrintStream psITS = new PrintStream(ITSout);
            ITS<MdSystem<LJParticle>> its =
                    new ITS<MdSystem<LJParticle>>(T0, ksize, Temps, p0, n0);
            double Uorg = 0.0;

            for (int tstep = 0; tstep < nsteps; tstep++) {
                if (tstep % nstlog == 0) {
                    System.out.println(String.format("Computing t = %5.3f ps", tstep * dt));
                }
                // integrate forward (apply position constraints if applicable)
                if (tstep != 0) {
                    system.forwardPosition(integrator);
                    system.applyPositionConstraint();
                }
                // (I) update domains and send them to slave nodes
                if (tstep % nstlist == 0) {
                    // update partitions using new positions
                    system.partition(decomposition);
                    // send updated partition info to slave nodes
                    for (int proc = 1; proc < np; proc++) {
                        int[] partition = decomposition.exportPartition(proc);
                        MPI.COMM_WORLD.Send(partition, 0, SUB_CAPACITY, MPI.INT, proc, 99);
                    }
                    // update local neighbor list
                    nblist.update(system);
                }
                // (II) export new positions to slave nodes
                for (int proc = 1; proc < np; proc++) {
                    Domain domainEach = decomposition.getDomain(proc);
                    double[] positionArray = system.exportNewPositions(domainEach);
                    MPI.COMM_WORLD.Send(positionArray, 0, 3 * SUB_CAPACITY, MPI.DOUBLE, proc, 99);
                }
                // (ITS step 0) compute original energy
                {
                    double nonbondEnergy = system.getNonBondEnergy(nonbond, nblist);
                    // receive partial nonbond energies from slave nodes and add
                    for (int proc = 1; proc < np; proc++) {
                        double[] partialEnergy = new double[1];
                        MPI.COMM_WORLD.Recv(partialEnergy, 0, 1, MPI.DOUBLE, proc, 99);
                        nonbondEnergy += partialEnergy[0];
                    }
                    double coulombEnergy = 0.0; // temporary
                    double bond = system.getBondEnergy();
                    double angle = system.getAngleEnergy();
                    double dihedral = system.getDihedralEnergy();
                    Uorg = nonbondEnergy + bond + angle + dihedral;
                    if (tstep % ITSEnergyFreq == 0) {
                        its.printEnergy(psITS, system, Uorg);
                    }
                }
                // update non-bonded forces
                system.updateNonBondForce(nonbond, nblist);
                // update long-ranged forces
                // update bonded forces
                system.updateBondForce();
                // update angle forces
                system.updateAngleForce();
                // update dihedral forces
                system.updateDihedralForce();
                // (III) receive computed forces from slaves
                for (int proc = 1; proc < np; proc++) {
                    double[] forceArray = new double[3 * SUB_CAPACITY];
                    MPI.COMM_WORLD.Recv(forceArray, 0, 3 * SUB_CAPACITY, MPI.DOUBLE, proc, 99);
                    Domain domainEach = decomposition.getDomain(proc);
                    system.importNewForces(domainEach, forceArray);
                }
                // (ITS step 1) apply biasing forces
                its.applyBiasForce(system, Uorg);
                // forward velocities
                system.forwardVelocity(integrator);
                // apply velocity constraints
                system.correctConstraintVelocity();
                // apply temperature coupling
                thermostat.apply(system);
                // print energy (using information in newTraj)
                if (tstep % nstenergy == 0) {
                    double nonbondEnergy = system.getNonBondEnergy(nonbond, nblist);
                    // receive partial nonbond energies from slave nodes and add
                    for (int proc = 1; proc < np; proc++) {
                        double[] partialEnergy = new double[1];
                        MPI.COMM_WORLD.Recv(partialEnergy, 0, 1, MPI.DOUBLE, proc, 99);
                        nonbondEnergy += partialEnergy[0];
                    }
                    double coulombEnergy = 0.0; // temporary
                    mymd.MdIO.printEnergy(system, nonbondEnergy, coulombEnergy, psEnergy);
                }
                // update current trajectories from new trajectories
                system.update();
                if (tstep % nstxout == 0) {
                    mymd.MdIO.writeGro(system, ps);
                }
            }
            ps.close();
            psEnergy.close();
        } catch (java.io.IOException ex) {
            // output files could not be opened; the head node stops silently
        }
    }
    // slave nodes
    else {
        decomposition = null;
        String slaveName = String.format("slave-%d", rank);
        /*
         * Change the SubSystem constructor so it accepts a MdSystem object as an
         * input parameter. SubSystem is needed only if a MdSystem object exists.
         */
        // create a sub-system for the slave node
        Trajectory subTraj = new Trajectory(system.getSize());
        subTraj.setBox(system.getBox());
        SubSystem<LJParticle> subsystem = new SubSystem.Builder<LJParticle>()
                .name(slaveName)
                .particles(system.getParticles())
                .parameters(system.getParam())
                .topology(system.getTopology())
                .subTrajectory(subTraj)
                .build();
        // mother system is freed; no longer needed on slave nodes
        system = null;

        for (int tstep = 0; tstep < nsteps; tstep++) {
            // (I) receive updated partition info from head node
            if (tstep % nstlist == 0) {
                int[] partition = new int[SUB_CAPACITY];
                MPI.COMM_WORLD.Recv(partition, 0, SUB_CAPACITY, MPI.INT, 0, 99);
                // import received array into its local domain
                domain.importArray(partition);
            }
            // (II) receive new positions
            double[] positionArray = new double[3 * SUB_CAPACITY];
            MPI.COMM_WORLD.Recv(positionArray, 0, SUB_CAPACITY * 3, MPI.DOUBLE, 0, 99);
            // import new positions into the subsystem
            subsystem.importNewPositions(domain, positionArray);
            if (tstep % nstlist == 0) {
                // update local neighbor list
                nblist.update(subsystem);
            }
            // (ITS step 0)
            double[] partialEnergy = new double[1];
            partialEnergy[0] = subsystem.getNonBondEnergy(nonbond, nblist);
            MPI.COMM_WORLD.Send(partialEnergy, 0, 1, MPI.DOUBLE, 0, 99);
            // compute non-bonded forces
            subsystem.updateNonBondForce(nonbond, nblist);
            // PME
            // (III) export forces and send them to the head node
            double[] forceArray = subsystem.exportNewForces(domain);
            MPI.COMM_WORLD.Send(forceArray, 0, SUB_CAPACITY * 3, MPI.DOUBLE, 0, 99);
            if (tstep % nstenergy == 0) {
                partialEnergy = new double[1];
                partialEnergy[0] = subsystem.getNonBondEnergy(nonbond, nblist);
                MPI.COMM_WORLD.Send(partialEnergy, 0, 1, MPI.DOUBLE, 0, 99);
            }
            // reset force components
            subsystem.update();
        }
    }
    MPI.Finalize();
}
public waitall2(String args[]) throws Exception {
    MPI.Init(args);
    int me = MPI.COMM_WORLD.Rank();
    mpi.Request r[] = new Request[10];
    mpi.Status s[] = new Status[10];
    r[0] = MPI.REQUEST_NULL;
    r[9] = MPI.REQUEST_NULL;

    int intArray[] = new int[100];
    float floatArray[] = new float[100];
    double doubleArray[] = new double[100];
    long longArray[] = new long[100];
    boolean booleanArray[] = new boolean[100];
    short shortArray[] = new short[100];
    char charArray[] = new char[100];
    byte byteArray[] = new byte[100];

    int intReadArray[] = new int[100];
    float floatReadArray[] = new float[100];
    double doubleReadArray[] = new double[100];
    long longReadArray[] = new long[100];
    boolean booleanReadArray[] = new boolean[100];
    short shortReadArray[] = new short[100];
    char charReadArray[] = new char[100];
    byte byteReadArray[] = new byte[100];

    for (int i = 0; i < intArray.length; i++) {
        intArray[i] = i + 1;
        floatArray[i] = i + 11;
        doubleArray[i] = i + 11.11;
        longArray[i] = i + 11;
        booleanArray[i] = true;
        shortArray[i] = 1;
        charArray[i] = 's';
        byteArray[i] = 's';

        intReadArray[i] = 3;
        floatReadArray[i] = i + 19;
        doubleReadArray[i] = i + 99.11;
        longReadArray[i] = i + 9;
        shortReadArray[i] = 2;
        booleanReadArray[i] = false;
        charReadArray[i] = 'x';
        byteReadArray[i] = 'x';
    }

    if (MPI.COMM_WORLD.Rank() == 0) {
        r[1] = MPI.COMM_WORLD.Isend(intArray, 0, 100, MPI.INT, 1, 1);
        r[2] = MPI.COMM_WORLD.Isend(byteArray, 0, 100, MPI.BYTE, 1, 2);
        /*
        r[3] = MPI.COMM_WORLD.Isend(charArray, 0, 100, MPI.CHAR, 1, 3);
        r[4] = MPI.COMM_WORLD.Isend(doubleArray, 0, 100, MPI.DOUBLE, 1, 4);
        r[5] = MPI.COMM_WORLD.Isend(longArray, 0, 100, MPI.LONG, 1, 5);
        r[6] = MPI.COMM_WORLD.Isend(booleanArray, 0, 100, MPI.BOOLEAN, 1, 6);
        r[7] = MPI.COMM_WORLD.Isend(shortArray, 0, 100, MPI.SHORT, 1, 7);
        r[8] = MPI.COMM_WORLD.Isend(floatArray, 0, 100, MPI.FLOAT, 1, 8);
        */
        r[1].Wait();
        r[2].Wait();
        /*
        s = Request.Waitall(r);
        for (int i = 0; i < s.length; i++) {
            if (s[i].equals(MPI.EMPTY_STATUS)) {
                continue;
            }
            if (s[i].source != 1 && s[i].tag != i) {
                System.out.println("Error in status objects (sender)");
            }
        }
        */
    } else if (MPI.COMM_WORLD.Rank() == 1) {
        r[1] = MPI.COMM_WORLD.Irecv(intReadArray, 0, 100, MPI.INT, 0, 1);
        r[2] = MPI.COMM_WORLD.Irecv(byteReadArray, 0, 100, MPI.BYTE, 0, 2);
        r[1].Wait();
        r[2].Wait();
        /*
        r[3] = MPI.COMM_WORLD.Irecv(charReadArray, 0, 100, MPI.CHAR, 0, 3);
        r[4] = MPI.COMM_WORLD.Irecv(doubleReadArray, 0, 100, MPI.DOUBLE, 0, 4);
        r[5] = MPI.COMM_WORLD.Irecv(longReadArray, 0, 100, MPI.LONG, 0, 5);
        r[6] = MPI.COMM_WORLD.Irecv(booleanReadArray, 0, 100, MPI.BOOLEAN, 0, 6);
        r[7] = MPI.COMM_WORLD.Irecv(shortReadArray, 0, 100, MPI.SHORT, 0, 7);
        r[8] = MPI.COMM_WORLD.Irecv(floatReadArray, 0, 100, MPI.FLOAT, 0, 8);

        s = Request.Waitall(r);
        for (int i = 0; i < s.length; i++) {
            if (s[i].equals(MPI.EMPTY_STATUS)) {
                continue;
            }
            if (s[i].source != 0 && s[i].tag != i) {
                System.out.println("Error in status objects (Receiver)");
            }
        }
        */
        if (Arrays.equals(intArray, intReadArray)) {
            /*
            && Arrays.equals(floatArray, floatReadArray)
            && Arrays.equals(doubleArray, doubleReadArray)
            && Arrays.equals(longArray, longReadArray)
            && Arrays.equals(shortArray, shortReadArray)
            && Arrays.equals(charArray, charReadArray)
            && Arrays.equals(byteArray, byteReadArray)
            && Arrays.equals(booleanArray, booleanReadArray)) {
            */
            System.out.println("waitall2 TEST Completed");
            /*
            System.out.println("\n#################"
                + "\n <<<<PASSED>>>> "
                + "\n################");
            */
        } else {
            System.out.println("\n#################" + "\n <<<<FAILED>>>> " + "\n################");
        }
    }

    MPI.COMM_WORLD.Barrier();
    MPI.Finalize();
}
public scattervO(String[] args) throws Exception {
    final int MAXLEN = 10;
    int myself, tasks;

    MPI.Init(args);
    myself = MPI.COMM_WORLD.Rank();
    tasks = MPI.COMM_WORLD.Size();
    if (tasks > 8) {
        if (myself == 0) System.out.println("scattervO must run with at most 8 tasks!");
        MPI.Finalize();
        return;
    }

    int i, stride = 15;
    test out[] = new test[tasks * stride];
    test in[] = new test[MAXLEN];
    int dis[] = new int[tasks];
    int scount[] = new int[tasks];

    for (i = 0; i < MAXLEN; i++) {
        in[i] = new test();
        in[i].a = 0;
    }
    for (i = 0; i < tasks; i++) {
        dis[i] = i * stride;
        scount[i] = 5;
    }
    scount[0] = 10;
    for (i = 0; i < tasks * stride; i++) {
        out[i] = new test();
        out[i].a = i;
    }

    MPI.COMM_WORLD.Scatterv(out, 0, scount, dis, MPI.OBJECT, in, 0, scount[myself], MPI.OBJECT, 0);

    String[] messbuf = new String[1];
    if (myself == 0) {
        System.out.println("Original array on root...");
        for (i = 0; i < tasks * stride; i++) System.out.print(out[i].a + " ");
        System.out.println();
        System.out.println();
        System.out.println("Result on proc 0...");
        System.out.println("Stride = 15 " + "Count = " + scount[0]);
        for (i = 0; i < MAXLEN; i++) System.out.print(in[i].a + " ");
        System.out.println();
        System.out.println();
        // Reproduces the output of the original test case, but deterministically
        int nmess = tasks < 3 ? tasks : 3;
        for (int t = 1; t < nmess; t++) {
            MPI.COMM_WORLD.Recv(messbuf, 0, 1, MPI.OBJECT, t, 0);
            System.out.print(messbuf[0]);
        }
    }
    if (myself == 1) {
        StringBuffer mess = new StringBuffer();
        mess.append("Result on proc 1...\n");
        mess.append("Stride = 15 " + "Count = " + scount[1] + "\n");
        for (i = 0; i < MAXLEN; i++) mess.append(in[i].a + " ");
        mess.append("\n");
        mess.append("\n");
        messbuf[0] = mess.toString();
        MPI.COMM_WORLD.Send(messbuf, 0, 1, MPI.OBJECT, 0, 0);
    }
    if (myself == 2) {
        StringBuffer mess = new StringBuffer();
        mess.append("Result on proc 2...\n");
        mess.append("Stride = 15 " + "Count = " + scount[2] + "\n");
        for (i = 0; i < MAXLEN; i++) mess.append(in[i].a + " ");
        mess.append("\n");
        messbuf[0] = mess.toString();
        MPI.COMM_WORLD.Send(messbuf, 0, 1, MPI.OBJECT, 0, 0);
    }

    if (myself == 0) System.out.println("ScattervO TEST COMPLETE");
    MPI.Finalize();
}