iterat = 10
comm2 = mu.comm_spawn("worker_c", None, 2)
mu.comm_bcast(comm2, iterat, MPI.INT)
Note: comm_bcast is a function wrapper I wrote:
def comm_bcast(comm, array, mpitype=None):
    comm.Barrier()
    if mpitype is None:  # Receive
        comm.Bcast(array, root=0)
    else:  # Send
        comm.Bcast([array, mpitype], root=MPI.ROOT)
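For reference, collectives on the intercommunicator returned by Spawn use root=MPI.ROOT on the spawning side and the master's rank (0) on the worker side, so a purely illustrative Python worker hitting the receive branch might look like this (a sketch only, not part of my actual code):
from mpi4py import MPI
import numpy as np

# Illustrative worker only: receive the broadcast from the spawning master.
comm = MPI.Comm.Get_parent()          # intercommunicator back to the master
iterat = np.zeros(1, dtype=np.intc)   # preallocated buffer matching MPI_INT
comm.Barrier()
comm.Bcast(iterat, root=0)            # root 0 = the master's rank in the remote group
print("worker received", iterat[0])
comm.Disconnect()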
The corresponding C worker, worker_c.c, compiled against MPICH:
int iterat;
int root = 0;
MPI_Comm comm;
MPI_Init(NULL, NULL);
MPI_Comm_get_parent(&comm);
MPI_Barrier(comm);
MPI_Bcast(&iterat, 1, MPI_INT, root, comm);
When compiled and executed, I get the following output:
Traceback (most recent call last):
File "./master.py", line 81, in <module>
mu.comm_bcast(comm0, niterat, mpitype=MPI.INT)
File "/home/maddie/code/commloop/bin/mutils.py", line 116, in comm_bcast
comm.Bcast([array, mpitype], root=MPI.ROOT)
File "Comm.pyx", line 405, in mpi4py.MPI.Comm.Bcast (src/mpi4py.MPI.c:66743)
File "message.pxi", line 395, in mpi4py.MPI._p_msg_cco.for_bcast (src/mpi4py.MPI.c:23279)
File "message.pxi", line 355, in mpi4py.MPI._p_msg_cco.for_cco_send (src/mpi4py.MPI.c:22959)
File "message.pxi", line 111, in mpi4py.MPI.message_simple (src/mpi4py.MPI.c:20516)
File "message.pxi", line 51, in mpi4py.MPI.message_basic (src/mpi4py.MPI.c:19644)
File "asbuffer.pxi", line 108, in mpi4py.MPI.getbuffer (src/mpi4py.MPI.c:6757)
File "asbuffer.pxi", line 50, in mpi4py.MPI.PyObject_GetBufferEx (src/mpi4py.MPI.c:6093)
TypeError: expected a readable buffer object
Assertion failed in file socksm.c at line 362: sc->pg_is_set
internal ABORT - process 0
Fatal error in PMPI_Barrier: Other MPI error, error stack:
PMPI_Barrier(425).....................: MPI_Barrier(comm=0x84000005) failed
MPIR_Barrier_impl(331)................: Failure during collective
MPIR_Barrier_impl(323)................:
MPIR_Barrier_inter(187)...............:
MPIR_Bcast_inter(1280)................:
MPIR_Bcast_intra(1119)................:
MPIR_Bcast_scatter_ring_allgather(962):
MPIR_Bcast_binomial(213)..............: Failure during collective
MPIR_Bcast_scatter_ring_allgather(955):
MPIR_Bcast_binomial(213)..............: Failure during collective
MPIR_Bcast_inter(1263)................:
dequeue_and_set_error(596)............: Communication error with rank 0
Traceback (most recent call last):
File "worker.py", line 43, in <module>
arrsiz = mu.comm_scatter(comm, arrsiz)
File "/home/maddie/code/commloop/bin/mutils.py", line 58, in comm_scatter
comm.Scatter(None, array, root=0)
File "Comm.pyx", line 441, in mpi4py.MPI.Comm.Scatter (src/mpi4py.MPI.c:67285)
mpi4py.MPI.Exception: Other MPI error, error stack:
PMPI_Scatter(791).........: MPI_Scatter(sbuf=(nil), scount=0, MPI_BYTE, rbuf=0x248ac90, rcount=1, MPI_LONG, root=0, comm=0x84000001) failed
MPIR_Scatter_impl(619)....:
MPIR_Scatter(588).........:
MPIR_Scatter_inter(517)...:
MPIR_Scatter_impl(619)....:
MPIR_Scatter(582).........:
MPIR_Scatter_intra(398)...: Failure during collective
MPIR_Scatter_inter(499)...:
dequeue_and_set_error(596): Communication error with rank 0
Traceback (most recent call last):
File "worker.py", line 43, in <module>
arrsiz = mu.comm_scatter(comm, arrsiz)
File "/home/maddie/code/commloop/bin/mutils.py", line 56, in comm_scatter
comm.Barrier()
File "Comm.pyx", line 394, in mpi4py.MPI.Comm.Barrier (src/mpi4py.MPI.c:66612)
mpi4py.MPI.Exception: Other MPI error, error stack:
PMPI_Barrier(425).....................: MPI_Barrier(comm=0x84000005) failed
MPIR_Barrier_impl(331)................: Failure during collective
MPIR_Barrier_impl(323)................:
MPIR_Barrier_inter(187)...............:
MPIR_Bcast_inter(1280)................:
MPIR_Bcast_intra(1119)................:
MPIR_Bcast_scatter_ring_allgather(962):
MPIR_Bcast_binomial(213)..............: Failure during collective
MPIR_Bcast_scatter_ring_allgather(955):
MPIR_Bcast_binomial(213)..............: Failure during collective
MPIR_Bcast_inter(1263)................:
MPIDI_CH3U_Recvq_FDU_or_AEP(380)......: Communication error with rank 0
^CCtrl-C caught... cleaning up processes
So it seems like there's something wrong with my call syntax in worker_c.c, but I'm not sure what it is.
Thanks for any help,
Madison
I'm not sure, but it looks like you need to pass a single-element numpy array instead of the bare Python integer, e.g. np.asarray(iterat, dtype=np.int).
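Something along these lines (an untested sketch, assuming the mutils wrapper from the question is importable; note that MPI.INT corresponds to a C int, so np.intc or np.int32 is the safest dtype for the buffer):
import numpy as np
from mpi4py import MPI
import mutils as mu   # the wrapper module from the question

iterat = 10
niterat = np.array([iterat], dtype=np.intc)   # one-element buffer; np.intc matches MPI.INT

comm2 = mu.comm_spawn("worker_c", None, 2)    # spawn two C workers, as in the question
mu.comm_bcast(comm2, niterat, MPI.INT)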
After switching to a single-element numpy array (updated code below), the run now produces:
Internal Error: invalid error code 389e0e (Ring ids do not match) in MPIR_Bcast_impl:1328
Fatal error in PMPI_Bcast: Other MPI error, error stack:
PMPI_Bcast(1478).....: MPI_Bcast(buf=0x7ffff4e85a80, count=1, MPI_INT, root=0, comm=0x84000005) failed
MPIR_Bcast_impl(1328):
=====================================================================================
= BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES
= EXIT CODE: 11
= CLEANING UP REMAINING PROCESSES
= YOU CAN IGNORE THE BELOW CLEANUP MESSAGES
=====================================================================================
[proxy:0:0@exo] HYD_pmcd_pmip_control_cmd_cb (./pm/pmiserv/pmip_cb.c:906): assert (!closed) failed
[proxy:0:0@exo] HYDT_dmxu_poll_wait_for_event (./tools/demux/demux_poll.c:77): callback returned error status
[proxy:0:0@exo] main (./pm/pmiserv/pmip.c:226): demux engine error waiting for event
[proxy:4:0@exo] HYD_pmcd_pmip_control_cmd_cb (./pm/pmiserv/pmip_cb.c:906): assert (!closed) failed
[proxy:4:0@exo] HYDT_dmxu_poll_wait_for_event (./tools/demux/demux_poll.c:77): callback returned error status
[proxy:4:0@exo] main (./pm/pmiserv/pmip.c:226): demux engine error waiting for event
[mpiexec@exo] HYDT_bscu_wait_for_completion (./tools/bootstrap/utils/bscu_wait.c:70): one of the processes terminated badly; aborting
[mpiexec@exo] HYDT_bsci_wait_for_completion (./tools/bootstrap/src/bsci_wait.c:23): launcher returned error waiting for completion
[mpiexec@exo] HYD_pmci_wait_for_completion (./pm/pmiserv/pmiserv_pmci.c:189): launcher returned error waiting for completion
[mpiexec@exo] main (./ui/mpich/mpiexec.c:397): process manager error waiting for completion
The updated master script:
#!/usr/bin/env python
from mpi4py import MPI
import test3 as mu
import numpy as np
# Spawn the communicator
comm2 = mu.comm_spawn("worker_c", None, 2)
iterat = 10
niterat = np.asarray([iterat], np.int)
mu.comm_bcast(comm2, niterat, MPI.INT)
The wrapper module (imported above as mu):
from mpi4py import MPI
import numpy as np
import math as m
def comm_spawn(cmd, arg, nprocs):
    comm = MPI.COMM_SELF.Spawn(cmd, arg, nprocs)
    return comm

def comm_bcast(comm, array, mpitype=None):
    comm.Barrier()
    if mpitype is None:  # Receive
        comm.Bcast(array, root=0)
    else:  # Send
        comm.Bcast([array, mpitype], root=MPI.ROOT)
And the C worker (worker_c.c):
#include <stdio.h>
#include <stdlib.h>
#include <mpi/mpi.h>
int main(int argc, char *argv[]){
    int myid, world_size;//, size;
    int root = 0;
    int* iterat;

    // Open communications with the Master
    MPI_Comm comm;
    MPI_Init(NULL, NULL);
    MPI_Comm_get_parent(&comm);

    // Number of iterations to loop over
    MPI_Barrier(comm);
    MPI_Bcast(&iterat, 1, MPI_INT, root, comm);

    MPI_Finalize();
}