t*****o 发帖数: 74 | 1 ok, here is the code
include "mpif.h"
c
parameter ( MAX = 100 )
integer myid, numprocs, ierr
integer mstatus(MPI_STATUS_SIZE)
real a(MAX), b(MAX)
c ======= MPI initialization
call MPI_INIT( ierr )
call MPI_COMM_SIZE( MPI_COMM_WORLD, numprocs, ierr )
call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
n = 5
c --- initialize array
do i = 1, n
a(i) = myid*1. + (i-1)*0.01
end do
c --- print out for checkin |
|
s****h 发帖数: 921 | 2 我有2个script文件:
psse_dyn.py,里面定义了一个 function psse_dyn()
psse_chop.py,里面定义了一个 function psse_chop()
我在另外一个Python script里调用任何一个都可以运行.
调用:
import psse_chop
psse_chop.psse_chop()
或者:
import psse_dyn
ierr=psse_dyn.psse_dyn()
二个都没问题.
可是如果同时做:
import psse_dyn
ierr=psse_dyn.psse_dyn()
import psse_chop
psse_chop.psse_chop()
运行到psse_chop.psse_chop()就出错。
据说psse_dyn和psse_chop里的函数不是我写的。
好像psse_dyn是用Visual Studio VC++8做的;而psse_chop涉及到numpy,与Visual
Studio VC++8不兼容。
遇到这种问题,有解决办法没有?
谢谢! |
|
N***m 发帖数: 4460 | 3 do you need something like these?
ierr=MPI_Wait(&send_request,&status);
ierr=MPI_Wait(&recv_request,&status); |
|
c**********w 发帖数: 1746 | 4 这个程序就是想测试一下ISEND和IRECV。启动四个线程,每个把一个数字发到rank+1的
下一个thread上,最后一个rank为3的发回到rank=0的线程,用WAIT_ALL等待,然后把
结果打印出来,已经编译成功,发现四个线程也都启动,但却不能通讯。测试条件为
quad-core, linux, openMP
程序:
program hello
USE mpi
integer rank, size, ierror, status(MPI_STATUS_SIZE)
integer dest,tag, ierr,tag2, req(2), src
REAL*8 res, msg, ans,sout
integer status_array(MPI_STATUS_SIZE,2)
call MPI_INIT(ierror)
call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierror)
call MPI_COMM_RANK(MPI_CO |
|
m*****7 发帖数: 67 | 5 tag必须一样才能收到
现在只有rank1能收到是因为:tag2初始值是0,
rank0发给rank1的msg tag也是0,
所以碰巧match上了
这个程序就是想测试一下ISEND和IRECV。启动四个线程,每个把一个数字发到rank+1的
下一个thread上,最后一个rank为3的发回到rank=0的线程,用WAIT_ALL等待,然后把
结果打印出来,已经编译成功,发现四个线程也都启动,但却不能通讯。测试条件为
quad-core, linux, openMP
程序:
program hello
USE mpi
integer rank, size, ierror, status(MPI_STATUS_SIZE)
integer dest,tag, ierr,tag2, req(2), src
REAL*8 res, msg, ans,sout
integer status_array(MPI_STATUS_SIZE,2)
call MPI_INIT(ierror)
cal |
|
c**********w 发帖数: 1746 | 6 这个程序就是想测试一下ISEND和IRECV。启动四个线程,每个把一个数字发到rank+1的
下一个thread上,最后一个rank为3的发回到rank=0的线程,用WAIT_ALL等待,然后把
结果打印出来,已经编译成功,发现四个线程也都启动,但却不能通讯。测试条件为
quad-core, linux, openMP
程序:
program hello
USE mpi
integer rank, size, ierror, status(MPI_STATUS_SIZE)
integer dest,tag, ierr,tag2, req(2), src
REAL*8 res, msg, ans,sout
integer status_array(MPI_STATUS_SIZE,2)
call MPI_INIT(ierror)
call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierror)
call MPI_COMM_RANK(M |
|
x*y 发帖数: 364 | 7 I can use MPI_gather to gather integer and real, but I couldn't gather double
precision arrays.
The code for gather part is:
call MPI_gather(a,nipe,MPI_DOUBLE_PRECISION,b,nipe,
& MPI_DOUBLE_PRECISION, 0,MPI_COMM_WORLD,ierr)
I got the error when run the code as following:
p1_3410: (0.018301) xx_shmalloc: returning NULL; requested 4000048 bytes
p1_3410: (0.018301) p4_shmalloc returning NULL; request = 4000048 bytes
You can increase the amount of memory by setting the e |
|
l*******G 发帖数: 1191 | 8 sigh,,you should use your own random number generator,, otherwise, your code
is not portable coz it depends on compiler.. Here are two codes copied from
ROMS model:
SUBROUTINE nrng (ix, a, n, ierr)
!
!=======================================================================
! !
! Gaussian random-number generator from the NSWC Library. It calls !
! the NSWC uniform random-number generator, URNG. !
! |
|
c**********w 发帖数: 1746 | 9 发信人: chairmanmeow (chairmanmeow), 信区: Programming
标 题: 哪位帮忙看一个极为简单的 MPI 程序,感谢拉!
发信站: BBS 未名空间站 (Sat Jul 17 13:59:56 2010, 美东)
这个程序就是想测试一下ISEND和IRECV。启动四个线程,每个把一个数字发到rank+1的
下一个thread上,最后一个rank为3的发回到rank=0的线程,用WAIT_ALL等待,然后把
结果打印出来,已经编译成功,发现四个线程也都启动,但却不能通讯。测试条件为
quad-core, linux, openMP
程序:
program hello
USE mpi
integer rank, size, ierror, status(MPI_STATUS_SIZE)
integer dest,tag, ierr,tag2, req(2), src
REAL*8 res, msg, ans,sout
integer status_array(MPI_STATUS |
|
c**********w 发帖数: 1746 | 10 这个程序就是想测试一下ISEND和IRECV。启动四个线程,每个把一个数字发到rank+1的
下一个thread上,最后一个rank为3的发回到rank=0的线程,用WAIT_ALL等待,然后把
结果打印出来,已经编译成功,发现四个线程也都启动,但却不能通讯。测试条件为
quad-core, linux, openMP
程序:
program hello
USE mpi
integer rank, size, ierror, status(MPI_STATUS_SIZE)
integer dest,tag, ierr,tag2, req(2), src
REAL*8 res, msg, ans,sout
integer status_array(MPI_STATUS_SIZE,2)
call MPI_INIT(ierror)
call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierror)
call MPI_COMM_RANK(MPI_CO |
|