
Open MPI User's Mailing List Archives


Subject: [OMPI users] problems with parallel IO
From: Alexander Beck-Ratzka (alexander.beck-ratzka_at_[hidden])
Date: 2011-08-25 08:25:27


Hi Folks,

I have problems retrieving data that I have written out with MPI parallel I/O.
In tests everything works fine, but within a larger environment, the data
read in differ from the data written out.

Here is the setup of my experiment:

##### the writer #####
program parallel_io

  use mpi

  implicit none

  integer,parameter :: nx=1,ny=300,nz=256,nv=12
  integer ierr, i, myrank, comm_size, BUFSIZE, thefile, intsize

  parameter (BUFSIZE=1075200)
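  ! BUFSIZE = (nv+2)*nx*ny*nz = 14*1*300*256 = 1075200 reals per rank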

  real,dimension(nv+2,nx,ny,nz) :: v1

  integer (kind=MPI_OFFSET_KIND) disp
  integer ix, iy, iz, nn, counter

  character(6) cname
  call mpi_init(ierr)
  call mpi_comm_size(mpi_comm_world, comm_size, ierr)
  call mpi_comm_rank(mpi_comm_world, myrank,ierr)

  counter=0
  ! fill v1 with rank-dependent test values (loop bounds match the array dimensions)
  do iz = 1,nz
     do iy = 1,ny
        do ix = 1,nx
           do nn = 1,nv+2
              v1(nn,ix,iy,iz) = counter*(myrank+20)/200.
              counter = counter+1
           end do
        end do
     end do
  end do

  call mpi_barrier(mpi_comm_world,ierr)

  call mpi_type_extent(mpi_real, intsize, ierr)
  call mpi_file_open(mpi_comm_world, 'testfile', MPI_MODE_WRONLY + &
       MPI_MODE_CREATE, mpi_info_null, thefile, ierr)
  call mpi_type_size(MPI_INTEGER, intsize, ierr)

  disp = myrank * BUFSIZE * intsize
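  ! byte offset where this rank's block starts in the shared file;
  ! note that intsize was taken from MPI_INTEGER above, while MPI_REAL data is written below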

  ! call mpi_file_set_view(thefile, disp, MPI_INTEGER, MPI_INTEGER, 'native', &
  !      mpi_info_null, ierr)
  call mpi_file_write_at(thefile, disp, v1(1,1,1,1), BUFSIZE, MPI_REAL, &
       mpi_status_ignore, ierr)

  call mpi_file_close(thefile, ierr)

  ! print the data written out...

  if (myrank.eq.0) then
     open (12, file='out000.dat-parallel-write-0')
     write (12,'(i4,e18.8)') ((((myrank, &
          v1(nn,ix,iy,iz),nn=1,nv+2),ix=1,nx),iy=1,ny), iz=1,nz)
     close (12)
  endif

  call mpi_finalize(ierr)

end program parallel_io

###############################################
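Each rank thus writes its BUFSIZE reals in one contiguous block, starting at
byte offset myrank * BUFSIZE * intsize; with 4-byte types that is, for example,
1 * 1075200 * 4 = 4300800 for rank 1.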

and the reader...

####################reader#######################
program parallel_read_io

  use mpi

  implicit none
  integer,parameter :: nx=1,ny=300,nz=256,nv=12

  integer ierr, i, myrank, comm_size, BUFSIZE, thefile, realsize
  parameter (BUFSIZE=1075200)
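  ! BUFSIZE matches (nv+2)*nx*ny*nz, the number of reals each rank wrote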

  real,dimension(nv+2,nx,ny,nz) :: v1

  integer (kind=MPI_OFFSET_KIND) disp

  integer ix, iy, iz, nn

  call mpi_init(ierr)
  call mpi_comm_size(mpi_comm_world, comm_size, ierr)
  call mpi_comm_rank(mpi_comm_world, myrank,ierr)

  ! do i=0,BUFSIZE
  ! buf(i) = myrank*BUFSIZE + i
  ! end do

  call mpi_type_extent(mpi_integer, realsize, ierr)
  call mpi_file_open(mpi_comm_world, 'testfile', MPI_MODE_RDONLY, mpi_info_null, &
       thefile, ierr)
  call mpi_type_size(MPI_REAL, realsize, ierr)

  disp = myrank * BUFSIZE * realsize
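  ! byte offset of this rank's block; here realsize is the size of MPI_REAL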
  print*, 'myrank: ', myrank, ' disp: ', disp, ' realsize: ', realsize

  ! call mpi_file_set_view(thefile, disp, MPI_INTEGER, MPI_INTEGER, 'native', &
  !      mpi_info_null, ierr)
  ! call mpi_file_read(thefile, buf, BUFSIZE, MPI_INTEGER, mpi_status_ignore, &
  !      ierr)

  call mpi_file_read_at(thefile, disp, v1(1,1,1,1), BUFSIZE, MPI_REAL, &
       mpi_status_ignore, ierr)

  call mpi_file_close(thefile, ierr)

  call mpi_barrier(mpi_comm_world,ierr)

  ! print the data read in...

  if (myrank.eq.0) then
     open (12, file='out000.dat-parallel-read-0')
     write (12,'(i4,e18.8)') ((((myrank, &
          v1(nn,ix,iy,iz),nn=1,nv+2),ix=1,nx),iy=1,ny), iz=1,nz)
     close (12)
  endif

  call mpi_finalize(ierr)

end program parallel_read_io
###############################################

Here everything works fine. However, when I integrate this into a larger
program, the data written out and the data read back in are totally different.

The setup is the same as in this experiment, but I need some more memory...

What might be the reason for such problems? And if an MPI call fails, how
can I detect that within a Fortran program? I have only found examples of
MPI error handling in C or C++; I would need an example for Fortran.
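To illustrate, the kind of check I have in mind looks like the sketch below
(my own untested attempt; mpi_error_string, mpi_comm_set_errhandler, and
MPI_ERRORS_RETURN are standard MPI, and as far as I know file operations
already return error codes by default instead of aborting):

##### error check sketch #####
program mpi_error_check

  use mpi

  implicit none

  integer :: ierr, thefile, errlen
  character(len=MPI_MAX_ERROR_STRING) :: errmsg

  call mpi_init(ierr)

  ! communicators abort on error by default; make them return codes instead
  call mpi_comm_set_errhandler(mpi_comm_world, MPI_ERRORS_RETURN, ierr)

  ! try to open a file that does not exist, to provoke an error code
  call mpi_file_open(mpi_comm_world, 'does-not-exist', MPI_MODE_RDONLY, &
       mpi_info_null, thefile, ierr)

  if (ierr .ne. MPI_SUCCESS) then
     ! translate the numeric code into a readable message
     call mpi_error_string(ierr, errmsg, errlen, ierr)
     print *, 'mpi_file_open failed: ', errmsg(1:errlen)
  end if

  call mpi_finalize(ierr)

end program mpi_error_check
###############################################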

So any hints or ideas?

Best wishes

Alexander