SPEED
SETUP_MPI.f90
! Copyright (C) 2012 The SPEED FOUNDATION
! Author: Ilario Mazzieri
!
! This file is part of SPEED.
!
! SPEED is free software; you can redistribute it and/or modify it
! under the terms of the GNU Affero General Public License as
! published by the Free Software Foundation, either version 3 of the
! License, or (at your option) any later version.
!
! SPEED is distributed in the hope that it will be useful, but
! WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
! Affero General Public License for more details.
!
! You should have received a copy of the GNU Affero General Public License
! along with SPEED. If not, see <http://www.gnu.org/licenses/>.
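!> @brief Routine used to setup the communication buffer structure.
!! Argument roles, as inferred from the code below: node_proc maps each
!! node to its owning process; cs is the local connectivity vector, with
!! cs(0)-1 local elements; node_send/node_recv are the send/receive node
!! lists; proc_send/proc_recv hold the per-process counts in those lists.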
subroutine setup_mpi(nnode,node_proc,ncs,cs,&
                     nsend,node_send,nrecv,node_recv,&
                     nproc,proc_send,proc_recv,id,filempi,mpi_comm)

  implicit none
  include 'SPEED.MPI'

  character*70 :: filename, filempi, filename_new

  integer*4 :: nnode, ncs, nsend, nrecv, nproc, id, mpi_ierr
  integer*4 :: i, j, k, ie, ip, ic, unitfile, ncs_mpi, nelem_mpi, nelem_loc, mpi_comm

  integer*4, dimension(:), allocatable :: cs_mpi
  integer*4, dimension(:), allocatable :: i4vec
  integer*4, dimension(nnode) :: node_proc
  integer*4, dimension(0:ncs) :: cs
  integer*4, dimension(nsend) :: node_send
  integer*4, dimension(nrecv) :: node_recv
  integer*4, dimension(nproc) :: proc_send, proc_recv

  !real*8 :: start1,start2
  allocate(i4vec(ncs))
  nelem_loc = cs(0) - 1

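  ! cs is stored in compressed form: cs(0)-1 is the number of local
  ! elements, and the nodes of element ie are cs(cs(ie-1)+1 : cs(ie)-1).
  !
  ! First pass: for every other process ip-1, collect the local nodes
  ! that ip-1 owns; these are the nodes to be received from it.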
  do ip = 1, nproc
     if ((ip -1).eq.id) then
        proc_recv(ip) = 0
     else

        i4vec = 0
        ic = 0
        do ie = 1,nelem_loc
           do i = cs(ie -1) +1, cs(ie) -1
              if (node_proc(cs(i)).eq.(ip -1)) then
                 ic = ic +1
                 i4vec(ic) = cs(i)
              endif
           enddo
        enddo

        !sort the collected nodes in ascending order (exchange sort)
        do i = 1, ic
           do j = i, ic
              if (i4vec(j).lt.i4vec(i)) then
                 k = i4vec(i)
                 i4vec(i) = i4vec(j)
                 i4vec(j) = k
              endif
           enddo
        enddo

        !compress out duplicates; j ends up as the number of distinct
        !nodes to receive from process ip-1
        j = 1
        do i = 2, ic
           if (i4vec(i).ne.i4vec(j)) then
              j = j +1
              i4vec(j) = i4vec(i)
           endif
        enddo

        if (ic.eq.0) j = 0
        proc_recv(ip) = j

        !fill node_recv vector
        if (nrecv.ne.0) then
           ic = 0
           do i = 1, ip -1
              ic = ic +proc_recv(i)
           enddo

           do i = 1, proc_recv(ip)
              node_recv(ic +i) = i4vec(i)
           enddo
        endif

     endif
  enddo
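
  ! Two-pass convention: when the routine is entered with nrecv = 0 the
  ! buffers are left untouched and only the total receive count is
  ! returned, so the caller can allocate node_recv and call again.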
  if (nrecv.eq.0) then
     do i = 1, nproc
        nrecv = nrecv +proc_recv(i)
     enddo
  endif

  deallocate(i4vec)

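  ! Second pass: each process ip-1 in turn broadcasts its connectivity
  ! (cs_mpi); the others scan it for nodes they own, which are the nodes
  ! they will have to send to ip-1.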
  do ip = 1, nproc

     ! if(id .eq. 0) then

     ! if(id .eq. ip-1) start1=MPI_WTIME()
     ! filename = 'cons000000.mpi'
     ! unitfile = 40
     ! if (ip-1 .lt. 10) then
     !    write(filename(10:10),'(i1)') ip-1
     ! else if (ip-1 .lt. 100) then
     !    write(filename(9:10),'(i2)') ip-1
     ! else if (ip-1 .lt. 1000) then
     !    write(filename(8:10),'(i3)') ip-1
     ! else if (ip-1 .lt. 10000) then
     !    write(filename(7:10),'(i4)') ip-1
     ! else if (ip-1 .lt. 100000) then
     !    write(filename(6:10),'(i5)') ip-1
     ! else if (ip-1 .lt. 1000000) then
     !    write(filename(5:10),'(i6)') ip-1
     ! endif

     ! if(len_trim(filempi) .ne. 70) then
     !    filename_new = filempi(1:len_trim(filempi)) // '/' // filename
     ! else
     !    filename_new = filename
     ! endif

     ! open(unitfile,file=filename_new)
     ! read(unitfile,*) ncs_mpi
     ! allocate(cs_mpi(0:ncs_mpi))

     ! do i = 0, ncs_mpi
     !    read(unitfile,*) cs_mpi(i)
     ! enddo
     ! close(unitfile)

     ! endif
     !call MPI_SCATTER(ncs_mpi, 1, SPEED_INTEGER, ncs_mpi, 1, SPEED_INTEGER, 0, mpi_comm)
     !call MPI_BCAST(ncs_mpi,1,SPEED_INTEGER,0,MPI_COMM_WORLD,mpi_ierr)

     ! Note: the live broadcasts below use mpi_comm_world rather than the
     ! mpi_comm dummy argument.
     if (id .eq. ip-1) ncs_mpi = ncs
     call mpi_bcast(ncs_mpi,1,speed_integer,ip-1,mpi_comm_world,mpi_ierr)

     !if(id .ne. 0) allocate(cs_mpi(0:ncs_mpi))
     allocate(cs_mpi(0:ncs_mpi))
     if (id .eq. ip-1) cs_mpi = cs

     call mpi_bcast(cs_mpi,ncs_mpi+1,speed_integer,ip-1,mpi_comm_world,mpi_ierr)

     !call MPI_BCAST(cs_mpi,ncs_mpi+1,SPEED_INTEGER,0,MPI_COMM_WORLD,mpi_ierr)
     !call MPI_SCATTER(cs_mpi,ncs_mpi+1, SPEED_INTEGER, cs_mpi, ncs_mpi+1, SPEED_INTEGER, 0, mpi_comm)

     ! if(id .eq. ip-1) then
     !    start2=MPI_WTIME()
     !    write (*,*) "Time to send all .mpi files to all proc",start2-start1,"sec"
     ! endif

     allocate(i4vec(ncs_mpi))
     nelem_mpi = cs_mpi(0)-1

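     ! Nodes of the broadcast connectivity owned by this process must be
     ! sent to process ip-1 (mirror image of the receive pass above).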
     if ((ip -1).eq.id) then
        proc_send(ip) = 0
     else
        i4vec = 0

        ic = 0
        do ie = 1,nelem_mpi
           do i = cs_mpi(ie -1) +1, cs_mpi(ie) -1
              if (node_proc(cs_mpi(i)).eq.id) then
                 ic = ic +1
                 i4vec(ic) = cs_mpi(i)
              endif
           enddo
        enddo

        !sort in ascending order (as in the receive pass)
        do i = 1, ic
           do j = i, ic
              if (i4vec(j).lt.i4vec(i)) then
                 k = i4vec(i)
                 i4vec(i) = i4vec(j)
                 i4vec(j) = k
              endif
           enddo
        enddo

        !compress out duplicates and store the count
        j = 1
        do i = 2, ic
           if (i4vec(i).ne.i4vec(j)) then
              j = j +1
              i4vec(j) = i4vec(i)
           endif
        enddo

        if (ic .eq. 0) j = 0
        proc_send(ip) = j

        !fill node_send vector
        if (nsend.ne.0) then
           ic = 0
           do i = 1, ip -1
              ic = ic +proc_send(i)
           enddo

           do i = 1, proc_send(ip)
              node_send(ic +i) = i4vec(i)
           enddo
        endif
     endif

     deallocate(i4vec, cs_mpi)

  enddo

  ! As with nrecv: entering with nsend = 0 returns only the total count.
  if (nsend.eq.0) then
     do i = 1, nproc
        nsend = nsend +proc_send(i)
     enddo
  endif

  return

end subroutine setup_mpi
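
A note on usage: the nsend.eq.0 / nrecv.eq.0 branches above imply a two-pass
calling convention. The fragment below is a hypothetical driver sketching that
pattern; it is not taken from SPEED, and it assumes nnode, node_proc, ncs, cs,
nproc, id, filempi and mpi_comm have already been set up and MPI initialized.

  integer*4 :: nsend, nrecv
  integer*4, dimension(:), allocatable :: node_send, node_recv
  integer*4, dimension(:), allocatable :: proc_send, proc_recv

  allocate(proc_send(nproc), proc_recv(nproc))

  ! Pass 1: with nsend = nrecv = 0 the routine only computes the totals;
  ! the length-1 buffers are placeholders that are never written.
  nsend = 0
  nrecv = 0
  allocate(node_send(1), node_recv(1))
  call setup_mpi(nnode,node_proc,ncs,cs,&
                 nsend,node_send,nrecv,node_recv,&
                 nproc,proc_send,proc_recv,id,filempi,mpi_comm)

  ! Pass 2: allocate the real buffers and let the routine fill them.
  deallocate(node_send, node_recv)
  allocate(node_send(nsend), node_recv(nrecv))
  call setup_mpi(nnode,node_proc,ncs,cs,&
                 nsend,node_send,nrecv,node_recv,&
                 nproc,proc_send,proc_recv,id,filempi,mpi_comm)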