SPEED
SETUP_MPI_JUMP.f90 File Reference


Functions/Subroutines

subroutine setup_mpi_jump (ne_dg_loc, el_new, nnode, node_proc, nel_loc, loc_el_num, nnloc, loc_n_num, ncs, cs, nsend_jump, node_send_jump, nrecv_jump, node_recv_jump, nproc, proc_send_jump, proc_recv_jump, id, mpi_file)
 Routine used to set up the communication buffer structure.
 

Function/Subroutine Documentation

◆ setup_mpi_jump()

subroutine setup_mpi_jump ( integer*4  ne_dg_loc,
type(el4loop), dimension(ne_dg_loc), intent(in)  el_new,
integer*4  nnode,
integer*4, dimension(nnode)  node_proc,
integer*4  nel_loc,
integer*4, dimension(nel_loc)  loc_el_num,
integer*4  nnloc,
integer*4, dimension(nnloc)  loc_n_num,
integer*4  ncs,
integer*4, dimension(0:ncs)  cs,
integer*4  nsend_jump,
integer*4, dimension(nsend_jump)  node_send_jump,
integer*4  nrecv_jump,
integer*4, dimension(nrecv_jump)  node_recv_jump,
integer*4  nproc,
integer*4, dimension(nproc)  proc_send_jump,
integer*4, dimension(nproc)  proc_recv_jump,
integer*4  id,
character*70  mpi_file 
)

Routine used to set up the communication buffer structure.

Note
If called with nsend_jump and nrecv_jump equal to zero, the routine returns in these arguments the correct sizes for allocating node_send_jump and node_recv_jump. Otherwise it fills node_send_jump and node_recv_jump with the indices of the nodes to be sent to and received from the other processors, and proc_send_jump and proc_recv_jump with the number of nodes to be sent to and received from each processor.
Author
Ilario Mazzieri
Date
September, 2013
Version
1.0
Parameters
[in]   ne_dg_loc       number of local DG elements
[in]   el_new          structure containing the DG elements
[in]   nnode           number of local nodes
[in]   node_proc       vector containing the processor to which each node is assigned
[in]   nel_loc         number of local elements
[in]   loc_el_num      local element numeration
[in]   nnloc           number of local nodes
[in]   loc_n_num       local node numeration
[in]   ncs             length of the connectivity vector
[in]   cs              local connectivity vector
[in]   nproc           number of processors
[in]   id              processor identifier
[in]   mpi_file        folder where the *.mpi files are stored
[out]  nsend_jump      number of nodes to be sent to other processors
[out]  node_send_jump  ordered vector of nodes to be sent
[out]  nrecv_jump      number of nodes to be received from other processors
[out]  node_recv_jump  ordered vector of nodes to be received
[out]  proc_send_jump  vector containing the number of nodes to be sent to each processor
[out]  proc_recv_jump  vector containing the number of nodes to be received from each processor
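
The note above implies a two-phase calling protocol: a sizing call with the counters set to zero, allocation of the buffers, then a second call that fills them. The following is a minimal sketch of that pattern; only the interface of setup_mpi_jump is taken from this page, while the driver variables and the placeholder array idum are hypothetical.

  ! Phase 1: with nsend_jump = nrecv_jump = 0 the routine only counts the
  ! nodes to be exchanged (idum is a hypothetical placeholder buffer)
  nsend_jump = 0
  nrecv_jump = 0
  call setup_mpi_jump(ne_dg_loc, el_new, nnode, node_proc, nel_loc, loc_el_num, &
                      nnloc, loc_n_num, ncs, cs, nsend_jump, idum, &
                      nrecv_jump, idum, nproc, proc_send_jump, &
                      proc_recv_jump, id, mpi_file)

  ! Phase 2: allocate the buffers with the sizes just computed, then fill them
  allocate(node_send_jump(nsend_jump), node_recv_jump(nrecv_jump))
  call setup_mpi_jump(ne_dg_loc, el_new, nnode, node_proc, nel_loc, loc_el_num, &
                      nnloc, loc_n_num, ncs, cs, nsend_jump, node_send_jump, &
                      nrecv_jump, node_recv_jump, nproc, proc_send_jump, &
                      proc_recv_jump, id, mpi_file)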

Definition at line 49 of file SETUP_MPI_JUMP.f90.

  use max_var
  use dgjump

  implicit none

  type(el4loop), dimension(ne_dg_loc), intent(in) :: el_new

  character*70 :: filename, mpi_file, filename_new

  integer*4 :: nel_loc, ncs, nsend_jump, nrecv_jump, nproc, id, ne_dg_loc, nnode, nnloc
  integer*4 :: i, j, k, ie, ip, ic, ieloc
  integer*4 :: it, ir, im, tt, ttt
  integer*4 :: unitfile, ncs_mpi, nelem_mpi, pos

  integer*4, dimension(:), allocatable :: i4vec
  integer*4, dimension(:), allocatable :: cs_mpi
  integer*4, dimension(nel_loc) :: loc_el_num
  integer*4, dimension(nnloc) :: loc_n_num
  integer*4, dimension(nnode) :: node_proc
  integer*4, dimension(0:ncs) :: cs
  integer*4, dimension(nsend_jump) :: node_send_jump
  integer*4, dimension(nrecv_jump) :: node_recv_jump
  integer*4, dimension(nproc) :: proc_send_jump, proc_recv_jump
!-----------------------nodes to receive---------------------------------

  do ip = 1, nproc

     if ((ip -1) .eq. id) then
        proc_recv_jump(ip) = 0
     else

        ! Build the name DGCS<rank>.mpi of the connectivity file written by processor ip-1
        filename = 'DGCS000000.mpi'
        unitfile = 40
        if (ip-1 .lt. 10) then
           write(filename(10:10),'(i1)') ip-1
        else if (ip-1 .lt. 100) then
           write(filename(9:10),'(i2)') ip-1
        else if (ip-1 .lt. 1000) then
           write(filename(8:10),'(i3)') ip-1
        else if (ip-1 .lt. 10000) then
           write(filename(7:10),'(i4)') ip-1
        else if (ip-1 .lt. 100000) then
           write(filename(6:10),'(i5)') ip-1
        else if (ip-1 .lt. 1000000) then
           write(filename(5:10),'(i6)') ip-1
        endif

        if (len_trim(mpi_file) .ne. 70) then
           filename_new = mpi_file(1:len_trim(mpi_file)) // '/' // filename
        else
           filename_new = filename
        endif

        ! Read the connectivity vector of processor ip-1
        open(unitfile,file=filename_new)
        read(unitfile,*) ncs_mpi
        allocate(cs_mpi(0:ncs_mpi))

        do i = 0, ncs_mpi
           read(unitfile,*) cs_mpi(i)
        enddo
        close(unitfile)

        allocate(i4vec(ncs_mpi))
        nelem_mpi = cs_mpi(0) -1

        i4vec = 0
        ic = 0

        ! Collect the nodes of the remote elements that face local DG elements
        do im = 1, ne_dg_loc

           do it = 1, el_new(im)%num_of_ne

              ie = el_new(im)%el_conf(it,1)

              call check_mpi(ncs_mpi, cs_mpi, ie, tt, pos)

              if (tt .eq. 1) then

                 do i = cs_mpi(pos -1) +1, cs_mpi(pos) -1

                    ! if (node_proc(cs_mpi(i)) .eq. (ip -1)) then
                    ttt = 0
                    if (ic .ge. 1) call check_vector(ic, i4vec(1:ic), cs_mpi(i), ttt)
                    if (ttt .eq. 0) then
                       ic = ic +1
                       i4vec(ic) = cs_mpi(i)
                    endif
                    ! endif
                 enddo

              endif

           enddo
        enddo

        ! Sort the collected nodes in ascending order
        do i = 1, ic
           do j = i, ic
              if (i4vec(j) .lt. i4vec(i)) then
                 k = i4vec(i)
                 i4vec(i) = i4vec(j)
                 i4vec(j) = k
              endif
           enddo
        enddo

        ! Remove duplicates from the sorted vector
        j = 1
        do i = 2, ic
           if (i4vec(i) .ne. i4vec(j)) then
              j = j +1
              i4vec(j) = i4vec(i)
           endif
        enddo

        if (ic .eq. 0) j = 0
        proc_recv_jump(ip) = j

        ! Second call (nrecv_jump already known): fill node_recv_jump
        if (nrecv_jump .ne. 0) then
           ic = 0
           do i = 1, ip -1
              ic = ic + proc_recv_jump(i)
           enddo

           do i = 1, proc_recv_jump(ip)
              node_recv_jump(ic +i) = i4vec(i)
           enddo
        endif

        deallocate(cs_mpi, i4vec)
     endif

  enddo

  ! First call: return the total number of nodes to be received
  if (nrecv_jump .eq. 0) then
     do i = 1, nproc
        nrecv_jump = nrecv_jump + proc_recv_jump(i)
     enddo
  endif
!-----------------------nodes to send------------------------------------

  allocate(i4vec(ncs))

  do ip = 1, nproc
     if ((ip -1) .eq. id) then
        proc_send_jump(ip) = 0
     else

        ! Build the name DGCS<rank>.mpi of the connectivity file written by processor ip-1
        filename = 'DGCS000000.mpi'
        unitfile = 40
        if (ip-1 .lt. 10) then
           write(filename(10:10),'(i1)') ip-1
        else if (ip-1 .lt. 100) then
           write(filename(9:10),'(i2)') ip-1
        else if (ip-1 .lt. 1000) then
           write(filename(8:10),'(i3)') ip-1
        else if (ip-1 .lt. 10000) then
           write(filename(7:10),'(i4)') ip-1
        else if (ip-1 .lt. 100000) then
           write(filename(6:10),'(i5)') ip-1
        else if (ip-1 .lt. 1000000) then
           write(filename(5:10),'(i6)') ip-1
        endif

        if (len_trim(mpi_file) .ne. 70) then
           filename_new = mpi_file(1:len_trim(mpi_file)) // '/' // filename
        else
           filename_new = filename
        endif

        ! Read the connectivity vector of processor ip-1
        open(unitfile,file=filename_new)
        read(unitfile,*) ncs_mpi
        allocate(cs_mpi(0:ncs_mpi))

        do i = 0, ncs_mpi
           read(unitfile,*) cs_mpi(i)
        enddo
        close(unitfile)

        i4vec = 0
        ic = 0

        ! Collect the local nodes belonging to DG elements that face
        ! elements of processor ip-1
        do im = 1, ne_dg_loc
           ir = el_new(im)%ind

           do it = 1, el_new(im)%num_of_ne
              ie = el_new(im)%el_conf(it,1)

              call check_mpi(ncs_mpi, cs_mpi, ie, tt, pos)
              if (tt .eq. 1) then

                 call get_indloc_from_indglo(loc_el_num, nel_loc, ir, ieloc)

                 do i = cs(ieloc -1) +1, cs(ieloc) -1

                    ! if (node_proc(loc_n_num(cs(i))) .eq. id) then
                    ttt = 0
                    if (ic .ge. 1) call check_vector(ic, i4vec(1:ic), loc_n_num(cs(i)), ttt)

                    if (ttt .eq. 0) then
                       ic = ic +1
                       i4vec(ic) = loc_n_num(cs(i))
                    endif
                    ! endif
                 enddo
              endif
           enddo
        enddo

        ! Sort the collected nodes in ascending order
        do i = 1, ic
           do j = i, ic
              if (i4vec(j) .lt. i4vec(i)) then
                 k = i4vec(i)
                 i4vec(i) = i4vec(j)
                 i4vec(j) = k
              endif
           enddo
        enddo

        ! Remove duplicates from the sorted vector
        j = 1
        do i = 2, ic
           if (i4vec(i) .ne. i4vec(j)) then
              j = j +1
              i4vec(j) = i4vec(i)
           endif
        enddo

        if (ic .eq. 0) j = 0
        proc_send_jump(ip) = j

        ! Second call (nsend_jump already known): fill node_send_jump
        if (nsend_jump .ne. 0) then
           ic = 0
           do i = 1, ip -1
              ic = ic + proc_send_jump(i)
           enddo

           do i = 1, proc_send_jump(ip)
              node_send_jump(ic +i) = i4vec(i)
           enddo
        endif

        deallocate(cs_mpi)

     endif
  enddo

  ! First call: return the total number of nodes to be sent
  if (nsend_jump .eq. 0) then
     do i = 1, nproc
        nsend_jump = nsend_jump + proc_send_jump(i)
     enddo
  endif

  deallocate(i4vec)

  return
subroutine check_mpi(n, v, ind, tt, pos)
Checks if an element is present in a vector and gives its position.
Definition CHECK_MPI.f90:30
subroutine check_vector(n, v, ind, tt)
Checks if an element is present in a vector.
subroutine get_indloc_from_indglo(local_el, nel_loc, ie, ic)
Returns the local id corresponding to a global id.
module dgjump: Contains the structure for jump matrices.
Definition MODULES.f90:155
module max_var: Sets maximal bounds.
Definition MODULES.f90:54

References check_mpi(), check_vector(), and get_indloc_from_indglo().
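
As a side note, the zero-padded rank in the file name 'DGCS000000.mpi' is built in the listing above with a chain of width-specific writes. A single write using the Iw.m edit descriptor, which pads the value with leading zeros up to m digits, would produce the same string; this is a sketch of the alternative, not the code actually used in the file:

  write(filename(5:10),'(i6.6)') ip-1   ! e.g. ip-1 = 42 gives filename = 'DGCS000042.mpi'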
