MFC:Pre_process  v1.0
m_mpi_proxy.f90
1 !! __ _______________
2 !! / |/ / ____/ ____/
3 !! / /|_/ / /_ / /
4 !! / / / / __/ / /___
5 !! /_/ /_/_/ \____/
6 !!
7 !! This file is part of MFC.
8 !!
9 !! MFC is the legal property of its developers, whose names
10 !! are listed in the copyright file included with this source
11 !! distribution.
12 !!
13 !! MFC is free software: you can redistribute it and/or modify
14 !! it under the terms of the GNU General Public License as published
15 !! by the Free Software Foundation, either version 3 of the license
16 !! or any later version.
17 !!
18 !! MFC is distributed in the hope that it will be useful,
19 !! but WITHOUT ANY WARRANTY; without even the implied warranty of
20 !! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 !! GNU General Public License for more details.
22 !!
23 !! You should have received a copy of the GNU General Public License
24 !! along with MFC (LICENSE).
25 !! If not, see <http://www.gnu.org/licenses/>.
26 
33 
 39  MODULE m_mpi_proxy
 40 
41 
42  ! Dependencies =============================================================
43  USE mpi !< message passing interface (mpi) module
44 
45  USE m_derived_types !< definitions of the derived types
46 
47  USE m_global_parameters !< global parameters for the code
48  ! ==========================================================================
49 
50 
51  IMPLICIT NONE
52 
53  INTEGER, PRIVATE :: err_code, ierr
55 
56 
57  CONTAINS
58 
62  SUBROUTINE s_mpi_initialize() ! ----------------------------
63 
64 
65  ! Establishing the MPI environment
66  CALL mpi_init(ierr)
67 
68 
 69  ! Checking whether the MPI environment has been properly initialized
70  IF(ierr /= mpi_success) THEN
71  print '(A)', 'Unable to initialize MPI environment. Exiting ...'
72  CALL mpi_abort(mpi_comm_world, err_code, ierr)
73  END IF
74 
75 
76  ! Querying number of processors available for the job
77  CALL mpi_comm_size(mpi_comm_world, num_procs, ierr)
78 
79 
80  ! Identifying the rank of the local processor
 81  CALL mpi_comm_rank(mpi_comm_world, proc_rank, ierr)
82 
83 
84  END SUBROUTINE s_mpi_initialize ! --------------------------
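For context, the start-up sequence above (initialization, size query, rank query), together with the shutdown later handled by s_mpi_finalize, can be exercised in a minimal standalone sketch. The program name and the printed message are illustrative and not part of MFC:

program mpi_lifecycle_demo
    use mpi
    implicit none
    integer :: ierr, num_procs, proc_rank

    ! Establish the MPI environment and abort on failure, as in s_mpi_initialize
    call mpi_init(ierr)
    if (ierr /= mpi_success) call mpi_abort(mpi_comm_world, 1, ierr)

    ! Query the communicator size and the local rank
    call mpi_comm_size(mpi_comm_world, num_procs, ierr)
    call mpi_comm_rank(mpi_comm_world, proc_rank, ierr)
    print '(A,I0,A,I0)', 'Rank ', proc_rank, ' of ', num_procs

    ! Shut the environment down, as in s_mpi_finalize
    call mpi_finalize(ierr)
end program mpi_lifecycle_demo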
85 
86 
87 
88 
90  SUBROUTINE s_mpi_abort() ! ---------------------------------------------
91 
92  ! Terminating the MPI environment
93  CALL mpi_abort(mpi_comm_world, err_code, ierr)
94 
95  END SUBROUTINE s_mpi_abort ! -------------------------------------------
96 
97 
98 
99  !! @param q_cons_vf Conservative variables
100  SUBROUTINE s_initialize_mpi_data(q_cons_vf) ! --------------------------
102  TYPE(scalar_field), &
103  DIMENSION(sys_size), &
104  INTENT(IN) :: q_cons_vf
105 
106  INTEGER, DIMENSION(num_dims) :: sizes_glb, sizes_loc
107  INTEGER :: ierr
108 
109  ! Generic loop iterator
110  INTEGER :: i
111 
112  DO i = 1, sys_size
113  mpi_io_data%var(i)%sf => q_cons_vf(i)%sf
114  END DO
115 
116  ! Define global(g) and local(l) sizes for flow variables
117  sizes_glb(1) = m_glb+1; sizes_loc(1) = m+1
118  IF (n > 0) THEN
119  sizes_glb(2) = n_glb+1; sizes_loc(2) = n+1
120  IF (p > 0) THEN
121  sizes_glb(3) = p_glb+1; sizes_loc(3) = p+1
122  END IF
123  END IF
124 
125  ! Define the view for each variable
126  DO i = 1, sys_size
127  CALL mpi_type_create_subarray(num_dims,sizes_glb,sizes_loc,start_idx,&
128  mpi_order_fortran,mpi_double_precision,mpi_io_data%view(i),ierr)
129  CALL mpi_type_commit(mpi_io_data%view(i),ierr)
130  END DO
131 
132  END SUBROUTINE s_initialize_mpi_data ! ---------------------------------
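As a side note, the subarray view created above describes where each rank's local block sits inside the global array for MPI parallel I/O. Below is a minimal standalone sketch of the same MPI_TYPE_CREATE_SUBARRAY / MPI_TYPE_COMMIT pattern; the grid sizes and the assumption of exactly two ranks splitting the first direction are made up for illustration:

program subarray_view_demo
    use mpi
    implicit none
    integer, parameter :: nd = 2
    integer, dimension(nd) :: sizes_glb, sizes_loc, start_idx
    integer :: view, ierr, proc_rank

    call mpi_init(ierr)
    call mpi_comm_rank(mpi_comm_world, proc_rank, ierr)

    ! Global 8x8 array split into two 4x8 blocks along the first direction
    sizes_glb = (/ 8, 8 /)
    sizes_loc = (/ 4, 8 /)
    start_idx = (/ 4*proc_rank, 0 /)     ! zero-based global offset of the local block

    call mpi_type_create_subarray(nd, sizes_glb, sizes_loc, start_idx, &
                                  mpi_order_fortran, mpi_double_precision, view, ierr)
    call mpi_type_commit(view, ierr)

    ! The committed view would normally be handed to MPI_FILE_SET_VIEW for parallel I/O
    call mpi_type_free(view, ierr)
    call mpi_finalize(ierr)
end program subarray_view_demo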
133 
134 
135 
136 
137 
139  SUBROUTINE s_mpi_barrier() ! -------------------------------------------
141  ! Calling MPI_BARRIER
142  CALL mpi_barrier(mpi_comm_world, ierr)
143 
144  END SUBROUTINE s_mpi_barrier ! -----------------------------------------
145 
146 
147 
153  SUBROUTINE s_mpi_bcast_user_inputs() ! ---------------------------------
154 
155 
156  ! Generic loop iterator
157  INTEGER :: i
158 
159 
160  ! Logistics
161  CALL mpi_bcast( case_dir , len(case_dir), mpi_character , &
162  0 , mpi_comm_world, ierr )
163  CALL mpi_bcast( old_grid , 1 , mpi_logical , &
164  0 , mpi_comm_world, ierr )
165  CALL mpi_bcast( old_ic , 1 , mpi_logical , &
166  0 , mpi_comm_world, ierr )
167  CALL mpi_bcast( t_step_old, 1 , mpi_integer , &
168  0 , mpi_comm_world, ierr )
169 
170 
171  ! Computational domain parameters
172  CALL mpi_bcast(m, 1, mpi_integer, 0, mpi_comm_world, ierr)
173  CALL mpi_bcast(n, 1, mpi_integer, 0, mpi_comm_world, ierr)
174  CALL mpi_bcast(p, 1, mpi_integer, 0, mpi_comm_world, ierr)
175  CALL mpi_bcast(m_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
176  CALL mpi_bcast(n_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
177  CALL mpi_bcast(p_glb, 1, mpi_integer, 0, mpi_comm_world, ierr)
178 
179  CALL mpi_bcast(cyl_coord, 1, mpi_logical, 0, mpi_comm_world, ierr)
180 
181  CALL mpi_bcast( x_domain%beg, 1, mpi_double_precision, &
182  0, mpi_comm_world, ierr )
183  CALL mpi_bcast( x_domain%end, 1, mpi_double_precision, &
184  0, mpi_comm_world, ierr )
185  CALL mpi_bcast( y_domain%beg, 1, mpi_double_precision, &
186  0, mpi_comm_world, ierr )
187  CALL mpi_bcast( y_domain%end, 1, mpi_double_precision, &
188  0, mpi_comm_world, ierr )
189  CALL mpi_bcast( z_domain%beg, 1, mpi_double_precision, &
190  0, mpi_comm_world, ierr )
191  CALL mpi_bcast( z_domain%end, 1, mpi_double_precision, &
192  0, mpi_comm_world, ierr )
193 
194  CALL mpi_bcast(stretch_x, 1, mpi_logical, 0, mpi_comm_world, ierr)
195  CALL mpi_bcast(stretch_y, 1, mpi_logical, 0, mpi_comm_world, ierr)
196  CALL mpi_bcast(stretch_z, 1, mpi_logical, 0, mpi_comm_world, ierr)
197 
198  CALL mpi_bcast(a_x, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
199  CALL mpi_bcast(a_y, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
200  CALL mpi_bcast(a_z, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
201  CALL mpi_bcast(loops_x, 1, mpi_integer, 0, mpi_comm_world, ierr)
202  CALL mpi_bcast(loops_y, 1, mpi_integer, 0, mpi_comm_world, ierr)
203  CALL mpi_bcast(loops_z, 1, mpi_integer, 0, mpi_comm_world, ierr)
204  CALL mpi_bcast(x_a, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
205  CALL mpi_bcast(x_b, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
206  CALL mpi_bcast(y_a, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
207  CALL mpi_bcast(y_b, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
208  CALL mpi_bcast(z_a, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
209  CALL mpi_bcast(z_b, 1,mpi_double_precision, 0,mpi_comm_world, ierr)
210 
211 
212  ! Simulation algorithm parameters
213  CALL mpi_bcast(model_eqns, 1, mpi_integer, 0, mpi_comm_world, ierr)
214  CALL mpi_bcast(num_fluids, 1, mpi_integer, 0, mpi_comm_world, ierr)
215  CALL mpi_bcast(adv_alphan, 1, mpi_logical, 0, mpi_comm_world, ierr)
216  CALL mpi_bcast(mpp_lim, 1, mpi_logical, 0, mpi_comm_world, ierr)
217  CALL mpi_bcast(weno_order, 1, mpi_integer, 0, mpi_comm_world, ierr)
218 
219  CALL mpi_bcast( bc_x%beg, 1, mpi_double_precision, &
220  0, mpi_comm_world, ierr )
221  CALL mpi_bcast( bc_x%end, 1, mpi_double_precision, &
222  0, mpi_comm_world, ierr )
223  CALL mpi_bcast( bc_y%beg, 1, mpi_double_precision, &
224  0, mpi_comm_world, ierr )
225  CALL mpi_bcast( bc_y%end, 1, mpi_double_precision, &
226  0, mpi_comm_world, ierr )
227  CALL mpi_bcast( bc_z%beg, 1, mpi_double_precision, &
228  0, mpi_comm_world, ierr )
229  CALL mpi_bcast( bc_z%end, 1, mpi_double_precision, &
230  0, mpi_comm_world, ierr )
231 
232  CALL mpi_bcast(parallel_io, 1, mpi_logical, 0, mpi_comm_world, ierr)
233  CALL mpi_bcast(precision, 1, mpi_integer, 0, mpi_comm_world, ierr)
234  CALL mpi_bcast(perturb_flow, 1, mpi_logical, 0, mpi_comm_world, ierr)
235  CALL mpi_bcast(perturb_flow_fluid, 1, mpi_integer, 0, mpi_comm_world, ierr)
236  CALL mpi_bcast(perturb_sph, 1, mpi_logical, 0, mpi_comm_world, ierr)
237  CALL mpi_bcast(perturb_sph_fluid, 1, mpi_integer, 0, mpi_comm_world, ierr)
 238  CALL mpi_bcast(fluid_rho(1) , num_fluids_max, mpi_double_precision, &
239  0 , mpi_comm_world, &
240  ierr )
241 
242  ! Initial condition parameters
243  CALL mpi_bcast(num_patches, 1, mpi_integer, 0, mpi_comm_world, ierr)
244 
245  DO i = 1, num_patches_max
246  CALL mpi_bcast( patch_icpp(i)%geometry , 1, &
247  mpi_integer , 0, &
248  mpi_comm_world, ierr )
249  CALL mpi_bcast( patch_icpp(i)%x_centroid , 1, &
250  mpi_double_precision , 0, &
251  mpi_comm_world, ierr )
252  CALL mpi_bcast( patch_icpp(i)%y_centroid , 1, &
253  mpi_double_precision , 0, &
254  mpi_comm_world, ierr )
255  CALL mpi_bcast( patch_icpp(i)%z_centroid , 1, &
256  mpi_double_precision , 0, &
257  mpi_comm_world, ierr )
258  CALL mpi_bcast( patch_icpp(i)%length_x , 1, &
259  mpi_double_precision , 0, &
260  mpi_comm_world, ierr )
261  CALL mpi_bcast( patch_icpp(i)%length_y , 1, &
262  mpi_double_precision , 0, &
263  mpi_comm_world, ierr )
264  CALL mpi_bcast( patch_icpp(i)%length_z , 1, &
265  mpi_double_precision , 0, &
266  mpi_comm_world, ierr )
267  CALL mpi_bcast( patch_icpp(i)%radius , 1, &
268  mpi_double_precision , 0, &
269  mpi_comm_world, ierr )
270  CALL mpi_bcast( patch_icpp(i)%epsilon , 1, &
271  mpi_double_precision , 0, &
272  mpi_comm_world, ierr )
273  CALL mpi_bcast( patch_icpp(i)%beta , 1, &
274  mpi_double_precision , 0, &
275  mpi_comm_world, ierr )
276  CALL mpi_bcast( patch_icpp(i)%normal(1) , 3, &
277  mpi_double_precision , 0, &
278  mpi_comm_world, ierr )
279  CALL mpi_bcast( patch_icpp(i)%radii(1) , 3, &
280  mpi_double_precision , 0, &
281  mpi_comm_world, ierr )
282  CALL mpi_bcast( patch_icpp(i)%smoothen , 1, &
283  mpi_logical , 0, &
284  mpi_comm_world, ierr )
285  CALL mpi_bcast( patch_icpp(i)%smooth_patch_id, 1, &
286  mpi_integer , 0, &
287  mpi_comm_world, ierr )
288  CALL mpi_bcast( patch_icpp(i)%smooth_coeff , 1, &
289  mpi_double_precision , 0, &
290  mpi_comm_world, ierr )
291  CALL mpi_bcast( patch_icpp(i)%rho , 1, &
292  mpi_double_precision , 0, &
293  mpi_comm_world, ierr )
294  CALL mpi_bcast( patch_icpp(i)%vel(1) , 3, &
295  mpi_double_precision , 0, &
296  mpi_comm_world, ierr )
297  CALL mpi_bcast( patch_icpp(i)%pres , 1, &
298  mpi_double_precision , 0, &
299  mpi_comm_world, ierr )
300  CALL mpi_bcast( patch_icpp(i)%gamma , 1, &
301  mpi_double_precision , 0, &
302  mpi_comm_world, ierr )
303  CALL mpi_bcast( patch_icpp(i)%pi_inf , 1, &
304  mpi_double_precision , 0, &
305  mpi_comm_world, ierr )
306  CALL mpi_bcast( patch_icpp(i)%alter_patch(0) , &
307  num_patches_max , mpi_logical , 0, &
308  mpi_comm_world, ierr )
309  CALL mpi_bcast( patch_icpp(i)%alpha_rho(1) , &
310  num_fluids_max , mpi_double_precision, 0, &
311  mpi_comm_world, ierr )
312  CALL mpi_bcast( patch_icpp(i)%alpha(1) , &
313  num_fluids_max-1, mpi_double_precision, 0, &
314  mpi_comm_world, ierr )
315 
316  ! Bubbles
317  CALL mpi_bcast( patch_icpp(i)%r0 , 1, &
318  mpi_double_precision , 0, &
319  mpi_comm_world, ierr )
320  CALL mpi_bcast( patch_icpp(i)%v0 , 1, &
321  mpi_double_precision , 0, &
322  mpi_comm_world, ierr )
323 
324  END DO
325 
326 
327  ! Fluids physical parameters
328  DO i = 1, num_fluids_max
329  CALL mpi_bcast( fluid_pp(i)%gamma , 1, &
330  mpi_double_precision, 0, &
331  mpi_comm_world, ierr )
332  CALL mpi_bcast( fluid_pp(i)%pi_inf , 1, &
333  mpi_double_precision, 0, &
334  mpi_comm_world, ierr )
335 
336  CALL mpi_bcast( fluid_pp(i)%mul0 , 1, &
337  mpi_double_precision, 0, &
338  mpi_comm_world, ierr )
339  CALL mpi_bcast( fluid_pp(i)%ss , 1, &
340  mpi_double_precision, 0, &
341  mpi_comm_world, ierr )
342  CALL mpi_bcast( fluid_pp(i)%pv , 1, &
343  mpi_double_precision, 0, &
344  mpi_comm_world, ierr )
345  CALL mpi_bcast( fluid_pp(i)%gamma_v , 1, &
346  mpi_double_precision, 0, &
347  mpi_comm_world, ierr )
348  CALL mpi_bcast( fluid_pp(i)%M_v , 1, &
349  mpi_double_precision, 0, &
350  mpi_comm_world, ierr )
351  CALL mpi_bcast( fluid_pp(i)%mu_v , 1, &
352  mpi_double_precision, 0, &
353  mpi_comm_world, ierr )
354  CALL mpi_bcast( fluid_pp(i)%k_v , 1, &
355  mpi_double_precision, 0, &
356  mpi_comm_world, ierr )
357  END DO
358 
359  ! Tait EOS
360  CALL mpi_bcast( pref,1, &
361  mpi_double_precision,0, &
362  mpi_comm_world,ierr)
363  CALL mpi_bcast( rhoref,1, &
364  mpi_double_precision,0, &
365  mpi_comm_world,ierr)
366 
367  ! Bubble modeling
368  CALL mpi_bcast( bubbles,1, &
369  mpi_logical,0, &
370  mpi_comm_world,ierr )
371  CALL mpi_bcast( polytropic,1, &
372  mpi_logical,0, &
373  mpi_comm_world,ierr )
374  CALL mpi_bcast( polydisperse,1, &
375  mpi_logical,0, &
376  mpi_comm_world,ierr )
377  CALL mpi_bcast( poly_sigma,1, &
378  mpi_double_precision,0, &
379  mpi_comm_world,ierr)
380  CALL mpi_bcast( thermal,1, &
381  mpi_integer,0, &
382  mpi_comm_world,ierr)
383  CALL mpi_bcast( r0ref,1, &
384  mpi_double_precision,0, &
385  mpi_comm_world,ierr)
386  CALL mpi_bcast( nb,1, &
387  mpi_integer,0, &
388  mpi_comm_world,ierr)
389 
390  CALL mpi_bcast( web,1, &
391  mpi_double_precision,0, &
392  mpi_comm_world,ierr)
393  CALL mpi_bcast( ca,1, &
394  mpi_double_precision,0, &
395  mpi_comm_world,ierr)
396  CALL mpi_bcast( re_inv,1, &
397  mpi_double_precision,0, &
398  mpi_comm_world,ierr)
399 
400 
401 
402  END SUBROUTINE s_mpi_bcast_user_inputs ! -------------------------------
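The long list above follows a single collective pattern: every rank calls MPI_BCAST with root 0, and only rank 0 supplies meaningful data. A hedged minimal sketch of that pattern, with made-up variable values, is:

program bcast_demo
    use mpi
    implicit none
    integer :: m, ierr, proc_rank
    character(len=64) :: case_dir

    call mpi_init(ierr)
    call mpi_comm_rank(mpi_comm_world, proc_rank, ierr)

    if (proc_rank == 0) then
        m = 99                      ! values only the root knows
        case_dir = './example_case'
    end if

    ! Same call on every rank; after it returns, all ranks hold the root's values
    call mpi_bcast(m, 1, mpi_integer, 0, mpi_comm_world, ierr)
    call mpi_bcast(case_dir, len(case_dir), mpi_character, 0, mpi_comm_world, ierr)

    call mpi_finalize(ierr)
end program bcast_demo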
403 
404 
405 
411  SUBROUTINE s_mpi_decompose_computational_domain() ! --------------------
413  ! # of processors in the x-, y- and z-coordinate directions
414  INTEGER :: num_procs_x, num_procs_y, num_procs_z
415 
416  ! Temporary # of processors in x-, y- and z-coordinate directions
417  ! used during the processor factorization optimization procedure
418  REAL(KIND(0d0)) :: tmp_num_procs_x, tmp_num_procs_y, tmp_num_procs_z
419 
420  ! Processor factorization (fct) minimization parameter
421  REAL(KIND(0d0)) :: fct_min
422 
423  ! Cartesian processor topology communicator
424  INTEGER :: MPI_COMM_CART
425 
426  ! Number of remaining cells for a particular coordinate direction
427  ! after the bulk has evenly been distributed among the available
428  ! processors for that coordinate direction
429  INTEGER :: rem_cells
430 
431  ! Generic loop iterators
432  INTEGER :: i,j
433 
434  IF (num_procs == 1 .AND. parallel_io) THEN
435  DO i = 1, num_dims
436  start_idx(i) = 0
437  END DO
438  RETURN
439  END IF
440 
441  ! Performing the computational domain decomposition. The procedure
442  ! is optimized by ensuring that each processor contains a close to
443  ! equivalent piece of the computational domain. Note that explicit
444  ! type-casting is omitted here for code legibility purposes.
445 
446  ! Generating 3D Cartesian Processor Topology =======================
447 
448  IF(n > 0) THEN
449 
450  IF(p > 0) THEN
451 
452  IF (cyl_coord .AND. p > 0) THEN
453  ! Implement pencil processor blocking if using cylindrical coordinates so
454  ! that all cells in azimuthal direction are stored on a single processor.
455  ! This is necessary for efficient application of Fourier filter near axis.
456 
457  ! Initial values of the processor factorization optimization
458  num_procs_x = 1
459  num_procs_y = num_procs
460  num_procs_z = 1
461  ierr = -1
462 
463  ! Computing minimization variable for these initial values
464  tmp_num_procs_x = num_procs_x
465  tmp_num_procs_y = num_procs_y
466  tmp_num_procs_z = num_procs_z
467  fct_min = 10d0*abs((m+1)/tmp_num_procs_x &
468  -(n+1)/tmp_num_procs_y)
469 
470  ! Searching for optimal computational domain distribution
471  DO i = 1, num_procs
472 
473  IF( mod(num_procs,i) == 0 &
474  .AND. &
475  (m+1)/i >= num_stcls_min*weno_order ) THEN
476 
477  tmp_num_procs_x = i
478  tmp_num_procs_y = num_procs / i
479 
480  IF( fct_min >= abs((m+1)/tmp_num_procs_x &
481  -(n+1)/tmp_num_procs_y) &
482  .AND. &
483  (n+1)/tmp_num_procs_y &
484  >= &
 485  num_stcls_min*weno_order ) THEN
 486 
487  num_procs_x = i
488  num_procs_y = num_procs/i
489  fct_min = abs((m+1)/tmp_num_procs_x &
490  -(n+1)/tmp_num_procs_y)
491  ierr = 0
492 
493  END IF
494 
495  END IF
496 
497  END DO
498 
499  ELSE
500 
501  ! Initial values of the processor factorization optimization
502  num_procs_x = 1
503  num_procs_y = 1
504  num_procs_z = num_procs
505  ierr = -1
506 
507  ! Computing minimization variable for these initial values
508  tmp_num_procs_x = num_procs_x
509  tmp_num_procs_y = num_procs_y
510  tmp_num_procs_z = num_procs_z
511  fct_min = 10d0*abs((m+1)/tmp_num_procs_x &
512  -(n+1)/tmp_num_procs_y) &
513  + 10d0*abs((n+1)/tmp_num_procs_y &
514  -(p+1)/tmp_num_procs_z)
515 
516  ! Searching for optimal computational domain distribution
517  DO i = 1, num_procs
518 
519  IF( mod(num_procs,i) == 0 &
520  .AND. &
521  (m+1)/i >= num_stcls_min*weno_order ) THEN
522 
523  DO j = 1, (num_procs/i)
524 
525  IF( mod(num_procs/i,j) == 0 &
526  .AND. &
527  (n+1)/j >= num_stcls_min*weno_order ) THEN
528 
529  tmp_num_procs_x = i
530  tmp_num_procs_y = j
531  tmp_num_procs_z = num_procs / (i*j)
532 
533  IF( fct_min >= abs((m+1)/tmp_num_procs_x &
534  -(n+1)/tmp_num_procs_y) &
535  + abs((n+1)/tmp_num_procs_y &
536  -(p+1)/tmp_num_procs_z) &
537  .AND. &
538  (p+1)/tmp_num_procs_z &
539  >= &
 540  num_stcls_min*weno_order ) &
 541  THEN
542 
543  num_procs_x = i
544  num_procs_y = j
545  num_procs_z = num_procs/(i*j)
546  fct_min = abs((m+1)/tmp_num_procs_x &
547  -(n+1)/tmp_num_procs_y)&
548  + abs((n+1)/tmp_num_procs_y &
549  -(p+1)/tmp_num_procs_z)
550  ierr = 0
551 
552  END IF
553 
554  END IF
555 
556  END DO
557 
558  END IF
559 
560  END DO
561 
562  END IF
563 
564  ! Checking whether the decomposition of the computational
565  ! domain was successful
566  IF(proc_rank == 0 .AND. ierr == -1) THEN
567  print '(A)', 'Unable to decompose computational ' // &
568  'domain for selected number of ' // &
569  'processors. Exiting ...'
570  CALL mpi_abort(mpi_comm_world, err_code, ierr)
571  END IF
572 
573  ! Creating a new communicator using Cartesian topology
574  CALL mpi_cart_create( mpi_comm_world, 3, (/ num_procs_x, &
575  num_procs_y, num_procs_z /), &
576  (/ .true., .true., .true. /), &
577  .false., mpi_comm_cart, ierr )
578 
579  ! Finding corresponding Cartesian coordinates of the local
580  ! processor rank in newly declared cartesian communicator
581  CALL mpi_cart_coords( mpi_comm_cart, proc_rank, 3, &
582  proc_coords, ierr )
583 
584  ! END: Generating 3D Cartesian Processor Topology ==================
585 
586 
587  ! Sub-domain Global Parameters in z-direction ======================
588 
589  ! Number of remaining cells after majority is distributed
590  rem_cells = mod(p+1, num_procs_z)
591 
592  ! Preliminary uniform cell-width spacing
593  IF(old_grid .NEQV. .true.) THEN
594  dz = (z_domain%end - z_domain%beg) / REAL(p+1,kind(0d0))
595  END IF
596 
597  ! Optimal number of cells per processor
598  p = (p+1) / num_procs_z - 1
599 
600  ! Distributing any remaining cells
601  DO i = 1, rem_cells
602  IF(proc_coords(3) == i-1) THEN
603  p = p + 1
604  EXIT
605  END IF
606  END DO
607 
608  ! Beginning and end sub-domain boundary locations
609  IF (parallel_io .NEQV. .true.) THEN
610  IF(old_grid .NEQV. .true.) THEN
611  IF(proc_coords(3) < rem_cells) THEN
 612  z_domain%beg = z_domain%beg + dz*REAL( (p+1) * proc_coords(3) )
 613  z_domain%end = z_domain%end - dz*REAL( (p+1) * (num_procs_z - proc_coords(3) - 1) &
 614  - (num_procs_z - rem_cells) )
 615  ELSE
 616  z_domain%beg = z_domain%beg + dz*REAL( (p+1) * proc_coords(3) + rem_cells )
 617  z_domain%end = z_domain%end - dz*REAL( (p+1) * (num_procs_z - proc_coords(3) - 1) )
618  END IF
619  END IF
620  ELSE
621  IF (proc_coords(3) < rem_cells) THEN
622  start_idx(3) = (p+1) * proc_coords(3)
623  ELSE
624  start_idx(3) = (p+1) * proc_coords(3) + rem_cells
625  END IF
626  END IF
627 
628  ! ==================================================================
629 
630 
631  ! Generating 2D Cartesian Processor Topology =======================
632 
633  ELSE
634 
635  ! Initial values of the processor factorization optimization
636  num_procs_x = 1
637  num_procs_y = num_procs
638  ierr = -1
639 
640  ! Computing minimization variable for these initial values
641  tmp_num_procs_x = num_procs_x
642  tmp_num_procs_y = num_procs_y
643  fct_min = 10d0*abs((m+1)/tmp_num_procs_x &
644  -(n+1)/tmp_num_procs_y)
645 
646  ! Searching for optimal computational domain distribution
647  DO i = 1, num_procs
648 
649  IF( mod(num_procs,i) == 0 &
650  .AND. &
651  (m+1)/i >= num_stcls_min*weno_order ) THEN
652 
653  tmp_num_procs_x = i
654  tmp_num_procs_y = num_procs / i
655 
656  IF( fct_min >= abs((m+1)/tmp_num_procs_x &
657  -(n+1)/tmp_num_procs_y) &
658  .AND. &
659  (n+1)/tmp_num_procs_y &
660  >= &
 661  num_stcls_min*weno_order ) THEN
 662 
663  num_procs_x = i
664  num_procs_y = num_procs/i
665  fct_min = abs((m+1)/tmp_num_procs_x &
666  -(n+1)/tmp_num_procs_y)
667  ierr = 0
668 
669  END IF
670 
671  END IF
672 
673  END DO
674 
675  ! Checking whether the decomposition of the computational
676  ! domain was successful
677  IF(proc_rank == 0 .AND. ierr == -1) THEN
678  print '(A)', 'Unable to decompose computational ' // &
679  'domain for selected number of ' // &
680  'processors. Exiting ...'
681  CALL mpi_abort(mpi_comm_world, err_code, ierr)
682  END IF
683 
684  ! Creating a new communicator using Cartesian topology
685  CALL mpi_cart_create( mpi_comm_world, 2, (/ num_procs_x, &
686  num_procs_y /), (/ .true., &
687  .true. /), .false., mpi_comm_cart, &
688  ierr )
689 
690  ! Finding corresponding Cartesian coordinates of the local
691  ! processor rank in newly declared cartesian communicator
692  CALL mpi_cart_coords( mpi_comm_cart, proc_rank, 2, &
693  proc_coords, ierr )
694 
695  END IF
696 
697  ! END: Generating 2D Cartesian Processor Topology ==================
698 
699 
700  ! Sub-domain Global Parameters in y-direction ======================
701 
702  ! Number of remaining cells after majority has been distributed
703  rem_cells = mod(n+1, num_procs_y)
704 
705  ! Preliminary uniform cell-width spacing
706  IF(old_grid .NEQV. .true.) THEN
707  dy = (y_domain%end - y_domain%beg) / REAL(n+1, kind(0d0))
708  END IF
709 
710  ! Optimal number of cells per processor
711  n = (n+1) / num_procs_y - 1
712 
713  ! Distributing any remaining cells
714  DO i = 1, rem_cells
715  IF(proc_coords(2) == i-1) THEN
716  n = n + 1
717  EXIT
718  END IF
719  END DO
720 
721  ! Beginning and end sub-domain boundary locations
722  IF (parallel_io .NEQV. .true.) THEN
723  IF(old_grid .NEQV. .true.) THEN
724  IF(proc_coords(2) < rem_cells) THEN
 725  y_domain%beg = y_domain%beg + dy*REAL( (n+1) * proc_coords(2) )
 726  y_domain%end = y_domain%end - dy*REAL( (n+1) * (num_procs_y - proc_coords(2) - 1) &
 727  - (num_procs_y - rem_cells) )
 728  ELSE
 729  y_domain%beg = y_domain%beg + dy*REAL( (n+1) * proc_coords(2) + rem_cells )
 730  y_domain%end = y_domain%end - dy*REAL( (n+1) * (num_procs_y - proc_coords(2) - 1) )
731  END IF
732  END IF
733  ELSE
734  IF (proc_coords(2) < rem_cells) THEN
735  start_idx(2) = (n+1) * proc_coords(2)
736  ELSE
737  start_idx(2) = (n+1) * proc_coords(2) + rem_cells
738  END IF
739  END IF
740 
741  ! ==================================================================
742 
743 
744  ! Generating 1D Cartesian Processor Topology =======================
745 
746  ELSE
747 
748  ! Number of processors in the coordinate direction is equal to
749  ! the total number of processors available
750  num_procs_x = num_procs
751 
752  ! Creating a new communicator using Cartesian topology
753  CALL mpi_cart_create( mpi_comm_world, 1, (/ num_procs_x /), &
754  (/ .true. /), .false., mpi_comm_cart, &
755  ierr )
756 
757  ! Finding the corresponding Cartesian coordinates of the local
758  ! processor rank in the newly declared cartesian communicator
759  CALL mpi_cart_coords( mpi_comm_cart, proc_rank, 1, &
760  proc_coords, ierr )
761 
762  END IF
763 
764  ! ==================================================================
765 
766 
767  ! Sub-domain Global Parameters in x-direction ======================
768 
769  ! Number of remaining cells after majority has been distributed
770  rem_cells = mod(m+1, num_procs_x)
771 
772  ! Preliminary uniform cell-width spacing
773  IF(old_grid .NEQV. .true.) THEN
774  dx = (x_domain%end - x_domain%beg) / REAL(m+1, kind(0d0))
775  END IF
776 
777  ! Optimal number of cells per processor
778  m = (m+1) / num_procs_x - 1
779 
780  ! Distributing any remaining cells
781  DO i = 1, rem_cells
782  IF(proc_coords(1) == i-1) THEN
783  m = m + 1
784  EXIT
785  END IF
786  END DO
787 
788  ! Beginning and end sub-domain boundary locations
789  IF (parallel_io .NEQV. .true.) THEN
790  IF(old_grid .NEQV. .true.) THEN
791  IF(proc_coords(1) < rem_cells) THEN
 792  x_domain%beg = x_domain%beg + dx*REAL( (m+1) * proc_coords(1) )
 793  x_domain%end = x_domain%end - dx*REAL( (m+1) * (num_procs_x - proc_coords(1) - 1) &
 794  - (num_procs_x - rem_cells) )
 795  ELSE
 796  x_domain%beg = x_domain%beg + dx*REAL( (m+1) * proc_coords(1) + rem_cells )
 797  x_domain%end = x_domain%end - dx*REAL( (m+1) * (num_procs_x - proc_coords(1) - 1) )
798  END IF
799  END IF
800  ELSE
801  IF (proc_coords(1) < rem_cells) THEN
802  start_idx(1) = (m+1) * proc_coords(1)
803  ELSE
804  start_idx(1) = (m+1) * proc_coords(1) + rem_cells
805  END IF
806  END IF
807 
808  ! ==================================================================
809 
810 
811 
812  END SUBROUTINE s_mpi_decompose_computational_domain ! ------------------
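To make the factorization criterion concrete, the 2D search above picks the divisor pair (num_procs_x, num_procs_y) that balances the number of cells per rank in the two directions. The standalone sketch below reproduces that search for a made-up 200 x 100 grid on 12 ranks and omits the num_stcls_min*weno_order feasibility check for brevity:

program decomposition_demo
    implicit none
    integer :: num_procs, m, n, i, num_procs_x, num_procs_y
    real(kind(0d0)) :: fct, fct_min

    num_procs = 12
    m = 199; n = 99                 ! 200 x 100 cells (0-based upper indices, as in MFC)

    num_procs_x = 1; num_procs_y = num_procs
    fct_min = huge(fct_min)

    do i = 1, num_procs
        if (mod(num_procs, i) == 0) then
            ! Imbalance between cells per rank in x and in y for the split i x (num_procs/i)
            fct = abs( real(m+1, kind(0d0))/i - real(n+1, kind(0d0))/(num_procs/i) )
            if (fct < fct_min) then
                fct_min = fct
                num_procs_x = i
                num_procs_y = num_procs/i
            end if
        end if
    end do

    ! Prints "Best 2D layout: 4 x 3" for this example
    print '(A,I0,A,I0)', 'Best 2D layout: ', num_procs_x, ' x ', num_procs_y
end program decomposition_demo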
813 
814 
821  SUBROUTINE s_mpi_reduce_min(var_loc) ! ---------------------------------
822 
823  REAL(KIND(0d0)), INTENT(INOUT) :: var_loc
824 
825  ! Temporary storage variable that holds the reduced minimum value
826  REAL(KIND(0d0)) :: var_glb
827 
828 
829  ! Performing reduction procedure and eventually storing its result
830  ! into the variable that was initially inputted into the subroutine
831  CALL mpi_reduce( var_loc, var_glb, 1, mpi_double_precision, &
832  mpi_min, 0, mpi_comm_world, ierr )
833 
834  CALL mpi_bcast( var_glb, 1, mpi_double_precision, &
835  0, mpi_comm_world, ierr )
836 
837  var_loc = var_glb
838 
839 
840  END SUBROUTINE s_mpi_reduce_min ! --------------------------------------
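The MPI_REDUCE followed by MPI_BCAST above leaves every rank holding the global minimum; the same effect can be obtained with a single MPI_ALLREDUCE using the MPI_MIN operation. A hedged alternative sketch (not how this file is written) would be:

! var_loc and var_glb are REAL(KIND(0d0)), as in s_mpi_reduce_min
call mpi_allreduce(var_loc, var_glb, 1, mpi_double_precision, &
                   mpi_min, mpi_comm_world, ierr)
var_loc = var_glb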
841 
842 
843 
844 
846  SUBROUTINE s_mpi_finalize() ! ------------------------------
847 
848  ! Terminating the MPI environment
849  CALL mpi_finalize(ierr)
850 
851  END SUBROUTINE s_mpi_finalize ! ----------------------------
852 
853 END MODULE m_mpi_proxy
854 
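For orientation, a plausible driver-side call sequence for this module is sketched below; the exact order used by the pre-process driver is not shown in this file, so treat it as an assumption:

! Hypothetical driver fragment using the subroutine names defined above
call s_mpi_initialize()
! ... rank 0 reads and checks the user-provided inputs ...
call s_mpi_bcast_user_inputs()
call s_mpi_decompose_computational_domain()
! ... grid and initial-condition generation, parallel I/O setup via s_initialize_mpi_data ...
call s_mpi_barrier()
call s_mpi_finalize()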
real(kind(0d0)), dimension(num_fluids_max) fluid_rho
integer, parameter num_patches_max
Maximum number of patches allowed.
integer, dimension(:), allocatable proc_coords
Processor coordinates in MPI_CART_COMM.
integer t_step_old
Existing IC/grid folder.
subroutine s_mpi_barrier()
Halts all processes until all have reached the barrier.
subroutine s_mpi_decompose_computational_domain()
Description: This subroutine takes care of efficiently distributing the computational domain among th...
subroutine s_mpi_initialize()
The subroutine initializes the MPI environment and queries both the number of processors that will be ...
Definition: m_mpi_proxy.f90:63
integer perturb_sph_fluid
Fluid to be perturbed with perturb_sph flag.
type(physical_parameters), dimension(num_fluids_max) fluid_pp
Database of the physical parameters of each of the fluids that is present in the flow. These include the stiffened gas equation of state parameters, the Reynolds numbers and the Weber numbers.
integer, private err_code
Definition: m_mpi_proxy.f90:53
This module contains all of the parameters characterizing the computational domain, simulation algorithm, initial condition and the stiffened equation of state.
integer, parameter num_stcls_min
Minimum # of stencils.
Derived type annexing a scalar field (SF)
logical old_grid
Use existing grid data.
type(bounds_info) z_domain
Locations of the domain bounds in the x-, y- and z-coordinate directions.
type(bounds_info) x_domain
subroutine s_mpi_finalize()
Finalization of all MPI related processes.
integer p_glb
Global number of cells in each direction.
This file contains the definitions of all of the custom-defined types used in the pre-process code...
integer p
Number of cells in the x-, y- and z-coordinate directions.
integer num_fluids
Number of different fluids present in the flow.
logical old_ic
Use existing IC data.
integer, dimension(:), allocatable start_idx
Starting cell-center index of local processor in global grid.
integer sys_size
Number of unknowns in the system of equations.
logical mpp_lim
Alpha limiter.
integer model_eqns
Multicomponent flow model.
subroutine s_mpi_reduce_min(var_loc)
The following subroutine takes the inputted variable and determines its minimum value on the entire c...
integer precision
Precision of output files.
type(mpi_io_var), public mpi_io_data
character(len=path_len) case_dir
Case folder location.
real(kind(0d0)) pref
Reference parameters for Tait EOS.
integer, parameter num_fluids_max
Maximum number of fluids allowed.
integer num_procs
Number of processors.
subroutine s_mpi_bcast_user_inputs()
Since only the processor with rank 0 is in charge of reading and checking the consistency of the user pro...
type(bounds_info) y_domain
integer num_patches
Number of patches composing initial condition.
type(ic_patch_parameters), dimension(num_patches_max) patch_icpp
Database of the initial condition patch parameters (icpp) for each of the patches employed in the con...
integer weno_order
Order of accuracy for the WENO reconstruction.
subroutine s_mpi_abort()
The subroutine terminates the MPI execution environment.
Definition: m_mpi_proxy.f90:91
subroutine s_initialize_mpi_data(q_cons_vf)
real(kind(0d0)) dz
Minimum cell-widths in the x-, y- and z-coordinate directions.
logical parallel_io
Format of the data files.
integer num_dims
Number of spatial dimensions.
logical adv_alphan
Advection of the last volume fraction.
type(bounds_info) bc_z
Boundary conditions in the x-, y- and z-coordinate directions.
integer proc_rank
Rank of the local processor.
This module serves as a proxy to the parameters and subroutines available in the MPI implementation's...
Definition: m_mpi_proxy.f90:39
logical stretch_z
Grid stretching flags for the x-, y- and z-coordinate directions.
integer perturb_flow_fluid
Fluid to be perturbed with perturb_flow flag.