ice_domain.F90 (forked from CICE-Consortium/CICE), 763 lines (659 loc), 29.6 KB
#ifdef ncdf
#define USE_NETCDF
#endif
!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
module ice_domain
! This module contains the model domain and routines for initializing
! the domain. It also initializes the decompositions and
! distributions across processors/threads by calling relevant
! routines in the block and distribution modules.
!
! author: Phil Jones, LANL
! Oct. 2004: Adapted from POP by William H. Lipscomb, LANL
! Feb. 2007: E. Hunke removed NE and SW boundary options (they were buggy
! and not used anyhow).
use ice_kinds_mod
use ice_constants, only: shlat, nhlat
use ice_communicate, only: my_task, master_task, get_num_procs, &
add_mpi_barriers, ice_barrier
use ice_broadcast, only: broadcast_scalar, broadcast_array
use ice_blocks, only: block, get_block, create_blocks, nghost, &
nblocks_x, nblocks_y, nblocks_tot, nx_block, ny_block, debug_blocks
use ice_distribution, only: distrb
use ice_boundary, only: ice_halo
use ice_exit, only: abort_ice
use ice_fileunits, only: nu_nml, nml_filename, nu_diag, &
get_fileunit, release_fileunit, flush_fileunit
use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted
use icepack_intfc, only: icepack_query_parameters
#ifdef USE_NETCDF
use netcdf
#endif
implicit none
private
public :: init_domain_blocks ,&
init_domain_distribution
integer (int_kind), public :: &
nblocks ! actual number of blocks on this processor
logical (kind=log_kind), public :: &
close_boundaries
integer (int_kind), dimension(:), pointer, public :: &
blocks_ice => null() ! block ids for local blocks
type (distrb), public :: &
distrb_info ! block distribution info
type (ice_halo), public :: &
halo_info ! ghost cell update info
character (char_len), public :: &
ew_boundary_type, &! type of domain bndy in each logical
ns_boundary_type ! direction (ew is i, ns is j)
logical (kind=log_kind), public :: &
maskhalo_dyn , & ! if true, use masked halo updates for dynamics
maskhalo_remap , & ! if true, use masked halo updates for transport
maskhalo_bound , & ! if true, use masked halo updates for bound_state
halo_dynbundle , & ! if true, bundle halo update in dynamics
landblockelim , & ! if true, land block elimination is on
orca_halogrid ! if true, input fields are haloed as defined by orca grid
!-----------------------------------------------------------------------
!
! module private variables - for the most part these appear as
! module variables to facilitate sharing info between init_domain_blocks
! and init_domain_distribution.
!
!-----------------------------------------------------------------------
character (char_len) :: &
distribution_type, &! method to use for distributing blocks
! 'cartesian', 'roundrobin', 'sectrobin', 'sectcart'
! 'rake', 'spacecurve', etc
distribution_wght ! method for weighting work per block
! 'block' = POP default configuration
! 'blockall' = no land block elimination
! 'latitude' = no. ocean points * |lat|
! 'file' = read distribution_wght_file
character (char_len_long) :: &
distribution_wght_file ! file for distribution_wght=file
integer (int_kind) :: &
nprocs ! num of processors
!***********************************************************************
contains
!***********************************************************************
subroutine init_domain_blocks
! This routine reads in domain information and calls the routine
! to set up the block decomposition.
use ice_distribution, only: processor_shape
use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks, &
nx_global, ny_global, block_size_x, block_size_y
use ice_fileunits, only: goto_nml
!----------------------------------------------------------------------
!
! local variables
!
!----------------------------------------------------------------------
integer (int_kind) :: &
nml_error ! namelist read error flag
character(len=char_len) :: nml_name ! text namelist name
character(len=char_len_long) :: tmpstr2 ! for namelist check
character(len=*), parameter :: subname = '(init_domain_blocks)'
!----------------------------------------------------------------------
!
! input namelists
!
!----------------------------------------------------------------------
namelist /domain_nml/ nprocs, &
max_blocks, &
block_size_x, &
block_size_y, &
nx_global, &
ny_global, &
processor_shape, &
distribution_type, &
distribution_wght, &
distribution_wght_file, &
ew_boundary_type, &
ns_boundary_type, &
maskhalo_dyn, &
maskhalo_remap, &
maskhalo_bound, &
add_mpi_barriers, &
debug_blocks
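! An illustrative domain_nml block as it might appear in the namelist input
! file (nml_filename); the values below are examples only, not defaults:
!
!   &domain_nml
!     nprocs            = 4
!     nx_global         = 100
!     ny_global         = 116
!     block_size_x      = 25
!     block_size_y      = 29
!     max_blocks        = -1
!     processor_shape   = 'slenderX2'
!     distribution_type = 'cartesian'
!     distribution_wght = 'latitude'
!     ew_boundary_type  = 'cyclic'
!     ns_boundary_type  = 'open'
!   /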
!----------------------------------------------------------------------
!
! read domain information from namelist input
!
!----------------------------------------------------------------------
nprocs = -1
processor_shape = 'slenderX2'
distribution_type = 'cartesian'
distribution_wght = 'latitude'
distribution_wght_file = 'unknown'
ew_boundary_type = 'cyclic'
ns_boundary_type = 'open'
maskhalo_dyn = .false. ! if true, use masked halos for dynamics
maskhalo_remap = .false. ! if true, use masked halos for transport
maskhalo_bound = .false. ! if true, use masked halos for bound_state
halo_dynbundle = .true. ! if true, bundle halo updates in dynamics
add_mpi_barriers = .false. ! if true, throttle communication
debug_blocks = .false. ! if true, print verbose block information
max_blocks = -1 ! max number of blocks per processor
block_size_x = -1 ! size of block in first horiz dimension
block_size_y = -1 ! size of block in second horiz dimension
nx_global = -1 ! NXGLOB, i-axis size
ny_global = -1 ! NYGLOB, j-axis size
landblockelim = .true. ! on by default
if (my_task == master_task) then
nml_name = 'domain_nml'
write(nu_diag,*) subname,' Reading ', trim(nml_name)
call get_fileunit(nu_nml)
open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error)
if (nml_error /= 0) then
call abort_ice(subname//' ERROR: domain_nml open file '// &
trim(nml_filename), file=__FILE__, line=__LINE__)
endif
call goto_nml(nu_nml,trim(nml_name),nml_error)
if (nml_error /= 0) then
call abort_ice(subname//' ERROR: searching for '// trim(nml_name), &
file=__FILE__, line=__LINE__)
endif
nml_error = 1
do while (nml_error > 0)
read(nu_nml, nml=domain_nml,iostat=nml_error)
! check if error
if (nml_error /= 0) then
! backspace and re-read erroneous line
backspace(nu_nml)
read(nu_nml,fmt='(A)') tmpstr2
call abort_ice(subname//' ERROR: ' // trim(nml_name) // ' reading ' // &
trim(tmpstr2), file=__FILE__, line=__LINE__)
endif
end do
close(nu_nml)
call release_fileunit(nu_nml)
endif
call broadcast_scalar(nprocs, master_task)
call broadcast_scalar(processor_shape, master_task)
call broadcast_scalar(distribution_type, master_task)
call broadcast_scalar(distribution_wght, master_task)
call broadcast_scalar(distribution_wght_file, master_task)
call broadcast_scalar(ew_boundary_type, master_task)
call broadcast_scalar(ns_boundary_type, master_task)
call broadcast_scalar(maskhalo_dyn, master_task)
call broadcast_scalar(maskhalo_remap, master_task)
call broadcast_scalar(maskhalo_bound, master_task)
call broadcast_scalar(add_mpi_barriers, master_task)
call broadcast_scalar(debug_blocks, master_task)
if (my_task == master_task) then
if (max_blocks < 1) then
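! estimate max_blocks as the total number of blocks,
! ceil(nx_global/block_size_x) * ceil(ny_global/block_size_y),
! divided by nprocs and rounded up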
max_blocks=( ((nx_global-1)/block_size_x + 1) * &
((ny_global-1)/block_size_y + 1) - 1) / nprocs + 1
max_blocks=max(1,max_blocks)
write(nu_diag,'(/,a,i6,/)') &
'(ice_domain): max_blocks < 1: max_blocks estimated to ',max_blocks
endif
endif
call broadcast_scalar(max_blocks, master_task)
call broadcast_scalar(block_size_x, master_task)
call broadcast_scalar(block_size_y, master_task)
call broadcast_scalar(nx_global, master_task)
call broadcast_scalar(ny_global, master_task)
!----------------------------------------------------------------------
!
! perform some basic checks on domain
!
!----------------------------------------------------------------------
if (nx_global < 1 .or. ny_global < 1 .or. ncat < 1) then
!***
!*** domain size zero or negative
!***
call abort_ice(subname//' ERROR: Invalid domain: size < 1', file=__FILE__, line=__LINE__) ! no domain
else if (nprocs /= get_num_procs()) then
!***
!*** input nprocs does not match system (eg MPI) request
!***
#if (defined CESMCOUPLED)
nprocs = get_num_procs()
#else
write(nu_diag,*) subname,' ERROR: nprocs, get_num_procs = ',nprocs,get_num_procs()
call abort_ice(subname//' ERROR: Input nprocs not same as system request', file=__FILE__, line=__LINE__)
#endif
else if (nghost < 1) then
!***
!*** must have at least 1 layer of ghost cells
!***
call abort_ice(subname//' ERROR: Not enough ghost cells allocated', file=__FILE__, line=__LINE__)
endif
!----------------------------------------------------------------------
!
! compute block decomposition and details
!
!----------------------------------------------------------------------
call create_blocks(nx_global, ny_global, trim(ew_boundary_type), &
trim(ns_boundary_type))
!----------------------------------------------------------------------
!
! Now we need grid info before proceeding further
! Print some domain information
!
!----------------------------------------------------------------------
if (my_task == master_task) then
write(nu_diag,'(/,a18,/)')'Domain Information'
write(nu_diag,'(a,i6)') ' Horizontal domain: nx = ', nx_global
write(nu_diag,'(a,i6)') ' ny = ', ny_global
write(nu_diag,'(a,i6)') ' No. of categories: nc = ', ncat
write(nu_diag,'(a,i6)') ' No. of ice layers: ni = ', nilyr
write(nu_diag,'(a,i6)') ' No. of snow layers:ns = ', nslyr
write(nu_diag,'(a,i6)') ' Processors: total = ', nprocs
write(nu_diag,'(a,a)') ' Processor shape = ', trim(processor_shape)
write(nu_diag,'(a,a)') ' Distribution type = ', trim(distribution_type)
write(nu_diag,'(a,a)') ' Distribution weight = ', trim(distribution_wght)
write(nu_diag,'(a,a)') ' Distribution wght file= ', trim(distribution_wght_file)
write(nu_diag,'(a,a)') ' ew_boundary_type = ', trim(ew_boundary_type)
write(nu_diag,'(a,a)') ' ns_boundary_type = ', trim(ns_boundary_type)
write(nu_diag,'(a,l6)') ' maskhalo_dyn = ', maskhalo_dyn
write(nu_diag,'(a,l6)') ' maskhalo_remap = ', maskhalo_remap
write(nu_diag,'(a,l6)') ' maskhalo_bound = ', maskhalo_bound
write(nu_diag,'(a,l6)') ' add_mpi_barriers = ', add_mpi_barriers
write(nu_diag,'(a,l6)') ' debug_blocks = ', debug_blocks
write(nu_diag,'(a,2i6)') ' block_size_x,_y = ', block_size_x, block_size_y
write(nu_diag,'(a,i6)') ' max_blocks = ', max_blocks
write(nu_diag,'(a,i6,/)')' Number of ghost cells = ', nghost
endif
!----------------------------------------------------------------------
end subroutine init_domain_blocks
!***********************************************************************
subroutine init_domain_distribution(KMTG,ULATG,grid_ice)
! This routine calls appropriate setup routines to distribute blocks
! across processors and defines arrays with block ids for any local
! blocks. Information about ghost cell update routines is also
! initialized here through calls to the appropriate boundary routines.
use ice_boundary, only: ice_HaloCreate
use ice_distribution, only: create_distribution, create_local_block_ids, ice_distributionGet
use ice_domain_size, only: max_blocks, nx_global, ny_global
real (dbl_kind), dimension(nx_global,ny_global), intent(in) :: &
KMTG ,&! global topography
ULATG ! global latitude field (radians)
character(len=*), intent(in) :: &
grid_ice ! ice grid system: B, C, CD, etc
!----------------------------------------------------------------------
!
! local variables
!
!----------------------------------------------------------------------
integer (int_kind), dimension (nx_global, ny_global) :: &
flat ! latitude-dependent scaling factor
character (char_len) :: outstring
integer (int_kind), parameter :: &
max_work_unit=10 ! quantize the work into values from 1,max
integer (int_kind) :: &
i,j,n ,&! dummy loop indices
ig,jg ,&! global indices
igm1,igp1,jgm1,jgp1,&! global indices
ninfo ,&! ice_distributionGet check
np, nlb, m ,&! debug blocks temporaries
work_unit ,&! size of quantized work unit
#ifdef USE_NETCDF
fid ,&! file id
varid ,&! var id
status ,&! netcdf return code
#endif
tblocks_tmp ,&! total number of blocks
nblocks_tmp ,&! temporary value of nblocks
nblocks_max ! max blocks on proc
real (dbl_kind) :: &
puny, & ! puny limit
rad_to_deg ! radians to degrees
integer (int_kind), dimension(:), allocatable :: &
blkinfo ,&! ice_distributionGet check
nocn ,&! number of ocean points per block
work_per_block ! number of work units per block
type (block) :: &
this_block ! block information for current block
real (dbl_kind), dimension(:,:), allocatable :: &
wght ! wghts from file
character(len=*), parameter :: subname = '(init_domain_distribution)'
!----------------------------------------------------------------------
!
! check that there are at least nghost+1 rows or columns of land cells
! for closed boundary conditions (otherwise grid lengths are zero in
! cells neighboring ocean points).
!
!----------------------------------------------------------------------
call icepack_query_parameters(puny_out=puny, rad_to_deg_out=rad_to_deg)
call icepack_warnings_flush(nu_diag)
if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
file=__FILE__, line=__LINE__)
if (trim(ns_boundary_type) == 'closed') then
call abort_ice(subname//' ERROR: ns_boundary_type = closed not supported', file=__FILE__, line=__LINE__)
allocate(nocn(nblocks_tot))
nocn = 0
do n=1,nblocks_tot
this_block = get_block(n,n)
if (this_block%jblock == nblocks_y) then ! north edge
do j = this_block%jhi-1, this_block%jhi
if (this_block%j_glob(j) > 0) then
do i = 1, nx_block
if (this_block%i_glob(i) > 0) then
ig = this_block%i_glob(i)
jg = this_block%j_glob(j)
if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1
endif
enddo
endif
enddo
endif
if (this_block%jblock == 1) then ! south edge
do j = this_block%jlo, this_block%jlo+1
if (this_block%j_glob(j) > 0) then
do i = 1, nx_block
if (this_block%i_glob(i) > 0) then
ig = this_block%i_glob(i)
jg = this_block%j_glob(j)
if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1
endif
enddo
endif
enddo
endif
if (nocn(n) > 0) then
write(nu_diag,*) subname,' ns closed: not enough land cells along ns edge'
call abort_ice(subname//' ERROR: Not enough land cells along ns edge for ns closed', &
file=__FILE__, line=__LINE__)
endif
enddo
deallocate(nocn)
endif
if (trim(ew_boundary_type) == 'closed') then
call abort_ice(subname//' ERROR: ew_boundary_type = closed not supported', file=__FILE__, line=__LINE__)
allocate(nocn(nblocks_tot))
nocn = 0
do n=1,nblocks_tot
this_block = get_block(n,n)
if (this_block%iblock == nblocks_x) then ! east edge
do j = 1, ny_block
if (this_block%j_glob(j) > 0) then
do i = this_block%ihi-1, this_block%ihi
if (this_block%i_glob(i) > 0) then
ig = this_block%i_glob(i)
jg = this_block%j_glob(j)
if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1
endif
enddo
endif
enddo
endif
if (this_block%iblock == 1) then ! west edge
do j = 1, ny_block
if (this_block%j_glob(j) > 0) then
do i = this_block%ilo, this_block%ilo+1
if (this_block%i_glob(i) > 0) then
ig = this_block%i_glob(i)
jg = this_block%j_glob(j)
if (KMTG(ig,jg) > puny) nocn(n) = nocn(n) + 1
endif
enddo
endif
enddo
endif
if (nocn(n) > 0) then
write(nu_diag,*) subname,' ew closed: not enough land cells along ew edge'
call abort_ice(subname//' ERROR: Not enough land cells along ew edge for ew closed', &
file=__FILE__, line=__LINE__)
endif
enddo
deallocate(nocn)
endif
!----------------------------------------------------------------------
!
! estimate the amount of work per processor using the topography
! and latitude
!
!----------------------------------------------------------------------
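! For distribution_wght = 'latitude', weight each point by its absolute
! latitude in degrees (with a minimum weight of 1), so that blocks at high
! latitudes, where sea ice is most likely, carry more work.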
if (distribution_wght == 'latitude') then
flat = max(NINT(abs(ULATG*rad_to_deg), int_kind),1) ! linear function
else
flat = 1
endif
if (distribution_wght == 'blockall') landblockelim = .false.
allocate(nocn(nblocks_tot))
if (distribution_wght == 'file') then
allocate(wght(nx_global,ny_global))
if (my_task == master_task) then
! cannot use ice_read_write due to circular dependency
#ifdef USE_NETCDF
status = nf90_open(distribution_wght_file, NF90_NOWRITE, fid)
if (status /= nf90_noerr) then
call abort_ice(subname//' ERROR: Cannot open '//trim(distribution_wght_file), &
file=__FILE__, line=__LINE__)
endif
status = nf90_inq_varid(fid, 'wght', varid)
if (status /= nf90_noerr) then
call abort_ice(subname//' ERROR: Cannot find wght '//trim(distribution_wght_file), &
file=__FILE__, line=__LINE__)
endif
status = nf90_get_var(fid, varid, wght)
if (status /= nf90_noerr) then
call abort_ice(subname//' ERROR: Cannot get wght '//trim(distribution_wght_file), &
file=__FILE__, line=__LINE__)
endif
status = nf90_close(fid)
if (status /= nf90_noerr) then
call abort_ice(subname//' ERROR: Cannot close '//trim(distribution_wght_file), &
file=__FILE__, line=__LINE__)
endif
write(nu_diag,*) 'read ',trim(distribution_wght_file),minval(wght),maxval(wght)
#else
call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', &
file=__FILE__, line=__LINE__)
#endif
endif
call broadcast_array(wght, master_task)
nocn = 0
do n=1,nblocks_tot
this_block = get_block(n,n)
do j=this_block%jlo,this_block%jhi
if (this_block%j_glob(j) > 0) then
do i=this_block%ilo,this_block%ihi
if (this_block%i_glob(i) > 0) then
ig = this_block%i_glob(i)
jg = this_block%j_glob(j)
! if (KMTG(ig,jg) > puny) &
! nocn(n) = max(nocn(n),nint(wght(ig,jg)+1.0_dbl_kind))
if (KMTG(ig,jg) > puny) then
if (wght(ig,jg) > 0.00001_dbl_kind) then
nocn(n) = nocn(n)+nint(wght(ig,jg))
else
nocn(n) = max(nocn(n),1)
endif
endif
endif
end do
endif
end do
enddo
deallocate(wght)
else
nocn = 0
do n=1,nblocks_tot
this_block = get_block(n,n)
do j=this_block%jlo,this_block%jhi
if (this_block%j_glob(j) > 0) then
do i=this_block%ilo,this_block%ihi
if (this_block%i_glob(i) > 0) then
ig = this_block%i_glob(i)
jg = this_block%j_glob(j)
if (grid_ice == 'C' .or. grid_ice == 'CD') then
! Have to be careful about block elimination with C/CD
! Use a bigger stencil
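! (a point counts as active if it or any of its four neighbors is ocean;
! on the C/CD grids velocity points lie on cell edges shared with
! neighbors, so a land cell bordering ocean still holds active points)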
igm1 = mod(ig-2+nx_global,nx_global)+1
igp1 = mod(ig,nx_global)+1
jgm1 = max(jg-1,1)
jgp1 = min(jg+1,ny_global)
if ((KMTG(ig ,jg ) > puny .or. &
KMTG(igm1,jg ) > puny .or. KMTG(igp1,jg ) > puny .or. &
KMTG(ig ,jgp1) > puny .or. KMTG(ig ,jgm1) > puny) .and. &
(ULATG(ig,jg) < shlat/rad_to_deg .or. &
ULATG(ig,jg) > nhlat/rad_to_deg) ) &
nocn(n) = nocn(n) + flat(ig,jg)
else
if (KMTG(ig,jg) > puny .and. &
(ULATG(ig,jg) < shlat/rad_to_deg .or. &
ULATG(ig,jg) > nhlat/rad_to_deg) ) &
nocn(n) = nocn(n) + flat(ig,jg)
endif
endif
end do
endif
end do
!*** with array syntax, we actually do work on non-ocean
!*** points, so where the block is not completely land,
!*** reset nocn to be the full size of the block
! use processor_shape = 'square-pop' and distribution_wght = 'block'
! to make CICE and POP decompositions/distributions identical.
#ifdef CICE_IN_NEMO
! Keep all blocks even the ones only containing land points
if (distribution_wght == 'block') nocn(n) = nx_block*ny_block
#else
if (distribution_wght == 'block' .and. nocn(n) > 0) nocn(n) = nx_block*ny_block
if (.not. landblockelim) nocn(n) = max(nocn(n),1)
#endif
end do
endif ! distribution_wght = file
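! Quantize the per-block work: blocks with no ocean points receive zero
! work units (candidates for land block elimination), blocks with any
! ocean points receive at least one, and the rest scale with nocn in
! roughly max_work_unit increments.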
work_unit = maxval(nocn)/max_work_unit + 1
!*** find number of work units per block
allocate(work_per_block(nblocks_tot))
where (nocn > 1)
work_per_block = nocn/work_unit + 2
elsewhere (nocn == 1)
work_per_block = nocn/work_unit + 1
elsewhere
work_per_block = 0
end where
if (my_task == master_task) then
write(nu_diag,*) 'ice_domain work_unit, max_work_unit = ',work_unit, max_work_unit
write(nu_diag,*) 'ice_domain nocn = ',minval(nocn),maxval(nocn),sum(nocn)
write(nu_diag,*) 'ice_domain work_per_block = ',minval(work_per_block),maxval(work_per_block),sum(work_per_block)
endif
deallocate(nocn)
!----------------------------------------------------------------------
!
! determine the distribution of blocks across processors
!
!----------------------------------------------------------------------
distrb_info = create_distribution(distribution_type, &
nprocs, work_per_block)
deallocate(work_per_block)
!----------------------------------------------------------------------
!
! allocate and determine block id for any local blocks
!
!----------------------------------------------------------------------
call create_local_block_ids(blocks_ice, distrb_info)
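! blocks_ice now holds the global IDs of the blocks assigned to this task;
! it remains unassociated (null) when this task owns no blocks, which is
! checked before setting nblocks below.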
! write out block distribution
! internal check of ice_distributionGet as part of the verification process
if (debug_blocks) then
call flush_fileunit(nu_diag)
call ice_barrier()
if (my_task == master_task) then
write(nu_diag,*) ' '
write(nu_diag,'(2a)') subname, ' Blocks by Proc:'
endif
call ice_distributionGet(distrb_info, nprocs=np, numLocalBlocks=nlb)
do m = 1, np
if (m == my_task+1) then
do n=1,nlb
write(nu_diag,'(2a,3i8)') &
subname,' my_task, local block ID, global block ID: ', &
my_task, n, distrb_info%blockGlobalID(n)
enddo
call flush_fileunit(nu_diag)
endif
call ice_barrier()
enddo
if (my_task == master_task) then
write(nu_diag,*) ' '
write(nu_diag,'(2a)') subname, ' Blocks by Global Block ID:'
do m = 1, nblocks_tot
write(nu_diag,'(2a,3i8)') &
subname,' global block id, proc, local block ID: ', &
m, distrb_info%blockLocation(m), distrb_info%blockLocalID(m)
enddo
call flush_fileunit(nu_diag)
endif
call ice_barrier()
call ice_distributionGet(distrb_info, nprocs=ninfo)
if (ninfo /= distrb_info%nprocs) &
call abort_ice(subname//' ice_distributionGet nprocs ERROR', file=__FILE__, line=__LINE__)
call ice_distributionGet(distrb_info, communicator=ninfo)
if (ninfo /= distrb_info%communicator) &
call abort_ice(subname//' ice_distributionGet communicator ERROR', file=__FILE__, line=__LINE__)
call ice_distributionGet(distrb_info, numLocalBlocks=ninfo)
if (ninfo /= distrb_info%numLocalBlocks) &
call abort_ice(subname//' ice_distributionGet numLocalBlocks ERROR', file=__FILE__, line=__LINE__)
allocate(blkinfo(ninfo))
call ice_distributionGet(distrb_info, blockGlobalID = blkinfo)
do n = 1, ninfo
if (blkinfo(n) /= distrb_info%blockGlobalID(n)) &
call abort_ice(subname//' ice_distributionGet blockGlobalID ERROR', file=__FILE__, line=__LINE__)
enddo
deallocate(blkinfo)
allocate(blkinfo(nblocks_tot))
call ice_distributionGet(distrb_info, blockLocation = blkinfo)
do n = 1, nblocks_tot
if (blkinfo(n) /= distrb_info%blockLocation(n)) &
call abort_ice(subname//' ice_distributionGet blockLocation ERROR', file=__FILE__, line=__LINE__)
enddo
call ice_distributionGet(distrb_info, blockLocalID = blkinfo)
do n = 1, nblocks_tot
if (blkinfo(n) /= distrb_info%blockLocalID(n)) &
call abort_ice(subname//' ice_distributionGet blockLocalID ERROR', file=__FILE__, line=__LINE__)
enddo
deallocate(blkinfo)
if (my_task == master_task) then
write(nu_diag,*) ' '
write(nu_diag,'(2a)') subname,' ice_distributionGet checks pass'
write(nu_diag,*) ' '
endif
endif
if (associated(blocks_ice)) then
nblocks = size(blocks_ice)
else
nblocks = 0
endif
nblocks_max = 0
tblocks_tmp = 0
do n=0,distrb_info%nprocs - 1
nblocks_tmp = nblocks
call broadcast_scalar(nblocks_tmp, n)
nblocks_max = max(nblocks_max,nblocks_tmp)
tblocks_tmp = tblocks_tmp + nblocks_tmp
end do
if (my_task == master_task) then
write(nu_diag,*) &
'ice: total number of blocks is', tblocks_tmp
endif
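! max_blocks dimensions the per-processor block arrays, so the largest
! per-processor block count must not exceed it; a larger value than needed
! only wastes memory.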
if (nblocks_max > max_blocks) then
write(outstring,*) ' ERROR: no. blocks exceeds max_blocks: increase to', nblocks_max
call abort_ice(subname//trim(outstring), file=__FILE__, line=__LINE__)
else if (nblocks_max < max_blocks) then
write(outstring,*) 'WARNING: max_blocks larger than needed: decrease to', nblocks_max
if (my_task == master_task) then
write(nu_diag,*) ' ********WARNING***********'
write(nu_diag,*) subname,trim(outstring)
write(nu_diag,*) ' **************************'
write(nu_diag,*) ' '
endif
endif
!----------------------------------------------------------------------
!
! Set up ghost cell updates for each distribution.
! Boundary types are cyclic, closed, tripole or tripoleT.
!
!----------------------------------------------------------------------
! update ghost cells on all four boundaries
halo_info = ice_HaloCreate(distrb_info, &
trim(ns_boundary_type), &
trim(ew_boundary_type), &
nx_global)
!----------------------------------------------------------------------
end subroutine init_domain_distribution
!***********************************************************************
end module ice_domain
!|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||