diff --git a/src/gsi/apply_scaledepwgts.f90 b/src/gsi/apply_scaledepwgts.f90
index 8b93cf0b57..e97b6fb614 100644
--- a/src/gsi/apply_scaledepwgts.f90
+++ b/src/gsi/apply_scaledepwgts.f90
@@ -42,51 +42,47 @@ subroutine init_mult_spc_wgts(jcap_in)
 !$$$ end documentation block
 
   use kinds, only: r_kind,i_kind,r_single
-  use hybrid_ensemble_parameters,only: s_ens_hv,sp_loc,grd_ens,grd_loc,sp_ens
-  use hybrid_ensemble_parameters,only: n_ens,p_sploc2ens,grd_sploc
-  use hybrid_ensemble_parameters,only: use_localization_grid
-  use gridmod,only: use_sp_eqspace
-  use general_specmod, only: general_init_spec_vars
   use constants, only: zero,half,one,two,three,rearth,pi,tiny_r_kind
-  use constants, only: rad2deg
   use mpimod, only: mype
   use general_sub2grid_mod, only: general_sub2grid_create_info
   use egrid2agrid_mod,only: g_create_egrid2agrid
   use general_sub2grid_mod, only: sub2grid_info
-  use gsi_io, only: verbose
   use hybrid_ensemble_parameters, only: nsclgrp
   use hybrid_ensemble_parameters, only: spc_multwgt,spcwgt_params,r_ensloccov4scl
   implicit none
 
   integer(i_kind),intent(in   ) :: jcap_in
-  real(r_kind),allocatable :: totwvlength(:)
-  integer(i_kind) i,ii,j,k,l,n,kk,nsigend
+  integer(i_kind) i
   integer(i_kind) ig
   real(r_kind) rwv0,rtem1,rtem2
   real (r_kind):: fwgtofwvlen
-  integer(i_kind) :: l_sum_spc_weights
+  real(r_kind) :: totwvlength
+  logical :: l_sum_spc_weights
 
 ! Spectral scale decomposition is differernt between SDL-cross and SDL-nocross
   if( r_ensloccov4scl < tiny_r_kind )then
-     l_sum_spc_weights = 1
+     l_sum_spc_weights = .false.
   else
-     l_sum_spc_weights = 0
+     l_sum_spc_weights = .true.
   end if
-  allocate(totwvlength(jcap_in))
+  spc_multwgt(0,1)=one
+  do ig=2,nsclgrp
+     spc_multwgt(0,ig)=zero
+  end do
 
-  rwv0=2*pi*rearth*0.001_r_kind
-  do i=1,jcap_in
-     totwvlength(i)= rwv0/real(i)
-  enddo
+
+  rwv0=2.0_r_kind*pi*rearth*0.001_r_kind
   do i=1,jcap_in
-     rtem1=0
+     totwvlength= rwv0/real(i)
+     rtem1=zero
      do ig=1,nsclgrp
         if(ig /= 2) then
           spc_multwgt(i,ig)=fwgtofwvlen(spcwgt_params(1,ig),spcwgt_params(2,ig),&
-                                        spcwgt_params(3,ig),spcwgt_params(4,ig),totwvlength(i))
-          if(l_sum_spc_weights == 0 ) then
+                                        spcwgt_params(3,ig),spcwgt_params(4,ig),totwvlength)
+          spc_multwgt(i,ig)=min(max(spc_multwgt(i,ig),zero),one)
+          if(l_sum_spc_weights) then
              rtem1=rtem1+spc_multwgt(i,ig)
           else
              rtem1=rtem1+spc_multwgt(i,ig)*spc_multwgt(i,ig)
@@ -94,18 +90,19 @@ subroutine init_mult_spc_wgts(jcap_in)
         endif
      enddo
      rtem2 =1.0_r_kind - rtem1
-     if(abs(rtem2) >= zero) then
+     if(rtem2 >= zero) then
-        if(l_sum_spc_weights == 0 ) then
+        if(l_sum_spc_weights) then
            spc_multwgt(i,2)=rtem2
         else
            spc_multwgt(i,2)=sqrt(rtem2)
         endif
+     else
+        if(mype == 0)write(6,*) ' rtem2 < zero ',i,rtem2,(spc_multwgt(i,ig),ig=1,nsclgrp)
+        spc_multwgt(i,2)=zero
      endif
   enddo
-  spc_multwgt=max(spc_multwgt,0.0_r_kind)
-  deallocate(totwvlength)
   return
 end subroutine init_mult_spc_wgts
 
@@ -117,18 +114,15 @@ subroutine apply_scaledepwgts(grd_in,sp_in,wbundle,spwgts,wbundle2)
 !   POC: xuguang.wang@ou.edu
 !
   use constants, only: one
-  use control_vectors, only: nrf_var,cvars2d,cvars3d,control_vector
+  use control_vectors, only: control_vector
   use kinds, only: r_kind,i_kind
   use kinds, only: r_single
-  use mpimod, only: mype,nvar_id,levs_id
-  use hybrid_ensemble_parameters, only: oz_univ_static
   use general_specmod, only: general_spec_multwgt
   use gsi_bundlemod, only: gsi_bundle
   use general_sub2grid_mod, only: general_sub2grid,general_grid2sub
   use general_specmod, only: spec_vars
   use general_sub2grid_mod, only: sub2grid_info
-  use mpimod, only: mpi_comm_world,mype,npe,ierror
-  use file_utility, only : get_lun
+  use mpimod, only: mpi_comm_world,mype
   implicit none
 
 ! Declare passed variables
@@ -139,15 +133,11 @@ subroutine apply_scaledepwgts(grd_in,sp_in,wbundle,spwgts,wbundle2)
   real(r_kind),dimension(0:sp_in%jcap),intent(in):: spwgts
 
 ! Declare local variables
-  integer(i_kind) ii,kk
-  integer(i_kind) i,j,lunit
+  integer(i_kind) kk
-  real(r_kind),dimension(grd_in%lat2,grd_in%lon2):: slndt,sicet,sst
   real(r_kind),dimension(grd_in%nlat*grd_in%nlon*grd_in%nlevs_alloc) :: hwork
   real(r_kind),dimension(grd_in%nlat,grd_in%nlon,grd_in%nlevs_alloc) :: work
   real(r_kind),dimension(sp_in%nc):: spc1
-  character*64 :: fname1
-  character*5:: varname1
 
 ! Beta1 first
 ! Get from subdomains to
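Note on the init_mult_spc_wgts hunks above: the per-wavenumber weight is now computed on the fly (totwvlength becomes a scalar), each group's weight is clamped to [0,1], and scale group 2 absorbs the residual so the group weights stay normalized; a negative residual is reported and zeroed instead of being clipped after the fact. Below is a minimal, self-contained sketch of that normalization (not part of the patch), showing only the linear-sum branch (l_sum_spc_weights true); band_wgt and its center/width tables are illustrative stand-ins for fwgtofwvlen and spcwgt_params.

   program spc_wgt_demo
   ! Sketch: normalized per-wavenumber weights over nsclgrp scale groups.
   ! The band centers/widths below are illustrative, not operational values.
     implicit none
     integer, parameter :: rk = selected_real_kind(12)
     integer, parameter :: jcap = 62, nsclgrp = 3
     real(rk), parameter :: pi = acos(-1.0_rk), rearth_km = 6371.0_rk
     real(rk) :: wgt(0:jcap,nsclgrp), wvlen, rtem1, rtem2
     integer  :: i, ig

     wgt(0,1) = 1.0_rk            ! wave-0 (global mean) goes entirely to group 1
     wgt(0,2:nsclgrp) = 0.0_rk
     do i = 1, jcap
        wvlen = 2.0_rk*pi*rearth_km/real(i,rk)   ! wavelength (km) of wavenumber i
        rtem1 = 0.0_rk
        do ig = 1, nsclgrp
           if (ig /= 2) then
              wgt(i,ig) = min(max(band_wgt(wvlen,ig), 0.0_rk), 1.0_rk)
              rtem1 = rtem1 + wgt(i,ig)
           end if
        end do
        rtem2 = 1.0_rk - rtem1          ! group 2 takes the residual weight
        wgt(i,2) = max(rtem2, 0.0_rk)   ! guard against a negative residual
     end do
     print '(a,3f8.4)', 'weights at n=10: ', wgt(10,:)

   contains

     real(rk) function band_wgt(wvlen, ig)
     ! Illustrative smooth band response standing in for fwgtofwvlen.
       real(rk), intent(in) :: wvlen
       integer,  intent(in) :: ig
       real(rk), parameter :: ctr(3) = [8000.0_rk, 3000.0_rk, 1000.0_rk]
       real(rk), parameter :: hw(3)  = [4000.0_rk, 2000.0_rk,  800.0_rk]
       band_wgt = exp(-((wvlen-ctr(ig))/hw(ig))**2)
     end function band_wgt
   end program spc_wgt_demo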
diff --git a/src/gsi/control_vectors.f90 b/src/gsi/control_vectors.f90
index ea41b36c46..97578124d2 100644
--- a/src/gsi/control_vectors.f90
+++ b/src/gsi/control_vectors.f90
@@ -895,7 +895,7 @@ real(r_quad) function qdot_prod_sub(xcv,ycv)
   m3d=xcv%step(1)%n3d
   m2d=xcv%step(1)%n2d
   itot=max(m3d,0)+max(m2d,0)
-  if(l_hyb_ens)itot=itot+n_ens
+  if(l_hyb_ens)itot=itot+n_ens*naensgrp
   allocate(partsum(itot))
   do ii=1,nsubwin
 !$omp parallel do schedule(dynamic,1) private(i)
diff --git a/src/gsi/convthin.f90 b/src/gsi/convthin.f90
index 99629b8aff..3a52188d73 100644
--- a/src/gsi/convthin.f90
+++ b/src/gsi/convthin.f90
@@ -464,8 +464,7 @@ subroutine map3grids_m(flg,pflag,pcoord,nlevp,dlat_earth,dlon_earth,pob,crit1,io
   real(r_kind) dlat1,dlon1,pob1
   real(r_kind) dx,dy,dp
-  real(r_kind) dxx,dyy,dpp
-  real(r_kind) crit!,dist1
+  real(r_kind) crit !,dist1
   logical foreswp, aftswp
 
diff --git a/src/gsi/cplr_get_fv3_regional_ensperts.f90 b/src/gsi/cplr_get_fv3_regional_ensperts.f90
index fb4afe121d..5a3e72970d 100644
--- a/src/gsi/cplr_get_fv3_regional_ensperts.f90
+++ b/src/gsi/cplr_get_fv3_regional_ensperts.f90
@@ -1021,7 +1021,7 @@ subroutine general_read_fv3_regional_parallel_over_ens(this,iope,fv3_filenamegin
    use netcdf, only: nf90_inq_dimid,nf90_inquire_dimension
    use netcdf, only: nf90_inq_varid,nf90_inquire_variable,nf90_get_var
    use kinds, only: r_kind,r_single,i_kind
-   use gridmod, only: eta1_ll,eta2_ll
+   use gridmod, only: eta1_ll
    use constants, only: zero,one,fv,zero_single,one_tenth,h300
    use hybrid_ensemble_parameters, only: grd_ens,q_hyb_ens
    use hybrid_ensemble_parameters, only: fv3sar_ensemble_opt
@@ -1036,14 +1036,11 @@ subroutine general_read_fv3_regional_parallel_over_ens(this,iope,fv3_filenamegin
    use gsi_rfv3io_mod, only: gsi_fv3ncdf_readuv
    use gsi_rfv3io_mod, only: gsi_fv3ncdf_readuv_v1
    use gsi_rfv3io_mod, only: gsi_fv3ncdf2d_read_v1
-   use directDA_radaruse_mod, only: l_use_dbz_directDA
    use gsi_bundlemod, only: gsi_gridcreate
    use gsi_bundlemod, only: gsi_grid
    use gsi_bundlemod, only: gsi_bundlecreate,gsi_bundledestroy
    use gsi_bundlemod, only: gsi_bundlegetvar
    use obsmod, only: if_model_dbz
-   use directDA_radaruse_mod, only: l_use_cvpqx, cvpqx_pval, cld_nt_updt
-   use directDA_radaruse_mod, only: l_cvpnr, cvpnr_pval
    use gsi_rfv3io_mod, only: gsi_fv3ncdf_read_ens_parallel_over_ens,gsi_fv3ncdf_readuv_ens_parallel_over_ens
@@ -1085,7 +1082,6 @@ subroutine general_read_fv3_regional_parallel_over_ens(this,iope,fv3_filenamegin
    character(len=:),allocatable :: tracers    !='fv3_tracer'
    character(len=:),allocatable :: sfcdata    !='fv3_sfcdata'
    character(len=:),allocatable :: couplerres !='coupler.res'
-   integer (i_kind) ier,istatus
 
    associate( this => this ) ! eliminates warning for unused dummy argument needed for binding
@@ -1205,7 +1201,7 @@ subroutine parallel_read_fv3_step2(this,mype,iope, &
    use hybrid_ensemble_parameters, only: grd_ens
    use mpimod, only: mpi_comm_world,ierror,mpi_rtype
    use kinds, only: r_kind,r_single,i_kind
-   use gridmod,only: itotsub
+   use constants, only: half,zero
 
    implicit none
@@ -1234,6 +1230,7 @@ subroutine parallel_read_fv3_step2(this,mype,iope, &
 ! transfer data from root to subdomains on each task
 ! scatterv used, since full grids exist only on root task.
    allocate(wrk_send_2d(grd_ens%itotsub))
+   g_oz=zero
 ! first PS (output from fill_regional_2d is a column vector with a halo)
    if(mype==iope) call this%fill_regional_2d(gg_ps,wrk_send_2d)
    call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, &
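Note on the parallel_read_fv3_step2 hunk above: g_oz is zeroed on every task before the scatter, so any part of the subdomain array the scatter does not fill is still defined. A minimal sketch of the same mpi_scatterv pattern (not part of the patch; a flat 1-D even split stands in for the grd_ens ijn_s/displs_s decomposition):

   program scatter_demo
   ! Sketch: the full field exists only on the reader task; every task
   ! pre-zeroes its receive buffer, as the new g_oz=zero line does.
     use mpi
     implicit none
     integer, parameter :: nfull = 12
     integer :: npe, mype, ierr, i
     integer, allocatable :: counts(:), displs(:)
     real(8), allocatable :: full(:), sub(:)

     call mpi_init(ierr)
     call mpi_comm_size(mpi_comm_world, npe, ierr)
     call mpi_comm_rank(mpi_comm_world, mype, ierr)

     allocate(counts(npe), displs(npe))
     counts = nfull/npe                         ! even split, remainder to last task
     counts(npe) = counts(npe) + mod(nfull,npe)
     displs(1) = 0
     do i = 2, npe
        displs(i) = displs(i-1) + counts(i-1)
     end do

     allocate(full(nfull), sub(counts(mype+1)))
     sub = 0.0d0                                ! analogous to g_oz=zero
     if (mype == 0) full = [(real(i,8), i=1,nfull)]   ! full grid on root only
     call mpi_scatterv(full, counts, displs, mpi_double_precision, &
                       sub, counts(mype+1), mpi_double_precision,  &
                       0, mpi_comm_world, ierr)
     print '(a,i3,a,*(f6.1))', 'rank', mype, ' got:', sub
     call mpi_finalize(ierr)
   end program scatter_demo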
diff --git a/src/gsi/cplr_gfs_ensmod.f90 b/src/gsi/cplr_gfs_ensmod.f90
index 67f15ebae2..550f85d209 100644
--- a/src/gsi/cplr_gfs_ensmod.f90
+++ b/src/gsi/cplr_gfs_ensmod.f90
@@ -180,7 +180,7 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
    use gsi_4dvar, only: ens_fhrlevs
    use gsi_bundlemod, only: gsi_bundle
    use gsi_bundlemod, only : assignment(=)
-   use hybrid_ensemble_parameters, only: n_ens,grd_ens,ntlevs_ens
+   use hybrid_ensemble_parameters, only: n_ens,grd_ens
    use hybrid_ensemble_parameters, only: ensemble_path
    use control_vectors, only: nc2d,nc3d
    !use control_vectors, only: cvars2d,cvars3d
@@ -202,7 +202,7 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
    character(len=*),parameter :: myname_='get_user_ens_gfs_fastread_'
    character(len=70) :: filename
    character(len=70) :: filenamesfc
-   integer(i_kind) :: i,ii,j,jj,k,n
+   integer(i_kind) :: i,ii,j,k,n
    integer(i_kind) :: io_pe,n_io_pe_s,n_io_pe_e,n_io_pe_em,i_ens
    integer(i_kind) :: ip
    integer(i_kind) :: nlon,nlat,nsig
@@ -301,6 +301,8 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
                        iasm,iaemz,jasm,jaemz,kasm,kaemz,masm,maemz, &
                        filename)
        end if
+   else
+      allocate(en_full(1,1,1,1))
    end if
 
    call mpi_allreduce(m_cvars2dw,m_cvars2d,nc2d,mpi_integer4,mpi_max,mpi_comm_world,ierror)
@@ -314,7 +316,7 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
    allocate(en_loc(ibsm:ibemz,jbsm:jbemz,kbsm:kbemz,mbsm:mbemz))
    call genex(s_a2b,en_full,en_loc)
-   if(mas == mae)deallocate(en_full)
+   deallocate(en_full)
 !   call genex_destroy_info(s_a2b)  ! check on actual routine name
@@ -921,6 +923,7 @@ subroutine parallel_read_gfsnc_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig
       call die(myname_, ': ***FATAL ERROR*** insufficient ens fcst for hybrid',999)
    endif
 
+   ierror=0
 ! If file exists, open and process
    atmges = open_dataset(filename,errcode=ierror)
    if (ierror /=0) then
diff --git a/src/gsi/general_specmod.f90 b/src/gsi/general_specmod.f90
index 20feae98de..c90187bf70 100644
--- a/src/gsi/general_specmod.f90
+++ b/src/gsi/general_specmod.f90
@@ -317,7 +317,7 @@ subroutine general_spec_multwgt(sp,spcwrk,spcwgt)
   real(r_kind),dimension(sp%nc),intent(inout) :: spcwrk
   real(r_kind),dimension(0:sp%jcap),intent(in) :: spcwgt
 
-  integer(i_kind) ii1,l,m,jmax,ks,n
+  integer(i_kind) l,jmax,ks,n
 
 !! Code borrowed from spvar in splib
   jmax=sp%jcap
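Note on the get_user_ens_gfs_fastread_ hunks in cplr_gfs_ensmod.f90 above: tasks that read nothing now allocate a token en_full(1,1,1,1), which lets the genex exchange and the following deallocate(en_full) run unconditionally on every rank instead of being guarded by if(mas == mae). A minimal sketch of the pattern (not part of the patch; the rank-0 "reader" test stands in for the io_pe bookkeeping):

   program alloc_all_ranks_demo
   ! Sketch: give every rank a valid (if tiny) allocation so collective
   ! exchange and cleanup need no per-rank special cases.
     use mpi
     implicit none
     integer :: mype, ierr
     logical :: io_task
     real(8), allocatable :: en_full(:,:,:,:)

     call mpi_init(ierr)
     call mpi_comm_rank(mpi_comm_world, mype, ierr)

     io_task = (mype == 0)           ! illustrative stand-in for io_pe ownership
     if (io_task) then
        allocate(en_full(4,4,2,3))   ! real workspace on reader tasks
        en_full = real(mype,8)
     else
        allocate(en_full(1,1,1,1))   ! minimal valid array on everyone else
     end if

     ! ... the collective redistribution (genex in GSI) would go here ...

     deallocate(en_full)             ! now safe unconditionally on all ranks
     call mpi_finalize(ierr)
   end program alloc_all_ranks_demo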
diff --git a/src/gsi/get_gefs_ensperts_dualres.f90 b/src/gsi/get_gefs_ensperts_dualres.f90
index fab95ad210..e244fa9f53 100644
--- a/src/gsi/get_gefs_ensperts_dualres.f90
+++ b/src/gsi/get_gefs_ensperts_dualres.f90
@@ -95,7 +95,7 @@ subroutine get_gefs_ensperts_dualres
 !  integer(i_kind) il,jl
   logical ice,hydrometeor
   type(sub2grid_info) :: grd_tmp
-  integer(i_kind) :: ig0,ig
+  integer(i_kind) :: ig
 
 ! Create perturbations grid and get variable names from perturbations
   if(en_perts(1,1,1)%grid%im/=grd_ens%lat2.or. &
diff --git a/src/gsi/gsi_rfv3io_mod.f90 b/src/gsi/gsi_rfv3io_mod.f90
index 7e7b11d57c..4fcb2aba1d 100644
--- a/src/gsi/gsi_rfv3io_mod.f90
+++ b/src/gsi/gsi_rfv3io_mod.f90
@@ -1824,7 +1824,7 @@ subroutine gsi_fv3ncdf2d_read(fv3filenamegin,it,ges_z,ges_t2m,ges_q2m)
    real(r_kind),intent(in),dimension(:,:),pointer::ges_q2m
    type (type_fv3regfilenameg),intent(in) :: fv3filenamegin
    character(len=max_varname_length) :: name
-   integer(i_kind),allocatable,dimension(:):: dim_id,dim
+   integer(i_kind),allocatable,dimension(:):: dim
    real(r_kind),allocatable,dimension(:):: work
    real(r_kind),allocatable,dimension(:,:):: a
    real(r_kind),allocatable,dimension(:,:,:):: sfcn2d
@@ -2774,10 +2774,9 @@ subroutine gsi_fv3ncdf_read_ens_parallel_over_ens(filenamein,fv3filenamegin, &
    type (type_fv3regfilenameg),intent(in) ::fv3filenamegin
    integer(i_kind) ,intent(in  ) :: iope
    real(r_kind),allocatable,dimension(:,:):: uu2d, uu2d_tmp
-   real(r_kind),allocatable,dimension(:):: wrk_send_2d
    real(r_kind),dimension(nlat,nlon,nsig):: hwork
    real(r_kind),dimension(nlat,nlon,nsig),intent(out),optional:: delp,tsen,w,q,oz,ql,qr,qs,qi,qg,dbz
-   character(len=max_varname_length) :: varname,vgsiname
+   character(len=max_varname_length) :: varname
    character(len=max_varname_length) :: name
    character(len=max_varname_length), allocatable,dimension(:) :: varname_files
@@ -2994,8 +2993,6 @@ subroutine gsi_fv3ncdf_readuv_ens_parallel_over_ens(ges_u,ges_v,fv3filenamegin,i
    character(:), allocatable:: filenamein
    real(r_kind),allocatable,dimension(:,:):: u2d,v2d
    real(r_kind),allocatable,dimension(:,:):: uc2d,vc2d
-   character(len=max_varname_length) :: varname,vgsiname
-   real(r_kind),allocatable,dimension(:,:,:,:):: worksub
    integer(i_kind) u_grd_VarId,v_grd_VarId
    integer(i_kind) nlatcase,nloncase
    integer(i_kind) nxcase,nycase
diff --git a/src/gsi/read_dbz_nc.f90 b/src/gsi/read_dbz_nc.f90
index ee1d3cb2e4..cddbd14de4 100644
--- a/src/gsi/read_dbz_nc.f90
+++ b/src/gsi/read_dbz_nc.f90
@@ -71,7 +71,7 @@ subroutine read_dbz_nc(nread,ndata,nodata,infile,lunout,obstype,sis,hgtl_full,no
        one_tenth,r1000,r60,r60inv,r100,r400,grav_equator, &
        eccentricity,somigliana,grav_ratio,grav,semi_major_axis,flattening
   use gridmod, only: tll2xy,nsig,nlat,nlon
-  use obsmod, only: iadate,doradaroneob,oneoblat,oneoblon,oneobheight,oneobradid, &
+  use obsmod, only: iadate,doradaroneob,oneoblat,oneoblon,oneobheight, &
      mintiltdbz,maxtiltdbz,minobrangedbz,maxobrangedbz,&
      static_gsi_nopcp_dbz,rmesh_dbz,zmesh_dbz
   use hybrid_ensemble_parameters,only : l_hyb_ens
diff --git a/src/gsi/read_iasi.f90 b/src/gsi/read_iasi.f90
index d0a3793b4e..208b333f49 100644
--- a/src/gsi/read_iasi.f90
+++ b/src/gsi/read_iasi.f90
@@ -720,7 +720,7 @@ subroutine read_iasi(mype,val_iasi,ithin,isfcalc,rmesh,jsatid,gstime,&
 !$omp parallel do schedule(dynamic,1) private(i,sc_chan,bufr_chan,radiance)
    channel_loop: do i=1,satinfo_nchan
       bufr_chan = bufr_index(i)
-      if (bufr_chan /= 0 ) then
+      if (bufr_chan > 0 ) then
 !        check that channel number is within reason
          if (( allchan(2,bufr_chan) > zero .and. allchan(2,bufr_chan) < 99999._r_kind)) then ! radiance bounds
            radiance = allchan(2,bufr_chan)*scalef(bufr_chan)
@@ -729,8 +729,6 @@ subroutine read_iasi(mype,val_iasi,ithin,isfcalc,rmesh,jsatid,gstime,&
         else
            temperature(bufr_chan) = tbmin
         endif
-      else
-         temperature(bufr_chan) = tbmin
      end if
   end do channel_loop
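Note on the read_iasi hunk above: bufr_index(i) is zero when a satinfo channel has no matching BUFR slot, so the test tightens to bufr_chan > 0 and the removed else branch, which could only execute with bufr_chan == 0 and so wrote temperature(0) with an invalid subscript, is dropped. A minimal sketch of the guard with illustrative channel data:

   program chan_guard_demo
   ! Sketch: skip channels with no valid slot instead of indexing with 0.
     implicit none
     integer, parameter :: nchan = 5
     real, parameter :: tbmin = 50.0
     integer :: bufr_index(nchan), i, bufr_chan
     real :: radiance(nchan), temperature(nchan)

     bufr_index = [1, 0, 2, 0, 3]        ! channels 2 and 4 absent from the report
     radiance = [250.0, 280.0, -9999.0, 0.0, 1.0e6]
     temperature = tbmin

     do i = 1, nchan
        bufr_chan = bufr_index(i)
        if (bufr_chan > 0) then          ! guard: only index with valid slots
           if (radiance(bufr_chan) > 0.0 .and. radiance(bufr_chan) < 99999.0) then
              temperature(bufr_chan) = radiance(bufr_chan)  ! stand-in conversion
           else
              temperature(bufr_chan) = tbmin
           end if
        end if
     end do
     print '(5f9.1)', temperature
   end program chan_guard_demo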
diff --git a/src/gsi/setuprad.f90 b/src/gsi/setuprad.f90
index faa79f0efc..822ec8ea22 100644
--- a/src/gsi/setuprad.f90
+++ b/src/gsi/setuprad.f90
@@ -998,6 +998,29 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
        endif
 
!       Compute microwave cloud liquid water or graupel water path for bias correction and QC.
+       if (adp_anglebc) then
+!         If using adaptive angle dependent bias correction, update the predictors
+!         for this part of bias correction.  The AMSUA cloud liquid water algorithm
+!         uses total angle dependent bias correction for channels 1 and 2
+          do i=1,nchanl
+             mm=ich(i)
+             if (goessndr .or. goes_img .or. ahi .or. seviri .or. ssmi .or. ssmis .or. gmi .or. abi) then
+                pred(npred,i)=nadir*deg2rad
+             else
+                pred(npred,i)=data_s(iscan_ang,n)
+             end if
+             do j=2,angord
+                pred(npred-j+1,i)=pred(npred,i)**j
+             end do
+             cbias(nadir,mm)=zero
+             if (iuse_rad(mm)/=4) then
+                do j=1,angord
+                   cbias(nadir,mm)=cbias(nadir,mm)+predchan(npred-j+1,i)*pred(npred-j+1,i)
+                end do
+             end if
+          end do
+       end if
+!*****
        clw_obs=zero
        clw_guess_retrieval=zero
        gwp=zero
@@ -1059,28 +1082,13 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
        endif
 
        predbias=zero
+       abi2km_bc = zero
+       abi2km_bc(2) = 233.5_r_kind
+       abi2km_bc(3) = 241.7_r_kind
+       abi2km_bc(4) = 250.5_r_kind
!$omp parallel do schedule(dynamic,1) private(i,mm,j,k,tlap,node,bias)
        do i=1,nchanl
           mm=ich(i)
-!          If using adaptive angle dependent bias correction, update the predicctors
-!          for this part of bias correction.  The AMSUA cloud liquid water algorithm
-!          uses total angle dependent bias correction for channels 1 and 2
-          if (adp_anglebc) then
-             if (goessndr .or. goes_img .or. ahi .or. seviri .or. ssmi .or. ssmis .or. gmi .or. abi) then
-                pred(npred,i)=nadir*deg2rad
-             else
-                pred(npred,i)=data_s(iscan_ang,n)
-             end if
-             do j=2,angord
-                pred(npred-j+1,i)=pred(npred,i)**j
-             end do
-             cbias(nadir,mm)=zero
-             if (iuse_rad(mm)/=4) then
-                do j=1,angord
-                   cbias(nadir,mm)=cbias(nadir,mm)+predchan(npred-j+1,i)*pred(npred-j+1,i)
-                end do
-             end if
-          end if
 
!*****
!          COMPUTE AND APPLY BIAS CORRECTION TO SIMULATED VALUES
@@ -1107,6 +1115,7 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
           else
              pred(3,i) = clw_obs*cosza*cosza
           end if
+          if(radmod%lcloud_fwd .and. sea) pred(3,i) = zero
 
!          Apply bias correction
@@ -1156,10 +1165,6 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
           end if
 
           if (abi2km .and. regional) then
-             abi2km_bc = zero
-             abi2km_bc(2) = 233.5_r_kind
-             abi2km_bc(3) = 241.7_r_kind
-             abi2km_bc(4) = 250.5_r_kind
              pred(:,i) = zero
              if (i>=2 .and. i<=4) then
                 if (tb_obs(i) > 190.0_r_kind .and. tb_obs(i) < 300.0_r_kind) then
diff --git a/src/gsi/state_vectors.f90 b/src/gsi/state_vectors.f90
index ffad280bf5..df332303b0 100644
--- a/src/gsi/state_vectors.f90
+++ b/src/gsi/state_vectors.f90
@@ -386,7 +386,7 @@ subroutine norms_vars(xst,pmin,pmax,psum,pnum)
! local variables
   real(r_kind),allocatable,dimension(:) :: zloc,nloc
   real(r_kind),allocatable,dimension(:,:) :: zall,nall
-  integer(i_kind) :: i,ii
+  integer(i_kind) :: i
 
   pmin=zero
   pmax=zero
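Note on the setuprad.f90 hunks above: the angle-dependent predictor and cbias update moves ahead of the cloud liquid water computation, which (per the comment) needs the updated total angle dependent bias for AMSU-A channels 1 and 2 before it runs, and the loop-invariant abi2km_bc constants move out of the per-channel !$omp loop so they are set once per observation. A minimal sketch of the hoisting pattern (not part of the patch; shapes and the predictor recurrence are simplified stand-ins):

   program hoist_demo
   ! Sketch: fill shared, loop-invariant terms in a serial pass, then let
   ! the threaded loop only read them.
     implicit none
     integer, parameter :: nchanl = 8, angord = 4, npred = 7
     real(8) :: pred(npred,nchanl), predchan(npred,nchanl)
     real(8) :: cbias(nchanl), bias(nchanl), scan_ang
     integer :: i, j

     predchan = 0.01d0
     scan_ang = 0.3d0

     ! Serial pass: predictors and the shared cbias, before threading.
     do i = 1, nchanl
        pred(npred,i) = scan_ang
        do j = 2, angord
           pred(npred-j+1,i) = pred(npred,i)**j
        end do
        cbias(i) = 0.0d0
        do j = 1, angord
           cbias(i) = cbias(i) + predchan(npred-j+1,i)*pred(npred-j+1,i)
        end do
     end do

     ! Parallel pass: each thread only reads cbias.
     !$omp parallel do schedule(dynamic,1) private(i)
     do i = 1, nchanl
        bias(i) = cbias(i)          ! apply the precomputed correction
     end do
     print '(8f8.4)', bias
   end program hoist_demo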
diff --git a/src/gsi/stpcalc.f90 b/src/gsi/stpcalc.f90
index 2d79fd3e3b..30387341e3 100644
--- a/src/gsi/stpcalc.f90
+++ b/src/gsi/stpcalc.f90
@@ -781,7 +781,9 @@ subroutine stpcalc(stpinout,sval,sbias,dirx,dval,dbias, &
            end if
            exit stepsize
         else if(ii == istp_iter)then
-           write(iout_iter,*) ' early termination due to no decrease in penalty ',cx,stp(ii)
+           if(mype == minmype)then
+              write(iout_iter,*) ' early termination due to no decrease in penalty ',cx,stp(ii)
+           end if
            stp(istp_use)=zero
            end_iter = .true.
            exit stepsize
diff --git a/src/gsi/stprad.f90 b/src/gsi/stprad.f90
index 20fe0dd1a9..e81688f7e3 100644
--- a/src/gsi/stprad.f90
+++ b/src/gsi/stprad.f90
@@ -313,7 +313,7 @@ subroutine stprad(radhead,dval,xval,rpred,spred,out,sges,nstep)
        endif
 
-!$omp parallel do schedule(dynamic,1) private(nn,ic,mm,ncr,k,kk,rad,val,val2,cg_rad,wnotgross,wgross)
+! !$omp parallel do schedule(dynamic,1) private(nn,ic,mm,ncr,k,kk,rad,val,val2,cg_rad,wnotgross,wgross)
   do nn=1,radptr%nchan
      if(nstep > 0)then
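Note on the stpcalc.f90 hunk above: the early-termination message is now written by a single task, matching how the routine guards its other iout_iter output. A minimal sketch of the rank-guarded write (not part of the patch; minmype of 0 and the output unit are illustrative):

   program rank_guard_demo
   ! Sketch: only one MPI task emits the diagnostic line.
     use mpi
     implicit none
     integer, parameter :: minmype = 0
     integer :: mype, ierr
     real(8) :: cx, stp

     call mpi_init(ierr)
     call mpi_comm_rank(mpi_comm_world, mype, ierr)
     cx = 1.0d0
     stp = 0.0d0

     if (mype == minmype) then      ! guard: one task writes, not all
        write(6,*) ' early termination due to no decrease in penalty ', cx, stp
     end if
     call mpi_finalize(ierr)
   end program rank_guard_demo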