Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Issue with global mean weight for multiscale ensemble runs for ig > 1 plus changes to make code work for multiscale (bugs introduced in trunk). #570

Merged
merged 30 commits into from
May 30, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
b846b29
GitHub Issue NOAA-EMC/GSI#175. Use the global 127L B-Matrix in regio…
jderber-NOAA Aug 13, 2021
05f1c1f
Merge remote-tracking branch 'upstream/master'
jderber-NOAA Sep 30, 2021
059e402
Merge remote-tracking branch 'upstream/master'
jderber-NOAA Oct 15, 2021
f938842
GitHub Issue NOAA-EMC/GSI#219 Improve Minimization and fix bug in vqc
jderber-NOAA Oct 22, 2021
fafadac
Merge remote-tracking branch 'upstream/master'
jderber-NOAA Nov 3, 2021
52c5ae6
fix setupw
jderber-NOAA Nov 5, 2021
f00e377
Merge remote-tracking branch 'upstream/master'
jderber-NOAA Nov 16, 2021
7703367
Merge remote-tracking branch 'upstream/master'
jderber-NOAA Feb 17, 2022
9eb9606
Merge remote-tracking branch 'upstream/master'
jderber-NOAA Feb 23, 2022
3eb0e13
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Jun 17, 2022
ddced98
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Jun 29, 2022
f60343b
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Jul 11, 2022
8dbfbd1
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Jul 14, 2022
1554f65
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Jul 21, 2022
bf060fd
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Aug 29, 2022
3f073fa
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Nov 4, 2022
7f62d1c
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Jan 25, 2023
85cbdb1
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Feb 23, 2023
51a444b
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Mar 3, 2023
0be4126
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Mar 22, 2023
57fda95
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Apr 13, 2023
6336b79
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA Apr 17, 2023
aaa98a1
Fix issues with multiscale ensemble weights
jderber-NOAA May 5, 2023
8389884
Fix error.
jderber-NOAA May 5, 2023
a10841d
Merge remote-tracking branch 'upstream/develop' into develop
jderber-NOAA May 10, 2023
ba590e6
Merge branch 'develop' into multiscalebug
jderber-NOAA May 10, 2023
6ca95d4
Fixing issues.
jderber-NOAA May 22, 2023
889d2e9
Fix final bug.
jderber-NOAA May 23, 2023
f8a903e
Remove commented out line
jderber-NOAA May 24, 2023
db1fd2d
Remove commented out line.
jderber-NOAA May 30, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 24 additions & 34 deletions src/gsi/apply_scaledepwgts.f90
Original file line number Diff line number Diff line change
Expand Up @@ -42,70 +42,67 @@ subroutine init_mult_spc_wgts(jcap_in)
!$$$ end documentation block

use kinds, only: r_kind,i_kind,r_single
use hybrid_ensemble_parameters,only: s_ens_hv,sp_loc,grd_ens,grd_loc,sp_ens
use hybrid_ensemble_parameters,only: n_ens,p_sploc2ens,grd_sploc
use hybrid_ensemble_parameters,only: use_localization_grid
use gridmod,only: use_sp_eqspace
use general_specmod, only: general_init_spec_vars
use constants, only: zero,half,one,two,three,rearth,pi,tiny_r_kind
use constants, only: rad2deg
use mpimod, only: mype
use general_sub2grid_mod, only: general_sub2grid_create_info
use egrid2agrid_mod,only: g_create_egrid2agrid
use general_sub2grid_mod, only: sub2grid_info
use gsi_io, only: verbose
use hybrid_ensemble_parameters, only: nsclgrp
use hybrid_ensemble_parameters, only: spc_multwgt,spcwgt_params,r_ensloccov4scl
implicit none

integer(i_kind),intent(in ) :: jcap_in
real(r_kind),allocatable :: totwvlength(:)

integer(i_kind) i,ii,j,k,l,n,kk,nsigend
integer(i_kind) i
integer(i_kind) ig
real(r_kind) rwv0,rtem1,rtem2
real (r_kind):: fwgtofwvlen
integer(i_kind) :: l_sum_spc_weights
real(r_kind) :: totwvlength
logical :: l_sum_spc_weights

! Spectral scale decomposition is different between SDL-cross and SDL-nocross
if( r_ensloccov4scl < tiny_r_kind )then
l_sum_spc_weights = 1
l_sum_spc_weights = .false.
RussTreadon-NOAA marked this conversation as resolved.
Show resolved Hide resolved
else
l_sum_spc_weights = 0
l_sum_spc_weights = .true.
end if

allocate(totwvlength(jcap_in))
spc_multwgt(0,1)=one
do ig=2,nsclgrp
spc_multwgt(0,ig)=zero
end do
RussTreadon-NOAA marked this conversation as resolved.
Show resolved Hide resolved

rwv0=2*pi*rearth*0.001_r_kind
do i=1,jcap_in
totwvlength(i)= rwv0/real(i)
enddo

rwv0=2.0_r_kind*pi*rearth*0.001_r_kind
do i=1,jcap_in
rtem1=0
totwvlength= rwv0/real(i)
rtem1=zero
do ig=1,nsclgrp
if(ig /= 2) then
spc_multwgt(i,ig)=fwgtofwvlen(spcwgt_params(1,ig),spcwgt_params(2,ig),&
spcwgt_params(3,ig),spcwgt_params(4,ig),totwvlength(i))
if(l_sum_spc_weights == 0 ) then
spcwgt_params(3,ig),spcwgt_params(4,ig),totwvlength)
spc_multwgt(i,ig)=min(max(spc_multwgt(i,ig),zero),one)
if(l_sum_spc_weights) then
rtem1=rtem1+spc_multwgt(i,ig)
else
rtem1=rtem1+spc_multwgt(i,ig)*spc_multwgt(i,ig)
endif
endif
enddo
rtem2 =1.0_r_kind - rtem1
if(abs(rtem2) >= zero) then
if(rtem2 >= zero) then

if(l_sum_spc_weights == 0 ) then
if(l_sum_spc_weights) then
spc_multwgt(i,2)=rtem2
else
spc_multwgt(i,2)=sqrt(rtem2)
endif
else
if(mype == 0)write(6,*) ' rtem2 < zero ',i,rtem2,(spc_multwgt(i,ig),ig=1,nsclgrp)
spc_multwgt(i,2)=zero
endif
enddo
spc_multwgt=max(spc_multwgt,0.0_r_kind)

deallocate(totwvlength)
return
end subroutine init_mult_spc_wgts

Expand All @@ -117,18 +114,15 @@ subroutine apply_scaledepwgts(grd_in,sp_in,wbundle,spwgts,wbundle2)
! POC: xuguang.wang@ou.edu
!
use constants, only: one
use control_vectors, only: nrf_var,cvars2d,cvars3d,control_vector
use control_vectors, only: control_vector
use kinds, only: r_kind,i_kind
use kinds, only: r_single
use mpimod, only: mype,nvar_id,levs_id
use hybrid_ensemble_parameters, only: oz_univ_static
use general_specmod, only: general_spec_multwgt
use gsi_bundlemod, only: gsi_bundle
use general_sub2grid_mod, only: general_sub2grid,general_grid2sub
use general_specmod, only: spec_vars
use general_sub2grid_mod, only: sub2grid_info
use mpimod, only: mpi_comm_world,mype,npe,ierror
use file_utility, only : get_lun
use mpimod, only: mpi_comm_world,mype
implicit none

! Declare passed variables
Expand All @@ -139,15 +133,11 @@ subroutine apply_scaledepwgts(grd_in,sp_in,wbundle,spwgts,wbundle2)
real(r_kind),dimension(0:sp_in%jcap),intent(in):: spwgts

! Declare local variables
integer(i_kind) ii,kk
integer(i_kind) i,j,lunit
integer(i_kind) kk

real(r_kind),dimension(grd_in%lat2,grd_in%lon2):: slndt,sicet,sst
real(r_kind),dimension(grd_in%nlat*grd_in%nlon*grd_in%nlevs_alloc) :: hwork
real(r_kind),dimension(grd_in%nlat,grd_in%nlon,grd_in%nlevs_alloc) :: work
real(r_kind),dimension(sp_in%nc):: spc1
character*64 :: fname1
character*5:: varname1

! Beta1 first
! Get from subdomains to
Expand Down
2 changes: 1 addition & 1 deletion src/gsi/control_vectors.f90
Original file line number Diff line number Diff line change
Expand Up @@ -895,7 +895,7 @@ real(r_quad) function qdot_prod_sub(xcv,ycv)
m3d=xcv%step(1)%n3d
m2d=xcv%step(1)%n2d
itot=max(m3d,0)+max(m2d,0)
if(l_hyb_ens)itot=itot+n_ens
if(l_hyb_ens)itot=itot+n_ens*naensgrp
RussTreadon-NOAA marked this conversation as resolved.
Show resolved Hide resolved
allocate(partsum(itot))
do ii=1,nsubwin
!$omp parallel do schedule(dynamic,1) private(i)
Expand Down
3 changes: 1 addition & 2 deletions src/gsi/convthin.f90
Original file line number Diff line number Diff line change
Expand Up @@ -464,8 +464,7 @@ subroutine map3grids_m(flg,pflag,pcoord,nlevp,dlat_earth,dlon_earth,pob,crit1,io

real(r_kind) dlat1,dlon1,pob1
real(r_kind) dx,dy,dp
real(r_kind) dxx,dyy,dpp
real(r_kind) crit!,dist1
real(r_kind) crit !,dist1

logical foreswp, aftswp

Expand Down
9 changes: 3 additions & 6 deletions src/gsi/cplr_get_fv3_regional_ensperts.f90
Original file line number Diff line number Diff line change
Expand Up @@ -1021,7 +1021,7 @@ subroutine general_read_fv3_regional_parallel_over_ens(this,iope,fv3_filenamegin
use netcdf, only: nf90_inq_dimid,nf90_inquire_dimension
use netcdf, only: nf90_inq_varid,nf90_inquire_variable,nf90_get_var
use kinds, only: r_kind,r_single,i_kind
use gridmod, only: eta1_ll,eta2_ll
use gridmod, only: eta1_ll
use constants, only: zero,one,fv,zero_single,one_tenth,h300
use hybrid_ensemble_parameters, only: grd_ens,q_hyb_ens
use hybrid_ensemble_parameters, only: fv3sar_ensemble_opt
Expand All @@ -1036,14 +1036,11 @@ subroutine general_read_fv3_regional_parallel_over_ens(this,iope,fv3_filenamegin
use gsi_rfv3io_mod, only: gsi_fv3ncdf_readuv
use gsi_rfv3io_mod, only: gsi_fv3ncdf_readuv_v1
use gsi_rfv3io_mod, only: gsi_fv3ncdf2d_read_v1
use directDA_radaruse_mod, only: l_use_dbz_directDA
use gsi_bundlemod, only: gsi_gridcreate
use gsi_bundlemod, only: gsi_grid
use gsi_bundlemod, only: gsi_bundlecreate,gsi_bundledestroy
use gsi_bundlemod, only: gsi_bundlegetvar
use obsmod, only: if_model_dbz
use directDA_radaruse_mod, only: l_use_cvpqx, cvpqx_pval, cld_nt_updt
use directDA_radaruse_mod, only: l_cvpnr, cvpnr_pval
use gsi_rfv3io_mod, only: gsi_fv3ncdf_read_ens_parallel_over_ens,gsi_fv3ncdf_readuv_ens_parallel_over_ens


Expand Down Expand Up @@ -1085,7 +1082,6 @@ subroutine general_read_fv3_regional_parallel_over_ens(this,iope,fv3_filenamegin
character(len=:),allocatable :: tracers !='fv3_tracer'
character(len=:),allocatable :: sfcdata !='fv3_sfcdata'
character(len=:),allocatable :: couplerres!='coupler.res'
integer (i_kind) ier,istatus


associate( this => this ) ! eliminates warning for unused dummy argument needed for binding
Expand Down Expand Up @@ -1205,7 +1201,7 @@ subroutine parallel_read_fv3_step2(this,mype,iope, &
use hybrid_ensemble_parameters, only: grd_ens
use mpimod, only: mpi_comm_world,ierror,mpi_rtype
use kinds, only: r_kind,r_single,i_kind
use gridmod,only: itotsub
use constants, only: half,zero

implicit none

Expand Down Expand Up @@ -1234,6 +1230,7 @@ subroutine parallel_read_fv3_step2(this,mype,iope, &
! transfer data from root to subdomains on each task
! scatterv used, since full grids exist only on root task.
allocate(wrk_send_2d(grd_ens%itotsub))
g_oz=zero
! first PS (output from fill_regional_2d is a column vector with a halo)
if(mype==iope) call this%fill_regional_2d(gg_ps,wrk_send_2d)
call mpi_scatterv(wrk_send_2d,grd_ens%ijn_s,grd_ens%displs_s,mpi_rtype, &
Expand Down
9 changes: 6 additions & 3 deletions src/gsi/cplr_gfs_ensmod.f90
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
use gsi_4dvar, only: ens_fhrlevs
use gsi_bundlemod, only: gsi_bundle
use gsi_bundlemod, only : assignment(=)
use hybrid_ensemble_parameters, only: n_ens,grd_ens,ntlevs_ens
use hybrid_ensemble_parameters, only: n_ens,grd_ens
use hybrid_ensemble_parameters, only: ensemble_path
use control_vectors, only: nc2d,nc3d
!use control_vectors, only: cvars2d,cvars3d
Expand All @@ -202,7 +202,7 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
character(len=*),parameter :: myname_='get_user_ens_gfs_fastread_'
character(len=70) :: filename
character(len=70) :: filenamesfc
integer(i_kind) :: i,ii,j,jj,k,n
integer(i_kind) :: i,ii,j,k,n
integer(i_kind) :: io_pe,n_io_pe_s,n_io_pe_e,n_io_pe_em,i_ens
integer(i_kind) :: ip
integer(i_kind) :: nlon,nlat,nsig
Expand Down Expand Up @@ -301,6 +301,8 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
iasm,iaemz,jasm,jaemz,kasm,kaemz,masm,maemz, &
filename)
end if
else
allocate(en_full(1,1,1,1))
end if

call mpi_allreduce(m_cvars2dw,m_cvars2d,nc2d,mpi_integer4,mpi_max,mpi_comm_world,ierror)
Expand All @@ -314,7 +316,7 @@ subroutine get_user_ens_gfs_fastread_(ntindex,atm_bundle, &
allocate(en_loc(ibsm:ibemz,jbsm:jbemz,kbsm:kbemz,mbsm:mbemz))
call genex(s_a2b,en_full,en_loc)

if(mas == mae)deallocate(en_full)
deallocate(en_full)


! call genex_destroy_info(s_a2b) ! check on actual routine name
Expand Down Expand Up @@ -921,6 +923,7 @@ subroutine parallel_read_gfsnc_state_(en_full,m_cvars2d,m_cvars3d,nlon,nlat,nsig
call die(myname_, ': ***FATAL ERROR*** insufficient ens fcst for hybrid',999)
endif

ierror=0
! If file exists, open and process
atmges = open_dataset(filename,errcode=ierror)
if (ierror /=0) then
Expand Down
2 changes: 1 addition & 1 deletion src/gsi/general_specmod.f90
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ subroutine general_spec_multwgt(sp,spcwrk,spcwgt)
real(r_kind),dimension(sp%nc),intent(inout) :: spcwrk
real(r_kind),dimension(0:sp%jcap),intent(in) :: spcwgt

integer(i_kind) ii1,l,m,jmax,ks,n
integer(i_kind) l,jmax,ks,n

!! Code borrowed from spvar in splib
jmax=sp%jcap
Expand Down
2 changes: 1 addition & 1 deletion src/gsi/get_gefs_ensperts_dualres.f90
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ subroutine get_gefs_ensperts_dualres
! integer(i_kind) il,jl
logical ice,hydrometeor
type(sub2grid_info) :: grd_tmp
integer(i_kind) :: ig0,ig
integer(i_kind) :: ig

! Create perturbations grid and get variable names from perturbations
if(en_perts(1,1,1)%grid%im/=grd_ens%lat2.or. &
Expand Down
7 changes: 2 additions & 5 deletions src/gsi/gsi_rfv3io_mod.f90
Original file line number Diff line number Diff line change
Expand Up @@ -1824,7 +1824,7 @@ subroutine gsi_fv3ncdf2d_read(fv3filenamegin,it,ges_z,ges_t2m,ges_q2m)
real(r_kind),intent(in),dimension(:,:),pointer::ges_q2m
type (type_fv3regfilenameg),intent(in) :: fv3filenamegin
character(len=max_varname_length) :: name
integer(i_kind),allocatable,dimension(:):: dim_id,dim
integer(i_kind),allocatable,dimension(:):: dim
real(r_kind),allocatable,dimension(:):: work
real(r_kind),allocatable,dimension(:,:):: a
real(r_kind),allocatable,dimension(:,:,:):: sfcn2d
Expand Down Expand Up @@ -2774,10 +2774,9 @@ subroutine gsi_fv3ncdf_read_ens_parallel_over_ens(filenamein,fv3filenamegin, &
type (type_fv3regfilenameg),intent(in) ::fv3filenamegin
integer(i_kind) ,intent(in ) :: iope
real(r_kind),allocatable,dimension(:,:):: uu2d, uu2d_tmp
real(r_kind),allocatable,dimension(:):: wrk_send_2d
real(r_kind),dimension(nlat,nlon,nsig):: hwork
real(r_kind),dimension(nlat,nlon,nsig),intent(out),optional:: delp,tsen,w,q,oz,ql,qr,qs,qi,qg,dbz
character(len=max_varname_length) :: varname,vgsiname
character(len=max_varname_length) :: varname
character(len=max_varname_length) :: name
character(len=max_varname_length), allocatable,dimension(:) :: varname_files

Expand Down Expand Up @@ -2994,8 +2993,6 @@ subroutine gsi_fv3ncdf_readuv_ens_parallel_over_ens(ges_u,ges_v,fv3filenamegin,i
character(:), allocatable:: filenamein
real(r_kind),allocatable,dimension(:,:):: u2d,v2d
real(r_kind),allocatable,dimension(:,:):: uc2d,vc2d
character(len=max_varname_length) :: varname,vgsiname
real(r_kind),allocatable,dimension(:,:,:,:):: worksub
integer(i_kind) u_grd_VarId,v_grd_VarId
integer(i_kind) nlatcase,nloncase
integer(i_kind) nxcase,nycase
Expand Down
2 changes: 1 addition & 1 deletion src/gsi/read_dbz_nc.f90
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ subroutine read_dbz_nc(nread,ndata,nodata,infile,lunout,obstype,sis,hgtl_full,no
one_tenth,r1000,r60,r60inv,r100,r400,grav_equator, &
eccentricity,somigliana,grav_ratio,grav,semi_major_axis,flattening
use gridmod, only: tll2xy,nsig,nlat,nlon
use obsmod, only: iadate,doradaroneob,oneoblat,oneoblon,oneobheight,oneobradid, &
use obsmod, only: iadate,doradaroneob,oneoblat,oneoblon,oneobheight, &
mintiltdbz,maxtiltdbz,minobrangedbz,maxobrangedbz,&
static_gsi_nopcp_dbz,rmesh_dbz,zmesh_dbz
use hybrid_ensemble_parameters,only : l_hyb_ens
Expand Down
4 changes: 1 addition & 3 deletions src/gsi/read_iasi.f90
Original file line number Diff line number Diff line change
Expand Up @@ -720,7 +720,7 @@ subroutine read_iasi(mype,val_iasi,ithin,isfcalc,rmesh,jsatid,gstime,&
!$omp parallel do schedule(dynamic,1) private(i,sc_chan,bufr_chan,radiance)
channel_loop: do i=1,satinfo_nchan
bufr_chan = bufr_index(i)
if (bufr_chan /= 0 ) then
if (bufr_chan > 0 ) then
! check that channel number is within reason
if (( allchan(2,bufr_chan) > zero .and. allchan(2,bufr_chan) < 99999._r_kind)) then ! radiance bounds
radiance = allchan(2,bufr_chan)*scalef(bufr_chan)
Expand All @@ -729,8 +729,6 @@ subroutine read_iasi(mype,val_iasi,ithin,isfcalc,rmesh,jsatid,gstime,&
else
temperature(bufr_chan) = tbmin
endif
else
temperature(bufr_chan) = tbmin
end if
end do channel_loop

Expand Down
51 changes: 28 additions & 23 deletions src/gsi/setuprad.f90
Original file line number Diff line number Diff line change
Expand Up @@ -998,6 +998,29 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
endif

! Compute microwave cloud liquid water or graupel water path for bias correction and QC.
if (adp_anglebc) then
! If using adaptive angle dependent bias correction, update the predictors
! for this part of bias correction. The AMSUA cloud liquid water algorithm
! uses total angle dependent bias correction for channels 1 and 2
do i=1,nchanl
mm=ich(i)
if (goessndr .or. goes_img .or. ahi .or. seviri .or. ssmi .or. ssmis .or. gmi .or. abi) then
pred(npred,i)=nadir*deg2rad
else
pred(npred,i)=data_s(iscan_ang,n)
end if
do j=2,angord
pred(npred-j+1,i)=pred(npred,i)**j
end do
cbias(nadir,mm)=zero
if (iuse_rad(mm)/=4) then
do j=1,angord
cbias(nadir,mm)=cbias(nadir,mm)+predchan(npred-j+1,i)*pred(npred-j+1,i)
end do
end if
end do
end if
!*****
clw_obs=zero
clw_guess_retrieval=zero
gwp=zero
Expand Down Expand Up @@ -1059,28 +1082,13 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
endif

predbias=zero
abi2km_bc = zero
abi2km_bc(2) = 233.5_r_kind
abi2km_bc(3) = 241.7_r_kind
abi2km_bc(4) = 250.5_r_kind
!$omp parallel do schedule(dynamic,1) private(i,mm,j,k,tlap,node,bias)
do i=1,nchanl
mm=ich(i)
! If using adaptive angle dependent bias correction, update the predictors
! for this part of bias correction. The AMSUA cloud liquid water algorithm
! uses total angle dependent bias correction for channels 1 and 2
if (adp_anglebc) then
if (goessndr .or. goes_img .or. ahi .or. seviri .or. ssmi .or. ssmis .or. gmi .or. abi) then
pred(npred,i)=nadir*deg2rad
else
pred(npred,i)=data_s(iscan_ang,n)
end if
do j=2,angord
pred(npred-j+1,i)=pred(npred,i)**j
end do
cbias(nadir,mm)=zero
if (iuse_rad(mm)/=4) then
do j=1,angord
cbias(nadir,mm)=cbias(nadir,mm)+predchan(npred-j+1,i)*pred(npred-j+1,i)
end do
end if
end if

!*****
! COMPUTE AND APPLY BIAS CORRECTION TO SIMULATED VALUES
Expand All @@ -1107,6 +1115,7 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
else
pred(3,i) = clw_obs*cosza*cosza
end if

if(radmod%lcloud_fwd .and. sea) pred(3,i ) = zero

! Apply bias correction
Expand Down Expand Up @@ -1156,10 +1165,6 @@ subroutine setuprad(obsLL,odiagLL,lunin,mype,aivals,stats,nchanl,nreal,nobs,&
end if

if (abi2km .and. regional) then
abi2km_bc = zero
abi2km_bc(2) = 233.5_r_kind
abi2km_bc(3) = 241.7_r_kind
abi2km_bc(4) = 250.5_r_kind
pred(:,i) = zero
if (i>=2 .and. i<=4) then
if (tb_obs(i) > 190.0_r_kind .and. tb_obs(i) < 300.0_r_kind) then
Expand Down
Loading