!--------+--------+--------+--------+--------+--------+--------+------!
! Main program for electronic transport properties of a 3D topological
! insulator nanoribbon, using the non-equilibrium Green's function
! (NEGF) method.
! Written by Q.S. Wu on Dec/25/2010
! mail: wuquansheng@gmail.com
!       wuq@phys.ethz.ch
!       spring10boy@163.com
!--------+--------+--------+--------+--------+--------+--------+------!
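!
! NEGF background: the transport routines called from this driver
! presumably evaluate the standard Landauer-Buettiker transmission
!     T(E) = Tr[ Gamma_L * G^r(E) * Gamma_R * G^a(E) ]
! where G^r(E) = (E + i*eta - H - Sigma_L - Sigma_R)^(-1) is the
! retarded Green's function of the scattering region, G^a = (G^r)^dagger,
! and Gamma_{L,R} = i*(Sigma_{L,R} - Sigma_{L,R}^dagger) are the lead
! broadening matrices; the two-terminal conductance then follows as
! G = (e^2/h)*T(E) per spin channel.
!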
program main
   !use mpi
   use para
   implicit none

   character(len=20) :: cpu_nam
   integer :: namelen
   ! error flag: a nonzero value indicates that an error occurred
   integer :: ierr
   integer :: i
   ! Fermi-energy grid
   real(Dp), allocatable :: omega(:)
   ! conductance vs Fermi energy, one column per disorder configuration
   real(Dp), allocatable :: T(:, :)
   real(Dp), allocatable :: T_mpi(:, :)
   ! timing variables
   real(Dp) :: time1, time2, time3
   ! initialize the MPI environment
   ! (cpuid and num_cpu are presumably declared in module para)
   call MPI_Init(ierr)
   call MPI_Comm_rank(MPI_Comm_world, cpuid, ierr)
   call MPI_Comm_size(MPI_Comm_world, num_cpu, ierr)
   call MPI_Get_processor_name(cpu_nam, namelen, ierr)
   if (cpuid.eq.0) then
      write(*,*)' +-----+-----+-----+-----+-----+-----+-----+-----+----+'
      write(*,*)' +             Begin our program JUPITER              +'
      write(*,*)' +-----+-----+-----+-----+-----+-----+-----+-----+----+'
      call show_now
   endif

   ! abort if MPI initialization failed
   if (cpuid.eq.0) then
      if (ierr.ne.0) then
         write(*,*)' >>> Error : MPI initialization failed'
         stop
      endif
   endif
   call now(time2)
   time1 = time2

   ! read input parameters from input.dat (master rank only)
   if (cpuid.eq.0) then
      call readinput
   endif
   ! number of lattice vectors R kept in the real-space Hamiltonian
   nrpts = 7
   allocate(ndegen(nrpts))
   allocate(irvec(2, nrpts))
   allocate(HmnR(nband*2, nband*2, nrpts))

   if (cpuid.eq.0) write(*,*) ' > Begin reading Hmn_R.data <'
   call readHmnR()
   if (cpuid.eq.0) write(*,*) ' >> Read Hmn_R.data successfully <<'
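
   ! A note on the data just read (a sketch of the assumed convention,
   ! following the Wannier90 hr.dat layout suggested by the names above):
   ! HmnR(m,n,iR) holds the hopping <m,0|H|n,R(iR)>, irvec(:,iR) the
   ! in-plane lattice vector R, and ndegen(iR) its degeneracy weight,
   ! so the Bloch Hamiltonian would be recovered as
   !     H(k) = sum_iR  HmnR(:,:,iR) / ndegen(iR) * exp( i k . R(iR) )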
   ! broadcast Nrpts to every cpu
   call MPI_bcast(Nrpts, 1, mpi_in, 0, mpi_cw, ierr)
   ! broadcast the remaining input parameters to every cpu
   call MPI_bcast(Ndim, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(Nx, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(Ny, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(Np, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(Seed, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(omeganum, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(numB, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(ndis, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(soc, 1, mpi_in, 0, mpi_cw, ierr)
   call MPI_bcast(eta, 1, mpi_dp, 0, mpi_cw, ierr)
   call MPI_bcast(minB, 1, mpi_dp, 0, mpi_cw, ierr)
   call MPI_bcast(maxB, 1, mpi_dp, 0, mpi_cw, ierr)
   call MPI_bcast(minomega, 1, mpi_dp, 0, mpi_cw, ierr)
   call MPI_bcast(maxomega, 1, mpi_dp, 0, mpi_cw, ierr)
   call MPI_bcast(Rate, 1, mpi_dp, 0, mpi_cw, ierr)
   call MPI_bcast(disorder_type, 40, mpi_char, 0, mpi_cw, ierr)
   call MPI_bcast(disorder_strength, 1, mpi_dp, 0, mpi_cw, ierr)
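
   ! Note: mpi_in, mpi_dp, mpi_char and mpi_cw used above are presumably
   ! module-level aliases (from para) for MPI_INTEGER,
   ! MPI_DOUBLE_PRECISION, MPI_CHARACTER and MPI_COMM_WORLD.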
   allocate(omega(omeganum))
   allocate(T(omeganum, Ndis))
   allocate(T_mpi(omeganum, Ndis))
   omega = 0d0
   T     = 0d0
   T_mpi = 0d0

   ! build a uniform energy grid on [minomega, maxomega]
   do i = 1, omeganum
      if (omeganum.ne.1) then
         omega(i) = minomega + (i-1)*(maxomega-minomega)/(omeganum-1)
      else
         omega(i) = minomega
      endif
   enddo
   !call ek_bulk
   call chern
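   ! chern is defined elsewhere in this code; judging from the message
   ! below and the shape of T(omeganum, Ndis), it presumably drives the
   ! conductance calculation over the energy grid and the disorder
   ! configurations.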
101 format(3i5, a10, f9.4, a11, f5.2, a3, f6.1, a2, 2f10.5)

   if (cpuid.eq.0) write(*,*)' >> Conductance calculation finished <<'
   ! gather the conductance T from each cpu and write it to a file
   !call mpi_allreduce(T, T_mpi, size(T), mpi_dp, mpi_sum, mpi_cw, ierr)
   !T = T_mpi
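   ! Design note (an assumption about the intended parallelization): if
   ! each rank filled only its own slice of T, re-enabling the
   ! mpi_allreduce above would sum the partial results into T_mpi so
   ! that rank 0 holds the complete table before writing it out.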
   if (cpuid.eq.0) then
      do i = 1, omeganum
         ! loop body is empty in this version; presumably the write of
         ! omega(i) and T(i,:) to the output file was removed or is yet
         ! to be added
      enddo
   endif
   call now(time3)
   if (cpuid.eq.0) then
      write(*,*)' '
      write(*,*)' +-----+-----+-----+-----+-----+-----+-----+-----+----+'
      write(*,'(a42,f27.1,a2,a5)') ' +>>> Total running time of this program :', &
         time3-time1, ' s', ' <<<+'
      call show_now
      write(*,*)' +>> Congratulations! You finished the calculation. <<+'
      write(*,*)' +-----+-----+-----+-----+-----+-----+-----+-----+----+'
   endif
   call mpi_finalize(ierr)

# if defined (__CUDA__)
   ! shut down the CULA framework and release GPU resources
   call cula_shutdown()
   call cuda_destroy()
   call cuda_deallocate
# endif

end program main