diff --git a/EOS_database/download_EOSdatabase1D.sh b/EOS_database/download_EOSdatabase1D.sh
index 4db196e..f87870c 100644
--- a/EOS_database/download_EOSdatabase1D.sh
+++ b/EOS_database/download_EOSdatabase1D.sh
@@ -11,5 +11,6 @@
 #wget --no-check-certificate 'https://www.dropbox.com/scl/fi/pxm2fv24mzgirx3nevsud/EoS.pkl?rlkey=pbb5rzx2869z8hhxgblerpgh7&dl=0' -O EoS.pkl # v1.2
-wget --no-check-certificate 'https://www.dropbox.com/scl/fi/iyx1ad6bzupjtkspsjgfm/EoS.pkl?rlkey=871ywdocxhfdiwnhou6va2z9o&dl=0' -O EoS.pkl
-wget --no-check-certificate 'https://www.dropbox.com/scl/fi/kpf1t103g774wfit0dde6/EoS_v0.pkl?rlkey=q85bn8l1sjffdz4m7fiohvc4c&dl=0' -O EoS_v0.pkl
+wget --no-check-certificate 'https://www.dropbox.com/s/h1uaddwhaiu841b/EoS.pkl?dl=0' -O EoS.pkl
+
+#wget --no-check-certificate 'https://www.dropbox.com/scl/fi/kpf1t103g774wfit0dde6/EoS_v0.pkl?rlkey=q85bn8l1sjffdz4m7fiohvc4c&dl=0' -O EoS_v0.pkl
diff --git a/analysisKit/analyze_thermal_photons.py b/analysisKit/analyze_thermal_photons.py
new file mode 100644
index 0000000..c5240e1
--- /dev/null
+++ b/analysisKit/analyze_thermal_photons.py
@@ -0,0 +1,212 @@
+#!/usr/bin/env python3
+import sys
+from os import path
+import pickle
+import numpy as np
+
+
+def help_message():
+    print("Usage: {0} database_file".format(sys.argv[0]))
+    exit(0)
+
+
+centralityRange = 1.
+Reg_centrality_cut_list = [
+    0., 5., 10., 20., 30., 40., 50., 60., 70., 80., 90., 100.
+]
+centralityCutList = Reg_centrality_cut_list
+# centralityCutList = [0, 1, 2, 3, 4, 6, 8, 10, 15, 20, 30, 40, 50, 60,
+#                      70, 80, 90, 100]
+dNcutList = []    # pre-defined Nch cuts if the simulation is not minimum bias
+
+
+def computeJKMeanandErr(dataArr):
+    """compute the mean and error with the leave-one-out jackknife"""
+    nev, nEta = dataArr.shape
+    dataMean = np.mean(dataArr, axis=0)
+    dataErr = np.sqrt((nev - 1)/nev*np.sum((dataArr - dataMean)**2, axis=0))
+    return dataMean, dataErr
+
+
+def calculate_photon_dNdy(yArr, pTArr, data_ypTdiff,
+                          outputFileName: str) -> None:
+    """
+    this function calculates the photon dN/dy
+    """
+    dpT = pTArr[1] - pTArr[0]
+    nev = data_ypTdiff.shape[0]
+    dNdy = np.sum(data_ypTdiff*pTArr, axis=2)*2*np.pi*dpT
+    dNdy_mean = np.mean(dNdy, axis=0)
+    dNdy_err = np.std(dNdy, axis=0)/np.sqrt(nev)
+    results = np.array([yArr, dNdy_mean, dNdy_err])
+    np.savetxt(outputFileName,
+               results.transpose(),
+               fmt="%.4e",
+               delimiter=" ",
+               header="y dN/dy dN/dy_err")
+
+
+def calculate_photon_pTSpectra(yArr, pTArr, data_ypTdiff,
+                               outputFileName: str) -> None:
+    """
+    this function calculates the photon pT spectra
+    """
+    nev = data_ypTdiff.shape[0]
+    idx = np.abs(yArr) < 0.5
+    dy = yArr[1] - yArr[0]
+    Yinterval = len(yArr[idx])*dy
+    dNd2pT = np.sum(data_ypTdiff[:, idx, :], axis=1)*dy/Yinterval
+    dNd2pT_mean = np.mean(dNd2pT, axis=0)
+    dNd2pT_err = np.std(dNd2pT, axis=0)/np.sqrt(nev)
+    results = np.array([pTArr, dNd2pT_mean, dNd2pT_err])
+    np.savetxt(outputFileName,
+               results.transpose(),
+               fmt="%.4e",
+               delimiter=" ",
+               header="pT(GeV) dN/d2pT dN/d2pT_err")
+
+
+def calculate_photon_vnpT(yArr, pTArr, photon_dN, photon_v2, rapInterval,
+                          dataRef, etaRef, nOrder,
+                          outputFileName: str) -> None:
+    """
+    this function computes the v_n(p_T) according to the scalar-product
+    method
+    """
+    nev, nQn, nEta = dataRef.shape
+    etaArr = np.linspace(-7, 7, nEta)
+    etaRefMin = etaRef[0]
+    etaRefMax = etaRef[1]
+    etaRef1Interp = np.linspace(etaRefMin, etaRefMax, 16)
+    etaRef2Interp = np.linspace(-etaRefMax, -etaRefMin, 16)
+    QnRef1 = []
+    QnRef2 = []
+    dNRef1 = []
+    dNRef2 = []
+    for iev in range(nev):
+        Qn1_interp = np.interp(etaRef1Interp, etaArr,
+                               dataRef[iev, -1, :]*dataRef[iev, nOrder + 1, :])
+        Qn2_interp = np.interp(etaRef2Interp, etaArr,
+                               dataRef[iev, -1, :]*dataRef[iev, nOrder + 1, :])
+        Q01_interp = np.interp(etaRef1Interp, etaArr, dataRef[iev, -1, :])
+        Q02_interp = np.interp(etaRef2Interp, etaArr, dataRef[iev, -1, :])
+        QnRef1.append(np.sum(Qn1_interp))
+        QnRef2.append(np.sum(Qn2_interp))
+        dNRef1.append(np.sum(Q01_interp))
+        dNRef2.append(np.sum(Q02_interp))
+
+    QnRef1 = np.array(QnRef1).reshape((nev, 1))
+    QnRef2 = np.array(QnRef2).reshape((nev, 1))
+    dNRef1 = np.array(dNRef1).reshape((nev, 1))
+    dNRef2 = np.array(dNRef2).reshape((nev, 1))
+
+    idx = (yArr < rapInterval[1]) & (yArr > rapInterval[0])
+    dy = yArr[1] - yArr[0]
+    dNd2pT = np.sum(photon_dN[:, idx, :], axis=1)
+    QnpT = np.sum(photon_v2[:, idx, :]*photon_dN[:, idx, :], axis=1)
+
+    vnpTNum = np.real(QnpT*np.conj(QnRef1 + QnRef2))
+    n2Num = dNd2pT*(dNRef1 + dNRef2) + 1e-16
+    vnpTDen = np.real(QnRef1*np.conj(QnRef2))
+    n2Den = dNRef1*dNRef2 + 1e-16
+
+    npT = len(pTArr)    # use a local npT instead of the module-level global
+    vnpT_arr = np.zeros([nev, npT])
+    for iev in range(nev):
+        array_idx = [True]*nev
+        array_idx[iev] = False
+        array_idx = np.array(array_idx)
+
+        vnpT_arr[iev, :] = (np.mean(vnpTNum[array_idx, :], axis=0)
+                            /np.mean(n2Num[array_idx, :], axis=0)/(np.sqrt(
+                                np.mean(vnpTDen[array_idx], axis=0)
+                                /np.mean(n2Den[array_idx], axis=0)) + 1e-16))
+
+    vnpT_mean, vnpT_err = computeJKMeanandErr(vnpT_arr)
+    dNpT_mean = np.mean(dNd2pT, axis=0)*dy
+    dNpT_err = np.std(dNd2pT, axis=0)*dy/np.sqrt(nev)
+
+    results = np.array([pTArr, dNpT_mean, dNpT_err, vnpT_mean, vnpT_err])
+    np.savetxt(outputFileName,
+               results.transpose(),
+               fmt="%.4e",
+               delimiter=" ",
+               header="pT (GeV) dN/d2pT dN/d2pT_err vn(pT) vn(pT)_err")
+
+
+try:
+    database_file = str(sys.argv[1])
+except IndexError:
+    help_message()
+
+with open(database_file, "rb") as pf:
+    data = pickle.load(pf)
+
+dNdyList = []
+for event_name in data.keys():
+    dNdyList.append(data[event_name]['Nch'])
+dNdyList = -np.sort(-np.array(dNdyList))
+print(f"Number of good events: {len(dNdyList)}")
+
+for icen in range(len(centralityCutList) - 1):
+    if centralityCutList[icen + 1] < centralityCutList[icen]:
+        continue
+    selected_events_list = []
+
+    dN_dy_cut_high = dNdyList[int(len(dNdyList)*centralityCutList[icen]/100.)]
+    dN_dy_cut_low = dNdyList[min(
+        len(dNdyList) - 1,
+        int(len(dNdyList)*centralityCutList[icen + 1]/100.))]
+
+    if len(dNcutList) == len(centralityCutList):
+        dN_dy_cut_high = dNcutList[icen]
+        dN_dy_cut_low = dNcutList[icen + 1]
+
+    for event_name in data.keys():
+        if (data[event_name]['Nch'] > dN_dy_cut_low
+                and data[event_name]['Nch'] <= dN_dy_cut_high):
+            selected_events_list.append(event_name)
+
+    nev = len(selected_events_list)
+    if nev <= 0:
+        continue
+
+    cenLabel = "{:d}-{:d}".format(
+        int(centralityCutList[icen]*centralityRange),
+        int(centralityCutList[icen + 1]*centralityRange))
+    cenBinMid = (centralityCutList[icen]
+                 + centralityCutList[icen + 1])/2.*centralityRange
+    print("analyzing {}%-{}% nev = {} ...".format(
+        centralityCutList[icen]*centralityRange,
+        centralityCutList[icen + 1]*centralityRange, nev))
+    print(f"dNdy: {dN_dy_cut_low:.2f} - {dN_dy_cut_high:.2f}")
+
+    photon_dN_ypTDiff = []
+    photon_v2_ypTDiff = []
+    QnArrEta = []
+    Ncoll = []
+    pTArr = data[selected_events_list[0]]['photon_pTArr']
+    yArr = data[selected_events_list[0]]['photon_yArr']
+    npT = len(pTArr)
+    ny = len(yArr)
+    for event_name in selected_events_list:
+        Ncoll.append(data[event_name]['Ncoll'])
+        photon_dN_ypTDiff.append(data[event_name]['photon_ypTdiff'][:, 0])
+        photon_v2_ypTDiff.append(data[event_name]['photon_ypTdiff'][:, 3]
+                                 + 1j*data[event_name]['photon_ypTdiff'][:, 4])
+        QnArrEta.append(data[event_name]['chVneta_pT_0p15_2'])
+    Ncoll = np.array(Ncoll)
+    photon_dN_ypTDiff = np.array(photon_dN_ypTDiff).reshape(-1, ny, npT)
+    photon_v2_ypTDiff = np.array(photon_v2_ypTDiff).reshape(-1, ny, npT)
+    QnArrEta = np.array(QnArrEta)
+
+    mode = 'w' if icen == 0 else 'a'
+    with open("Ncoll.dat", mode) as f:
+        if icen == 0:
+            f.write("# centrality Ncoll Ncoll_err\n")
+        f.write(f"{cenBinMid} {np.mean(Ncoll):.3e} "
+                f"{np.std(Ncoll)/np.sqrt(nev):.3e}\n")
+
+    calculate_photon_dNdy(yArr, pTArr, photon_dN_ypTDiff,
+                          f"photon_dNdy_C{cenLabel}.dat")
+    calculate_photon_pTSpectra(yArr, pTArr, photon_dN_ypTDiff,
+                               f"photon_pTSpectra_C{cenLabel}.dat")
+    calculate_photon_vnpT(yArr, pTArr, photon_dN_ypTDiff, photon_v2_ypTDiff,
+                          [-0.5, 0.5], QnArrEta, [0.5, 1.], 2,
+                          f"photon_v2pT_C{cenLabel}.dat")
diff --git a/analysisKit/fetch_Qnch_from_hdf5.py b/analysisKit/fetch_Qnch_from_hdf5.py
index 02a53a6..a24502c 100644
--- a/analysisKit/fetch_Qnch_from_hdf5.py
+++ b/analysisKit/fetch_Qnch_from_hdf5.py
@@ -18,6 +18,7 @@
 initialFlag = False
 pTdiffFlag = True
 etadiffFlag = True
+photonFlag = True
 
 kinematicCutsDict = {
     "STAR_eta_-0p5_0p5_pT_0p2_4": {
@@ -79,6 +80,11 @@
 pidList = [('ch', '9999'), ('pi+', '211'), ('pi-', '-211'), ('K+', '321'),
            ('K-', '-321'), ('p', '2212'), ('pbar', '-2212')]
 
+photonList = ['QGP_2to2_total', 'QGP_AMYcollinear',
+              'HG_2to2_meson_total', 'HG_omega',
+              'HG_rho_spectralfun', 'HG_pipi_bremsstrahlung',
+              ]
+
 LHCetaRangeList = [
     '-0.4_0.4', '-0.5_0.5', '-0.8_-0.4', '-2.4_-0.5', '-2.5_-0.5',
     '-3.7_-1.7', '-4.9_-3.1', '-5.1_-2.8', '0.4_0.8', '0.5_2.4', '0.5_2.5',
     '1.7_3.7',
@@ -92,6 +98,26 @@ def help_message():
     exit(0)
 
 
+def get3DGlauberData(h5Event):
+    """
+    this function gets the 3D Glauber data from the hdf5 event
+    """
+    data = []
+    for fileName in h5Event.keys():
+        if "strings_" in fileName:
+            data = h5Event.get(fileName).attrs.get("header")
+            break
+    data = data.decode("utf-8").split(" ")
+    b = float(data[3])
+    Npart = int(data[7])
+    Ncoll = int(data[10])
+    Nstrings = int(data[13])
+    Etot = float(data[16])
+    Pztot = float(data[20])
+    randomSeed = int(data[24])
+    return [b, Npart, Ncoll, Nstrings, Etot, Pztot, randomSeed]
+
+
 def calcualte_inte_Vn_pT(pT_low, pT_high, data):
     """
     this function calculates the pT-integrated vn in a
@@ -370,6 +396,29 @@ def calcualte_yield_and_meanpT(pT_low, pT_high, data)
                                  + 1j*vn_data[:, 2*iOrder + 1])
             outdata[event_i][f"{pidName}_pTArr"] = np.array(pTdiffData)
 
+    if photonFlag:
+        eventData = get3DGlauberData(eventGroup)
+        outdata[event_i]["Ncoll"] = eventData[2]
+        outdata[event_i]["Npart"] = eventData[1]
+        outdata[event_i]["b"] = eventData[0]
+
+        photonFullres = []
+        for ichannel, channelName in enumerate(photonList):
+            vn_filename = f"{channelName}_Spvn_tot_ypTdiff.dat"
+            vn_data = np.nan_to_num(eventGroup.get(vn_filename))
+            if ichannel == 0:
+                photonFullres = vn_data
+                photonFullres[:, 3:] = (vn_data[:, 3:]
+                                        * vn_data[:, 2].reshape(-1, 1))
+            else:
+                photonFullres[:, 2] += vn_data[:, 2]
+                photonFullres[:, 3:] += (vn_data[:, 3:]
+                                         * vn_data[:, 2].reshape(-1, 1))
+        photonFullres[:, 3:] /= photonFullres[:, 2].reshape(-1, 1)
+        outdata[event_i]["photon_ypTdiff"] = photonFullres[:, 2:]
+        outdata[event_i]["photon_pTArr"] = np.unique(photonFullres[:, 1])
+        outdata[event_i]["photon_yArr"] = np.unique(photonFullres[:, 0])
+
     if etadiffFlag:
         # eta-differential spectra and vn
         vn_filename = f'particle_9999_pTeta_distribution{weakString}.dat'
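
Note on the statistics used above: computeJKMeanandErr, applied to vnpT_arr in
calculate_photon_vnpT, is the standard leave-one-out jackknife. Each row of
vnpT_arr is the observable recomputed with one event excluded, and the factor
(nev - 1)/nev rescales the spread of those leave-one-out estimates into a
one-sigma uncertainty on the mean. A minimal standalone sketch with toy
Gaussian data (illustration only, not part of this patch), which for a linear
statistic such as the mean reproduces the naive std/sqrt(nev) error:

    #!/usr/bin/env python3
    import numpy as np

    rng = np.random.default_rng(42)
    samples = rng.normal(loc=1., scale=0.2, size=(100, 5))   # (nev, nEta)
    nev = samples.shape[0]

    # leave-one-out estimates; in the patch above each one is the full
    # scalar-product v_n(pT) recomputed without event iev
    jkEstimates = np.array([
        np.mean(np.delete(samples, iev, axis=0), axis=0)
        for iev in range(nev)
    ])

    jkMean = np.mean(jkEstimates, axis=0)
    jkErr = np.sqrt((nev - 1)/nev*np.sum((jkEstimates - jkMean)**2, axis=0))

    print(jkErr)                                  # jackknife error
    print(np.std(samples, axis=0)/np.sqrt(nev))   # naive error, ~identical

The jackknife matters here because v_n(pT) is a ratio of event averages, for
which naive event-by-event error propagation misses the correlations between
numerator and denominator.
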
diff --git a/codes/get_code_packages.sh b/codes/get_code_packages.sh
index 132fb99..440c97d 100755
--- a/codes/get_code_packages.sh
+++ b/codes/get_code_packages.sh
@@ -35,7 +35,7 @@ rm -fr iSS_code/.git
 # download photonEmission wrapper
 rm -fr photonEmission_hydroInterface_code
 git clone --depth=1 https://github.com/chunshen1987/photonEmission_hydroInterface photonEmission_hydroInterface_code
-(cd photonEmission_hydroInterface_code; git checkout a8a9f14c98a3e40519d9704090c3b42eba0107be)
+(cd photonEmission_hydroInterface_code; git checkout 6009a95b65bd36b35db0a11be629f0551fbeac20)
 rm -fr photonEmission_hydroInterface_code/.git
 
 # download UrQMD afterburner
diff --git a/config/BESPost/download_posteriorChain.sh b/config/BESPost/download_posteriorChain.sh
new file mode 100644
index 0000000..f0dcc73
--- /dev/null
+++ b/config/BESPost/download_posteriorChain.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+wget --no-check-certificate 'https://www.dropbox.com/scl/fi/4so8e9jxb9wnuddrb84df/posteriorChain.pkl?rlkey=h2i9jpg3u56f3zaz9v0uud1o3&dl=0' -O posteriorChain.pkl
diff --git a/config/BESPost/parameterGenerator.py b/config/BESPost/parameterGenerator.py
new file mode 100644
index 0000000..d48a4f9
--- /dev/null
+++ b/config/BESPost/parameterGenerator.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+"""
+    This script translates the posterior chain into parameter files that
+    can be read in by the iEBE-MUSIC package.
+"""
+
+import pickle
+import sys
+import numpy as np
+
+parameterName = [
+    'BG', 'shadowing_factor', 'ylossParam4At2', 'ylossParam4At4',
+    'ylossParam4At6', 'ylossParam4var', 'remnant_energy_loss_fraction',
+    'lambdaB', 'string_source_sigma_x_200', 'string_source_sigma_x_19p6',
+    'string_source_sigma_x_7p7', 'string_source_sigma_eta_200',
+    'string_source_sigma_eta_19p6', 'string_source_sigma_eta_7p7',
+    'stringTransverseShiftFrac', 'stringPreEqFlowFactor',
+    'Shear_to_S_ratio', 'shear_muB_0p2', 'shear_muB_0p4',
+    'bulk_viscosity_10_max', 'bulk_viscosity_10_T_peak',
+    'bulk_viscosity_10_width_high', 'bulk_viscosity_10_width_low',
+    'eps_switch'
+]
+
+outputParameterName = [
+    'BG', 'shadowing_factor', 'ylossParam4At2', 'ylossParam4At4',
+    'ylossParam4At6', 'ylossParam4var', 'remnant_energy_loss_fraction',
+    'lambdaB', 'string_source_sigma_x', 'string_source_sigma_eta',
+    'stringTransverseShiftFrac', 'stringPreEqFlowFactor', 'Shear_to_S_ratio',
+    'shear_muBf0p2', 'shear_muBf0p4', 'bulk_viscosity_10_max',
+    'bulk_viscosity_10_T_peak', 'bulk_viscosity_10_width_high',
+    'bulk_viscosity_10_width_low', 'eps_switch',
+]
+
+setId = int(sys.argv[1])
+paramFile = str(sys.argv[2])
+ecm = float(sys.argv[3])
+
+with open("posteriorChain.pkl", 'rb') as f:
+    data = pickle.load(f)
+nParamSets = data['chain'].shape[0]
+setId = setId % nParamSets
+print(f"Using parameter set: {setId}")
+
+paramSet = data['chain'][setId, :]
+paramDict = {}
+for i, param_i in enumerate(parameterName):
+    paramDict[param_i] = paramSet[i]
+
+paramDict['shear_muBf0p2'] = (paramDict['shear_muB_0p2']
+                              /paramDict['Shear_to_S_ratio'])
+paramDict['shear_muBf0p4'] = (paramDict['shear_muB_0p4']
+                              /paramDict['Shear_to_S_ratio'])
+if ecm < 7.7:
+    paramDict['string_source_sigma_x'] = paramDict['string_source_sigma_x_7p7']
+    paramDict['string_source_sigma_eta'] = (
+        paramDict['string_source_sigma_eta_7p7'])
+elif ecm < 19.6:
+    frac = (np.log(ecm) - np.log(7.7)) / (np.log(19.6) - np.log(7.7))
+    paramDict['string_source_sigma_x'] = (
+        (1 - frac) * paramDict['string_source_sigma_x_7p7']
+        + frac * paramDict['string_source_sigma_x_19p6']
+    )
+    paramDict['string_source_sigma_eta'] = (
+        (1 - frac) * paramDict['string_source_sigma_eta_7p7']
+        + frac * paramDict['string_source_sigma_eta_19p6']
+    )
+elif ecm < 200.:
+    frac = (np.log(ecm) - np.log(19.6)) / (np.log(200.) - np.log(19.6))
+    paramDict['string_source_sigma_x'] = (
+        (1 - frac) * paramDict['string_source_sigma_x_19p6']
+        + frac * paramDict['string_source_sigma_x_200']
+    )
+    paramDict['string_source_sigma_eta'] = (
+        (1 - frac) * paramDict['string_source_sigma_eta_19p6']
+        + frac * paramDict['string_source_sigma_eta_200']
+    )
+else:
+    paramDict['string_source_sigma_x'] = paramDict['string_source_sigma_x_200']
+    paramDict['string_source_sigma_eta'] = (
+        paramDict['string_source_sigma_eta_200'])
+
+with open(paramFile, "w") as f:
+    for param_i in outputParameterName:
+        f.write("{} {}\n".format(param_i, paramDict[param_i]))
diff --git a/config/BESPost/parameters_dict_user_3DGlb_BayesianBESMAP_200_AuAu.py b/config/BESPost/parameters_dict_user_3DGlb_BayesianBESMAP_200_AuAu.py
new file mode 100644
index 0000000..aecba28
--- /dev/null
+++ b/config/BESPost/parameters_dict_user_3DGlb_BayesianBESMAP_200_AuAu.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+"""
+    This script contains all the user-modified parameters in
+    the iEBE-MUSIC package.
+"""
+
+
+# control parameters
+control_dict = {
+    'initial_state_type': "3DMCGlauber_dynamical",
+    'walltime': "120:00:00",          # walltime to run
+    'save_hydro_surfaces': False,     # flag to save hydro surfaces
+    'save_UrQMD_files': False,        # flag to save UrQMD files
+    'compute_photon_emission': False, # flag to compute EM radiation from the hydrodynamic medium
+    'usePosteriorParameters': False,
+    'PosteriorChainFilePath': "config/BESPost",
+    'PosteriorParamSet': 0,
+}
+
+
+# 3DMCGlauber model
+mcglauber_dict = {
+    'database_name': "self",    # self: generate initial condition on the fly
+    'Projectile': "Au",         # projectile nucleus name
+    'Target': "Au",             # target nucleus name
+    'roots': 200,               # collision energy (GeV)
+    'seed': -1,                 # random seed (-1: system)
+    'baryon_junctions': 1,      # 0: baryon number assumed to be at the string end
+                                # 1: baryon number transported assuming baryon
+                                #    junctions (at smaller x)
+                                #    see arXiv:nucl-th/9602027
+    'lambdaB': 0.129,           # parameter that controls the strength of
+                                # the baryon junction stopping
+    'lambdaBs': 1.0,            # fraction of single-to-double string junction stopping
+    'BG': 17.095,
+    'shadowing_factor': 0.145,  # shadowing factor for producing strings from multiple scatterings
+    'rapidity_loss_method': 4,
+    'remnant_energy_loss_fraction': 0.611,  # nucleon remnant's energy loss fraction (fraction of the string's y_loss) [0, 1]
+    'ylossParam4At2': 1.467,
+    'ylossParam4At4': 1.759,
+    'ylossParam4At6': 2.260,
+    'ylossParam4var': 0.356,
+    'evolve_QCD_string_mode': 4,    # string evolution mode
+                                    # 1: deceleration with fixed rapidity loss (m/sigma = 1 fm, dtau = 0.5 fm)
+                                    # 2: deceleration with LEXUS sampled rapidity loss (both dtau and sigma fluctuate)
+                                    # 3: deceleration with LEXUS sampled rapidity loss (m/sigma = 1 fm, dtau fluctuates)
+                                    # 4: deceleration with LEXUS sampled rapidity loss (dtau = 0.5 fm, m/sigma fluctuates)
+}
+
+
+# MUSIC
+music_dict = {
+    'beastMode': 2,
+    'Initial_profile': 13,    # type of initial condition
+                              # 13: dynamical initialization (3dMCGlauber_dynamical)
+                              #     -- 131: 3dMCGlauber with zero nucleus thickness
+    'string_source_sigma_eta': 0.156,  # the smearing size of the hotspot in eta
+    'string_source_sigma_x': 0.113,    # the smearing size of the hotspot in the transverse plane
+    'stringTransverseShiftFrac': 0.884,  # controls the shift of the transverse coordinates as a function of eta for strings
+    'stringPreEqFlowFactor': 0.04,       # pre-Eq. flow factor
+
+    's_factor': 1.000,        # normalization factor read in initial data file
+    'Delta_Tau': 0.020,       # time step to use in the evolution [fm/c]
+    'boost_invariant': 0,     # whether the simulation is boost-invariant
+    'Eta_grid_size': 13.0,    # spatial rapidity range
+                              # [-Eta_grid_size/2, Eta_grid_size/2 - delta_eta]
+    'Grid_size_in_eta': 64,   # number of grid points in the spatial rapidity direction
+    'X_grid_size_in_fm': 30,  # spatial range along the x direction in the transverse plane
+                              # [-X_grid_size_in_fm/2, X_grid_size_in_fm/2]
+    'Y_grid_size_in_fm': 30,  # spatial range along the y direction in the transverse plane
+                              # [-Y_grid_size_in_fm/2, Y_grid_size_in_fm/2]
+    'Grid_size_in_x': 128,    # number of grid points in the x direction
+    'Grid_size_in_y': 128,    # number of grid points in the y direction
+    'EOS_to_use': 14,         # type of the equation of state
+                              # 14: neos_BQS lattice EoS at finite mu_B
+                              # 17: BEST lattice EoS at finite mu_B
+    # transport coefficients
+    'quest_revert_strength': 1.0,
+    'Viscosity_Flag_Yes_1_No_0': 1,      # turn on viscosity in the evolution
+    'Include_Shear_Visc_Yes_1_No_0': 1,  # include shear viscous effect
+    'Shear_to_S_ratio': 0.045,           # value of \eta/s
+    'T_dependent_Shear_to_S_ratio': 0,   # flag to use temperature dep. \eta/s(T)
+    'muB_dependent_Shear_to_S_ratio': 7,
+    'shear_muBf0p2': 6.22222,  # piece-wise eta/s(muB) for muB_dependent_Shear_to_S_ratio == 7
+    'shear_muBf0p4': 6.37778,  # piece-wise eta/s(muB) for muB_dependent_Shear_to_S_ratio == 7
+    'Include_Bulk_Visc_Yes_1_No_0': 1,   # include bulk viscous effect
+    'T_dependent_zeta_over_s': 10,       # parameterization of \zeta/s(T)
+    'bulk_viscosity_10_max': 0.148,      # the peak value of \zeta/s(T)
+    'bulk_viscosity_10_T_peak': 0.214,   # the peak temperature for \zeta/s(T)
+    'bulk_viscosity_10_T_peak_muBcurv': -0.15,  # Tpeak = Tpeak_0 + curv*muB^2
+    'bulk_viscosity_10_width_high': 0.018,
+    'bulk_viscosity_10_width_low': 0.040,
+    'Include_second_order_terms': 1,  # include second-order non-linear coupling terms
+    'Include_vorticity_terms': 0,     # include vorticity coupling terms
+    'Include_Rhob_Yes_1_No_0': 1,
+    'turn_on_baryon_diffusion': 0,
+    'kappa_coefficient': 0.4,
+
+    # parameters for freeze-out and Cooper-Frye
+    'freeze_out_tau_start_max': 2.0,  # the maximum freeze-out starting time [fm/c]
+    'N_freeze_out': 1,
+    'eps_switch': 0.35,               # GeV/fm^3
+}
+
+
+# iSS
+iss_dict = {
+    'hydro_mode': 2,    # mode for reading in freeze-out information
+    'MC_sampling': 4,
+    'include_deltaf_shear': 1,      # include delta f contribution from shear
+    'include_deltaf_bulk': 1,       # include delta f contribution from bulk
+    'bulk_deltaf_kind': 20,         # 20: 22-momentum approximation, 21: CE relaxation time approximation
+    'include_deltaf_diffusion': 0,  # include delta f contribution from diffusion
+    'sample_upto_desired_particle_number': 1,  # 1: flag to run sampling until the desired
+                                               #    particle number is reached
+    'number_of_particles_needed': 100000,      # number of hadrons to sample
+    'local_charge_conservation': 0,    # flag to impose local charge conservation
+    'global_momentum_conservation': 0, # flag to impose global momentum conservation (GMC)
+}
+
+
+# hadronic afterburner toolkit
+hadronic_afterburner_toolkit_dict = {
+    'analyze_flow': 3,             # flag to perform flow analysis
+    'analyze_HBT': 0,              # 0/1: flag to perform HBT analysis
+    'event_buffer_size': 500000,   # the number of events read in at once
+    'rapidity_shift': 0.0,         # 0.5*log(Z_P*A_T/(A_P*Z_T))
+    'compute_correlation': 0,      # flag to compute the correlation function
+    'flag_charge_dependence': 0,   # flag to compute charge-dependent correlations
+    'compute_corr_rap_dep': 0,     # flag to compute the rapidity-dependent multi-particle correlation
+    'resonance_weak_feed_down_flag': 1,  # include weak feed-down contribution
+    'npT': 20,         # number of pT points for pT-differential spectra and vn
+    'pT_min': 0.00,    # the minimum value of transverse momentum (GeV)
+    'pT_max': 3.80,    # the maximum value of transverse momentum (GeV)
+    'n_rap': 71,       # number of points in the rapidity distribution
+    'rapidity_dis_min': -7.0,  # minimum value of the particle rapidity distribution
+    'rapidity_dis_max': 7.0,   # maximum value of the particle rapidity distribution
+}
diff --git a/docker/Dockerfile b/docker/Dockerfile
index b042a24..825fda7 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -39,7 +39,9 @@ RUN cd ${HOME} && \
     ./compile_code_packages.sh && \
     cd ../EOS_database && \
     bash download_EOSdatabase1D.sh && \
-    bash download_QGPViscositydatabase1D.sh
+    bash download_QGPViscositydatabase1D.sh && \
+    cd ../config/BESPost && \
+    bash download_posteriorChain.sh
 
 # Cleaning caches to reduce size of image
 RUN yum clean all
diff --git a/docker/Dockerfile_ubuntu b/docker/Dockerfile_ubuntu
index 8ac720c..5bbf1d0 100644
--- a/docker/Dockerfile_ubuntu
+++ b/docker/Dockerfile_ubuntu
@@ -39,7 +39,9 @@ RUN cd ${HOME} \
 && ./compile_code_packages.sh \
 && cd ../EOS_database \
 && bash download_EOSdatabase1D.sh \
-&& bash download_QGPViscositydatabase1D.sh
+&& bash download_QGPViscositydatabase1D.sh \
+&& cd ../config/BESPost \
+&& bash download_posteriorChain.sh
 
 ENV PYTHONIOENCODING utf-8
 
diff --git a/generate_jobs.py b/generate_jobs.py
index 15398d5..fd8d8f5 100755
--- a/generate_jobs.py
+++ b/generate_jobs.py
@@ -992,9 +992,9 @@ def main():
         setId = random.randint(0, 1000000)
         paramFile = path.join(working_folder_name, "iEBE_parameters.txt")
         subprocess.call(
-            "(cd {}/{}; python3 parameterGenerator.py {} {})".format(
+            "(cd {}/{}; python3 parameterGenerator.py {} {} {})".format(
                 code_package_path, posteriorChainFilePath, setId,
-                paramFile),
+                paramFile, parameter_dict.mcglauber_dict['roots']),
             shell=True)
 
         if args.bayes_file != "":
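
Note on the generate_jobs.py change above: the new third argument handed to
parameterGenerator.py is the collision energy mcglauber_dict['roots'], which
the script reads as ecm = float(sys.argv[3]) and uses to set
string_source_sigma_x and string_source_sigma_eta by interpolating their
posterior values linearly in log(sqrt(s)) between the calibrated energies
7.7, 19.6, and 200 GeV, clamping at the endpoints. A compact sketch of the
equivalent interpolation (the anchor values below are hypothetical, for
illustration only):

    #!/usr/bin/env python3
    import numpy as np

    def interpLogEcm(ecm, anchors):
        """Interpolate linearly in log(sqrt(s)) between anchor energies;
        np.interp clamps to the endpoint values outside the anchor range,
        matching the if/elif/else ladder in parameterGenerator.py."""
        ecmList = sorted(anchors)
        return float(np.interp(np.log(ecm),
                               np.log(np.array(ecmList)),
                               [anchors[e] for e in ecmList]))

    # hypothetical anchor values (fm); the real ones come from the
    # posterior chain in posteriorChain.pkl
    sigmaX = {7.7: 0.25, 19.6: 0.20, 200.: 0.11}
    for ecm in [5., 7.7, 14.5, 19.6, 62.4, 200.]:
        print(f"sqrt(s) = {ecm:5.1f} GeV -> "
              f"sigma_x = {interpLogEcm(ecm, sigmaX):.3f} fm")
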