normal_probability_distribution.py
import numpy as np
from scipy import stats as st
import modeling_of_uncertainty as mou
import matplotlib.pyplot as plt
import math as mt
import seaborn as sns
sns.set_style("ticks")


def norm_pdf_plot(sample):  # Plots the theoretical normal PDF over a density-normalized histogram of the sample
    mean = mou.mean_sample(sample)
    std = mou.std_sample(sample)
    step = abs(max(sample) - min(sample)) / 100
    x = np.arange(min(sample), max(sample) + step, step)
    pdf = st.norm.pdf(x, mean, std)
    plt.plot(x, pdf, color='dimgray', label='Theoretical PDF')
    plt.legend()
    mou.hist(sample, 'sturges', dens_norm=True)
    return pdf


def norm_cdf_plot(sample, alpha):  # Plots the theoretical normal CDF against the empirical CDF with DKW confidence bands
    mean = mou.mean_sample(sample)
    std = mou.std_sample(sample)
    step = abs(max(sample) - min(sample)) / 100
    x = np.arange(min(sample), max(sample) + step, step)
    cdf = st.norm.cdf(x, mean, std)
    n = len(sample)
    y = np.arange(1, n + 1) / n  # empirical CDF heights at the sorted observations
    # Dvoretzky-Kiefer-Wolfowitz band half-width: e = sqrt(ln(2/alpha) / (2n))
    e = (mt.log(2 / alpha) / (2 * n)) ** 0.5
    F1 = y - e  # lower band
    F2 = y + e  # upper band
    plt.plot(x, cdf, color='dimgray', label='Theoretical CDF')
    plt.scatter(sorted(sample), y, label='Empirical CDF')
    plt.plot(sorted(sample), F1, linestyle='--', color='red', alpha=0.8, lw=0.9,
             label='Dvoretzky–Kiefer–Wolfowitz Confidence Bands')
    plt.plot(sorted(sample), F2, linestyle='--', color='red', alpha=0.8, lw=0.9)
    plt.ylabel('Cumulative Distribution Function')
    plt.xlabel('Observed Data')
    plt.legend()
    plt.show()
    return cdf


def phi(x, mean, std):  # Normal CDF: probability that the random variable takes a value less than or equal to x
    return st.norm.cdf(x, mean, std)


def mean_norm(sample, alpha):  # Prints a two-sided confidence interval for the population mean
    n = len(sample)
    std = mou.std_sample(sample)
    k = st.norm.ppf(1 - alpha / 2)
    e = k * std / (n ** 0.5)
    print("The population mean with a confidence level of {}% is {} \u00B1 {}.".format(
        int((1 - alpha) * 100), mou.mean_sample(sample), e))


def phi_inverse(prob):  # Computes the standard normal quantile (inverse CDF) for a given probability
    return st.norm.ppf(prob)
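# Note (illustrative): phi and phi_inverse are inverses of one another when phi is
# evaluated with mean 0 and std 1, e.g. phi_inverse(phi(1.96, 0, 1)) ≈ 1.96.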


def norm_qq_plot(sample, alpha):  # Plots the normal quantile-quantile plot for the given data
    n = len(sample)
    y = np.arange(1, n + 1) / (n + 1)  # plotting positions i/(n+1)
    mean = mou.mean_sample(sample)
    std = mou.std_sample(sample)
    theo_qq = phi_inverse(y)
    x = theo_qq * std + mean  # theoretical quantiles rescaled to the sample mean and std
    # Kolmogorov-Smirnov statistic used to build the confidence bands
    K = (-0.5 * mt.log(alpha / 2)) ** 0.5
    M = (n ** 2 / (2 * n)) ** 0.5  # equivalent to sqrt(n / 2)
    CI_qq_high = []
    CI_qq_low = []
    for prob in y:
        F1 = prob - K / M
        F2 = prob + K / M
        s_low = phi_inverse(F1)
        s_high = phi_inverse(F2)
        CI_qq_low.append(s_low * std + mean)
        CI_qq_high.append(s_high * std + mean)
    sns.regplot(x=x, y=sorted(sample), ci=None,
                line_kws={'color': 'dimgray', 'label': 'Regression Line'})
    plt.plot(sorted(sample), CI_qq_low, linestyle='--', color='red', alpha=1, lw=0.8,
             label='Kolmogorov-Smirnov Confidence Bands')
    plt.plot(sorted(sample), CI_qq_high, linestyle='--', color='red', alpha=1, lw=0.8)
    plt.legend()
    plt.xlabel('Theoretical Normal Quantiles')
    plt.ylabel('Sample Quantiles')
    plt.show()
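

# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Assumes the local modeling_of_uncertainty module (imported above as mou) is on the
# path and that mean_sample, std_sample and hist accept array-like input as used above.
# The synthetic sample, seed, and parameter values below are arbitrary choices for the demo.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.normal(loc=10.0, scale=2.0, size=200)  # synthetic normal sample
    norm_pdf_plot(data)
    plt.show()                 # norm_pdf_plot itself does not call plt.show()
    norm_cdf_plot(data, 0.05)  # empirical vs. theoretical CDF with 95% DKW bands
    mean_norm(data, 0.05)      # prints a 95% confidence interval for the mean
    norm_qq_plot(data, 0.05)   # Q-Q plot with Kolmogorov-Smirnov bands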