normal_statistical_tests.py
import numpy as np
import normal_probability_distribution as npd
import modeling_of_uncertainty as mou
import math as mt
from scipy.stats import chi2
from scipy import stats as st
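
# The two local imports above are companion modules from this repository.
# As an assumption (not confirmed by this file), the interface used below is:
#   npd.phi(x, mean, std)   -> CDF of a normal distribution evaluated at x
#   mou.mean_sample(sample) -> sample mean
#   mou.std_sample(sample)  -> sample standard deviation (n - 1 denominator)
# Equivalent stand-ins, if those modules are unavailable, would be
# st.norm.cdf(x, mean, std), np.mean(sample) and np.std(sample, ddof=1).
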
def chi_square_test(sample, alpha):
    # Chi-square goodness-of-fit test for normality.
    # Number of classes from the Sturges rule, rounded up.
    m = mt.ceil(1 + 3.322*mt.log(len(sample), 10))
    # Two parameters (mean and standard deviation) are estimated from the
    # sample, so the degrees of freedom are m - 1 - 2.
    k = 2
    f = m - 1 - k
    # Critical value of the chi-square distribution at the chosen significance level.
    c = chi2.ppf(1-alpha, f)
    # Class width and class boundaries covering the sample range.
    h = (max(sample)-min(sample))/m
    mean = mou.mean_sample(sample)
    std = mou.std_sample(sample)
    class_ = [min(sample)]
    for i in range(0, m):
        class_.append(class_[i] + h)
    # Expected frequencies per class under the fitted normal distribution;
    # the first and last classes absorb the lower and upper tails.
    e = []
    for i in range(1, len(class_)):
        if i == 1:
            e.append(npd.phi(class_[i], mean, std)*len(sample))
        elif i == (len(class_) - 1):
            e.append((1 - npd.phi(class_[i-1], mean, std))*len(sample))
        else:
            e.append((npd.phi(class_[i], mean, std)-npd.phi(class_[i-1], mean, std))*len(sample))
    # Observed frequencies per class, counted over the sorted sample;
    # a sentinel value above the maximum closes the last class.
    n = []
    t = 0
    i = 1
    sample = sorted(sample)
    sample.append(max(sample)+1)
    for j in range(0, len(sample)-1):
        if sample[j+1] > class_[i]:
            n.append((j+1)-t)
            t = j + 1
            i = i + 1
    # Chi-square statistic: sum over classes of (expected - observed)^2 / expected.
    test_1 = []
    for i in range(0, len(n)):
        test_1.append(((e[i]-n[i])**2)/e[i])
    statistics = sum(test_1)
    if statistics < c:
        print('Chi-Square Statistic = {}\nCritical Value = {}\nSince the Chi-Square Statistic is below the critical value, with a confidence level of {}%, the normal distribution is acceptable.'.format(statistics, c, (1-alpha)*100))
    else:
        print('Chi-Square Statistic = {}\nCritical Value = {}\nSince the Chi-Square Statistic is not below the critical value, with a confidence level of {}%, the normal distribution is not acceptable.'.format(statistics, c, (1-alpha)*100))
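
# Worked example of the class set-up in chi_square_test above, for a
# hypothetical sample of size 50: m = ceil(1 + 3.322*log10(50)) = ceil(6.64) = 7
# classes, so the degrees of freedom are f = 7 - 1 - 2 = 4 and, at alpha = 0.05,
# the critical value is chi2.ppf(0.95, 4), approximately 9.49.
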
def ks_test(sample, alpha):
    # Kolmogorov-Smirnov goodness-of-fit test for normality.
    sort_sample = sorted(sample)
    n = len(sort_sample)
    # Empirical CDF evaluated at the sorted sample points.
    S = np.arange(1, n + 1)/n
    # Normal distribution fitted with the sample mean and sample standard deviation.
    mean = np.mean(sample)
    std = np.std(sample, ddof=1)
    F = st.norm.cdf(sort_sample, mean, std)
    # Largest absolute difference between the fitted and empirical CDFs.
    D = max(abs(F-S))
    # Two-sided critical value from the one-sided K-S distribution at alpha/2.
    D_ks = st.ksone.ppf(1 - alpha/2, n)
    if D < D_ks:
        print('Kolmogorov-Smirnov Statistic = {}\nCritical Value = {}\nSince the K-S statistic is below the critical value, with a confidence level of {}%, the normal distribution is acceptable.'.format(D, D_ks, (1-alpha)*100))
    else:
        print('Kolmogorov-Smirnov Statistic = {}\nCritical Value = {}\nSince the K-S statistic is not below the critical value, with a confidence level of {}%, the normal distribution is not acceptable.'.format(D, D_ks, (1-alpha)*100))
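
if __name__ == "__main__":
    # Minimal usage sketch on hypothetical data: draw a synthetic normal sample
    # and run both goodness-of-fit tests at alpha = 0.05. Note that
    # chi_square_test also assumes the companion modules
    # normal_probability_distribution and modeling_of_uncertainty are importable.
    rng = np.random.default_rng(42)
    demo_sample = list(rng.normal(loc=10.0, scale=2.0, size=60))
    chi_square_test(demo_sample, 0.05)
    ks_test(demo_sample, 0.05)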