This repository has been archived by the owner on Feb 18, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathgaussian_elimination.py
144 lines (121 loc) · 3.97 KB
/
gaussian_elimination.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
# -*- coding: utf-8 -*-
"""Gaussian-Elimination.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1LjufYXP68A9JjO7iPFnwPkxrcpi3SP8q
"""
import numpy as np
"""### Forward and Backward Substitution"""
# Row-oriented back-substitution: solve the upper-triangular system Ax = b.
def upperbSub(A,b):
    """Solve Ax = b by back-substitution.

    A: (n, n) upper-triangular numpy array (nonzero diagonal).
    b: length-n right-hand side.
    Returns the solution vector x.
    """
    n = len(A)
    x = np.zeros(n)
    # Walk the rows bottom-up; each row only needs the entries already solved below it.
    for row in range(n - 1, -1, -1):
        known = np.dot(A[row, row + 1:], x[row + 1:])
        x[row] = (b[row] - known) / A[row, row]
    return x
# Row-Oriented Forward-Substitution for Lower-Tri System Ax=b
# Single for-loop
def lowerfSub(A,b):
    """Solve Ax = b by forward-substitution.

    A: (n, n) lower-triangular numpy array (nonzero diagonal).
    b: length-n right-hand side.
    Returns the solution vector x.
    """
    n = len(A)  # dimension of the system
    x = np.zeros(n)
    x[0] = b[0] / A[0, 0]  # first unknown needs no elimination
    for i in range(1, n):
        # A[i, :] @ x only picks up the solved entries x[:i], since x[i:] is still 0.
        x[i] = (b[i] - np.dot(A[i, :], x)) / A[i, i]
    return x
"""### LU Decomposition and Gaussian Elimination"""
# Unpack a packed LU matrix into an explicit unit-lower L and upper U.
def split(A):
    """Return (L, U) from the packed LU storage A.

    L receives the strict lower triangle of A with an implied unit diagonal;
    U receives the upper triangle, diagonal included.
    """
    n = len(A)
    L = np.eye(n)
    U = np.zeros((n, n))
    for col in range(n):
        L[col + 1:, col] = A[col + 1:, col]
        U[col, col:] = A[col, col:]
    return L, U
"""##### Inner Product Method"""
# Overwrite A with its packed LU factors via inner (dot) products, no pivoting.
def innerGEWP(A):
    """In-place LU decomposition, inner-product (Doolittle) form.

    On return the strict lower triangle of A holds L (unit diagonal implied)
    and the upper triangle holds U. A must be a float array; it is modified
    in place and also returned.
    """
    n = len(A)
    # The first row of U is row 0 unchanged; column 0 of L is just a pivot scaling.
    A[1:, 0] /= A[0, 0]
    for k in range(1, n):
        # Row k of U: remove the contributions of the k already-finished rows.
        A[k, k:] -= A[k, :k] @ A[:k, k:]
        # Column k of L below the diagonal, normalized by the fresh pivot.
        A[(k+1):, k] = (A[(k+1):, k] - A[(k+1):, :k] @ A[:k, k]) / A[k, k]
    return A
"""##### Outer Product Method"""
# Overwrite A with its packed LU factors via rank-1 (outer-product) updates.
def outerGEWP(A):
    """In-place LU decomposition, outer-product (submatrix) form.

    After step i the trailing block of A is the Schur complement; on return
    A holds L in its strict lower triangle and U in its upper triangle.
    A is modified in place and also returned.
    """
    for piv in range(len(A)):
        column = A[(piv+1):, piv]       # view into A, updated in place
        column /= A[piv, piv]           # multipliers for this pivot column
        # Rank-1 update of the trailing submatrix.
        A[(piv+1):, (piv+1):] -= np.outer(column, A[piv, (piv+1):])
    return A
"""### Testing"""
# Row-Oriented Forward-Substitution - Simple Test
# Use a numpy array (not nested lists) so multi-dimensional slicing is enabled.
A=[[1,0,0,0],[2,3,0,0],[1,-1,9,0],[2,0,3,1]]
A=np.array(A)
b=np.random.rand(4)
print(b)
x=lowerfSub(A,b)
# Print the check result (it was previously computed but silently discarded),
# matching how the LU test loops below report theirs.
print(np.allclose(A@x,b))
# LU Decomposition by Inner Product
for trial in range(10):
    M = np.random.rand(20, 20)
    original = np.copy(M)  # keep a pristine copy: innerGEWP overwrites M
    L, U = split(innerGEWP(M))
    print(np.allclose(L @ U, original))
# LU Decomposition by Outer Product
for trial in range(10):
    M = np.random.rand(20, 20)
    original = np.copy(M)  # keep a pristine copy: outerGEWP overwrites M
    L, U = split(outerGEWP(M))  # reuse the split() helper defined above
    print(np.allclose(L @ U, original))
"""### Time Complexity for the Gaussian Elimination Algorithms"""
# Three Loop Gaussian
def gaussianELM(B):
    """Scalar (three nested loops) LU decomposition without pivoting.

    Deliberately loop-based — it is the baseline for the timing comparisons
    below. Works on a copy, so B is left untouched; returns the packed LU
    matrix (L strict lower with unit diagonal implied, U upper).
    """
    A = np.copy(B)
    n = len(A)
    for col in range(n - 1):
        for row in range(col + 1, n):
            # The multiplier is stored where the eliminated entry used to be.
            A[row, col] = A[row, col] / A[col, col]
            for k in range(col + 1, n):
                A[row, k] -= A[row, col] * A[col, k]
    return A
# compare time complexity between gaussianELM and innerGEWP
from time import process_time as pt
for trial in range(10):
    M = np.random.rand(300, 300)
    t0 = pt()
    gaussianELM(M)  # works on a copy, so M is still pristine afterwards
    loop_time = pt() - t0
    t0 = pt()
    innerGEWP(M)
    # ratio > 1 means the triple-loop version was slower
    print(loop_time / (pt() - t0))
print("\n")
# compare time complexity between gaussianELM and outerGEWP
from time import process_time as pt
for trial in range(10):
    M = np.random.rand(300, 300)
    t0 = pt()
    gaussianELM(M)  # works on a copy, so M is still pristine afterwards
    loop_time = pt() - t0
    t0 = pt()
    outerGEWP(M)
    # ratio > 1 means the triple-loop version was slower
    print(loop_time / (pt() - t0))
print("\n")
from time import process_time as pt
# compare time complexity between innerGEWP and outerGEWP
for i in range(10):
    A=np.random.rand(1000,1000)
    # BUG FIX: innerGEWP overwrites A in place, so outerGEWP was previously
    # timed on the already-factorized matrix, not on the same input.
    # Give each algorithm its own copy of the same random matrix.
    B=np.copy(A)
    start=pt()
    innerGEWP(A)
    t=pt()-start
    start=pt()
    outerGEWP(B)
    print(t/(pt()-start))  # ratio > 1 means innerGEWP was slower on this trial
print("\n")
"""### Conclusion on Gaussian Elimination
From the above time comparisons, it seems like innerGEWP is the fastest algorithm, whereas gaussianEML is the slowest.
As we used operations like matrix multiplication, which are computed in parallel by Numpy, we save more time in innerGEWP and outerGEWP. In contrast, the three nested for-loop in gaussianEML means that each steps(operation) is computed individually, which implies a O(n^3) time complexity.
The reason why I think innerGEWP is doing better than outerGEWP is because of the np.outer() function. Given a m*1 vector u and a n*1 vector v, their outer product is like the matrix multiplication of u and the transpose of v.
"""