-
Notifications
You must be signed in to change notification settings - Fork 2
/
dl1.txt
172 lines (138 loc) · 4.94 KB
/
dl1.txt
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
-----------------------------------
# Load the whitespace-delimited Boston housing data (file has no header row).
column_names = 'CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT PRICE'.split()
df = pd.read_csv('housing.csv', header=None, delimiter=r"\s+", names=column_names)
-----------------------------------
# Preview the first five parsed rows to sanity-check column alignment.
df.head(5)
-----------------------------------
# Dimension of the dataset as a (rows, columns) tuple.
df.shape
-----------------------------------
# Summary statistics (count/mean/std/min/quartiles/max) per numeric column.
df.describe()
-----------------------------------
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats

# One box plot per column (14 columns -> 2x7 grid) to eyeball spread and outliers.
fig, axs = plt.subplots(ncols=7, nrows=2, figsize=(20, 10))
axs = axs.flatten()
# enumerate replaces the manual `index` counter; the original iterated
# df.items() but never used the column values, only the names.
for index, column in enumerate(df.columns):
    sns.boxplot(y=column, data=df, ax=axs[index])
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=5.0)
-----------------------------------
# Report, per column, the percentage of values outside the Tukey fences
# (more than 1.5 * IQR beyond the first/third quartile).
for name, series in df.items():
    q1 = series.quantile(0.25)
    q3 = series.quantile(0.75)
    iqr = q3 - q1  # inter-quartile range (fixed misspelling 'irq')
    outliers = series[(series <= q1 - 1.5 * iqr) | (series >= q3 + 1.5 * iqr)]
    perc = len(outliers) * 100.0 / len(df)
    print("Column %s outliers = %.2f%%" % (name, perc))
-----------------------------------
# Keep only rows whose PRICE is below 35.0 (drops the high-price outliers).
df = df.loc[~df['PRICE'].ge(35.0)]
print(df.shape)
-----------------------------------
# Peek at the filtered data: named feature columns plus the PRICE target.
df.head()
-----------------------------------
# Shape after the outlier rows were removed.
print(df.shape)
-----------------------------------
# Count missing values per column (isna is the canonical alias of isnull).
df.isna().sum()
-----------------------------------
# No null values in the dataset, no missing value treatment needed
-----------------------------------
# Summary statistics recomputed on the filtered data.
df.describe()
-----------------------------------
# Column dtypes, non-null counts, and memory usage.
df.info()
-----------------------------------
# Distribution of the target variable: histogram with a KDE overlay.
import seaborn as sns
sns.histplot(df['PRICE'], kde=True)
-----------------------------------
# Box-plot view of the same PRICE distribution.
sns.boxplot(df['PRICE'])
-----------------------------------
# Pairwise correlation matrix; pull the PRICE row to see how each
# feature relates to the target.
correlation = df.corr()
correlation.loc['PRICE', :]
-----------------------------------
# Heatmap of the full correlation matrix with cell annotations.
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(15, 12))
sns.heatmap(correlation, annot=True, square=True)
-----------------------------------
# Scatter plots of PRICE against the three most correlated features.
plt.figure(figsize=(20, 5))
features = ['LSTAT', 'RM', 'PTRATIO']
for i, col in enumerate(features, start=1):
    plt.subplot(1, len(features), i)
    plt.scatter(df[col], df.PRICE, marker='o')
    plt.title("Variation in House prices")
    plt.xlabel(col)
    # Fixed: the original nested quotes ('"..."') rendered literal quote
    # marks in the axis label.
    plt.ylabel('House prices in $1000')
-----------------------------------
# Features are every column except the target; PRICE (the last column)
# is the regression label.
X = df.drop('PRICE', axis=1)
y = df['PRICE']
-----------------------------------
# Hold out 20% of the rows for evaluation; fixed seed for reproducibility.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=4)
-----------------------------------
# Baseline model: ordinary least-squares linear regression.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
-----------------------------------
# Fit the linear model on the training split.
regressor.fit(X_train,y_train)
-----------------------------------
# Predict prices for the held-out test rows.
y_pred = regressor.predict(X_test)
-----------------------------------
# Root-mean-squared error of the linear model on the test set.
from sklearn.metrics import mean_squared_error
rmse = mean_squared_error(y_test, y_pred) ** 0.5
print(rmse)
-----------------------------------
from sklearn.metrics import r2_score
# R^2 on the test predictions: 1.0 is a perfect fit, 0 matches the mean predictor.
print(r2 := r2_score(y_test, y_pred))
-----------------------------------
# Standardize features for the neural net: fit the scaler on the training
# split only, then apply that same transform to the test split (no leakage).
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
-----------------------------------
# Feed-forward regression network: four ReLU hidden layers tapering
# 128 -> 64 -> 32 -> 16, with a single linear output unit for the price.
import keras
from keras.layers import Dense, Activation, Dropout
from keras.models import Sequential

model = Sequential()
# Derive the input width from the data rather than hard-coding 13, so the
# model stays correct if the feature set changes upstream.
model.add(Dense(128, activation='relu', input_dim=X_train.shape[1]))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(1))  # linear activation: raw regression output
model.compile(optimizer='adam', loss='mean_squared_error')
-----------------------------------
# Train for 100 epochs on the standardized features.
model.fit(X_train, y_train, epochs = 100)
-----------------------------------
# Neural-network predictions on the (scaled) test features.
y_pred = model.predict(X_test)
-----------------------------------
from sklearn.metrics import r2_score
# R^2 of the neural-network predictions on the test set.
nn_r2 = r2_score(y_test, y_pred)
r2 = nn_r2
print(r2)
-----------------------------------
# RMSE of the neural-network predictions on the test set.
from sklearn.metrics import mean_squared_error
rmse = mean_squared_error(y_test, y_pred) ** 0.5
print(rmse)