Commit: Add examples for all helper functions
Showing 8 changed files with 276 additions and 0 deletions.
@@ -0,0 +1,5 @@
#!/usr/bin/env python
# Created by "Thieu" at 22:28, 14/08/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
@@ -0,0 +1,21 @@
#!/usr/bin/env python
# Created by "Thieu" at 23:29, 24/09/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

import numpy as np
from metaperceptron import Data

X = np.array([[1., -2., 2.],
              [-2., 1., 3.],
              [4., 1., -2.]])
y = np.array([[1, 2, 0],
              [0, 0, 1],
              [0, 2, 2]])

y = np.array([[1, 2, 0]])

data = Data(X, y)
y, le = data.encode_label(y)
print(y)
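
For comparison, the same label-encoding step can be done with scikit-learn's LabelEncoder. This is a hedged sketch for orientation only; metaperceptron's encode_label may return its encoded labels and encoder object in a different form.

import numpy as np
from sklearn.preprocessing import LabelEncoder

y_raw = np.array([1, 2, 0])              # plain 1-D class labels
le = LabelEncoder()
y_encoded = le.fit_transform(y_raw)      # maps labels to integers 0..n_classes-1
print(y_encoded)                         # [1 2 0] for this input
print(le.inverse_transform(y_encoded))   # recovers the original labels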
@@ -0,0 +1,33 @@
#!/usr/bin/env python
# Created by "Thieu" at 15:18, 17/09/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from metaperceptron import MhaMlpClassifier, Data
from sklearn.datasets import make_classification


# Create a multi-class classification dataset with 4 classes
X, y = make_classification(
    n_samples=300,      # Total number of data points
    n_features=7,       # Number of features
    n_informative=3,    # Number of informative features
    n_redundant=0,      # Number of redundant features
    n_classes=4,        # Number of classes
    random_state=42
)
data = Data(X, y, name="RandomData")
data.split_train_test(test_size=0.2, random_state=2)

opt_paras = {"name": "WOA", "epoch": 10, "pop_size": 30}
model = MhaMlpClassifier(hidden_size=50, act1_name="tanh", act2_name="sigmoid",
                         obj_name="NPV", optimizer="OriginalWOA", optimizer_paras=opt_paras, verbose=True)
model.fit(data.X_train, data.y_train)
y_pred = model.predict(data.X_test)

## Get parameters for model
print(model.get_params())

## Get weights of the neural network (MLP network)
print(model.network.get_weights())
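
Since this script only inspects parameters and weights, a quick way to also check predictive quality is to score y_pred with standard scikit-learn metrics; a minimal sketch, assuming data.y_test and y_pred are plain 1-D label arrays:

from sklearn.metrics import accuracy_score, classification_report

print("Accuracy:", accuracy_score(data.y_test, y_pred))   # overall hit rate
print(classification_report(data.y_test, y_pred))         # per-class precision/recall/F1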
@@ -0,0 +1,33 @@
#!/usr/bin/env python
# Created by "Thieu" at 09:49, 25/09/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from metaperceptron import MhaMlpClassifier, Data
from sklearn.datasets import make_classification


# Create a multi-class classification dataset with 4 classes
X, y = make_classification(
    n_samples=300,      # Total number of data points
    n_features=7,       # Number of features
    n_informative=3,    # Number of informative features
    n_redundant=0,      # Number of redundant features
    n_classes=4,        # Number of classes
    random_state=42
)
data = Data(X, y, name="RandomData")
data.split_train_test(test_size=0.2, random_state=2)

opt_paras = {"name": "WOA", "epoch": 30, "pop_size": 30}
model = MhaMlpClassifier(hidden_size=50, act1_name="tanh", act2_name="sigmoid",
                         obj_name="CEL", optimizer="OriginalWOA", optimizer_paras=opt_paras, verbose=True)
model.fit(data.X_train, data.y_train, lb=(-10., ), ub=(10., ))
y_pred = model.predict(data.X_test)

## Get parameters for model
print(model.get_params())

## Get weights of the neural network (MLP network)
print(model.network.get_weights())
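
This variant bounds the trainable weights through the lb/ub arguments of fit(). Independently of that, it can help to confirm that make_classification actually produced a roughly balanced 4-class training split; a small NumPy-only sketch using the Data object from above:

import numpy as np

classes, counts = np.unique(data.y_train, return_counts=True)   # class frequencies in the training split
print(dict(zip(classes.tolist(), counts.tolist())))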
@@ -0,0 +1,38 @@
#!/usr/bin/env python
# Created by "Thieu" at 15:10, 17/09/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from metaperceptron import MhaMlpClassifier, Data
from sklearn.datasets import make_classification


# Create a multi-class classification dataset with 4 classes
X, y = make_classification(
    n_samples=300,      # Total number of data points
    n_features=7,       # Number of features
    n_informative=3,    # Number of informative features
    n_redundant=0,      # Number of redundant features
    n_classes=4,        # Number of classes
    random_state=42
)
data = Data(X, y, name="RandomData")
data.split_train_test(test_size=0.2, random_state=2)

opt_paras = {"name": "WOA", "epoch": 10, "pop_size": 30}
model = MhaMlpClassifier(hidden_size=50, act1_name="tanh", act2_name="sigmoid",
                         obj_name="CEL", optimizer="OriginalWOA", optimizer_paras=opt_paras, verbose=True)
model.fit(data.X_train, data.y_train)
y_pred = model.predict(data.X_test)
print(model.evaluate(data.y_test, y_pred, list_metrics=("AS", "PS", "F1S")))
model.save_training_loss(save_path="history", filename="loss.csv")
model.save_evaluation_metrics(data.y_test, y_pred, list_metrics=("AS", "PS", "F1S"), save_path="history", filename="metrics.csv")

## Save Model
model.save_model(save_path="history", filename="ga-elm.pkl")

## Load Model
new_model = MhaMlpClassifier()
trained_model = new_model.load_model(load_path="history", filename="ga-elm.pkl")
print(trained_model.scores(data.X_test, data.y_test, list_methods=("AS", "PS", "F1S")))
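
The .pkl extension suggests that save_model writes an ordinary Python pickle; that is an assumption about the library's on-disk format, but if it holds, the saved file can also be inspected directly:

import pickle

# Assumption: history/ga-elm.pkl is a standard pickle produced by save_model above.
with open("history/ga-elm.pkl", "rb") as f:
    restored = pickle.load(f)
print(type(restored))   # should report the saved model class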
@@ -0,0 +1,33 @@
#!/usr/bin/env python
# Created by "Thieu" at 13:52, 17/09/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from metaperceptron import MhaMlpClassifier, Data
from sklearn.datasets import make_classification


# Create a multi-class classification dataset with 4 classes
X, y = make_classification(
    n_samples=300,      # Total number of data points
    n_features=7,       # Number of features
    n_informative=3,    # Number of informative features
    n_redundant=0,      # Number of redundant features
    n_classes=4,        # Number of classes
    random_state=42
)
data = Data(X, y, name="RandomData")
data.split_train_test(test_size=0.2, random_state=2)

opt_paras = {"name": "WOA", "epoch": 10, "pop_size": 30}
model = MhaMlpClassifier(hidden_size=50, act1_name="tanh", act2_name="sigmoid",
                         obj_name="CEL", optimizer="OriginalWOA", optimizer_paras=opt_paras, verbose=True)
model.fit(data.X_train, data.y_train)
y_pred = model.predict(data.X_test)
print(model.evaluate(data.y_test, y_pred, list_metrics=("AS", "PS", "F1S")))

## Save results
model.save_training_loss(save_path="history", filename="loss.csv")

model.save_evaluation_metrics(data.y_test, y_pred, list_metrics=("AS", "PS", "F1S"), save_path="history", filename="metrics.csv")
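
Once history/loss.csv exists, it can be plotted with pandas and matplotlib. The exact column layout of the file is not documented here, so the sketch below prints the header first and then plots every numeric column against the epoch index:

import pandas as pd
import matplotlib.pyplot as plt

history = pd.read_csv("history/loss.csv")
print(history.head())        # inspect the actual column names first
history.plot()               # one line per numeric column
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()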
@@ -0,0 +1,32 @@
#!/usr/bin/env python
# Created by "Thieu" at 15:43, 17/09/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

from metaperceptron import MhaMlpClassifier, Data
from sklearn.datasets import make_classification


# Create a multi-class classification dataset with 4 classes
X, y = make_classification(
    n_samples=300,      # Total number of data points
    n_features=7,       # Number of features
    n_informative=3,    # Number of informative features
    n_redundant=0,      # Number of redundant features
    n_classes=4,        # Number of classes
    random_state=42
)
data = Data(X, y, name="RandomData")
data.split_train_test(test_size=0.2, random_state=2)

opt_paras = {"name": "WOA", "epoch": 10, "pop_size": 30}
model = MhaMlpClassifier(hidden_size=50, act1_name="tanh", act2_name="sigmoid",
                         obj_name="CEL", optimizer="OriginalWOA", optimizer_paras=opt_paras, verbose=True)
model.fit(data.X_train, data.y_train)
y_pred = model.predict(data.X_test)
print(model.evaluate(data.y_test, y_pred, list_metrics=("AS", "PS", "F1S")))

## Save predicted results
model.save_y_predicted(data.X_train, data.y_train, save_path="history", filename="train_y_predicted.csv")
model.save_y_predicted(data.X_test, data.y_test, save_path="history", filename="test_y_predicted.csv")
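
A natural follow-up is a confusion matrix built from the saved test predictions. The column names y_true and y_pred below are assumptions about what save_y_predicted writes; check the CSV header before indexing:

import pandas as pd
from sklearn.metrics import confusion_matrix

df = pd.read_csv("history/test_y_predicted.csv")
print(df.columns)                                     # verify the real column names
cm = confusion_matrix(df["y_true"], df["y_pred"])     # assumed column names
print(cm)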
@@ -0,0 +1,81 @@
#!/usr/bin/env python
# Created by "Thieu" at 12:49, 17/09/2023 ----------%
# Email: nguyenthieu2102@gmail.com %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%

import numpy as np
from metaperceptron import DataTransformer, Data


X = np.array([[1., -2., 2.],
              [-2., 1., 3.],
              [4., 1., -2.]])

## Get all supported scaling methods
print(DataTransformer.SUPPORTED_SCALERS.keys())

### 1) Using only standard scaler
scaler = DataTransformer(scaling_methods="standard")
X_scaled = scaler.fit_transform(X)
X_unscaled = scaler.inverse_transform(X_scaled)

# Print the results
print("Original Data:")
print(X)
print("Scaled Data:")
print(X_scaled)
print("Transformed Back to Original:")
print(X_unscaled)

### 2) Using multiple scalers
scaler = DataTransformer(scaling_methods=("standard", "minmax"))  # Just like Pipeline
X_scaled = scaler.fit_transform(X)
X_unscaled = scaler.inverse_transform(X_scaled)

# Print the results
print("\nOriginal Data:")
print(X)
print("Scaled Data:")
print(X_scaled)
print("Transformed Back to Original:")
print(X_unscaled)

### 3) Use methods in Data class instead
data = Data(X)
X_scaled, scaler = data.scale(X, scaling_methods=("standard", "minmax"))  # Just like Pipeline
X_unscaled = scaler.inverse_transform(X_scaled)

# Print the results
print("\nOriginal Data:")
print(X)
print("Scaled Data:")
print(X_scaled)
print("Transformed Back to Original:")
print(X_unscaled)

### 4) Use methods in Data class with parameters (sinh-arc-sinh + minmax)
data = Data(X)
X_scaled, scaler = data.scale(X, scaling_methods=("sinh-arc-sinh", "minmax"), list_dict_paras=({"epsilon": 0.5, "delta": 2.5}, None))
X_unscaled = scaler.inverse_transform(X_scaled)

# Print the results
print("\nOriginal Data:")
print(X)
print("Scaled Data:")
print(X_scaled)
print("Transformed Back to Original:")
print(X_unscaled)

### 5) Use methods in Data class with parameters (yeo-johnson + sinh-arc-sinh)
data = Data(X)
X_scaled, scaler = data.scale(X, scaling_methods=("yeo-johnson", "sinh-arc-sinh"), list_dict_paras=({"lmbda": 1.2}, {"epsilon": 0.5, "delta": 2.5}))
X_unscaled = scaler.inverse_transform(X_scaled)

# Print the results
print("\nOriginal Data:")
print(X)
print("Scaled Data:")
print(X_scaled)
print("Transformed Back to Original:")
print(X_unscaled)
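
For reference, the chained standard + minmax scaling from example 2) can be reproduced with a plain scikit-learn Pipeline; this is a comparison sketch, not metaperceptron's implementation, and DataTransformer may differ in details:

import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler

X = np.array([[1., -2., 2.],
              [-2., 1., 3.],
              [4., 1., -2.]])

pipe = Pipeline([("standard", StandardScaler()), ("minmax", MinMaxScaler())])
X_scaled = pipe.fit_transform(X)
X_unscaled = pipe.inverse_transform(X_scaled)

print(X_scaled)
print(np.allclose(X, X_unscaled))   # True: the two-step chain is invertible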