model.py
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import pathlib
import onnxmltools
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
"""Set the path of the dataset."""
data_dir = pathlib.Path(r"/content/drive/MyDrive/nude_or_not")
"""Define variables for train the model."""
# Number of samples that will be propagated through the network.
batch_size = 64
# Image width and height after resizing.
img_height = 224
img_width = 224
# Split the images 80/20 into training and validation sets; using the same
# seed for both calls keeps the two subsets disjoint.
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=128,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=128,
    image_size=(img_height, img_width),
    batch_size=batch_size)
# Class labels are inferred from the directory names.
class_names = train_ds.class_names
num_classes = len(class_names)

# Cache decoded images and prefetch batches so disk I/O does not stall training.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
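# Optional sanity check (not in the original script): preview a few training
# images with their labels; this also gives the matplotlib import a purpose.
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(9):
        plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[int(labels[i])])
        plt.axis("off")
plt.show()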
# Map pixel values from [0, 255] into [0, 1]. Both models below rescale their
# inputs internally, so this normalized dataset is only used for inspection.
normalization_layer = layers.Rescaling(1./255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
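# Quick check that the rescaled pixel values land in [0, 1] (a small addition;
# it also puts the otherwise-unused numpy import to work).
print(np.min(first_image), np.max(first_image))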
# Baseline CNN: three conv/pool stages followed by a dense classifier. The
# batch dimension is left dynamic so the final, possibly smaller, batch from
# the dataset does not cause a shape mismatch.
model = keras.Sequential([
    layers.Input(shape=(img_height, img_width, 3)),
    layers.Rescaling(1./255),
    layers.Conv2D(16, 3, padding="same", activation="relu"),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding="same", activation="relu"),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding="same", activation="relu"),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation="relu"),
    layers.Dense(num_classes),  # raw logits; softmax is applied by the loss
])
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"])
model.summary()

epochs = 20
# train_ds is already batched, so Keras rejects an explicit batch_size for
# dataset inputs, and a fixed steps_per_epoch would exhaust the finite
# dataset after the first epoch; neither argument is needed here.
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs)
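# Optional: plot training vs. validation accuracy from the History object to
# gauge overfitting before augmentation is added (a sketch, not part of the
# training pipeline itself).
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
plt.plot(range(1, epochs + 1), acc, label="training accuracy")
plt.plot(range(1, epochs + 1), val_acc, label="validation accuracy")
plt.xlabel("epoch")
plt.legend()
plt.show()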
# Augmentation pipeline: random flips, rotations, and zooms are applied at
# training time to reduce overfitting. The input shape is declared by the
# Input layer of the model below, so it is not repeated here.
data_augmentation = keras.Sequential([
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.1),
    layers.RandomZoom(0.1),
])
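# Optional: visualize what the augmentation pipeline produces for one batch
# (training=True forces the random transformations, which are otherwise
# inactive outside of fit()).
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
    for i in range(9):
        augmented = data_augmentation(images, training=True)
        plt.subplot(3, 3, i + 1)
        plt.imshow(augmented[0].numpy().astype("uint8"))
        plt.axis("off")
plt.show()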
# Augmented model: the same CNN with the augmentation pipeline in front and a
# Dropout layer for additional regularization. As above, the batch dimension
# is left dynamic.
model = Sequential([
    layers.Input(shape=(img_height, img_width, 3)),
    data_augmentation,
    layers.Rescaling(1./255),
    layers.Conv2D(16, 3, padding="same", activation="relu"),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding="same", activation="relu"),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding="same", activation="relu"),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(128, activation="relu"),
    layers.Dense(num_classes),  # raw logits, as in the baseline model
])
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"])
model.summary()
epochs = 27
# As before, batch_size is omitted because the dataset is already batched.
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs)
# Export the trained Keras model to ONNX (opset 18) and save it to disk.
onnx_model = onnxmltools.convert_keras(model, target_opset=18)
onnxmltools.utils.save_model(onnx_model, "corpus.onnx")
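# Optional sanity check, assuming the onnxruntime package is available (it is
# not a dependency of the script above): load the exported graph and run one
# dummy inference to confirm the input/output shapes survived conversion.
import onnxruntime as ort
sess = ort.InferenceSession("corpus.onnx")
input_name = sess.get_inputs()[0].name
dummy = np.random.uniform(0, 255, size=(1, img_height, img_width, 3)).astype(np.float32)
logits = sess.run(None, {input_name: dummy})[0]
print("ONNX output shape:", logits.shape)  # expected: (1, num_classes)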