[Dev] algo: update the new algo (convolutional)
trixky committed Jul 7, 2024
1 parent 9c12a1c commit 2bf8542
Showing 8 changed files with 101 additions and 30 deletions.
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion package.json
@@ -1,5 +1,5 @@
{
"name": "digit-classifier",
"name": "fashion-cnn",
"version": "0.0.1",
"private": true,
"scripts": {
6 changes: 4 additions & 2 deletions src/lib/logic/evaluate.ts
@@ -1,16 +1,18 @@
import * as tf from "@tensorflow/tfjs";
import normalize from "./normalize";

export default function evaluate(model: tf.Sequential, inputs: number[][], outputs: number[]): {
answer: number,
expected: number,
input: number[],
} {
const offset = Math.floor(Math.random() * inputs.length);
const newInput = tf.tensor1d(inputs[offset]);
const newInput = normalize([inputs[offset]], 0, 255);
const inputTensor = newInput.reshape([1, 28, 28, 1]);

const answer = tf.tidy(() => {
// expandDims adds a dimension to the tensor
const prediction = model.predict(newInput.expandDims()) as tf.Tensor<tf.Rank>;
const prediction = model.predict(inputTensor) as tf.Tensor<tf.Rank>;
prediction.print();
// squeeze is the opposite of expandDims
return prediction.squeeze().argMax()
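Taken together, evaluate now normalizes one flattened 784-pixel sample to [0, 1] and reshapes it to the NHWC shape the convolutional layers expect before calling predict. A minimal sketch of that single-sample path, assuming the model and normalize modules from this commit (the classifyOne helper is illustrative, not part of the diff):

```ts
import * as tf from "@tensorflow/tfjs";
import model from "./model";
import normalize from "./normalize";

// Illustrative helper: classify one 28x28 image given as 784 grayscale values in [0, 255].
function classifyOne(pixels: number[]): number {
  return tf.tidy(() => {
    const normalized = normalize([pixels], 0, 255); // shape [1, 784], values in [0, 1]
    const input = normalized.reshape([1, 28, 28, 1]); // NHWC: 1 sample, 28x28, 1 channel
    const prediction = model.predict(input) as tf.Tensor;
    // argMax over the 10 softmax outputs gives the predicted class index.
    return prediction.squeeze().argMax().dataSync()[0];
  });
}
```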
37 changes: 26 additions & 11 deletions src/lib/logic/model.ts
@@ -2,20 +2,35 @@ import * as tf from "@tensorflow/tfjs";

const model = tf.sequential();

model.add(tf.layers.dense({
inputShape: [784],
activation: 'relu',
units: 32
// --------------------------------------- Convolutional Neural Network ---------------------------------------
model.add(tf.layers.conv2d({
inputShape: [28, 28, 1],
filters: 16,
kernelSize: 3, // Square filter of 3 by 3. Could also specify a rectangle, e.g. [2, 3].
strides: 1,
padding: 'same',
activation: 'relu'
}));

model.add(tf.layers.dense({
activation: 'relu',
units: 16
}));
model.add(tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));

model.add(tf.layers.dense({
activation: 'softmax',
units: 10
model.add(tf.layers.conv2d({
filters: 32,
kernelSize: 3,
strides: 1,
padding: 'same',
activation: 'relu'
}));

model.add(tf.layers.maxPooling2d({ poolSize: 2, strides: 2 }));

// --------------------------------------- Multi Layer Perceptron ---------------------------------------
model.add(tf.layers.flatten());


model.add(tf.layers.dense({units: 128, activation: 'relu'}));


model.add(tf.layers.dense({units: 10, activation: 'softmax'}));

export default model;
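With 'same' padding and 2x2 pooling, the feature maps should shrink from 28x28x16 to 14x14x16, then 14x14x32 to 7x7x32, leaving 1568 features for the flatten and dense layers. A quick shape sanity check, assuming the model exported above (the zero-filled dummy batch is only for illustration):

```ts
import * as tf from "@tensorflow/tfjs";
import model from "./model";

// Print layer-by-layer output shapes and parameter counts.
model.summary();

// Push a dummy batch of one blank image through the network.
const dummy = tf.zeros([1, 28, 28, 1]);
const out = model.predict(dummy) as tf.Tensor;
console.log(out.shape); // expected: [1, 10], one softmax distribution over the 10 classes

dummy.dispose();
out.dispose();
```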
20 changes: 20 additions & 0 deletions src/lib/logic/normalize.ts
@@ -0,0 +1,20 @@
import * as tf from '@tensorflow/tfjs';

export default function normalize(inputs: number[][], min: number, max: number): tf.Tensor<tf.Rank> {
const normalized = tf.tidy(function () {
const MIN_VALUES = tf.scalar(min);
const MAX_VALUES = tf.scalar(max);

const INPUT_TENSOR = tf.tensor2d(inputs);
const TENSOR_SUBTRACT_MIN_VALUE = tf.sub(INPUT_TENSOR, MIN_VALUES);

const RANGE_SIZE = tf.sub(MAX_VALUES, MIN_VALUES);

const NORMALIZED_VALUES = tf.div(TENSOR_SUBTRACT_MIN_VALUE, RANGE_SIZE);


return NORMALIZED_VALUES;
});

return normalized;
}
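normalize is a plain min-max scaler, (x - min) / (max - min), applied element-wise to the whole 2-D input. A small usage sketch with made-up pixel values:

```ts
import normalize from "./normalize";

// Three grayscale pixels scaled from the [0, 255] range down to [0, 1].
const scaled = normalize([[0, 128, 255]], 0, 255);
scaled.print(); // approximately [[0, 0.502, 1]]
scaled.dispose();
```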
9 changes: 6 additions & 3 deletions src/lib/logic/train.ts
@@ -1,11 +1,12 @@
import * as tf from "@tensorflow/tfjs";
import model from './model';
import normalize from './normalize';

export default async function train(inputs: number[][], outputs: number[], callBack: (epoch: number, accuracy: number) => void): Promise<tf.Sequential> {
// Shuffle the two arrays in the same way so inputs still match their output indexes.
tf.util.shuffleCombo(inputs, outputs);
// Input feature Array is 2 dimensional (one 784-pixel row per image).
const inputTensor = tf.tensor2d(inputs);
const inputTensor = normalize(inputs, 0, 255);
// Output feature Array is 1 dimensional.
const outputTensor = tf.oneHot(tf.tensor1d(outputs, 'int32'), 10);

@@ -15,11 +16,12 @@ export default async function train(inputs: number[][], outputs: number[], callB
metrics: ["accuracy"]
});

const result = await model.fit(inputTensor, outputTensor, {
const reshapedInputs = inputTensor.reshape([inputs.length, 28, 28, 1]);
const result = await model.fit(reshapedInputs, outputTensor, {
shuffle: true,
validationSplit: 0.2,
batchSize: 512,
epochs: 50,
epochs: 10,
callbacks: {
onEpochEnd: (epoch: number, logs: tf.Logs | any) => {
console.log(`Epoch ${epoch}: loss = ${logs.loss}, accuracy = ${logs.acc}`);
@@ -30,6 +32,7 @@ export default async function train(inputs: number[][], outputs: number[], callB

inputTensor.dispose();
outputTensor.dispose();
reshapedInputs.dispose();

return model;
}
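train now normalizes the flattened inputs, reshapes them to [N, 28, 28, 1], and fits for 10 epochs with a 20% validation split and batch size 512. A hedged sketch of calling it; the random dummy data below only exercises the API and stands in for the real Fashion-MNIST loader, which is not part of this commit:

```ts
import train from "./train";

// Stand-in data: 1000 random 784-pixel images with random 0-9 labels.
function makeDummyData(count: number): { inputs: number[][]; outputs: number[] } {
  const inputs = Array.from({ length: count }, () =>
    Array.from({ length: 784 }, () => Math.floor(Math.random() * 256))
  );
  const outputs = Array.from({ length: count }, () => Math.floor(Math.random() * 10));
  return { inputs, outputs };
}

async function run() {
  const { inputs, outputs } = makeDummyData(1000);
  const model = await train(inputs, outputs, (epoch, accuracy) => {
    console.log(`epoch ${epoch + 1}/10: accuracy ${(accuracy * 100).toFixed(1)}%`);
  });
  // model is the trained tf.Sequential, ready to be passed to evaluate().
}

run();
```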
6 changes: 3 additions & 3 deletions src/routes/+layout.svelte
@@ -34,15 +34,15 @@
/* animate the image with an infinite 360° rotation */
img {
animation: spin 60s linear infinite;
transform: scale(1.2);
transform: scale(1.4);
}
@keyframes spin {
0% {
transform: scale(1.2) rotate(0deg);
transform: scale(1.4) rotate(0deg);
}
100% {
transform: scale(1.2) rotate(360deg);
transform: scale(1.4) rotate(360deg);
}
}
</style>
47 changes: 39 additions & 8 deletions src/routes/+page.svelte
@@ -6,6 +6,19 @@
import evaluate from "$lib/logic/evaluate";
import * as tf from "@tensorflow/tfjs";
const LOOKUP = [
"T-shirt",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Boot",
];
interface Dataset {
inputs: number[][];
outputs: number[];
@@ -65,6 +78,8 @@
inputEvaluation = input;
answerEvaluation = answer;
expectedEvaluation = expected;
console.log(input);
}
}
@@ -86,7 +101,7 @@
<div id="progress" class="my-2">
<div>
<p>epoch:</p>
<p>&nbsp;{(epoch + 1).toString().padStart(2, " ")} / 50</p>
<p>&nbsp;{(epoch + 1).toString().padStart(2, " ")} / 10</p>
</div>
<div>
<p>accuracy:</p>
@@ -105,17 +120,21 @@
</div>
<ol class="mt-5">
{#each inputEvaluation as cell}
{@const intensity = Math.min(Math.floor(cell * 10), 9)}
<li class="bg-black bg-opacity-80 w-2 h-2 md:w-3 md:h-3" style="opacity: {cell + 0.1};">
{@const intensity = Math.min(Math.floor((cell * 10) / 255), 9)}
<li
class="bg-black bg-opacity-80 w-2 h-2 md:w-3 md:h-3"
style="opacity: {intensity / 9 + 0.1};"
>
<p class="text-[0.7em] md:text-xs">{intensity}</p>
</li>
{/each}
</ol>
<div id="result" class="flex justify-between w-full mt-3">
<p>
<span class:nothing={model === null || model === undefined}>expected:</span
><span class="nothing">&nbsp;</span><span class:nothing={expectedEvaluation === null}
>{expectedEvaluation === null ? "?" : expectedEvaluation}</span
><span class="nothing">&nbsp;</span><span
class:nothing={expectedEvaluation === null}
>{expectedEvaluation === null ? "?" : LOOKUP[expectedEvaluation]}</span
>
</p>
<p>
@@ -127,7 +146,7 @@
class:failure={answerEvaluation !== null &&
expectedEvaluation !== answerEvaluation}
>
{answerEvaluation === null ? "?" : answerEvaluation}
{answerEvaluation === null ? "?" : LOOKUP[answerEvaluation]}
</span>
</p>
</div>
@@ -141,8 +160,20 @@
<p>20% (8 000)</p>
</div>
<div>
<p>hidden layers (2):</p>
<p>32 + 16</p>
<p>convolutional layers (4):</p>
<p>&nbsp;</p>
</div>
<div>
<p>&nbsp;</p>
<p>16(28x28)[3x3 filter] + max pool[2x2, stride 2]</p>
</div>
<div>
<p>&nbsp;</p>
<p>32(14x14)[3x3 filter] + max pool[2x2, stride 2]</p>
</div>
<div>
<p>hidden layers (1):</p>
<p>128</p>
</div>
<div>
<p>activation functions:</p>
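On the display side, the page now maps the predicted class index to a Fashion-MNIST label through LOOKUP and maps each raw pixel (0 to 255) to a 0 to 9 intensity digit for the preview grid. The same two mappings in plain TypeScript, for reference (the function names are illustrative):

```ts
const LOOKUP = [
  "T-shirt", "Trouser", "Pullover", "Dress", "Coat",
  "Sandal", "Shirt", "Sneaker", "Bag", "Boot",
];

// Class index from argMax() to a human-readable label, e.g. labelOf(8) === "Bag".
function labelOf(classIndex: number): string {
  return LOOKUP[classIndex] ?? "?";
}

// Raw pixel in [0, 255] to an intensity digit in [0, 9], as rendered in each preview cell.
function intensityOf(pixel: number): number {
  return Math.min(Math.floor((pixel * 10) / 255), 9);
}
```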
