From fe3329df27ecf58cc0e5ffcf5d2ca908f9093660 Mon Sep 17 00:00:00 2001
From: uakarsh <55104596+uakarsh@users.noreply.github.com>
Date: Wed, 6 Jul 2022 19:43:32 +0530
Subject: [PATCH 1/9] Created using Colaboratory
---
...image_classification_imagenette_mini.ipynb | 1193 +++++++++++++++++
1 file changed, 1193 insertions(+)
create mode 100644 flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
new file mode 100644
index 0000000000..334a8c2c7e
--- /dev/null
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
@@ -0,0 +1,1193 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "view-in-github",
+ "colab_type": "text"
+ },
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# Copyright The PyTorch Lightning team.\n",
+ "#\n",
+ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# http://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License.\n",
+ "\n",
+ "# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154"
+ ],
+ "metadata": {
+ "id": "aEGsCOim1-ri"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ApF-xlLV-2S0"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install learn2learn\n",
+ "!pip install kornia\n",
+ "!pip install lightning-flash\n",
+ "!pip install 'lightning-flash[image]'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "OWmur8B1_9op",
+ "outputId": "638a2770-55ff-42ae-b97a-10d60462610f"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "--2022-07-06 14:03:20-- https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1\n",
+ "Resolving www.dropbox.com (www.dropbox.com)... 162.125.11.18, 2620:100:601c:18::a27d:612\n",
+ "Connecting to www.dropbox.com (www.dropbox.com)|162.125.11.18|:443... connected.\n",
+ "HTTP request sent, awaiting response... 301 Moved Permanently\n",
+ "Location: /s/dl/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl [following]\n",
+ "--2022-07-06 14:03:20-- https://www.dropbox.com/s/dl/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl\n",
+ "Reusing existing connection to www.dropbox.com:443.\n",
+ "HTTP request sent, awaiting response... 302 Found\n",
+ "Location: https://uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com/cd/0/get/BonlWaNWpPqfbmVfhuL9L_PdHqR-yBb5BiC0o5SoaVx-JVtoaYW_BVD74812lIdsNVjYR6CXoEEk85lQnLjClFCRWOBmtzcltTV9uxsJm-NguKfP-qGcUjvtYmVbIh_E1BVwTxorF9_TJAsCLYjCpNBwa30197VtA4X5QZ-DQpXSqw/file?dl=1# [following]\n",
+ "--2022-07-06 14:03:21-- https://uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com/cd/0/get/BonlWaNWpPqfbmVfhuL9L_PdHqR-yBb5BiC0o5SoaVx-JVtoaYW_BVD74812lIdsNVjYR6CXoEEk85lQnLjClFCRWOBmtzcltTV9uxsJm-NguKfP-qGcUjvtYmVbIh_E1BVwTxorF9_TJAsCLYjCpNBwa30197VtA4X5QZ-DQpXSqw/file?dl=1\n",
+ "Resolving uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com (uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com)... 162.125.11.15, 2620:100:601c:15::a27d:60f\n",
+ "Connecting to uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com (uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com)|162.125.11.15|:443... connected.\n",
+ "HTTP request sent, awaiting response... 200 OK\n",
+ "Length: 1145461190 (1.1G) [application/binary]\n",
+ "Saving to: 'mini-imagenet-cache-train.pkl?dl=1'\n",
+ "\n",
+ "mini-imagenet-cache 100%[===================>] 1.07G 90.0MB/s in 13s \n",
+ "\n",
+ "2022-07-06 14:03:35 (82.7 MB/s) - 'mini-imagenet-cache-train.pkl?dl=1' saved [1145461190/1145461190]\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "## Train file\n",
+ "!wget https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "kmh_SrYQAKNG",
+ "outputId": "caef2c13-dd4e-4410-d4f0-7bb78370d642"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "--2022-07-06 14:03:35-- https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1\n",
+ "Resolving www.dropbox.com (www.dropbox.com)... 162.125.6.18, 2620:100:601c:18::a27d:612\n",
+ "Connecting to www.dropbox.com (www.dropbox.com)|162.125.6.18|:443... connected.\n",
+ "HTTP request sent, awaiting response... 301 Moved Permanently\n",
+ "Location: /s/dl/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl [following]\n",
+ "--2022-07-06 14:03:35-- https://www.dropbox.com/s/dl/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl\n",
+ "Reusing existing connection to www.dropbox.com:443.\n",
+ "HTTP request sent, awaiting response... 302 Found\n",
+ "Location: https://uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com/cd/0/get/BomjGsUa6V-Fl0AxfbjFITON5qgxjVFWtqlEZP_XkKazHYdR86WgUNAz0N5_Y1HM52U8Q3MMNC4Nwii_ScnkYqAVUj88ezCAo79OrJ1yQtWDEFuj0dnLo9RW1VGFd04pZm9yx5YNdp1YJ5PM8cRK6TEbrsaCmJ77IFSu9BDouLzS6A/file?dl=1# [following]\n",
+ "--2022-07-06 14:03:36-- https://uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com/cd/0/get/BomjGsUa6V-Fl0AxfbjFITON5qgxjVFWtqlEZP_XkKazHYdR86WgUNAz0N5_Y1HM52U8Q3MMNC4Nwii_ScnkYqAVUj88ezCAo79OrJ1yQtWDEFuj0dnLo9RW1VGFd04pZm9yx5YNdp1YJ5PM8cRK6TEbrsaCmJ77IFSu9BDouLzS6A/file?dl=1\n",
+ "Resolving uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com (uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com)... 162.125.11.15, 2620:100:601c:15::a27d:60f\n",
+ "Connecting to uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com (uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com)|162.125.11.15|:443... connected.\n",
+ "HTTP request sent, awaiting response... 200 OK\n",
+ "Length: 292661258 (279M) [application/binary]\n",
+ "Saving to: 'mini-imagenet-cache-validation.pkl?dl=1'\n",
+ "\n",
+ "mini-imagenet-cache 100%[===================>] 279.10M 86.4MB/s in 3.4s \n",
+ "\n",
+ "2022-07-06 14:03:40 (82.8 MB/s) - 'mini-imagenet-cache-validation.pkl?dl=1' saved [292661258/292661258]\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "## Validation File\n",
+ "!wget https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Re6WRCnIAwpF"
+ },
+ "outputs": [],
+ "source": [
+ "!cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'\n",
+ "!cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "9OX0VzelBGqi"
+ },
+ "outputs": [],
+ "source": [
+ "!rm './mini-imagenet-cache-train.pkl?dl=1'\n",
+ "!rm './mini-imagenet-cache-validation.pkl?dl=1'"
+ ]
+ },
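+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The `?dl=1` query string ends up in the saved filenames, hence the `cp`/`rm` dance above. An alternative (not used for this run) is to have `wget` write straight to the target name with `-O`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# alternative to the cp/rm steps above: save under the final name directly\n",
+ "# !wget -O mini-imagenet-cache-train.pkl 'https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1'\n",
+ "# !wget -O mini-imagenet-cache-validation.pkl 'https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1'"
+ ]
+ },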
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "g0FHMMCF-hM7"
+ },
+ "outputs": [],
+ "source": [
+ "warnings.simplefilter(\"ignore\")\n",
+ "\n",
+ "from typing import Callable, Tuple, Union\n",
+ "from dataclasses import dataclass\n",
+ "import warnings\n",
+ "\n",
+ "import kornia.augmentation as Ka\n",
+ "import kornia.geometry as Kg\n",
+ "import learn2learn as l2l\n",
+ "\n",
+ "import torch\n",
+ "import torchvision\n",
+ "from torch import nn\n",
+ "import torchvision.transforms as T\n",
+ "\n",
+ "from flash.core.data.io.input_transform import InputTransform\n",
+ "import flash\n",
+ "from flash.core.data.io.input import DataKeys\n",
+ "from flash.core.data.transforms import ApplyToKeys, kornia_collate\n",
+ "from flash.image import ImageClassificationData, ImageClassifier\n",
+ "\n",
+ "from PIL import Image\n",
+ "import numpy as np"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "VlqRrJ7CA1v9"
+ },
+ "outputs": [],
+ "source": [
+ "# download MiniImagenet\n",
+ "train_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"train\", download=False)\n",
+ "val_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"validation\", download=False)"
+ ]
+ },
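+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "A quick sanity check of what the `MiniImagenet` objects expose; the `from_tensors` call below relies on the raw `x` (images) and `y` (labels) arrays:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# peek at the raw arrays backing the datasets\n",
+ "print(train_dataset.x.shape, train_dataset.y.shape)\n",
+ "print(val_dataset.x.shape, val_dataset.y.shape)"
+ ]
+ },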
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "x1S1mfXtuHNd"
+ },
+ "outputs": [],
+ "source": [
+ "@dataclass\n",
+ "class ImageClassificationInputTransform(InputTransform):\n",
+ "\n",
+ " image_size: Tuple[int, int] = (196, 196)\n",
+ " mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n",
+ " std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n",
+ "\n",
+ " def per_sample_transform(self):\n",
+ " return T.Compose([\n",
+ " ApplyToKeys(\n",
+ " DataKeys.INPUT,\n",
+ " T.Compose([\n",
+ " T.ToTensor(),\n",
+ " Kg.Resize((196, 196)),\n",
+ " # SPATIAL\n",
+ " Ka.RandomHorizontalFlip(p=0.25),\n",
+ " Ka.RandomRotation(degrees=90.0, p=0.25),\n",
+ " Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),\n",
+ " Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),\n",
+ " \n",
+ " # PIXEL-LEVEL\n",
+ " Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness\n",
+ " Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation\n",
+ " Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast\n",
+ " Ka.ColorJitter(hue=1 / 30, p=0.25), # hue\n",
+ " Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),\n",
+ " Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),\n",
+ " ]),\n",
+ " ),\n",
+ " ApplyToKeys(DataKeys.TARGET, torch.as_tensor)]\n",
+ " )\n",
+ "\n",
+ " def train_per_sample_transform(self):\n",
+ " return T.Compose(\n",
+ " [\n",
+ " ApplyToKeys(\n",
+ " \"input\",\n",
+ " T.Compose(\n",
+ " [\n",
+ " T.ToTensor(),\n",
+ " T.Resize(self.image_size),\n",
+ " T.Normalize(self.mean, self.std),\n",
+ " T.RandomHorizontalFlip(),\n",
+ " T.ColorJitter(),\n",
+ " T.RandomAutocontrast(),\n",
+ " T.RandomPerspective(),\n",
+ " ]\n",
+ " ),\n",
+ " ),\n",
+ " ApplyToKeys(\"target\", torch.as_tensor),\n",
+ " ]\n",
+ " )\n",
+ " def per_batch_transform_on_device(self):\n",
+ " return ApplyToKeys(\n",
+ " DataKeys.INPUT,\n",
+ " Ka.RandomHorizontalFlip(p=0.25),\n",
+ " )\n",
+ " \n",
+ "\n",
+ " def collate(self):\n",
+ " return kornia_collate\n"
+ ]
+ },
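+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To see what a per-sample hook produces, the torchvision part of the pipeline can be applied to one raw sample by hand (a rough sketch for inspection only; it assumes `train_dataset.x` holds H x W x C image arrays, as the `from_tensors` call below also expects):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# apply a train-style pipeline to a single raw image, outside of Flash\n",
+ "sample = train_dataset.x[0]\n",
+ "pipeline = T.Compose([T.ToTensor(), T.Resize((196, 196)), T.RandomHorizontalFlip()])\n",
+ "print(pipeline(sample).shape)  # expected: torch.Size([3, 196, 196])"
+ ]
+ },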
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "xfei_uix-veN"
+ },
+ "outputs": [],
+ "source": [
+ "# construct datamodule\n",
+ "\n",
+ "datamodule = ImageClassificationData.from_tensors(\n",
+ " train_data=train_dataset.x,\n",
+ " train_targets=torch.from_numpy(train_dataset.y.astype(int)),\n",
+ " val_data=val_dataset.x,\n",
+ " val_targets=torch.from_numpy(val_dataset.y.astype(int)),\n",
+ " train_transform=ImageClassificationInputTransform,\n",
+ " val_transform=ImageClassificationInputTransform,\n",
+ " batch_size= 1\n",
+ ")"
+ ]
+ },
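+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The datamodule infers the label set from the targets; `num_classes` feeds the `ways` argument of the training strategy below:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# number of classes seen in the training targets\n",
+ "print(datamodule.num_classes)"
+ ]
+ },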
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "referenced_widgets": [
+ "f41977658ad34289884749c22eb3aaef"
+ ]
+ },
+ "id": "KZbvMCKn-zlJ",
+ "outputId": "0a5a0e5e-007a-439e-85a6-1c199d1fefad"
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Downloading: \"https://download.pytorch.org/models/resnet18-f37072fd.pth\" to /home/studio-lab-user/.cache/torch/hub/checkpoints/resnet18-f37072fd.pth\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "f41977658ad34289884749c22eb3aaef",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ " 0%| | 0.00/44.7M [00:00, ?B/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using 'prototypicalnetworks' provided by learnables/learn2learn (https://github.com/learnables/learn2learn).\n"
+ ]
+ }
+ ],
+ "source": [
+ "model = ImageClassifier(\n",
+ " backbone=\"resnet18\",\n",
+ " training_strategy=\"prototypicalnetworks\",\n",
+ " training_strategy_kwargs={\n",
+ " \"epoch_length\": 10 * 16,\n",
+ " \"meta_batch_size\": 1,\n",
+ " \"num_tasks\": 200,\n",
+ " \"test_num_tasks\": 2000,\n",
+ " \"ways\": datamodule.num_classes,\n",
+ " \"shots\": 1,\n",
+ " \"test_ways\": 5,\n",
+ " \"test_shots\": 1,\n",
+ " \"test_queries\": 15,\n",
+ " },\n",
+ " optimizer=torch.optim.Adam,\n",
+ " learning_rate=0.001,\n",
+ ")"
+ ]
+ },
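+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For intuition on the `prototypicalnetworks` strategy: each way (class) gets a prototype, the mean of its support embeddings, and each query is assigned to the nearest prototype. A toy standalone sketch with made-up sizes, not the Flash/learn2learn API:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# toy prototypical-network classification step (illustrative only)\n",
+ "support = torch.randn(5, 1, 64)  # 5 ways, 1 shot, embedding dim 64\n",
+ "prototypes = support.mean(dim=1)  # (5, 64): one prototype per way\n",
+ "queries = torch.randn(15, 64)  # 15 query embeddings\n",
+ "dists = torch.cdist(queries, prototypes)  # (15, 5) Euclidean distances\n",
+ "print(dists.argmin(dim=1))  # nearest prototype = predicted way"
+ ]
+ },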
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 361,
+ "referenced_widgets": [
+ "52e6b0c277524a0295b54f2496230821",
+ "89e6db5a79a2427fa117a903cfbb0541",
+ "875577999f4f477dae59eff999452430",
+ "c0f62b8649e54994b87ea7ba555ddd60",
+ "5b264066728b41f4810b26a150a394f9",
+ "0b38016cd9f34774bf090a0e15378712",
+ "53266fce08a142cfbc3d949e5262b9e5",
+ "caaa95ccb1474cca8ed53dd35bdab965",
+ "7a78b1520e7b40da912b323133174937",
+ "541af18c8f6d4983b612f902e413fb9e",
+ "8ffbf193b3564109b1424699e5be8f4c",
+ "123e733ade784f9ba622ef2f89fc6f65",
+ "41ac3e8859a94c508e4bf2d3fd8b1e6e",
+ "10959d4fa6624777af2a73b99c90e9a9",
+ "bdb79707f0484da1b9810c0312511ab6",
+ "f05c7723a0354bedad829e6b075bf96c",
+ "f1e5d92e3b384525b294b11ce8cf1bda",
+ "5c28420be300468ea8e62383b52e7fa2",
+ "512440dc2426428a82f6a96042a9e1cc",
+ "2fed134d52c146d989653fe21713fe5d",
+ "184e40be255243fdbc9c0c41c4d1aba5",
+ "533d044cabf745cfaee30e620e08c349",
+ "",
+ "076f86156e3146c99cd8697971eced86"
+ ]
+ },
+ "id": "zyWdJHm6-010",
+ "outputId": "9333cb28-8530-46f4-9f2c-972175a5627d"
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using 16bit native Automatic Mixed Precision (AMP)\n",
+ "GPU available: True, used: True\n",
+ "TPU available: False, using: 0 TPU cores\n",
+ "IPU available: False, using: 0 IPUs\n",
+ "HPU available: False, using: 0 HPUs\n",
+ "Missing logger folder: /home/studio-lab-user/notebooks/lightning_logs\n",
+ "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
+ "\n",
+ " | Name | Type | Params\n",
+ "-----------------------------------------------------\n",
+ "0 | train_metrics | ModuleDict | 0 \n",
+ "1 | val_metrics | ModuleDict | 0 \n",
+ "2 | test_metrics | ModuleDict | 0 \n",
+ "3 | adapter | Learn2LearnAdapter | 11.2 M\n",
+ "-----------------------------------------------------\n",
+ "11.2 M Trainable params\n",
+ "0 Non-trainable params\n",
+ "11.2 M Total params\n",
+ "22.419 Total estimated model params size (MB)\n"
+ ]
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Sanity Checking: 0it [00:00, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "application/vnd.jupyter.widget-view+json": {
+ "model_id": "076f86156e3146c99cd8697971eced86",
+ "version_major": 2,
+ "version_minor": 0
+ },
+ "text/plain": [
+ "Training: 0it [00:00, ?it/s]"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "trainer = flash.Trainer(\n",
+ " max_epochs=1,\n",
+ " gpus=1,\n",
+ " precision=16,\n",
+ ")\n",
+ "trainer.finetune(model, datamodule=datamodule, strategy=\"no_freeze\")"
+ ]
+ },
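+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Possible follow-ups after finetuning (standard `Trainer` API; the checkpoint name is just an example):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# evaluate on the validation episodes and keep the weights\n",
+ "# trainer.validate(model, datamodule=datamodule)\n",
+ "# trainer.save_checkpoint(\"protonet_mini_imagenet.pt\")"
+ ]
+ },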
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "sOVzw75a14At"
+ },
+ "outputs": [],
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [],
+ "name": "image_classification_imagenette_mini",
+ "provenance": [],
+ "include_colab_link": true
+ },
+ "gpuClass": "standard",
+ "kernelspec": {
+ "display_name": "default:Python",
+ "language": "python",
+ "name": "conda-env-default-py"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.7"
+ },
+ "widgets": {
+ "application/vnd.jupyter.widget-state+json": {
+ "0b38016cd9f34774bf090a0e15378712": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "10959d4fa6624777af2a73b99c90e9a9": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_512440dc2426428a82f6a96042a9e1cc",
+ "max": 320,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_2fed134d52c146d989653fe21713fe5d",
+ "value": 0
+ }
+ },
+ "123e733ade784f9ba622ef2f89fc6f65": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_41ac3e8859a94c508e4bf2d3fd8b1e6e",
+ "IPY_MODEL_10959d4fa6624777af2a73b99c90e9a9",
+ "IPY_MODEL_bdb79707f0484da1b9810c0312511ab6"
+ ],
+ "layout": "IPY_MODEL_f05c7723a0354bedad829e6b075bf96c"
+ }
+ },
+ "184e40be255243fdbc9c0c41c4d1aba5": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "2fed134d52c146d989653fe21713fe5d": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "41ac3e8859a94c508e4bf2d3fd8b1e6e": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_f1e5d92e3b384525b294b11ce8cf1bda",
+ "placeholder": "",
+ "style": "IPY_MODEL_5c28420be300468ea8e62383b52e7fa2",
+ "value": "Epoch 0: 0%"
+ }
+ },
+ "512440dc2426428a82f6a96042a9e1cc": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": "2",
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "52e6b0c277524a0295b54f2496230821": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HBoxModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HBoxModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HBoxView",
+ "box_style": "",
+ "children": [
+ "IPY_MODEL_89e6db5a79a2427fa117a903cfbb0541",
+ "IPY_MODEL_875577999f4f477dae59eff999452430",
+ "IPY_MODEL_c0f62b8649e54994b87ea7ba555ddd60"
+ ],
+ "layout": "IPY_MODEL_5b264066728b41f4810b26a150a394f9"
+ }
+ },
+ "53266fce08a142cfbc3d949e5262b9e5": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "533d044cabf745cfaee30e620e08c349": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "541af18c8f6d4983b612f902e413fb9e": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "5b264066728b41f4810b26a150a394f9": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": "inline-flex",
+ "flex": null,
+ "flex_flow": "row wrap",
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": "100%"
+ }
+ },
+ "5c28420be300468ea8e62383b52e7fa2": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "7a78b1520e7b40da912b323133174937": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "ProgressStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "ProgressStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "bar_color": null,
+ "description_width": ""
+ }
+ },
+ "875577999f4f477dae59eff999452430": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "FloatProgressModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "FloatProgressModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "ProgressView",
+ "bar_style": "",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_caaa95ccb1474cca8ed53dd35bdab965",
+ "max": 2,
+ "min": 0,
+ "orientation": "horizontal",
+ "style": "IPY_MODEL_7a78b1520e7b40da912b323133174937",
+ "value": 2
+ }
+ },
+ "89e6db5a79a2427fa117a903cfbb0541": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_0b38016cd9f34774bf090a0e15378712",
+ "placeholder": "",
+ "style": "IPY_MODEL_53266fce08a142cfbc3d949e5262b9e5",
+ "value": "Sanity Checking DataLoader 0: 100%"
+ }
+ },
+ "8ffbf193b3564109b1424699e5be8f4c": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "DescriptionStyleModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "DescriptionStyleModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "StyleView",
+ "description_width": ""
+ }
+ },
+ "bdb79707f0484da1b9810c0312511ab6": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_184e40be255243fdbc9c0c41c4d1aba5",
+ "placeholder": "",
+ "style": "IPY_MODEL_533d044cabf745cfaee30e620e08c349",
+ "value": " 0/320 [00:00<?, ?it/s]"
+ }
+ },
+ "c0f62b8649e54994b87ea7ba555ddd60": {
+ "model_module": "@jupyter-widgets/controls",
+ "model_module_version": "1.5.0",
+ "model_name": "HTMLModel",
+ "state": {
+ "_dom_classes": [],
+ "_model_module": "@jupyter-widgets/controls",
+ "_model_module_version": "1.5.0",
+ "_model_name": "HTMLModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/controls",
+ "_view_module_version": "1.5.0",
+ "_view_name": "HTMLView",
+ "description": "",
+ "description_tooltip": null,
+ "layout": "IPY_MODEL_541af18c8f6d4983b612f902e413fb9e",
+ "placeholder": "",
+ "style": "IPY_MODEL_8ffbf193b3564109b1424699e5be8f4c",
+ "value": " 2/2 [00:21<00:00, 10.68s/it]"
+ }
+ },
+ "caaa95ccb1474cca8ed53dd35bdab965": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": "2",
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ },
+ "f05c7723a0354bedad829e6b075bf96c": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": "inline-flex",
+ "flex": null,
+ "flex_flow": "row wrap",
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": "100%"
+ }
+ },
+ "f1e5d92e3b384525b294b11ce8cf1bda": {
+ "model_module": "@jupyter-widgets/base",
+ "model_module_version": "1.2.0",
+ "model_name": "LayoutModel",
+ "state": {
+ "_model_module": "@jupyter-widgets/base",
+ "_model_module_version": "1.2.0",
+ "_model_name": "LayoutModel",
+ "_view_count": null,
+ "_view_module": "@jupyter-widgets/base",
+ "_view_module_version": "1.2.0",
+ "_view_name": "LayoutView",
+ "align_content": null,
+ "align_items": null,
+ "align_self": null,
+ "border": null,
+ "bottom": null,
+ "display": null,
+ "flex": null,
+ "flex_flow": null,
+ "grid_area": null,
+ "grid_auto_columns": null,
+ "grid_auto_flow": null,
+ "grid_auto_rows": null,
+ "grid_column": null,
+ "grid_gap": null,
+ "grid_row": null,
+ "grid_template_areas": null,
+ "grid_template_columns": null,
+ "grid_template_rows": null,
+ "height": null,
+ "justify_content": null,
+ "justify_items": null,
+ "left": null,
+ "margin": null,
+ "max_height": null,
+ "max_width": null,
+ "min_height": null,
+ "min_width": null,
+ "object_fit": null,
+ "object_position": null,
+ "order": null,
+ "overflow": null,
+ "overflow_x": null,
+ "overflow_y": null,
+ "padding": null,
+ "right": null,
+ "top": null,
+ "visibility": null,
+ "width": null
+ }
+ }
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
\ No newline at end of file
From 77b7878c9b9c279d977488d916be90974e7b6a31 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Wed, 6 Jul 2022 14:23:44 +0000
Subject: [PATCH 2/9] [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
---
...image_classification_imagenette_mini.ipynb | 1545 ++++-------------
1 file changed, 359 insertions(+), 1186 deletions(-)
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
index 334a8c2c7e..f3ba7ba9fd 100644
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
@@ -1,1193 +1,366 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "view-in-github",
- "colab_type": "text"
- },
- "source": [
- ""
- ]
- },
- {
- "cell_type": "code",
- "source": [
- "# Copyright The PyTorch Lightning team.\n",
- "#\n",
- "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
- "# you may not use this file except in compliance with the License.\n",
- "# You may obtain a copy of the License at\n",
- "#\n",
- "# http://www.apache.org/licenses/LICENSE-2.0\n",
- "#\n",
- "# Unless required by applicable law or agreed to in writing, software\n",
- "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
- "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
- "# See the License for the specific language governing permissions and\n",
- "# limitations under the License.\n",
- "\n",
- "# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154"
- ],
- "metadata": {
- "id": "aEGsCOim1-ri"
- },
- "execution_count": null,
- "outputs": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "ApF-xlLV-2S0"
- },
- "outputs": [],
- "source": [
- "!pip install learn2learn\n",
- "!pip install kornia\n",
- "!pip install lightning-flash\n",
- "!pip install 'lightning-flash[image]'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "OWmur8B1_9op",
- "outputId": "638a2770-55ff-42ae-b97a-10d60462610f"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "--2022-07-06 14:03:20-- https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1\n",
- "Resolving www.dropbox.com (www.dropbox.com)... 162.125.11.18, 2620:100:601c:18::a27d:612\n",
- "Connecting to www.dropbox.com (www.dropbox.com)|162.125.11.18|:443... connected.\n",
- "HTTP request sent, awaiting response... 301 Moved Permanently\n",
- "Location: /s/dl/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl [following]\n",
- "--2022-07-06 14:03:20-- https://www.dropbox.com/s/dl/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl\n",
- "Reusing existing connection to www.dropbox.com:443.\n",
- "HTTP request sent, awaiting response... 302 Found\n",
- "Location: https://uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com/cd/0/get/BonlWaNWpPqfbmVfhuL9L_PdHqR-yBb5BiC0o5SoaVx-JVtoaYW_BVD74812lIdsNVjYR6CXoEEk85lQnLjClFCRWOBmtzcltTV9uxsJm-NguKfP-qGcUjvtYmVbIh_E1BVwTxorF9_TJAsCLYjCpNBwa30197VtA4X5QZ-DQpXSqw/file?dl=1# [following]\n",
- "--2022-07-06 14:03:21-- https://uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com/cd/0/get/BonlWaNWpPqfbmVfhuL9L_PdHqR-yBb5BiC0o5SoaVx-JVtoaYW_BVD74812lIdsNVjYR6CXoEEk85lQnLjClFCRWOBmtzcltTV9uxsJm-NguKfP-qGcUjvtYmVbIh_E1BVwTxorF9_TJAsCLYjCpNBwa30197VtA4X5QZ-DQpXSqw/file?dl=1\n",
- "Resolving uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com (uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com)... 162.125.11.15, 2620:100:601c:15::a27d:60f\n",
- "Connecting to uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com (uc9143f4c9c9d1358c5dc2f8f70f.dl.dropboxusercontent.com)|162.125.11.15|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 1145461190 (1.1G) [application/binary]\n",
- "Saving to: 'mini-imagenet-cache-train.pkl?dl=1'\n",
- "\n",
- "mini-imagenet-cache 100%[===================>] 1.07G 90.0MB/s in 13s \n",
- "\n",
- "2022-07-06 14:03:35 (82.7 MB/s) - 'mini-imagenet-cache-train.pkl?dl=1' saved [1145461190/1145461190]\n",
- "\n"
- ]
- }
- ],
- "source": [
- "## Train file\n",
- "!wget https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "kmh_SrYQAKNG",
- "outputId": "caef2c13-dd4e-4410-d4f0-7bb78370d642"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "--2022-07-06 14:03:35-- https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1\n",
- "Resolving www.dropbox.com (www.dropbox.com)... 162.125.6.18, 2620:100:601c:18::a27d:612\n",
- "Connecting to www.dropbox.com (www.dropbox.com)|162.125.6.18|:443... connected.\n",
- "HTTP request sent, awaiting response... 301 Moved Permanently\n",
- "Location: /s/dl/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl [following]\n",
- "--2022-07-06 14:03:35-- https://www.dropbox.com/s/dl/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl\n",
- "Reusing existing connection to www.dropbox.com:443.\n",
- "HTTP request sent, awaiting response... 302 Found\n",
- "Location: https://uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com/cd/0/get/BomjGsUa6V-Fl0AxfbjFITON5qgxjVFWtqlEZP_XkKazHYdR86WgUNAz0N5_Y1HM52U8Q3MMNC4Nwii_ScnkYqAVUj88ezCAo79OrJ1yQtWDEFuj0dnLo9RW1VGFd04pZm9yx5YNdp1YJ5PM8cRK6TEbrsaCmJ77IFSu9BDouLzS6A/file?dl=1# [following]\n",
- "--2022-07-06 14:03:36-- https://uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com/cd/0/get/BomjGsUa6V-Fl0AxfbjFITON5qgxjVFWtqlEZP_XkKazHYdR86WgUNAz0N5_Y1HM52U8Q3MMNC4Nwii_ScnkYqAVUj88ezCAo79OrJ1yQtWDEFuj0dnLo9RW1VGFd04pZm9yx5YNdp1YJ5PM8cRK6TEbrsaCmJ77IFSu9BDouLzS6A/file?dl=1\n",
- "Resolving uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com (uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com)... 162.125.11.15, 2620:100:601c:15::a27d:60f\n",
- "Connecting to uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com (uc914baf6ffcf61a2dbe6da68796.dl.dropboxusercontent.com)|162.125.11.15|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 292661258 (279M) [application/binary]\n",
- "Saving to: 'mini-imagenet-cache-validation.pkl?dl=1'\n",
- "\n",
- "mini-imagenet-cache 100%[===================>] 279.10M 86.4MB/s in 3.4s \n",
- "\n",
- "2022-07-06 14:03:40 (82.8 MB/s) - 'mini-imagenet-cache-validation.pkl?dl=1' saved [292661258/292661258]\n",
- "\n"
- ]
- }
- ],
- "source": [
- "## Validation File\n",
- "!wget https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Re6WRCnIAwpF"
- },
- "outputs": [],
- "source": [
- "!cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'\n",
- "!cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "9OX0VzelBGqi"
- },
- "outputs": [],
- "source": [
- "!rm './mini-imagenet-cache-train.pkl?dl=1'\n",
- "!rm './mini-imagenet-cache-validation.pkl?dl=1'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "g0FHMMCF-hM7"
- },
- "outputs": [],
- "source": [
- "warnings.simplefilter(\"ignore\")\n",
- "\n",
- "from typing import Callable, Tuple, Union\n",
- "from dataclasses import dataclass\n",
- "import warnings\n",
- "\n",
- "import kornia.augmentation as Ka\n",
- "import kornia.geometry as Kg\n",
- "import learn2learn as l2l\n",
- "\n",
- "import torch\n",
- "import torchvision\n",
- "from torch import nn\n",
- "import torchvision.transforms as T\n",
- "\n",
- "from flash.core.data.io.input_transform import InputTransform\n",
- "import flash\n",
- "from flash.core.data.io.input import DataKeys\n",
- "from flash.core.data.transforms import ApplyToKeys, kornia_collate\n",
- "from flash.image import ImageClassificationData, ImageClassifier\n",
- "\n",
- "from PIL import Image\n",
- "import numpy as np"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "VlqRrJ7CA1v9"
- },
- "outputs": [],
- "source": [
- "# download MiniImagenet\n",
- "train_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"train\", download=False)\n",
- "val_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"validation\", download=False)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "x1S1mfXtuHNd"
- },
- "outputs": [],
- "source": [
- "@dataclass\n",
- "class ImageClassificationInputTransform(InputTransform):\n",
- "\n",
- " image_size: Tuple[int, int] = (196, 196)\n",
- " mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n",
- " std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n",
- "\n",
- " def per_sample_transform(self):\n",
- " return T.Compose([\n",
- " ApplyToKeys(\n",
- " DataKeys.INPUT,\n",
- " T.Compose([\n",
- " T.ToTensor(),\n",
- " Kg.Resize((196, 196)),\n",
- " # SPATIAL\n",
- " Ka.RandomHorizontalFlip(p=0.25),\n",
- " Ka.RandomRotation(degrees=90.0, p=0.25),\n",
- " Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),\n",
- " Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),\n",
- " \n",
- " # PIXEL-LEVEL\n",
- " Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness\n",
- " Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation\n",
- " Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast\n",
- " Ka.ColorJitter(hue=1 / 30, p=0.25), # hue\n",
- " Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),\n",
- " Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),\n",
- " ]),\n",
- " ),\n",
- " ApplyToKeys(DataKeys.TARGET, torch.as_tensor)]\n",
- " )\n",
- "\n",
- " def train_per_sample_transform(self):\n",
- " return T.Compose(\n",
- " [\n",
- " ApplyToKeys(\n",
- " \"input\",\n",
- " T.Compose(\n",
- " [\n",
- " T.ToTensor(),\n",
- " T.Resize(self.image_size),\n",
- " T.Normalize(self.mean, self.std),\n",
- " T.RandomHorizontalFlip(),\n",
- " T.ColorJitter(),\n",
- " T.RandomAutocontrast(),\n",
- " T.RandomPerspective(),\n",
- " ]\n",
- " ),\n",
- " ),\n",
- " ApplyToKeys(\"target\", torch.as_tensor),\n",
- " ]\n",
- " )\n",
- " def per_batch_transform_on_device(self):\n",
- " return ApplyToKeys(\n",
- " DataKeys.INPUT,\n",
- " Ka.RandomHorizontalFlip(p=0.25),\n",
- " )\n",
- " \n",
- "\n",
- " def collate(self):\n",
- " return kornia_collate\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "xfei_uix-veN"
- },
- "outputs": [],
- "source": [
- "# construct datamodule\n",
- "\n",
- "datamodule = ImageClassificationData.from_tensors(\n",
- " train_data=train_dataset.x,\n",
- " train_targets=torch.from_numpy(train_dataset.y.astype(int)),\n",
- " val_data=val_dataset.x,\n",
- " val_targets=torch.from_numpy(val_dataset.y.astype(int)),\n",
- " train_transform=ImageClassificationInputTransform,\n",
- " val_transform=ImageClassificationInputTransform,\n",
- " batch_size= 1\n",
- ")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "referenced_widgets": [
- "f41977658ad34289884749c22eb3aaef"
- ]
- },
- "id": "KZbvMCKn-zlJ",
- "outputId": "0a5a0e5e-007a-439e-85a6-1c199d1fefad"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Downloading: \"https://download.pytorch.org/models/resnet18-f37072fd.pth\" to /home/studio-lab-user/.cache/torch/hub/checkpoints/resnet18-f37072fd.pth\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "f41977658ad34289884749c22eb3aaef",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- " 0%| | 0.00/44.7M [00:00, ?B/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Using 'prototypicalnetworks' provided by learnables/learn2learn (https://github.com/learnables/learn2learn).\n"
- ]
- }
- ],
- "source": [
- "model = ImageClassifier(\n",
- " backbone=\"resnet18\",\n",
- " training_strategy=\"prototypicalnetworks\",\n",
- " training_strategy_kwargs={\n",
- " \"epoch_length\": 10 * 16,\n",
- " \"meta_batch_size\": 1,\n",
- " \"num_tasks\": 200,\n",
- " \"test_num_tasks\": 2000,\n",
- " \"ways\": datamodule.num_classes,\n",
- " \"shots\": 1,\n",
- " \"test_ways\": 5,\n",
- " \"test_shots\": 1,\n",
- " \"test_queries\": 15,\n",
- " },\n",
- " optimizer=torch.optim.Adam,\n",
- " learning_rate=0.001,\n",
- ")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 361,
- "referenced_widgets": [
- "52e6b0c277524a0295b54f2496230821",
- "89e6db5a79a2427fa117a903cfbb0541",
- "875577999f4f477dae59eff999452430",
- "c0f62b8649e54994b87ea7ba555ddd60",
- "5b264066728b41f4810b26a150a394f9",
- "0b38016cd9f34774bf090a0e15378712",
- "53266fce08a142cfbc3d949e5262b9e5",
- "caaa95ccb1474cca8ed53dd35bdab965",
- "7a78b1520e7b40da912b323133174937",
- "541af18c8f6d4983b612f902e413fb9e",
- "8ffbf193b3564109b1424699e5be8f4c",
- "123e733ade784f9ba622ef2f89fc6f65",
- "41ac3e8859a94c508e4bf2d3fd8b1e6e",
- "10959d4fa6624777af2a73b99c90e9a9",
- "bdb79707f0484da1b9810c0312511ab6",
- "f05c7723a0354bedad829e6b075bf96c",
- "f1e5d92e3b384525b294b11ce8cf1bda",
- "5c28420be300468ea8e62383b52e7fa2",
- "512440dc2426428a82f6a96042a9e1cc",
- "2fed134d52c146d989653fe21713fe5d",
- "184e40be255243fdbc9c0c41c4d1aba5",
- "533d044cabf745cfaee30e620e08c349",
- "",
- "076f86156e3146c99cd8697971eced86"
- ]
- },
- "id": "zyWdJHm6-010",
- "outputId": "9333cb28-8530-46f4-9f2c-972175a5627d"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Using 16bit native Automatic Mixed Precision (AMP)\n",
- "GPU available: True, used: True\n",
- "TPU available: False, using: 0 TPU cores\n",
- "IPU available: False, using: 0 IPUs\n",
- "HPU available: False, using: 0 HPUs\n",
- "Missing logger folder: /home/studio-lab-user/notebooks/lightning_logs\n",
- "LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]\n",
- "\n",
- " | Name | Type | Params\n",
- "-----------------------------------------------------\n",
- "0 | train_metrics | ModuleDict | 0 \n",
- "1 | val_metrics | ModuleDict | 0 \n",
- "2 | test_metrics | ModuleDict | 0 \n",
- "3 | adapter | Learn2LearnAdapter | 11.2 M\n",
- "-----------------------------------------------------\n",
- "11.2 M Trainable params\n",
- "0 Non-trainable params\n",
- "11.2 M Total params\n",
- "22.419 Total estimated model params size (MB)\n"
- ]
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Sanity Checking: 0it [00:00, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "application/vnd.jupyter.widget-view+json": {
- "model_id": "076f86156e3146c99cd8697971eced86",
- "version_major": 2,
- "version_minor": 0
- },
- "text/plain": [
- "Training: 0it [00:00, ?it/s]"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- }
- ],
- "source": [
- "trainer = flash.Trainer(\n",
- " max_epochs=1,\n",
- " gpus=1,\n",
- " precision=16,\n",
- ")\n",
- "trainer.finetune(model, datamodule=datamodule, strategy=\"no_freeze\")"
- ]
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "view-in-github"
+ },
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "aEGsCOim1-ri"
+ },
+ "outputs": [],
+ "source": [
+ "# Copyright The PyTorch Lightning team.\n",
+ "#\n",
+ "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "# you may not use this file except in compliance with the License.\n",
+ "# You may obtain a copy of the License at\n",
+ "#\n",
+ "# http://www.apache.org/licenses/LICENSE-2.0\n",
+ "#\n",
+ "# Unless required by applicable law or agreed to in writing, software\n",
+ "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "# See the License for the specific language governing permissions and\n",
+ "# limitations under the License.\n",
+ "\n",
+ "# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ApF-xlLV-2S0"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install learn2learn\n",
+ "!pip install kornia\n",
+ "!pip install lightning-flash\n",
+ "!pip install 'lightning-flash[image]'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
},
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "sOVzw75a14At"
- },
- "outputs": [],
- "source": [
- ""
- ]
- }
- ],
- "metadata": {
- "accelerator": "GPU",
+ "id": "OWmur8B1_9op",
+ "outputId": "638a2770-55ff-42ae-b97a-10d60462610f"
+ },
+ "outputs": [],
+ "source": [
+ "## Train file\n",
+ "!wget https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
"colab": {
- "collapsed_sections": [],
- "name": "image_classification_imagenette_mini",
- "provenance": [],
- "include_colab_link": true
+ "base_uri": "https://localhost:8080/"
},
- "gpuClass": "standard",
- "kernelspec": {
- "display_name": "default:Python",
- "language": "python",
- "name": "conda-env-default-py"
+ "id": "kmh_SrYQAKNG",
+ "outputId": "caef2c13-dd4e-4410-d4f0-7bb78370d642"
+ },
+ "outputs": [],
+ "source": [
+ "## Validation File\n",
+ "!wget https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Re6WRCnIAwpF"
+ },
+ "outputs": [],
+ "source": [
+ "!cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'\n",
+ "!cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "9OX0VzelBGqi"
+ },
+ "outputs": [],
+ "source": [
+ "!rm './mini-imagenet-cache-train.pkl?dl=1'\n",
+ "!rm './mini-imagenet-cache-validation.pkl?dl=1'"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "g0FHMMCF-hM7"
+ },
+ "outputs": [],
+ "source": [
+ "warnings.simplefilter(\"ignore\")\n",
+ "\n",
+ "from typing import Callable, Tuple, Union\n",
+ "from dataclasses import dataclass\n",
+ "import warnings\n",
+ "\n",
+ "import kornia.augmentation as Ka\n",
+ "import kornia.geometry as Kg\n",
+ "import learn2learn as l2l\n",
+ "\n",
+ "import torch\n",
+ "import torchvision\n",
+ "from torch import nn\n",
+ "import torchvision.transforms as T\n",
+ "\n",
+ "from flash.core.data.io.input_transform import InputTransform\n",
+ "import flash\n",
+ "from flash.core.data.io.input import DataKeys\n",
+ "from flash.core.data.transforms import ApplyToKeys, kornia_collate\n",
+ "from flash.image import ImageClassificationData, ImageClassifier\n",
+ "\n",
+ "from PIL import Image\n",
+ "import numpy as np"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "VlqRrJ7CA1v9"
+ },
+ "outputs": [],
+ "source": [
+ "# download MiniImagenet\n",
+ "train_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"train\", download=False)\n",
+ "val_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"validation\", download=False)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "x1S1mfXtuHNd"
+ },
+ "outputs": [],
+ "source": [
+ "@dataclass\n",
+ "class ImageClassificationInputTransform(InputTransform):\n",
+ "\n",
+ " image_size: Tuple[int, int] = (196, 196)\n",
+ " mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n",
+ " std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n",
+ "\n",
+ " def per_sample_transform(self):\n",
+ " return T.Compose([\n",
+ " ApplyToKeys(\n",
+ " DataKeys.INPUT,\n",
+ " T.Compose([\n",
+ " T.ToTensor(),\n",
+ " Kg.Resize((196, 196)),\n",
+ " # SPATIAL\n",
+ " Ka.RandomHorizontalFlip(p=0.25),\n",
+ " Ka.RandomRotation(degrees=90.0, p=0.25),\n",
+ " Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),\n",
+ " Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),\n",
+ " \n",
+ " # PIXEL-LEVEL\n",
+ " Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness\n",
+ " Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation\n",
+ " Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast\n",
+ " Ka.ColorJitter(hue=1 / 30, p=0.25), # hue\n",
+ " Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),\n",
+ " Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),\n",
+ " ]),\n",
+ " ),\n",
+ " ApplyToKeys(DataKeys.TARGET, torch.as_tensor)]\n",
+ " )\n",
+ "\n",
+ " def train_per_sample_transform(self):\n",
+ " return T.Compose(\n",
+ " [\n",
+ " ApplyToKeys(\n",
+ " \"input\",\n",
+ " T.Compose(\n",
+ " [\n",
+ " T.ToTensor(),\n",
+ " T.Resize(self.image_size),\n",
+ " T.Normalize(self.mean, self.std),\n",
+ " T.RandomHorizontalFlip(),\n",
+ " T.ColorJitter(),\n",
+ " T.RandomAutocontrast(),\n",
+ " T.RandomPerspective(),\n",
+ " ]\n",
+ " ),\n",
+ " ),\n",
+ " ApplyToKeys(DataKeys.TARGET, torch.as_tensor),\n",
+ " ]\n",
+ " )\n",
+ "\n",
+ " def per_batch_transform_on_device(self):\n",
+ " return ApplyToKeys(\n",
+ " DataKeys.INPUT,\n",
+ " Ka.RandomHorizontalFlip(p=0.25),\n",
+ " )\n",
+ "\n",
+ " def collate(self):\n",
+ " return kornia_collate\n"
+ ]
+ },
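+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# How the hooks above fit together (a reading of Flash's `InputTransform`,\n",
+ "# not an exhaustive spec): `per_sample_transform` runs on each sample on\n",
+ "# CPU, `train_per_sample_transform` overrides it for the train split only,\n",
+ "# `per_batch_transform_on_device` runs on each batch after the move to the\n",
+ "# GPU, and `collate` stitches the transformed samples into a batch.\n",
+ "print(ImageClassificationInputTransform.image_size)  # dataclass default"
+ ]
+ },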
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "xfei_uix-veN"
+ },
+ "outputs": [],
+ "source": [
+ "# construct datamodule\n",
+ "\n",
+ "datamodule = ImageClassificationData.from_tensors(\n",
+ " train_data=train_dataset.x,\n",
+ " train_targets=torch.from_numpy(train_dataset.y.astype(int)),\n",
+ " val_data=val_dataset.x,\n",
+ " val_targets=torch.from_numpy(val_dataset.y.astype(int)),\n",
+ " train_transform=ImageClassificationInputTransform,\n",
+ " val_transform=ImageClassificationInputTransform,\n",
+ " batch_size=1,\n",
+ ")"
+ ]
+ },
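+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# `from_tensors` infers the label set from the integer targets; the model\n",
+ "# below reuses it through `datamodule.num_classes` for the `ways` kwarg.\n",
+ "print(datamodule.num_classes)"
+ ]
+ },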
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
},
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.7"
+ "id": "KZbvMCKn-zlJ",
+ "outputId": "0a5a0e5e-007a-439e-85a6-1c199d1fefad"
+ },
+ "outputs": [],
+ "source": [
+ "model = ImageClassifier(\n",
+ " backbone=\"resnet18\",\n",
+ " training_strategy=\"prototypicalnetworks\",\n",
+ " training_strategy_kwargs={\n",
+ " \"epoch_length\": 10 * 16,\n",
+ " \"meta_batch_size\": 1,\n",
+ " \"num_tasks\": 200,\n",
+ " \"test_num_tasks\": 2000,\n",
+ " \"ways\": datamodule.num_classes,\n",
+ " \"shots\": 1,\n",
+ " \"test_ways\": 5,\n",
+ " \"test_shots\": 1,\n",
+ " \"test_queries\": 15,\n",
+ " },\n",
+ " optimizer=torch.optim.Adam,\n",
+ " learning_rate=0.001,\n",
+ ")"
+ ]
+ },
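+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Rough episode sizes implied by the kwargs above, reading them in the\n",
+ "# usual N-way K-shot vocabulary (an interpretation, not a Flash guarantee):\n",
+ "train_support = datamodule.num_classes * 1  # ways * shots\n",
+ "test_support = 5 * 1  # test_ways * test_shots\n",
+ "test_query = 5 * 15  # test_ways * test_queries\n",
+ "print(train_support, test_support, test_query)"
+ ]
+ },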
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 361
},
+ "id": "zyWdJHm6-010",
+ "outputId": "9333cb28-8530-46f4-9f2c-972175a5627d"
+ },
+ "outputs": [],
+ "source": [
+ "trainer = flash.Trainer(\n",
+ " max_epochs=1,\n",
+ " gpus=1,\n",
+ " precision=16,\n",
+ ")\n",
+ "trainer.finetune(model, datamodule=datamodule, strategy=\"no_freeze\")"
+ ]
+ },
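+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# After finetuning, validation metrics can be pulled through the standard\n",
+ "# Lightning entry point (a sketch; reuses the datamodule from training):\n",
+ "trainer.validate(model, datamodule=datamodule)"
+ ]
+ }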
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "collapsed_sections": [],
+ "include_colab_link": true,
+ "name": "image_classification_imagenette_mini",
+ "provenance": []
+ },
+ "gpuClass": "standard",
+ "kernelspec": {
+ "display_name": "default:Python",
+ "language": "python",
+ "name": "conda-env-default-py"
},
- "nbformat": 4,
- "nbformat_minor": 0
-}
\ No newline at end of file
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.7"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
From 3fa07599134eec3b34b3a8ca36ce4583ac9596f5 Mon Sep 17 00:00:00 2001
From: uakarsh <55104596+uakarsh@users.noreply.github.com>
Date: Thu, 28 Jul 2022 12:16:22 +0530
Subject: [PATCH 3/9] Removed the notebook
---
...image_classification_imagenette_mini.ipynb | 366 ------------------
1 file changed, 366 deletions(-)
delete mode 100644 flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
deleted file mode 100644
index f3ba7ba9fd..0000000000
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.ipynb
+++ /dev/null
@@ -1,366 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "view-in-github"
- },
- "source": [
- ""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "aEGsCOim1-ri"
- },
- "outputs": [],
- "source": [
- "# Copyright The PyTorch Lightning team.\n",
- "#\n",
- "# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
- "# you may not use this file except in compliance with the License.\n",
- "# You may obtain a copy of the License at\n",
- "#\n",
- "# http://www.apache.org/licenses/LICENSE-2.0\n",
- "#\n",
- "# Unless required by applicable law or agreed to in writing, software\n",
- "# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
- "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
- "# See the License for the specific language governing permissions and\n",
- "# limitations under the License.\n",
- "\n",
- "# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "ApF-xlLV-2S0"
- },
- "outputs": [],
- "source": [
- "!pip install learn2learn\n",
- "!pip install kornia\n",
- "!pip install lightning-flash\n",
- "!pip install 'lightning-flash[image]'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "OWmur8B1_9op",
- "outputId": "638a2770-55ff-42ae-b97a-10d60462610f"
- },
- "outputs": [],
- "source": [
- "## Train file\n",
- "!wget https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "kmh_SrYQAKNG",
- "outputId": "caef2c13-dd4e-4410-d4f0-7bb78370d642"
- },
- "outputs": [],
- "source": [
- "## Validation File\n",
- "!wget https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Re6WRCnIAwpF"
- },
- "outputs": [],
- "source": [
- "!cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'\n",
- "!cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "9OX0VzelBGqi"
- },
- "outputs": [],
- "source": [
- "!rm './mini-imagenet-cache-train.pkl?dl=1'\n",
- "!rm './mini-imagenet-cache-validation.pkl?dl=1'"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "g0FHMMCF-hM7"
- },
- "outputs": [],
- "source": [
- "warnings.simplefilter(\"ignore\")\n",
- "\n",
- "from typing import Callable, Tuple, Union\n",
- "from dataclasses import dataclass\n",
- "import warnings\n",
- "\n",
- "import kornia.augmentation as Ka\n",
- "import kornia.geometry as Kg\n",
- "import learn2learn as l2l\n",
- "\n",
- "import torch\n",
- "import torchvision\n",
- "from torch import nn\n",
- "import torchvision.transforms as T\n",
- "\n",
- "from flash.core.data.io.input_transform import InputTransform\n",
- "import flash\n",
- "from flash.core.data.io.input import DataKeys\n",
- "from flash.core.data.transforms import ApplyToKeys, kornia_collate\n",
- "from flash.image import ImageClassificationData, ImageClassifier\n",
- "\n",
- "from PIL import Image\n",
- "import numpy as np"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "VlqRrJ7CA1v9"
- },
- "outputs": [],
- "source": [
- "# download MiniImagenet\n",
- "train_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"train\", download=False)\n",
- "val_dataset = l2l.vision.datasets.MiniImagenet(root=\"./\", mode=\"validation\", download=False)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "x1S1mfXtuHNd"
- },
- "outputs": [],
- "source": [
- "@dataclass\n",
- "class ImageClassificationInputTransform(InputTransform):\n",
- "\n",
- " image_size: Tuple[int, int] = (196, 196)\n",
- " mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)\n",
- " std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)\n",
- "\n",
- " def per_sample_transform(self):\n",
- " return T.Compose([\n",
- " ApplyToKeys(\n",
- " DataKeys.INPUT,\n",
- " T.Compose([\n",
- " T.ToTensor(),\n",
- " Kg.Resize((196, 196)),\n",
- " # SPATIAL\n",
- " Ka.RandomHorizontalFlip(p=0.25),\n",
- " Ka.RandomRotation(degrees=90.0, p=0.25),\n",
- " Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),\n",
- " Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),\n",
- " \n",
- " # PIXEL-LEVEL\n",
- " Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness\n",
- " Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation\n",
- " Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast\n",
- " Ka.ColorJitter(hue=1 / 30, p=0.25), # hue\n",
- " Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),\n",
- " Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),\n",
- " ]),\n",
- " ),\n",
- " ApplyToKeys(DataKeys.TARGET, torch.as_tensor)]\n",
- " )\n",
- "\n",
- " def train_per_sample_transform(self):\n",
- " return T.Compose(\n",
- " [\n",
- " ApplyToKeys(\n",
- " \"input\",\n",
- " T.Compose(\n",
- " [\n",
- " T.ToTensor(),\n",
- " T.Resize(self.image_size),\n",
- " T.Normalize(self.mean, self.std),\n",
- " T.RandomHorizontalFlip(),\n",
- " T.ColorJitter(),\n",
- " T.RandomAutocontrast(),\n",
- " T.RandomPerspective(),\n",
- " ]\n",
- " ),\n",
- " ),\n",
- " ApplyToKeys(\"target\", torch.as_tensor),\n",
- " ]\n",
- " )\n",
- " def per_batch_transform_on_device(self):\n",
- " return ApplyToKeys(\n",
- " DataKeys.INPUT,\n",
- " Ka.RandomHorizontalFlip(p=0.25),\n",
- " )\n",
- " \n",
- "\n",
- " def collate(self):\n",
- " return kornia_collate\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "xfei_uix-veN"
- },
- "outputs": [],
- "source": [
- "# construct datamodule\n",
- "\n",
- "datamodule = ImageClassificationData.from_tensors(\n",
- " train_data=train_dataset.x,\n",
- " train_targets=torch.from_numpy(train_dataset.y.astype(int)),\n",
- " val_data=val_dataset.x,\n",
- " val_targets=torch.from_numpy(val_dataset.y.astype(int)),\n",
- " train_transform=ImageClassificationInputTransform,\n",
- " val_transform=ImageClassificationInputTransform,\n",
- " batch_size= 1\n",
- ")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "referenced_widgets": [
- "f41977658ad34289884749c22eb3aaef"
- ]
- },
- "id": "KZbvMCKn-zlJ",
- "outputId": "0a5a0e5e-007a-439e-85a6-1c199d1fefad"
- },
- "outputs": [],
- "source": [
- "model = ImageClassifier(\n",
- " backbone=\"resnet18\",\n",
- " training_strategy=\"prototypicalnetworks\",\n",
- " training_strategy_kwargs={\n",
- " \"epoch_length\": 10 * 16,\n",
- " \"meta_batch_size\": 1,\n",
- " \"num_tasks\": 200,\n",
- " \"test_num_tasks\": 2000,\n",
- " \"ways\": datamodule.num_classes,\n",
- " \"shots\": 1,\n",
- " \"test_ways\": 5,\n",
- " \"test_shots\": 1,\n",
- " \"test_queries\": 15,\n",
- " },\n",
- " optimizer=torch.optim.Adam,\n",
- " learning_rate=0.001,\n",
- ")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 361,
- "referenced_widgets": [
- "52e6b0c277524a0295b54f2496230821",
- "89e6db5a79a2427fa117a903cfbb0541",
- "875577999f4f477dae59eff999452430",
- "c0f62b8649e54994b87ea7ba555ddd60",
- "5b264066728b41f4810b26a150a394f9",
- "0b38016cd9f34774bf090a0e15378712",
- "53266fce08a142cfbc3d949e5262b9e5",
- "caaa95ccb1474cca8ed53dd35bdab965",
- "7a78b1520e7b40da912b323133174937",
- "541af18c8f6d4983b612f902e413fb9e",
- "8ffbf193b3564109b1424699e5be8f4c",
- "123e733ade784f9ba622ef2f89fc6f65",
- "41ac3e8859a94c508e4bf2d3fd8b1e6e",
- "10959d4fa6624777af2a73b99c90e9a9",
- "bdb79707f0484da1b9810c0312511ab6",
- "f05c7723a0354bedad829e6b075bf96c",
- "f1e5d92e3b384525b294b11ce8cf1bda",
- "5c28420be300468ea8e62383b52e7fa2",
- "512440dc2426428a82f6a96042a9e1cc",
- "2fed134d52c146d989653fe21713fe5d",
- "184e40be255243fdbc9c0c41c4d1aba5",
- "533d044cabf745cfaee30e620e08c349",
- "",
- "076f86156e3146c99cd8697971eced86"
- ]
- },
- "id": "zyWdJHm6-010",
- "outputId": "9333cb28-8530-46f4-9f2c-972175a5627d"
- },
- "outputs": [],
- "source": [
- "trainer = flash.Trainer(\n",
- " max_epochs=1,\n",
- " gpus=1,\n",
- " precision=16,\n",
- ")\n",
- "trainer.finetune(model, datamodule=datamodule, strategy=\"no_freeze\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "sOVzw75a14At"
- },
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "accelerator": "GPU",
- "colab": {
- "collapsed_sections": [],
- "include_colab_link": true,
- "name": "image_classification_imagenette_mini",
- "provenance": []
- },
- "gpuClass": "standard",
- "kernelspec": {
- "display_name": "default:Python",
- "language": "python",
- "name": "conda-env-default-py"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.7"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
From cd8c4a60993afd283fd6135afdb2a274dab816df Mon Sep 17 00:00:00 2001
From: uakarsh <55104596+uakarsh@users.noreply.github.com>
Date: Thu, 28 Jul 2022 12:25:44 +0530
Subject: [PATCH 4/9] Updated the script and removed the notebook
---
.../image_classification_imagenette_mini.py | 106 ++++++++++++++----
1 file changed, 87 insertions(+), 19 deletions(-)
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
index d1f5dea28a..ac3b458c23 100644
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
@@ -14,38 +14,78 @@
# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154
+'''
+Requirements:
+
+pip install learn2learn
+pip install kornia
+pip install lightning-flash
+pip install 'lightning-flash[image]'
+'''
+
+'''
+## Train file
+https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1
+
+## Validation File
+https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1
+
+Followed by renaming the pickle files
+cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'
+cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'
+
+'''
+
+import warnings
+
+from typing import Callable, Tuple, Union
+from dataclasses import dataclass
import warnings
import kornia.augmentation as Ka
import kornia.geometry as Kg
import learn2learn as l2l
+
import torch
import torchvision
from torch import nn
+import torchvision.transforms as T
+from flash.core.data.io.input_transform import InputTransform
import flash
from flash.core.data.io.input import DataKeys
from flash.core.data.transforms import ApplyToKeys, kornia_collate
from flash.image import ImageClassificationData, ImageClassifier
+from PIL import Image
+import numpy as np
+
warnings.simplefilter("ignore")
# download MiniImagenet
-train_dataset = l2l.vision.datasets.MiniImagenet(root="data", mode="train", download=True)
-val_dataset = l2l.vision.datasets.MiniImagenet(root="data", mode="validation", download=True)
+train_dataset = l2l.vision.datasets.MiniImagenet(root="./", mode="train", download=False)
+val_dataset = l2l.vision.datasets.MiniImagenet(root="./", mode="validation", download=False)
+
+@dataclass
+class ImageClassificationInputTransform(InputTransform):
-transform = {
- "per_sample_transform": nn.Sequential(
+ image_size: Tuple[int, int] = (196, 196)
+ mean: Union[float, Tuple[float, float, float]] = (0.485, 0.456, 0.406)
+ std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)
+
+ def per_sample_transform(self):
+ return T.Compose([
ApplyToKeys(
DataKeys.INPUT,
- nn.Sequential(
- torchvision.transforms.ToTensor(),
+ T.Compose([
+ T.ToTensor(),
Kg.Resize((196, 196)),
# SPATIAL
Ka.RandomHorizontalFlip(p=0.25),
Ka.RandomRotation(degrees=90.0, p=0.25),
Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),
Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),
+
# PIXEL-LEVEL
Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness
Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation
@@ -53,24 +93,52 @@
Ka.ColorJitter(hue=1 / 30, p=0.25), # hue
Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),
Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),
- ),
+ ]),
),
- ApplyToKeys(DataKeys.TARGET, torch.as_tensor),
- ),
- "collate": kornia_collate,
- "per_batch_transform_on_device": ApplyToKeys(
+ ApplyToKeys(DataKeys.TARGET, torch.as_tensor)]
+ )
+
+ def train_per_sample_transform(self):
+ return T.Compose(
+ [
+ ApplyToKeys(
+ "input",
+ T.Compose(
+ [
+ T.ToTensor(),
+ T.Resize(self.image_size),
+ T.Normalize(self.mean, self.std),
+ T.RandomHorizontalFlip(),
+ T.ColorJitter(),
+ T.RandomAutocontrast(),
+ T.RandomPerspective(),
+ ]
+ ),
+ ),
+ ApplyToKeys("target", torch.as_tensor),
+ ]
+ )
+ def per_batch_transform_on_device(self):
+ return ApplyToKeys(
DataKeys.INPUT,
Ka.RandomHorizontalFlip(p=0.25),
- ),
-}
+ )
+
+
+ def collate(self):
+ return kornia_collate
+
# construct datamodule
+
datamodule = ImageClassificationData.from_tensors(
train_data=train_dataset.x,
train_targets=torch.from_numpy(train_dataset.y.astype(int)),
val_data=val_dataset.x,
val_targets=torch.from_numpy(val_dataset.y.astype(int)),
- transform=transform,
+ train_transform=ImageClassificationInputTransform,
+ val_transform=ImageClassificationInputTransform,
+ batch_size= 1
)
model = ImageClassifier(
@@ -78,7 +146,7 @@
training_strategy="prototypicalnetworks",
training_strategy_kwargs={
"epoch_length": 10 * 16,
- "meta_batch_size": 4,
+ "meta_batch_size": 1,
"num_tasks": 200,
"test_num_tasks": 2000,
"ways": datamodule.num_classes,
@@ -92,9 +160,9 @@
)
trainer = flash.Trainer(
- max_epochs=200,
- gpus=2,
- accelerator="ddp_shared",
+ max_epochs=1,
+ gpus=1,
precision=16,
)
-trainer.finetune(model, datamodule=datamodule, strategy="no_freeze")
+
+trainer.finetune(model, datamodule=datamodule, strategy="no_freeze")
\ No newline at end of file
From 6a1016814e66ca8cc4495bf9c44b6d522d80c35d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 28 Jul 2022 06:56:40 +0000
Subject: [PATCH 5/9] [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
---
.../image_classification_imagenette_mini.py | 88 +++++++++----------
1 file changed, 44 insertions(+), 44 deletions(-)
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
index ac3b458c23..fb253c8837 100644
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
@@ -14,16 +14,16 @@
# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154
-'''
+"""
Requirements:
pip install learn2learn
pip install kornia
pip install lightning-flash
pip install 'lightning-flash[image]'
-'''
+"""
-'''
+"""
## Train file
https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1
@@ -34,38 +34,35 @@
cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'
cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'
-'''
+"""
import warnings
-
-from typing import Callable, Tuple, Union
from dataclasses import dataclass
-import warnings
+from typing import Callable, Tuple, Union
import kornia.augmentation as Ka
import kornia.geometry as Kg
import learn2learn as l2l
-
+import numpy as np
import torch
import torchvision
-from torch import nn
import torchvision.transforms as T
+from PIL import Image
+from torch import nn
-from flash.core.data.io.input_transform import InputTransform
import flash
from flash.core.data.io.input import DataKeys
+from flash.core.data.io.input_transform import InputTransform
from flash.core.data.transforms import ApplyToKeys, kornia_collate
from flash.image import ImageClassificationData, ImageClassifier
-from PIL import Image
-import numpy as np
-
warnings.simplefilter("ignore")
# download MiniImagenet
train_dataset = l2l.vision.datasets.MiniImagenet(root="./", mode="train", download=False)
val_dataset = l2l.vision.datasets.MiniImagenet(root="./", mode="validation", download=False)
+
@dataclass
class ImageClassificationInputTransform(InputTransform):
@@ -74,29 +71,32 @@ class ImageClassificationInputTransform(InputTransform):
std: Union[float, Tuple[float, float, float]] = (0.229, 0.224, 0.225)
def per_sample_transform(self):
- return T.Compose([
- ApplyToKeys(
- DataKeys.INPUT,
- T.Compose([
- T.ToTensor(),
- Kg.Resize((196, 196)),
- # SPATIAL
- Ka.RandomHorizontalFlip(p=0.25),
- Ka.RandomRotation(degrees=90.0, p=0.25),
- Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),
- Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),
-
- # PIXEL-LEVEL
- Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness
- Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation
- Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast
- Ka.ColorJitter(hue=1 / 30, p=0.25), # hue
- Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),
- Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),
- ]),
- ),
- ApplyToKeys(DataKeys.TARGET, torch.as_tensor)]
- )
+ return T.Compose(
+ [
+ ApplyToKeys(
+ DataKeys.INPUT,
+ T.Compose(
+ [
+ T.ToTensor(),
+ Kg.Resize((196, 196)),
+ # SPATIAL
+ Ka.RandomHorizontalFlip(p=0.25),
+ Ka.RandomRotation(degrees=90.0, p=0.25),
+ Ka.RandomAffine(degrees=1 * 5.0, shear=1 / 5, translate=1 / 20, p=0.25),
+ Ka.RandomPerspective(distortion_scale=1 / 25, p=0.25),
+ # PIXEL-LEVEL
+ Ka.ColorJitter(brightness=1 / 30, p=0.25), # brightness
+ Ka.ColorJitter(saturation=1 / 30, p=0.25), # saturation
+ Ka.ColorJitter(contrast=1 / 30, p=0.25), # contrast
+ Ka.ColorJitter(hue=1 / 30, p=0.25), # hue
+ Ka.RandomMotionBlur(kernel_size=2 * (4 // 3) + 1, angle=1, direction=1.0, p=0.25),
+ Ka.RandomErasing(scale=(1 / 100, 1 / 50), ratio=(1 / 20, 1), p=0.25),
+ ]
+ ),
+ ),
+ ApplyToKeys(DataKeys.TARGET, torch.as_tensor),
+ ]
+ )
def train_per_sample_transform(self):
return T.Compose(
@@ -118,15 +118,15 @@ def train_per_sample_transform(self):
ApplyToKeys("target", torch.as_tensor),
]
)
+
def per_batch_transform_on_device(self):
- return ApplyToKeys(
- DataKeys.INPUT,
- Ka.RandomHorizontalFlip(p=0.25),
- )
-
+ return ApplyToKeys(
+ DataKeys.INPUT,
+ Ka.RandomHorizontalFlip(p=0.25),
+ )
def collate(self):
- return kornia_collate
+ return kornia_collate
# construct datamodule
@@ -138,7 +138,7 @@ def collate(self):
val_targets=torch.from_numpy(val_dataset.y.astype(int)),
train_transform=ImageClassificationInputTransform,
val_transform=ImageClassificationInputTransform,
- batch_size= 1
+ batch_size=1,
)
model = ImageClassifier(
@@ -165,4 +165,4 @@ def collate(self):
precision=16,
)
-trainer.finetune(model, datamodule=datamodule, strategy="no_freeze")
\ No newline at end of file
+trainer.finetune(model, datamodule=datamodule, strategy="no_freeze")
From a2a2b0fab5bb7437830329d848684b24dd4e5dc4 Mon Sep 17 00:00:00 2001
From: Kushashwa Ravi Shrimali
Date: Thu, 28 Jul 2022 14:44:58 +0530
Subject: [PATCH 6/9] Apply suggestions from code review
---
.../image_classification_imagenette_mini.py | 11 +----------
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
index fb253c8837..bf98f72e30 100644
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
@@ -14,15 +14,6 @@
# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154
-"""
-Requirements:
-
-pip install learn2learn
-pip install kornia
-pip install lightning-flash
-pip install 'lightning-flash[image]'
-"""
-
"""
## Train file
https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1
@@ -102,7 +93,7 @@ def train_per_sample_transform(self):
return T.Compose(
[
ApplyToKeys(
- "input",
+ DataKeys.INPUT,
T.Compose(
[
T.ToTensor(),
From 35683cdc758db8fe1ddeb88790cc98b0651eb4f9 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
<66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 28 Jul 2022 09:17:05 +0000
Subject: [PATCH 7/9] [pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
---
.../learn2learn/image_classification_imagenette_mini.py | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
index bf98f72e30..cb95ab2984 100644
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
@@ -14,9 +14,7 @@
# adapted from https://github.com/learnables/learn2learn/blob/master/examples/vision/protonet_miniimagenet.py#L154
-"""
-## Train file
-https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1
+"""## Train file https://www.dropbox.com/s/9g8c6w345s2ek03/mini-imagenet-cache-train.pkl?dl=1
## Validation File
https://www.dropbox.com/s/ip1b7se3gij3r1b/mini-imagenet-cache-validation.pkl?dl=1
@@ -24,7 +22,6 @@
Followed by renaming the pickle files
cp './mini-imagenet-cache-train.pkl?dl=1' './mini-imagenet-cache-train.pkl'
cp './mini-imagenet-cache-validation.pkl?dl=1' './mini-imagenet-cache-validation.pkl'
-
"""
import warnings
From e8f230da7a6655326c261abb4b247acb98874f0b Mon Sep 17 00:00:00 2001
From: Kushashwa Ravi Shrimali
Date: Fri, 29 Jul 2022 12:04:54 +0530
Subject: [PATCH 8/9] Update
flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
---
.../learn2learn/image_classification_imagenette_mini.py | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
index cb95ab2984..8af2ca255d 100644
--- a/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
+++ b/flash_examples/integrations/learn2learn/image_classification_imagenette_mini.py
@@ -26,17 +26,13 @@
import warnings
from dataclasses import dataclass
-from typing import Callable, Tuple, Union
+from typing import Tuple, Union
import kornia.augmentation as Ka
import kornia.geometry as Kg
import learn2learn as l2l
-import numpy as np
import torch
-import torchvision
import torchvision.transforms as T
-from PIL import Image
-from torch import nn
import flash
from flash.core.data.io.input import DataKeys
From 58971835f0c82110f3b6a9995c2bb8aeac560496 Mon Sep 17 00:00:00 2001
From: Kushashwa Ravi Shrimali
Date: Fri, 26 Aug 2022 12:59:30 +0530
Subject: [PATCH 9/9] Add CHANGELOG entry
---
CHANGELOG.md | 2 ++
1 file changed, 2 insertions(+)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d7a9a00a0f..d0235a2668 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -46,6 +46,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
### Fixed
+- Fixed the example script integrating `lightning-flash` with `learn2learn` ([#1383](https://github.com/Lightning-AI/lightning-flash/pull/1383))
+
- Fixed JIT tracing tests where the model class was not attached to the `Trainer` class ([#1410](https://github.com/Lightning-AI/lightning-flash/pull/1410))
- Fixed examples for BaaL integration by removing usage of `on__dataloader` hooks (removed in PL 1.7.0) ([#1410](https://github.com/Lightning-AI/lightning-flash/pull/1410))