
Update docstrings (#865)
Co-authored-by: Ethan Harris <ewah1g13@soton.ac.uk>
SkafteNicki and ethanwharris authored Nov 8, 2021
1 parent bc09c64 commit 4a8615f
Showing 7 changed files with 68 additions and 65 deletions.
46 changes: 38 additions & 8 deletions flash/graph/classification/model.py
@@ -11,10 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Type, Union
from typing import Any, Callable, List, Optional, Type, Union

import torch
from torch import nn
from torch import nn, Tensor
from torch.nn import functional as F
from torch.nn import Linear

@@ -25,31 +25,60 @@
if _GRAPH_AVAILABLE:
from torch_geometric.nn import BatchNorm, GCNConv, global_mean_pool, MessagePassing
else:
MessagePassing = None
GCNConv = None
MessagePassing = object
GCNConv = object


class GraphBlock(nn.Module):
def __init__(self, nc_input, nc_output, conv_cls, act=nn.ReLU(), **conv_kwargs):
"""Graph convolutional block.
Args:
nc_input: number of input channels
nc_output: number of output channels
conv_cls: graph convolutional class to use
act: activation function to use
**conv_kwargs: additional kwargs used for initialization of convolutional operator
"""

def __init__(
self,
nc_input: int,
nc_output: int,
conv_cls: nn.Module,
act: Union[Callable, nn.Module] = nn.ReLU(),
**conv_kwargs
):
super().__init__()
self.conv = conv_cls(nc_input, nc_output, **conv_kwargs)
self.norm = BatchNorm(nc_output)
self.act = act

def forward(self, x, edge_index, edge_weight):
def forward(self, x: Tensor, edge_index: Tensor, edge_weight: Optional[Tensor] = None) -> Tensor:
x = self.conv(x, edge_index, edge_weight=edge_weight)
x = self.norm(x)
return self.act(x)


class BaseGraphModel(nn.Module):
"""Base convolutional graph model.
Args:
num_features: number of input features
hidden_channels: list of integers with the number of channels in all the hidden layers.
The length of the list determines the depth of the network.
num_classes: integer determining the number of classes
conv_cls: graph convolutional class to use as building blocks
act: activation function to use between layers
**conv_kwargs: additional kwargs used for initialization of convolutional operator
"""

def __init__(
self,
num_features: int,
hidden_channels: List[int],
num_classes: int,
conv_cls: Type[MessagePassing],
act=nn.ReLU(),
act: Union[Callable, nn.Module] = nn.ReLU(),
**conv_kwargs: Any
):
super().__init__()
@@ -67,7 +96,7 @@ def __init__(

self.lin = Linear(nc_output, num_classes)

def forward(self, data):
def forward(self, data: Any) -> Tensor:
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
# 1. Obtain node embeddings
for block in self.blocks:
@@ -96,6 +125,7 @@ class GraphClassifier(ClassificationTask):
metrics: Metrics to compute for training and evaluation.
model: GraphNN used, defaults to BaseGraphModel.
conv_cls: kind of convolution used in model, defaults to GCNConv
**conv_kwargs: additional kwargs used for initialization of convolutional operator
"""

required_extras = "graph"
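
For reference, a minimal usage sketch of the BaseGraphModel documented above. This is illustrative only: it assumes the graph extra (torch_geometric) is installed, and the toy graph, channel widths, and class count are made-up values.

import torch
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv

from flash.graph.classification.model import BaseGraphModel

# Toy graph: 4 nodes with 3 features each and 4 directed edges (illustrative values).
data = Data(
    x=torch.randn(4, 3),
    edge_index=torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]]),
    batch=torch.zeros(4, dtype=torch.long),  # all nodes belong to a single graph
)

# Two hidden GCN blocks of width 16, followed by the final linear classifier.
model = BaseGraphModel(num_features=3, hidden_channels=[16, 16], num_classes=2, conv_cls=GCNConv)
logits = model(data)  # expected shape: (1, 2) after mean pooling over the single graph
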
20 changes: 5 additions & 15 deletions flash/image/detection/model.py
@@ -25,23 +25,13 @@ class ObjectDetector(AdapterTask):
:ref:`object_detection`.
Args:
num_classes: the number of classes for detection, including background
model: a string of :attr:`_models`. Defaults to 'fasterrcnn'.
backbone: Pretrained backbone CNN architecture. Constructs a model with a
ResNet-50-FPN backbone when no backbone is specified.
fpn: If True, creates a Feature Pyramid Network on top of Resnet based CNNs.
pretrained: if true, returns a model pre-trained on COCO train2017
pretrained_backbone: if true, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers: number of trainable resnet layers starting from final block.
Only applicable for `fasterrcnn`.
loss: the function(s) to update the model with. Has no effect for torchvision detection models.
metrics: The provided metrics. All metrics here will be logged to progress bar and the respective logger.
Changing this argument currently has no effect.
num_classes: The number of object classes.
backbone: String indicating the backbone CNN architecture to use.
head: String indicating the head module to use on top of the backbone.
pretrained: Whether the model should be loaded with its pretrained weights.
optimizer: Optimizer to use for training.
lr_scheduler: The LR scheduler to use during training.
pretrained: Whether the model from torchvision should be loaded with its pretrained weights.
Has no effect for custom models.
learning_rate: The learning rate to use for training
learning_rate: The learning rate to use for training.
output: The :class:`~flash.core.data.io.output.Output` to use when formatting prediction outputs.
kwargs: additional kwargs necessary for initializing the backbone task
"""
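
For context, a hypothetical way to construct the ObjectDetector with the arguments documented above. The head/backbone strings and hyper-parameters below are assumptions, not defaults: valid options come from the registries of whichever detection adapters (e.g. IceVision) are installed.

from flash.image.detection.model import ObjectDetector

# Illustrative configuration: 81 classes (80 object classes + background).
model = ObjectDetector(
    num_classes=81,
    head="retinanet",         # assumed head name; check the registered heads
    backbone="resnet18_fpn",  # assumed backbone name; check the registered backbones
    pretrained=True,
    learning_rate=1e-3,
)
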
10 changes: 6 additions & 4 deletions flash/image/face_detection/model.py
@@ -54,17 +54,19 @@ def transform(self, sample: Any) -> Dict[str, Any]:
class FaceDetector(Task):
"""The ``FaceDetector`` is a :class:`~flash.Task` for detecting faces in images.
For more details, see
:ref:`face_detection`.
For more details, see :ref:`face_detection`.
Args:
model: a string of :attr:`_models`. Defaults to 'lffd_slim'.
pretrained: Whether the model from fastface should be loaded with its pretrained weights.
loss: the function(s) to update the model with. Has no effect for fastface models.
loss_fn: the function(s) to update the model with. Has no effect for fastface models.
metrics: The provided metrics. All metrics here will be logged to progress bar and the respective logger.
Changing this argument currently has no effect.
optimizer: Optimizer to use for training.
lr_scheduler: The LR scheduler to use during training.
learning_rate: The learning rate to use for training
learning_rate: The learning rate to use for training.
output: The :class:`~flash.core.data.io.output.Output` to use when formatting prediction outputs.
kwargs: additional kwargs necessary for initializing the face detector backbone
"""

required_extras: str = "image"
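
A short, hypothetical construction of the FaceDetector described above, assuming the fastface dependency is installed; 'lffd_slim' is the default named in the docstring, and the learning rate is an arbitrary example.

from flash.image.face_detection.model import FaceDetector

# "lffd_slim" is the default fastface architecture per the docstring above.
face_detector = FaceDetector(model="lffd_slim", pretrained=True, learning_rate=1e-4)
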
23 changes: 7 additions & 16 deletions flash/image/instance_segmentation/model.py
@@ -29,24 +29,15 @@ class InstanceSegmentation(AdapterTask):
:ref:`object_detection`.
Args:
num_classes: the number of classes for detection, including background
model: a string of :attr:`_models`. Defaults to 'fasterrcnn'.
backbone: Pretrained backbone CNN architecture. Constructs a model with a
ResNet-50-FPN backbone when no backbone is specified.
fpn: If True, creates a Feature Pyramid Network on top of Resnet based CNNs.
pretrained: if true, returns a model pre-trained on COCO train2017
pretrained_backbone: if true, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers: number of trainable resnet layers starting from final block.
Only applicable for `fasterrcnn`.
loss: the function(s) to update the model with. Has no effect for torchvision detection models.
metrics: The provided metrics. All metrics here will be logged to progress bar and the respective logger.
Changing this argument currently has no effect.
num_classes: The number of object classes.
backbone: String indicating the backbone CNN architecture to use.
head: String indicating the head module to use on top of the backbone.
pretrained: Whether the model should be loaded with its pretrained weights.
optimizer: Optimizer to use for training.
lr_scheduler: The LR scheduler to use during training.
pretrained: Whether the model from torchvision should be loaded with its pretrained weights.
Has no effect for custom models.
learning_rate: The learning rate to use for training
learning_rate: The learning rate to use for training.
output: The :class:`~flash.core.data.io.output.Output` to use when formatting prediction outputs.
**kwargs: additional kwargs used for initializing the task
"""

heads: FlashRegistry = INSTANCE_SEGMENTATION_HEADS
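
For context, a hypothetical InstanceSegmentation setup using the arguments documented above. The head/backbone strings are assumptions; valid names come from the INSTANCE_SEGMENTATION_HEADS registry and the installed adapters.

from flash.image.instance_segmentation.model import InstanceSegmentation

# Illustrative values only.
model = InstanceSegmentation(
    num_classes=81,
    head="mask_rcnn",         # assumed head name
    backbone="resnet18_fpn",  # assumed backbone name
    pretrained=True,
)
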
28 changes: 10 additions & 18 deletions flash/image/keypoint_detection/model.py
@@ -21,28 +21,20 @@


class KeypointDetector(AdapterTask):
"""The ``ObjectDetector`` is a :class:`~flash.Task` for detecting objects in images. For more details, see
:ref:`object_detection`.
"""The ``KeypointDetector`` is a :class:`~flash.Task` for detecting keypoints in images. For more details, see
:ref:`keypoint_detection`.
Args:
num_classes: the number of classes for detection, including background
model: a string of :attr:`_models`. Defaults to 'fasterrcnn'.
backbone: Pretrained backbone CNN architecture. Constructs a model with a
ResNet-50-FPN backbone when no backbone is specified.
fpn: If True, creates a Feature Pyramid Network on top of Resnet based CNNs.
pretrained: if true, returns a model pre-trained on COCO train2017
pretrained_backbone: if true, returns a model with backbone pre-trained on Imagenet
trainable_backbone_layers: number of trainable resnet layers starting from final block.
Only applicable for `fasterrcnn`.
loss: the function(s) to update the model with. Has no effect for torchvision detection models.
metrics: The provided metrics. All metrics here will be logged to progress bar and the respective logger.
Changing this argument currently has no effect.
num_keypoints: Number of keypoints to detect.
num_classes: The number of keypoint classes.
backbone: String indicating the backbone CNN architecture to use.
head: String indicating the head module to use on top of the backbone.
pretrained: Whether the model should be loaded with its pretrained weights.
optimizer: Optimizer to use for training.
lr_scheduler: The LR scheduler to use during training.
pretrained: Whether the model from torchvision should be loaded with its pretrained weights.
Has no effect for custom models.
learning_rate: The learning rate to use for training
learning_rate: The learning rate to use for training.
output: The :class:`~flash.core.data.io.output.Output` to use when formatting prediction outputs.
**kwargs: additional kwargs used for initializing the task
"""

heads: FlashRegistry = KEYPOINT_DETECTION_HEADS
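
Similarly, a hypothetical KeypointDetector configuration matching the new docstring. The keypoint/class counts and the head/backbone strings are assumptions (a COCO-person-style setup); valid names come from the KEYPOINT_DETECTION_HEADS registry.

from flash.image.keypoint_detection.model import KeypointDetector

# Illustrative: 17 keypoints (COCO person convention) and 2 classes (person + background).
model = KeypointDetector(
    num_keypoints=17,
    num_classes=2,
    head="keypoint_rcnn",     # assumed head name
    backbone="resnet18_fpn",  # assumed backbone name
    pretrained=True,
)
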
3 changes: 0 additions & 3 deletions flash/pointcloud/detection/model.py
@@ -41,7 +41,6 @@ class PointCloudObjectDetector(Task):
pointcloud data.
Args:
num_features: The number of features (elements) in the input data.
num_classes: The number of classes (outputs) for this :class:`~flash.core.model.Task`.
backbone: The backbone name (or a tuple of ``nn.Module``, output size) to use.
backbone_kwargs: Any additional kwargs to pass to the backbone constructor.
@@ -52,7 +51,6 @@ class PointCloudObjectDetector(Task):
metrics: Any metrics to use with this :class:`~flash.core.model.Task`. If ``None``, a default will be selected
by the :class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
learning_rate: The learning rate for the optimizer.
multi_label: If ``True``, this will be treated as a multi-label classification problem.
output: The :class:`~flash.core.data.io.output.Output` to use when formatting prediction outputs.
lambda_loss_cls: The value to scale the loss classification.
lambda_loss_bbox: The value to scale the bounding boxes loss.
@@ -67,7 +65,6 @@ def __init__(
num_classes: int,
backbone: Union[str, Tuple[nn.Module, int]] = "pointpillars_kitti",
backbone_kwargs: Optional[Dict] = None,
head: Optional[nn.Module] = None,
loss_fn: LOSS_FN_TYPE = None,
optimizer: OPTIMIZER_TYPE = "Adam",
lr_scheduler: LR_SCHEDULER_TYPE = None,
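
A minimal, hypothetical construction of the PointCloudObjectDetector documented above, assuming the pointcloud extra (Open3D-ML) is installed; the class count is an arbitrary example and "pointpillars_kitti" is the default backbone shown in the signature.

from flash.pointcloud.detection.model import PointCloudObjectDetector

# Illustrative: 3 object classes with the default KITTI PointPillars backbone.
model = PointCloudObjectDetector(
    num_classes=3,
    backbone="pointpillars_kitti",
    learning_rate=1e-3,
)
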
3 changes: 2 additions & 1 deletion flash/pointcloud/segmentation/model.py
@@ -72,10 +72,11 @@ class PointCloudSegmentation(ClassificationTask):
pointcloud data.
Args:
num_features: The number of features (elements) in the input data.
num_classes: The number of classes (outputs) for this :class:`~flash.core.model.Task`.
backbone: The backbone name (or a tuple of ``nn.Module``, output size) to use.
backbone_kwargs: Any additional kwargs to pass to the backbone constructor.
head: a `nn.Module` to use on top of the backbone. The output dimension should match the `num_classes`
argument. If not set, it will default to a single linear layer.
loss_fn: The loss function to use. If ``None``, a default will be selected by the
:class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
optimizer: Optimizer to use for training.
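
Finally, a hypothetical PointCloudSegmentation setup using the arguments documented above, again assuming the pointcloud extra is installed. The backbone string and class count are assumptions; valid backbone names come from the point cloud segmentation backbone registry.

from flash.pointcloud.segmentation.model import PointCloudSegmentation

# Illustrative: a SemanticKITTI-style setup with 19 semantic classes.
model = PointCloudSegmentation(
    num_classes=19,
    backbone="randlanet_semantic_kitti",  # assumed registered backbone name
)
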
