Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

docs: add doc strings to the neural networks architectures #630

Merged
merged 2 commits into from
Jul 19, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -322,10 +322,10 @@ Let's define a `Trainer` instance, using for example of the already existing `GI

```python
from deeprank2.trainer import Trainer
from deeprank2.neuralnets.gnn.naive_gnn import NaiveNetwork
from deeprank2.neuralnets.gnn.vanilla_gnn import VanillaNetwork

trainer = Trainer(
NaiveNetwork,
VanillaNetwork,
dataset_train,
dataset_val,
dataset_test
Expand Down Expand Up @@ -389,11 +389,11 @@ Finally, the `Trainer` instance can be defined and the new data can be tested:

```python
from deeprank2.trainer import Trainer
from deeprank2.neuralnets.gnn.naive_gnn import NaiveNetwork
from deeprank2.neuralnets.gnn.vanilla_gnn import VanillaNetwork
from deeprank2.utils.exporters import HDF5OutputExporter

trainer = Trainer(
NaiveNetwork,
VanillaNetwork,
dataset_test = dataset_test,
pretrained_model = "<pretrained_model_path>",
output_exporters = [HDF5OutputExporter("<output_folder_path>")]
Expand Down
22 changes: 20 additions & 2 deletions deeprank2/neuralnets/cnn/model3d.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,16 @@
# ----------------------------------------------------------------------


class CnnRegression(nn.Module): # noqa: D101
class CnnRegression(nn.Module):
"""Convolutional Neural Network architecture for regression.

This type of network is used to predict a single scalar value of a continuous variable.
gcroci2 marked this conversation as resolved.
Show resolved Hide resolved

Args:
num_features: Number of features in the input data.
box_shape: Shape of the input data.
"""

def __init__(self, num_features: int, box_shape: tuple[int]):
super().__init__()

Expand Down Expand Up @@ -76,7 +85,16 @@ def forward(self, data):
# ----------------------------------------------------------------------


class CnnClassification(nn.Module): # noqa: D101
class CnnClassification(nn.Module):
"""Convolutional Neural Network architecture for binary classification.

This type of network is used to predict the class of an input data point.

Args:
num_features: Number of features in the input data.
box_shape: Shape of the input data.
"""

def __init__(self, num_features, box_shape):
super().__init__()

Expand Down
55 changes: 52 additions & 3 deletions deeprank2/neuralnets/gnn/alignmentnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,19 @@
__author__ = "Daniel-Tobias Rademaker"


class GNNLayer(nn.Module): # noqa: D101
class GNNLayer(nn.Module):
"""Custom-defined layer of a Graph Neural Network.

Args:
nmb_edge_projection: Number of features in the edge projection.
nmb_hidden_attr: Number of features in the hidden attributes.
nmb_output_features: Number of output features.
message_vector_length: Length of the message vector.
nmb_mlp_neurons: Number of neurons in the MLP.
act_fn: Activation function. Defaults to nn.SiLU().
gcroci2 marked this conversation as resolved.
Show resolved Hide resolved
is_last_layer: Whether this is the last layer of the GNN. Defaults to True.
"""

def __init__(
self,
nmb_edge_projection,
Expand Down Expand Up @@ -104,7 +116,26 @@ def output(self, hidden_features, get_attention=True):
return output


class SuperGNN(nn.Module): # noqa: D101
class SuperGNN(nn.Module):
"""SuperGNN is a class that defines multiple GNN layers.

In particular, the `preproc_edge_mlp` and `preproc_node_mlp` are meant to
preprocess the edge and node attributes, respectively.

The `modlist` is a list of GNNLayer objects.

Args:
nmb_edge_attr: Number of edge features.
nmb_node_attr: Number of node features.
nmb_hidden_attr: Number of hidden features.
nmb_mlp_neurons: Number of neurons in the MLP.
nmb_edge_projection: Number of edge projections.
nmb_gnn_layers: Number of GNN layers.
nmb_output_features: Number of output features.
message_vector_length: Length of the message vector.
act_fn: Activation function. Defaults to nn.SiLU().
"""

def __init__(
self,
nmb_edge_attr,
Expand Down Expand Up @@ -172,7 +203,25 @@ def run_through_network(self, edges, edge_attr, node_attr, with_output_attention
return self.modlist[-1].output(node_attr, True) # (boolean-positional-value-in-call)


class AlignmentGNN(SuperGNN): # noqa: D101
class AlignmentGNN(SuperGNN):
"""Architecture based on multiple :class:`GNNLayer` layers, suited for both regression and classification tasks.

It applies different layers to the nodes and edges of a graph (`preproc_edge_mlp` and `preproc_node_mlp`),
and then applies multiple GNN layers (`modlist`).

Args:
nmb_edge_attr: Number of edge features.
nmb_node_attr: Number of node features.
nmb_output_features: Number of output features.
nmb_hidden_attr: Number of hidden features.
message_vector_length: Length of the message vector.
nmb_mlp_neurons: Number of neurons in the MLP.
nmb_gnn_layers: Number of GNN layers.
nmb_edge_projection: Number of edge projections.
act_fn: Activation function. Defaults to nn.SiLU().

"""

def __init__(
self,
nmb_edge_attr,
Expand Down
17 changes: 13 additions & 4 deletions deeprank2/neuralnets/gnn/foutnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,13 @@
class FoutLayer(nn.Module):
"""FoutLayer.
gcroci2 marked this conversation as resolved.
Show resolved Hide resolved

This layer is described by eq. (1) of
Protein Interface Predition using Graph Convolutional Network
This layer is described by eq. (1) of Protein Interface Prediction using Graph Convolutional Network
by Alex Fout et al. NIPS 2018.

Args:
in_channels: Size of each input sample.
out_channels: Size of each output sample.
bias: If set to :obj:`False`, the layer will not learn an additive bias. Defaults to True.
bias: If set to `False`, the layer will not learn an additive bias. Defaults to True.
"""

def __init__(self, in_channels: int, out_channels: int, bias: bool = True):
Expand Down Expand Up @@ -70,7 +69,17 @@ def __repr__(self):
return f"{self.__class__.__name__}({self.in_channels}, {self.out_channels})"


class FoutNet(nn.Module): # noqa: D101
class FoutNet(nn.Module):
"""Architecture based on the FoutLayer, suited for both regression and classification tasks.

It also uses community pooling to reduce the number of nodes.

Args:
input_shape: Size of each input sample.
output_shape: Size of each output sample. Defaults to 1.
input_shape_edge: Size of each input edge. Defaults to None.
"""

def __init__(
self,
input_shape,
Expand Down
26 changes: 21 additions & 5 deletions deeprank2/neuralnets/gnn/ginet.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,16 @@
# ruff: noqa: ANN001, ANN201


class GINetConvLayer(nn.Module): # noqa: D101
class GINetConvLayer(nn.Module):
"""GiNet convolutional layer for graph neural networks.

Args:
in_channels: Number of input features.
out_channels: Number of output features.
number_edge_features: Number of edge features. Defaults to 1.
bias: If set to `False`, the layer will not learn an additive bias. Defaults to False.
"""

def __init__(self, in_channels, out_channels, number_edge_features=1, bias=False):
super().__init__()

Expand Down Expand Up @@ -54,10 +63,17 @@ def __repr__(self):
return f"{self.__class__.__name__}({self.in_channels}, {self.out_channels})"


class GINet(nn.Module): # noqa: D101
# input_shape -> number of node input features
# output_shape -> number of output value per graph
# input_shape_edge -> number of edge input features
class GINet(nn.Module):
"""Architecture based on the GiNet convolutional layer, suited for both regression and classification tasks.

It uses community pooling to reduce the number of nodes.

Args:
input_shape: Number of input features.
output_shape: Number of output values per graph. Defaults to 1.
input_shape_edge: Number of edge input features. Defaults to 1.
"""

def __init__(self, input_shape, output_shape=1, input_shape_edge=1):
super().__init__()
self.conv1 = GINetConvLayer(input_shape, 16, input_shape_edge)
Expand Down
24 changes: 19 additions & 5 deletions deeprank2/neuralnets/gnn/ginet_nocluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,16 @@
# ruff: noqa: ANN001, ANN201


class GINetConvLayer(nn.Module): # noqa: D101
class GINetConvLayer(nn.Module):
"""GiNet convolutional layer for graph neural networks.

Args:
in_channels: Number of input features.
out_channels: Number of output features.
number_edge_features: Number of edge features. Defaults to 1.
bias: If set to `False`, the layer will not learn an additive bias. Defaults to False.
"""

def __init__(self, in_channels, out_channels, number_edge_features=1, bias=False):
super().__init__()

Expand Down Expand Up @@ -51,10 +60,15 @@ def __repr__(self):
return f"{self.__class__.__name__}({self.in_channels}, {self.out_channels})"


class GINet(nn.Module): # noqa: D101
# input_shape -> number of node input features
# output_shape -> number of output value per graph
# input_shape_edge -> number of edge input features
class GINet(nn.Module):
"""Architecture based on the GiNet convolutional layer, suited for both regression and classification tasks.

Args:
input_shape: Number of input features.
output_shape: Number of output values per graph. Defaults to 1.
input_shape_edge: Number of edge input features. Defaults to 1.
"""

def __init__(self, input_shape, output_shape=1, input_shape_edge=1):
super().__init__()
self.conv1 = GINetConvLayer(input_shape, 16, input_shape_edge)
Expand Down
14 changes: 12 additions & 2 deletions deeprank2/neuralnets/gnn/sgat.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ class SGraphAttentionLayer(nn.Module):
Args:
in_channels: Size of each input sample.
out_channels: Size of each output sample.
bias: If set to :obj:`False`, the layer will not learn an additive bias. Defaults to True.
bias: If set to `False`, the layer will not learn an additive bias. Defaults to True.
""" # noqa: D301

def __init__(
Expand Down Expand Up @@ -87,7 +87,17 @@ def __repr__(self):
return f"{self.__class__.__name__}({self.in_channels}, {self.out_channels})"


class SGAT(nn.Module): # noqa:D101
class SGAT(nn.Module):
"""Simple graph attention network, suited for both regression and classification tasks.

It uses two graph attention layers and an MLP to predict the output.

Args:
input_shape: Size of each input sample.
output_shape: Size of each output sample. Defaults to 1.
input_shape_edge: Size of each input edge. Defaults to None.
"""

def __init__(
self,
input_shape,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,14 @@
# ruff: noqa: ANN001, ANN201


class NaiveConvolutionalLayer(nn.Module): # noqa: D101
class VanillaConvolutionalLayer(nn.Module):
"""Vanilla convolutional layer for graph neural networks.

Args:
count_node_features: Number of node features.
count_edge_features: Number of edge features.
"""

def __init__(self, count_node_features, count_edge_features):
super().__init__()
message_size = 32
Expand All @@ -31,18 +38,21 @@ def forward(self, node_features, edge_node_indices, edge_features):
return self._node_mlp(node_input)


class NaiveNetwork(nn.Module): # noqa: D101
def __init__(self, input_shape: int, output_shape: int, input_shape_edge: int):
"""NaiveNetwork.
class VanillaNetwork(nn.Module):
"""Vanilla graph neural network architecture suited for both regression and classification tasks.

Args:
input_shape: Number of node input features.
output_shape: Number of output value per graph.
input_shape_edge: Number of edge input features.
"""
It uses two vanilla convolutional layers and an MLP to predict the output.

Args:
input_shape: Number of node input features.
output_shape: Number of output values per graph.
input_shape_edge: Number of edge input features.
"""

def __init__(self, input_shape: int, output_shape: int, input_shape_edge: int):
super().__init__()
self._external1 = NaiveConvolutionalLayer(input_shape, input_shape_edge)
self._external2 = NaiveConvolutionalLayer(input_shape, input_shape_edge)
self._external1 = VanillaConvolutionalLayer(input_shape, input_shape_edge)
self._external2 = VanillaConvolutionalLayer(input_shape, input_shape_edge)
hidden_size = 128
self._graph_mlp = nn.Sequential(nn.Linear(input_shape, hidden_size), nn.ReLU(), nn.Linear(hidden_size, output_shape))

Expand Down
4 changes: 2 additions & 2 deletions docs/source/deeprank2.neuralnets.rst
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,10 @@ deeprank2.neuralnets.gnn.ginet\_nocluster
:undoc-members:
:show-inheritance:

deeprank2.neuralnets.gnn.naive\_gnn
deeprank2.neuralnets.gnn.vanilla\_gnn
------------------------------------------

.. automodule:: deeprank2.neuralnets.gnn.naive_gnn
.. automodule:: deeprank2.neuralnets.gnn.vanilla_gnn
:members:
:undoc-members:
:show-inheritance:
Expand Down
12 changes: 6 additions & 6 deletions docs/source/getstarted.md
Original file line number Diff line number Diff line change
Expand Up @@ -312,10 +312,10 @@ Let's define a `Trainer` instance, using for example of the already existing `GI

```python
from deeprank2.trainer import Trainer
from deeprank2.neuralnets.gnn.naive_gnn import NaiveNetwork
from deeprank2.neuralnets.gnn.vanilla_gnn import VanillaNetwork

trainer = Trainer(
NaiveNetwork,
VanillaNetwork,
dataset_train,
dataset_val,
dataset_test
Expand Down Expand Up @@ -366,11 +366,11 @@ The user can specify a DeepRank2 exporter or a custom one in `output_exporters`

```python
from deeprank2.trainer import Trainer
from deeprank2.neuralnets.gnn.naive_gnn import NaiveNetwork
from deeprank2.neuralnets.gnn.vanilla_gnn import VanillaNetwork
from deeprank2.utils.exporters import HDF5OutputExporter

trainer = Trainer(
NaiveNetwork,
VanillaNetwork,
dataset_train,
dataset_val,
dataset_test,
Expand Down Expand Up @@ -452,11 +452,11 @@ Finally, the `Trainer` instance can be defined and the new data can be tested:

```python
from deeprank2.trainer import Trainer
from deeprank2.neuralnets.gnn.naive_gnn import NaiveNetwork
from deeprank2.neuralnets.gnn.vanilla_gnn import VanillaNetwork
from deeprank2.utils.exporters import HDF5OutputExporter

trainer = Trainer(
NaiveNetwork,
VanillaNetwork,
dataset_test = dataset_test,
pretrained_model = "<pretrained_model_path>",
output_exporters = [HDF5OutputExporter("<output_folder_path>")]
Expand Down
Loading
Loading