
Commit 88546ea: Reformatted, added header

AdityaKane2001 committed Oct 20, 2021 (1 parent: 2e2a7ec)
Showing 15 changed files with 1,495 additions and 1,464 deletions.
2,661 changes: 1,269 additions & 1,392 deletions README.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion setup.cfg
@@ -1,2 +1,2 @@
 [metadata]
-license_file = LICENSE
+license_file = LICENSE
2 changes: 1 addition & 1 deletion setup.py
@@ -8,7 +8,7 @@
author="Sidney Radcliffe",
author_email="sidneyradcliffe@gmail.com",
description="Minimal automatic differentiation implementation in Python, NumPy.",
long_description=pathlib.Path('README.md').read_text(),
long_description=pathlib.Path("README.md").read_text(),
long_description_content_type="text/markdown",
url="https://github.com/sradc/smallpebble",
license="Apache License 2.0",
14 changes: 14 additions & 0 deletions smallpebble/__init__.py
@@ -1,3 +1,17 @@
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from smallpebble.array_library import use
 from smallpebble.core import *
 from smallpebble.nn import *
14 changes: 14 additions & 0 deletions smallpebble/array_library.py
@@ -1,3 +1,17 @@
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 """This module acts as a proxy, allowing NumPy/CuPy to be switched dynamically.
 The default library is NumPy.
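For orientation: every SmallPebble module imports this proxy as `np` (see `import smallpebble.array_library as np` below), so swapping the backend is a one-liner. A minimal usage sketch, assuming CuPy and a CUDA GPU are available; `use` is the function re-exported from `smallpebble/__init__.py` above:

import numpy
import smallpebble as sp

x = sp.Variable(numpy.ones([2, 3]))  # array ops run on NumPy by default

import cupy
sp.use(cupy)   # subsequent array ops dispatch to CuPy
sp.use(numpy)  # ...and back to NumPy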
16 changes: 15 additions & 1 deletion smallpebble/core/__init__.py
@@ -1 +1,15 @@
-from smallpebble.core.ops import *
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from smallpebble.core.ops import *
56 changes: 29 additions & 27 deletions smallpebble/core/ops.py
@@ -1,3 +1,17 @@
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # ----------------
 # ---------------- AUTOMATIC DIFFERENTIATION
 # ----------------
@@ -8,6 +22,7 @@
 import numpy
 import smallpebble.array_library as np
 
+
 class Variable:
     "To be used in calculations to be differentiated."
 
@@ -59,8 +74,7 @@ def __call__(self, *args):
     def run(self):
         "Compute the value of this node."
         if not self.arguments:
-            raise AssignmentError(
-                f"No arguments have been assigned to {self}.")
+            raise AssignmentError(f"No arguments have been assigned to {self}.")
         argvals = (a.run() if hasattr(a, "run") else a for a in self.arguments)
         return self.function(*argvals)

@@ -75,6 +89,7 @@ def assign_value(self, variable):
"Assign a Variable instance to this placeholder."
self.arguments = [variable]


def get_gradients(variable):
"Compute the first derivatives of `variable` with respect to child variables."
gradients = defaultdict(lambda: 0)
@@ -93,10 +108,10 @@ def compute_gradients(variable, path_value):
 def reshape(a, shape):
     "Reshape `a` into shape `shape`."
     value = np.reshape(a.array, shape)
-    local_gradients = [
-        (a, lambda path_value: path_value.reshape(a.array.shape))]
+    local_gradients = [(a, lambda path_value: path_value.reshape(a.array.shape))]
     return Variable(value, local_gradients)
 
+
 def mul(a, b):
     "Elementwise multiplication."
     value = a.array * b.array
@@ -115,7 +130,6 @@ def neg(a):
     return Variable(value, local_gradients)
 
 
-
 def add(a, b):
     "Elementwise addition."
     value = a.array + b.array
@@ -140,6 +154,7 @@ def add_at(a, indices, b):
     ]
     return Variable(value, local_gradients)
 
+
 def div(a, b):
     "Elementwise division."
     value = a.array / b.array
@@ -158,13 +173,11 @@ def enable_broadcast(a, b, matmul=False):
     a_repeatdims, b_repeatdims = broadcastinfo(a_shape, b_shape)
 
     def multiply_by_locgrad_a(path_value):
-        path_value = np.sum(
-            path_value, axis=a_repeatdims).reshape(a.array.shape)
+        path_value = np.sum(path_value, axis=a_repeatdims).reshape(a.array.shape)
         return np.zeros(a.array.shape, a.array.dtype) + path_value
 
     def multiply_by_locgrad_b(path_value):
-        path_value = np.sum(
-            path_value, axis=b_repeatdims).reshape(b.array.shape)
+        path_value = np.sum(path_value, axis=b_repeatdims).reshape(b.array.shape)
         return np.zeros(b.array.shape, b.array.dtype) + path_value
 
     a_ = Variable(a.array, local_gradients=[(a, multiply_by_locgrad_a)])
@@ -250,8 +263,7 @@ def exp(a):
 def expand_dims(a, axis):
     "Add new axes with size of 1, indices specified by `axis`."
     value = np.expand_dims(a.array, axis)
-    local_gradients = [
-        (a, lambda path_value: path_value.reshape(a.array.shape))]
+    local_gradients = [(a, lambda path_value: path_value.reshape(a.array.shape))]
     return Variable(value, local_gradients)


@@ -303,8 +315,7 @@ def maxax(a, axis):
     flatshape = value.shape
     idx = np.argmax(value, axis=-1)
     value = np.take_along_axis(value, idx[..., np.newaxis], -1)
-    value = value.reshape(
-        tuple(1 if i == axis else v for i, v in enumerate(a.shape)))
+    value = value.reshape(tuple(1 if i == axis else v for i, v in enumerate(a.shape)))
 
     def multiply_by_locgrad(path_value):
         result = np.zeros(flatshape)
@@ -330,7 +341,6 @@ def multiply_by_locgrad(path_value):
     return Variable(value, local_gradients)
 
 
-
 def broadcastinfo(a_shape, b_shape):
     "Get which dimensions are added or repeated when `a` and `b` are broadcast."
     ndim = max(len(a_shape), len(b_shape))
@@ -346,8 +356,7 @@

     a_repeatdims = (a_shape_ == 1) & (b_shape_ > 1)  # the repeated dims
     a_repeatdims[:add_ndims_to_a] = True  # the added dims
-    a_repeatdims = np.where(a_repeatdims == True)[
-        0]  # indices of axes where True
+    a_repeatdims = np.where(a_repeatdims == True)[0]  # indices of axes where True
     a_repeatdims = [int(i) for i in a_repeatdims]
 
     b_repeatdims = (b_shape_ == 1) & (a_shape_ > 1)
@@ -365,8 +374,7 @@ def np_add_at(a, indices, b):
     elif np.library.__name__ == "cupy":
         np.scatter_add(a, indices, b)
     else:
-        raise ValueError(
-            "Expected np.library.__name__ to be `numpy` or `cupy`.")
+        raise ValueError("Expected np.library.__name__ to be `numpy` or `cupy`.")


def np_strided_sliding_view(x, window_shape: tuple, strides: tuple):
@@ -385,18 +393,15 @@ def np_strided_sliding_view(x, window_shape: tuple, strides: tuple):
"""
# Need the checks, because as_strided is not memory safe.
if not len(window_shape) == x.ndim:
raise ValueError(
f"Must provide one window size for each dimension of x.")
raise ValueError(f"Must provide one window size for each dimension of x.")
if not len(strides) == x.ndim:
raise ValueError(
f"Must provide one stride size for each dimension of x.")
raise ValueError(f"Must provide one stride size for each dimension of x.")
if any(size < 0 for size in window_shape):
raise ValueError("`window_shape` cannot contain negative values")
if any(stride < 0 for stride in strides):
raise ValueError("`strides` cannot contain negative values")
if any(x_size < w_size for x_size, w_size in zip(x.shape, window_shape)):
raise ValueError(
"window shape cannot be larger than input array shape")
raise ValueError("window shape cannot be larger than input array shape")
reduced_shape = tuple(
math.ceil((x - w + 1) / s) for x, s, w in zip(x.shape, strides, window_shape)
)
@@ -408,9 +413,6 @@ def np_strided_sliding_view(x, window_shape: tuple, strides: tuple):
     return np.lib.stride_tricks.as_strided(x, strides=out_strides, shape=out_shape)
 
 
-
-
-
 # ---------------- AUGMENTING `VARIABLE`
 # Add methods/properties to the Variable class for convenience/user-experience.
 # This isn't essential to autodiff.
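Taken together, the hunks above are the whole reverse-mode story: each op returns a `Variable` whose `local_gradients` list points back at its inputs, and `get_gradients` multiplies path values back through that graph. A minimal round-trip sketch (values illustrative; assumes `get_gradients` seeds the output gradient with ones, which is what the elementwise ops above imply):

import numpy as np
import smallpebble as sp

a = sp.Variable(np.random.random([2, 3]))
b = sp.Variable(np.random.random([2, 3]))
y = sp.mul(a, b)  # elementwise; y carries local_gradients referencing a and b

gradients = sp.get_gradients(y)  # dict mapping Variable -> gradient array
# For y = a * b, dy/da is b's value and dy/db is a's value:
assert np.allclose(gradients[a], b.array)
assert np.allclose(gradients[b], a.array)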
14 changes: 14 additions & 0 deletions smallpebble/misc/__init__.py
@@ -1 +1,15 @@
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from smallpebble.misc.data import load_data
14 changes: 14 additions & 0 deletions smallpebble/misc/data.py
@@ -1,3 +1,17 @@
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 """Rough and ready... load (/download) MNIST/CIFAR data from openml.org."""
 from collections import namedtuple
 import hashlib
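A usage sketch for this loader. The full signature isn't shown in this diff; the call below follows the project README, where `load_data` takes a dataset name ("mnist" or "cifar") and returns train/test arrays:

import smallpebble as sp

# Downloads from openml.org on first call, then reuses the cached copy.
X_train, y_train, X_test, y_test = sp.load_data("mnist")
print(X_train.shape)  # one flattened 28x28 image per row, e.g. (60000, 784)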
14 changes: 14 additions & 0 deletions smallpebble/nn/__init__.py
@@ -1,2 +1,16 @@
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 from smallpebble.nn.conv import *
 from smallpebble.nn.training import *
38 changes: 24 additions & 14 deletions smallpebble/nn/conv.py
@@ -1,9 +1,24 @@
+# Copyright 2021 The SmallPebble authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import smallpebble.core as core
 from collections import defaultdict
 import math
 import numpy
 import smallpebble.array_library as np
 
+
 def conv2d(images, kernels, padding="SAME", strides=[1, 1]) -> core.Variable:
     """2D convolution, with same api as tf.nn.conv2d [1].
@@ -31,12 +46,10 @@ def conv2d(images, kernels, padding="SAME", strides=[1, 1]) -> core.Variable:
         images, padding, imheight, imwidth, stride_y, stride_x, kernheight, kernwidth
     )
     window_shape = (1, kernheight, kernwidth, channels_in)
-    image_patches = strided_sliding_view(
-        images, window_shape, (1, stride_y, stride_x, 1))
+    image_patches = strided_sliding_view(images, window_shape, (1, stride_y, stride_x, 1))
     outh, outw = image_patches.shape[1], image_patches.shape[2]
     patches_as_matrix = core.reshape(
-        image_patches, [n_images * outh * outw,
-                        kernheight * kernwidth * channels_in]
+        image_patches, [n_images * outh * outw, kernheight * kernwidth * channels_in]
     )
     kernels_as_matrix = core.reshape(
         kernels, [kernheight * kernwidth * channels_in, channels_out]
@@ -53,8 +66,7 @@ def maxpool2d(images, kernheight, kernwidth, padding="SAME", strides=[1, 1]):
         images, padding, imheight, imwidth, stride_y, stride_x, kernheight, kernwidth
     )
     window_shape = (1, kernheight, kernwidth, 1)
-    image_patches = strided_sliding_view(
-        images, window_shape, [1, stride_y, stride_x, 1])
+    image_patches = strided_sliding_view(images, window_shape, [1, stride_y, stride_x, 1])
     flat_patches_shape = image_patches.array.shape[:4] + (-1,)
     image_patches = core.reshape(image_patches, shape=flat_patches_shape)
     result = core.maxax(image_patches, axis=-1)
@@ -81,8 +93,9 @@ def strided_sliding_view(a, window_shape, strides):

     def multiply_by_locgrad(path_value):  # TODO: a faster method
         result = np.zeros(a.shape, a.dtype)
-        core.np_add_at(core.np_strided_sliding_view(
-            result, window_shape, strides), None, path_value)
+        core.np_add_at(
+            core.np_strided_sliding_view(result, window_shape, strides), None, path_value
+        )
         return result
 
     local_gradients = [(a, multiply_by_locgrad)]
@@ -117,14 +130,11 @@ def patches_index(imheight, imwidth, kernheight, kernwidth, stride_y, stride_x):
"Index to get image patches, e.g. for 2d convolution."
max_y_idx = imheight - kernheight + 1
max_x_idx = imwidth - kernwidth + 1
row_major_index = np.arange(
imheight * imwidth).reshape([imheight, imwidth])
row_major_index = np.arange(imheight * imwidth).reshape([imheight, imwidth])
patch_corners = row_major_index[0:max_y_idx:stride_y, 0:max_x_idx:stride_x]
elements_relative = row_major_index[0:kernheight, 0:kernwidth]
index_of_patches = patch_corners.reshape(
[-1, 1]) + elements_relative.reshape([1, -1])
index_of_patches = np.unravel_index(
index_of_patches, shape=[imheight, imwidth])
index_of_patches = patch_corners.reshape([-1, 1]) + elements_relative.reshape([1, -1])
index_of_patches = np.unravel_index(index_of_patches, shape=[imheight, imwidth])
outheight, outwidth = patch_corners.shape
n_patches = outheight * outwidth
return index_of_patches, outheight, outwidth, n_patches
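Since `conv2d` mirrors the `tf.nn.conv2d` API, images are NHWC and kernels are laid out [kernheight, kernwidth, channels_in, channels_out]. A rough usage sketch (shapes illustrative, not taken from this diff):

import numpy as np
import smallpebble as sp

images = sp.Variable(np.random.random([10, 28, 28, 1]))  # batch, height, width, channels_in
kernels = sp.Variable(np.random.random([3, 3, 1, 16]))   # kernheight, kernwidth, in, out
out = sp.conv2d(images, kernels, padding="SAME", strides=[1, 1])
print(out.array.shape)  # expect (10, 28, 28, 16): SAME padding, stride 1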
