Update aliases to reflect package name (#547)

* Change `import pytensor.tensor as at` to `as pt` everywhere in the docs

* Change `import pytensor.tensor as at` to `as pt` everywhere

Change `import pytensor.scalar as aes` to `as ps` everywhere

Change `import pytensor.tensor.random as aer` to `as ptr` everywhere

Change test variables with `_at` suffix or `at_` prefix to `_pt` and `pt_`, respectively

* More renaming

* Rename remaining instances of `aes` and `aer`
jessegrabowski authored Dec 11, 2023
1 parent c38eea0 commit 9df55cc
Showing 156 changed files with 3,663 additions and 3,667 deletions.
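
For readers skimming the diff, the alias convention being applied throughout is summarized below. This is an illustrative sketch of the naming scheme described in the commit message, not an excerpt from the changed files:

```python
# Old aliases (replaced throughout by this commit)
import pytensor.tensor as at
import pytensor.scalar as aes
import pytensor.tensor.random as aer

# New aliases, matching the PyTensor package name
import pytensor.tensor as pt
import pytensor.scalar as ps
import pytensor.tensor.random as ptr

# Test variables follow the same pattern: an `_at` suffix becomes `_pt`, etc.
x_pt = pt.vector("x")      # previously: x_at = at.vector("x")
y_pt = pt.vector("y")
z_pt = pt.dot(x_pt, y_pt)  # usage is unchanged apart from the alias
```
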
6 changes: 3 additions & 3 deletions doc/extending/creating_a_numba_jax_op.rst
@@ -135,16 +135,16 @@ Here's a small example of a test for :class:`Eye`:

.. code:: python
import pytensor.tensor as at
import pytensor.tensor as pt
def test_jax_Eye():
"""Test JAX conversion of the `Eye` `Op`."""
# Create a symbolic input for `Eye`
x_at = at.scalar()
x_at = pt.scalar()
# Create a variable that is the output of an `Eye` `Op`
eye_var = at.eye(x_at)
eye_var = pt.eye(x_at)
# Create a PyTensor `FunctionGraph`
out_fg = FunctionGraph(outputs=[eye_var])
10 changes: 5 additions & 5 deletions doc/extending/creating_an_op.rst
@@ -786,7 +786,7 @@ signature:
.. testcode:: asop

import pytensor
import pytensor.tensor as at
import pytensor.tensor as pt
import numpy as np
from pytensor import function
from pytensor.compile.ops import as_op
@@ -797,17 +797,17 @@ signature:
return [ashp[:-1] + bshp[-1:]]


@as_op(itypes=[at.matrix, at.matrix],
otypes=[at.matrix], infer_shape=infer_shape_numpy_dot)
@as_op(itypes=[pt.matrix, pt.matrix],
otypes=[pt.matrix], infer_shape=infer_shape_numpy_dot)
def numpy_dot(a, b):
return np.dot(a, b)

You can try it as follows:

.. testcode:: asop

x = at.matrix()
y = at.matrix()
x = pt.matrix()
y = pt.matrix()
f = function([x, y], numpy_dot(x, y))
inp1 = np.random.random_sample((5, 4))
inp2 = np.random.random_sample((4, 7))
30 changes: 13 additions & 17 deletions doc/extending/extending_pytensor_solution_1.py
@@ -14,8 +14,8 @@

class ProdOp(Op):
def make_node(self, x, y):
x = at.as_tensor_variable(x)
y = at.as_tensor_variable(y)
x = pt.as_tensor_variable(x)
y = pt.as_tensor_variable(y)
outdim = x.type.ndim
output = TensorType(
dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=(None,) * outdim
@@ -39,8 +39,8 @@ def grad(self, inputs, output_grads):

class SumDiffOp(Op):
def make_node(self, x, y):
x = at.as_tensor_variable(x)
y = at.as_tensor_variable(y)
x = pt.as_tensor_variable(x)
y = pt.as_tensor_variable(y)
outdim = x.type.ndim
output1 = TensorType(
dtype=pytensor.scalar.upcast(x.dtype, y.dtype), shape=(None,) * outdim
@@ -62,20 +62,16 @@ def infer_shape(self, fgraph, node, i0_shapes):
def grad(self, inputs, output_grads):
og1, og2 = output_grads
if og1 is None:
og1 = at.zeros_like(og2)
og1 = pt.zeros_like(og2)
if og2 is None:
og2 = at.zeros_like(og1)
og2 = pt.zeros_like(og1)
return [og1 + og2, og1 - og2]


# 3. Testing apparatus

import numpy as np

from tests import unittest_tools as utt
from pytensor import tensor as at
from pytensor import tensor as pt
from pytensor.graph.basic import Apply
from pytensor.graph.op import Op
from pytensor.tensor.type import dmatrix, matrix


@@ -182,8 +178,8 @@ def infer_shape_numpy_dot(fgraph, node, input_shapes):


@as_op(
itypes=[at.fmatrix, at.fmatrix],
otypes=[at.fmatrix],
itypes=[pt.fmatrix, pt.fmatrix],
otypes=[pt.fmatrix],
infer_shape=infer_shape_numpy_dot,
)
def numpy_add(a, b):
@@ -197,17 +193,17 @@ def infer_shape_numpy_add_sub(fgraph, node, input_shapes):


@as_op(
itypes=[at.fmatrix, at.fmatrix],
otypes=[at.fmatrix],
itypes=[pt.fmatrix, pt.fmatrix],
otypes=[pt.fmatrix],
infer_shape=infer_shape_numpy_add_sub,
)
def numpy_add(a, b):
return np.add(a, b)


@as_op(
itypes=[at.fmatrix, at.fmatrix],
otypes=[at.fmatrix],
itypes=[pt.fmatrix, pt.fmatrix],
otypes=[pt.fmatrix],
infer_shape=infer_shape_numpy_add_sub,
)
def numpy_sub(a, b):
28 changes: 14 additions & 14 deletions doc/extending/graph_rewriting.rst
@@ -443,7 +443,7 @@ The following is an example that distributes dot products across additions.
.. code::
import pytensor
import pytensor.tensor as at
import pytensor.tensor as pt
from pytensor.graph.rewriting.kanren import KanrenRelationSub
from pytensor.graph.rewriting.basic import EquilibriumGraphRewriter
from pytensor.graph.rewriting.utils import rewrite_graph
@@ -462,7 +462,7 @@ The following is an example that distributes dot products across additions.
)
# Tell `kanren` that `add` is associative
fact(associative, at.add)
fact(associative, pt.add)
def dot_distributeo(in_lv, out_lv):
@@ -473,13 +473,13 @@ The following is an example that distributes dot products across additions.
# Make sure the input is a `_dot`
eq(in_lv, etuple(_dot, A_lv, add_term_lv)),
# Make sure the term being `_dot`ed is an `add`
heado(at.add, add_term_lv),
heado(pt.add, add_term_lv),
# Flatten the associative pairings of `add` operations
assoc_flatten(add_term_lv, add_flat_lv),
# Get the flattened `add` arguments
tailo(add_cdr_lv, add_flat_lv),
# Add all the `_dot`ed arguments and set the output
conso(at.add, dot_cdr_lv, out_lv),
conso(pt.add, dot_cdr_lv, out_lv),
# Apply the `_dot` to all the flattened `add` arguments
mapo(lambda x, y: conso(_dot, etuple(A_lv, x), y), add_cdr_lv, dot_cdr_lv),
)
@@ -490,10 +490,10 @@ The following is an example that distributes dot products across additions.
Below, we apply `dot_distribute_rewrite` to a few example graphs. First we create a simple test graph:

>>> x_at = at.vector("x")
>>> y_at = at.vector("y")
>>> A_at = at.matrix("A")
>>> test_at = A_at.dot(x_at + y_at)
>>> x_at = pt.vector("x")
>>> y_at = pt.vector("y")
>>> A_at = pt.matrix("A")
>>> test_at = A_pt.dot(x_at + y_at)
>>> print(pytensor.pprint(test_at))
(A @ (x + y))

Expand All @@ -506,18 +506,18 @@ Next we apply the rewrite to the graph:
We see that the dot product has been distributed, as desired. Now, let's try a
few more test cases:

>>> z_at = at.vector("z")
>>> w_at = at.vector("w")
>>> test_at = A_at.dot((x_at + y_at) + (z_at + w_at))
>>> z_at = pt.vector("z")
>>> w_at = pt.vector("w")
>>> test_at = A_pt.dot((x_at + y_at) + (z_at + w_at))
>>> print(pytensor.pprint(test_at))
(A @ ((x + y) + (z + w)))
>>> res = rewrite_graph(test_at, include=[], custom_rewrite=dot_distribute_rewrite, clone=False)
>>> print(pytensor.pprint(res))
(((A @ x) + (A @ y)) + ((A @ z) + (A @ w)))

>>> B_at = at.matrix("B")
>>> w_at = at.vector("w")
>>> test_at = A_at.dot(x_at + (y_at + B_at.dot(z_at + w_at)))
>>> B_at = pt.matrix("B")
>>> w_at = pt.vector("w")
>>> test_at = A_pt.dot(x_at + (y_at + B_pt.dot(z_at + w_at)))
>>> print(pytensor.pprint(test_at))
(A @ (x + (y + ((B @ z) + (B @ w)))))
>>> res = rewrite_graph(test_at, include=[], custom_rewrite=dot_distribute_rewrite, clone=False)
6 changes: 3 additions & 3 deletions doc/extending/graphstructures.rst
@@ -28,10 +28,10 @@ The following illustrates these elements:

.. testcode::

import pytensor.tensor as at
import pytensor.tensor as pt

x = at.dmatrix('x')
y = at.dmatrix('y')
x = pt.dmatrix('x')
y = pt.dmatrix('y')
z = x + y

**Diagram**
4 changes: 2 additions & 2 deletions doc/extending/tips.rst
@@ -20,10 +20,10 @@ simple function:

.. code::
from pytensor import tensor as at
from pytensor import tensor as pt
def sum_square_difference(a, b):
return at.sum((a - b)**2)
return pt.sum((a - b)**2)
Even without taking PyTensor's rewrites into account, it is likely
to work just as well as a custom implementation. It also supports all
12 changes: 6 additions & 6 deletions doc/extending/unittest.rst
@@ -98,13 +98,13 @@ Example:
.. code-block:: python
import numpy as np
import pytensor.tensor as at
import pytensor.tensor as pt
def test_dot_validity():
a = at.dmatrix('a')
b = at.dmatrix('b')
c = at.dot(a, b)
a = pt.dmatrix('a')
b = pt.dmatrix('b')
c = pt.dot(a, b)
c_fn = pytensor.function([a, b], [c])
@@ -187,7 +187,7 @@ symbolic variable:

def test_verify_exprgrad():
def fun(x,y,z):
return (x + at.cos(y)) / (4 * z)**2
return (x + pt.cos(y)) / (4 * z)**2

x_val = np.asarray([[1], [1.1], [1.2]])
y_val = np.asarray([0.1, 0.2])
@@ -207,7 +207,7 @@ Here is an example showing how to use :func:`verify_grad` on an :class:`Op` inst
"""
a_val = np.asarray([[0,1,2],[3,4,5]], dtype='float64')
rng = np.random.default_rng(42)
pytensor.gradient.verify_grad(at.Flatten(), [a_val], rng=rng)
pytensor.gradient.verify_grad(pt.Flatten(), [a_val], rng=rng)

.. note::

6 changes: 3 additions & 3 deletions doc/glossary.rst
@@ -6,7 +6,7 @@ Glossary
.. testsetup::

import pytensor
import pytensor.tensor as at
import pytensor.tensor as pt

.. glossary::

@@ -31,7 +31,7 @@
A variable with an immutable value.
For example, when you type

>>> x = at.ivector()
>>> x = pt.ivector()
>>> y = x + 3

Then a `constant` is created to represent the ``3`` in the graph.
@@ -151,7 +151,7 @@ Glossary
The main data structure you work with when using PyTensor.
For example,

>>> x = at.ivector()
>>> x = pt.ivector()
>>> y = -x**2

``x`` and ``y`` are both :class:`Variable`\s, i.e. instances of the :class:`Variable` class.
6 changes: 3 additions & 3 deletions doc/introduction.rst
@@ -66,11 +66,11 @@ its features, but it illustrates concretely what PyTensor is.
.. code-block:: python
import pytensor
from pytensor import tensor as at
from pytensor import tensor as pt
# declare two symbolic floating-point scalars
a = at.dscalar()
b = at.dscalar()
a = pt.dscalar()
b = pt.dscalar()
# create a simple expression
c = a + b
4 changes: 2 additions & 2 deletions doc/library/compile/debugmode.rst
@@ -28,10 +28,10 @@ a cluster.
.. testcode::

import pytensor
from pytensor import tensor as at
from pytensor import tensor as pt
from pytensor.compile.debugmode import DebugMode

x = at.dscalar('x')
x = pt.dscalar('x')

f = pytensor.function([x], 10*x, mode='DebugMode')

20 changes: 10 additions & 10 deletions doc/library/compile/io.rst
@@ -80,10 +80,10 @@ A non-None `value` argument makes an In() instance an optional parameter
of the compiled function. For example, in the following code we are
defining an arity-2 function ``inc``.

>>> import pytensor.tensor as at
>>> import pytensor.tensor as pt
>>> from pytensor import function
>>> from pytensor.compile.io import In
>>> u, x, s = at.scalars('u', 'x', 's')
>>> u, x, s = pt.scalars('u', 'x', 's')
>>> inc = function([u, In(x, value=3), In(s, update=(s+x*u), value=10.0)], [])

Since we provided a ``value`` for ``s`` and ``x``, we can call it with just a value for ``u`` like this:
@@ -183,8 +183,8 @@ method to access values by indexing a Function directly by typing
To show some examples of these access methods...


>>> from pytensor import tensor as at, function
>>> a, b, c = at.scalars('xys') # set the internal names of graph nodes
>>> from pytensor import tensor as pt, function
>>> a, b, c = pt.scalars('xys') # set the internal names of graph nodes
>>> # Note that the name of c is 's', not 'c'!
>>> fn = function([a, b, ((c, c+a+b), 10.0)], [])

@@ -236,12 +236,12 @@ Every element of the inputs list will be upgraded to an In instance if necessary
Example:

>>> import pytensor
>>> from pytensor import tensor as at
>>> from pytensor import tensor as pt
>>> from pytensor.compile.io import In
>>> x = at.scalar()
>>> y = at.scalar('y')
>>> z = at.scalar('z')
>>> w = at.scalar('w')
>>> x = pt.scalar()
>>> y = pt.scalar('y')
>>> z = pt.scalar('z')
>>> w = pt.scalar('w')

>>> fn = pytensor.function(inputs=[x, y, In(z, value=42), ((w, w+x), 0)],
... outputs=x + y + z)
@@ -308,7 +308,7 @@ If a list of ``Variable`` or ``Out`` instances is given as argument, then the co

>>> import numpy
>>> from pytensor.compile.io import Out
>>> x, y, s = at.matrices('xys')
>>> x, y, s = pt.matrices('xys')
>>> x, y, s = pt.matrices('xys')

>>> # print a list of 2 ndarrays
>>> fn1 = pytensor.function([x], [x+x, Out((x+x).T, borrow=True)])
6 changes: 3 additions & 3 deletions doc/library/compile/nanguardmode.rst
@@ -25,12 +25,12 @@ of abnormal values: NaNs, Infs, and abnormally big values.

import numpy as np
import pytensor
import pytensor.tensor as at
import pytensor.tensor as pt
from pytensor.compile.nanguardmode import NanGuardMode

x = at.matrix()
x = pt.matrix()
w = pytensor.shared(np.random.standard_normal((5, 7)).astype(pytensor.config.floatX))
y = at.dot(x, w)
y = pt.dot(x, w)
fun = pytensor.function(
[x], y,
mode=NanGuardMode(nan_is_error=True, inf_is_error=True, big_is_error=True)