Skip to content

Commit

Permalink
docs: replace all double graves with single graves (#9679)
Browse files Browse the repository at this point in the history
  • Loading branch information
deepyaman authored Jul 24, 2024
1 parent ae1e112 commit dd26d60
Show file tree
Hide file tree
Showing 13 changed files with 59 additions and 59 deletions.
36 changes: 18 additions & 18 deletions ibis/backends/bigquery/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,27 +381,27 @@ def do_connect(
auth_cache
Selects the behavior of the credentials cache.
``'default'``
`'default'`
Reads credentials from disk if available, otherwise
authenticates and caches credentials to disk.
``'reauth'``
`'reauth'`
Authenticates and caches credentials to disk.
``'none'``
`'none'`
Authenticates and does **not** cache credentials.
Defaults to ``'default'``.
Defaults to `'default'`.
partition_column
Identifier to use instead of default ``_PARTITIONTIME`` partition
column. Defaults to ``'PARTITIONTIME'``.
Identifier to use instead of default `_PARTITIONTIME` partition
column. Defaults to `'PARTITIONTIME'`.
client
A ``Client`` from the ``google.cloud.bigquery`` package. If not
set, one is created using the ``project_id`` and ``credentials``.
A `Client` from the `google.cloud.bigquery` package. If not
set, one is created using the `project_id` and `credentials`.
storage_client
A ``BigQueryReadClient`` from the
``google.cloud.bigquery_storage_v1`` package. If not set, one is
created using the ``project_id`` and ``credentials``.
A `BigQueryReadClient` from the
`google.cloud.bigquery_storage_v1` package. If not set, one is
created using the `project_id` and `credentials`.
location
Default location for BigQuery objects.
Expand Down Expand Up @@ -487,7 +487,7 @@ def from_connection(
storage_client: bqstorage.BigQueryReadClient | None = None,
dataset_id: str = "",
) -> Backend:
"""Create a BigQuery `Backend` from an existing ``Client``.
"""Create a BigQuery `Backend` from an existing `Client`.
Parameters
----------
Expand Down Expand Up @@ -1318,20 +1318,20 @@ def connect(
auth_cache
Selects the behavior of the credentials cache.
``'default'``
`'default'`
Reads credentials from disk if available, otherwise
authenticates and caches credentials to disk.
``'reauth'``
`'reauth'`
Authenticates and caches credentials to disk.
``'none'``
`'none'`
Authenticates and does **not** cache credentials.
Defaults to ``'default'``.
Defaults to `'default'`.
partition_column
Identifier to use instead of default ``_PARTITIONTIME`` partition
column. Defaults to ``'PARTITIONTIME'``.
Identifier to use instead of default `_PARTITIONTIME` partition
column. Defaults to `'PARTITIONTIME'`.
Returns
-------
Expand Down
2 changes: 1 addition & 1 deletion ibis/backends/bigquery/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ def parse_project_and_dataset(project: str, dataset: str = "") -> tuple[str, str
project : str
A project name
dataset : Optional[str]
A ``<project>.<dataset>`` string or just a dataset name
A `<project>.<dataset>` string or just a dataset name
Examples
--------
Expand Down
2 changes: 1 addition & 1 deletion ibis/backends/duckdb/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -1328,7 +1328,7 @@ def _to_duckdb_relation(
params: Mapping[ir.Scalar, Any] | None = None,
limit: int | str | None = None,
):
"""Preprocess the expr, and return a ``duckdb.DuckDBPyRelation`` object.
"""Preprocess the expr, and return a `duckdb.DuckDBPyRelation` object.
When retrieving in-memory results, it's faster to use `duckdb_con.sql`
than `duckdb_con.execute`, as the query planner can take advantage of
Expand Down
2 changes: 1 addition & 1 deletion ibis/backends/impala/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ def do_connect(
ca_cert
Local path to 3rd party CA certificate or copy of server
certificate for self-signed certificates. If SSL is enabled, but
this argument is ``None``, then certificate validation is skipped.
this argument is `None`, then certificate validation is skipped.
user
LDAP user to authenticate
password
Expand Down
2 changes: 1 addition & 1 deletion ibis/backends/mysql/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -318,7 +318,7 @@ def list_tables(
[deprecated] The schema to perform the list against.
database
Database to list tables from. Default behavior is to show tables in
the current database (``self.current_database``).
the current database (`self.current_database`).
"""
if schema is not None:
self._warn_schema()
Expand Down
4 changes: 2 additions & 2 deletions ibis/backends/pyspark/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -439,7 +439,7 @@ def create_database(
name
Database name
catalog
Catalog to create database in (defaults to ``current_catalog``)
Catalog to create database in (defaults to `current_catalog`)
path
Path where to store the database data; otherwise uses Spark default
force
Expand Down Expand Up @@ -473,7 +473,7 @@ def drop_database(
name
Database name
catalog
Catalog containing database to drop (defaults to ``current_catalog``)
Catalog containing database to drop (defaults to `current_catalog`)
force
If False, Spark throws exception if database is not empty or
database does not exist
Expand Down
4 changes: 2 additions & 2 deletions ibis/backends/sqlite/udf.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,8 +77,8 @@ def udf(func=None, *, skip_if_exists=False, deterministic=True):
Returns
-------
callable
A callable object that returns ``None`` if any of its inputs are
``None``.
A callable object that returns `None` if any of its inputs are
`None`.
"""
if func is None:
Expand Down
2 changes: 1 addition & 1 deletion ibis/backends/tests/test_window.py
Original file line number Diff line number Diff line change
Expand Up @@ -930,7 +930,7 @@ def test_ungrouped_unbounded_window(
def test_grouped_bounded_range_window(backend, alltypes, df):
# Explanation of the range window spec below:
#
# `preceding=10, following=0, order_by='id'``:
# `preceding=10, following=0, order_by='id'`:
# The window at a particular row (call its `id` value x) will contain
# some other row (call its `id` value y) if x-10 <= y <= x.
# `group_by='string_col'`:
Expand Down
4 changes: 2 additions & 2 deletions ibis/common/typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -245,8 +245,8 @@ class CoercionError(Exception): ...
class Coercible(Abstract):
"""Protocol for defining coercible types.
Coercible types define a special ``__coerce__`` method that accepts an object
with an instance of the type. Used in conjunction with the ``coerced_to``
Coercible types define a special `__coerce__` method that accepts an object
with an instance of the type. Used in conjunction with the `coerced_to`
pattern to coerce arguments to a specific type.
"""

Expand Down
22 changes: 11 additions & 11 deletions ibis/expr/types/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -208,24 +208,24 @@ def visualize(
Parameters
----------
format
Image output format. These are specified by the ``graphviz`` Python
Image output format. These are specified by the `graphviz` Python
library.
label_edges
Show operation input names as edge labels
verbose
Print the graphviz DOT code to stderr if [](`True`)
node_attr
Mapping of ``(attribute, value)`` pairs set for all nodes.
Options are specified by the ``graphviz`` Python library.
Mapping of `(attribute, value)` pairs set for all nodes.
Options are specified by the `graphviz` Python library.
node_attr_getter
Callback taking a node and returning a mapping of ``(attribute, value)`` pairs
for that node. Options are specified by the ``graphviz`` Python library.
Callback taking a node and returning a mapping of `(attribute, value)` pairs
for that node. Options are specified by the `graphviz` Python library.
edge_attr
Mapping of ``(attribute, value)`` pairs set for all edges.
Options are specified by the ``graphviz`` Python library.
Mapping of `(attribute, value)` pairs set for all edges.
Options are specified by the `graphviz` Python library.
edge_attr_getter
Callback taking two adjacent nodes and returning a mapping of ``(attribute, value)`` pairs
for the edge between those nodes. Options are specified by the ``graphviz`` Python library.
Callback taking two adjacent nodes and returning a mapping of `(attribute, value)` pairs
for the edge between those nodes. Options are specified by the `graphviz` Python library.
Examples
--------
Expand All @@ -248,7 +248,7 @@ def visualize(
Raises
------
ImportError
If ``graphviz`` is not installed.
If `graphviz` is not installed.
"""
import ibis.expr.visualize as viz

Expand Down Expand Up @@ -541,7 +541,7 @@ def to_pandas_batches(
params
Mapping of scalar parameter expressions to value.
chunk_size
Maximum number of rows in each returned `DataFrame``.
Maximum number of rows in each returned `DataFrame`.
kwargs
Keyword arguments
Expand Down
32 changes: 16 additions & 16 deletions ibis/expr/types/relations.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,10 +77,10 @@ def f( # noqa: D417
Boolean or column names to join on
lname
A format string to use to rename overlapping columns in the left
table (e.g. ``"left_{name}"``).
table (e.g. `"left_{name}"`).
rname
A format string to use to rename overlapping columns in the right
table (e.g. ``"right_{name}"``).
table (e.g. `"right_{name}"`).
Returns
-------
Expand Down Expand Up @@ -1335,7 +1335,7 @@ def sample(
float between 0 and 1.
method
The sampling method to use. The default is "row", which includes
each row with a probability of ``fraction``. If method is "block",
each row with a probability of `fraction`. If method is "block",
some backends may instead perform sampling a fraction of blocks of
rows (where "block" is a backend dependent definition). This is
identical to "row" for backends lacking a blockwise sampling
Expand Down Expand Up @@ -2253,16 +2253,16 @@ def rename(
An optional method for renaming columns. May be one of:
- A format string to use to rename all columns, like
``"prefix_{name}"``.
`"prefix_{name}"`.
- A function from old name to new name. If the function returns
``None`` the old name is used.
- The literal strings ``"snake_case"`` or ``"ALL_CAPS"`` to
rename all columns using a ``snake_case`` or ``"ALL_CAPS"``
`None` the old name is used.
- The literal strings `"snake_case"` or `"ALL_CAPS"` to
rename all columns using a `snake_case` or `"ALL_CAPS"`
naming convention respectively.
- A mapping from new name to old name. Existing columns not present
in the mapping will passthrough with their original name.
substitutions
Columns to be explicitly renamed, expressed as ``new_name=old_name``
Columns to be explicitly renamed, expressed as `new_name=old_name`
keyword arguments.
Returns
Expand Down Expand Up @@ -2297,7 +2297,7 @@ def rename(
└───────────┴───────────────┴─────────────────────────────────────┘
Rename specific columns by passing keyword arguments like
``new_name="old_name"``
`new_name="old_name"`
>>> t.rename(study_name="studyName").head(1)
┏━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
Expand Down Expand Up @@ -3123,13 +3123,13 @@ def join(
predicates
Condition(s) to join on. See examples for details.
how
Join method, e.g. ``"inner"`` or ``"left"``.
Join method, e.g. `"inner"` or `"left"`.
lname
A format string to use to rename overlapping columns in the left
table (e.g. ``"left_{name}"``).
table (e.g. `"left_{name}"`).
rname
A format string to use to rename overlapping columns in the right
table (e.g. ``"right_{name}"``).
table (e.g. `"right_{name}"`).
Examples
--------
Expand Down Expand Up @@ -3301,10 +3301,10 @@ def asof_join(
Amount of time to look behind when joining
lname
A format string to use to rename overlapping columns in the left
table (e.g. ``"left_{name}"``).
table (e.g. `"left_{name}"`).
rname
A format string to use to rename overlapping columns in the right
table (e.g. ``"right_{name}"``).
table (e.g. `"right_{name}"`).
Returns
-------
Expand Down Expand Up @@ -3336,10 +3336,10 @@ def cross_join(
Additional tables to cross join
lname
A format string to use to rename overlapping columns in the left
table (e.g. ``"left_{name}"``).
table (e.g. `"left_{name}"`).
rname
A format string to use to rename overlapping columns in the right
table (e.g. ``"right_{name}"``).
table (e.g. `"right_{name}"`).
Returns
-------
Expand Down
2 changes: 1 addition & 1 deletion ibis/expr/types/structs.py
Original file line number Diff line number Diff line change
Expand Up @@ -341,7 +341,7 @@ def lift(self) -> ir.Table:
return table.to_expr().select([self[name] for name in self.names])

def destructure(self) -> list[ir.Value]:
"""Destructure a ``StructValue`` into the corresponding struct fields.
"""Destructure a `StructValue` into the corresponding struct fields.
When assigned, a destruct value will be destructured and assigned to
multiple columns.
Expand Down
4 changes: 2 additions & 2 deletions ibis/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def is_function(v: Any) -> bool:


def log(msg: str) -> None:
"""Log `msg` using ``options.verbose_log`` if set, otherwise ``print``."""
"""Log `msg` using `options.verbose_log` if set, otherwise `print`."""
from ibis.config import options

if options.verbose:
Expand All @@ -171,7 +171,7 @@ def approx_equal(a: Real, b: Real, eps: Real):
def safe_index(elements: Sequence[int], value: int) -> int:
"""Find the location of `value` in `elements`.
Return -1 if `value` is not found instead of raising ``ValueError``.
Return -1 if `value` is not found instead of raising `ValueError`.
Parameters
----------
Expand Down

0 comments on commit dd26d60

Please sign in to comment.