diff --git a/awswrangler/catalog/_create.py b/awswrangler/catalog/_create.py
index bd21fc721..843b4b6c8 100644
--- a/awswrangler/catalog/_create.py
+++ b/awswrangler/catalog/_create.py
@@ -1131,7 +1131,7 @@ def create_csv_table(
If True allows schema evolution (new or missing columns), otherwise a exception will be raised.
(Only considered if dataset=True and mode in ("append", "overwrite_partitions"))
Related tutorial:
- https://aws-sdk-pandas.readthedocs.io/en/2.11.0/tutorials/014%20-%20Schema%20Evolution.html
+ https://aws-sdk-pandas.readthedocs.io/en/3.5.2/tutorials/014%20-%20Schema%20Evolution.html
sep : str
String of length 1. Field delimiter for the output file.
skip_header_line_count : Optional[int]
@@ -1317,7 +1317,7 @@ def create_json_table(
If True allows schema evolution (new or missing columns), otherwise a exception will be raised.
(Only considered if dataset=True and mode in ("append", "overwrite_partitions"))
Related tutorial:
- https://aws-sdk-pandas.readthedocs.io/en/2.11.0/tutorials/014%20-%20Schema%20Evolution.html
+ https://aws-sdk-pandas.readthedocs.io/en/3.5.2/tutorials/014%20-%20Schema%20Evolution.html
serde_library : Optional[str]
Specifies the SerDe Serialization library which will be used. You need to provide the Class library name
as a string.
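
For readers skimming the docstring change above, a minimal usage sketch of `wr.catalog.create_csv_table` showing the `schema_evolution` flag in context. The bucket, database, table, and column names are placeholders, not part of this diff.

```python
import awswrangler as wr

# Hypothetical names; schema_evolution/sep/skip_header_line_count mirror the docstring above.
wr.catalog.create_csv_table(
    database="my_database",
    table="my_csv_table",
    path="s3://my-bucket/csv/",
    columns_types={"id": "int", "name": "string"},
    partitions_types={"year": "string"},
    mode="overwrite",
    schema_evolution=True,  # allow new/missing columns on later appends
    sep=",",
    skip_header_line_count=1,
)
```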
diff --git a/awswrangler/opensearch/_write.py b/awswrangler/opensearch/_write.py
index 33a53c69e..b0c9bcedb 100644
--- a/awswrangler/opensearch/_write.py
+++ b/awswrangler/opensearch/_write.py
@@ -489,12 +489,13 @@ def index_documents(
use_threads: bool | int = False,
**kwargs: Any,
) -> dict[str, Any]:
- """Index all documents to OpenSearch index.
+ """
+ Index all documents to OpenSearch index.
Note
----
`max_retries`, `initial_backoff`, and `max_backoff` are not supported with parallel bulk
- (when `use_threads`is set to True).
+ (when `use_threads` is set to True).
Note
----
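
A minimal sketch of the behaviour the note describes, assuming a placeholder domain endpoint and index name: when `use_threads` is truthy, parallel bulk is used and the retry/backoff arguments do not apply.

```python
import awswrangler as wr

# Placeholder endpoint and index name.
client = wr.opensearch.connect(host="my-domain.us-east-1.es.amazonaws.com")

wr.opensearch.index_documents(
    client=client,
    documents=[{"_id": "1", "title": "Moby Dick"}, {"_id": "2", "title": "Dune"}],
    index="movies",
    use_threads=True,  # parallel bulk: max_retries/initial_backoff/max_backoff are not honored
)
```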
diff --git a/awswrangler/s3/_read_orc.py b/awswrangler/s3/_read_orc.py
index 62588b67b..7d471ff84 100644
--- a/awswrangler/s3/_read_orc.py
+++ b/awswrangler/s3/_read_orc.py
@@ -224,7 +224,7 @@ def read_orc(
must return a bool, True to read the partition or False to ignore it.
Ignored if `dataset=False`.
E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False``
- https://aws-data-wrangler.readthedocs.io/en/3.1.1/tutorials/023%20-%20Flexible%20Partitions%20Filter.html
+ https://aws-sdk-pandas.readthedocs.io/en/3.5.2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html
columns : List[str], optional
List of columns to read from the file(s).
validate_schema : bool, default False
@@ -386,7 +386,7 @@ def read_orc_table(
must return a bool, True to read the partition or False to ignore it.
Ignored if `dataset=False`.
E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False``
- https://aws-sdk-pandas.readthedocs.io/en/3.1.1/tutorials/023%20-%20Flexible%20Partitions%20Filter.html
+ https://aws-sdk-pandas.readthedocs.io/en/3.5.2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html
columns : List[str], optional
List of columns to read from the file(s).
validate_schema : bool, default False
diff --git a/awswrangler/s3/_read_parquet.py b/awswrangler/s3/_read_parquet.py
index 4e1913306..80ae1e9fe 100644
--- a/awswrangler/s3/_read_parquet.py
+++ b/awswrangler/s3/_read_parquet.py
@@ -381,7 +381,7 @@ def read_parquet(
must return a bool, True to read the partition or False to ignore it.
Ignored if `dataset=False`.
E.g ``lambda x: True if x["year"] == "2020" and x["month"] == "1" else False``
- https://aws-data-wrangler.readthedocs.io/en/3.5.0/tutorials/023%20-%20Flexible%20Partitions%20Filter.html
+ https://aws-sdk-pandas.readthedocs.io/en/3.5.2/tutorials/023%20-%20Flexible%20Partitions%20Filter.html
columns : List[str], optional
List of columns to read from the file(s).
validate_schema : bool, default False
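
To illustrate the `partition_filter` callable referenced in the tutorial link above, a minimal sketch against a hypothetical partitioned layout (`s3://my-bucket/dataset/year=.../month=.../`; bucket and column names are placeholders). The same callable shape applies to `read_orc` and `read_orc_table` in the hunks above.

```python
import awswrangler as wr

# Partition values arrive as strings, e.g. {"year": "2020", "month": "1"}.
df = wr.s3.read_parquet(
    path="s3://my-bucket/dataset/",
    dataset=True,  # partition_filter is ignored when dataset=False
    partition_filter=lambda x: x["year"] == "2020" and x["month"] == "1",
    columns=["id", "name"],
)
```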
diff --git a/awswrangler/s3/_write_orc.py b/awswrangler/s3/_write_orc.py
index 895d978cd..2e0b9a2ee 100644
--- a/awswrangler/s3/_write_orc.py
+++ b/awswrangler/s3/_write_orc.py
@@ -404,7 +404,7 @@ def to_orc(
concurrent_partitioning: bool
If True will increase the parallelism level during the partitions writing. It will decrease the
writing time and increase the memory usage.
- https://aws-sdk-pandas.readthedocs.io/en/3.1.1/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html
+ https://aws-sdk-pandas.readthedocs.io/en/3.5.2/tutorials/022%20-%20Writing%20Partitions%20Concurrently.html
mode: str, optional
``append`` (Default), ``overwrite``, ``overwrite_partitions``. Only takes effect if dataset=True.
catalog_versioning : bool
@@ -413,7 +413,7 @@ def to_orc(
If True allows schema evolution (new or missing columns), otherwise a exception will be raised. True by default.
(Only considered if dataset=True and mode in ("append", "overwrite_partitions"))
Related tutorial:
- https://aws-sdk-pandas.readthedocs.io/en/3.1.1/tutorials/014%20-%20Schema%20Evolution.html
+ https://aws-sdk-pandas.readthedocs.io/en/3.5.2/tutorials/014%20-%20Schema%20Evolution.html
database : str, optional
Glue/Athena catalog: Database name.
table : str, optional
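
For context on the two flags documented above, a minimal `wr.s3.to_orc` sketch with placeholder bucket, database, and table names.

```python
import awswrangler as wr
import pandas as pd

df = pd.DataFrame({"id": [1, 2], "name": ["foo", "boo"], "year": ["2020", "2021"]})

wr.s3.to_orc(
    df=df,
    path="s3://my-bucket/orc/",
    dataset=True,
    partition_cols=["year"],
    mode="overwrite_partitions",
    schema_evolution=True,         # tolerate new/missing columns on later writes
    concurrent_partitioning=True,  # write partitions in parallel: faster, more memory
    database="my_database",
    table="my_orc_table",
)
```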
diff --git a/tutorials/003 - Amazon S3.ipynb b/tutorials/003 - Amazon S3.ipynb
index 24b7a8433..9ee0315ed 100644
--- a/tutorials/003 - Amazon S3.ipynb
+++ b/tutorials/003 - Amazon S3.ipynb
@@ -37,7 +37,7 @@
"\t* [3.3 Reading multiple Parquet files](#3.3-Reading-multiple-Parquet-files)\n",
"\t\t* [3.3.1 Reading Parquet by list](#3.3.1-Reading-Parquet-by-list)\n",
"\t\t* [3.3.2 Reading Parquet by prefix](#3.3.2-Reading-Parquet-by-prefix)\n",
- "* [4. Fixed-width formatted files (only read)](#4.-Fixed-width-formatted-files-%28only-read%29)\n",
+ "* [4. Fixed-width formatted files (only read)](#4.-Fixed-width-formatted-files-(only-read))\n",
"\t* [4.1 Reading single FWF file](#4.1-Reading-single-FWF-file)\n",
"\t* [4.2 Reading multiple FWF files](#4.2-Reading-multiple-FWF-files)\n",
"\t\t* [4.2.1 Reading FWF by list](#4.2.1-Reading-FWF-by-list)\n",
@@ -55,7 +55,7 @@
"* [8. Upload Objects](#8.-Upload-objects)\n",
" * [8.1 Upload object from a file path](#8.1-Upload-object-from-a-file-path)\n",
" * [8.2 Upload object from a file-like object in binary mode](#8.2-Upload-object-from-a-file-like-object-in-binary-mode)\n",
- "* [9. Delete objects](#7.-Delete-objects)\n"
+ "* [9. Delete objects](#9.-Delete-objects)\n"
]
},
{
@@ -136,8 +136,49 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo"
+ ]
},
"execution_count": 4,
"metadata": {},
@@ -169,8 +210,55 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo\n2 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo\n",
+ "2 3 bar"
+ ]
},
"execution_count": 5,
"metadata": {},
@@ -195,8 +283,55 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo\n2 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo\n",
+ "2 3 bar"
+ ]
},
"execution_count": 6,
"metadata": {},
@@ -228,7 +363,9 @@
"outputs": [
{
"data": {
- "text/plain": "['s3://woodadw-test/json/file2.json']"
+ "text/plain": [
+ "['s3://woodadw-test/json/file2.json']"
+ ]
},
"execution_count": 7,
"metadata": {},
@@ -257,8 +394,49 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo"
+ ]
},
"execution_count": 8,
"metadata": {},
@@ -290,8 +468,55 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo\n0 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo\n",
+ "0 3 bar"
+ ]
},
"execution_count": 9,
"metadata": {},
@@ -316,8 +541,55 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo\n0 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo\n",
+ "0 3 bar"
+ ]
},
"execution_count": 10,
"metadata": {},
@@ -376,8 +648,49 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo"
+ ]
},
"execution_count": 12,
"metadata": {},
@@ -409,8 +722,55 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo\n2 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo\n",
+ "2 3 bar"
+ ]
},
"execution_count": 13,
"metadata": {},
@@ -435,8 +795,55 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo\n2 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo\n",
+ "2 3 bar"
+ ]
},
"execution_count": 14,
"metadata": {},
@@ -491,8 +898,59 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name date\n0 1 Herfelingen 27-12-18\n1 2 Lambusart 14-06-18\n2 3 Spormaggiore 15-04-18"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name date\n",
+ "0 1 Herfelingen 27-12-18\n",
+ "1 2 Lambusart 14-06-18\n",
+ "2 3 Spormaggiore 15-04-18"
+ ]
},
"execution_count": 16,
"metadata": {},
@@ -524,8 +982,73 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name date\n0 1 Herfelingen 27-12-18\n1 2 Lambusart 14-06-18\n2 3 Spormaggiore 15-04-18\n3 4 Buizingen 05-09-19\n4 5 San Rafael 04-09-19"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name date\n",
+ "0 1 Herfelingen 27-12-18\n",
+ "1 2 Lambusart 14-06-18\n",
+ "2 3 Spormaggiore 15-04-18\n",
+ "3 4 Buizingen 05-09-19\n",
+ "4 5 San Rafael 04-09-19"
+ ]
},
"execution_count": 17,
"metadata": {},
@@ -550,8 +1073,73 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name date\n0 1 Herfelingen 27-12-18\n1 2 Lambusart 14-06-18\n2 3 Spormaggiore 15-04-18\n3 4 Buizingen 05-09-19\n4 5 San Rafael 04-09-19"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name date\n",
+ "0 1 Herfelingen 27-12-18\n",
+ "1 2 Lambusart 14-06-18\n",
+ "2 3 Spormaggiore 15-04-18\n",
+ "3 4 Buizingen 05-09-19\n",
+ "4 5 San Rafael 04-09-19"
+ ]
},
"execution_count": 18,
"metadata": {},
@@ -583,7 +1171,9 @@
"outputs": [
{
"data": {
- "text/plain": "'s3://woodadw-test/file0.xlsx'"
+ "text/plain": [
+ "'s3://woodadw-test/file0.xlsx'"
+ ]
},
"execution_count": 19,
"metadata": {},
@@ -610,8 +1200,49 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo"
+ ]
},
"execution_count": 20,
"metadata": {},
@@ -768,8 +1399,49 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo"
+ ]
},
"execution_count": 25,
"metadata": {},
@@ -812,8 +1484,43 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 3 bar"
+ ]
},
"execution_count": 26,
"metadata": {},
@@ -877,8 +1584,49 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 1 foo\n1 2 boo"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 1 foo\n",
+ "1 2 boo"
+ ]
},
"execution_count": 27,
"metadata": {},
@@ -918,8 +1666,43 @@
"outputs": [
{
"data": {
-     "text/html": "[HTML table markup for the DataFrame shown in text/plain below, single string]",
- "text/plain": " id name\n0 3 bar"
+ "text/html": [
+      "[HTML table markup for the DataFrame shown in text/plain below, one string per line]"
+ ],
+ "text/plain": [
+ " id name\n",
+ "0 3 bar"
+ ]
},
"execution_count": 28,
"metadata": {},
diff --git a/tutorials/030 - Data Api.ipynb b/tutorials/030 - Data Api.ipynb
index 33e7144f2..ec1a6a0e5 100644
--- a/tutorials/030 - Data Api.ipynb
+++ b/tutorials/030 - Data Api.ipynb
@@ -26,8 +26,8 @@
"metadata": {},
"source": [
"## Connect to the cluster\n",
- "- [wr.data_api.redshift.connect()](https://aws-sdk-pandas.readthedocs.io/en/2.11.0/stubs/awswrangler.data_api.redshift.connect.html)\n",
- "- [wr.data_api.rds.connect()](https://aws-sdk-pandas.readthedocs.io/en/2.11.0/stubs/awswrangler.data_api.rds.connect.html)"
+ "- [wr.data_api.redshift.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.5.2/stubs/awswrangler.data_api.redshift.connect.html)\n",
+ "- [wr.data_api.rds.connect()](https://aws-sdk-pandas.readthedocs.io/en/3.5.2/stubs/awswrangler.data_api.rds.connect.html)"
]
},
{
@@ -60,8 +60,8 @@
"metadata": {},
"source": [
"## Read from database\n",
- "- [wr.data_api.redshift.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/2.11.0/stubs/awswrangler.data_api.redshift.read_sql_query.html)\n",
- "- [wr.data_api.rds.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/2.11.0/stubs/awswrangler.data_api.rds.read_sql_query.html)"
+ "- [wr.data_api.redshift.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.5.2/stubs/awswrangler.data_api.redshift.read_sql_query.html)\n",
+ "- [wr.data_api.rds.read_sql_query()](https://aws-sdk-pandas.readthedocs.io/en/3.5.2/stubs/awswrangler.data_api.rds.read_sql_query.html)"
]
},
{
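
A minimal sketch of the two Data API calls linked in the cells above, with placeholder cluster, database, and user names.

```python
import awswrangler as wr

# Placeholder identifiers for an existing Redshift cluster.
con = wr.data_api.redshift.connect(
    cluster_id="my-redshift-cluster",
    database="dev",
    db_user="awsuser",
)
df = wr.data_api.redshift.read_sql_query("SELECT 1 AS one", con=con)
```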
diff --git a/tutorials/031 - OpenSearch.ipynb b/tutorials/031 - OpenSearch.ipynb
index 42c725c32..5f1d1ece1 100644
--- a/tutorials/031 - OpenSearch.ipynb
+++ b/tutorials/031 - OpenSearch.ipynb
@@ -19,30 +19,30 @@
"metadata": {},
"source": [
"## Table of Contents\n",
- "* [1. Initialize](#initialize)\n",
- " * [Connect to your Amazon OpenSearch domain](#connect)\n",
- " * [Enter your bucket name](#bucket)\n",
- " * [Initialize sample data](#sample-data)\n",
- "* [2. Indexing (load)](#indexing)\n",
- "\t* [Index documents (no Pandas)](#index-documents)\n",
- "\t* [Index json file](#index-json)\n",
+ "* [1. Initialize](#1.-Initialize)\n",
+ " * [Connect to your Amazon OpenSearch domain](#Connect-to-your-Amazon-OpenSearch-domain)\n",
+ " * [Enter your bucket name](#enter-your-bucket-name)\n",
+ " * [Initialize sample data](#initialize-sample-data)\n",
+ "* [2. Indexing (load)](#2.-Indexing-(load))\n",
+ "\t* [Index documents (no Pandas)](#index-documents-(no-pandas))\n",
+ "\t* [Index json file](#index-json-file)\n",
" * [Index CSV](#index-csv)\n",
- "* [3. Search](#search)\n",
- "\t* [3.1 Search by DSL](#search-dsl)\n",
- "\t* [3.2 Search by SQL](#search-sql)\n",
- "* [4. Delete Indices](#delete-index)\n",
- "* [5. Bonus - Prepare data and index from DataFrame](#bonus)\n",
- "\t* [Prepare the data for indexing](#prepare-data)\n",
- " * [Create index with mapping](#create-index-w-mapping)\n",
- " * [Index dataframe](#index-df)\n",
- " * [Execute geo query](#search-geo)\n"
+ "* [3. Search](#3.-Search)\n",
+ "\t* [Search by DSL](#search-by-dsl)\n",
+ "\t* [Search by SQL](#search-by-sql)\n",
+ "* [4. Delete Indices](#4.-Delete-Indices)\n",
+ "* [5. Bonus - Prepare data and index from DataFrame](#5.-Bonus---Prepare-data-and-index-from-DataFrame)\n",
+ "\t* [Prepare the data for indexing](#prepare-the-data-for-indexing)\n",
+ " * [Create index with mapping](#create-index-with-mapping)\n",
+ " * [Index dataframe](#index-dataframe)\n",
+ " * [Execute geo query](#execute-geo-query)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
-    "## 1. Initialize <a name=\"initialize\"></a>"
+ "## 1. Initialize"
]
},
{
@@ -68,7 +68,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Connect to your Amazon OpenSearch domain <a name=\"connect\"></a>"
+ "### Connect to your Amazon OpenSearch domain"
]
},
{
@@ -89,7 +89,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Enter your bucket name <a name=\"bucket\"></a>"
+ "### Enter your bucket name"
]
},
{
@@ -105,7 +105,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Initialize sample data <a name=\"sample-data\"></a>"
+ "### Initialize sample data"
]
},
{
@@ -215,14 +215,14 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "## 2. Indexing (load) <a name=\"indexing\"></a>"
+ "## 2. Indexing (load)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Index documents (no Pandas) <a name=\"index-documents\"></a>"
+ "### Index documents (no Pandas)"
]
},
{
@@ -532,7 +532,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Index CSV <a name=\"index-csv\"></a>"
+ "### Index CSV"
]
},
{
@@ -800,7 +800,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "## 3. Search <a name=\"search\"></a>\n",
+ "## 3. Search\n",
"Search results are returned as Pandas DataFrame"
]
},
@@ -808,7 +808,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### 3.1 Search by DSL"
+ "### Search by DSL"
]
},
{
@@ -915,7 +915,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### 3.1 Search by SQL"
+ "### Search by SQL"
]
},
{
@@ -1015,7 +1015,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "## 4. Delete Indices <a name=\"delete-index\"></a>"
+ "## 4. Delete Indices"
]
},
{
@@ -1049,7 +1049,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "## 5. Bonus - Prepare data and index from DataFrame <a name=\"bonus\"></a>"
+ "## 5. Bonus - Prepare data and index from DataFrame"
]
},
{
@@ -1081,7 +1081,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Prepare the data for indexing <a name=\"prepare-data\"></a>"
+ "### Prepare the data for indexing"
]
},
{
@@ -1103,7 +1103,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Create index with mapping <a name=\"create-index-w-mapping\"></a>"
+ "### Create index with mapping"
]
},
{
@@ -1145,7 +1145,7 @@
"metadata": {},
"source": [
"\n",
-    "### Index dataframe <a name=\"index-df\"></a>"
+ "### Index dataframe"
]
},
{
@@ -1179,7 +1179,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "### Execute geo query <a name=\"search-geo\"></a>\n",
+ "### Execute geo query\n",
"#### Sort restaurants by distance from Times-Square"
]
},
diff --git a/tutorials/032 - Lake Formation Governed Tables.ipynb b/tutorials/032 - Lake Formation Governed Tables.ipynb
index 4bba071f7..fe32604c2 100644
--- a/tutorials/032 - Lake Formation Governed Tables.ipynb
+++ b/tutorials/032 - Lake Formation Governed Tables.ipynb
@@ -32,7 +32,7 @@
" * [1.1.2 Read within query as of time](#1.1.2-Read-within-query-as-of-time)\n",
" * [1.2 Read full table](#1.2-Read-full-table)\n",
"* [2. Write Governed table](#2.-Write-Governed-table)\n",
- " * [2.1 Create new Governed table](#2.1-Create-new-Governed-table)\n",
+ " * [2.1 Create new Governed table](#2.1-Create-a-new-Governed-table)\n",
" * [2.1.1 CSV table](#2.1.1-CSV-table)\n",
" * [2.1.2 Parquet table](#2.1.2-Parquet-table)\n",
" * [2.2 Overwrite operations](#2.2-Overwrite-operations)\n",
@@ -40,7 +40,7 @@
" * [2.2.2 Append](#2.2.2-Append)\n",
" * [2.2.3 Create partitioned Governed table](#2.2.3-Create-partitioned-Governed-table)\n",
" * [2.2.4 Overwrite partitions](#2.2.4-Overwrite-partitions)\n",
- "* [3. Multiple read/write operations within a transaction](#2.-Multiple-read/write-operations-within-a-transaction)"
+ "* [3. Multiple read/write operations within a transaction](#3.-multiple-read/write-operations-within-a-transaction)"
]
},
{
@@ -422,7 +422,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.13"
+ "version": "3.11.7"
},
"vscode": {
"interpreter": {