diff --git a/examples/jupyter_notebook_magic/example.ipynb b/examples/jupyter_notebook_magic/example.ipynb index ab237735d..9c66077d5 100644 --- a/examples/jupyter_notebook_magic/example.ipynb +++ b/examples/jupyter_notebook_magic/example.ipynb @@ -2,10 +2,19 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "e78e2bc2", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "/home/tjean/projects/hamilton/.venv/bin/python: No module named pip\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], "source": [ "# Execute this cell to install dependencies\n", "%pip install sf-hamilton[visualization]" @@ -52,11 +61,15 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "e3b8a109", "metadata": {}, "outputs": [], "source": [ + "# disable plugin autoloading for faster notebook start time\n", + "from hamilton import registry\n", + "registry.disable_autoload()\n", + "\n", "%reload_ext hamilton.plugins.jupyter_magic\n", "from hamilton import driver # we'll need this later" ] @@ -75,7 +88,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "2a3d64f3", "metadata": {}, "outputs": [ @@ -86,10 +99,10 @@ "\u001b[0;31mDocstring:\u001b[0m\n", "::\n", "\n", - " %cell_to_module [-m [MODULE_NAME]] [-d [DISPLAY]] [-x [EXECUTE]]\n", - " [-b BUILDER] [-c CONFIG] [-i INPUTS] [-o OVERRIDES]\n", - " [--hide_results] [-w [WRITE_TO_FILE]]\n", - " [module_name]\n", + " %cell_to_module [-m [MODULE_NAME]] [-d [DISPLAY]] [--display_cache]\n", + " [-x [EXECUTE]] [-b BUILDER] [-c CONFIG] [-i INPUTS]\n", + " [-o OVERRIDES] [--show_results] [-w [WRITE_TO_FILE]]\n", + " [name]\n", "\n", "Turn a notebook cell into a Hamilton module definition. This allows you to define\n", "and execute a dataflow from a single cell.\n", @@ -105,7 +118,7 @@ "```\n", "\n", "positional arguments:\n", - " module_name Name for the module defined in this cell.\n", + " name Name for the module defined in this cell.\n", "\n", "options:\n", " -m <[MODULE_NAME]>, --module_name <[MODULE_NAME]>\n", @@ -114,6 +127,8 @@ " -d <[DISPLAY]>, --display <[DISPLAY]>\n", " Display the dataflow. The argument is the variable\n", " name of a dictionary of visualization kwargs; else {}.\n", + " --display_cache After execution, display the retrieved results. This\n", + " uses `dr.cache.view_run()`.\n", " -x <[EXECUTE]>, --execute <[EXECUTE]>\n", " Execute the dataflow. The argument is the variable\n", " name of a list of nodes; else execute all nodes.\n", @@ -130,11 +145,12 @@ " -o OVERRIDES, --overrides OVERRIDES\n", " Execution overrides. The argument is the variable name\n", " of a dict of overrides; else {}.\n", - " --hide_results Hides the automatic display of execution results.\n", + " --show_results Print node values in the output cell after each node\n", + " is executed.\n", " -w <[WRITE_TO_FILE]>, --write_to_file <[WRITE_TO_FILE]>\n", " Write cell content to a file. 
The argument is the file\n", " path; else write to {module_name}.py\n", - "\u001b[0;31mFile:\u001b[0m ~/projects/dagworks/hamilton/hamilton/plugins/jupyter_magic.py" + "\u001b[0;31mFile:\u001b[0m ~/projects/hamilton/hamilton/plugins/jupyter_magic.py" ] } ], @@ -161,7 +177,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "ef73893f", "metadata": {}, "outputs": [ @@ -220,7 +236,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -243,7 +259,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "4b20538d", "metadata": {}, "outputs": [ @@ -272,7 +288,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "1117471d", "metadata": {}, "outputs": [], @@ -295,7 +311,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "19a7a66e", "metadata": {}, "outputs": [ @@ -354,7 +370,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -369,7 +385,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "4efeda39", "metadata": {}, "outputs": [], @@ -379,7 +395,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "id": "0fc841a4", "metadata": {}, "outputs": [ @@ -438,7 +454,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -466,7 +482,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "a6f6052a", "metadata": {}, "outputs": [], @@ -478,7 +494,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "id": "91875c91", "metadata": {}, "outputs": [ @@ -488,7 +504,7 @@ "\"Knock, knock. Who's there? Cowsays\"" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -515,7 +531,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "id": "b6f00cdb", "metadata": {}, "outputs": [ @@ -591,7 +607,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -609,7 +625,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "id": "758b3fbe", "metadata": {}, "outputs": [ @@ -685,7 +701,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -703,7 +719,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "id": "073225f8", "metadata": {}, "outputs": [], @@ -713,7 +729,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "id": "311f0bad", "metadata": {}, "outputs": [ @@ -789,7 +805,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -825,7 +841,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 16, "id": "f8fc4d76", "metadata": {}, "outputs": [], @@ -839,7 +855,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 17, "id": "3b924480", "metadata": {}, "outputs": [ @@ -871,29 +887,15 @@ "knock_joke\n", "true\n", "\n", - "\n", - "\n", - "topic\n", - "\n", - "\n", - "topic\n", - "Parallelizable\n", - "\n", "\n", - "\n", + "\n", "joke_prompt\n", "\n", "joke_prompt: knock_joke\n", "str\n", "\n", - "\n", - "\n", - "topic->joke_prompt\n", - "\n", - "\n", - "\n", "\n", - "\n", + "\n", "joke_prompt_collection\n", "\n", "\n", @@ -901,12 +903,26 @@ "list\n", "\n", "\n", - "\n", + "\n", "joke_prompt->joke_prompt_collection\n", "\n", "\n", "\n", "\n", + "\n", + "\n", + "topic\n", + "\n", + "\n", + "topic\n", + "Parallelizable\n", + "\n", + "\n", + "\n", + "topic->joke_prompt\n", + "\n", + "\n", + "\n", "\n", "\n", "config\n", @@ -939,7 +955,7 
@@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -974,7 +990,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 18, "id": "630ff804", "metadata": {}, "outputs": [], @@ -992,7 +1008,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 19, "id": "66dccada", "metadata": {}, "outputs": [ @@ -1005,73 +1021,96 @@ "\n", "\n", - "\n", - "\n", + "\n", + "\n", "%3\n", - "\n", + "\n", "\n", "cluster__legend\n", - "\n", - "Legend\n", + "\n", + "Legend\n", "\n", - "\n", + "\n", "\n", - "reply\n", - "\n", - "reply\n", - "str\n", - "\n", - "\n", - "\n", - "punchline\n", - "\n", - "punchline\n", - "str\n", - "\n", - "\n", - "\n", - "reply->punchline\n", - "\n", - "\n", + "topic\n", + "\n", + "\n", + "topic\n", + "Parallelizable\n", "\n", "\n", "\n", "joke_response\n", - "\n", - "joke_response\n", - "str\n", + "\n", + "joke_response\n", + "str\n", "\n", - "\n", + "\n", "\n", - "joke_prompt\n", - "\n", - "joke_prompt\n", - "str\n", + "joke_prompt_collection\n", + "\n", + "\n", + "joke_prompt_collection\n", + "list\n", "\n", - "\n", + "\n", + "\n", + "_joke_response_inputs\n", + "\n", + "joke_prompt\n", + "str\n", + "\n", + "\n", "\n", - "joke_prompt->reply\n", - "\n", - "\n", + "_joke_response_inputs->joke_response\n", + "\n", + "\n", "\n", - "\n", + "\n", + "\n", + "_joke_prompt_collection_inputs\n", + "\n", + "joke_prompt\n", + "str\n", + "\n", + "\n", "\n", - "joke_prompt->joke_response\n", - "\n", - "\n", + "_joke_prompt_collection_inputs->joke_prompt_collection\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "input\n", + "\n", + "input\n", "\n", "\n", - "\n", + "\n", "function\n", - "\n", - "function\n", + "\n", + "function\n", + "\n", + "\n", + "\n", + "expand\n", + "\n", + "\n", + "expand\n", + "\n", + "\n", + "\n", + "collect\n", + "\n", + "\n", + "collect\n", "\n", "\n", "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -1097,7 +1136,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 20, "id": "8bcd9ef3", "metadata": {}, "outputs": [ @@ -1156,6 +1195,18 @@ "%module_to_cell ./joke.py" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "873793f0", + "metadata": {}, + "outputs": [], + "source": [ + "%%cell_to_module joke\n", + "def joke_prompt(topic: str) -> str:\n", + " return f\"Knock, knock. Who's there? {topic}\"\n" + ] + }, { "cell_type": "markdown", "id": "9f9aec0a", @@ -1177,17 +1228,17 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 21, "id": "f704fe3f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'joke_prompt': \"Knock, knock. Who's there? 
Cowsay\"}" + "{'joke_prompt': None}" ] }, - "execution_count": 22, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -1211,7 +1262,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 22, "id": "7eebf289", "metadata": {}, "outputs": [ @@ -1234,45 +1285,45 @@ "\n", "Legend\n", "\n", - "\n", + "\n", "\n", - "a_dataframe\n", - "\n", - "a_dataframe\n", - "DataFrame\n", + "joke_prompt\n", + "\n", + "joke_prompt\n", + "str\n", "\n", "\n", "\n", "reply\n", - "\n", - "reply\n", - "str\n", + "\n", + "reply\n", + "str\n", + "\n", + "\n", + "\n", + "joke_prompt->reply\n", + "\n", + "\n", "\n", "\n", "\n", "punchline\n", - "\n", - "punchline\n", - "str\n", + "\n", + "punchline\n", + "str\n", "\n", "\n", "\n", "reply->punchline\n", - "\n", - "\n", + "\n", + "\n", "\n", - "\n", + "\n", "\n", - "joke_prompt\n", - "\n", - "joke_prompt\n", - "str\n", - "\n", - "\n", - "\n", - "joke_prompt->reply\n", - "\n", - "\n", + "a_dataframe\n", + "\n", + "a_dataframe\n", + "DataFrame\n", "\n", "\n", "\n", @@ -1290,95 +1341,7 @@ "\n" ], "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
ab
00a
11b
22c
33d
\n", - "
" - ], - "text/plain": [ - " a b\n", - "0 0 a\n", - "1 1 b\n", - "2 2 c\n", - "3 3 d" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "\"Knock, knock. Who's there? Cowsay\"" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'Cowsay who?'" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'No, Cowsay MooOOooo'" + "" ] }, "metadata": {}, @@ -1406,10 +1369,43 @@ }, { "cell_type": "markdown", - "id": "c99854dd", + "id": "55b030df", "metadata": {}, "source": [ - "👆 As you see, node results are automatically displayed in topologically sorted order. You can hide them with `--hide_results`." + "Adding the flag `--show_results` will print node results to the cell output after each execution." + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "ac13d20a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "KeyError: Received `--inputs docs` but variable not found.\n" + ] + } + ], + "source": [ + "%%cell_to_module joke --execute --inputs docs\n", + "import pandas as pd\n", + "\n", + "def joke_prompt() -> str:\n", + " return f\"Knock, knock. Who's there? Cowsay\"\n", + "\n", + "def reply(joke_prompt: str) -> str:\n", + " _, _, right = joke_prompt.partition(\"? \")\n", + " return f\"{right} who?\"\n", + "\n", + "def punchline(reply: str) -> str:\n", + " left, _, _ = reply.partition(\" \")\n", + " return f\"No, {left} MooOOooo\"\n", + "\n", + "def a_dataframe() -> pd.DataFrame:\n", + " return pd.DataFrame({\"a\": [0, 1, 2, 3], \"b\": [\"a\", \"b\", \"c\", \"d\"]})" ] }, { @@ -1456,39 +1452,39 @@ "\n", "Legend\n", "
\n", - "\n", - "\n", - "topic\n", - "\n", - "topic\n", - "str\n", - "\n", "\n", - "\n", + "\n", "joke_prompt\n", "\n", "joke_prompt\n", "str\n", "\n", - "\n", - "\n", - "topic->joke_prompt\n", - "\n", - "\n", - "\n", "\n", - "\n", + "\n", "reply\n", "\n", "reply\n", "str\n", "\n", "\n", - "\n", + "\n", "joke_prompt->reply\n", "\n", "\n", "\n", + "\n", + "\n", + "topic\n", + "\n", + "topic\n", + "str\n", + "\n", + "\n", + "\n", + "topic->joke_prompt\n", + "\n", + "\n", + "\n", "\n", "\n", "function\n", @@ -1505,16 +1501,7 @@ "\n" ], "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'Cowsay who?'" + "" ] }, "metadata": {}, @@ -1553,6 +1540,13 @@ "id": "7ce116fc", "metadata": {}, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "DeprecationWarning: `--hide_results` is no longer required when using `--execute`. Now, results are hidden by default. Use `--show_results` if you want them automatically printed to cell output.\n" + ] + }, { "data": { "image/svg+xml": [ @@ -1572,25 +1566,25 @@ "\n", "Legend\n", "\n", - "\n", + "\n", "\n", + "joke_prompt\n", + "\n", + "joke_prompt\n", + "str\n", + "\n", + "\n", + "\n", "reply\n", "\n", "reply\n", "str\n", "\n", - "\n", - "\n", - "punchline\n", - "\n", - "punchline\n", - "str\n", - "\n", - "\n", - "\n", - "reply->punchline\n", - "\n", - "\n", + "\n", + "\n", + "joke_prompt->reply\n", + "\n", + "\n", "\n", "\n", "\n", @@ -1599,24 +1593,24 @@ "topic\n", "str\n", "\n", - "\n", - "\n", - "joke_prompt\n", - "\n", - "joke_prompt\n", - "str\n", - "\n", "\n", - "\n", + "\n", "topic->joke_prompt\n", "\n", "\n", "\n", - "\n", - "\n", - "joke_prompt->reply\n", - "\n", - "\n", + "\n", + "\n", + "punchline\n", + "\n", + "punchline\n", + "str\n", + "\n", + "\n", + "\n", + "reply->punchline\n", + "\n", + "\n", "\n", "\n", "\n", @@ -1634,7 +1628,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -1771,7 +1765,7 @@ "str\n", "\n", "\n", - "\n", + "\n", "punchline\n", "\n", "\n", @@ -1782,13 +1776,13 @@ "str\n", "\n", "\n", - "\n", + "\n", "reply->punchline\n", "\n", "\n", "\n", "\n", - "\n", + "\n", "joke_prompt\n", "\n", "joke_prompt\n", @@ -1808,7 +1802,7 @@ "str\n", "\n", "\n", - "\n", + "\n", "_joke_prompt_inputs->joke_prompt\n", "\n", "\n", @@ -1845,43 +1839,7 @@ "\n" ], "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'monday'" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "\"Knock, knock. Who's there? 
monday\"" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'monday who?'" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'Bingo bongo!'" + "" ] }, "metadata": {}, @@ -1955,13 +1913,26 @@ "\n", "Legend\n", "\n", - "\n", + "\n", "\n", + "joke_prompt\n", + "\n", + "joke_prompt\n", + "str\n", + "\n", + "\n", + "\n", "reply\n", "\n", "reply\n", "str\n", "\n", + "\n", + "\n", + "joke_prompt->reply\n", + "\n", + "\n", + "\n", "\n", "\n", "punchline\n", @@ -1975,19 +1946,6 @@ "\n", "\n", "\n", - "\n", - "\n", - "joke_prompt\n", - "\n", - "joke_prompt\n", - "str\n", - "\n", - "\n", - "\n", - "joke_prompt->reply\n", - "\n", - "\n", - "\n", "\n", "\n", "function\n", @@ -2004,7 +1962,7 @@ "\n" ], "text/plain": [ - "" + "" ] }, "metadata": {}, @@ -2015,39 +1973,12 @@ "output_type": "stream", "text": [ "Executing node: joke_prompt.\n", - "Finished debugging node: joke_prompt in 53.4μs. Status: Success.\n", + "Finished debugging node: joke_prompt in 80.3μs. Status: Success.\n", "Executing node: reply.\n", - "Finished debugging node: reply in 10.5μs. Status: Success.\n", + "Finished debugging node: reply in 44.8μs. Status: Success.\n", "Executing node: punchline.\n", - "Finished debugging node: punchline in 9.78μs. Status: Success.\n" + "Finished debugging node: punchline in 42.7μs. Status: Success.\n" ] - }, - { - "data": { - "text/plain": [ - "\"Knock, knock. Who's there? Cowsay\"" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'Cowsay who?'" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'No, Cowsay MooOOooo'" - ] - }, - "metadata": {}, - "output_type": "display_data" } ], "source": [ @@ -2071,19 +2002,133 @@ "source": [ "There are ton of awesome adapters that can help you with your notebook experience. Here are a few notable mentions:\n", "\n", - "1. `hamilton.lifecycle.default.CacheAdapter()` will automatically version the node's code and input values and store its result on disk. When running the same node (code, inputs) pair, it will read the value from disk instead of recomputing. This can help save LLM API costs!\n", - "2. `hamilton.plugins.h_diskcache.DiskCacheAdapter()` same core features as `CacheAdapter()`, but more utilities around cache management\n", - "3. `hamilton.lifecycle.default.PrintLn()` print execution status.\n", - "4. `hamilton.plugins.h_tqdm.ProgressBar()` add a progress bar for execution.\n", - "5. `hamilton.lifecycle.default.PDBDebugger()` allows you to step into a node with a Python debugger, allowing you to execute code line by line.\n", + "1. `hamilton.lifecycle.default.PrintLn()` print execution status (this is similar to the `--show_results` flag).\n", + "2. `hamilton.plugins.h_tqdm.ProgressBar()` add a progress bar for execution.\n", + "3. `hamilton.lifecycle.default.PDBDebugger()` allows you to step into a node with a Python debugger, allowing you to execute code line by line.\n", "\n", "Note that all of these adapters work with Hamilton outside notebooks too!" ] + }, + { + "cell_type": "markdown", + "id": "80779b60", + "metadata": {}, + "source": [ + "## 3.6 Caching\n", + "The next builder include the `.with_cache()` clause. This will store results on disks. Adding the `--display_cache` flag will print a visualization of what results were retrieved from cache once execution is completed.\n", + "\n", + "To view it, execute the cell twice." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 33, + "id": "2a43046b", + "metadata": {}, + "outputs": [], + "source": [ + "builder_with_cache = driver.Builder().with_cache()" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "id": "0ec9d3bf", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "%3\n", + "\n", + "\n", + "cluster__legend\n", + "\n", + "Legend\n", + "\n", + "\n", + "\n", + "joke_prompt\n", + "\n", + "joke_prompt\n", + "str\n", + "\n", + "\n", + "\n", + "reply\n", + "\n", + "reply\n", + "str\n", + "\n", + "\n", + "\n", + "joke_prompt->reply\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "punchline\n", + "\n", + "punchline\n", + "str\n", + "\n", + "\n", + "\n", + "reply->punchline\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "output\n", + "\n", + "output\n", + "\n", + "\n", + "\n", + "from cache\n", + "\n", + "from cache\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "%%cell_to_module joke --builder builder_with_cache --execute --display_cache\n", + "def joke_prompt() -> str:\n", + " return f\"Knock, knock. Who's there? Cowsay\"\n", + "\n", + "def reply(joke_prompt: str) -> str:\n", + " _, _, right = joke_prompt.partition(\"? \")\n", + " return f\"{right} who?\"\n", + "\n", + "def punchline(reply: str) -> str:\n", + " left, _, _ = reply.partition(\" \")\n", + " return f\"No, {left} MooOOooo\"" + ] } ], "metadata": { "kernelspec": { - "display_name": "venv", + "display_name": ".venv", "language": "python", "name": "python3" }, @@ -2097,7 +2142,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.1" + "version": "3.11.9" } }, "nbformat": 4, diff --git a/hamilton/plugins/jupyter_magic.py b/hamilton/plugins/jupyter_magic.py index 675b17365..3c865d57f 100644 --- a/hamilton/plugins/jupyter_magic.py +++ b/hamilton/plugins/jupyter_magic.py @@ -184,6 +184,11 @@ def resolve_unknown_args_cell_to_module(self, unknown: List[str]): "DeprecationWarning: -v/--verbose no long does anything and will be removed in future releases." ) + if any(arg == "--hide_results" for arg in unknown): + print( + "DeprecationWarning: `--hide_results` is no longer required when using `--execute`. Now, results are hidden by default. Use `--show_results` if you want them automatically printed to cell output." + ) + # there for backwards compatibility. Equivalent to calling `%%cell_to_module?` # not included as @argument because it's not really a function arg to %%cell_to_module if any(arg in ("-h", "--help") for arg in unknown): @@ -227,6 +232,11 @@ def resolve_config_arg(self, config_arg) -> Union[bool, dict]: const=True, help="Display the dataflow. The argument is the variable name of a dictionary of visualization kwargs; else {}.", ) + @argument( + "--display_cache", + action="store_true", + help="After execution, display the retrieved results. This uses `dr.cache.view_run()`.", + ) @argument( "-x", "--execute", @@ -255,9 +265,9 @@ def resolve_config_arg(self, config_arg) -> Union[bool, dict]: help="Execution overrides. 
The argument is the variable name of a dict of overrides; else {}.", ) @argument( - "--hide_results", + "--show_results", action="store_true", - help="Hides the automatic display of execution results.", + help="Print node values in the output cell after each node is executed.", ) @argument( "-w", @@ -304,7 +314,8 @@ def B(A: int) -> bool: # main case: exit if variable is not in user namespace if value and self.shell.user_ns.get(value) is None: - return f"KeyError: Received `--{name} {value}` but variable not found." + print(f"KeyError: Received `--{name} {value}` but variable not found.") + return # parse config; exit if config is invalid config = self.resolve_config_arg(args.config) @@ -330,7 +341,10 @@ def B(A: int) -> bool: # determine the Driver config # can't check from args.builder because it might be None if config and base_builder.config: - return "AssertionError: Received a config -c/--config and a Builder -b/--builder with an existing config. Pass either one." + print( + "AssertionError: Received a config -c/--config and a Builder -b/--builder with an existing config. Pass either one." + ) + return # Decision: write to file before trying to build and execute Driver # See argument `help` for behavior details @@ -395,10 +409,16 @@ def B(A: int) -> bool: results = {_normalize_result_names(name): value for name, value in results.items()} self.shell.push(results) - if args.hide_results: - return - # results will follow the order of `final_vars` or topologically sorted if all vars - display(*(results[n] for n in final_vars)) + if args.show_results: + # results will follow the order of `final_vars` or topologically sorted if all vars + display(*(results[n] for n in final_vars)) + + if args.display_cache: + dot = dr.cache.view_run() + if self.notebook_env == "databricks": + display_in_databricks(dot) + else: + display(dot) # TODO unify the API and logic of `%%cell_to_module` and `%%incr_cell_to_module` @magic_arguments() @@ -544,7 +564,7 @@ def reset_module(self, line): print(f"KeyError: `{args.module_name}` not found.") @magic_arguments() - @argument("name", type=str, help="Creates a dictionary fromt the cell's content.") + @argument("name", type=str, help="Creates a dictionary from the cell's content.") @cell_magic def set_dict(self, line: str, cell: str): """Execute the cell and store all assigned variables as inputs"""
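For quick reference while reviewing, here is a minimal two-cell sketch of how the flags introduced in this diff (`--show_results` and `--display_cache`) combine with a cache-enabled builder. The builder variable name and the node body are illustrative only; every call and flag used comes from the API shown in the diff above.

    # first cell: load the magic and build a driver with caching enabled
    from hamilton import driver, registry
    registry.disable_autoload()  # optional, per the diff: faster notebook start-up
    %reload_ext hamilton.plugins.jupyter_magic
    cached_builder = driver.Builder().with_cache()  # illustrative variable name

The second cell turns its content into a Hamilton module, executes it, prints node values (`--show_results`), and then renders `dr.cache.view_run()` (`--display_cache`):

    %%cell_to_module joke --builder cached_builder --execute --show_results --display_cache
    def joke_prompt() -> str:
        return "Knock, knock. Who's there? Cowsay"

Running that cell a second time should show `joke_prompt` retrieved from cache in the visualization. Note that with `--hide_results` deprecated, node values are only printed to the cell output when `--show_results` is passed.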