Lint Python notebooks with ruff.
obi1kenobi committed Nov 13, 2023
1 parent acfc485 commit df69e1a
Showing 31 changed files with 126 additions and 161 deletions.
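For context, ruff can lint Jupyter notebooks directly once `.ipynb` files are opted in to its file discovery. A minimal sketch of how such a pass might be run locally (the exact invocation and configuration are assumptions, not taken from this diff):

```shell
# Assumed pyproject.toml opt-in for notebooks:
#   [tool.ruff]
#   extend-include = ["*.ipynb"]
pip install -U ruff
ruff check --fix docs/   # lint the notebooks and apply auto-fixes
```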
6 changes: 4 additions & 2 deletions .github/workflows/extract_ignored_words_list.py
@@ -3,6 +3,8 @@
 pyproject_toml = toml.load("pyproject.toml")
 
 # Extract the ignore words list (adjust the key as per your TOML structure)
-ignore_words_list = pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
+ignore_words_list = (
+    pyproject_toml.get("tool", {}).get("codespell", {}).get("ignore-words-list")
+)
 
-print(f"::set-output name=ignore_words_list::{ignore_words_list}")
+print(f"::set-output name=ignore_words_list::{ignore_words_list}")
@@ -56,7 +56,8 @@
    "source": [
     "import os\n",
     "\n",
-    "os.environ[\"SERPER_API_KEY\"] = \"\"os.environ[\"OPENAI_API_KEY\"] = \"\""
+    "os.environ[\"SERPER_API_KEY\"] = \"\"\n",
+    "os.environ[\"OPENAI_API_KEY\"] = \"\""
    ]
   },
   {
12 changes: 9 additions & 3 deletions docs/docs/guides/local_llms.ipynb
@@ -265,10 +265,16 @@
    "cell_type": "code",
    "execution_count": null,
    "id": "5eba38dc",
-   "metadata": {},
+   "metadata": {
+    "vscode": {
+     "languageId": "plaintext"
+    }
+   },
    "outputs": [],
    "source": [
-    "CMAKE_ARGS=\"-DLLAMA_METAL=on\" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dirclear"
+    "%env CMAKE_ARGS=\"-DLLAMA_METAL=on\"\n",
+    "%env FORCE_CMAKE=1\n",
+    "%pip install -U llama-cpp-python --no-cache-dir"
    ]
   },
   {
@@ -379,7 +385,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "pip install gpt4all"
+    "pip install gpt4all\n"
    ]
   },
   {
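For comparison, a plain-shell equivalent of the `%env`/`%pip` Metal-build cell changed above (mirroring the pre-change one-liner, with the typo in the `--no-cache-dir` flag corrected; assumes macOS on Apple Silicon):

```shell
# Build llama-cpp-python against Metal for GPU acceleration on Apple Silicon
CMAKE_ARGS="-DLLAMA_METAL=on" FORCE_CMAKE=1 pip install -U llama-cpp-python --no-cache-dir
```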
3 changes: 2 additions & 1 deletion docs/docs/guides/safety/amazon_comprehend_chain.ipynb
@@ -78,7 +78,8 @@
    "from langchain_experimental.comprehend_moderation import AmazonComprehendModerationChain\n",
    "\n",
    "comprehend_moderation = AmazonComprehendModerationChain(\n",
-    "    client=comprehend_client, verbose=True  # optional\n",
+    "    client=comprehend_client,\n",
+    "    verbose=True,  # optional\n",
    ")"
   ]
  },
2 changes: 1 addition & 1 deletion docs/docs/integrations/callbacks/argilla.ipynb
@@ -147,7 +147,7 @@
    "    api_key=os.environ[\"ARGILLA_API_KEY\"],\n",
    ")\n",
    "\n",
-    "dataset.push_to_argilla(\"langchain-dataset\");"
+    "dataset.push_to_argilla(\"langchain-dataset\")"
   ]
  },
  {
8 changes: 2 additions & 6 deletions docs/docs/integrations/callbacks/context.ipynb
@@ -36,14 +36,10 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "metadata": {
-   "vscode": {
-    "languageId": "shellscript"
-   }
-  },
+  "metadata": {},
   "outputs": [],
   "source": [
-   "$ pip install context-python --upgrade"
+   "!pip install context-python --upgrade"
   ]
  },
  {
2 changes: 1 addition & 1 deletion docs/docs/integrations/chat/baidu_qianfan_endpoint.ipynb
@@ -226,7 +226,7 @@
    "source": [
     "res = chat.stream(\n",
     "    [HumanMessage(content=\"hi\")],\n",
-    "    **{\"top_p\": 0.4, \"temperature\": 0.1, \"penalty_score\": 1}\n",
+    "    **{\"top_p\": 0.4, \"temperature\": 0.1, \"penalty_score\": 1},\n",
     ")\n",
     "\n",
     "for r in res:\n",
2 changes: 1 addition & 1 deletion docs/docs/integrations/document_loaders/google_drive.ipynb
@@ -61,7 +61,7 @@
    "source": [
     "loader = GoogleDriveLoader(\n",
     "    folder_id=\"1yucgL9WGgWZdM1TOuKkeghlPizuzMYb5\",\n",
-    "    token_path='/path/where/you/want/token/to/be/created/google_token.json'\n",
+    "    token_path=\"/path/where/you/want/token/to/be/created/google_token.json\",\n",
     "    # Optional: configure whether to recursively fetch files from subfolders. Defaults to False.\n",
     "    recursive=False,\n",
     ")"
8 changes: 2 additions & 6 deletions docs/docs/integrations/document_loaders/rockset.ipynb
@@ -29,14 +29,10 @@
  {
   "cell_type": "code",
   "execution_count": null,
-  "metadata": {
-   "vscode": {
-    "languageId": "shellscript"
-   }
-  },
+  "metadata": {},
   "outputs": [],
   "source": [
-   "$ pip3 install rockset"
+   "!pip install rockset"
   ]
  },
  {
22 changes: 11 additions & 11 deletions docs/docs/integrations/document_loaders/web_base.ipynb
@@ -91,7 +91,7 @@
    "# Example: transcript = soup.select_one(\"td[class='scrtext']\").text\n",
    "# BS4 documentation can be found here: https://www.crummy.com/software/BeautifulSoup/bs4/doc/\n",
    "\n",
-    "\"\"\";"
+    "\"\"\""
   ]
  },
  {
@@ -226,19 +226,23 @@
  },
  {
   "cell_type": "markdown",
+  "id": "672264ad",
+  "metadata": {
+   "collapsed": false
+  },
   "source": [
    "## Using proxies\n",
    "\n",
    "Sometimes you might need to use proxies to get around IP blocks. You can pass in a dictionary of proxies to the loader (and `requests` underneath) to use them."
-  ],
-  "metadata": {
-   "collapsed": false
-  },
-  "id": "672264ad"
+  ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
+  "id": "9caf0310",
+  "metadata": {
+   "collapsed": false
+  },
   "outputs": [],
   "source": [
    "loader = WebBaseLoader(\n",
@@ -249,11 +253,7 @@
    "    },\n",
    ")\n",
    "docs = loader.load()"
-  ],
-  "metadata": {
-   "collapsed": false
-  },
-  "id": "9caf0310"
+  ]
  }
 ],
 "metadata": {
@@ -109,7 +109,7 @@
    "\n",
    "print(\n",
    "    *[d.page_content + \"\\n\\n\" + json.dumps(d.metadata) for d in enhanced_documents],\n",
-    "    sep=\"\\n\\n---------------\\n\\n\"\n",
+    "    sep=\"\\n\\n---------------\\n\\n\",\n",
    ")"
   ]
  },
@@ -167,7 +167,7 @@
    "\n",
    "print(\n",
    "    *[d.page_content + \"\\n\\n\" + json.dumps(d.metadata) for d in enhanced_documents],\n",
-    "    sep=\"\\n\\n---------------\\n\\n\"\n",
+    "    sep=\"\\n\\n---------------\\n\\n\",\n",
    ")"
   ]
  },
@@ -225,7 +225,7 @@
    "\n",
    "print(\n",
    "    *[d.page_content + \"\\n\\n\" + json.dumps(d.metadata) for d in enhanced_documents],\n",
-    "    sep=\"\\n\\n---------------\\n\\n\"\n",
+    "    sep=\"\\n\\n---------------\\n\\n\",\n",
    ")"
   ]
  },
2 changes: 1 addition & 1 deletion docs/docs/integrations/llms/baidu_qianfan_endpoint.ipynb
@@ -227,7 +227,7 @@
    "res = llm.generate(\n",
    "    prompts=[\"hi\"],\n",
    "    streaming=True,\n",
-    "    **{\"top_p\": 0.4, \"temperature\": 0.1, \"penalty_score\": 1}\n",
+    "    **{\"top_p\": 0.4, \"temperature\": 0.1, \"penalty_score\": 1},\n",
    ")\n",
    "\n",
    "for r in res:\n",
7 changes: 5 additions & 2 deletions docs/docs/integrations/llms/chatglm.ipynb
@@ -22,7 +22,8 @@
   "outputs": [],
   "source": [
    "from langchain.llms import ChatGLM\n",
-   "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain\n",
+   "from langchain.prompts import PromptTemplate\n",
+   "from langchain.chains import LLMChain\n",
    "\n",
    "# import os"
  ]
@@ -52,7 +53,9 @@
   "llm = ChatGLM(\n",
   "    endpoint_url=endpoint_url,\n",
   "    max_token=80000,\n",
-  "    history=[[\"我将从美国到中国来旅游,出行前希望了解中国的城市\", \"欢迎问我任何问题。\"]],\n",
+  "    history=[\n",
+  "        [\"我将从美国到中国来旅游,出行前希望了解中国的城市\", \"欢迎问我任何问题。\"]\n",
+  "    ],\n",
   "    top_p=0.9,\n",
   "    model_kwargs={\"sample_model_args\": False},\n",
   ")\n",
14 changes: 4 additions & 10 deletions docs/docs/integrations/llms/edenai.ipynb
@@ -32,16 +32,10 @@
    "which you can get by creating an account https://app.edenai.run/user/register and heading here https://app.edenai.run/admin/account/settings\n",
    "\n",
    "Once we have a key we'll want to set it as an environment variable by running:\n",
-   "\n"
-  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "export EDENAI_API_KEY=\"...\""
+   "\n",
+   "```bash\n",
+   "export EDENAI_API_KEY=\"...\"\n",
+   "```"
   ]
  },
 {
7 changes: 4 additions & 3 deletions docs/docs/integrations/llms/gooseai.ipynb
@@ -16,7 +16,7 @@
   "metadata": {},
   "source": [
    "## Install openai\n",
-   "The `openai` package is required to use the GooseAI API. Install `openai` using `pip3 install openai`."
+   "The `openai` package is required to use the GooseAI API. Install `openai` using `pip install openai`."
   ]
  },
  {
@@ -25,7 +25,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-   "$ pip3 install openai"
+   "!pip install openai"
   ]
  },
  {
@@ -43,7 +43,8 @@
   "source": [
    "import os\n",
    "from langchain.llms import GooseAI\n",
-   "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
+   "from langchain.prompts import PromptTemplate\n",
+   "from langchain.chains import LLMChain"
   ]
  },
  {
4 changes: 1 addition & 3 deletions docs/docs/integrations/llms/jsonformer_experimental.ipynb
@@ -124,9 +124,7 @@
    "BEGIN! Answer the Human's question as best as you are able.\n",
    "------\n",
    "Human: 'What's the difference between an iterator and an iterable?'\n",
-    "AI Assistant:\"\"\".format(\n",
-    "    arg_schema=ask_star_coder.args\n",
-    ")"
+    "AI Assistant:\"\"\".format(arg_schema=ask_star_coder.args)"
   ]
  },
  {
7 changes: 3 additions & 4 deletions docs/docs/integrations/llms/openlm.ipynb
@@ -71,7 +71,8 @@
   "outputs": [],
   "source": [
    "from langchain.llms import OpenLM\n",
-   "from langchain.prompts import PromptTemplate\nfrom langchain.chains import LLMChain"
+   "from langchain.prompts import PromptTemplate\n",
+   "from langchain.chains import LLMChain"
  ]
 },
 {
@@ -106,9 +107,7 @@
   "    result = llm_chain.run(question)\n",
   "    print(\n",
   "        \"\"\"Model: {}\n",
-  "Result: {}\"\"\".format(\n",
-  "            model, result\n",
-  "        )\n",
+  "Result: {}\"\"\".format(model, result)\n",
   "    )"
  ]
 }
40 changes: 11 additions & 29 deletions docs/docs/integrations/llms/titan_takeoff.ipynb
@@ -34,7 +34,7 @@
  },
  "outputs": [],
  "source": [
-  "pip install titan-iris"
+  "!pip install titan-iris"
  ]
 },
 {
@@ -49,22 +49,13 @@
   "## Taking off\n",
   "Models are referred to by their model id on HuggingFace. Takeoff uses port 8000 by default, but can be configured to use another port. There is also support to use a Nvidia GPU by specifying cuda for the device flag.\n",
   "\n",
-  "To start the takeoff server, run:"
- ]
-},
-{
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
-  "vscode": {
-   "languageId": "shellscript"
-  }
- },
- "outputs": [],
- "source": [
-  "iris takeoff --model tiiuae/falcon-7b-instruct --device cpu\n",
-  "iris takeoff --model tiiuae/falcon-7b-instruct --device cuda # Nvidia GPU required\n",
-  "iris takeoff --model tiiuae/falcon-7b-instruct --device cpu --port 5000 # run on port 5000 (default: 8000)"
+  "To start the takeoff server, run:\n",
+  "\n",
+  "```shell\n",
+  "iris takeoff --model tiiuae/falcon-7b-instruct --device cpu\n",
+  "iris takeoff --model tiiuae/falcon-7b-instruct --device cuda # Nvidia GPU required\n",
+  "iris takeoff --model tiiuae/falcon-7b-instruct --device cpu --port 5000 # run on port 5000 (default: 8000)\n",
+  "```"
  ]
 },
 {
@@ -74,20 +65,11 @@
   "You will then be directed to a login page, where you will need to create an account to proceed.\n",
   "After logging in, run the command onscreen to check whether the server is ready. When it is ready, you can start using the Takeoff integration.\n",
   "\n",
-  "To shutdown the server, run the following command. You will be presented with options on which Takeoff server to shut down, in case you have multiple running servers.\n"
- ]
-},
-{
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
-  "vscode": {
-   "languageId": "shellscript"
-  }
- },
- "outputs": [],
- "source": [
-  "iris takeoff --shutdown # shutdown the server"
+  "To shutdown the server, run the following command. You will be presented with options on which Takeoff server to shut down, in case you have multiple running servers.\n",
+  "\n",
+  "```shell\n",
+  "iris takeoff --shutdown # shutdown the server\n",
+  "```"
  ]
 },
 {
@@ -58,7 +58,8 @@
    "history = RocksetChatMessageHistory(\n",
    "    session_id=\"MySession\",\n",
    "    client=RocksetClient(\n",
-    "        api_key=\"YOUR API KEY\", host=Regions.usw2a1  # us-west-2 Oregon\n",
+    "        api_key=\"YOUR API KEY\",\n",
+    "        host=Regions.usw2a1,  # us-west-2 Oregon\n",
     "    ),\n",
     "    collection=\"langchain_demo\",\n",
     "    sync=True,\n",