diff --git a/README.md b/README.md
index c5ba633..ab81b48 100644
--- a/README.md
+++ b/README.md
@@ -1,21 +1,52 @@
 # BMO Chatbot for Obsidian
-Generate and brainstorm ideas while creating your notes using Large Language Models (LLMs) from Ollama, LM Studio, Anthropic, OpenAI, Mistral AI, and more for Obsidian.
+Generate and brainstorm ideas while creating your notes using Large Language Models (LLMs) from Ollama, LM Studio, Anthropic, Google Gemini, OpenAI, Mistral AI, and more for Obsidian.

[original_example / dataview_example images]

 ## Features
+### Profiles
+
+Create chatbots with specific knowledge, personalities, and presets.
+
+[profiles_example image]
+
+### BMO Generate
+
+Generate a response from the editor using your connected LLMs.
+
+[bmo_generate_example image]
+
+### 'Prompt Select Generate' Command
+
+Prompt, select, and generate within your editor.
+
+[prompt_select_generate_example image]
+
+### Obsidian Markdown Rendering
+
+Render codeblocks (e.g. Dataview) that can be displayed in your chat view. Use the system prompt to customize your chatbot responses.
+
+[dataview_example image]
+
+### And More!
+
 - **Interact with self-hosted Large Language Models (LLMs):** Use the REST API URLs provided to interact with self-hosted Large Language Models (LLMs) using [Ollama](https://ollama.ai) or [LM Studio](https://lmstudio.ai/).
-- **Profiles:** Create chatbots with specific knowledge, personalities, and presets.
-- **Chat from anywhere in Obsidian:** Chat with your bot from anywhere within Obsidian.
 - **Chat with current note:** Use your chatbot to reference and engage within your current note.
-- **Chatbot renders in Obsidian Markdown:** Receive formatted responses in Obsidian Markdown for consistency.
+- **Chat from anywhere in Obsidian:** Chat with your bot from anywhere within Obsidian.
 - **Customizable bot name:** Personalize the chatbot's name.
-- **Prompt Select Generate:** Prompt, select, and generate within your editor.
+- **Chatbot renders in Obsidian Markdown:** Receive formatted responses in Obsidian Markdown for consistency.
 - **Save current chat history as markdown:** Use the `/save` command in chat to save current conversation.

 ## Requirements
@@ -69,7 +100,7 @@ To start using the plugin, enable it in your settings menu and insert an API key
 - `/prompt` - List or change prompts.
   - `/prompt 1` or `/prompt [PROMPT-NAME]`
 - `/maxtokens [VALUE]` - Set max tokens.
-- `/temp [VALUE]` - Change temperature range from 0 to 1.
+- `/temp [VALUE]` - Change temperature range from 0 to 2.
 - `/ref on | off` - Turn on or off reference current note.
 - `/append` - Append current chat history to current active note.
 - `/save` - Save current chat history to a note.
@@ -80,10 +111,10 @@ To start using the plugin, enable it in your settings menu and insert an API key
 - Any self-hosted models using [Ollama](https://ollama.ai).
   - See [instructions](https://github.com/longy2k/obsidian-bmo-chatbot/wiki) to setup Ollama with Obsidian.
-- Any self-hosted models using OpenAI's REST API URL endpoints.
+- Any self-hosted models using OpenAI-based endpoints.
   - [LM Studio](https://lmstudio.ai/)
   - [LocalAI](https://github.com/mudler/LocalAI)
-- Anthropic
+- Anthropic (Warning: Anthropic models cannot be aborted. Please use with caution. Reload the plugin if necessary.)
   - claude-instant-1.2
   - claude-2.0
   - claude-2.1
@@ -92,7 +123,7 @@ To start using the plugin, enable it in your settings menu and insert an API key
   - claude-3-5-sonnet-20240620
   - claude-3-opus-20240229
 - Mistral AI's models
-- Google Gemini Pro
+- Google Gemini Models
 - OpenAI
   - gpt-3.5-turbo
   - gpt-4
diff --git a/README_images/Screenshot-1.png b/README_images/Screenshot-1.png
deleted file mode 100644
index 4fe0131..0000000
Binary files a/README_images/Screenshot-1.png and /dev/null differ
diff --git a/README_images/Screenshot-2.png b/README_images/Screenshot-2.png
deleted file mode 100644
index fdb87ca..0000000
Binary files a/README_images/Screenshot-2.png and /dev/null differ
diff --git a/README_images/bmo-generate.gif b/README_images/bmo-generate.gif
new file mode 100644
index 0000000..ebf6a5d
Binary files /dev/null and b/README_images/bmo-generate.gif differ
diff --git a/README_images/eli5-example.png b/README_images/eli5-example.png
deleted file mode 100644
index ba95708..0000000
Binary files a/README_images/eli5-example.png and /dev/null differ
diff --git a/README_images/original-example.png b/README_images/original-example.png
index 34d1d42..86c8da7 100644
Binary files a/README_images/original-example.png and b/README_images/original-example.png differ
diff --git a/README_images/profiles.gif b/README_images/profiles.gif
new file mode 100644
index 0000000..82623c7
Binary files /dev/null and b/README_images/profiles.gif differ
diff --git a/README_images/prompt-select-generate.gif b/README_images/prompt-select-generate.gif
new file mode 100644
index 0000000..acbb562
Binary files /dev/null and b/README_images/prompt-select-generate.gif differ
diff --git a/manifest.json b/manifest.json
index 836fd50..c01a130 100644
--- a/manifest.json
+++ b/manifest.json
@@ -1,9 +1,9 @@
 {
   "id": "bmo-chatbot",
   "name": "BMO Chatbot",
-  "version": "2.1.2",
+  "version": "2.2.0",
   "minAppVersion": "1.0.0",
-  "description": "Generate and brainstorm ideas while creating your notes using Large Language Models (LLMs) from Ollama, LM Studio, Anthropic, OpenAI, Mistral AI, and more for Obsidian.",
+  "description": "Generate and brainstorm ideas while creating your notes using Large Language Models (LLMs) from Ollama, LM Studio, Anthropic, Google Gemini, OpenAI, Mistral AI, and more for Obsidian.",
   "author": "Longy2k",
   "authorUrl": "https://github.com/longy2k",
   "fundingUrl": "https://ko-fi.com/longy2k",
diff --git a/package-lock.json b/package-lock.json
index 822b912..b421691 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "bmo-chatbot",
-  "version": "2.1.2",
+  "version": "2.2.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "bmo-chatbot",
-      "version": "2.1.2",
+      "version": "2.2.0",
       "license": "MIT",
       "dependencies": {
         "@google/generative-ai": "^0.14.1",
diff --git a/package.json b/package.json
index 0f96032..648c91a 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "bmo-chatbot",
-  "version": "2.1.2",
-  "description": "Generate and brainstorm ideas while creating your notes using Large Language Models (LLMs) from Ollama, LM Studio, Anthropic, OpenAI, Mistral AI, and more for Obsidian.",
+  "version": "2.2.0",
+  "description": "Generate and brainstorm ideas while creating your notes using Large Language Models (LLMs) from Ollama, LM Studio, Anthropic, Google Gemini, OpenAI, Mistral AI, and more for Obsidian.",
   "main": "main.js",
   "scripts": {
     "dev": "node esbuild.config.mjs",
diff --git a/src/components/FetchModelList.ts b/src/components/FetchModelList.ts
index ee39cb8..826bc88 100644
--- a/src/components/FetchModelList.ts
+++ b/src/components/FetchModelList.ts
@@ -78,8 +78,6 @@ export async function fetchGoogleGeminiModels(plugin: BMOGPT) {
         },
     });

-    console.log(response.json.models);
-
     // Check if the response is valid and has data
     if (response.json && response.json.models) {
         const models = response.json.models.map((model: { name: string; }) => model.name).filter((model: string) => model.startsWith('models/gemini'));
diff --git a/src/components/FetchModelResponse.ts b/src/components/FetchModelResponse.ts
index 5195ab6..9647029 100644
--- a/src/components/FetchModelResponse.ts
+++ b/src/components/FetchModelResponse.ts
@@ -886,8 +886,6 @@ export async function fetchGoogleGeminiResponse(plugin: BMOGPT, settings: BMOSet
         }),
         signal: abortController?.signal,
     });
-
-    console.log(response);

     if (!response.ok) {
         throw new Error(`HTTP error! status: ${response.status}`);
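Reviewer note: the hunks above drop debug logging but keep the `signal: abortController?.signal` wiring, which is what lets a response be cancelled mid-flight (and appears to be why the README now warns that Anthropic responses cannot be aborted). For anyone testing the self-hosted, OpenAI-based endpoints the README advertises (LM Studio, LocalAI, Ollama), here is a minimal TypeScript sketch of that request pattern. It is not plugin code; the base URL (LM Studio's usual default port), model id, and token limit are assumptions to be replaced with whatever your local server actually serves.

```ts
// Minimal sketch (not BMO plugin code): an OpenAI-compatible chat request to a
// self-hosted server, wired to an AbortController so it can be cancelled.
// ASSUMPTIONS: BASE_URL and MODEL are placeholders for your own local setup.
const BASE_URL = 'http://localhost:1234/v1';
const MODEL = 'local-model';

export async function chatOnce(
    prompt: string,
    abortController?: AbortController
): Promise<string> {
    const response = await fetch(`${BASE_URL}/chat/completions`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
            model: MODEL,
            messages: [
                { role: 'system', content: 'You are a helpful assistant.' },
                { role: 'user', content: prompt },
            ],
            temperature: 1,   // the /temp command now accepts values from 0 to 2
            max_tokens: 256,  // compare the /maxtokens command
        }),
        // The signal is what makes the request abortable; calling
        // abortController.abort() elsewhere cancels it mid-flight.
        signal: abortController?.signal,
    });

    if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
    }

    const data = await response.json();
    // OpenAI-compatible servers return choices[0].message.content for chat completions.
    return data.choices[0].message.content as string;
}
```

Calling `abort()` on the controller while the request is in flight rejects the `fetch` with an `AbortError`, which is the behavior providers lack when no signal is passed through.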