Skip to content

Commit

Permalink
refactor(ui): bounding popover (#13) (#29)
Browse files Browse the repository at this point in the history
* refactor(ui): bounding popover (#13)

* refactor(ui): bounding popover

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* chore: update readme instructions on setting up render-markdown.nvim

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* chore: align code style

* fix: incorrect type annotation

* fix: make it work with mouse movement

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* fix: focus correct on render

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* fix: make sure to close the view

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* chore: cleanup cursor position

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* docs: add notes on rc

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* fix: make sure to apply if has diff

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* fix: do not simulate user input

---------

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>
Co-authored-by: yetone <yetoneful@gmail.com>

* fix(autocmd): make sure to load tiktoken on correct events (closes #16) (#24)

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* feat(type): better hinting on nui components (#27)

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* feat: scrollview and tracking config and lazy load and perf (#33)

* feat: scrollview and tracking config and lazy load and perf

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* fix: add back options

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* revert: remove unused autocmd

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>

* fix: get code content

* fix: keybinding hint virtual text position

---------

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>
Co-authored-by: yetone <yetoneful@gmail.com>

---------

Signed-off-by: Aaron Pham <contact@aarnphm.xyz>
Co-authored-by: Aaron Pham <contact@aarnphm.xyz>
  • Loading branch information
yetone and aarnphm authored Aug 17, 2024
1 parent ce5d421 commit f8cbc88
Show file tree
Hide file tree
Showing 10 changed files with 661 additions and 395 deletions.
9 changes: 0 additions & 9 deletions .luarc.json

This file was deleted.

156 changes: 95 additions & 61 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,8 @@
https://github.com/user-attachments/assets/510e6270-b6cf-459d-9a2f-15b397d1fe53



https://github.com/user-attachments/assets/86140bfd-08b4-483d-a887-1b701d9e37dd



## Features

- **AI-Powered Code Assistance**: Interact with AI to ask questions about your current code file and receive intelligent suggestions for improvement or modification.
Expand All @@ -22,74 +18,89 @@ https://github.com/user-attachments/assets/86140bfd-08b4-483d-a887-1b701d9e37dd

Install `avante.nvim` using [lazy.nvim](https://github.com/folke/lazy.nvim):

```lua
{
"yetone/avante.nvim",
event = "VeryLazy",
opts = {},
build = "make",
dependencies = {
"nvim-tree/nvim-web-devicons",
{
"grapp-dev/nui-components.nvim",
dependencies = {
"MunifTanjim/nui.nvim"
}
},
"nvim-lua/plenary.nvim",
"MeanderingProgrammer/render-markdown.nvim",
```lua
{
"yetone/avante.nvim",
event = "VeryLazy",
opts = {},
build = "make",
dependencies = {
"nvim-tree/nvim-web-devicons",
{
"grapp-dev/nui-components.nvim",
dependencies = {
"MunifTanjim/nui.nvim"
}
},
}
```
"nvim-lua/plenary.nvim",
"MeanderingProgrammer/render-markdown.nvim", -- optional
},
}
```

> [!IMPORTANT]
>
> If your Neovim doesn't use LuaJIT, change `build` to `make lua51`. By default, running `make` will install LuaJIT.
> For ARM-based setups, make sure to also install cargo, as tiktoken_core will have to be built from source.
> [!NOTE]
>
> `render-markdown.nvim` is an optional dependency used to render the markdown content of the chat history. Make sure to also include `Avante` as a filetype
> in its setup:
>
> ```lua
> {
> "MeanderingProgrammer/markdown.nvim",
> opts = {
> file_types = { "markdown", "Avante" },
> },
> ft = { "markdown", "Avante" },
> }
> ```
Default setup configuration:
```lua
{
provider = "claude", -- "claude" or "openai" or "azure"
openai = {
endpoint = "https://api.openai.com",
model = "gpt-4o",
temperature = 0,
max_tokens = 4096,
```lua
{
provider = "claude", -- "claude" or "openai" or "azure"
openai = {
endpoint = "https://api.openai.com",
model = "gpt-4o",
temperature = 0,
max_tokens = 4096,
},
azure = {
endpoint = "", -- Example: "https://<your-resource-name>.openai.azure.com"
deployment = "", -- Azure deployment name (e.g., "gpt-4o", "my-gpt-4o-deployment")
api_version = "2024-06-01",
temperature = 0,
max_tokens = 4096,
},
claude = {
endpoint = "https://api.anthropic.com",
model = "claude-3-5-sonnet-20240620",
temperature = 0,
max_tokens = 4096,
},
highlights = {
diff = {
current = "DiffText", -- need have background color
incoming = "DiffAdd", -- need have background color
},
azure = {
endpoint = "", -- Example: "https://<your-resource-name>.openai.azure.com"
deployment = "", -- Azure deployment name (e.g., "gpt-4o", "my-gpt-4o-deployment")
api_version = "2024-06-01",
temperature = 0,
max_tokens = 4096,
},
mappings = {
show_sidebar = "<leader>aa",
diff = {
ours = "co",
theirs = "ct",
none = "c0",
both = "cb",
next = "]x",
prev = "[x",
},
claude = {
endpoint = "https://api.anthropic.com",
model = "claude-3-5-sonnet-20240620",
temperature = 0,
max_tokens = 4096,
},
highlights = {
diff = {
current = "DiffText", -- need have background color
incoming = "DiffAdd", -- need have background color
},
},
mappings = {
show_sidebar = "<leader>aa",
diff = {
ours = "co",
theirs = "ct",
none = "c0",
both = "cb",
next = "]x",
prev = "[x",
},
},
}
```
},
}
```
## Usage

Expand Down Expand Up @@ -165,6 +176,29 @@ To set up the development environment:
pre-commit install --install-hooks
```

For setting up lua_ls you can use the following for `nvim-lspconfig`:

```lua
lua_ls = {
settings = {
Lua = {
runtime = {
version = "LuaJIT",
special = { reload = "require" },
},
workspace = {
library = {
vim.fn.expand "$VIMRUNTIME/lua",
vim.fn.expand "$VIMRUNTIME/lua/vim/lsp",
vim.fn.stdpath "data" .. "/lazy/lazy.nvim/lua/lazy",
"${3rd}/luv/library",
},
},
},
},
},
```

## License

avante.nvim is licensed under the Apache License. For more details, please refer to the [LICENSE](./LICENSE) file.
59 changes: 33 additions & 26 deletions lua/avante/ai_bot.lua
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
local M = {}
local fn = vim.fn

local curl = require("plenary.curl")
local utils = require("avante.utils")
local config = require("avante.config")
local tiktoken = require("avante.tiktoken")

local fn = vim.fn
local Utils = require("avante.utils")
local Config = require("avante.config")
local Tiktoken = require("avante.tiktoken")

---@class avante.AiBot
local M = {}

local system_prompt = [[
You are an excellent programming expert.
Expand Down Expand Up @@ -64,7 +66,7 @@ local function call_claude_api_stream(question, code_lang, code_content, on_chun

local user_prompt = base_user_prompt

local tokens = config.get().claude.max_tokens
local tokens = Config.claude.max_tokens
local headers = {
["Content-Type"] = "application/json",
["x-api-key"] = api_key,
Expand All @@ -82,16 +84,16 @@ local function call_claude_api_stream(question, code_lang, code_content, on_chun
text = user_prompt,
}

if tiktoken.count(code_prompt_obj.text) > 1024 then
if Tiktoken.count(code_prompt_obj.text) > 1024 then
code_prompt_obj.cache_control = { type = "ephemeral" }
end

if tiktoken.count(user_prompt_obj.text) > 1024 then
if Tiktoken.count(user_prompt_obj.text) > 1024 then
user_prompt_obj.cache_control = { type = "ephemeral" }
end

local body = {
model = config.get().claude.model,
model = Config.claude.model,
system = system_prompt,
messages = {
{
Expand All @@ -107,13 +109,13 @@ local function call_claude_api_stream(question, code_lang, code_content, on_chun
},
},
stream = true,
temperature = config.get().claude.temperature,
temperature = Config.claude.temperature,
max_tokens = tokens,
}

local url = utils.trim_suffix(config.get().claude.endpoint, "/") .. "/v1/messages"
local url = Utils.trim_suffix(Config.claude.endpoint, "/") .. "/v1/messages"

print("Sending request to Claude API...")
-- print("Sending request to Claude API...")

curl.post(url, {
---@diagnostic disable-next-line: unused-local
Expand Down Expand Up @@ -154,7 +156,7 @@ end

local function call_openai_api_stream(question, code_lang, code_content, on_chunk, on_complete)
local api_key = os.getenv("OPENAI_API_KEY")
if not api_key and config.get().provider == "openai" then
if not api_key and Config.provider == "openai" then
error("OPENAI_API_KEY environment variable is not set")
end

Expand All @@ -169,16 +171,16 @@ local function call_openai_api_stream(question, code_lang, code_content, on_chun
.. "\n```"

local url, headers, body
if config.get().provider == "azure" then
if Config.provider == "azure" then
api_key = os.getenv("AZURE_OPENAI_API_KEY") or os.getenv("OPENAI_API_KEY")
if not api_key then
error("Azure OpenAI API key is not set. Please set AZURE_OPENAI_API_KEY or OPENAI_API_KEY environment variable.")
end
url = config.get().azure.endpoint
url = Config.azure.endpoint
.. "/openai/deployments/"
.. config.get().azure.deployment
.. Config.azure.deployment
.. "/chat/completions?api-version="
.. config.get().azure.api_version
.. Config.azure.api_version
headers = {
["Content-Type"] = "application/json",
["api-key"] = api_key,
Expand All @@ -188,29 +190,29 @@ local function call_openai_api_stream(question, code_lang, code_content, on_chun
{ role = "system", content = system_prompt },
{ role = "user", content = user_prompt },
},
temperature = config.get().azure.temperature,
max_tokens = config.get().azure.max_tokens,
temperature = Config.azure.temperature,
max_tokens = Config.azure.max_tokens,
stream = true,
}
else
url = utils.trim_suffix(config.get().openai.endpoint, "/") .. "/v1/chat/completions"
url = Utils.trim_suffix(Config.openai.endpoint, "/") .. "/v1/chat/completions"
headers = {
["Content-Type"] = "application/json",
["Authorization"] = "Bearer " .. api_key,
}
body = {
model = config.get().openai.model,
model = Config.openai.model,
messages = {
{ role = "system", content = system_prompt },
{ role = "user", content = user_prompt },
},
temperature = config.get().openai.temperature,
max_tokens = config.get().openai.max_tokens,
temperature = Config.openai.temperature,
max_tokens = Config.openai.max_tokens,
stream = true,
}
end

print("Sending request to " .. (config.get().provider == "azure" and "Azure OpenAI" or "OpenAI") .. " API...")
-- print("Sending request to " .. (config.get().provider == "azure" and "Azure OpenAI" or "OpenAI") .. " API...")

curl.post(url, {
---@diagnostic disable-next-line: unused-local
Expand Down Expand Up @@ -253,10 +255,15 @@ local function call_openai_api_stream(question, code_lang, code_content, on_chun
})
end

---@param question string
---@param code_lang string
---@param code_content string
---@param on_chunk fun(chunk: string): any
---@param on_complete fun(err: string|nil): any
function M.call_ai_api_stream(question, code_lang, code_content, on_chunk, on_complete)
if config.get().provider == "openai" or config.get().provider == "azure" then
if Config.provider == "openai" or Config.provider == "azure" then
call_openai_api_stream(question, code_lang, code_content, on_chunk, on_complete)
elseif config.get().provider == "claude" then
elseif Config.provider == "claude" then
call_claude_api_stream(question, code_lang, code_content, on_chunk, on_complete)
end
end
Expand Down
Loading

0 comments on commit f8cbc88

Please sign in to comment.