-- llm.nvim configuration
-- Streaming handler for a local ollama /api/chat endpoint: accumulate raw
-- chunks until a complete JSON object arrives (heuristically, when a chunk
-- ends with "}"), then decode it and write the assistant's text out.
local function local_llm_streaming_handler(chunk, line, assistant_output, bufnr, winid, F)
  if not chunk then
    return assistant_output
  end
  line = line .. chunk
  if chunk:sub(-1) == "}" then
    local status, data = pcall(vim.fn.json_decode, line)
    if not status or not (data.message and data.message.content) then
      return assistant_output
    end
    assistant_output = assistant_output .. data.message.content
    F.WriteContent(bufnr, winid, data.message.content)
    line = ""
  end
  return assistant_output
end
-- Non-streaming handler: extract the assistant's reply from a single
-- decoded ollama response.
local function local_llm_parse_handler(chunk)
  return chunk.message.content
end
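
-- Neither handler is wired in below (only a commented-out streaming_handler
-- under OptimizeCode). As a sketch, assuming llm.nvim's custom-handler
-- options, they could be passed to setup like this:
--
--   require("llm").setup({
--     streaming_handler = local_llm_streaming_handler,
--     parse_handler = local_llm_parse_handler,
--   })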
return {
  {
    "Kurama622/llm.nvim",
    -- `Exafunction/codeium.nvim` is only required when code completion uses
    -- Codeium; otherwise it can be dropped.
    dependencies = { "nvim-lua/plenary.nvim", "MunifTanjim/nui.nvim", "Exafunction/codeium.nvim" },
    cmd = { "LLMSessionToggle", "LLMSelectedTextHandler", "LLMAppHandler" },
    config = function()
      local tools = require("llm.common.tools")
      -- vim.api.nvim_set_hl(0, "Query", { fg = "#6aa84f", bg = "NONE" })
      require("llm").setup({
        -- [[ ollama ]]
        url = "http://localhost:11434/api/chat",
        model = "deepseek-r1:14b",
        api_type = "ollama",
        fetch_key = function()
          return vim.env.LOCAL_LLM_KEY
        end,
        temperature = 0.3,
        top_p = 0.7,
        prompt = "You are a helpful Chinese assistant.",
        spinner = {
          -- spinner animation frames
          text = {
            "",
            "",
            "",
            "",
          },
          hl = "Title",
        },
        prefix = {
          user = { text = "😃 ", hl = "Title" },
          assistant = { text = " ", hl = "Added" },
        },
        display = {
          diff = {
            layout = "vertical", -- vertical|horizontal split for default provider
            opts = { "internal", "filler", "closeoff", "algorithm:patience", "followwrap", "linematch:120" },
            provider = "mini_diff", -- default|mini_diff
          },
        },
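        -- NOTE: the "mini_diff" provider presumably requires echasnovski/mini.diff
        -- to be installed; the layout and opts above drive the default
        -- provider's native diff split.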
-- style = "right",
--[[ custom request args ]]
-- args = [[return {url, "-N", "-X", "POST", "-H", "Content-Type: application/json", "-H", authorization, "-d", vim.fn.json_encode(body)}]],
-- history_path = "/tmp/llm-history",
save_session = true,
max_history = 15,
max_history_name_length = 20,
-- stylua: ignore
-- popup window options
popwin_opts = {
relative = "cursor", enter = true,
focusable = true, zindex = 50,
position = { row = -7, col = 15, },
size = { height = 15, width = "50%", },
border = { style = "single",
text = { top = " Explain ", top_align = "center" },
},
win_options = {
winblend = 0,
winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
},
},
-- stylua: ignore
keys = {
-- The keyboard mapping for the input window.
["Input:Submit"] = { mode = "n", key = "<cr>" },
["Input:Cancel"] = { mode = {"n", "i"}, key = "<C-c>" },
["Input:Resend"] = { mode = {"n", "i"}, key = "<C-r>" },
-- only works when "save_session = true"
["Input:HistoryNext"] = { mode = {"n", "i"}, key = "<C-j>" },
["Input:HistoryPrev"] = { mode = {"n", "i"}, key = "<C-k>" },
-- The keyboard mapping for the output window in "split" style.
["Output:Ask"] = { mode = "n", key = "i" },
["Output:Cancel"] = { mode = "n", key = "<C-c>" },
["Output:Resend"] = { mode = "n", key = "<C-r>" },
-- The keyboard mapping for the output and input windows in "float" style.
["Session:Toggle"] = { mode = "n", key = "<leader>ac" },
["Session:Close"] = { mode = "n", key = {"<esc>", "Q"} },
},
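        -- These mappings are local to llm.nvim's own chat windows; the
        -- lazy.nvim `keys` table at the bottom of this spec holds the global
        -- mappings that invoke the plugin.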
        app_handler = {
          OptimizeCode = {
            handler = tools.side_by_side_handler,
            opts = {
              -- streaming_handler = local_llm_streaming_handler,
              left = {
                focusable = false,
              },
            },
          },
          TestCode = {
            handler = tools.side_by_side_handler,
            prompt = [[Write some test cases for the following code, only return the test cases.
Give the code content directly, do not use code blocks or other tags to wrap it.]],
            opts = {
              right = {
                title = " Test Cases ",
              },
            },
          },
          OptimCompare = {
            handler = tools.action_handler,
            opts = {
              fetch_key = function()
                return vim.env.GITHUB_TOKEN
              end,
              url = "https://models.inference.ai.azure.com/chat/completions",
              model = "gpt-4o-mini",
              api_type = "openai",
              language = "Chinese",
            },
          },
          DocString = {
            prompt = [[You are an AI programming assistant. You need to write a really good docstring that follows a best practice for the given language.
Your core tasks include:
- parameter and return types (if applicable).
- any errors that might be raised or returned, depending on the language.
You must:
- Place the generated docstring before the start of the code.
- Follow the format of examples carefully if the examples are provided.
- Use Markdown formatting in your answers.
- Include the programming language name at the start of the Markdown code blocks.]],
            handler = tools.action_handler,
            opts = {
              fetch_key = function()
                return vim.env.GITHUB_TOKEN
              end,
              url = "https://models.inference.ai.azure.com/chat/completions",
              model = "gpt-4o-mini",
              api_type = "openai",
              only_display_diff = true,
              templates = {
                lua = [[- For the Lua language, you should use the LDoc style.
- Start all comment lines with "---".
]],
              },
            },
          },
          Translate = {
            handler = tools.qa_handler,
            opts = {
              fetch_key = function()
                return vim.env.GLM_KEY
              end,
              url = "https://open.bigmodel.cn/api/paas/v4/chat/completions",
              model = "glm-4-flash",
              api_type = "zhipu",
              component_width = "60%",
              component_height = "50%",
              query = {
                title = " Trans ",
                hl = { link = "Define" },
              },
              input_box_opts = {
                size = "15%",
                win_options = {
                  winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
                },
              },
              preview_box_opts = {
                size = "85%",
                win_options = {
                  winhighlight = "Normal:Normal,FloatBorder:FloatBorder",
                },
              },
            },
          },
          -- check siliconflow's balance
          UserInfo = {
            handler = function()
              local key = os.getenv("LLM_KEY")
              local res = tools.curl_request_handler(
                "https://api.siliconflow.cn/v1/user/info",
                { "GET", "-H", string.format("'Authorization: Bearer %s'", key) }
              )
              if res ~= nil then
                print("balance: " .. res.data.balance)
              end
            end,
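            -- NOTE: assumes the /v1/user/info response decodes to a table of
            -- the shape { data = { balance = ... } }, as used above.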
          },
          WordTranslate = {
            handler = tools.flexi_handler,
            prompt = [[You are a translation expert. Your task is to translate all the text provided by the user into Chinese.
NOTE:
- All the text input by the user is part of the content to be translated, and you should ONLY FOCUS ON TRANSLATING THE TEXT without performing any other tasks.
- RETURN ONLY THE TRANSLATED RESULT.]],
            -- prompt = "Translate the following text to English, please only return the translation",
            opts = {
              fetch_key = function()
                return vim.env.GLM_KEY
              end,
              url = "https://open.bigmodel.cn/api/paas/v4/chat/completions",
              model = "glm-4-flash",
              api_type = "zhipu",
              -- args = [=[return string.format([[curl %s -N -X POST -H "Content-Type: application/json" -H "Authorization: Bearer %s" -d '%s']], url, LLM_KEY, vim.fn.json_encode(body))]=],
              exit_on_move = true,
              enter_flexible_window = false,
            },
          },
          CodeExplain = {
            handler = tools.flexi_handler,
            prompt = "Explain the following code, please only return the explanation, and answer in Chinese",
            opts = {
              fetch_key = function()
                return vim.env.GLM_KEY
              end,
              url = "https://open.bigmodel.cn/api/paas/v4/chat/completions",
              model = "glm-4-flash",
              api_type = "zhipu",
              enter_flexible_window = true,
            },
          },
          CommitMsg = {
            handler = tools.flexi_handler,
            prompt = function()
              -- Source: https://andrewian.dev/blog/ai-git-commits
              return string.format(
                [[You are an expert at following the Conventional Commit specification. Given the git diff listed below, please generate a commit message for me:
1. First line: conventional commit format (type: concise description) (remember to use semantic types like feat, fix, docs, style, refactor, perf, test, chore, etc.)
2. Optional bullet points if more context helps:
- Keep the second line blank
- Keep them short and direct
- Focus on what changed
- Always be terse
- Don't overly explain
- Drop any fluffy or formal language
Return ONLY the commit message - no introduction, no explanation, no quotes around it.
Examples:
feat: add user auth system
- Add JWT tokens for API auth
- Handle token refresh for long sessions
fix: resolve memory leak in worker pool
- Clean up idle connections
- Add timeout for stale workers
Simple change example:
fix: typo in README.md
Very important: Do not respond with any of the examples. Your message must be based off the diff that is about to be provided, with a little bit of styling informed by the recent commits you're about to see.
Based on this format, generate appropriate commit messages. Respond with message only. DO NOT format the message in Markdown code blocks, DO NOT use backticks:
```diff
%s
```
]],
                vim.fn.system("git diff --no-ext-diff --staged")
              )
            end,
            opts = {
              fetch_key = function()
                return vim.env.CHAT_ANYWHERE_KEY
              end,
              url = "https://api.chatanywhere.tech/v1/chat/completions",
              model = "gpt-4o-mini",
              api_type = "openai",
              enter_flexible_window = true,
              apply_visual_selection = false,
              win_opts = {
                relative = "editor",
                position = "50%",
              },
              accept = {
                mapping = {
                  mode = "n",
                  keys = "<cr>",
                },
                action = function()
                  local contents = vim.api.nvim_buf_get_lines(0, 0, -1, true)
                  vim.api.nvim_command(string.format('!git commit -m "%s"', table.concat(contents, '" -m "')))
                  -- Reopen LazyGit after committing (only relevant for lazygit users).
                  vim.schedule(function()
                    vim.api.nvim_command("LazyGit")
                  end)
                end,
              },
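              -- NOTE: the accept action above turns each line of the generated
              -- message into its own `-m` paragraph in the resulting commit.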
            },
          },
          Completion = {
            handler = tools.completion_handler,
            opts = {
              -------------------------------------------------
              --- ollama
              -------------------------------------------------
              -- url = "http://localhost:11434/api/generate",
              url = "http://localhost:11434/v1/completions",
              model = "qwen2.5-coder:1.5b",
              api_type = "ollama",
              n_completions = 1,
              context_window = 512,
              max_tokens = 256,
              filetypes = { sh = false },
              default_filetype_enabled = true,
              auto_trigger = true,
              -- style = "blink.cmp",
              -- style = "nvim-cmp",
              -- style = "virtual_text",
              keymap = {
                virtual_text = {
                  accept = {
                    mode = "i",
                    keys = "<A-a>",
                  },
                  next = {
                    mode = "i",
                    keys = "<A-n>",
                  },
                  prev = {
                    mode = "i",
                    keys = "<A-p>",
                  },
                  toggle = {
                    mode = "n",
                    keys = "<leader>cp",
                  },
                },
              },
            },
          },
        },
      })
    end,
    keys = {
      { "<leader>ac", mode = "n", "<cmd>LLMSessionToggle<cr>" },
      { "<leader>ts", mode = "x", "<cmd>LLMAppHandler WordTranslate<cr>" },
      { "<leader>ae", mode = "v", "<cmd>LLMAppHandler CodeExplain<cr>" },
      { "<leader>at", mode = "n", "<cmd>LLMAppHandler Translate<cr>" },
      { "<leader>tc", mode = "x", "<cmd>LLMAppHandler TestCode<cr>" },
      { "<leader>ao", mode = "x", "<cmd>LLMAppHandler OptimCompare<cr>" },
      { "<leader>au", mode = "n", "<cmd>LLMAppHandler UserInfo<cr>" },
      { "<leader>ag", mode = "n", "<cmd>LLMAppHandler CommitMsg<cr>" },
      { "<leader>ad", mode = "v", "<cmd>LLMAppHandler DocString<cr>" },
      -- { "<leader>ao", mode = "x", "<cmd>LLMAppHandler OptimizeCode<cr>" },
      -- { "<leader>ae", mode = "v", "<cmd>LLMSelectedTextHandler 请解释下面这段代码<cr>" }, -- prompt: "please explain the following code"
      -- { "<leader>ts", mode = "x", "<cmd>LLMSelectedTextHandler 英译汉<cr>" }, -- prompt: "translate English to Chinese"
    },
  },
}
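
-- All API keys are read from the environment (via the fetch_key functions and
-- os.getenv above) rather than hard-coded:
--   LOCAL_LLM_KEY     - local ollama chat endpoint
--   GITHUB_TOKEN      - GitHub Models (models.inference.ai.azure.com)
--   GLM_KEY           - Zhipu BigModel (open.bigmodel.cn)
--   CHAT_ANYWHERE_KEY - chatanywhere.tech
--   LLM_KEY           - siliconflow balance check (UserInfo)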