From 08e1618ec2a23b242908927884c3647c5a8ca30f Mon Sep 17 00:00:00 2001
From: Oli Morris
Date: Mon, 23 Dec 2024 08:57:18 +0000
Subject: [PATCH] docs: update docs

---
 doc/codecompanion.txt | 1017 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1017 insertions(+)
 create mode 100644 doc/codecompanion.txt

diff --git a/doc/codecompanion.txt b/doc/codecompanion.txt
new file mode 100644
index 00000000..827a3c3a
--- /dev/null
+++ b/doc/codecompanion.txt
@@ -0,0 +1,1017 @@
*codecompanion.txt*        For NVIM v0.10.0      Last change: 2024 December 23

==============================================================================
Table of Contents                            *codecompanion-table-of-contents*

  - Features                                          |codecompanion-features|
  - Requirements                                  |codecompanion-requirements|
  - Installation                                  |codecompanion-installation|
  - Quickstart                                      |codecompanion-quickstart|
  - Configuration                                |codecompanion-configuration|
  - Advanced Usage                              |codecompanion-advanced-usage|
  - Extras                                              |codecompanion-extras|
  - Troubleshooting                            |codecompanion-troubleshooting|

FEATURES                                              *codecompanion-features*

- Copilot Chat meets Zed AI, in Neovim
- Support for Anthropic, Copilot, Gemini, Ollama, OpenAI, Azure OpenAI, HuggingFace and xAI LLMs (or bring your own!)
- Inline transformations, code creation and refactoring
- Variables, Slash Commands, Agents/Tools and Workflows to improve LLM output
- Built-in prompt library for common tasks like advice on LSP errors and code explanations
- Create your own custom prompts, Variables and Slash Commands
- Have multiple chats open at the same time
- Async execution for fast performance


REQUIREMENTS                                      *codecompanion-requirements*

- The `curl` library
- Neovim 0.10.0 or greater
- _(Optional)_ An API key for your chosen LLM


INSTALLATION                                      *codecompanion-installation*


  [!IMPORTANT] The plugin requires the markdown Tree-sitter parser to be
  installed with `:TSInstall markdown`
Install the plugin with your preferred package manager:

**Lazy.nvim**

>lua
    {
      "olimorris/codecompanion.nvim",
      dependencies = {
        "nvim-lua/plenary.nvim",
        "nvim-treesitter/nvim-treesitter",
      },
      config = true
    }
<

**Packer**

>lua
    use({
      "olimorris/codecompanion.nvim",
      config = function()
        require("codecompanion").setup()
      end,
      requires = {
        "nvim-lua/plenary.nvim",
        "nvim-treesitter/nvim-treesitter",
      }
    })
<

**vim-plug**

>vim
    call plug#begin()

    Plug 'nvim-lua/plenary.nvim'
    Plug 'nvim-treesitter/nvim-treesitter'
    Plug 'olimorris/codecompanion.nvim'

    call plug#end()

    lua << EOF
      require("codecompanion").setup()
    EOF
<

**Completion**

When conversing with the LLM, you can leverage variables, slash commands and
tools in the chat buffer. Out of the box, the plugin will display these to you
via a native Neovim completion menu (which you’ll need to trigger with
`<C-_>`). However, it also has support for nvim-cmp and blink.cmp. The former
requires no setup; however, to enable completions for `blink.cmp`, please
ensure you’ve enabled it in your config:

>lua
    {
      "saghen/blink.cmp",
      ---@module 'blink.cmp'
      ---@type blink.cmp.Config
      opts = {
        sources = {
          default = { "codecompanion" },
          providers = {
            codecompanion = {
              name = "CodeCompanion",
              module = "codecompanion.providers.completion.blink",
              enabled = true,
            },
          },
        },
      },
    },
<

**Slash Commands**

To better utilise Slash Commands, telescope.nvim, mini.pick or fzf-lua can
also be installed. Please refer to the |codecompanion-chat-buffer| section for
more information.
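
For example, a picker can be declared as an optional dependency in your
plugin spec. Below is a sketch for lazy.nvim using telescope.nvim (swap in
mini.pick or fzf-lua if you prefer):

>lua
    {
      "olimorris/codecompanion.nvim",
      dependencies = {
        "nvim-lua/plenary.nvim",
        "nvim-treesitter/nvim-treesitter",
        "nvim-telescope/telescope.nvim", -- optional: a Slash Command provider
      },
      config = true
    }
<
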
**Pinned plugins**

As per #377, if you pin your plugins to the latest releases, consider setting
plenary.nvim to:

>lua
    { "nvim-lua/plenary.nvim", branch = "master" },
<

**Prettify with render-markdown.nvim**

Add the following to your dependencies:

>lua
    { "MeanderingProgrammer/render-markdown.nvim", ft = { "markdown", "codecompanion" } },
<


QUICKSTART                                          *codecompanion-quickstart*


  [!NOTE] Okay, okay…it’s not quite a quickstart as you’ll need to
  configure an |codecompanion-adapter| and I recommend starting from the
  |codecompanion-configuration| section to understand how the plugin works.
**Chat Buffer**

Run `:CodeCompanionChat` to open the chat buffer. Type your prompt and press
`<CR>`. Or, run `:CodeCompanionChat why are Lua and Neovim so perfect
together?` to send a prompt directly to the chat buffer. Toggle the chat
buffer with `:CodeCompanionChat Toggle`.

You can add context from your code base by using _Variables_ and _Slash
Commands_ in the chat buffer.

_Variables_, accessed via `#`, contain data about the present state of Neovim:

- `#buffer` - Shares the current buffer’s code. You can also specify line numbers with `#buffer:8-20`
- `#lsp` - Shares LSP information and code for the current buffer
- `#viewport` - Shares the buffers and lines that you see in the Neovim viewport

_Slash commands_, accessed via `/`, run commands to insert additional context
into the chat buffer:

- `/buffer` - Insert open buffers
- `/fetch` - Insert URL contents
- `/file` - Insert a file
- `/help` - Insert content from help tags
- `/now` - Insert the current date and time
- `/symbols` - Insert symbols from a selected file
- `/terminal` - Insert terminal output

_Tools_, accessed via `@`, allow the LLM to function as an agent and carry out
actions:

- `@cmd_runner` - The LLM will run shell commands (subject to approval)
- `@editor` - The LLM will edit code in a Neovim buffer
- `@files` - The LLM can work with files on the file system (subject to approval)
- `@rag` - The LLM will browse and search the internet for real-time information to supplement its response

Tools can also be grouped together to form _Agents_, which are also accessed
via `@` in the chat buffer:

- `@full_stack_dev` - Contains the `cmd_runner`, `editor` and `files` tools.


  [!IMPORTANT] These have been designed to work with native Neovim completions
  and also with nvim-cmp. To open the completion menu use `<C-_>` in insert
  mode when in the chat buffer.
**Inline Assistant**

Run `:CodeCompanion <your prompt>` to call the inline assistant. The assistant
will evaluate the prompt and either write code or open a chat buffer. You can
also make a visual selection and call the assistant.

The assistant has knowledge of your last conversation from a chat buffer. A
prompt such as `:CodeCompanion add the new function here` will see the
assistant add a code block directly into the current buffer.

For convenience, you can call prompts from the |codecompanion-prompt-library|
via the assistant, such as `:'<,'>CodeCompanion /buffer what does this file
do?`.
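
If you use this pattern often, a visual-mode mapping can pre-fill the command
for you. A minimal sketch (the `<LocalLeader>i` key is an arbitrary choice):

>lua
    -- From visual mode, opens `:'<,'>CodeCompanion ` ready for a prompt
    vim.keymap.set("v", "<LocalLeader>i", ":CodeCompanion ", { desc = "Inline assistant" })
<
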
The prompt library comes with the following defaults:

- `/buffer` - Send the current buffer to the LLM alongside a prompt
- `/commit` - Generate a commit message
- `/explain` - Explain how selected code in a buffer works
- `/fix` - Fix the selected code
- `/lsp` - Explain the LSP diagnostics for the selected code
- `/tests` - Generate unit tests for selected code

There are keymaps available to accept or reject edits from the LLM in the
|codecompanion-inline-assistant| section.

**Commands**

Use CodeCompanion to create Neovim commands in command-line mode
(|Command-line|) via `:CodeCompanionCmd <your prompt>`.

**Action Palette**

Run `:CodeCompanionActions` to open the action palette, which gives you access
to all functionality of the plugin. By default the plugin uses
`vim.ui.select`; however, you can change the provider by altering the
`display.action_palette.provider` config value to be `telescope` or
`mini_pick`. You can also call the Telescope extension with `:Telescope
codecompanion`.


  [!NOTE] Some actions and prompts will only be visible if you’re in _Visual
  mode_.
**List of commands**

The plugin has four core commands:

- `CodeCompanion` - Open the inline assistant
- `CodeCompanionChat` - Open a chat buffer
- `CodeCompanionCmd` - Generate a command in the command-line
- `CodeCompanionActions` - Open the _Action Palette_

However, there are multiple options available:

- `CodeCompanion <your prompt>` - Prompt the inline assistant
- `CodeCompanion /<prompt>` - Use the |codecompanion-prompt-library| with the inline assistant e.g. `/commit`
- `CodeCompanionChat <your prompt>` - Send a prompt to the LLM via a chat buffer
- `CodeCompanionChat <adapter>` - Open a chat buffer with a specific adapter
- `CodeCompanionChat Toggle` - Toggle a chat buffer
- `CodeCompanionChat Add` - Add visually selected chat to the current chat buffer

**Suggested plugin workflow**

For an optimum plugin workflow, I recommend the following:

>lua
    vim.api.nvim_set_keymap("n", "<C-a>", "<cmd>CodeCompanionActions<cr>", { noremap = true, silent = true })
    vim.api.nvim_set_keymap("v", "<C-a>", "<cmd>CodeCompanionActions<cr>", { noremap = true, silent = true })
    vim.api.nvim_set_keymap("n", "<LocalLeader>a", "<cmd>CodeCompanionChat Toggle<cr>", { noremap = true, silent = true })
    vim.api.nvim_set_keymap("v", "<LocalLeader>a", "<cmd>CodeCompanionChat Toggle<cr>", { noremap = true, silent = true })
    vim.api.nvim_set_keymap("v", "ga", "<cmd>CodeCompanionChat Add<cr>", { noremap = true, silent = true })

    -- Expand 'cc' into 'CodeCompanion' in the command line
    vim.cmd([[cab cc CodeCompanion]])
<


  [!NOTE] You can also assign prompts from the library to specific mappings.
  See the |codecompanion-prompt-library| section for more information.

CONFIGURATION                                    *codecompanion-configuration*

Before configuring the plugin, it’s important to understand how it’s
structured.

The plugin uses adapters to connect to LLMs. Out of the box, the plugin
supports:

- Anthropic (`anthropic`) - Requires an API key and supports prompt caching
- Copilot (`copilot`) - Requires a token which is created via `:Copilot setup` in Copilot.vim
- Gemini (`gemini`) - Requires an API key
- Ollama (`ollama`) - Both local and remotely hosted
- OpenAI (`openai`) - Requires an API key
- Azure OpenAI (`azure_openai`) - Requires an Azure OpenAI service with a model deployment
- xAI (`xai`) - Requires an API key
- HuggingFace (`huggingface`) - Requires a Serverless Inference API key from HuggingFace.co
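
By default, most adapters look their API key up in a conventionally named
environment variable (for example, the Anthropic adapter reads
`ANTHROPIC_API_KEY`), so exporting the key before launching Neovim is often
all the setup needed. A sketch, assuming the default Anthropic adapter:

>lua
    -- Prefer exporting this from your shell; setting it here is only a sketch
    vim.env.ANTHROPIC_API_KEY = "your-api-key"
<
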
The plugin utilises objects called Strategies. These are the different ways
that a user can interact with the plugin. The _chat_ strategy harnesses a
buffer to allow direct conversation with the LLM. The _inline_ strategy allows
for output from the LLM to be written directly into a pre-existing Neovim
buffer. The _agent_ and _workflow_ strategies are wrappers for the _chat_
strategy, allowing for |codecompanion-tool-use| and
|codecompanion-agentic-workflows|.

The plugin allows you to specify adapters for each strategy and also for each
|codecompanion-prompt-library| entry.


CHANGING THE DEFAULTS ~

The default config can be found in the config.lua file and the defaults can be
changed by calling the `setup` function:

>lua
    require("codecompanion").setup({
      display = {
        diff = {
          provider = "mini_diff",
        },
      },
      opts = {
        log_level = "DEBUG",
      },
    })
<

Please refer to the |codecompanion-adapter| section below in order to
configure adapters.

**Changing the System Prompt**

The default system prompt has been carefully curated to deliver responses
which are similar to GitHub Copilot Chat, no matter which LLM you use. That
is, you’ll receive responses which are terse and professional, with expertise
in coding. However, you can modify the `opts.system_prompt` table in the
config to suit your needs. You can also set it as a function which can receive
the current chat buffer’s adapter as a parameter, giving you the option of
setting system prompts that are LLM or model specific:

>lua
    require("codecompanion").setup({
      opts = {
        ---@param adapter CodeCompanion.Adapter
        ---@return string
        system_prompt = function(adapter)
          if adapter.schema.model.default == "llama3.1:latest" then
            return "My custom system prompt"
          end
          return "My default system prompt"
        end
      }
    })
<

**Changing the language**

The plugin enables the language for non-code responses to be changed. You can
configure this in your setup:

>lua
    require('codecompanion').setup({
      opts = {
        language = "English" -- Default is "English"
      }
    })
<

**Changing or adding to the default keymaps**

The chat buffer comes with a number of pre-defined keymaps which you can
customize:

>lua
    require('codecompanion').setup({
      strategies = {
        chat = {
          keymaps = {
            send = {
              modes = {
                -- Only send a response to the LLM with <CR>
                n = { "<CR>" },
              },
            },
          }
        }
      }
    })
<

You can also add your own keymaps:

>lua
    require('codecompanion').setup({
      strategies = {
        chat = {
          keymaps = {
            hide = {
              modes = {
                n = "gh",
              },
              -- Add your own custom callback that receives the chat buffer object
              callback = function(chat)
                chat.ui:hide()
              end,
              description = "Hide the chat buffer",
            },
          }
        }
      }
    })
<


ADAPTERS ~

Please refer to your chosen adapter to understand its configuration. You will
need to set an API key for non-locally hosted LLMs.


  [!TIP] To create your own adapter, or to better understand how they work,
  please refer to the ADAPTERS guide.
**Changing the Default Adapter**

To specify a different adapter from the default (`openai`), simply change the
`strategies.*` table:

>lua
    require("codecompanion").setup({
      strategies = {
        chat = {
          adapter = "anthropic",
        },
        inline = {
          adapter = "copilot",
        },
      },
    })
<

**Setting an API Key**

>lua
    require("codecompanion").setup({
      adapters = {
        anthropic = function()
          return require("codecompanion.adapters").extend("anthropic", {
            env = {
              api_key = "MY_OTHER_ANTHROPIC_KEY"
            },
          })
        end,
      },
    })
<

In the example above, we’re using the base of the Anthropic adapter but
changing the name of the default API key which it uses.

**Setting an API Key Using a Command**

Having API keys in plain text in your shell is not always safe. Thanks to this
PR, you can run commands from within your config by prefixing them with
`cmd:`. In the example below, we’re using the 1Password CLI to read an OpenAI
credential.

>lua
    require("codecompanion").setup({
      adapters = {
        openai = function()
          return require("codecompanion.adapters").extend("openai", {
            env = {
              api_key = "cmd:op read op://personal/OpenAI/credential --no-newline",
            },
          })
        end,
      },
    })
<
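
The same pattern works with any CLI that prints a secret to stdout. As a
sketch, here is the equivalent with the pass password manager (the
`openai/api_key` entry name is hypothetical):

>lua
    require("codecompanion").setup({
      adapters = {
        openai = function()
          return require("codecompanion.adapters").extend("openai", {
            env = {
              -- "openai/api_key" is a hypothetical entry in your pass store
              api_key = "cmd:pass show openai/api_key",
            },
          })
        end,
      },
    })
<
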
**Using Ollama Remotely**

To use Ollama remotely, change the URL in the `env` table, set an API key and
pass it via an "Authorization" header:

>lua
    require("codecompanion").setup({
      adapters = {
        ollama = function()
          return require("codecompanion.adapters").extend("ollama", {
            env = {
              url = "https://my_ollama_url",
              api_key = "OLLAMA_API_KEY",
            },
            headers = {
              ["Content-Type"] = "application/json",
              ["Authorization"] = "Bearer ${api_key}",
            },
            parameters = {
              sync = true,
            },
          })
        end,
      },
    })
<

**Using OpenAI compatible Models like LMStudio or self-hosted models**

To use any other OpenAI compatible models, change the URL in the `env` table
and set an API key:

>lua
    require("codecompanion").setup({
      adapters = {
        ollama = function()
          return require("codecompanion.adapters").extend("openai_compatible", {
            env = {
              url = "http[s]://open_compatible_ai_url", -- optional: default value is ollama url http://127.0.0.1:11434
              api_key = "OpenAI_API_KEY", -- optional: if your endpoint is authenticated
              chat_url = "/v1/chat/completions", -- optional: default value, override if different
            },
          })
        end,
      },
    })
<

**Using Azure OpenAI**

To use Azure OpenAI, you need to have an Azure OpenAI service, an API key, and
a model deployment. Follow these steps to configure the adapter:

1. Create an Azure OpenAI service in your Azure portal.
2. Deploy a model in the Azure OpenAI service.
3. Obtain the API key from the Azure portal.

Then, configure the adapter in your setup as follows:

>lua
    require("codecompanion").setup({
      strategies = {
        chat = {
          adapter = "azure_openai",
        },
        inline = {
          adapter = "azure_openai",
        },
      },
      adapters = {
        azure_openai = function()
          return require("codecompanion.adapters").extend("azure_openai", {
            env = {
              api_key = 'YOUR_AZURE_OPENAI_API_KEY',
              endpoint = 'YOUR_AZURE_OPENAI_ENDPOINT',
            },
            schema = {
              model = {
                default = "YOUR_DEPLOYMENT_NAME",
              }
            },
          })
        end,
      },
    })
<

**Connecting via a Proxy**

You can also connect via a proxy:

>lua
    require("codecompanion").setup({
      adapters = {
        opts = {
          allow_insecure = true, -- Use if required
          proxy = "socks5://127.0.0.1:9999"
        }
      },
    })
<

**Changing an Adapter’s Default Model**

A common ask is to change an adapter’s default model. This can be done by
altering the `schema.model.default` table:

>lua
    require("codecompanion").setup({
      adapters = {
        anthropic = function()
          return require("codecompanion.adapters").extend("anthropic", {
            schema = {
              model = {
                default = "claude-3-opus-20240229",
              },
            },
          })
        end,
      },
    })
<

**Configuring Adapter Settings**

LLMs have many settings such as _model_, _temperature_ and _max_tokens_. In an
adapter, these sit within a schema table and can be configured during setup:

>lua
    require("codecompanion").setup({
      adapters = {
        llama3 = function()
          return require("codecompanion.adapters").extend("ollama", {
            name = "llama3", -- Give this adapter a different name to differentiate it from the default ollama adapter
            schema = {
              model = {
                default = "llama3:latest",
              },
              num_ctx = {
                default = 16384,
              },
              num_predict = {
                default = -1,
              },
            },
          })
        end,
      },
    })
<

**Set a Global Adapter with a Global Variable**

In some cases, it may be helpful to set a global adapter across both the
`chat` and `inline` strategies, on the fly; perhaps your LLM of choice is down
or you’re without internet. This can be achieved by setting the
`vim.g.codecompanion_adapter` variable to the name of an adapter in the
config. This also saves you from having to go into every open chat buffer to
manually set the adapter.
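
For example, assuming you have the `ollama` adapter configured, you could
switch everything over to a local model with:

>lua
    -- Point both the chat and inline strategies at a locally hosted LLM
    vim.g.codecompanion_adapter = "ollama"
<
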
ADVANCED USAGE                                  *codecompanion-advanced-usage*


PROMPT LIBRARY ~

The plugin comes with a number of pre-built prompts. As per the config, these
can be called via keymaps or via the cmdline. These prompts have been
carefully curated to mimic those in GitHub’s Copilot Chat. Of course, you can
create your own prompts and add them to the Action Palette or even to the
slash command completion menu in the chat buffer. Please see the RECIPES guide
for more information.

**Using Keymaps**

You can call a prompt from the library via a keymap using the `prompt` helper:

>lua
    vim.api.nvim_set_keymap("v", "<LocalLeader>ce", "", {
      callback = function()
        require("codecompanion").prompt("explain")
      end,
      noremap = true,
      silent = true,
    })
<

In the example above, we’ve set a visual keymap that will trigger the Explain
prompt. Providing the `short_name` of the prompt as an argument to the helper
(e.g. "commit") will resolve the strategy down to an action.


THE CHAT BUFFER ~

The chat buffer is where you converse with an LLM from within Neovim. The chat
buffer has been designed to be turn-based, whereby you send a message and the
LLM replies. Messages are segmented by H2 headers and once a message has been
sent, it cannot be edited. You can also have multiple chat buffers open at the
same time.

The look and feel of the chat buffer can be customised as per the
`display.chat` table in the config. You can also add additional _Variables_
and _Slash Commands_ which can then be referenced in the chat buffer.

**Slash Commands**

As outlined in the |codecompanion-quickstart| section, Slash Commands allow
you to easily share additional context with your LLM from the chat buffer.
Some of the commands also allow for multiple providers:

- `/buffer` - Has `default`, `telescope` and `fzf_lua` providers
- `/files` - Has `default`, `telescope`, `mini_pick` and `fzf_lua` providers
- `/help` - Has `telescope`, `mini_pick` and `fzf_lua` providers
- `/symbols` - Has `default`, `telescope`, `mini_pick` and `fzf_lua` providers

Please refer to the config to see how to change the default provider.
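
As a sketch (verify the exact keys against the config file), switching the
`/buffer` command to the Telescope provider might look like:

>lua
    require("codecompanion").setup({
      strategies = {
        chat = {
          slash_commands = {
            ["buffer"] = {
              opts = {
                provider = "telescope", -- or "default" / "fzf_lua"
              },
            },
          },
        },
      },
    })
<
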
**References**

When Slash Commands or Variables are used, a block quote is added to the chat
buffer referencing what’s been shared with the LLM. When a conversation
becomes long, this allows you to keep track of what’s been shared. You can
modify these block quotes to remove references from being shared with the LLM,
which will alter the history of the conversation. This can be useful to
minimize token consumption.

**Keymaps**

When in the chat buffer, press `?` to bring up a menu that lists the available
keymaps, variables, slash commands and tools. Currently, the keymaps available
to you in normal mode are:

- `<CR>|<C-s>` to send a message to the LLM
- `<C-c>` to close the chat buffer
- `q` to stop the current request
- `ga` to change the adapter for the current chat
- `gc` to insert a codeblock in the chat buffer
- `gd` to view/debug the chat buffer’s contents
- `gf` to fold any codeblocks in the chat buffer
- `gr` to regenerate the last response
- `gs` to toggle the system prompt on/off
- `gx` to clear the chat buffer’s contents
- `gy` to yank the last codeblock in the chat buffer
- `[[` to move to the previous header
- `]]` to move to the next header
- `{` to move to the previous chat
- `}` to move to the next chat

and in insert mode:

- `<C-_>` to open the completion menu (if nvim-cmp isn’t installed)

**Settings**

You can display your selected adapter’s schema at the top of the buffer, if
`display.chat.show_settings` is set to `true`. This allows you to vary the
response from the LLM.
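
For example, to surface the settings in every chat buffer:

>lua
    require("codecompanion").setup({
      display = {
        chat = {
          show_settings = true,
        },
      },
    })
<
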
INLINE ASSISTANT ~


  [!NOTE] If you’ve set `opts.send_code = false` in your config then the
  plugin will endeavour to ensure no code is sent to the LLM.
One of the challenges with inline editing is determining how the LLM’s
response should be handled in the buffer. If you’ve prompted the LLM to
_“create a table of 5 common text editors”_ then you may wish for the
response to be placed at the cursor’s position in the current buffer.
However, if you asked the LLM to _“refactor this function”_ then you’d
expect the response to _replace_ a visual selection. The plugin will use the
inline LLM you’ve specified in your config to determine if the response
should…

- _replace_ - replace a visual selection you’ve made
- _add_ - be added in the current buffer at the cursor position
- _new_ - be placed in a new buffer
- _chat_ - be placed in a chat buffer

By default, an inline assistant prompt will trigger the diff feature, showing
differences between the original buffer and the changes from the LLM. This can
be turned off in your config via the `display.diff.provider` table. You can
also choose to accept or reject the LLM’s suggestions with the following
keymaps:

- `ga` - Accept an inline edit
- `gr` - Reject an inline edit


AGENTS / TOOLS ~

As outlined by Andrew Ng in Agentic Design Patterns Part 3, Tool Use, LLMs can
act as agents by leveraging external tools. Andrew notes some common examples
such as web searching or code execution that have obvious benefits when using
LLMs.

In the plugin, tools are simply context that’s given to an LLM via a `system`
prompt, and Agents are groupings of tools. These give LLMs knowledge and a
defined schema which can be included in the response for the plugin to parse,
execute and feed back on. Agents and tools can be added as participants in the
chat buffer by using the `@` key.

More information on how agents and tools work, and how you can create your
own, can be found in the TOOLS guide.


AGENTIC WORKFLOWS ~

Agentic Workflows prompt an LLM multiple times, giving it the ability to build
its answer step-by-step instead of all at once. This leads to much better
output, as outlined by Andrew Ng. In fact, it’s possible for older models
like GPT 3.5 to outperform newer models (which use traditional zero-shot
inference).

Implementing Andrew’s advice, at various stages of a pre-defined workflow the
plugin will automatically prompt the LLM without any input or triggering
required from the user. The plugin contains a default `Code workflow`, as part
of the prompt library, which guides the LLM into writing better code.

Of course you can add new workflows by following the RECIPES guide.


EXTRAS                                                  *codecompanion-extras*

**Highlight Groups**

The plugin sets the following highlight groups during setup:

- `CodeCompanionChatHeader` - The headers in the chat buffer
- `CodeCompanionChatSeparator` - Separator between headings in the chat buffer
- `CodeCompanionChatTokens` - Virtual text in the chat buffer showing the token count
- `CodeCompanionChatAgent` - Agents in the chat buffer
- `CodeCompanionChatTool` - Tools in the chat buffer
- `CodeCompanionChatVariable` - Variables in the chat buffer
- `CodeCompanionVirtualText` - All other virtual text in the plugin
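
These can be overridden like any other highlight group. For example, linking
variables to an existing group (the `Identifier` choice below is purely
illustrative):

>lua
    -- Run this after setup() so the plugin doesn't override it
    vim.api.nvim_set_hl(0, "CodeCompanionChatVariable", { link = "Identifier" })
<
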
**Events/Hooks**

The plugin fires many events during its lifecycle:

- `CodeCompanionChatOpened` - Fired after a chat has been opened
- `CodeCompanionChatClosed` - Fired after a chat has been closed
- `CodeCompanionChatAdapter` - Fired after the adapter has been set in the chat
- `CodeCompanionChatModel` - Fired after the model has been set in the chat
- `CodeCompanionToolAdded` - Fired when a tool has been added to a chat
- `CodeCompanionAgentStarted` - Fired when an agent has been initiated in the chat
- `CodeCompanionAgentFinished` - Fired when an agent has finished all tool executions
- `CodeCompanionInlineStarted` - Fired at the start of the Inline strategy
- `CodeCompanionInlineFinished` - Fired at the end of the Inline strategy
- `CodeCompanionRequestStarted` - Fired at the start of any API request
- `CodeCompanionRequestFinished` - Fired at the end of any API request
- `CodeCompanionDiffAttached` - Fired when in Diff mode
- `CodeCompanionDiffDetached` - Fired when exiting Diff mode


  [!TIP] Some events are sent with a data payload which can be leveraged.
Events can be hooked into as follows:

>lua
    local group = vim.api.nvim_create_augroup("CodeCompanionHooks", {})

    vim.api.nvim_create_autocmd({ "User" }, {
      pattern = "CodeCompanionInline*",
      group = group,
      callback = function(request)
        if request.match == "CodeCompanionInlineFinished" then
          -- Format the buffer after the inline request has completed
          require("conform").format({ bufnr = request.buf })
        end
      end,
    })
<

**Statuslines**

You can incorporate a visual indication to show when the plugin is
communicating with an LLM in your Neovim configuration. Below are examples for
two popular statusline plugins.

_lualine.nvim:_

>lua
    local M = require("lualine.component"):extend()

    M.processing = false
    M.spinner_index = 1

    local spinner_symbols = {
      "⠋",
      "⠙",
      "⠹",
      "⠸",
      "⠼",
      "⠴",
      "⠦",
      "⠧",
      "⠇",
      "⠏",
    }
    local spinner_symbols_len = 10

    -- Initializer
    function M:init(options)
      M.super.init(self, options)

      local group = vim.api.nvim_create_augroup("CodeCompanionHooks", {})

      vim.api.nvim_create_autocmd({ "User" }, {
        pattern = "CodeCompanionRequest*",
        group = group,
        callback = function(request)
          if request.match == "CodeCompanionRequestStarted" then
            self.processing = true
          elseif request.match == "CodeCompanionRequestFinished" then
            self.processing = false
          end
        end,
      })
    end

    -- Function that runs every time statusline is updated
    function M:update_status()
      if self.processing then
        self.spinner_index = (self.spinner_index % spinner_symbols_len) + 1
        return spinner_symbols[self.spinner_index]
      else
        return nil
      end
    end

    return M
<

_heirline.nvim:_

>lua
    local CodeCompanion = {
      static = {
        processing = false,
      },
      update = {
        "User",
        pattern = "CodeCompanionRequest*",
        callback = function(self, args)
          if args.match == "CodeCompanionRequestStarted" then
            self.processing = true
          elseif args.match == "CodeCompanionRequestFinished" then
            self.processing = false
          end
          vim.cmd("redrawstatus")
        end,
      },
      {
        condition = function(self)
          return self.processing
        end,
        provider = " ",
        hl = { fg = "yellow" },
      },
    }
<

**Legendary.nvim**

The plugin also supports the amazing legendary.nvim plugin. Simply enable it
in your config:

>lua
    require('legendary').setup({
      extensions = {
        codecompanion = true,
      },
    })
<

**Mini.Diff**

If you’re using mini.diff you can put an icon in the statusline to indicate
which diff is currently in use in a buffer:

>lua
    local function diff_source()
      local bufnr, diff_source, icon
      bufnr = vim.api.nvim_get_current_buf()
      diff_source = vim.b[bufnr].diffCompGit
      if not diff_source then
        return ""
      end
      if diff_source == "git" then
        icon = "󰊤 "
      elseif diff_source == "codecompanion" then
        icon = " "
      end
      return string.format("%%#StatusLineLSP#%s", icon)
    end
<
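
The function above returns a statusline-ready string, so you can drop it into
a section of your statusline plugin. A sketch for lualine, assuming
`diff_source` is defined in the same scope:

>lua
    require("lualine").setup({
      sections = {
        lualine_x = { diff_source },
      },
    })
<
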
TROUBLESHOOTING                                *codecompanion-troubleshooting*

Before raising an issue, there are a number of steps you can take to
troubleshoot a problem:

**Checkhealth**

Run `:checkhealth codecompanion` and check all dependencies are installed
correctly. Also take note of the log file path.

**Turn on logging**

Update your config and turn debug logging on:

>lua
    opts = {
      log_level = "DEBUG", -- or "TRACE"
    }
<

and inspect the log file as per the location from the checkhealth command.

**Try with a minimal.lua file**

A large proportion of issues which are raised in Neovim plugins are to do with
a user’s own config. That’s why I always ask users to fill in a
`minimal.lua` file when they raise an issue. We can rule out their config
being an issue, and it allows me to recreate the problem.

For this purpose, I have included a minimal.lua file in the repository for you
to test out if you’re facing issues. Simply copy the file, edit it and run
Neovim with `nvim --clean -u minimal.lua`.

Generated by panvimdoc

vim:tw=78:ts=8:noet:ft=help:norl: