Skip to content

Commit d618b2f

Browse files
authored
Merge pull request #280 from drivecore/feature/local-openai-support
feature: local openai support
2 parents 6a3a392 + c04ee43 commit d618b2f

File tree

14 files changed

+120
-28
lines changed

14 files changed

+120
-28
lines changed

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,8 +83,8 @@ export default {
8383
profile: false,
8484
tokenCache: true,
8585

86-
// Ollama configuration (if using local models)
87-
ollamaBaseUrl: 'http://localhost:11434',
86+
// Base URL configuration (for providers that need it)
87+
baseUrl: 'http://localhost:11434', // Example for Ollama
8888
};
8989
```
9090

mycoder.config.js

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,9 @@ export default {
1818
//model: 'llama3.2:3b',
1919
//provider: 'xai',
2020
//model: 'grok-2-latest',
21+
//provider: 'openai',
22+
//model: 'qwen2.5-coder:14b',
23+
//baseUrl: 'http://192.168.2.66:80/v1-openai',
2124
maxTokens: 4096,
2225
temperature: 0.7,
2326

packages/agent/src/core/llm/__tests__/openai.test.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -177,7 +177,7 @@ describe('OpenAIProvider', () => {
177177
'role' in toolUseMessage
178178
) {
179179
expect(toolUseMessage.role).toBe('assistant');
180-
expect(toolUseMessage.content).toBe(null);
180+
expect(toolUseMessage.content).toBe(''); // required by gpustack's implementation of the OpenAI SDK.
181181

182182
if (
183183
'tool_calls' in toolUseMessage &&

packages/agent/src/core/llm/provider.ts

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -57,12 +57,6 @@ export const providerConfig: Record<string, ProviderConfig> = {
5757
model: 'gpt-4o-2024-05-13',
5858
factory: (model, options) => new OpenAIProvider(model, options),
5959
},
60-
gpustack: {
61-
docsUrl: 'https://mycoder.ai/docs/provider/local-openai',
62-
model: 'llama3.2',
63-
baseUrl: 'http://localhost:80',
64-
factory: (model, options) => new OpenAIProvider(model, options),
65-
},
6660
ollama: {
6761
docsUrl: 'https://mycoder.ai/docs/provider/ollama',
6862
model: 'llama3.2',

packages/agent/src/core/llm/providers/openai.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -154,7 +154,7 @@ export class OpenAIProvider implements LLMProvider {
154154
// so we'll include it as a function call in an assistant message
155155
return {
156156
role: 'assistant',
157-
content: null,
157+
content: '',
158158
tool_calls: [
159159
{
160160
id: msg.id,

packages/agent/src/tools/getTools.ts

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@ import { fetchTool } from './io/fetch.js';
1010
import { textEditorTool } from './io/textEditor.js';
1111
import { createMcpTool } from './mcp.js';
1212
import { listBackgroundToolsTool } from './system/listBackgroundTools.js';
13-
import { respawnTool } from './system/respawn.js';
1413
import { sequenceCompleteTool } from './system/sequenceComplete.js';
1514
import { shellMessageTool } from './system/shellMessage.js';
1615
import { shellStartTool } from './system/shellStart.js';
@@ -39,7 +38,7 @@ export function getTools(options?: GetToolsOptions): Tool[] {
3938
shellMessageTool as unknown as Tool,
4039
browseStartTool as unknown as Tool,
4140
browseMessageTool as unknown as Tool,
42-
respawnTool as unknown as Tool,
41+
//respawnTool as unknown as Tool, // disabled for now: this tool confuses the agent.
4342
sleepTool as unknown as Tool,
4443
listBackgroundToolsTool as unknown as Tool,
4544
];

packages/agent/src/tools/system/respawn.test.ts

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,8 @@ import { respawnTool } from './respawn';
88
const toolContext: ToolContext = getMockToolContext();
99

1010
describe('respawnTool', () => {
11-
it('should have correct name and description', () => {
11+
it('should have correct name', () => {
1212
expect(respawnTool.name).toBe('respawn');
13-
expect(respawnTool.description).toContain('Resets the agent context');
1413
});
1514

1615
it('should execute and return confirmation message', async () => {

packages/agent/src/tools/system/respawn.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ const returnSchema = z.object({
2020
export const respawnTool: Tool = {
2121
name: 'respawn',
2222
description:
23-
'Resets the agent context to just the system prompt and provided context',
23+
'Resets the current conversation to just the system prompt and provided input context to this tool.',
2424
logPrefix: '🔄',
2525
parameters: parameterSchema,
2626
returns: returnSchema,

packages/cli/src/commands/$default.ts

Lines changed: 11 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -114,18 +114,25 @@ export async function executePrompt(
114114
throw new Error(`Unknown provider: ${config.provider}`);
115115
}
116116

117-
const { keyName } = providerSettings;
117+
// only validate key if baseUrl is not set, otherwise we assume the user is using a local provider
118118
let apiKey: string | undefined = undefined;
119+
const { keyName } = providerSettings;
119120
if (keyName) {
120121
// Then fall back to environment variable
122+
logger.info(`Looking API key in env: ${keyName}`);
121123
apiKey = process.env[keyName];
122-
if (!apiKey) {
123-
logger.error(getProviderApiKeyError(config.provider));
124-
throw new Error(`${config.provider} API key not found`);
124+
if (!config.baseUrl) {
125+
if (!apiKey) {
126+
logger.error(getProviderApiKeyError(config.provider));
127+
throw new Error(`${config.provider} API key not found`);
128+
}
125129
}
126130
}
127131

128132
logger.info(`LLM: ${config.provider}/${config.model}`);
133+
if (apiKey) {
134+
logger.info(`Using API key: ${apiKey.slice(0, 4)}...`);
135+
}
129136
if (config.baseUrl) {
130137
// For Ollama, we check if the base URL is set
131138
logger.info(`Using base url: ${config.baseUrl}`);

packages/docs/docs/providers/anthropic.md

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,9 +32,6 @@ export default {
3232
provider: 'anthropic',
3333
model: 'claude-3-7-sonnet-20250219',
3434

35-
// Optional: Set API key directly (environment variable is preferred)
36-
// anthropicApiKey: 'your_api_key_here',
37-
3835
// Other MyCoder settings
3936
maxTokens: 4096,
4037
temperature: 0.7,

packages/docs/docs/providers/index.mdx

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,8 +11,9 @@ MyCoder supports multiple Language Model (LLM) providers, giving you flexibility
1111
MyCoder currently supports the following LLM providers:
1212

1313
- [**Anthropic**](./anthropic.md) - Claude models from Anthropic
14-
- [**OpenAI**](./openai.md) - GPT models from OpenAI
14+
- [**OpenAI**](./openai.md) - GPT models from OpenAI (and OpenAI compatible providers)
1515
- [**Ollama**](./ollama.md) - Self-hosted open-source models via Ollama
16+
- [**xAI**](./xai.md) - Grok models from xAI
1617

1718
## Configuring Providers
1819

@@ -52,3 +53,4 @@ For detailed instructions on setting up each provider, see the provider-specific
5253
- [Anthropic Configuration](./anthropic.md)
5354
- [OpenAI Configuration](./openai.md)
5455
- [Ollama Configuration](./ollama.md)
56+
- [xAI Configuration](./xai.md)

packages/docs/docs/providers/ollama.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ export default {
6262
model: 'medragondot/Sky-T1-32B-Preview:latest',
6363

6464
// Optional: Custom base URL (defaults to http://localhost:11434)
65-
// ollamaBaseUrl: 'http://localhost:11434',
65+
// baseUrl: 'http://localhost:11434',
6666

6767
// Other MyCoder settings
6868
maxTokens: 4096,

packages/docs/docs/providers/openai.md

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,10 +38,6 @@ export default {
3838
provider: 'openai',
3939
model: 'gpt-4o',
4040

41-
// Optional: Set API key directly (environment variable is preferred)
42-
// openaiApiKey: 'your_api_key_here',
43-
// openaiOrganization: 'your_organization_id',
44-
4541
// Other MyCoder settings
4642
maxTokens: 4096,
4743
temperature: 0.7,
@@ -60,6 +56,24 @@ MyCoder supports all OpenAI models that have tool/function calling capabilities.
6056

6157
You can use any other OpenAI model that supports function calling with MyCoder. The OpenAI provider is not limited to just these listed models.
6258

59+
## Using OpenAI Compatible Providers
60+
61+
A number of providers offer OpenAI-compatible REST API endpoints, such as xAI and [GPUStack](https://gpustack.ai). To point the OpenAI provider at a different provider's REST API, set the `baseUrl` and also, if applicable, set `OPENAI_API_KEY` to that provider's required key. For example:
62+
63+
```javascript
64+
export default {
65+
// Provider selection
66+
provider: 'openai',
67+
model: 'qwen2.5',
68+
baseUrl: 'http://localhost/v1-openai',
69+
70+
// Other MyCoder settings
71+
maxTokens: 4096,
72+
temperature: 0.7,
73+
// ...
74+
};
75+
```
76+
6377
## Best Practices
6478

6579
- GPT-4o provides the best balance of performance and cost for most MyCoder tasks

packages/docs/docs/providers/xai.md

Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
---
2+
sidebar_position: 6
3+
---
4+
5+
# xAI (Grok)
6+
7+
[xAI](https://x.ai/) is the company behind Grok, a powerful large language model designed to be helpful, harmless, and honest. Grok models offer strong reasoning capabilities and support for tool calling.
8+
9+
## Setup
10+
11+
To use Grok models with MyCoder, you need an xAI API key:
12+
13+
1. Create an account at [xAI](https://x.ai/)
14+
2. Navigate to the API Keys section and create a new API key
15+
3. Set the API key as an environment variable or in your configuration file
16+
17+
### Environment Variables
18+
19+
You can set the xAI API key as an environment variable:
20+
21+
```bash
22+
export XAI_API_KEY=your_api_key_here
23+
```
24+
25+
### Configuration
26+
27+
Configure MyCoder to use xAI's Grok in your `mycoder.config.js` file:
28+
29+
```javascript
30+
export default {
31+
// Provider selection
32+
provider: 'xai',
33+
model: 'grok-2-latest',
34+
35+
// Other MyCoder settings
36+
maxTokens: 4096,
37+
temperature: 0.7,
38+
// ...
39+
};
40+
```
41+
42+
## Supported Models
43+
44+
xAI offers several Grok models with different capabilities:
45+
46+
- `grok-2-latest` (recommended) - The latest Grok-2 model with strong reasoning and tool-calling capabilities
47+
- `grok-1` - The original Grok model
48+
49+
## Best Practices
50+
51+
- Grok models excel at coding tasks and technical problem-solving
52+
- They have strong tool-calling capabilities, making them suitable for MyCoder workflows
53+
- For complex programming tasks, use Grok-2 models for best results
54+
- Provide clear, specific instructions for optimal results
55+
56+
## Custom Base URL
57+
58+
If you need to use a different base URL for the xAI API (for example, if you're using a proxy or if xAI changes their API endpoint), you can specify it in your configuration:
59+
60+
```javascript
61+
export default {
62+
provider: 'xai',
63+
model: 'grok-2-latest',
64+
baseUrl: 'https://api.x.ai/v1', // Default xAI API URL
65+
};
66+
```
67+
68+
## Troubleshooting
69+
70+
If you encounter issues with xAI's Grok:
71+
72+
- Verify your API key is correct and has sufficient quota
73+
- Check that you're using a supported model name
74+
- For tool-calling issues, ensure your functions are properly formatted
75+
- Monitor your token usage to avoid unexpected costs
76+
77+
For more information, visit the [xAI Documentation](https://x.ai/docs).

0 commit comments

Comments
 (0)