Update default model to gpt-4o-mini
johnd0e authored and umputun committed Oct 20, 2024
1 parent 340655b commit 4bc148a
Showing 4 changed files with 4 additions and 4 deletions.
README.md (1 addition, 1 deletion)
@@ -276,7 +276,7 @@ openai:
--openai.apibase= custom openai API base, default is https://api.openai.com/v1 [$OPENAI_API_BASE]
--openai.veto veto mode, confirm detected spam [$OPENAI_VETO]
--openai.prompt= openai system prompt, if empty uses builtin default [$OPENAI_PROMPT]
- --openai.model= openai model (default: gpt-4) [$OPENAI_MODEL]
+ --openai.model= openai model (default: gpt-4o-mini) [$OPENAI_MODEL]
--openai.max-tokens-response= openai max tokens in response (default: 1024) [$OPENAI_MAX_TOKENS_RESPONSE]
--openai.max-tokens-request= openai max tokens in request (default: 2048) [$OPENAI_MAX_TOKENS_REQUEST]
--openai.max-symbols-request= openai max symbols in request, failback if tokenizer failed (default: 16000) [$OPENAI_MAX_SYMBOLS_REQUEST]
app/main.go (1 addition, 1 deletion)
@@ -76,7 +76,7 @@ type options struct {
APIBase string `long:"apibase" env:"API_BASE" description:"custom openai API base, default is https://api.openai.com/v1"`
Veto bool `long:"veto" env:"VETO" description:"veto mode, confirm detected spam"`
Prompt string `long:"prompt" env:"PROMPT" default:"" description:"openai system prompt, if empty uses builtin default"`
- Model string `long:"model" env:"MODEL" default:"gpt-4" description:"openai model"`
+ Model string `long:"model" env:"MODEL" default:"gpt-4o-mini" description:"openai model"`
MaxTokensResponse int `long:"max-tokens-response" env:"MAX_TOKENS_RESPONSE" default:"1024" description:"openai max tokens in response"`
MaxTokensRequest int `long:"max-tokens-request" env:"MAX_TOKENS_REQUEST" default:"2048" description:"openai max tokens in request"`
MaxSymbolsRequest int `long:"max-symbols-request" env:"MAX_SYMBOLS_REQUEST" default:"16000" description:"openai max symbols in request, failback if tokenizer failed"`
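The `long`/`env`/`default` struct tags in this options block follow the github.com/jessevdk/go-flags convention, which appears to be how the binary maps CLI flags and environment variables onto its configuration. A minimal sketch of how the new default takes effect under that assumption; note the real options live in an `openai` group (hence `--openai.model`), which this simplified, hypothetical snippet flattens to `--model`:

// Sketch only, assuming github.com/jessevdk/go-flags (the tag style above
// matches that library); struct and variable names here are illustrative.
package main

import (
	"fmt"

	flags "github.com/jessevdk/go-flags"
)

type openaiOpts struct {
	Model string `long:"model" env:"MODEL" default:"gpt-4o-mini" description:"openai model"`
}

func main() {
	var opts openaiOpts
	if _, err := flags.Parse(&opts); err != nil {
		return // go-flags already reported the parse error or printed help
	}
	// With neither --model nor the MODEL env var set, the tag default applies.
	fmt.Println(opts.Model) // gpt-4o-mini
}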
lib/tgspam/openai.go (1 addition, 1 deletion)
@@ -59,7 +59,7 @@ func newOpenAIChecker(client openAIClient, params OpenAIConfig) *openAIChecker {
params.MaxSymbolsRequest = 8192
}
if params.Model == "" {
- params.Model = "gpt-4"
+ params.Model = "gpt-4o-mini"
}
if params.RetryCount <= 0 {
params.RetryCount = 1
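The hunk above sits in the constructor's default-filling block: zero-value fields of OpenAIConfig are replaced with built-in defaults, and the built-in model is now gpt-4o-mini. A standalone sketch of that fallback pattern follows; it is illustrative only, using a stand-in struct rather than the repository's actual OpenAIConfig:

// Standalone sketch of the zero-value fallback pattern shown above;
// the struct and function names here are stand-ins, not the real code.
package main

import "fmt"

type config struct {
	Model             string
	MaxSymbolsRequest int
	RetryCount        int
}

func withDefaults(p config) config {
	if p.MaxSymbolsRequest == 0 {
		p.MaxSymbolsRequest = 8192
	}
	if p.Model == "" {
		p.Model = "gpt-4o-mini" // new default, previously "gpt-4"
	}
	if p.RetryCount <= 0 {
		p.RetryCount = 1
	}
	return p
}

func main() {
	fmt.Printf("%+v\n", withDefaults(config{}))
	// {Model:gpt-4o-mini MaxSymbolsRequest:8192 RetryCount:1}
}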
lib/tgspam/openai_test.go (1 addition, 1 deletion)
@@ -27,7 +27,7 @@ func TestOpenAIChecker_Check(t *testing.T) {
MaxTokensResponse: 300,
MaxTokensRequest: 3000,
MaxSymbolsRequest: 12000,
- Model: "gpt-4",
+ Model: "gpt-4o-mini",
})

t.Run("spam response", func(t *testing.T) {
