From 432ca8b7af8b8d3a7ec30151ca38308470473292 Mon Sep 17 00:00:00 2001
From: "bryan.jia" <iccccing@gmail.com>
Date: Fri, 28 Feb 2025 16:00:20 +0800
Subject: [PATCH] fix: ollama model provider cannot set 'num_ctx' and other
 parameters #2442

langchain-openai is not compatible with Ollama parameter settings such as num_ctx, so model instances must be created with langchain-ollama instead.
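
A minimal sketch of the resulting behavior (the model name, URL, and
context size below are illustrative, not taken from this change):

    from langchain_ollama.chat_models import ChatOllama

    # ChatOllama declares Ollama-specific options such as num_ctx as model
    # fields, so they are forwarded to the Ollama API; ChatOpenAI targets
    # the OpenAI API and does not carry these options.
    chat = ChatOllama(
        model="llama3",                     # illustrative model name
        base_url="http://localhost:11434",  # default local Ollama endpoint
        num_ctx=4096,                       # context window, Ollama-only option
    )
    print(chat.invoke("Hello").content)

This mirrors how new_instance() now forwards **optional_params (which may
include num_ctx) directly into the ChatOllama constructor.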

(cherry picked from commit 42ae7b443d7320ace464e9a12abcbb48d7fabc9c)
---
 .../impl/ollama_model_provider/model/llm.py           | 11 +++++------
 pyproject.toml                                        |  1 +
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py b/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py
index 7c98f7e5cef..6cd291ff3cc 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/llm.py
@@ -10,7 +10,8 @@
 from urllib.parse import urlparse, ParseResult
 
 from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_openai.chat_models import ChatOpenAI
+from langchain_ollama.chat_models import ChatOllama
+
 
 from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -24,7 +25,7 @@ def get_base_url(url: str):
     return result_url[:-1] if result_url.endswith("/") else result_url
 
 
-class OllamaChatModel(MaxKBBaseModel, ChatOpenAI):
+class OllamaChatModel(MaxKBBaseModel, ChatOllama):
     @staticmethod
     def is_cache_model():
         return False
@@ -33,12 +34,10 @@ def is_cache_model():
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         api_base = model_credential.get('api_base', '')
         base_url = get_base_url(api_base)
-        base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
 
-        return OllamaChatModel(model=model_name, openai_api_base=base_url,
-                               openai_api_key=model_credential.get('api_key'),
-                               stream_usage=True, **optional_params)
+        return OllamaChatModel(model=model_name, base_url=base_url,
+                               stream=True, **optional_params)
 
     def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
         tokenizer = TokenizerManage.get_tokenizer()
diff --git a/pyproject.toml b/pyproject.toml
index a5d175b855b..8134688e758 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,6 +28,7 @@ pycryptodome = "^3.19.0"
 beautifulsoup4 = "^4.12.2"
 html2text = "^2024.2.26"
 langchain-openai = "^0.1.8"
+langchain-ollama = "0.1.3"
 django-ipware = "^6.0.4"
 django-apscheduler = "^0.6.2"
 pymupdf = "1.24.9"