# deepseek_app.py
import streamlit as st
import ollama
# Set the desired model
desired_model = "deepseek-r1:1.5b"
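# Note: this assumes the model is already available locally
# (e.g. pulled beforehand with `ollama pull deepseek-r1:1.5b`).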
# Set the title of the Streamlit app
st.title("GoldenBrain Chat Based On DeepSeek")
# Initialize session state to store the conversation history
if "conversation" not in st.session_state:
    st.session_state.conversation = []

# Initialize session state for dark/light mode
if "dark_mode" not in st.session_state:
    st.session_state.dark_mode = False
# Function to toggle dark/light mode
def toggle_dark_mode():
    st.session_state.dark_mode = not st.session_state.dark_mode
    # Inform the user that themes cannot be toggled dynamically
    st.warning("Dynamic theme switching is not supported by Streamlit. Please configure the theme in the Streamlit settings.")

# Add a button to toggle dark/light mode
st.button("Toggle Dark/Light Mode", on_click=toggle_dark_mode)
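# For reference, Streamlit themes are configured statically, e.g. in
# .streamlit/config.toml:
#   [theme]
#   base = "dark"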
# Function to stream the response from the model
def stream_response(question):
    response = ollama.chat(
        desired_model,
        messages=[{"role": "user", "content": question}],
        stream=True,  # Enable streaming
    )
    full_response = ""
    placeholder = st.empty()  # Placeholder to stream the response into
    for chunk in response:
        chunk_content = chunk['message']['content']
        full_response += chunk_content
        placeholder.markdown(f"**GoldenBrain:** {full_response}")  # Update the placeholder with the latest text
    return full_response
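# A minimal defensive sketch (hypothetical helper, not wired into the app below):
# ollama.chat raises an exception if the Ollama server is unreachable or the model
# has not been pulled, so a wrapper like this could surface that as a UI error
# instead of a traceback.
def stream_response_safe(question):
    try:
        return stream_response(question)
    except Exception as exc:  # broad catch for illustration only
        st.error(f"Could not get a response from the model: {exc}")
        return ""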
# Display the conversation history
for message in st.session_state.conversation:
    with st.chat_message(message["role"]):  # Use st.chat_message for a chat-style UI
        st.markdown(message["content"])
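# (Streamlit reruns the whole script on every interaction, which is why the
# stored history above has to be replayed on each run.)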
# Input for a new question
if prompt := st.chat_input("Enter your question here"):
    # Add the user's question to the conversation history
    st.session_state.conversation.append({"role": "user", "content": prompt})

    # Display the user's question
    with st.chat_message("user"):
        st.markdown(prompt)

    # Stream the response from the model; stream_response already renders it
    # via its placeholder, so it is not printed a second time here
    with st.chat_message("assistant"):
        response = stream_response(prompt)

    # Add the model's response to the conversation history
    st.session_state.conversation.append({"role": "assistant", "content": response})
# JavaScript to auto-scroll to the bottom of the page
st.markdown(
    """
    <script>
    window.addEventListener('load', function() {
        window.scrollTo(0, document.body.scrollHeight);
    });
    </script>
    """,
    unsafe_allow_html=True,
)
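# Caveat: <script> tags injected via st.markdown are inserted with innerHTML and
# generally do not execute, so the auto-scroll above may be a no-op;
# st.components.v1.html is the usual way to run custom JavaScript (it renders
# inside an iframe).
#
# Run the app with: streamlit run deepseek_app.py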