Commit
black patch on outstanding files that were causing workflow fails on PRs (#193)
cpacker authored Oct 30, 2023
1 parent c01df67 commit 5e682c2
Showing 5 changed files with 17 additions and 19 deletions.
9 changes: 4 additions & 5 deletions memgpt/autogen/examples/agent_autoreply.py
@@ -34,7 +34,7 @@
code_execution_config={"last_n_messages": 2, "work_dir": "groupchat"},
human_input_mode="TERMINATE", # needed?
default_auto_reply="You are going to figure all out by your own. "
"Work by yourself, the user won't reply until you output `TERMINATE` to end the conversation.",
"Work by yourself, the user won't reply until you output `TERMINATE` to end the conversation.",
)

if not USE_MEMGPT:
@@ -43,7 +43,7 @@
name="Coder",
llm_config=llm_config,
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).",
f"(which I make sure to tell everyone I work with).",
human_input_mode="TERMINATE",
)

@@ -54,13 +54,12 @@
"MemGPT_coder",
llm_config=llm_config,
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).",
f"(which I make sure to tell everyone I work with).",
human_input_mode="TERMINATE",
)

# Begin the group chat with a message from the user
user_proxy.initiate_chat(
coder,
message="I want to design an app to make me one million dollars in one month. "
"Tell me all the details, then try out every steps.",
message="I want to design an app to make me one million dollars in one month. " "Tell me all the details, then try out every steps.",
)
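Most hunks in this patch, like the initiate_chat one above, are black joining implicitly concatenated string literals onto a single line. A minimal sketch of reproducing that with black's Python API (assuming black is installed; the line length of 140 is an assumption about this project's configuration, not something stated in the diff):

import black

# Pre-patch form of the call above; format_str applies the same rules that
# `black <file>` does, so with a long enough line length the two adjacent
# string literals end up joined on one line, as in the hunk above.
src = '''\
user_proxy.initiate_chat(
    coder,
    message="I want to design an app to make me one million dollars in one month. "
    "Tell me all the details, then try out every steps.",
)
'''
print(black.format_str(src, mode=black.Mode(line_length=140)))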
13 changes: 6 additions & 7 deletions memgpt/autogen/examples/agent_groupchat.py
@@ -58,9 +58,9 @@
coder = create_autogen_memgpt_agent(
"MemGPT_coder",
persona_description="I am a 10x engineer, trained in Python. I was the first engineer at Uber "
"(which I make sure to tell everyone I work with).",
"(which I make sure to tell everyone I work with).",
user_description=f"You are participating in a group chat with a user ({user_proxy.name}) "
f"and a product manager ({pm.name}).",
f"and a product manager ({pm.name}).",
# extra options
# interface_kwargs={"debug": True},
)
@@ -69,9 +69,9 @@
"MemGPT_coder",
llm_config=llm_config,
system_message=f"I am a 10x engineer, trained in Python. I was the first engineer at Uber "
f"(which I make sure to tell everyone I work with).\n"
f"You are participating in a group chat with a user ({user_proxy.name}) "
f"and a product manager ({pm.name}).",
f"(which I make sure to tell everyone I work with).\n"
f"You are participating in a group chat with a user ({user_proxy.name}) "
f"and a product manager ({pm.name}).",
)

# Initialize the group chat between the user and two LLM agents (PM and coder)
@@ -81,6 +81,5 @@
# Begin the group chat with a message from the user
user_proxy.initiate_chat(
manager,
message="I want to design an app to make me one million dollars in one month. "
"Yes, your heard that right.",
message="I want to design an app to make me one million dollars in one month. " "Yes, your heard that right.",
)
8 changes: 3 additions & 5 deletions memgpt/autogen/memgpt_agent.py
@@ -75,7 +75,7 @@ def create_memgpt_autogen_agent_from_config(
groupchat = GroupChat(
agents=[autogen_memgpt_agent, coop_agent1, coop_agent2],
messages=[],
max_round=12 if max_consecutive_auto_reply is None else max_consecutive_auto_reply
max_round=12 if max_consecutive_auto_reply is None else max_consecutive_auto_reply,
)
manager = GroupChatManager(name=name, groupchat=groupchat, llm_config=llm_config)
return manager
@@ -146,9 +146,7 @@ def __init__(
self.register_reply([Agent, None], MemGPTAgent._generate_reply_for_user_message)
self.messages_processed_up_to_idx = 0

self._is_termination_msg = (
is_termination_msg if is_termination_msg is not None else (lambda x: x == "TERMINATE")
)
self._is_termination_msg = is_termination_msg if is_termination_msg is not None else (lambda x: x == "TERMINATE")

def format_other_agent_message(self, msg):
if "name" in msg:
@@ -220,7 +218,7 @@ async def _a_generate_reply_for_user_message(
break

# Stop the conversation
if self._is_termination_msg(new_messages[-1]['content']):
if self._is_termination_msg(new_messages[-1]["content"]):
return True, None

# Pass back to AutoGen the pretty-printed calls MemGPT made to the interface
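The __init__ hunk above reflows the default termination check onto one line; a small self-contained illustration of that predicate (mirroring the lambda and the new_messages[-1]["content"] access shown above, with hypothetical message data):

# When no is_termination_msg callable is supplied, fall back to an exact
# string match against "TERMINATE", as in the hunk above.
is_termination_msg = None
_is_termination_msg = is_termination_msg if is_termination_msg is not None else (lambda x: x == "TERMINATE")

# Hypothetical message list in the shape the reply handler inspects.
new_messages = [{"role": "assistant", "content": "TERMINATE"}]
if _is_termination_msg(new_messages[-1]["content"]):
    print("conversation stops here")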
4 changes: 3 additions & 1 deletion memgpt/config.py
@@ -121,7 +121,9 @@ async def config_init(cls: Type["Config"], config_file: str = None):
).ask_async()

self.archival_storage_index = None
self.preload_archival = await questionary.confirm("Would you like to preload anything into MemGPT's archival memory?", default=False).ask_async()
self.preload_archival = await questionary.confirm(
"Would you like to preload anything into MemGPT's archival memory?", default=False
).ask_async()
if self.preload_archival:
self.load_type = await questionary.select(
"What would you like to load?",
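The config.py hunk above wraps a questionary prompt across lines; a hedged standalone sketch of the same call pattern (questionary's confirm(...) returns a Question whose ask_async() coroutine yields the answer — the surrounding function is hypothetical):

import asyncio
import questionary

async def ask_preload() -> None:
    # Same prompt as in the hunk above; default=False preselects "No".
    preload = await questionary.confirm(
        "Would you like to preload anything into MemGPT's archival memory?", default=False
    ).ask_async()
    print(preload)

# Requires an interactive terminal:
# asyncio.run(ask_preload())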
2 changes: 1 addition & 1 deletion memgpt/memory.py
@@ -685,7 +685,7 @@ def search(self, query_string, count=None, start=None):
# from pprint import pprint
# pprint(results)
return results, len(results)

async def a_search(self, query_string, count=None, start=None):
return self.search(query_string, count, start)

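Since the commit message attributes the PR workflow failures to unformatted files, here is a hedged sketch of the kind of check such a workflow typically runs; the target path and --line-length value are assumptions, not read from this repository's CI configuration:

import subprocess
import sys

# `black --check` exits non-zero when any file would be reformatted, which is
# the failure mode this patch clears.
result = subprocess.run(
    [sys.executable, "-m", "black", "--check", "--line-length", "140", "memgpt/"],
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    print(result.stdout or result.stderr)  # lists files that still need formatting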
