From 1dd5deacb0f18def88c9e6e96bf51c3865e86eda Mon Sep 17 00:00:00 2001 From: Soumya Mondal Date: Thu, 27 Mar 2025 02:49:39 +0530 Subject: [PATCH 1/5] Update default model name in custom command to 'gemma3:4b' --- initialterm/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/initialterm/main.py b/initialterm/main.py index d1f7f9d..5f939cd 100644 --- a/initialterm/main.py +++ b/initialterm/main.py @@ -116,7 +116,7 @@ def custom_cmd(os_name, model_name): break -def start_custom_cmd(model_name='llama3.2:3b'): +def start_custom_cmd(model_name='gemma3:4b'): """ Initializes the application and starts the custom command prompt. @@ -147,7 +147,7 @@ def start_custom_cmd(model_name='llama3.2:3b'): parser = argparse.ArgumentParser() parser.add_argument('--spawn', action='store_true') - parser.add_argument('--model', type=str, default='llama3.2:3b', help='Specify the model name to use') + parser.add_argument('--model', type=str, default='gemma3:4b', help='Specify the model name to use') args = parser.parse_args() if not args.spawn: From d4f6ae3cf8707d5d0e552cacbf19d87f8b5930e4 Mon Sep 17 00:00:00 2001 From: Soumya Mondal Date: Thu, 27 Mar 2025 02:59:23 +0530 Subject: [PATCH 2/5] Add session management for user queries and enhance README with features --- README.md | 7 +++++++ initialterm/main.py | 51 ++++++++++++++++++++++++++++++++++++++------- 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 09170bf..eb831ed 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,13 @@ InitialTerm is a terminal application that leverages the Ollama API to convert u **Please ensure that you review and understand the commands before executing them to prevent any unintended consequences.** +### Features + +- **Ollama API Integration**: Converts user queries into command-line commands using the Ollama API. +- **Cross-Platform Support**: Works on Windows, macOS, and Linux. 
+- **Session Memory**: Stores the conversation in a Session for long-term memory based on the current session. +- **User Confirmation**: Prompts for user confirmation before executing generated commands. + --- # InitialTerm diff --git a/initialterm/main.py b/initialterm/main.py index 5f939cd..a4917c3 100644 --- a/initialterm/main.py +++ b/initialterm/main.py @@ -5,6 +5,7 @@ import os import sys import logging +import json # Configure logging logging.basicConfig(level=logging.DEBUG, @@ -24,7 +25,33 @@ class Color: PINK = '\033[95m' -def ollama_api_call(os_name, command, model_name): +def load_session(session_file): + """ + Loads the session data from a JSON file. + + Parameters: + session_file (str): The path to the session file. + + Returns: + list: The session data. + """ + if os.path.exists(session_file): + with open(session_file, 'r') as file: + return json.load(file) + return [] + +def save_session(session_file, session_data): + """ + Saves the session data to a JSON file. + + Parameters: + session_file (str): The path to the session file. + session_data (list): The session data to save. + """ + with open(session_file, 'w') as file: + json.dump(session_data, file, indent=4) + +def ollama_api_call(os_name, command, model_name, session_data): """ Calls the Ollama API to convert user queries into command-line commands. @@ -32,15 +59,17 @@ def ollama_api_call(os_name, command, model_name): os_name (str): The operating system name. command (str): The user query to convert. model_name (str): The model name to use for the API call. + session_data (list): The session data to include in the API call. Returns: str: The generated command-line command. 
""" logging.debug(f"Calling Ollama API with OS: {os_name}, command: {command}, and model: {model_name}") + messages = session_data + [{'role': 'user', 'content': f'I am using {os_name} operating system which does not have any extentions instaalled and I want to Convert the user query: {command} to commandline / terminal code. Only output one line of terminal command please. Do not add any other text as the intention is to copy paste this generated output directly in terminal and run.'}] stream = ollama.chat( model=model_name, options={'temperature': 0.1}, - messages=[{'role': 'user', 'content': f'I am using {os_name} operating system which does not have any extentions instaalled and I want to Convert the user query: {command} to commandline / terminal code. Only output one line of terminal command please. Do not add any other text as the intention is to copy paste this generated output directly in terminal and run.'}], + messages=messages, stream=True, ) logging.debug("Ollama API call completed") @@ -53,10 +82,11 @@ def ollama_api_call(os_name, command, model_name): strdata = ''.join([chunk for chunk in stream_data]).replace("`", "").replace("```sh", "").replace("\n", "").replace("```bash", "") print(f"\n{Color.BLUE}Finished.\nGenerated: {strdata}\n{Color.RESET}") + session_data.append({'role': 'assistant', 'content': strdata.strip().replace('`', '')}) return strdata.strip().replace('`', '') -def echo_and_execute(command, os_name, model_name): +def echo_and_execute(command, os_name, model_name, session_data): """ Executes a command generated by the Ollama API and handles user confirmation. @@ -64,10 +94,11 @@ def echo_and_execute(command, os_name, model_name): command (str): The command to execute. os_name (str): The operating system name. model_name (str): The model name used for the API call. + session_data (list): The session data to include in the API call. 
""" logging.info(f"Executing command: {command} on OS: {os_name}") try: - command_to_execue = ollama_api_call(os_name, command, model_name) + command_to_execue = ollama_api_call(os_name, command, model_name, session_data) confirm = input(f"Generated command is: {Color.CYAN}'{command_to_execue}', shall we continue? (Y/N):{Color.RESET}# ").strip().lower() if confirm.lower() in ['y', 'yes', 'yup']: result = subprocess.run(command_to_execue, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -88,17 +119,20 @@ def echo_and_execute(command, os_name, model_name): print(f"{Color.RED}An exception occurred while executing the command: {e}{Color.RESET}", file=sys.stderr) -def custom_cmd(os_name, model_name): +def custom_cmd(os_name, model_name, session_file): """ Starts a custom command prompt for executing user queries. Parameters: os_name (str): The operating system name. model_name (str): The model name to use for the API call. + session_file (str): The path to the session file. """ logging.info(f"Starting custom command prompt for {os_name} with model {model_name}") print(f"{Color.CYAN}Welcome to the Initial Terminal command prompt for {os_name} with model {model_name}!\n Ollama with {model_name} LLM running locally for inference\n{Color.RESET}\n Type quit/exit to exit") + session_data = load_session(session_file) + while True: try: input_str = input(f"{Color.GREEN}_____________\nCommand to execute :\n{Color.RESET}# ") @@ -108,7 +142,9 @@ def custom_cmd(os_name, model_name): print(f"{Color.GREEN}Exiting the custom command prompt.{Color.RESET}") break - echo_and_execute(input_str, os_name, model_name) + session_data.append({'role': 'user', 'content': input_str}) + echo_and_execute(input_str, os_name, model_name, session_data) + save_session(session_file, session_data) except KeyboardInterrupt: logging.info("Exiting custom command prompt due to keyboard interrupt") @@ -139,7 +175,8 @@ def start_custom_cmd(model_name='gemma3:4b'): os_name = 
os_name_mapping.get(os_name, 'Unsupported OS') -    custom_cmd(os_name, model_name) +    session_file = f"session_{os_name.lower()}.json" +    custom_cmd(os_name, model_name, session_file) if __name__ == "__main__": From 407b24d83dc22572427a0108ef9f10f66f30ecff Mon Sep 17 00:00:00 2001 From: Soumya Mondal Date: Thu, 27 Mar 2025 03:16:32 +0530 Subject: [PATCH 3/5] Add auto error correction in case the executed command produced an error --- initialterm/main.py | 46 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/initialterm/main.py b/initialterm/main.py index a4917c3..ec479f1 100644 --- a/initialterm/main.py +++ b/initialterm/main.py @@ -85,6 +85,39 @@ def ollama_api_call(os_name, command, model_name, session_data): session_data.append({'role': 'assistant', 'content': strdata.strip().replace('`', '')}) return strdata.strip().replace('`', '') +def ollama_api_correct_error(os_name, error_message, model_name, session_data): + """ + Calls the Ollama API to correct errors in the command. + + Parameters: + os_name (str): The operating system name. + error_message (str): The error message to correct. + model_name (str): The model name to use for the API call. + session_data (list): The session data to include in the API call. + + Returns: + str: The corrected command-line command. + """ + logging.debug(f"Calling Ollama API to correct error with OS: {os_name}, error: {error_message}, and model: {model_name}") + messages = session_data + [{'role': 'user', 'content': f'I am using {os_name} operating system and encountered the following error while executing a command: {error_message}. 
Please provide a corrected command to resolve this error.'}] + stream = ollama.chat( + model=model_name, + options={'temperature': 0.1}, + messages=messages, + stream=True, + ) + logging.debug("Ollama API error correction call completed") + + stream_data = [] + for chunk in stream: + stream_data.append(chunk['message']['content']) + print(f"{Color.BLUE}{chunk['message']['content']}{Color.RESET}", end='', flush=True) + + strdata = ''.join([chunk for chunk in stream_data]).replace("`", "").replace("```sh", "").replace("\n", "").replace("```bash", "") + print(f"\n{Color.BLUE}Finished.\nCorrected: {strdata}\n{Color.RESET}") + + session_data.append({'role': 'assistant', 'content': strdata.strip().replace('`', '')}) + return strdata.strip().replace('`', '') def echo_and_execute(command, os_name, model_name, session_data): """ @@ -114,6 +147,19 @@ def echo_and_execute(command, os_name, model_name, session_data): if error: logging.error(f"Command error: {error}") print(f"\n{Color.RED}Error: {error}{Color.RESET}") + corrected_command = ollama_api_correct_error(os_name, error, model_name, session_data) + confirm = input(f"Corrected command is: {Color.CYAN}'{corrected_command}', shall we continue? 
(Y/N):{Color.RESET}# ").strip().lower() + if confirm.lower() in ['y', 'yes', 'yup']: + result = subprocess.run(corrected_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + logging.info(f"Corrected command executed: {corrected_command}") + output = result.stdout.decode().strip() + error = result.stderr.decode().strip() + if output: + logging.debug(f"Corrected command output: {output}") + print(f"\n{Color.GREEN}# Output:{Color.RESET}\n{Color.PINK}{output}{Color.RESET}") + if error: + logging.error(f"Corrected command error: {error}") + print(f"\n{Color.RED}Error: {error}{Color.RESET}") except Exception as e: logging.exception(f"An exception occurred: {e}") print(f"{Color.RED}An exception occurred while executing the command: {e}{Color.RESET}", file=sys.stderr) From bba29939036ac2626f9230b0842ab65228a4e0c0 Mon Sep 17 00:00:00 2001 From: Soumya Mondal Date: Thu, 27 Mar 2025 03:31:24 +0530 Subject: [PATCH 4/5] Add session file generation for custom commands --- initialterm/main.py | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/initialterm/main.py b/initialterm/main.py index ec479f1..d746aa2 100644 --- a/initialterm/main.py +++ b/initialterm/main.py @@ -6,6 +6,7 @@ import sys import logging import json +import uuid # Configure logging logging.basicConfig(level=logging.DEBUG, @@ -25,6 +26,22 @@ class Color: PINK = '\033[95m' +def get_session_file(os_name): + """ + Gets the path to the session file. + + Parameters: + os_name (str): The operating system name. + + Returns: + str: The path to the session file. + """ + session_dir = os.path.join(os.path.expanduser("~"), ".conversation") + if not os.path.exists(session_dir): + os.makedirs(session_dir) + session_id = uuid.uuid4().hex + return os.path.join(session_dir, f"session_{os_name.lower()}_{session_id}.json") + def load_session(session_file): """ Loads the session data from a JSON file. 
@@ -165,18 +182,18 @@ def echo_and_execute(command, os_name, model_name, session_data): print(f"{Color.RED}An exception occurred while executing the command: {e}{Color.RESET}", file=sys.stderr) -def custom_cmd(os_name, model_name, session_file): +def custom_cmd(os_name, model_name): """ Starts a custom command prompt for executing user queries. Parameters: os_name (str): The operating system name. model_name (str): The model name to use for the API call. - session_file (str): The path to the session file. """ logging.info(f"Starting custom command prompt for {os_name} with model {model_name}") print(f"{Color.CYAN}Welcome to the Initial Terminal command prompt for {os_name} with model {model_name}!\n Ollama with {model_name} LLM running locally for inference\n{Color.RESET}\n Type quit/exit to exit") + session_file = get_session_file(os_name) session_data = load_session(session_file) while True: @@ -221,8 +238,7 @@ def start_custom_cmd(model_name='gemma3:4b'): os_name = os_name_mapping.get(os_name, 'Unsupported OS') - session_file = f"session_{os_name.lower()}.json" - custom_cmd(os_name, model_name, session_file) + custom_cmd(os_name, model_name) if __name__ == "__main__": From b94faca159dbc7a1920b9f6c15328cffe3bbcda1 Mon Sep 17 00:00:00 2001 From: Soumya Mondal Date: Tue, 29 Apr 2025 18:46:08 +0530 Subject: [PATCH 5/5] Update default model name to 'gemma3:latest' and improve start script path --- initialterm/main.py | 4 ++-- start.sh | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/initialterm/main.py b/initialterm/main.py index d746aa2..6dc7644 100644 --- a/initialterm/main.py +++ b/initialterm/main.py @@ -215,7 +215,7 @@ def custom_cmd(os_name, model_name): break -def start_custom_cmd(model_name='gemma3:4b'): +def start_custom_cmd(model_name='gemma3:latest'): """ Initializes the application and starts the custom command prompt. 
@@ -246,7 +246,7 @@ def start_custom_cmd(model_name='gemma3:4b'): parser = argparse.ArgumentParser() parser.add_argument('--spawn', action='store_true') - parser.add_argument('--model', type=str, default='gemma3:4b', help='Specify the model name to use') + parser.add_argument('--model', type=str, default='gemma3:latest', help='Specify the model name to use') args = parser.parse_args() if not args.spawn: diff --git a/start.sh b/start.sh index f89779e..f519727 100755 --- a/start.sh +++ b/start.sh @@ -1,7 +1,7 @@ #!/bin/bash - +cd /media/tyson/TYSON/initialterm/ # Activate the Python environment -source ~/initialterm/.venv/bin/activate +source /media/tyson/TYSON/initialterm/.venv/bin/activate # Start the initialterm command -initialterm \ No newline at end of file +initialterm