support workflow command #148

Open · wants to merge 9 commits into main
24 changes: 21 additions & 3 deletions devchat/_cli/prompt.py
@@ -1,6 +1,8 @@
 import json
+import sys
 from typing import List, Optional
 import rich_click as click
+from devchat.engine import run_command
 from devchat.assistant import Assistant
 from devchat.openai.openai_chat import OpenAIChat, OpenAIChatConfig
 from devchat.store import Store
@@ -24,10 +26,15 @@
               help='Path to a JSON file with functions for the prompt.')
 @click.option('-n', '--function-name',
               help='Specify the function name when the content is the output of a function.')
+@click.option('-ns', '--not-store', is_flag=True, default=False, required=False,
+              help='Do not save the conversation to the store.')
+@click.option('-a', '--auto', is_flag=True, default=False, required=False,
+              help='Answer question by function-calling.')
 def prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
            instruct: Optional[List[str]], context: Optional[List[str]],
            model: Optional[str], config_str: Optional[str] = None,
-           functions: Optional[str] = None, function_name: Optional[str] = None):
+           functions: Optional[str] = None, function_name: Optional[str] = None,
+           not_store: Optional[bool] = False, auto: Optional[bool] = False):
     """
     This command performs interactions with the specified large language model (LLM)
     by sending prompts and receiving responses.
@@ -82,9 +89,9 @@ def prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
         openai_config = OpenAIChatConfig(model=model, **parameters_data)

         chat = OpenAIChat(openai_config)
-        store = Store(repo_chat_dir, chat)
+        chat_store = Store(repo_chat_dir, chat)

-        assistant = Assistant(chat, store, config.max_input_tokens)
+        assistant = Assistant(chat, chat_store, config.max_input_tokens, not not_store)

         functions_data = None
         if functions is not None:
@@ -94,5 +101,16 @@ def prompt(content: Optional[str], parent: Optional[str], reference: Optional[List[str]],
                                parent=parent, references=reference,
                                function_name=function_name)

+        click.echo(assistant.prompt.formatted_header())
+        command_result = run_command(
+            model,
+            assistant.prompt.messages,
+            content,
+            parent,
+            context_contents,
+            auto)
+        if command_result is not None:
+            sys.exit(command_result[0])
+
         for response in assistant.iterate_response():
             click.echo(response, nl=False)
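Note: run_command returns None when the input is not a workflow command, in which case prompt falls through to the normal LLM round trip; otherwise the CLI exits with the workflow's status code. A quick way to exercise the two new flags is click's test runner; a minimal sketch, assuming a configured repo and OPENAI_API_KEY in the environment (the "/commit" input is illustrative):

    from click.testing import CliRunner
    from devchat._cli.prompt import prompt

    runner = CliRunner()
    # -a lets the LLM fill in workflow parameters via function-calling;
    # -ns skips saving the conversation to the store.
    result = runner.invoke(prompt, ['-a', '-ns', '/commit update the README'])
    print(result.exit_code, result.output)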
20 changes: 12 additions & 8 deletions devchat/assistant.py
@@ -4,6 +4,7 @@
 import openai
 from devchat.message import Message
 from devchat.chat import Chat
+from devchat.openai.openai_prompt import OpenAIPrompt
 from devchat.store import Store
 from devchat.utils import get_logger

@@ -12,7 +13,7 @@


 class Assistant:
-    def __init__(self, chat: Chat, store: Store, max_prompt_tokens: int):
+    def __init__(self, chat: Chat, store: Store, max_prompt_tokens: int, need_store: bool):
         """
         Initializes an Assistant object.

@@ -23,6 +24,11 @@ def __init__(self, chat: Chat, store: Store, max_prompt_tokens: int):
         self._store = store
         self._prompt = None
         self.token_limit = max_prompt_tokens
+        self._need_store = need_store
+
+    @property
+    def prompt(self) -> OpenAIPrompt:
+        return self._prompt

     @property
     def available_tokens(self) -> int:
@@ -92,7 +98,6 @@ def iterate_response(self) -> Iterator[str]:
             Iterator[str]: An iterator over response strings from the chat API.
         """
         if self._chat.config.stream:
-            first_chunk = True
             created_time = int(time.time())
             config_params = self._chat.config.dict(exclude_unset=True)
             for chunk in self._chat.stream_response(self._prompt):
@@ -114,21 +119,20 @@ def iterate_response(self) -> Iterator[str]:
                         chunk['choices'][0]['delta']['role']='assistant'

                 delta = self._prompt.append_response(json.dumps(chunk))
-                if first_chunk:
-                    first_chunk = False
-                    yield self._prompt.formatted_header()
                 yield delta
             if not self._prompt.responses:
                 raise RuntimeError("No responses returned from the chat API")
-            self._store.store_prompt(self._prompt)
-            yield self._prompt.formatted_footer(0) + '\n'
+            if self._need_store:
+                self._store.store_prompt(self._prompt)
+                yield self._prompt.formatted_footer(0) + '\n'
             for index in range(1, len(self._prompt.responses)):
                 yield self._prompt.formatted_full_response(index) + '\n'
         else:
             response_str = self._chat.complete_response(self._prompt)
             self._prompt.set_response(response_str)
             if not self._prompt.responses:
                 raise RuntimeError("No responses returned from the chat API")
-            self._store.store_prompt(self._prompt)
+            if self._need_store:
+                self._store.store_prompt(self._prompt)
             for index in range(len(self._prompt.responses)):
                 yield self._prompt.formatted_full_response(index) + '\n'
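With need_store=False, iterate_response still streams the response deltas but skips both the store write and the footer, since the footer (which reports the prompt hash) is now yielded only after the prompt is stored; the header moved to the caller via assistant.prompt.formatted_header(). A sketch of the wiring, reusing the objects built in prompt.py above (the positional make_prompt arguments are assumed; only the keyword ones appear in this diff):

    assistant = Assistant(chat, chat_store, config.max_input_tokens, need_store=False)
    assistant.make_prompt(content, instruct_contents, context_contents,
                          parent=parent, references=reference)
    for piece in assistant.iterate_response():
        print(piece, end='')  # deltas stream as before; nothing is persisted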
4 changes: 3 additions & 1 deletion devchat/engine/__init__.py
@@ -1,11 +1,13 @@
 from .command_parser import parse_command, Command, CommandParser
 from .namespace import Namespace
 from .recursive_prompter import RecursivePrompter
+from .router import run_command

 __all__ = [
     'parse_command',
     'Command',
     'CommandParser',
     'Namespace',
-    'RecursivePrompter'
+    'RecursivePrompter',
+    'run_command'
 ]
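The re-export is what lets prompt.py stay decoupled from the module layout:

    from devchat.engine import run_command  # resolves to devchat.engine.router.run_command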
198 changes: 198 additions & 0 deletions devchat/engine/command_runner.py
@@ -0,0 +1,198 @@
"""
Run a command with the given input text.
"""
import os
import sys
import json
import threading
import subprocess
from typing import Dict, List
import shlex

import openai

from devchat.utils import get_logger
from .command_parser import Command


logger = get_logger(__name__)


# Equivalent of CommandRun in Python, which executes subprocesses
class CommandRunner:
    def __init__(self, model_name: str):
        self.process = None
        self._model_name = model_name

    def _call_function_by_llm(self,
                              command_name: str,
                              command: Command,
                              history_messages: List[dict]):
        """
        The command needs multiple parameters, so we ask the LLM to parse
        each parameter out of input_text.
        """
        properties = {}
        required = []
        for key, value in command.parameters.items():
            properties[key] = {}
            for key1, value1 in value.dict().items():
                if key1 not in ['type', 'description', 'enum'] or value1 is None:
                    continue
                properties[key][key1] = value1
            required.append(key)

        tools = [
            {
                "type": "function",
                "function": {
                    "name": command_name,
                    "description": command.description,
                    "parameters": {
                        "type": "object",
                        "properties": properties,
                        "required": required,
                    },
                }
            }
        ]

        client = openai.OpenAI(
            api_key=os.environ.get("OPENAI_API_KEY", None),
            base_url=os.environ.get("OPENAI_API_BASE", None)
        )

        connection_error = ''
        for _ in range(3):
            try:
                response = client.chat.completions.create(
                    messages=history_messages,
                    model="gpt-3.5-turbo-16k",
                    stream=False,
                    tools=tools,
                    tool_choice={"type": "function", "function": {"name": command_name}}
                )

                response_message = response.dict()["choices"][0]["message"]
                if not response_message['tool_calls']:
                    return None
                tool_call = response_message['tool_calls'][0]['function']
                if tool_call['name'] != command_name:
                    return None
                parameters = json.loads(tool_call['arguments'])
                return parameters
            except (ConnectionError, openai.APIConnectionError) as err:
                connection_error = err
                continue
            except Exception as err:
                print("Exception:", err, file=sys.stderr, flush=True)
                logger.exception("Call command by LLM error: %s", err)
                return None
        print("Connection error:", connection_error, file=sys.stderr, flush=True)
        return None


    def run_command(self,
                    command_name: str,
                    command: Command,
                    history_messages: List[dict],
                    input_text: str,
                    parent_hash: str,
                    context_contents: List[str]):
        """
        If the command has parameters, generate their values from the input
        by LLM. If command.input is "required" and the input is empty,
        return an error.
        """
        if command.parameters and len(command.parameters) > 0:
            if not self._model_name.startswith("gpt-"):
                return None

            arguments = self._call_function_by_llm(command_name, command, history_messages)
            if not arguments:
                print("No valid parameters generated by LLM", file=sys.stderr, flush=True)
                return (-1, "")
            return self.run_command_with_parameters(
                command,
                {
                    "input": input_text.strip().replace(f'/{command_name}', ''),
                    **arguments
                },
                parent_hash,
                context_contents)

        return self.run_command_with_parameters(
            command,
            {
                "input": input_text.strip().replace(f'/{command_name}', '')
            },
            parent_hash,
            context_contents)


    def run_command_with_parameters(self,
                                    command: Command,
                                    parameters: Dict[str, str],
                                    parent_hash: str,
                                    context_contents: List[str]):
        """
        Replace $xxx in command.steps[0]["run"] with parameters[xxx],
        then run command.steps[0]["run"].
        """
        def pipe_reader(pipe, out_data, out_flag):
            while pipe:
                data = pipe.read(1)
                if data == '':
                    break
                out_data['out'] += data
                print(data, end='', file=out_flag, flush=True)

        try:
            # add environment variables to parameters
            if parent_hash:
                os.environ['PARENT_HASH'] = parent_hash
            if context_contents:
                os.environ['CONTEXT_CONTENTS'] = json.dumps(context_contents)
            for env_var in os.environ:
                parameters[env_var] = os.environ[env_var]
            if 'command_python' in os.environ:  # avoid KeyError when unset
                parameters["command_python"] = os.environ['command_python']

            command_run = command.steps[0]["run"]
            # Replace parameters in command run
            for parameter in parameters:
                command_run = command_run.replace('$' + parameter, str(parameters[parameter]))

            # Run command_run
            env = os.environ.copy()
            if 'PYTHONPATH' in env:
                del env['PYTHONPATH']
            with subprocess.Popen(
                shlex.split(command_run),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,  # keep stderr separate so its reader thread has a pipe
                env=env,
                text=True
            ) as process:

                stdout_data = {'out': ''}
                stderr_data = {'out': ''}

                stdout_thread = threading.Thread(
                    target=pipe_reader,
                    args=(process.stdout, stdout_data, sys.stdout))
                stderr_thread = threading.Thread(
                    target=pipe_reader,
                    args=(process.stderr, stderr_data, sys.stderr))

                stdout_thread.start()
                stderr_thread.start()

                stdout_thread.join()
                stderr_thread.join()
                exit_code = process.wait()
                return (exit_code, stdout_data["out"])
        except Exception as err:
            print("Exception:", type(err), err, file=sys.stderr, flush=True)
            return (-1, "")
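The $xxx substitution in run_command_with_parameters is plain string replacement over the step's run template. A self-contained illustration (the template and values here are invented for the example):

    template = 'echo "$input" --parent "$PARENT_HASH"'
    values = {'input': 'update the README', 'PARENT_HASH': 'abc123'}
    for name, value in values.items():
        template = template.replace('$' + name, str(value))
    print(template)  # echo "update the README" --parent "abc123"

Two caveats worth flagging in review: plain str.replace lets a parameter whose name is a prefix of another (PARENT vs PARENT_HASH, say) clobber the longer token depending on iteration order, and substituted values are not shell-quoted before shlex.split tokenizes the command.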