-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcommand-anything.py
More file actions
executable file
·118 lines (100 loc) · 3.96 KB
/
command-anything.py
File metadata and controls
executable file
·118 lines (100 loc) · 3.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
#!/usr/bin/env python3
import argparse
import sys
import time
import threading
import itertools
import asyncio
from pydantic import BaseModel, Field
from typing import List
from openai import AsyncOpenAI
class LoadingAnimation:
    """Terminal spinner rendered on stdout while a long-running task runs.

    Usage: call ``start(status)`` before the task and ``stop()`` after.
    The spinner runs on a background thread and erases its own line when
    stopped.
    """

    def __init__(self):
        # Braille-dot frames, cycled forever; ``busy`` is the loop flag the
        # animation thread polls.
        self.spinner = itertools.cycle(['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'])
        self.busy = False
        self.thread = None
        self.current_status = ""

    def animate(self):
        """Render one spinner frame every 0.1 s until ``busy`` is cleared."""
        while self.busy:
            sys.stdout.write(f'\r{next(self.spinner)} {self.current_status}')
            sys.stdout.flush()
            time.sleep(0.1)
        # Erase the spinner line: frame char + space + status text.
        sys.stdout.write('\r' + ' ' * (len(self.current_status) + 2) + '\r')
        sys.stdout.flush()

    def start(self, status):
        """Start animating with *status* as the message text.

        Fix: a second ``start()`` without an intervening ``stop()`` used to
        leak the previous thread, leaving two writers fighting over stdout;
        now the old animation is stopped first.
        """
        if self.busy:
            self.stop()
        self.current_status = status
        self.busy = True
        # Fix: daemon thread so a crash on the main thread can never hang
        # interpreter exit waiting on the spinner.
        self.thread = threading.Thread(target=self.animate, daemon=True)
        self.thread.start()

    def stop(self):
        """Stop the animation and wait for the thread to clear the line."""
        self.busy = False
        if self.thread:
            self.thread.join()
            self.thread = None
async def main():
    """CLI entry point: generate a script via the OpenAI API, preview it,
    and optionally execute it.

    Parses argv, asks the model for a structured (reasoning + script)
    response, prints both, then prompts the user before running the script.
    Exits with status 1 on API error, refusal, or missing parse.
    """
    parser = argparse.ArgumentParser(description='Generate, preview, and optionally execute a script from an LLM.')
    parser.add_argument('prompt', nargs='*', help='The prompt to send to the LLM.')
    parser.add_argument('-m', '--model', default='gpt-4o', help='The model to use.')
    parser.add_argument('-p', '--parameters', nargs='*', help='Additional parameters for the prompt.')
    parser.add_argument('-l', '--language', default='python', help='Programming language of the script.')
    args = parser.parse_args()

    loading = LoadingAnimation()
    client = AsyncOpenAI()

    prompt_text = ' '.join(args.prompt)
    if args.parameters:
        prompt_text += ' ' + ' '.join(args.parameters)

    # Pydantic model describing the structured response we ask the API for.
    class ScriptOutput(BaseModel):
        reasoning: str = Field(..., description="Explanation of how the script works.")
        script: str = Field(..., description=f"The {args.language} script code.")

    prompt_with_instructions = f"Write a {args.language} script that does the following:\n\n{prompt_text}\n\n" \
                               f"Provide a brief reasoning and the script in JSON format matching the specified schema."

    try:
        # Only show animation while waiting for the API response.
        loading.start("Waiting for LLM response...")
        completion = await client.beta.chat.completions.parse(
            model=args.model,
            messages=[
                {'role': 'user', 'content': prompt_with_instructions}
            ],
            response_format=ScriptOutput,
            temperature=0,
        )
    except Exception as e:
        print(f"\nError communicating with OpenAI API: {e}")
        sys.exit(1)
    finally:
        # Always stop the spinner, on success or failure.
        loading.stop()

    # Extract the assistant's response.  Fix: on the OpenAI SDK the message
    # object always HAS a `parsed` attribute (possibly None), so the old
    # hasattr() checks made the refusal branch unreachable and crashed on a
    # None parse — test the values instead.
    message = completion.choices[0].message
    parsed = getattr(message, 'parsed', None)
    if parsed is not None:
        reasoning = parsed.reasoning
        script = parsed.script
    elif getattr(message, 'refusal', None):
        print("\nThe assistant refused to provide a script.")
        sys.exit(1)
    else:
        print("\nError: No parsed response received.")
        sys.exit(1)

    print('\nReasoning:\n')
    print(reasoning)
    print('\nGenerated Script:\n')
    print(script)
    print('\nDo you want to execute this script? (y/n)')
    choice = input().lower()
    if choice == 'yes' or choice == 'y':
        if args.language.lower() == 'python':
            try:
                # SECURITY: exec() runs arbitrary model-generated code with
                # full local privileges — the user has previewed it above,
                # but this is inherently unsafe; consider sandboxing.
                # Fix: no spinner here — the spinner thread's '\r' rewinds
                # would garble the executed script's own stdout.
                exec_globals = {}
                exec(script, exec_globals)
            except Exception as e:
                print(f"\nAn error occurred while executing the script: {e}")
        else:
            print('\nAutomatic execution is only supported for Python scripts.')
    else:
        print('\nExecution cancelled.')
if __name__ == '__main__':
    # Entry point: run the async CLI workflow on a fresh event loop.
    asyncio.run(main())