
Commit a593c32

collijk and ntindle authored
Move more app files to app package (Significant-Gravitas#5036)
Co-authored-by: Nicholas Tindle <[email protected]>
1 parent 7cd407b commit a593c32

10 files changed: +186 additions, -201 deletions

autogpt/app/main.py

Lines changed: 8 additions & 8 deletions
@@ -13,6 +13,14 @@
 from autogpt.agents import Agent, AgentThoughts, CommandArgs, CommandName
 from autogpt.app.configurator import create_config
 from autogpt.app.setup import prompt_user
+from autogpt.app.spinner import Spinner
+from autogpt.app.utils import (
+    clean_input,
+    get_current_git_branch,
+    get_latest_bulletin,
+    get_legal_warning,
+    markdown_to_ansi_style,
+)
 from autogpt.commands import COMMAND_CATEGORIES
 from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key
 from autogpt.llm.api_manager import ApiManager
@@ -22,14 +30,6 @@
 from autogpt.plugins import scan_plugins
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
 from autogpt.speech import say_text
-from autogpt.spinner import Spinner
-from autogpt.utils import (
-    clean_input,
-    get_current_git_branch,
-    get_latest_bulletin,
-    get_legal_warning,
-    markdown_to_ansi_style,
-)
 from autogpt.workspace import Workspace
 from scripts.install_plugin_deps import install_plugin_dependencies
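
For downstream code, the practical effect of this commit is an import-path change: helpers that used to live at the package root are now imported from the autogpt.app package. The sketch below is illustrative only and is not part of the diff; confirm_and_run is a hypothetical helper, and it assumes Spinner can be used as a context manager as it is elsewhere in the codebase.

# Hypothetical call site, not part of this commit.
#
# Before this commit:
#   from autogpt.spinner import Spinner
#   from autogpt.utils import clean_input
#
# After this commit:
from autogpt.app.spinner import Spinner
from autogpt.app.utils import clean_input


def confirm_and_run(config, task: str) -> None:
    """Hypothetical helper: ask the user to confirm, then run the task behind a spinner."""
    answer = clean_input(config, f"Run task '{task}'? (y/n) ")
    if answer != config.authorise_key:
        return
    with Spinner("Working... "):  # assumes Spinner is a context manager, as used elsewhere
        ...  # actual work would go here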

autogpt/app/setup.py

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 from colorama import Fore, Style
 from jinja2 import Template
 
-from autogpt import utils
+from autogpt.app import utils
 from autogpt.config import Config
 from autogpt.config.ai_config import AIConfig
 from autogpt.llm.base import ChatSequence, Message
File renamed without changes.

autogpt/app/utils.py

Lines changed: 147 additions & 0 deletions
@@ -0,0 +1,147 @@
+import os
+import re
+
+import requests
+from colorama import Fore, Style
+from git.repo import Repo
+from prompt_toolkit import ANSI, PromptSession
+from prompt_toolkit.history import InMemoryHistory
+
+from autogpt.config import Config
+from autogpt.logs import logger
+
+session = PromptSession(history=InMemoryHistory())
+
+
+def clean_input(config: Config, prompt: str = "", talk=False):
+    try:
+        if config.chat_messages_enabled:
+            for plugin in config.plugins:
+                if not hasattr(plugin, "can_handle_user_input"):
+                    continue
+                if not plugin.can_handle_user_input(user_input=prompt):
+                    continue
+                plugin_response = plugin.user_input(user_input=prompt)
+                if not plugin_response:
+                    continue
+                if plugin_response.lower() in [
+                    "yes",
+                    "yeah",
+                    "y",
+                    "ok",
+                    "okay",
+                    "sure",
+                    "alright",
+                ]:
+                    return config.authorise_key
+                elif plugin_response.lower() in [
+                    "no",
+                    "nope",
+                    "n",
+                    "negative",
+                ]:
+                    return config.exit_key
+                return plugin_response
+
+        # ask for input, default when just pressing Enter is y
+        logger.info("Asking user via keyboard...")
+
+        # handle_sigint must be set to False, so the signal handler in the
+        # autogpt/main.py could be employed properly. This referes to
+        # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
+        answer = session.prompt(ANSI(prompt), handle_sigint=False)
+        return answer
+    except KeyboardInterrupt:
+        logger.info("You interrupted Auto-GPT")
+        logger.info("Quitting...")
+        exit(0)
+
+
+def get_bulletin_from_web():
+    try:
+        response = requests.get(
+            "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
+        )
+        if response.status_code == 200:
+            return response.text
+    except requests.exceptions.RequestException:
+        pass
+
+    return ""
+
+
+def get_current_git_branch() -> str:
+    try:
+        repo = Repo(search_parent_directories=True)
+        branch = repo.active_branch
+        return branch.name
+    except:
+        return ""
+
+
+def get_latest_bulletin() -> tuple[str, bool]:
+    exists = os.path.exists("data/CURRENT_BULLETIN.md")
+    current_bulletin = ""
+    if exists:
+        current_bulletin = open(
+            "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
+        ).read()
+    new_bulletin = get_bulletin_from_web()
+    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin
+
+    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
+    if new_bulletin or current_bulletin:
+        news_header += (
+            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
+            "If you don't wish to see this message, you "
+            "can run Auto-GPT with the *--skip-news* flag.\n"
+        )
+
+    if new_bulletin and is_new_news:
+        open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
+        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"
+
+    return f"{news_header}\n{current_bulletin}", is_new_news
+
+
+def markdown_to_ansi_style(markdown: str):
+    ansi_lines: list[str] = []
+    for line in markdown.split("\n"):
+        line_style = ""
+
+        if line.startswith("# "):
+            line_style += Style.BRIGHT
+        else:
+            line = re.sub(
+                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
+                rf"{Style.BRIGHT}\1{Style.NORMAL}",
+                line,
+            )
+
+        if re.match(r"^#+ ", line) is not None:
+            line_style += Fore.CYAN
+            line = re.sub(r"^#+ ", "", line)
+
+        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
+    return "\n".join(ansi_lines)
+
+
+def get_legal_warning() -> str:
+    legal_text = """
+## DISCLAIMER AND INDEMNIFICATION AGREEMENT
+### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
+
+## Introduction
+AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
+
+## No Liability for Actions of the System
+The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
+
+## User Responsibility and Respondeat Superior Liability
+As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
+behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
+
+## Indemnification
+By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
+"""
+    return legal_text
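
A minimal usage sketch of the relocated helpers, assuming the new import path introduced by this commit; it is not part of the diff. Note that get_latest_bulletin reads and writes data/CURRENT_BULLETIN.md and fetches the bulletin over the network.

# Illustrative only: exercising the helpers from their new location.
from autogpt.app.utils import (
    get_latest_bulletin,
    get_legal_warning,
    markdown_to_ansi_style,
)

bulletin, is_new = get_latest_bulletin()    # (bulletin text, True if it differs from the cached copy)
if is_new:
    print(markdown_to_ansi_style(bulletin))  # render markdown headings/emphasis as ANSI styling
print(markdown_to_ansi_style(get_legal_warning()))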

autogpt/processing/text.py

Lines changed: 9 additions & 1 deletion
@@ -10,7 +10,15 @@
 from autogpt.llm.providers.openai import OPEN_AI_MODELS
 from autogpt.llm.utils import count_string_tokens, create_chat_completion
 from autogpt.logs import logger
-from autogpt.utils import batch
+
+
+def batch(iterable, max_batch_length: int, overlap: int = 0):
+    """Batch data from iterable into slices of length N. The last batch may be shorter."""
+    # batched('ABCDEFG', 3) --> ABC DEF G
+    if max_batch_length < 1:
+        raise ValueError("n must be at least one")
+    for i in range(0, len(iterable), max_batch_length - overlap):
+        yield iterable[i : i + max_batch_length]
 
 
 def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
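
The batch helper was inlined here rather than imported from autogpt.utils. A quick illustration of its behaviour (not part of the diff): it yields windows of at most max_batch_length items, and overlap moves each window's start back so consecutive windows share items.

# Illustrative only: expected output is shown in the comments.
from autogpt.processing.text import batch

print(list(batch("ABCDEFG", 3)))             # ['ABC', 'DEF', 'G']
print(list(batch("ABCDEFG", 3, overlap=1)))  # ['ABC', 'CDE', 'EFG', 'G']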

autogpt/utils.py

Lines changed: 1 addition & 169 deletions
@@ -1,70 +1,5 @@
-import os
-import re
-
-import requests
 import yaml
-from colorama import Fore, Style
-from git.repo import Repo
-from prompt_toolkit import ANSI, PromptSession
-from prompt_toolkit.history import InMemoryHistory
-
-from autogpt.config import Config
-from autogpt.logs import logger
-
-session = PromptSession(history=InMemoryHistory())
-
-
-def batch(iterable, max_batch_length: int, overlap: int = 0):
-    """Batch data from iterable into slices of length N. The last batch may be shorter."""
-    # batched('ABCDEFG', 3) --> ABC DEF G
-    if max_batch_length < 1:
-        raise ValueError("n must be at least one")
-    for i in range(0, len(iterable), max_batch_length - overlap):
-        yield iterable[i : i + max_batch_length]
-
-
-def clean_input(config: Config, prompt: str = "", talk=False):
-    try:
-        if config.chat_messages_enabled:
-            for plugin in config.plugins:
-                if not hasattr(plugin, "can_handle_user_input"):
-                    continue
-                if not plugin.can_handle_user_input(user_input=prompt):
-                    continue
-                plugin_response = plugin.user_input(user_input=prompt)
-                if not plugin_response:
-                    continue
-                if plugin_response.lower() in [
-                    "yes",
-                    "yeah",
-                    "y",
-                    "ok",
-                    "okay",
-                    "sure",
-                    "alright",
-                ]:
-                    return config.authorise_key
-                elif plugin_response.lower() in [
-                    "no",
-                    "nope",
-                    "n",
-                    "negative",
-                ]:
-                    return config.exit_key
-                return plugin_response
-
-        # ask for input, default when just pressing Enter is y
-        logger.info("Asking user via keyboard...")
-
-        # handle_sigint must be set to False, so the signal handler in the
-        # autogpt/main.py could be employed properly. This referes to
-        # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
-        answer = session.prompt(ANSI(prompt), handle_sigint=False)
-        return answer
-    except KeyboardInterrupt:
-        logger.info("You interrupted Auto-GPT")
-        logger.info("Quitting...")
-        exit(0)
+from colorama import Fore
 
 
 def validate_yaml_file(file: str):
@@ -80,106 +15,3 @@ def validate_yaml_file(file: str):
         )
 
     return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
-
-
-def readable_file_size(size, decimal_places=2):
-    """Converts the given size in bytes to a readable format.
-    Args:
-        size: Size in bytes
-        decimal_places (int): Number of decimal places to display
-    """
-    for unit in ["B", "KB", "MB", "GB", "TB"]:
-        if size < 1024.0:
-            break
-        size /= 1024.0
-    return f"{size:.{decimal_places}f} {unit}"
-
-
-def get_bulletin_from_web():
-    try:
-        response = requests.get(
-            "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
-        )
-        if response.status_code == 200:
-            return response.text
-    except requests.exceptions.RequestException:
-        pass
-
-    return ""
-
-
-def get_current_git_branch() -> str:
-    try:
-        repo = Repo(search_parent_directories=True)
-        branch = repo.active_branch
-        return branch.name
-    except:
-        return ""
-
-
-def get_latest_bulletin() -> tuple[str, bool]:
-    exists = os.path.exists("data/CURRENT_BULLETIN.md")
-    current_bulletin = ""
-    if exists:
-        current_bulletin = open(
-            "data/CURRENT_BULLETIN.md", "r", encoding="utf-8"
-        ).read()
-    new_bulletin = get_bulletin_from_web()
-    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin
-
-    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
-    if new_bulletin or current_bulletin:
-        news_header += (
-            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
-            "If you don't wish to see this message, you "
-            "can run Auto-GPT with the *--skip-news* flag.\n"
-        )
-
-    if new_bulletin and is_new_news:
-        open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin)
-        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"
-
-    return f"{news_header}\n{current_bulletin}", is_new_news
-
-
-def markdown_to_ansi_style(markdown: str):
-    ansi_lines: list[str] = []
-    for line in markdown.split("\n"):
-        line_style = ""
-
-        if line.startswith("# "):
-            line_style += Style.BRIGHT
-        else:
-            line = re.sub(
-                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
-                rf"{Style.BRIGHT}\1{Style.NORMAL}",
-                line,
-            )
-
-        if re.match(r"^#+ ", line) is not None:
-            line_style += Fore.CYAN
-            line = re.sub(r"^#+ ", "", line)
-
-        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
-    return "\n".join(ansi_lines)
-
-
-def get_legal_warning() -> str:
-    legal_text = """
-## DISCLAIMER AND INDEMNIFICATION AGREEMENT
-### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.
-
-## Introduction
-AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.
-
-## No Liability for Actions of the System
-The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.
-
-## User Responsibility and Respondeat Superior Liability
-As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your
-behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.
-
-## Indemnification
-By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
-"""
-    return legal_text
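
After this commit, autogpt.utils keeps only the YAML validation helper. A hedged usage sketch (not part of the diff): only the success return is visible in this hunk, so the failure case is assumed to follow the same (bool, message) shape, and the file name used here is hypothetical.

# Illustrative only: validating a settings file with the helper that remains in autogpt.utils.
from autogpt.utils import validate_yaml_file

is_valid, message = validate_yaml_file("ai_settings.yaml")  # hypothetical file name
print(message)
if not is_valid:
    raise SystemExit(1)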

tests/challenges/utils.py

Lines changed: 3 additions & 1 deletion
@@ -38,7 +38,9 @@ def input_generator() -> Generator[str, None, None]:
         yield from input_sequence
 
     gen = input_generator()
-    monkeypatch.setattr("autogpt.utils.session.prompt", lambda _, **kwargs: next(gen))
+    monkeypatch.setattr(
+        "autogpt.app.utils.session.prompt", lambda _, **kwargs: next(gen)
+    )
 
 
 def setup_mock_log_cycle_agent_name(
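
The monkeypatch target must name the module that now owns session, because monkeypatch.setattr patches attributes by import path. A pytest-style sketch (not part of the diff; the config fixture is assumed, with chat_messages_enabled disabled so clean_input falls through to the prompt session):

# Illustrative only: patching the prompt session at its new import path.
def test_clean_input_uses_patched_session(monkeypatch, config):  # `config` fixture assumed
    from autogpt.app.utils import clean_input

    monkeypatch.setattr(
        "autogpt.app.utils.session.prompt", lambda _, **kwargs: "y"
    )
    assert clean_input(config, "Continue? ") == "y"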
