Commit bc627da

Update camel_example and camel_rag_example

1 parent b140715

4 files changed: +61 -12 lines changed
Lines changed: 15 additions & 0 deletions

@@ -0,0 +1,15 @@
+{
+    "description": "Using Firefox, navigate to the CAMEL-AI GitHub repository (https://github.com/camel-ai/camel). Download the example code file 'examples/role_playing.py', then use Visual Studio Code to open it and save a copy to '/home/crab/camel_examples/role_playing.py'.",
+    "tasks": [
+        {
+            "task": "a313ea4d-e501-4971-b4fe-db2aad19eac1",
+            "attribute": {
+                "url": "https://raw.githubusercontent.com/camel-ai/camel/master/examples/role_playing.py",
+                "file_path": "/home/crab/camel_examples/role_playing.py"
+            },
+            "output": "/home/crab/camel_examples/role_playing.py"
+        }
+    ],
+    "adjlist": "0",
+    "id": "camel-example-001"
+}
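
Reviewer note: below is a minimal standalone sketch of how this task config could be checked by hand, assuming the "url" attribute points at the expected source file and "file_path"/"output" name the copy the agent is asked to save. The function name check_task and the config path argument are illustrative only and are not part of crab.

import json

import requests


def check_task(config_path: str) -> bool:
    """Hypothetical check: the file saved at file_path must match the file at url."""
    with open(config_path, "r", encoding="utf-8") as f:
        config = json.load(f)
    for task in config["tasks"]:
        attr = task["attribute"]
        expected = requests.get(attr["url"], timeout=30).text
        try:
            with open(attr["file_path"], "r", encoding="utf-8") as f:
                saved = f.read()
        except FileNotFoundError:
            return False  # the agent never saved the copy
        if saved != expected:
            return False
    return True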

crab/agents/backend_models/camel_rag_model.py

Lines changed: 26 additions & 6 deletions
@@ -16,6 +16,7 @@
 from crab import BackendOutput, MessageType
 from crab.agents.backend_models.camel_model import CamelModel
 from camel.messages import BaseMessage
+from langchain.schema import Document

 try:
     from camel.embeddings import OpenAIEmbedding
@@ -83,13 +84,19 @@ def _enhance_with_context(self, messages: List[Tuple[str, MessageType]]) -> List
             ""
         )

-        retrieved_info = self.retriever.query(
-            query=query,
-            top_k=self.top_k,
-            similarity_threshold=self.similarity_threshold,
-        )
+        try:
+            retrieved_info = self.retriever.query(
+                query=query,
+                top_k=self.top_k,
+                similarity_threshold=self.similarity_threshold,
+            )
+        except Exception:
+            return messages
+
+        if not retrieved_info:
+            return messages

-        if not retrieved_info or retrieved_info[0].get('text', '').startswith('No suitable information'):
+        if not retrieved_info[0].get('payload'):
             return messages

         context = "Relevant context:\n\n"
@@ -106,3 +113,16 @@ def chat(self, messages: List[Tuple[str, MessageType]]) -> BackendOutput:
     def chat(self, messages: List[Tuple[str, MessageType]]) -> BackendOutput:
         enhanced_messages = self._enhance_with_context(messages)
         return super().chat(enhanced_messages)
+
+    def get_relevant_content(self, query: str) -> List[Document]:
+        if not self.vector_storage:
+            return []
+
+        try:
+            return self.retriever.query(
+                query=query,
+                top_k=self.top_k,
+                similarity_threshold=self.similarity_threshold,
+            )
+        except Exception:
+            return []
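
For context, a minimal sketch of how the new get_relevant_content helper might be exercised on its own. Here `model` stands in for an already configured CamelRagModel with a populated vector store (its constructor is not part of this diff), and the dict keys mirror the ones camel_rag_example.py reads below.

def show_relevant(model, query: str) -> None:
    # Hypothetical usage; `model` is assumed to be a configured CamelRagModel.
    results = model.get_relevant_content(query)
    if not results:
        print("No relevant content found")
        return
    for idx, item in enumerate(results, 1):
        if isinstance(item, dict):
            # Retriever results are treated as dicts with 'content path' and
            # 'text' keys, matching the handling in camel_rag_example.py.
            print(f"{idx}. {item.get('content path', 'Unknown')}: {item.get('text', '')[:200]}")
        else:
            print(f"{idx}. {str(item)[:200]}")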

examples/camel_example.py

Lines changed: 1 addition & 0 deletions
@@ -62,6 +62,7 @@ def start_benchmark(benchmark: Benchmark, agent: SingleAgentPolicy):

 if __name__ == "__main__":
     benchmark = create_benchmark(template_benchmark_config)
+    #TODO: Use new task config
     task, action_space = benchmark.start_task("0")
     env_descriptions = benchmark.get_env_descriptions()
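
A hedged sketch of the kind of wiring the TODO above points at: reading the new task config straight from JSON before starting the benchmark. The path used here is a placeholder, since the config's filename is not visible in this commit view.

import json

# Hypothetical follow-up to the TODO: load the new task config and show it
# before starting the benchmark. The path below is a placeholder.
with open("camel_example_task.json", "r", encoding="utf-8") as f:
    task_config = json.load(f)
print(f"Loaded task {task_config['id']}: {task_config['description']}")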

examples/camel_rag_example.py

Lines changed: 19 additions & 6 deletions
@@ -23,21 +23,38 @@
 from crab.benchmarks.template import template_benchmark_config
 from camel.types import ModelType, ModelPlatformType

-
+# TODO: Add new benchmark template
 def start_benchmark(benchmark: Benchmark, agent: SingleAgentPolicy):
     for step in range(20):
         print("=" * 40)
         print(f"Start agent step {step}:")
         observation = benchmark.observe()["template_env"]
         print(f"Current environment observation: {observation}")
+
+        try:
+            rag_content = agent.model_backend.get_relevant_content(str(observation))
+            print(colored("\nRelevant RAG content:", "magenta"))
+            if rag_content:
+                for idx, content in enumerate(rag_content, 1):
+                    print(colored(f"\nDocument {idx}:", "magenta"))
+                    if isinstance(content, dict):
+                        print(colored(f"Source: {content.get('content path', 'Unknown')}", "yellow"))
+                        print(colored(f"Content: {content.get('text', '')[:500]}...", "white"))
+                    else:
+                        print(colored(f"Content: {str(content)[:500]}...", "white"))
+            else:
+                print(colored("No relevant content found", "yellow"))
+        except Exception as e:
+            print(colored(f"Error retrieving RAG content: {str(e)}", "red"))
+
         response = agent.chat(
             {
                 "template_env": [
                     (f"Current environment observation: {observation}", 0),
                 ]
             }
         )
-        print(colored(f"Agent take action: {response}", "blue"))
+        print(colored(f"\nAgent take action: {response}", "blue"))

         for action in response:
             response = benchmark.step(
@@ -74,7 +91,6 @@ def prepare_vim_docs():
     response = requests.get(base_url)
     soup = BeautifulSoup(response.text, 'html.parser')

-    # Process the main page first
     main_content = soup.get_text(separator='\n', strip=True)
     with open(os.path.join(content_dir, "main.txt"), 'w', encoding='utf-8') as f:
         f.write(f"Source: {base_url}\n\n{main_content}")
@@ -91,14 +107,12 @@ def prepare_vim_docs():
         try:
             print(colored(f"Processing page {idx}/{total_links}: {href}", "yellow"))

-            # Fetch and process page
             page_response = requests.get(full_url)
             page_soup = BeautifulSoup(page_response.text, 'html.parser')
             for tag in page_soup(['script', 'style']):
                 tag.decompose()
             content = page_soup.get_text(separator='\n', strip=True)

-            # Save content
             filename = os.path.join(content_dir, f"{href.replace('/', '_')}.txt")
             with open(filename, 'w', encoding='utf-8') as f:
                 f.write(f"Source: {full_url}\n\n{content}")
@@ -115,7 +129,6 @@ def prepare_vim_docs():
 if __name__ == "__main__":
     print(colored("=== Starting RAG-enhanced benchmark ===", "cyan"))

-    # Initialize benchmark and environment
     print(colored("\nInitializing benchmark environment...", "yellow"))
     benchmark = create_benchmark(template_benchmark_config)
     task, action_space = benchmark.start_task("0")
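
As a small companion to prepare_vim_docs() above, here is a hedged sketch of reading the saved pages back. It relies only on the "Source: <url>" header and blank line that the function writes; the helper name load_scraped_docs is illustrative and not part of this commit.

import os


def load_scraped_docs(content_dir: str) -> list[tuple[str, str]]:
    # Hypothetical reader for the files prepare_vim_docs() writes:
    # each file is "Source: <url>\n\n<page text>".
    docs = []
    for name in sorted(os.listdir(content_dir)):
        if not name.endswith(".txt"):
            continue
        with open(os.path.join(content_dir, name), "r", encoding="utf-8") as f:
            raw = f.read()
        header, _, body = raw.partition("\n\n")
        source = header.removeprefix("Source: ").strip()
        docs.append((source, body))
    return docs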
