|
12 | 12 | # limitations under the License. |
13 | 13 | # =========== Copyright 2024 @ CAMEL-AI.org. All Rights Reserved. =========== |
14 | 14 | import os |
15 | | -from time import sleep |
16 | 15 | from typing import Any |
17 | 16 |
|
18 | 17 | from PIL.Image import Image |
| 18 | +from tenacity import retry, stop_after_attempt, wait_fixed |
19 | 19 |
|
20 | 20 | from crab import Action, ActionOutput, BackendModel, BackendOutput, Message, MessageType |
21 | 21 | from crab.utils.common import base64_to_image, json_expand_refs |
|
28 | 28 | Part, |
29 | 29 | Tool, |
30 | 30 | ) |
31 | | - from google.api_core.exceptions import ResourceExhausted |
32 | 31 | from google.generativeai.types import content_types |
33 | 32 |
|
34 | 33 | gemini_model_enable = True |
@@ -121,40 +120,31 @@ def record_message( |
121 | 120 | {"role": response_message.role, "parts": response_message.parts} |
122 | 121 | ) |
123 | 122 |
|
| 123 | + @retry(wait=wait_fixed(10), stop=stop_after_attempt(7)) |
124 | 124 | def call_api(self, request_messages: list) -> Content: |
125 | | - while True: |
126 | | - try: |
127 | | - if self.action_schema is not None: |
128 | | - tool_config = content_types.to_tool_config( |
129 | | - { |
130 | | - "function_calling_config": { |
131 | | - "mode": "ANY" if self.tool_call_required else "AUTO" |
132 | | - } |
133 | | - } |
134 | | - ) |
135 | | - response = self.client.GenerativeModel( |
136 | | - self.model, system_instruction=self.system_message |
137 | | - ).generate_content( |
138 | | - contents=request_messages, |
139 | | - tools=self.action_schema, |
140 | | - tool_config=tool_config, |
141 | | - # **self.parameters, |
142 | | - ) |
143 | | - else: |
144 | | - response = self.client.GenerativeModel( |
145 | | - self.model, system_instruction=self.system_message |
146 | | - ).generate_content( |
147 | | - contents=request_messages, |
148 | | - # **self.parameters, |
149 | | - ) |
150 | | - except ResourceExhausted: |
151 | | - print( |
152 | | - "ResourceExhausted: 429 Resource has been exhausted.", |
153 | | - " Please waiting...", |
154 | | - ) |
155 | | - sleep(10) |
156 | | - else: |
157 | | - break |
| 125 | + if self.action_schema is not None: |
| 126 | + tool_config = content_types.to_tool_config( |
| 127 | + { |
| 128 | + "function_calling_config": { |
| 129 | + "mode": "ANY" if self.tool_call_required else "AUTO" |
| 130 | + } |
| 131 | + } |
| 132 | + ) |
| 133 | + response = self.client.GenerativeModel( |
| 134 | + self.model, system_instruction=self.system_message |
| 135 | + ).generate_content( |
| 136 | + contents=request_messages, |
| 137 | + tools=self.action_schema, |
| 138 | + tool_config=tool_config, |
| 139 | + # **self.parameters, # TODO(Tianqi): Fix this line in the future |
| 140 | + ) |
| 141 | + else: |
| 142 | + response = self.client.GenerativeModel( |
| 143 | + self.model, system_instruction=self.system_message |
| 144 | + ).generate_content( |
| 145 | + contents=request_messages, |
| 146 | + # **self.parameters, # TODO(Tianqi): Fix this line in the future |
| 147 | + ) |
158 | 148 |
|
159 | 149 | self.token_usage += response.candidates[0].token_count |
160 | 150 | return response.candidates[0].content |
|
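A note on the change, with a sketch. The removed loop retried only on `ResourceExhausted` (the 429 quota error) and looped forever; the new `@retry(wait=wait_fixed(10), stop=stop_after_attempt(7))` decorator waits 10 s between attempts and gives up after 7, but as written it retries on *any* exception, not just quota errors. tenacity's `retry_if_exception_type` can restore the narrower behavior. Below is a minimal, self-contained sketch of that variant (not code from this PR; `QuotaError` is a hypothetical stand-in for `google.api_core.exceptions.ResourceExhausted`):

```python
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed


class QuotaError(Exception):
    """Hypothetical stand-in for google.api_core.exceptions.ResourceExhausted."""


attempts = {"n": 0}


@retry(
    wait=wait_fixed(10),  # pause 10 s between attempts, like the removed sleep(10)
    stop=stop_after_attempt(7),  # unlike `while True`, give up after 7 attempts
    retry=retry_if_exception_type(QuotaError),  # mirror the old `except ResourceExhausted`
    reraise=True,  # on final failure, re-raise QuotaError instead of tenacity's RetryError
)
def call_api() -> str:
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise QuotaError("429 Resource has been exhausted")
    return "response"


print(call_api())  # succeeds on the third attempt, after two 10 s waits
```

Whether retry-on-everything is intended here is a design choice: it also covers transient network errors, at the cost of retrying genuine bugs (e.g. a `TypeError`) seven times before surfacing them.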