@@ -102,7 +102,7 @@ def run(
102102 top_p : float = 1.0 ,
103103 name : Text = "model_process" ,
104104 timeout : float = 300 ,
105- parameters : Optional [Dict ] = {} ,
105+ parameters : Optional [Dict ] = None ,
106106 wait_time : float = 0.5 ,
107107 ) -> Dict :
108108 """Synchronously running a Large Language Model (LLM) model.
@@ -117,21 +117,23 @@ def run(
117117 top_p (float, optional): Top P. Defaults to 1.0.
118118 name (Text, optional): ID given to a call. Defaults to "model_process".
119119 timeout (float, optional): total polling time. Defaults to 300.
120- parameters (Dict, optional): optional parameters to the model. Defaults to "{}" .
120+ parameters (Dict, optional): optional parameters to the model. Defaults to None .
121121 wait_time (float, optional): wait time in seconds between polling calls. Defaults to 0.5.
122122
123123 Returns:
124124 Dict: parsed output from model
125125 """
126126 start = time .time ()
127+ if parameters is None :
128+ parameters = {}
127129 parameters .update (
128130 {
129- "context" : parameters [ "context" ] if "context" in parameters else context ,
130- "prompt" : parameters [ "prompt" ] if "prompt" in parameters else prompt ,
131- "history" : parameters [ "history" ] if "history" in parameters else history ,
132- "temperature" : parameters [ "temperature" ] if "temperature" in parameters else temperature ,
133- "max_tokens" : parameters [ "max_tokens" ] if "max_tokens" in parameters else max_tokens ,
134- "top_p" : parameters [ "top_p" ] if "top_p" in parameters else top_p ,
131+ "context" : parameters . get ( "context" , context ) ,
132+ "prompt" : parameters . get ( "prompt" , prompt ) ,
133+ "history" : parameters . get ( "history" , history ) ,
134+ "temperature" : parameters . get ( "temperature" , temperature ) ,
135+ "max_tokens" : parameters . get ( "max_tokens" , max_tokens ) ,
136+ "top_p" : parameters . get ( "top_p" , top_p ) ,
135137 }
136138 )
137139 payload = build_payload (data = data , parameters = parameters )
@@ -160,7 +162,7 @@ def run_async(
160162 max_tokens : int = 128 ,
161163 top_p : float = 1.0 ,
162164 name : Text = "model_process" ,
163- parameters : Optional [Dict ] = {} ,
165+ parameters : Optional [Dict ] = None ,
164166 ) -> Dict :
165167 """Runs asynchronously a model call.
166168
@@ -173,21 +175,23 @@ def run_async(
173175 max_tokens (int, optional): Maximum Generation Tokens. Defaults to 128.
174176 top_p (float, optional): Top P. Defaults to 1.0.
175177 name (Text, optional): ID given to a call. Defaults to "model_process".
176- parameters (Dict, optional): optional parameters to the model. Defaults to "{}" .
178+ parameters (Dict, optional): optional parameters to the model. Defaults to None .
177179
178180 Returns:
179181 dict: polling URL in response
180182 """
181183 url = f"{ self .url } /{ self .id } "
182184 logging .debug (f"Model Run Async: Start service for { name } - { url } " )
185+ if parameters is None :
186+ parameters = {}
183187 parameters .update (
184188 {
185- "context" : parameters [ "context" ] if "context" in parameters else context ,
186- "prompt" : parameters [ "prompt" ] if "prompt" in parameters else prompt ,
187- "history" : parameters [ "history" ] if "history" in parameters else history ,
188- "temperature" : parameters [ "temperature" ] if "temperature" in parameters else temperature ,
189- "max_tokens" : parameters [ "max_tokens" ] if "max_tokens" in parameters else max_tokens ,
190- "top_p" : parameters [ "top_p" ] if "top_p" in parameters else top_p ,
189+ "context" : parameters . get ( "context" , context ) ,
190+ "prompt" : parameters . get ( "prompt" , prompt ) ,
191+ "history" : parameters . get ( "history" , history ) ,
192+ "temperature" : parameters . get ( "temperature" , temperature ) ,
193+ "max_tokens" : parameters . get ( "max_tokens" , max_tokens ) ,
194+ "top_p" : parameters . get ( "top_p" , top_p ) ,
191195 }
192196 )
193197 payload = build_payload (data = data , parameters = parameters )
0 commit comments