+import time
 from typing import Dict, Union
 from ldclient import Context, LDClient
-from ldai.tracking_utils import usage_to_token_metrics
-from ldai.types import BedrockTokenUsage, FeedbackKind, TokenUsage, UnderscoreTokenUsage
+from ldai.types import BedrockTokenUsage, FeedbackKind, OpenAITokenUsage, TokenUsage, UnderscoreTokenUsage

 class LDAIConfigTracker:
     def __init__(self, ld_client: LDClient, variation_id: str, config_key: str, context: Context):
@@ -19,23 +19,37 @@ def get_track_data(self):
     def track_duration(self, duration: int) -> None:
         self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration)

-    def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
-        token_metrics = usage_to_token_metrics(tokens)
-        if token_metrics['total'] > 0:
-            self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
-        if token_metrics['input'] > 0:
-            self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input'])
-        if token_metrics['output'] > 0:
-            self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output'])
+    def track_duration_of(self, func, *args, **kwargs):
+        start_time = time.time()
+        result = func(*args, **kwargs)
+        end_time = time.time()
+        duration = int((end_time - start_time) * 1000)  # duration in milliseconds
+        self.track_duration(duration)
+        return result

     def track_error(self, error: int) -> None:
         self.ld_client.track('$ld:ai:error', self.context, self.get_track_data(), error)

-    def track_generation(self, generation: int) -> None:
-        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
-
     def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
         if feedback['kind'] == FeedbackKind.Positive:
             self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
         elif feedback['kind'] == FeedbackKind.Negative:
-            self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
+            self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)
+
+    def track_generation(self, generation: int) -> None:
+        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), generation)
+
+    def track_openai(self, func, *args, **kwargs):
+        result = self.track_duration_of(func, *args, **kwargs)
+        if result.usage:
+            self.track_tokens(OpenAITokenUsage(result.usage))
+        return result
+
+    def track_tokens(self, tokens: Union[TokenUsage, UnderscoreTokenUsage, BedrockTokenUsage]) -> None:
+        token_metrics = tokens.to_metrics()
+        if token_metrics['total'] > 0:
+            self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics['total'])
+        if token_metrics['input'] > 0:
+            self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics['input'])
+        if token_metrics['output'] > 0:
+            self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics['output'])
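For context, a minimal usage sketch of the new wrappers, not part of this commit. It assumes the class lives at a module path like ldai.tracker, uses placeholder values for the SDK key, config key, and variation ID, and relies on the standard OpenAI client, whose chat.completions.create response exposes a usage attribute (which is what track_openai reads); some_llm_call is a hypothetical stand-in for any other callable.

import openai
from ldclient import Context, LDClient
from ldclient.config import Config
from ldai.tracker import LDAIConfigTracker  # assumed module path for this class

# Placeholder LaunchDarkly setup: real code would use your SDK key, config key,
# and the variation ID returned for the AI config.
ld_client = LDClient(Config('my-sdk-key'))
context = Context.builder('user-key-123').build()
tracker = LDAIConfigTracker(ld_client, 'variation-id', 'my-ai-config', context)

openai_client = openai.OpenAI()

# track_openai times the call via track_duration_of and, when the response
# carries a `usage` attribute, records token counts through track_tokens.
completion = tracker.track_openai(
    openai_client.chat.completions.create,
    model='gpt-4o-mini',
    messages=[{'role': 'user', 'content': 'Hello'}],
)

# track_duration_of can wrap any other callable to record latency only.
result = tracker.track_duration_of(some_llm_call, prompt='Hello')  # some_llm_call is hypothetical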