@dataclass
class TokenMetrics:
    """
    Metrics for token usage in AI operations.

    :param total: Total number of tokens used.
    :param input: Number of input tokens.
    :param output: Number of output tokens.
    """
    total: int
    input: int
    output: int  # type: ignore

class FeedbackKind(Enum):
    """
    Types of feedback that can be provided for AI operations.
    """
    Positive = "positive"
    Negative = "negative"

@dataclass
class TokenUsage:
    """
    Tracks token usage for AI operations.

    :param total_tokens: Total number of tokens used.
    :param prompt_tokens: Number of tokens in the prompt.
    :param completion_tokens: Number of tokens in the completion.
    """
    total_tokens: int
    prompt_tokens: int
    completion_tokens: int

    def to_metrics(self) -> TokenMetrics:
        """
        Convert token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        return TokenMetrics(
            total=self.total_tokens,
            input=self.prompt_tokens,
            output=self.completion_tokens,
        )

@dataclass
class LDOpenAIUsage:
    """
    LaunchDarkly-specific OpenAI usage tracking.

    :param total_tokens: Total number of tokens used.
    :param prompt_tokens: Number of tokens in the prompt.
    :param completion_tokens: Number of tokens in the completion.
    """
    total_tokens: int
    prompt_tokens: int
    completion_tokens: int

class OpenAITokenUsage:
    """
    Tracks OpenAI-specific token usage.
    """
    def __init__(self, data: LDOpenAIUsage):
        """
        Initialize OpenAI token usage tracking.

        :param data: OpenAI usage data.
        """
        self.total_tokens = data.total_tokens
        self.prompt_tokens = data.prompt_tokens
        self.completion_tokens = data.completion_tokens

    def to_metrics(self) -> TokenMetrics:
        """
        Convert OpenAI token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        return TokenMetrics(
            total=self.total_tokens,
            input=self.prompt_tokens,
            output=self.completion_tokens,
        )

class BedrockTokenUsage:
    """
    Tracks AWS Bedrock-specific token usage.
    """
    def __init__(self, data: dict):
        """
        Initialize Bedrock token usage tracking.

        :param data: Dictionary containing Bedrock usage data.
        """
        self.totalTokens = data.get('totalTokens', 0)
        self.inputTokens = data.get('inputTokens', 0)
        self.outputTokens = data.get('outputTokens', 0)

    def to_metrics(self) -> TokenMetrics:
        """
        Convert Bedrock token usage to metrics format.

        :return: TokenMetrics object containing usage data.
        """
        return TokenMetrics(
            total=self.totalTokens,
            input=self.inputTokens,
            output=self.outputTokens,
        )

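# The usage classes above normalize provider-specific payloads into TokenMetrics.
# A minimal illustrative sketch (the literal token counts are made up, not taken
# from any real provider response):
#
#     usage = BedrockTokenUsage({'totalTokens': 120, 'inputTokens': 80, 'outputTokens': 40})
#     usage.to_metrics()  # -> TokenMetrics(total=120, input=80, output=40)
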
class LDAIConfigTracker:
    """
    Tracks configuration and usage metrics for LaunchDarkly AI operations.
    """
    def __init__(self, ld_client: LDClient, version_key: str, config_key: str, context: Context):
        """
        Initialize an AI configuration tracker.

        :param ld_client: LaunchDarkly client instance.
        :param version_key: Version key for tracking.
        :param config_key: Configuration key for tracking.
        :param context: Context for evaluation.
        """
        self.ld_client = ld_client
        self.version_key = version_key
        self.config_key = config_key
        self.context = context

    def get_track_data(self):
        """
        Get tracking data for events.

        :return: Dictionary containing version and config keys.
        """
        return {
            'versionKey': self.version_key,
            'configKey': self.config_key,
        }

    def track_duration(self, duration: int) -> None:
        """
        Track the duration of an AI operation.

        :param duration: Duration in milliseconds.
        """
        self.ld_client.track('$ld:ai:duration:total', self.context, self.get_track_data(), duration)

    def track_duration_of(self, func):
        """
        Track the duration of a function execution.

        :param func: Function to track.
        :return: Result of the tracked function.
        """
        start_time = time.time()
        result = func()
        end_time = time.time()
        # Convert the elapsed wall-clock time to milliseconds before recording it.
        self.track_duration(int((end_time - start_time) * 1000))
        return result

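    # Illustrative sketch: track_duration_of wraps any zero-argument callable and
    # records its wall-clock duration before returning its result. The tracker
    # instance, call_my_model, and prompt below are hypothetical, not part of
    # this module:
    #
    #     result = tracker.track_duration_of(lambda: call_my_model(prompt))
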
    def track_feedback(self, feedback: Dict[str, FeedbackKind]) -> None:
        """
        Track user feedback for an AI operation.

        :param feedback: Dictionary containing the feedback kind under the 'kind' key.
        """
        if feedback['kind'] == FeedbackKind.Positive:
            self.ld_client.track('$ld:ai:feedback:user:positive', self.context, self.get_track_data(), 1)
        elif feedback['kind'] == FeedbackKind.Negative:
            self.ld_client.track('$ld:ai:feedback:user:negative', self.context, self.get_track_data(), 1)

    def track_success(self) -> None:
        """
        Track a successful AI generation.
        """
        self.ld_client.track('$ld:ai:generation', self.context, self.get_track_data(), 1)

    def track_openai(self, func):
        """
        Track OpenAI-specific operations.

        :param func: Function to track.
        :return: Result of the tracked function.
        """
        result = self.track_duration_of(func)
        if result.usage:
            self.track_tokens(OpenAITokenUsage(result.usage))
        return result

    def track_bedrock_converse(self, res: dict) -> dict:
        """
        Track AWS Bedrock conversation operations.

        :param res: Response dictionary from Bedrock.
        :return: The original response dictionary.
        """
        status_code = res.get('$metadata', {}).get('httpStatusCode', 0)
        if status_code == 200:
            self.track_success()
        if res.get('usage'):
            self.track_tokens(BedrockTokenUsage(res['usage']))
        return res

    def track_tokens(self, tokens: Union[TokenUsage, OpenAITokenUsage, BedrockTokenUsage]) -> None:
        """
        Track token usage metrics.

        :param tokens: Token usage data from either custom, OpenAI, or Bedrock sources.
        """
        token_metrics = tokens.to_metrics()
        if token_metrics.total > 0:
            self.ld_client.track('$ld:ai:tokens:total', self.context, self.get_track_data(), token_metrics.total)
        if token_metrics.input > 0:
            self.ld_client.track('$ld:ai:tokens:input', self.context, self.get_track_data(), token_metrics.input)
        if token_metrics.output > 0:
            self.ld_client.track('$ld:ai:tokens:output', self.context, self.get_track_data(), token_metrics.output)
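
# End-to-end usage, as a minimal illustrative sketch. The SDK key, context
# attributes, config values ('my-version', 'my-config'), model name, and the
# OpenAI call are assumptions for the example, not values defined by this module:
#
#     import ldclient
#     from ldclient import Config, Context
#     from openai import OpenAI
#
#     ldclient.set_config(Config('sdk-key'))
#     context = Context.builder('user-key').kind('user').build()
#     tracker = LDAIConfigTracker(ldclient.get(), 'my-version', 'my-config', context)
#
#     client = OpenAI()
#     completion = tracker.track_openai(
#         lambda: client.chat.completions.create(
#             model='gpt-4o-mini',
#             messages=[{'role': 'user', 'content': 'Hello'}],
#         )
#     )
#     tracker.track_feedback({'kind': FeedbackKind.Positive})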