-
Notifications
You must be signed in to change notification settings - Fork 7
/
v_2.go
489 lines (448 loc) · 23.1 KB
/
v_2.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
// This file was auto-generated by Fern from our API Definition.
package api
import (
json "encoding/json"
fmt "fmt"
core "github.com/cohere-ai/cohere-go/v2/core"
)
// V2ChatRequest is the request payload for the v2 chat endpoint. This is the
// non-streaming variant: its MarshalJSON always emits `"stream": false`, and
// UnmarshalJSON resets the private stream flag to false.
type V2ChatRequest struct {
// The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/v2/docs/chat-fine-tuning) model.
Model string `json:"model" url:"-"`
// The conversation so far; see the ChatMessages type for the accepted message roles.
Messages ChatMessages `json:"messages,omitempty" url:"-"`
// A list of available tools (functions) that the model may suggest invoking before producing a text response.
//
// When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
Tools []*ToolV2 `json:"tools,omitempty" url:"-"`
// When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Structured Outputs (Tools) guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools).
//
// **Note**: The first few requests with a new set of tools will take longer to process.
StrictTools *bool `json:"strict_tools,omitempty" url:"-"`
// A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata.
Documents []*V2ChatRequestDocumentsItem `json:"documents,omitempty" url:"-"`
// Options controlling how citations are generated in the response.
CitationOptions *CitationOptions `json:"citation_options,omitempty" url:"-"`
// Constrains the output format of the response (e.g. JSON); see ResponseFormatV2.
ResponseFormat *ResponseFormatV2 `json:"response_format,omitempty" url:"-"`
// Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
// When `OFF` is specified, the safety instruction will be omitted.
//
// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
//
// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
//
// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
SafetyMode *V2ChatRequestSafetyMode `json:"safety_mode,omitempty" url:"-"`
// The maximum number of tokens the model will generate as part of the response.
//
// **Note**: Setting a low value may result in incomplete generations.
MaxTokens *int `json:"max_tokens,omitempty" url:"-"`
// A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
StopSequences []string `json:"stop_sequences,omitempty" url:"-"`
// Defaults to `0.3`.
//
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
//
// Randomness can be further maximized by increasing the value of the `p` parameter.
Temperature *float64 `json:"temperature,omitempty" url:"-"`
// If specified, the backend will make a best effort to sample tokens
// deterministically, such that repeated requests with the same
// seed and parameters should return the same result. However,
// determinism cannot be totally guaranteed.
Seed *int `json:"seed,omitempty" url:"-"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"-"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"-"`
// Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
// Defaults to `0`, min value of `0`, max value of `500`.
K *float64 `json:"k,omitempty" url:"-"`
// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
// Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
P *float64 `json:"p,omitempty" url:"-"`
// Whether to return the prompt in the response.
ReturnPrompt *bool `json:"return_prompt,omitempty" url:"-"`
// Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.
Logprobs *bool `json:"logprobs,omitempty" url:"-"`
// stream is unexported and pinned to false for this type (see MarshalJSON /
// UnmarshalJSON). When `true`, the response would be a SSE stream of events;
// use V2ChatStreamRequest for the streaming variant.
stream bool
}
// Stream reports the value of the private stream flag. For V2ChatRequest this
// is false after decoding (UnmarshalJSON forces it), matching the `"stream": false`
// that MarshalJSON always emits.
func (v *V2ChatRequest) Stream() bool {
return v.stream
}
// UnmarshalJSON decodes data into v. A local alias type is used so the
// standard decoder does not recurse back into this method; afterwards the
// private stream flag is pinned to false, since this type never streams.
func (v *V2ChatRequest) UnmarshalJSON(data []byte) error {
	type plain V2ChatRequest
	var decoded plain
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*v = V2ChatRequest(decoded)
	v.stream = false
	return nil
}
// MarshalJSON encodes v with an explicit `"stream": false` property added.
// An alias type is embedded so the promoted fields are marshaled without
// invoking this method recursively.
func (v *V2ChatRequest) MarshalJSON() ([]byte, error) {
	type plain V2ChatRequest
	wrapper := struct {
		plain
		Stream bool `json:"stream"`
	}{
		plain:  plain(*v),
		Stream: false,
	}
	return json.Marshal(wrapper)
}
// V2ChatStreamRequest is the request payload for the v2 chat endpoint in
// streaming mode: its MarshalJSON always emits `"stream": true`, and
// UnmarshalJSON sets the private stream flag to true.
type V2ChatStreamRequest struct {
// The name of a compatible [Cohere model](https://docs.cohere.com/v2/docs/models) (such as command-r or command-r-plus) or the ID of a [fine-tuned](https://docs.cohere.com/v2/docs/chat-fine-tuning) model.
Model string `json:"model" url:"-"`
// The conversation so far; see the ChatMessages type for the accepted message roles.
Messages ChatMessages `json:"messages,omitempty" url:"-"`
// A list of available tools (functions) that the model may suggest invoking before producing a text response.
//
// When `tools` is passed (without `tool_results`), the `text` content in the response will be empty and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
Tools []*ToolV2 `json:"tools,omitempty" url:"-"`
// When set to `true`, tool calls in the Assistant message will be forced to follow the tool definition strictly. Learn more in the [Structured Outputs (Tools) guide](https://docs.cohere.com/docs/structured-outputs-json#structured-outputs-tools).
//
// **Note**: The first few requests with a new set of tools will take longer to process.
StrictTools *bool `json:"strict_tools,omitempty" url:"-"`
// A list of relevant documents that the model can cite to generate a more accurate reply. Each document is either a string or document object with content and metadata.
Documents []*V2ChatStreamRequestDocumentsItem `json:"documents,omitempty" url:"-"`
// Options controlling how citations are generated in the response.
CitationOptions *CitationOptions `json:"citation_options,omitempty" url:"-"`
// Constrains the output format of the response (e.g. JSON); see ResponseFormatV2.
ResponseFormat *ResponseFormatV2 `json:"response_format,omitempty" url:"-"`
// Used to select the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
// When `OFF` is specified, the safety instruction will be omitted.
//
// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
//
// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
//
// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
SafetyMode *V2ChatStreamRequestSafetyMode `json:"safety_mode,omitempty" url:"-"`
// The maximum number of tokens the model will generate as part of the response.
//
// **Note**: Setting a low value may result in incomplete generations.
MaxTokens *int `json:"max_tokens,omitempty" url:"-"`
// A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
StopSequences []string `json:"stop_sequences,omitempty" url:"-"`
// Defaults to `0.3`.
//
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
//
// Randomness can be further maximized by increasing the value of the `p` parameter.
Temperature *float64 `json:"temperature,omitempty" url:"-"`
// If specified, the backend will make a best effort to sample tokens
// deterministically, such that repeated requests with the same
// seed and parameters should return the same result. However,
// determinism cannot be totally guaranteed.
Seed *int `json:"seed,omitempty" url:"-"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"-"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"-"`
// Ensures that only the top `k` most likely tokens are considered for generation at each step. When `k` is set to `0`, k-sampling is disabled.
// Defaults to `0`, min value of `0`, max value of `500`.
K *float64 `json:"k,omitempty" url:"-"`
// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
// Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
P *float64 `json:"p,omitempty" url:"-"`
// Whether to return the prompt in the response.
ReturnPrompt *bool `json:"return_prompt,omitempty" url:"-"`
// Defaults to `false`. When set to `true`, the log probabilities of the generated tokens will be included in the response.
Logprobs *bool `json:"logprobs,omitempty" url:"-"`
// stream is unexported and pinned to true for this type (see MarshalJSON /
// UnmarshalJSON): the response will be a SSE stream of events whose final
// event has an `event_type` of `"stream-end"`.
stream bool
}
// Stream reports the value of the private stream flag. For V2ChatStreamRequest
// this is true after decoding (UnmarshalJSON forces it), matching the
// `"stream": true` that MarshalJSON always emits.
func (v *V2ChatStreamRequest) Stream() bool {
return v.stream
}
// UnmarshalJSON decodes data into v. A local alias type is used so the
// standard decoder does not recurse back into this method; afterwards the
// private stream flag is pinned to true, since this type always streams.
func (v *V2ChatStreamRequest) UnmarshalJSON(data []byte) error {
	type plain V2ChatStreamRequest
	var decoded plain
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*v = V2ChatStreamRequest(decoded)
	v.stream = true
	return nil
}
// MarshalJSON encodes v with an explicit `"stream": true` property added.
// An alias type is embedded so the promoted fields are marshaled without
// invoking this method recursively.
func (v *V2ChatStreamRequest) MarshalJSON() ([]byte, error) {
	type plain V2ChatStreamRequest
	wrapper := struct {
		plain
		Stream bool `json:"stream"`
	}{
		plain:  plain(*v),
		Stream: true,
	}
	return json.Marshal(wrapper)
}
// V2EmbedRequest is the request payload for the v2 embed endpoint. Either
// Texts or Images supplies the inputs to embed; Model and InputType are
// required fields (no omitempty on their JSON tags).
type V2EmbedRequest struct {
// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
Texts []string `json:"texts,omitempty" url:"-"`
// An array of image data URIs for the model to embed. Maximum number of images per call is `1`.
//
// The image must be a valid [data URI](https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data). The image must be in either `image/jpeg` or `image/png` format and has a maximum size of 5MB.
Images []string `json:"images,omitempty" url:"-"`
// Defaults to embed-english-v2.0
//
// The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](https://docs.cohere.com/docs/training-custom-models) can also be supplied with their full ID.
//
// Available models and corresponding embedding dimensions:
//
// * `embed-english-v3.0` 1024
// * `embed-multilingual-v3.0` 1024
// * `embed-english-light-v3.0` 384
// * `embed-multilingual-light-v3.0` 384
//
// * `embed-english-v2.0` 4096
// * `embed-english-light-v2.0` 1024
// * `embed-multilingual-v2.0` 768
Model string `json:"model" url:"-"`
// The type of input (e.g. search document vs. query); see EmbedInputType for the accepted values.
InputType EmbedInputType `json:"input_type" url:"-"`
// Specifies the types of embeddings you want to get back. Can be one or more of the following types.
//
// * `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
// * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
// * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
// * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
EmbeddingTypes []EmbeddingType `json:"embedding_types,omitempty" url:"-"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
//
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
//
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *V2EmbedRequestTruncate `json:"truncate,omitempty" url:"-"`
}
// V2RerankRequest is the request payload for the v2 rerank endpoint: it ranks
// Documents by relevance to Query using the given Model.
type V2RerankRequest struct {
// The identifier of the model to use, eg `rerank-v3.5`.
Model string `json:"model" url:"-"`
// The search query
Query string `json:"query" url:"-"`
// A list of texts that will be compared to the `query`.
// For optimal performance we recommend against sending more than 1,000 documents in a single request.
//
// **Note**: long documents will automatically be truncated to the value of `max_tokens_per_doc`.
//
// **Note**: structured data should be formatted as YAML strings for best performance.
Documents []string `json:"documents,omitempty" url:"-"`
// Limits the number of returned rerank results to the specified value. If not passed, all the rerank results will be returned.
TopN *int `json:"top_n,omitempty" url:"-"`
// - If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request.
// - If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request.
ReturnDocuments *bool `json:"return_documents,omitempty" url:"-"`
// Defaults to `4096`. Long documents will be automatically truncated to the specified number of tokens.
MaxTokensPerDoc *int `json:"max_tokens_per_doc,omitempty" url:"-"`
}
// V2ChatRequestDocumentsItem is an untagged union: a document is either a raw
// string or a structured *Document. Exactly one field is expected to be set;
// String takes precedence when both are (see MarshalJSON / Accept).
type V2ChatRequestDocumentsItem struct {
String string
Document *Document
}
// UnmarshalJSON tries each union variant in order — first a plain JSON
// string, then a structured Document object — and keeps the first one that
// decodes successfully.
func (v *V2ChatRequestDocumentsItem) UnmarshalJSON(data []byte) error {
	var asString string
	if err := json.Unmarshal(data, &asString); err == nil {
		v.String = asString
		return nil
	}
	asDocument := new(Document)
	if err := json.Unmarshal(data, &asDocument); err == nil {
		v.Document = asDocument
		return nil
	}
	return fmt.Errorf("%s cannot be deserialized as a %T", data, v)
}
// MarshalJSON emits whichever union variant is populated, preferring the
// string form; an error is returned when neither field is set.
func (v V2ChatRequestDocumentsItem) MarshalJSON() ([]byte, error) {
	switch {
	case v.String != "":
		return json.Marshal(v.String)
	case v.Document != nil:
		return json.Marshal(v.Document)
	default:
		return nil, fmt.Errorf("type %T does not include a non-empty union type", v)
	}
}
// V2ChatRequestDocumentsItemVisitor receives exactly one callback from
// V2ChatRequestDocumentsItem.Accept, corresponding to the populated variant.
type V2ChatRequestDocumentsItemVisitor interface {
VisitString(string) error
VisitDocument(*Document) error
}
// Accept dispatches to the visitor callback matching the populated union
// variant (string form preferred); an error is returned when neither is set.
func (v *V2ChatRequestDocumentsItem) Accept(visitor V2ChatRequestDocumentsItemVisitor) error {
	switch {
	case v.String != "":
		return visitor.VisitString(v.String)
	case v.Document != nil:
		return visitor.VisitDocument(v.Document)
	default:
		return fmt.Errorf("type %T does not include a non-empty union type", v)
	}
}
// V2ChatRequestSafetyMode selects the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
// When `OFF` is specified, the safety instruction will be omitted.
//
// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
//
// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
//
// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
type V2ChatRequestSafetyMode string

// The accepted safety-mode values.
const (
	V2ChatRequestSafetyModeContextual V2ChatRequestSafetyMode = "CONTEXTUAL"
	V2ChatRequestSafetyModeStrict     V2ChatRequestSafetyMode = "STRICT"
	V2ChatRequestSafetyModeOff        V2ChatRequestSafetyMode = "OFF"
)

// NewV2ChatRequestSafetyModeFromString validates s against the known safety
// modes and returns the typed value; any other input yields an error.
func NewV2ChatRequestSafetyModeFromString(s string) (V2ChatRequestSafetyMode, error) {
	mode := V2ChatRequestSafetyMode(s)
	switch mode {
	case V2ChatRequestSafetyModeContextual, V2ChatRequestSafetyModeStrict, V2ChatRequestSafetyModeOff:
		return mode, nil
	}
	return "", fmt.Errorf("%s is not a valid %T", s, mode)
}

// Ptr returns a pointer to a copy of v, convenient for optional request fields.
func (v V2ChatRequestSafetyMode) Ptr() *V2ChatRequestSafetyMode {
	return &v
}
// V2ChatStreamRequestDocumentsItem is an untagged union: a document is either
// a raw string or a structured *Document. Exactly one field is expected to be
// set; String takes precedence when both are (see MarshalJSON / Accept).
type V2ChatStreamRequestDocumentsItem struct {
String string
Document *Document
}
// UnmarshalJSON tries each union variant in order — first a plain JSON
// string, then a structured Document object — and keeps the first one that
// decodes successfully.
func (v *V2ChatStreamRequestDocumentsItem) UnmarshalJSON(data []byte) error {
	var asString string
	if err := json.Unmarshal(data, &asString); err == nil {
		v.String = asString
		return nil
	}
	asDocument := new(Document)
	if err := json.Unmarshal(data, &asDocument); err == nil {
		v.Document = asDocument
		return nil
	}
	return fmt.Errorf("%s cannot be deserialized as a %T", data, v)
}
// MarshalJSON emits whichever union variant is populated, preferring the
// string form; an error is returned when neither field is set.
func (v V2ChatStreamRequestDocumentsItem) MarshalJSON() ([]byte, error) {
	switch {
	case v.String != "":
		return json.Marshal(v.String)
	case v.Document != nil:
		return json.Marshal(v.Document)
	default:
		return nil, fmt.Errorf("type %T does not include a non-empty union type", v)
	}
}
// V2ChatStreamRequestDocumentsItemVisitor receives exactly one callback from
// V2ChatStreamRequestDocumentsItem.Accept, corresponding to the populated variant.
type V2ChatStreamRequestDocumentsItemVisitor interface {
VisitString(string) error
VisitDocument(*Document) error
}
// Accept dispatches to the visitor callback matching the populated union
// variant (string form preferred); an error is returned when neither is set.
func (v *V2ChatStreamRequestDocumentsItem) Accept(visitor V2ChatStreamRequestDocumentsItemVisitor) error {
	switch {
	case v.String != "":
		return visitor.VisitString(v.String)
	case v.Document != nil:
		return visitor.VisitDocument(v.Document)
	default:
		return fmt.Errorf("type %T does not include a non-empty union type", v)
	}
}
// V2ChatStreamRequestSafetyMode selects the [safety instruction](https://docs.cohere.com/v2/docs/safety-modes) inserted into the prompt. Defaults to `CONTEXTUAL`.
// When `OFF` is specified, the safety instruction will be omitted.
//
// Safety modes are not yet configurable in combination with `tools`, `tool_results` and `documents` parameters.
//
// **Note**: This parameter is only compatible with models [Command R 08-2024](https://docs.cohere.com/v2/docs/command-r#august-2024-release), [Command R+ 08-2024](https://docs.cohere.com/v2/docs/command-r-plus#august-2024-release) and newer.
//
// **Note**: `command-r7b-12-2024` only supports `"CONTEXTUAL"` and `"STRICT"` modes.
type V2ChatStreamRequestSafetyMode string

// The accepted safety-mode values.
const (
	V2ChatStreamRequestSafetyModeContextual V2ChatStreamRequestSafetyMode = "CONTEXTUAL"
	V2ChatStreamRequestSafetyModeStrict     V2ChatStreamRequestSafetyMode = "STRICT"
	V2ChatStreamRequestSafetyModeOff        V2ChatStreamRequestSafetyMode = "OFF"
)

// NewV2ChatStreamRequestSafetyModeFromString validates s against the known
// safety modes and returns the typed value; any other input yields an error.
func NewV2ChatStreamRequestSafetyModeFromString(s string) (V2ChatStreamRequestSafetyMode, error) {
	mode := V2ChatStreamRequestSafetyMode(s)
	switch mode {
	case V2ChatStreamRequestSafetyModeContextual, V2ChatStreamRequestSafetyModeStrict, V2ChatStreamRequestSafetyModeOff:
		return mode, nil
	}
	return "", fmt.Errorf("%s is not a valid %T", s, mode)
}

// Ptr returns a pointer to a copy of v, convenient for optional request fields.
func (v V2ChatStreamRequestSafetyMode) Ptr() *V2ChatStreamRequestSafetyMode {
	return &v
}
// V2EmbedRequestTruncate is one of `NONE|START|END` and specifies how the API will handle inputs longer than the maximum token length.
//
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
//
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
type V2EmbedRequestTruncate string

// The accepted truncation strategies.
const (
	V2EmbedRequestTruncateNone  V2EmbedRequestTruncate = "NONE"
	V2EmbedRequestTruncateStart V2EmbedRequestTruncate = "START"
	V2EmbedRequestTruncateEnd   V2EmbedRequestTruncate = "END"
)

// NewV2EmbedRequestTruncateFromString validates s against the known truncation
// strategies and returns the typed value; any other input yields an error.
func NewV2EmbedRequestTruncateFromString(s string) (V2EmbedRequestTruncate, error) {
	mode := V2EmbedRequestTruncate(s)
	switch mode {
	case V2EmbedRequestTruncateNone, V2EmbedRequestTruncateStart, V2EmbedRequestTruncateEnd:
		return mode, nil
	}
	return "", fmt.Errorf("%s is not a valid %T", s, mode)
}

// Ptr returns a pointer to a copy of v, convenient for optional request fields.
func (v V2EmbedRequestTruncate) Ptr() *V2EmbedRequestTruncate {
	return &v
}
// V2RerankResponse is the response body of the v2 rerank endpoint.
type V2RerankResponse struct {
Id *string `json:"id,omitempty" url:"id,omitempty"`
// An ordered list of ranked documents
Results []*V2RerankResponseResultsItem `json:"results,omitempty" url:"results,omitempty"`
Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"`
// extraProperties holds JSON fields present in the payload but not declared
// above; populated by UnmarshalJSON.
extraProperties map[string]interface{}
// _rawJSON is the original payload, kept for String() to render verbatim.
_rawJSON json.RawMessage
}
// GetExtraProperties returns the JSON properties that were present in the
// decoded payload but are not declared as fields on V2RerankResponse.
// It is nil unless the value was populated via UnmarshalJSON.
func (v *V2RerankResponse) GetExtraProperties() map[string]interface{} {
return v.extraProperties
}
// UnmarshalJSON decodes data into v via an alias type (avoiding recursion into
// this method), then records any JSON properties not declared on the struct
// together with the raw payload so the response can be reproduced losslessly.
func (v *V2RerankResponse) UnmarshalJSON(data []byte) error {
	type plain V2RerankResponse
	var decoded plain
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*v = V2RerankResponse(decoded)
	extra, err := core.ExtractExtraProperties(data, *v)
	if err != nil {
		return err
	}
	v.extraProperties = extra
	v._rawJSON = json.RawMessage(data)
	return nil
}
// String renders the response as JSON, preferring the raw payload captured at
// decode time; it falls back to re-marshaling the struct, and finally to a
// %#v dump if both stringify attempts fail.
func (v *V2RerankResponse) String() string {
	if raw := v._rawJSON; len(raw) > 0 {
		if s, err := core.StringifyJSON(raw); err == nil {
			return s
		}
	}
	if s, err := core.StringifyJSON(v); err == nil {
		return s
	}
	return fmt.Sprintf("%#v", v)
}