Skip to content

Commit

Permalink
Added the possibility to deactivate functions depending on the model
Browse files Browse the repository at this point in the history
  • Loading branch information
tristandostaler authored Sep 24, 2024
1 parent ae6e392 commit 2c853d5
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 10 deletions.
35 changes: 26 additions & 9 deletions src/api/api.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ import { isAzureEndpoint } from '@utils/api';
import { adjustConfigAndRemoveConfigContentInMessages } from './helper';
import { limitMessageTokens } from '@utils/messageUtils';
import { functionsSchemas, functionsSchemaTokens } from './functions'
import { minResponseSize } from '@constants/chat';
import { modelCost, minResponseSize } from '@constants/chat';


async function prepareStreamAndGetResponse(customHeaders: Record<string, string> | undefined, messagesToSend: MessageInterface[], config: ConfigInterface, apiKey: string | undefined, endpoint: string, stream: boolean) {
Expand All @@ -19,8 +19,12 @@ async function prepareStreamAndGetResponse(customHeaders: Record<string, string>
if (tempConfig.max_tokens > minResponseSize + 100) {
minResponseLength = minResponseSize;
}
var functionsSchemaTokenLength = functionsSchemaTokens(tempConfig.model);
tempConfig.max_tokens -= functionsSchemaTokenLength;

var functionsSchemaTokenLength = 0;
if(modelCost[tempConfig.model].supportFunctions) {
functionsSchemaTokenLength = functionsSchemaTokens(tempConfig.model);
tempConfig.max_tokens -= functionsSchemaTokenLength;
}

const adjustedMessagesTuple = limitMessageTokens(
messagesToSend,
Expand All @@ -46,16 +50,29 @@ async function prepareStreamAndGetResponse(customHeaders: Record<string, string>
}
}

const response = await fetch(endpoint, {
method: 'POST',
headers,
body: JSON.stringify({
var bodyToSend = "";
if(modelCost[tempConfig.model].supportFunctions) {
bodyToSend = JSON.stringify({
messages,
...tempConfig,
max_tokens: undefined,
stream: stream,
functions: functionsSchemas
}),
});
} else {
bodyToSend = JSON.stringify({
messages,
...tempConfig,
max_tokens: undefined,
stream: stream
});
}


const response = await fetch(endpoint, {
method: 'POST',
headers,
body: bodyToSend,
});
return response;
}
Expand Down Expand Up @@ -87,7 +104,7 @@ export const getChatCompletionStream = async (
if (text.includes('model_not_found')) {
throw new Error(
text +
'\nMessage from Better ChatGPT:\nPlease ensure that you have access to the GPT-4 API!'
'\nMessage from Better ChatGPT:\nPlease ensure that you have access to the selected API!'
);
} else {
throw new Error(
Expand Down
6 changes: 6 additions & 0 deletions src/constants/chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -20,31 +20,37 @@ export const modelCost = {
prompt: { price: 0.015, unit: 1000 },
completion: { price: 0.06, unit: 1000 },
modelMaxToken: 32768,
supportFunctions: false,
},
'o1-mini': {
prompt: { price: 0.003, unit: 1000 },
completion: { price: 0.012, unit: 1000 },
modelMaxToken: 65536,
supportFunctions: false,
},
'gpt-4': {
prompt: { price: 0.03, unit: 1000 },
completion: { price: 0.06, unit: 1000 },
modelMaxToken: 8192,
supportFunctions: true,
},
'gpt-4-turbo': {
prompt: { price: 0.01, unit: 1000 },
completion: { price: 0.03, unit: 1000 },
modelMaxToken: 128000,
supportFunctions: true,
},
'gpt-4o': {
prompt: { price: 0.005, unit: 1000 },
completion: { price: 0.015, unit: 1000 },
modelMaxToken: 128000,
supportFunctions: true,
},
'gpt-4o-mini': {
prompt: { price: 0.00015, unit: 1000 },
completion: { price: 0.006, unit: 1000 },
modelMaxToken: 128000,
supportFunctions: true,
}
};

Expand Down
2 changes: 1 addition & 1 deletion src/utils/messageUtils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ export const limitMessageTokens = (

for (let i = messages.length - 1; i >= 0; i--) {
if (messages[i].locked || i == messages.length - 1) {
const count = countTokens([messages[i]], model);
const count = countTokens([messages[i]], model); // TODO: validate if counting the functions is still valid when functions are not supported
tokenCount += count;
}
}
Expand Down

0 comments on commit 2c853d5

Please sign in to comment.