@@ -24,7 +24,18 @@ const showLoadingAnimation = (isLoading) => {
2424 }
2525}
2626
// Show or hide the error banner.
// `isError === true` reveals the `.error` element; `false` hides it again
// by managing its `hidden` CSS class (mirrors showLoadingAnimation).
const showErrorMessage = (isError) => {
  // Renamed from `loadingScreen` (copy-paste leftover): this selects the
  // error banner, not the loading overlay.
  const errorScreen = document.querySelector('.error');

  // classList.toggle with a force flag: force=false removes the class
  // (banner visible), force=true adds it (banner hidden).
  errorScreen.classList.toggle('hidden', !isError);
};
35+
2736const startGame = async ( genre ) => {
37+ showErrorMessage ( false ) ;
38+
2839 // Message to send to ChatGPT to start the game
2940 chatGptMessages . push ( {
3041 role : 'system' ,
@@ -35,29 +46,41 @@ const startGame = async (genre) => {
3546 'Your responses are just in JSON format like this example:\n\n###\n\n {"setting":"setting description","actions":["action 1", "action 2", "action 3"]}\n\n###\n\n'
3647 } ) ;
3748
38- showLoadingAnimation ( true ) ;
49+ let chatResponseJson ;
50+
51+ try {
52+ showLoadingAnimation ( true ) ;
3953
40- // Send request to ChatGPT Chat Completion API
41- // https://platform.openai.com/docs/api-reference/chat/create
42- const chatJSON = await makeRequest (
43- _CONFIG_ . API_BASE_URL + '/chat/completions' ,
44- {
54+ // Send request to ChatGPT Chat Completion API
55+ // https://platform.openai.com/docs/api-reference/chat/create
56+ chatResponseJson = await makeRequest ( _CONFIG_ . API_BASE_URL + '/chat/completions' , {
4557 model : _CONFIG_ . GPT_MODEL ,
4658 messages : chatGptMessages ,
4759 temperature : 0.7
4860 // The model predicts which text is most likely to follow the text preceding it.
49- // Temperature is a value between 0 and 1 that essentially lets you control how confident the model should be when making these predictions.
50- // Lowering temperature means it will take fewer risks, and completions will be more accurate and deterministic.
51- // Increasing temperature will result in more diverse completions.
52- } ) ;
61+ // Temperature is a value between 0 and 1 that essentially lets you control how confident the model should be
62+ // when making these predictions. Lowering temperature means it will take fewer risks, and completions will be
63+ // more accurate and deterministic. Increasing temperature will result in more diverse completions.
64+ } ) ;
5365
54- const message = chatJSON . choices [ 0 ] . message ;
55- const content = JSON . parse ( message . content ) ;
56- const { setting, actions } = content ;
57- console . log ( 'SETTING:' , setting ) ;
58- console . log ( 'ACTIONS:' , actions ) ;
66+ const message = chatResponseJson . choices [ 0 ] . message ;
67+ const content = JSON . parse ( message . content ) ;
68+ const { setting, actions} = content ;
69+ console . log ( 'SETTING:' , setting ) ;
70+ console . log ( 'ACTIONS:' , actions ) ;
5971
60- showLoadingAnimation ( false ) ;
72+ showLoadingAnimation ( false ) ;
73+ } catch ( error ) {
74+ let errorMessages = `<p>${ error . message } </p>` ;
75+
76+ if ( chatResponseJson . error ) {
77+ errorMessages += `<p>${ chatResponseJson . error . message } </p>` ;
78+ }
79+
80+ showLoadingAnimation ( false ) ;
81+ document . querySelector ( '.error-messages' ) . innerHTML = errorMessages ;
82+ showErrorMessage ( true ) ;
83+ }
6184}
6285
6386const init = ( ) => {
0 commit comments