@@ -84,16 +84,16 @@ class Gen {
8484 *
8585 * @returns {Promise<string|Buffer>} - The generated image, either as a base64 string or a Buffer.
8686 */
87- static async generate_image_from_desc ( promptString , openaiKey , imageApiKey , is_base64 = true ,
88- provider = SupportedImageModels . STABILITY , customProxyHelper = null ) {
87+ static async generate_image_from_desc ( promptString , openaiKey , imageApiKey , is_base64 = true , width = 1024 ,
88+ height = 1024 , provider = SupportedImageModels . STABILITY , customProxyHelper = null ) {
8989
9090 const imageDescription = await Gen . getImageDescription ( promptString , openaiKey , customProxyHelper ) ;
9191 const imgModel = new RemoteImageModel ( imageApiKey , provider ) ;
9292 const images = await imgModel . generateImages (
9393 new ImageModelInput ( { prompt : imageDescription ,
9494 numberOfImages : 1 ,
95- width : 512 ,
96- height : 512 ,
95+ width : width ,
96+ height : height ,
9797 responseFormat : 'b64_json' } ) ) ;
9898 if ( is_base64 ) {
9999 return images [ 0 ] ;
@@ -121,6 +121,8 @@ class Gen {
121121 tokeSize = 8000 ;
122122 } else if ( model_name . includes ( 'gpt-4' ) ) {
123123 tokeSize = 4000 ;
124+ } else if ( model_name . includes ( 'gpt-4o' ) ) {
125+ tokeSize = 20000 ;
124126 }
125127
126128 // prepare the bot
@@ -130,7 +132,7 @@ class Gen {
130132 // set the user message with the template
131133 input . addUserMessage ( promptTemp . format ( { 'text' : text } ) ) ;
132134 const responses = await chatbot . chat ( input ) ;
133- return JSON . parse ( responses [ 0 ] . trim ( ) ) ;
135+ return JSON . parse ( responses [ 0 ] . trim ( ) . replace ( '```json' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) ) ;
134136 }
135137
136138 static async save_html_page ( text , folder , file_name , openaiKey , model_name = 'gpt-4' , customProxyHelper = null ) {
@@ -158,15 +160,18 @@ class Gen {
158160 tokeSize = 8000 ;
159161 } else if ( model_name . includes ( 'gpt-4' ) ) {
160162 tokeSize = 3900 ;
163+ } else if ( model_name . includes ( 'gpt-4o' ) ) {
164+ tokeSize = 20000 ;
161165 }
166+
162167 const chatbot = new Chatbot ( openaiKey , SupportedChatModels . OPENAI , customProxyHelper ) ;
163168 const input = new ChatGPTInput ( 'Generate HTML graphs from the CSV data and ensure the response is a valid JSON to parse with full HTML code.' ,
164169 { maxTokens : tokeSize , model : model_name , temperature :0.3 } ) ;
165170 // set the user message with the template
166171 input . addUserMessage ( promptTemp . format ( { 'count' : num_graphs , 'topic' : topic , 'text' : csvStrData } ) ) ;
167172 const responses = await chatbot . chat ( input ) ;
168173
169- return JSON . parse ( responses [ 0 ] . trim ( ) ) [ 0 ] ;
174+ return JSON . parse ( responses [ 0 ] . trim ( ) . replace ( '```json' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) . replace ( '```' , '' ) ) [ 0 ] ;
170175 }
171176
172177
0 commit comments