From ab578e80687e912ccd02a1d686fa7e6f68e44556 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 01:00:25 +0800 Subject: [PATCH 01/14] chore: char/v1/chat.proto --- go.mod | 1 + go.sum | 2 + pkg/gen/api/auth/v1/auth.pb.go | 2 +- pkg/gen/api/chat/v1/chat.pb.go | 225 +++++++++++++++--- pkg/gen/api/comment/v1/comment.pb.go | 2 +- pkg/gen/api/project/v1/project.pb.go | 2 +- pkg/gen/api/shared/v1/shared.pb.go | 2 +- pkg/gen/api/user/v1/user.pb.go | 2 +- proto/chat/v1/chat.proto | 120 +++++----- .../src/pkg/gen/apiclient/auth/v1/auth_pb.ts | 2 +- .../src/pkg/gen/apiclient/chat/v1/chat_pb.ts | 75 +++++- .../gen/apiclient/comment/v1/comment_pb.ts | 2 +- .../gen/apiclient/project/v1/project_pb.ts | 2 +- .../pkg/gen/apiclient/shared/v1/shared_pb.ts | 2 +- .../src/pkg/gen/apiclient/user/v1/user_pb.ts | 2 +- 15 files changed, 327 insertions(+), 116 deletions(-) diff --git a/go.mod b/go.mod index 4dc59a93..97d4f37c 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 github.com/joho/godotenv v1.5.1 github.com/openai/openai-go/v2 v2.7.1 + github.com/openai/openai-go/v3 v3.12.0 github.com/samber/lo v1.51.0 github.com/stretchr/testify v1.10.0 go.mongodb.org/mongo-driver/v2 v2.3.0 diff --git a/go.sum b/go.sum index 41824e06..1943dc8d 100644 --- a/go.sum +++ b/go.sum @@ -90,6 +90,8 @@ github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/openai/openai-go/v2 v2.7.1 h1:/tfvTJhfv7hTSL8mWwc5VL4WLLSDL5yn9VqVykdu9r8= github.com/openai/openai-go/v2 v2.7.1/go.mod h1:jrJs23apqJKKbT+pqtFgNKpRju/KP9zpUTZhz3GElQE= +github.com/openai/openai-go/v3 v3.12.0 h1:NkrImaglFQeDycc/n/fEmpFV8kKr8snl9/8X2x4eHOg= +github.com/openai/openai-go/v3 v3.12.0/go.mod h1:cdufnVK14cWcT9qA1rRtrXx4FTRsgbDPW7Ia7SS5cZo= github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= 
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= diff --git a/pkg/gen/api/auth/v1/auth.pb.go b/pkg/gen/api/auth/v1/auth.pb.go index 87514ddd..569ea4e8 100644 --- a/pkg/gen/api/auth/v1/auth.pb.go +++ b/pkg/gen/api/auth/v1/auth.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc (unknown) // source: auth/v1/auth.proto diff --git a/pkg/gen/api/chat/v1/chat.pb.go b/pkg/gen/api/chat/v1/chat.pb.go index 7f048947..ba97e54f 100644 --- a/pkg/gen/api/chat/v1/chat.pb.go +++ b/pkg/gen/api/chat/v1/chat.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc (unknown) // source: chat/v1/chat.proto @@ -109,7 +109,7 @@ type ConversationType int32 const ( ConversationType_CONVERSATION_TYPE_UNSPECIFIED ConversationType = 0 - ConversationType_CONVERSATION_TYPE_DEBUG ConversationType = 1 // does not contain any customized messages, the inapp_history and openai_history are synced. + ConversationType_CONVERSATION_TYPE_DEBUG ConversationType = 1 // does not contain any customized messages, the ) // Enum value maps for ConversationType. 
@@ -657,7 +657,8 @@ type Conversation struct { state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` - LanguageModel LanguageModel `protobuf:"varint,2,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` + LanguageModel LanguageModel `protobuf:"varint,2,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` // deprecated: use model_slug instead + ModelSlug *string `protobuf:"bytes,5,opt,name=model_slug,json=modelSlug,proto3,oneof" json:"model_slug,omitempty"` // new: model slug string // If list conversations, then messages length is 0. Messages []*Message `protobuf:"bytes,4,rep,name=messages,proto3" json:"messages,omitempty"` unknownFields protoimpl.UnknownFields @@ -715,6 +716,13 @@ func (x *Conversation) GetLanguageModel() LanguageModel { return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } +func (x *Conversation) GetModelSlug() string { + if x != nil && x.ModelSlug != nil { + return *x.ModelSlug + } + return "" +} + func (x *Conversation) GetMessages() []*Message { if x != nil { return x.Messages @@ -904,11 +912,15 @@ type CreateConversationMessageRequest struct { ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // If conversation_id is not provided, // a new conversation will be created and the id will be returned. 
- ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` - LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` - UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` - UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` - ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` + ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` + // Types that are valid to be assigned to Model: + // + // *CreateConversationMessageRequest_LanguageModel + // *CreateConversationMessageRequest_ModelSlug + Model isCreateConversationMessageRequest_Model `protobuf_oneof:"model"` + UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` + UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` + ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -957,13 +969,31 @@ func (x *CreateConversationMessageRequest) GetConversationId() string { return "" } +func (x *CreateConversationMessageRequest) GetModel() isCreateConversationMessageRequest_Model { + if x != nil { + return x.Model + } + return nil +} + func (x *CreateConversationMessageRequest) GetLanguageModel() LanguageModel { if x != nil { - return 
x.LanguageModel + if x, ok := x.Model.(*CreateConversationMessageRequest_LanguageModel); ok { + return x.LanguageModel + } } return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } +func (x *CreateConversationMessageRequest) GetModelSlug() string { + if x != nil { + if x, ok := x.Model.(*CreateConversationMessageRequest_ModelSlug); ok { + return x.ModelSlug + } + } + return "" +} + func (x *CreateConversationMessageRequest) GetUserMessage() string { if x != nil { return x.UserMessage @@ -985,6 +1015,22 @@ func (x *CreateConversationMessageRequest) GetConversationType() ConversationTyp return ConversationType_CONVERSATION_TYPE_UNSPECIFIED } +type isCreateConversationMessageRequest_Model interface { + isCreateConversationMessageRequest_Model() +} + +type CreateConversationMessageRequest_LanguageModel struct { + LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel,oneof"` // deprecated: use model_slug instead +} + +type CreateConversationMessageRequest_ModelSlug struct { + ModelSlug string `protobuf:"bytes,7,opt,name=model_slug,json=modelSlug,proto3,oneof"` // new: model slug string +} + +func (*CreateConversationMessageRequest_LanguageModel) isCreateConversationMessageRequest_Model() {} + +func (*CreateConversationMessageRequest_ModelSlug) isCreateConversationMessageRequest_Model() {} + type CreateConversationMessageResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"` @@ -1341,9 +1387,13 @@ func (x *ListSupportedModelsResponse) GetModels() []*SupportedModel { type StreamInitialization struct { state protoimpl.MessageState `protogen:"open.v1"` ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` - LanguageModel LanguageModel 
`protobuf:"varint,5,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + // Types that are valid to be assigned to Model: + // + // *StreamInitialization_LanguageModel + // *StreamInitialization_ModelSlug + Model isStreamInitialization_Model `protobuf_oneof:"model"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StreamInitialization) Reset() { @@ -1383,13 +1433,47 @@ func (x *StreamInitialization) GetConversationId() string { return "" } +func (x *StreamInitialization) GetModel() isStreamInitialization_Model { + if x != nil { + return x.Model + } + return nil +} + func (x *StreamInitialization) GetLanguageModel() LanguageModel { if x != nil { - return x.LanguageModel + if x, ok := x.Model.(*StreamInitialization_LanguageModel); ok { + return x.LanguageModel + } } return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } +func (x *StreamInitialization) GetModelSlug() string { + if x != nil { + if x, ok := x.Model.(*StreamInitialization_ModelSlug); ok { + return x.ModelSlug + } + } + return "" +} + +type isStreamInitialization_Model interface { + isStreamInitialization_Model() +} + +type StreamInitialization_LanguageModel struct { + LanguageModel LanguageModel `protobuf:"varint,5,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel,oneof"` // deprecated: use model_slug instead +} + +type StreamInitialization_ModelSlug struct { + ModelSlug string `protobuf:"bytes,6,opt,name=model_slug,json=modelSlug,proto3,oneof"` // new: model slug string +} + +func (*StreamInitialization_LanguageModel) isStreamInitialization_Model() {} + +func (*StreamInitialization_ModelSlug) isStreamInitialization_Model() {} + // Designed as StreamPartBegin and StreamPartEnd to // handle the case where assistant and tool are called at the same time. 
// @@ -1700,13 +1784,17 @@ func (x *StreamError) GetErrorMessage() string { // // the conversation will be created and returned. type CreateConversationMessageStreamRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` - LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` - UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` - UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` - ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` + // Types that are valid to be assigned to Model: + // + // *CreateConversationMessageStreamRequest_LanguageModel + // *CreateConversationMessageStreamRequest_ModelSlug + Model isCreateConversationMessageStreamRequest_Model `protobuf_oneof:"model"` + UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` + UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` + ConversationType *ConversationType 
`protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1755,13 +1843,31 @@ func (x *CreateConversationMessageStreamRequest) GetConversationId() string { return "" } +func (x *CreateConversationMessageStreamRequest) GetModel() isCreateConversationMessageStreamRequest_Model { + if x != nil { + return x.Model + } + return nil +} + func (x *CreateConversationMessageStreamRequest) GetLanguageModel() LanguageModel { if x != nil { - return x.LanguageModel + if x, ok := x.Model.(*CreateConversationMessageStreamRequest_LanguageModel); ok { + return x.LanguageModel + } } return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } +func (x *CreateConversationMessageStreamRequest) GetModelSlug() string { + if x != nil { + if x, ok := x.Model.(*CreateConversationMessageStreamRequest_ModelSlug); ok { + return x.ModelSlug + } + } + return "" +} + func (x *CreateConversationMessageStreamRequest) GetUserMessage() string { if x != nil { return x.UserMessage @@ -1783,6 +1889,24 @@ func (x *CreateConversationMessageStreamRequest) GetConversationType() Conversat return ConversationType_CONVERSATION_TYPE_UNSPECIFIED } +type isCreateConversationMessageStreamRequest_Model interface { + isCreateConversationMessageStreamRequest_Model() +} + +type CreateConversationMessageStreamRequest_LanguageModel struct { + LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel,oneof"` // deprecated: use model_slug instead +} + +type CreateConversationMessageStreamRequest_ModelSlug struct { + ModelSlug string `protobuf:"bytes,7,opt,name=model_slug,json=modelSlug,proto3,oneof"` // new: model slug string +} + +func (*CreateConversationMessageStreamRequest_LanguageModel) isCreateConversationMessageStreamRequest_Model() { +} + +func (*CreateConversationMessageStreamRequest_ModelSlug) 
isCreateConversationMessageStreamRequest_Model() { +} + // Response for streaming a message within an existing conversation type CreateConversationMessageStreamResponse struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -1987,12 +2111,15 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\aMessage\x12\x1d\n" + "\n" + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + - "\apayload\x18\x03 \x01(\v2\x17.chat.v1.MessagePayloadR\apayload\"\xa1\x01\n" + + "\apayload\x18\x03 \x01(\v2\x17.chat.v1.MessagePayloadR\apayload\"\xd4\x01\n" + "\fConversation\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + "\x05title\x18\x03 \x01(\tR\x05title\x12=\n" + - "\x0elanguage_model\x18\x02 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12,\n" + - "\bmessages\x18\x04 \x03(\v2\x10.chat.v1.MessageR\bmessages\"M\n" + + "\x0elanguage_model\x18\x02 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\"\n" + + "\n" + + "model_slug\x18\x05 \x01(\tH\x00R\tmodelSlug\x88\x01\x01\x12,\n" + + "\bmessages\x18\x04 \x03(\v2\x10.chat.v1.MessageR\bmessagesB\r\n" + + "\v_model_slug\"M\n" + "\x18ListConversationsRequest\x12\"\n" + "\n" + "project_id\x18\x01 \x01(\tH\x00R\tprojectId\x88\x01\x01B\r\n" + @@ -2002,15 +2129,18 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\x16GetConversationRequest\x12'\n" + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"T\n" + "\x17GetConversationResponse\x129\n" + - "\fconversation\x18\x01 \x01(\v2\x15.chat.v1.ConversationR\fconversation\"\x92\x03\n" + + "\fconversation\x18\x01 \x01(\v2\x15.chat.v1.ConversationR\fconversation\"\xbe\x03\n" + " CreateConversationMessageRequest\x12\x1d\n" + "\n" + "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" + - "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12=\n" + - "\x0elanguage_model\x18\x03 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12!\n" + + "\x0fconversation_id\x18\x02 \x01(\tH\x01R\x0econversationId\x88\x01\x01\x12?\n" + + "\x0elanguage_model\x18\x03 
\x01(\x0e2\x16.chat.v1.LanguageModelH\x00R\rlanguageModel\x12\x1f\n" + + "\n" + + "model_slug\x18\a \x01(\tH\x00R\tmodelSlug\x12!\n" + "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" + - "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" + - "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x02R\x10conversationType\x88\x01\x01B\x12\n" + + "\x12user_selected_text\x18\x05 \x01(\tH\x02R\x10userSelectedText\x88\x01\x01\x12K\n" + + "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x03R\x10conversationType\x88\x01\x01B\a\n" + + "\x05modelB\x12\n" + "\x10_conversation_idB\x15\n" + "\x13_user_selected_textB\x14\n" + "\x12_conversation_type\"^\n" + @@ -2029,10 +2159,13 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\x04slug\x18\x02 \x01(\tR\x04slug\"\x1c\n" + "\x1aListSupportedModelsRequest\"N\n" + "\x1bListSupportedModelsResponse\x12/\n" + - "\x06models\x18\x01 \x03(\v2\x17.chat.v1.SupportedModelR\x06models\"~\n" + + "\x06models\x18\x01 \x03(\v2\x17.chat.v1.SupportedModelR\x06models\"\xaa\x01\n" + "\x14StreamInitialization\x12'\n" + - "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12=\n" + - "\x0elanguage_model\x18\x05 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\"c\n" + + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12?\n" + + "\x0elanguage_model\x18\x05 \x01(\x0e2\x16.chat.v1.LanguageModelH\x00R\rlanguageModel\x12\x1f\n" + + "\n" + + "model_slug\x18\x06 \x01(\tH\x00R\tmodelSlugB\a\n" + + "\x05model\"c\n" + "\x0fStreamPartBegin\x12\x1d\n" + "\n" + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + @@ -2052,15 +2185,18 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\x12StreamFinalization\x12'\n" + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"2\n" + "\vStreamError\x12#\n" + - "\rerror_message\x18\x01 \x01(\tR\ferrorMessage\"\x98\x03\n" + + "\rerror_message\x18\x01 \x01(\tR\ferrorMessage\"\xc4\x03\n" + 
"&CreateConversationMessageStreamRequest\x12\x1d\n" + "\n" + "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" + - "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12=\n" + - "\x0elanguage_model\x18\x03 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12!\n" + + "\x0fconversation_id\x18\x02 \x01(\tH\x01R\x0econversationId\x88\x01\x01\x12?\n" + + "\x0elanguage_model\x18\x03 \x01(\x0e2\x16.chat.v1.LanguageModelH\x00R\rlanguageModel\x12\x1f\n" + + "\n" + + "model_slug\x18\a \x01(\tH\x00R\tmodelSlug\x12!\n" + "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" + - "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" + - "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x02R\x10conversationType\x88\x01\x01B\x12\n" + + "\x12user_selected_text\x18\x05 \x01(\tH\x02R\x10userSelectedText\x88\x01\x01\x12K\n" + + "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x03R\x10conversationType\x88\x01\x01B\a\n" + + "\x05modelB\x12\n" + "\x10_conversation_idB\x15\n" + "\x13_user_selected_textB\x14\n" + "\x12_conversation_type\"\xb9\x04\n" + @@ -2215,9 +2351,20 @@ func file_chat_v1_chat_proto_init() { (*MessagePayload_ToolCall)(nil), (*MessagePayload_Unknown)(nil), } + file_chat_v1_chat_proto_msgTypes[8].OneofWrappers = []any{} file_chat_v1_chat_proto_msgTypes[9].OneofWrappers = []any{} - file_chat_v1_chat_proto_msgTypes[13].OneofWrappers = []any{} - file_chat_v1_chat_proto_msgTypes[29].OneofWrappers = []any{} + file_chat_v1_chat_proto_msgTypes[13].OneofWrappers = []any{ + (*CreateConversationMessageRequest_LanguageModel)(nil), + (*CreateConversationMessageRequest_ModelSlug)(nil), + } + file_chat_v1_chat_proto_msgTypes[22].OneofWrappers = []any{ + (*StreamInitialization_LanguageModel)(nil), + (*StreamInitialization_ModelSlug)(nil), + } + file_chat_v1_chat_proto_msgTypes[29].OneofWrappers = []any{ + (*CreateConversationMessageStreamRequest_LanguageModel)(nil), + 
(*CreateConversationMessageStreamRequest_ModelSlug)(nil), + } file_chat_v1_chat_proto_msgTypes[30].OneofWrappers = []any{ (*CreateConversationMessageStreamResponse_StreamInitialization)(nil), (*CreateConversationMessageStreamResponse_StreamPartBegin)(nil), diff --git a/pkg/gen/api/comment/v1/comment.pb.go b/pkg/gen/api/comment/v1/comment.pb.go index 8daf2720..b19607bd 100644 --- a/pkg/gen/api/comment/v1/comment.pb.go +++ b/pkg/gen/api/comment/v1/comment.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc (unknown) // source: comment/v1/comment.proto diff --git a/pkg/gen/api/project/v1/project.pb.go b/pkg/gen/api/project/v1/project.pb.go index f67566ca..99113e09 100644 --- a/pkg/gen/api/project/v1/project.pb.go +++ b/pkg/gen/api/project/v1/project.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc (unknown) // source: project/v1/project.proto diff --git a/pkg/gen/api/shared/v1/shared.pb.go b/pkg/gen/api/shared/v1/shared.pb.go index 58d084f2..5c3eb7c8 100644 --- a/pkg/gen/api/shared/v1/shared.pb.go +++ b/pkg/gen/api/shared/v1/shared.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc (unknown) // source: shared/v1/shared.proto diff --git a/pkg/gen/api/user/v1/user.pb.go b/pkg/gen/api/user/v1/user.pb.go index 85603cf2..c54615c4 100644 --- a/pkg/gen/api/user/v1/user.pb.go +++ b/pkg/gen/api/user/v1/user.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc (unknown) // source: user/v1/user.proto diff --git a/proto/chat/v1/chat.proto b/proto/chat/v1/chat.proto index ab8b7e12..2e4bee9f 100644 --- a/proto/chat/v1/chat.proto +++ b/proto/chat/v1/chat.proto @@ -7,35 +7,50 @@ import "google/api/annotations.proto"; option go_package = "paperdebugger/pkg/gen/api/chat/v1;chatv1"; service ChatService { - rpc ListConversations(ListConversationsRequest) returns (ListConversationsResponse) { - option (google.api.http) = {get: "/_pd/api/v1/chats/conversations"}; + rpc ListConversations(ListConversationsRequest) + returns (ListConversationsResponse) { + option (google.api.http) = { + get : "/_pd/api/v1/chats/conversations" + }; } - rpc GetConversation(GetConversationRequest) returns (GetConversationResponse) { - option (google.api.http) = {get: "/_pd/api/v1/chats/conversations/{conversation_id}"}; + rpc GetConversation(GetConversationRequest) + returns (GetConversationResponse) { + option (google.api.http) = { + get : "/_pd/api/v1/chats/conversations/{conversation_id}" + }; } - rpc CreateConversationMessage(CreateConversationMessageRequest) returns (CreateConversationMessageResponse) { + rpc CreateConversationMessage(CreateConversationMessageRequest) + returns (CreateConversationMessageResponse) { option (google.api.http) = { - post: "/_pd/api/v1/chats/conversations/messages" - body: "*" + post : "/_pd/api/v1/chats/conversations/messages" + body : "*" }; } - rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) returns (stream CreateConversationMessageStreamResponse) { + rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) + returns (stream CreateConversationMessageStreamResponse) { option (google.api.http) = { - post: "/_pd/api/v1/chats/conversations/messages/stream" - body: "*" + post : "/_pd/api/v1/chats/conversations/messages/stream" + body : "*" }; } - rpc UpdateConversation(UpdateConversationRequest) 
returns (UpdateConversationResponse) { + rpc UpdateConversation(UpdateConversationRequest) + returns (UpdateConversationResponse) { option (google.api.http) = { - patch: "/_pd/api/v1/chats/conversations/{conversation_id}" - body: "*" + patch : "/_pd/api/v1/chats/conversations/{conversation_id}" + body : "*" }; } - rpc DeleteConversation(DeleteConversationRequest) returns (DeleteConversationResponse) { - option (google.api.http) = {delete: "/_pd/api/v1/chats/conversations/{conversation_id}"}; + rpc DeleteConversation(DeleteConversationRequest) + returns (DeleteConversationResponse) { + option (google.api.http) = { + delete : "/_pd/api/v1/chats/conversations/{conversation_id}" + }; } - rpc ListSupportedModels(ListSupportedModelsRequest) returns (ListSupportedModelsResponse) { - option (google.api.http) = {get: "/_pd/api/v1/chats/models"}; + rpc ListSupportedModels(ListSupportedModelsRequest) + returns (ListSupportedModelsResponse) { + option (google.api.http) = { + get : "/_pd/api/v1/chats/models" + }; } } @@ -59,9 +74,9 @@ enum LanguageModel { message MessageTypeToolCall { string name = 1; - string args = 2; // Json string + string args = 2; // Json string string result = 3; // Json string - string error = 4; // Json string + string error = 4; // Json string } message MessageTypeToolCallPrepareArguments { @@ -69,22 +84,16 @@ message MessageTypeToolCallPrepareArguments { string args = 2; // Json string } -message MessageTypeSystem { - string content = 1; -} +message MessageTypeSystem { string content = 1; } -message MessageTypeAssistant { - string content = 1; -} +message MessageTypeAssistant { string content = 1; } message MessageTypeUser { string content = 1; optional string selected_text = 2; } -message MessageTypeUnknown { - string description = 1; -} +message MessageTypeUnknown { string description = 1; } message MessagePayload { oneof message_type { @@ -105,56 +114,48 @@ message Message { message Conversation { string id = 1; string title = 3; - LanguageModel 
language_model = 2; + LanguageModel language_model = 2; // deprecated: use model_slug instead + optional string model_slug = 5; // new: model slug string // If list conversations, then messages length is 0. repeated Message messages = 4; } -message ListConversationsRequest { - optional string project_id = 1; -} +message ListConversationsRequest { optional string project_id = 1; } message ListConversationsResponse { // In this response, the length of conversations[i].messages should be 0. repeated Conversation conversations = 1; } -message GetConversationRequest { - string conversation_id = 1; -} +message GetConversationRequest { string conversation_id = 1; } -message GetConversationResponse { - Conversation conversation = 1; -} +message GetConversationResponse { Conversation conversation = 1; } message CreateConversationMessageRequest { string project_id = 1; // If conversation_id is not provided, // a new conversation will be created and the id will be returned. optional string conversation_id = 2; - LanguageModel language_model = 3; + oneof model { + LanguageModel language_model = 3; // deprecated: use model_slug instead + string model_slug = 7; // new: model slug string + } string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; } -message CreateConversationMessageResponse { - Conversation conversation = 1; -} +message CreateConversationMessageResponse { Conversation conversation = 1; } message UpdateConversationRequest { string conversation_id = 1; string title = 2; } -message UpdateConversationResponse { - Conversation conversation = 1; -} +message UpdateConversationResponse { Conversation conversation = 1; } -message DeleteConversationRequest { - string conversation_id = 1; -} +message DeleteConversationRequest { string conversation_id = 1; } message DeleteConversationResponse { // explicitly empty @@ -169,16 +170,17 @@ message ListSupportedModelsRequest { // explicitly empty } -message 
ListSupportedModelsResponse { - repeated SupportedModel models = 1; -} +message ListSupportedModelsResponse { repeated SupportedModel models = 1; } // ============================== Streaming Messages // Information sent once at the beginning of a new conversation stream message StreamInitialization { string conversation_id = 1; - LanguageModel language_model = 5; + oneof model { + LanguageModel language_model = 5; // deprecated: use model_slug instead + string model_slug = 6; // new: model slug string + } } // Designed as StreamPartBegin and StreamPartEnd to @@ -195,7 +197,7 @@ message StreamPartBegin { // and the StreamPartEnd can be directly called when the result is ready. message MessageChunk { string message_id = 1; // The id of the message that this chunk belongs to - string delta = 2; // The small piece of text + string delta = 2; // The small piece of text } message IncompleteIndicator { @@ -217,9 +219,7 @@ message StreamFinalization { // it should be called after the entire API call is finished. } -message StreamError { - string error_message = 1; -} +message StreamError { string error_message = 1; } // Currently, we inject two types of messages: // 1. System message @@ -227,7 +227,8 @@ message StreamError { enum ConversationType { CONVERSATION_TYPE_UNSPECIFIED = 0; - CONVERSATION_TYPE_DEBUG = 1; // does not contain any customized messages, the inapp_history and openai_history are synced. + CONVERSATION_TYPE_DEBUG = 1; // does not contain any customized messages, the + // inapp_history and openai_history are synced. 
// CONVERSATION_TYPE_NO_SYSTEM_MESSAGE_INJECTION = 2; // CONVERSATION_TYPE_NO_USER_MESSAGE_INJECTION = 3; } @@ -238,7 +239,10 @@ enum ConversationType { message CreateConversationMessageStreamRequest { string project_id = 1; optional string conversation_id = 2; - LanguageModel language_model = 3; + oneof model { + LanguageModel language_model = 3; // deprecated: use model_slug instead + string model_slug = 7; // new: model slug string + } string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; diff --git a/webapp/_webapp/src/pkg/gen/apiclient/auth/v1/auth_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/auth/v1/auth_pb.ts index 04201eaa..1c0c4dc1 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/auth/v1/auth_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/auth/v1/auth_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.2 with parameter "target=ts" // @generated from file auth/v1/auth.proto (package auth.v1, syntax proto3) /* eslint-disable */ diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts index 2e15f8ea..aed8f7dd 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.2 with parameter "target=ts" // @generated from file chat/v1/chat.proto (package chat.v1, syntax proto3) /* eslint-disable */ @@ -11,7 +11,7 @@ import type { Message as Message$1 } from "@bufbuild/protobuf"; * Describes the file chat/v1/chat.proto. 
*/ export const file_chat_v1_chat: GenFile = /*@__PURE__*/ - fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCJ9CgxDb252ZXJzYXRpb24SCgoCaWQYASABKAkSDQoFdGl0bGUYAyABKAkSLgoObGFuZ3VhZ2VfbW9kZWwYAiABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwSIgoIbWVzc2FnZXMYBCADKAsyEC5jaGF0LnYxLk1lc3NhZ2UiQgoYTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0EhcKCnByb2plY3RfaWQYASABKAlIAIgBAUINCgtfcHJvamVjdF9pZCJJChlMaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlEiwKDWNvbnZlcnNhdGlvbnMYASADKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiIxChZHZXRDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSJGChdHZXRDb252ZXJzYXRpb25SZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiK3AgogQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QSEgoKcHJvamVjdF9pZBgBIAEoCRIcCg9jb252ZXJzYXRpb25faWQYAiABKAlIAIgBARIuCg5sYW5ndWFnZV9tb2RlbBgDIAEoDjIWLmNoYXQudjEuTGFuZ3VhZ2VNb2RlbBIUCgx1c2VyX21lc3NhZ2UYBCABKAkSHwoSdXNlcl9zZWxlY3RlZF90ZXh0GAUgASgJSAGIAQESOQoRY29udmVyc2F0aW9uX3R5cGUYBiABKA4yGS5jaGF0LnYxLkNvbnZlcnNhdGlvb
lR5cGVIAogBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE191c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlIlAKIUNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiJDChlVcGRhdGVDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCRINCgV0aXRsZRgCIAEoCSJJChpVcGRhdGVDb252ZXJzYXRpb25SZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiI0ChlEZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSIcChpEZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSIsCg5TdXBwb3J0ZWRNb2RlbBIMCgRuYW1lGAEgASgJEgwKBHNsdWcYAiABKAkiHAoaTGlzdFN1cHBvcnRlZE1vZGVsc1JlcXVlc3QiRgobTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlEicKBm1vZGVscxgBIAMoCzIXLmNoYXQudjEuU3VwcG9ydGVkTW9kZWwiXwoUU3RyZWFtSW5pdGlhbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEi4KDmxhbmd1YWdlX21vZGVsGAUgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsIk8KD1N0cmVhbVBhcnRCZWdpbhISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYxLk1lc3NhZ2VQYXlsb2FkIjEKDE1lc3NhZ2VDaHVuaxISCgptZXNzYWdlX2lkGAEgASgJEg0KBWRlbHRhGAIgASgJIjoKE0luY29tcGxldGVJbmRpY2F0b3ISDgoGcmVhc29uGAEgASgJEhMKC3Jlc3BvbnNlX2lkGAIgASgJIk0KDVN0cmVhbVBhcnRFbmQSEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCItChJTdHJlYW1GaW5hbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIiQKC1N0cmVhbUVycm9yEhUKDWVycm9yX21lc3NhZ2UYASABKAkivQIKJkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSACIAQESLgoObGFuZ3VhZ2VfbW9kZWwYAyABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwSFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgBiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52MS5Db252ZXJzYXRpb25UeXBlSAKIAQFCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSK/AwonQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlEj4KFXN0cmVhbV9pbml0aWFsaXphdGlvbhgBIAEoCzIdLmNoYXQudjEuU3RyZWFtSW5pdGlhbGl6YXRpb25IABI1ChFzdHJlYW1fcGFydF9iZWdpbhgCIAEoCzIYLmNoYXQudjEuU3RyZWFtUGFydEJlZ2luSAASLgoNbWVzc2FnZV9jaHVuaxgDIAEoCzIVLmNoYXQud
jEuTWVzc2FnZUNodW5rSAASPAoUaW5jb21wbGV0ZV9pbmRpY2F0b3IYBCABKAsyHC5jaGF0LnYxLkluY29tcGxldGVJbmRpY2F0b3JIABIxCg9zdHJlYW1fcGFydF9lbmQYBSABKAsyFi5jaGF0LnYxLlN0cmVhbVBhcnRFbmRIABI6ChNzdHJlYW1fZmluYWxpemF0aW9uGAYgASgLMhsuY2hhdC52MS5TdHJlYW1GaW5hbGl6YXRpb25IABIsCgxzdHJlYW1fZXJyb3IYByABKAsyFC5jaGF0LnYxLlN0cmVhbUVycm9ySABCEgoQcmVzcG9uc2VfcGF5bG9hZCr/AwoNTGFuZ3VhZ2VNb2RlbBIeChpMQU5HVUFHRV9NT0RFTF9VTlNQRUNJRklFRBAAEh8KG0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0TxABEiQKIExBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0MV9NSU5JEAISHwobTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDQxEAQSHgoaTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDUQBxIjCh9MQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9NSU5JEAgSIwofTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfTkFOTxAJEioKJkxBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X0NIQVRfTEFURVNUEAoSHAoYTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xEAsSIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xX01JTkkQDBIcChhMQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzMQDRIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzNfTUlOSRAOEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PNF9NSU5JEA8SKwonTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0NPREVYX01JTklfTEFURVNUEBAqUgoQQ29udmVyc2F0aW9uVHlwZRIhCh1DT05WRVJTQVRJT05fVFlQRV9VTlNQRUNJRklFRBAAEhsKF0NPTlZFUlNBVElPTl9UWVBFX0RFQlVHEAEy0ggKC0NoYXRTZXJ2aWNlEoMBChFMaXN0Q29udmVyc2F0aW9ucxIhLmNoYXQudjEuTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0GiIuY2hhdC52MS5MaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMSjwEKD0dldENvbnZlcnNhdGlvbhIfLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVxdWVzdBogLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMxIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKnAQoZQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZRIpLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QaKi5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZSIzgtPkkwItOgEqIigvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzEsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMvb
WVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52MS5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjEuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YxL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MUIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MTtjaGF0djGiAgNDWFiqAgdDaGF0LlYxygIHQ2hhdFxWMeICE0NoYXRcVjFcR1BCTWV0YWRhdGHqAghDaGF0OjpWMWIGcHJvdG8z", [file_google_api_annotations]); + fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCKlAQoMQ29udmVyc2F0aW9uEgoKAmlkGAEgASgJEg0KBXRpdGxlGAMgASgJEi4KDmxhbm
d1YWdlX21vZGVsGAIgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhcKCm1vZGVsX3NsdWcYBSABKAlIAIgBARIiCghtZXNzYWdlcxgEIAMoCzIQLmNoYXQudjEuTWVzc2FnZUINCgtfbW9kZWxfc2x1ZyJCChhMaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QSFwoKcHJvamVjdF9pZBgBIAEoCUgAiAEBQg0KC19wcm9qZWN0X2lkIkkKGUxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2USLAoNY29udmVyc2F0aW9ucxgBIAMoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjEKFkdldENvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIkYKF0dldENvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uItgCCiBDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgBiAEBEjAKDmxhbmd1YWdlX21vZGVsGAMgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsSAASFAoKbW9kZWxfc2x1ZxgHIAEoCUgAEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAogBARI5ChFjb252ZXJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjEuQ29udmVyc2F0aW9uVHlwZUgDiAEBQgcKBW1vZGVsQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUiUAohQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIkMKGVVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEg0KBXRpdGxlGAIgASgJIkkKGlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjQKGURlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIhwKGkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIiwKDlN1cHBvcnRlZE1vZGVsEgwKBG5hbWUYASABKAkSDAoEc2x1ZxgCIAEoCSIcChpMaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdCJGChtMaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2USJwoGbW9kZWxzGAEgAygLMhcuY2hhdC52MS5TdXBwb3J0ZWRNb2RlbCKAAQoUU3RyZWFtSW5pdGlhbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEjAKDmxhbmd1YWdlX21vZGVsGAUgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsSAASFAoKbW9kZWxfc2x1ZxgGIAEoCUgAQgcKBW1vZGVsIk8KD1N0cmVhbVBhcnRCZWdpbhISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYxLk1lc3NhZ2VQYXlsb2FkIjEKDE1lc3NhZ2VDaHVuaxISCgptZXNzYWdlX2lkGAEgASgJEg0KBWRlbHRhGAIgASgJIjoKE0luY29tcGxldGVJbmRpY2F0b3ISDgoGcmVhc29uGAEgASgJEhMKC3Jlc3Bvbn
NlX2lkGAIgASgJIk0KDVN0cmVhbVBhcnRFbmQSEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCItChJTdHJlYW1GaW5hbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIiQKC1N0cmVhbUVycm9yEhUKDWVycm9yX21lc3NhZ2UYASABKAki3gIKJkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSAGIAQESMAoObGFuZ3VhZ2VfbW9kZWwYAyABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWxIABIUCgptb2RlbF9zbHVnGAcgASgJSAASFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgCiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52MS5Db252ZXJzYXRpb25UeXBlSAOIAQFCBwoFbW9kZWxCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSK/AwonQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlEj4KFXN0cmVhbV9pbml0aWFsaXphdGlvbhgBIAEoCzIdLmNoYXQudjEuU3RyZWFtSW5pdGlhbGl6YXRpb25IABI1ChFzdHJlYW1fcGFydF9iZWdpbhgCIAEoCzIYLmNoYXQudjEuU3RyZWFtUGFydEJlZ2luSAASLgoNbWVzc2FnZV9jaHVuaxgDIAEoCzIVLmNoYXQudjEuTWVzc2FnZUNodW5rSAASPAoUaW5jb21wbGV0ZV9pbmRpY2F0b3IYBCABKAsyHC5jaGF0LnYxLkluY29tcGxldGVJbmRpY2F0b3JIABIxCg9zdHJlYW1fcGFydF9lbmQYBSABKAsyFi5jaGF0LnYxLlN0cmVhbVBhcnRFbmRIABI6ChNzdHJlYW1fZmluYWxpemF0aW9uGAYgASgLMhsuY2hhdC52MS5TdHJlYW1GaW5hbGl6YXRpb25IABIsCgxzdHJlYW1fZXJyb3IYByABKAsyFC5jaGF0LnYxLlN0cmVhbUVycm9ySABCEgoQcmVzcG9uc2VfcGF5bG9hZCr/AwoNTGFuZ3VhZ2VNb2RlbBIeChpMQU5HVUFHRV9NT0RFTF9VTlNQRUNJRklFRBAAEh8KG0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0TxABEiQKIExBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0MV9NSU5JEAISHwobTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDQxEAQSHgoaTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDUQBxIjCh9MQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9NSU5JEAgSIwofTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfTkFOTxAJEioKJkxBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X0NIQVRfTEFURVNUEAoSHAoYTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xEAsSIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xX01JTkkQDBIcChhMQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzMQDRIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzNfTUlOSRAOEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PNF9NSU5JEA8SKwonTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0NPREVYX01JTklfTEFURVNUEBAqUgoQQ29udm
Vyc2F0aW9uVHlwZRIhCh1DT05WRVJTQVRJT05fVFlQRV9VTlNQRUNJRklFRBAAEhsKF0NPTlZFUlNBVElPTl9UWVBFX0RFQlVHEAEy0ggKC0NoYXRTZXJ2aWNlEoMBChFMaXN0Q29udmVyc2F0aW9ucxIhLmNoYXQudjEuTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0GiIuY2hhdC52MS5MaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMSjwEKD0dldENvbnZlcnNhdGlvbhIfLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVxdWVzdBogLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMxIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKnAQoZQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZRIpLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QaKi5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZSIzgtPkkwItOgEqIigvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzEsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52MS5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjEuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YxL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MUIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MTtjaGF0djGiAgNDWFiqAgdDaGF0LlYxygIHQ2hhdFxWMeICE0NoYXRcVjFcR1BCTWV0YWRhdGHqAghDaGF0OjpWMWIGcHJvdG8z", [file_google_api_annotations]); /** * @generated from message chat.v1.MessageTypeToolCall @@ -238,10 +238,19 @@ export type Conversation = Message$1<"chat.v1.Conversation"> & { title: string; /** + * deprecated: use model_slug instead 
+ * * @generated from field: chat.v1.LanguageModel language_model = 2; */ languageModel: LanguageModel; + /** + * new: model slug string + * + * @generated from field: optional string model_slug = 5; + */ + modelSlug?: string; + /** * If list conversations, then messages length is 0. * @@ -345,9 +354,25 @@ export type CreateConversationMessageRequest = Message$1<"chat.v1.CreateConversa conversationId?: string; /** - * @generated from field: chat.v1.LanguageModel language_model = 3; + * @generated from oneof chat.v1.CreateConversationMessageRequest.model */ - languageModel: LanguageModel; + model: { + /** + * deprecated: use model_slug instead + * + * @generated from field: chat.v1.LanguageModel language_model = 3; + */ + value: LanguageModel; + case: "languageModel"; + } | { + /** + * new: model slug string + * + * @generated from field: string model_slug = 7; + */ + value: string; + case: "modelSlug"; + } | { case: undefined; value?: undefined }; /** * @generated from field: string user_message = 4; @@ -526,9 +551,25 @@ export type StreamInitialization = Message$1<"chat.v1.StreamInitialization"> & { conversationId: string; /** - * @generated from field: chat.v1.LanguageModel language_model = 5; + * @generated from oneof chat.v1.StreamInitialization.model */ - languageModel: LanguageModel; + model: { + /** + * deprecated: use model_slug instead + * + * @generated from field: chat.v1.LanguageModel language_model = 5; + */ + value: LanguageModel; + case: "languageModel"; + } | { + /** + * new: model slug string + * + * @generated from field: string model_slug = 6; + */ + value: string; + case: "modelSlug"; + } | { case: undefined; value?: undefined }; }; /** @@ -699,9 +740,25 @@ export type CreateConversationMessageStreamRequest = Message$1<"chat.v1.CreateCo conversationId?: string; /** - * @generated from field: chat.v1.LanguageModel language_model = 3; + * @generated from oneof chat.v1.CreateConversationMessageStreamRequest.model */ - languageModel: LanguageModel; 
+ model: { + /** + * deprecated: use model_slug instead + * + * @generated from field: chat.v1.LanguageModel language_model = 3; + */ + value: LanguageModel; + case: "languageModel"; + } | { + /** + * new: model slug string + * + * @generated from field: string model_slug = 7; + */ + value: string; + case: "modelSlug"; + } | { case: undefined; value?: undefined }; /** * @generated from field: string user_message = 4; @@ -880,7 +937,7 @@ export enum ConversationType { UNSPECIFIED = 0, /** - * does not contain any customized messages, the inapp_history and openai_history are synced. + * does not contain any customized messages, the * * @generated from enum value: CONVERSATION_TYPE_DEBUG = 1; */ diff --git a/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts index d865420c..ddc57ed1 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.2 with parameter "target=ts" // @generated from file comment/v1/comment.proto (package comment.v1, syntax proto3) /* eslint-disable */ diff --git a/webapp/_webapp/src/pkg/gen/apiclient/project/v1/project_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/project/v1/project_pb.ts index f6186351..0fb41e97 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/project/v1/project_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/project/v1/project_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.2 with parameter "target=ts" // @generated from file project/v1/project.proto (package project.v1, syntax proto3) /* eslint-disable */ diff --git a/webapp/_webapp/src/pkg/gen/apiclient/shared/v1/shared_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/shared/v1/shared_pb.ts index 39093c32..7d17d73d 100644 --- 
a/webapp/_webapp/src/pkg/gen/apiclient/shared/v1/shared_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/shared/v1/shared_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.2 with parameter "target=ts" // @generated from file shared/v1/shared.proto (package shared.v1, syntax proto3) /* eslint-disable */ diff --git a/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts index 5a831081..ced72da5 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/user/v1/user_pb.ts @@ -1,4 +1,4 @@ -// @generated by protoc-gen-es v2.10.1 with parameter "target=ts" +// @generated by protoc-gen-es v2.10.2 with parameter "target=ts" // @generated from file user/v1/user.proto (package user.v1, syntax proto3) /* eslint-disable */ From d2cb831be76083f59520e9b6491505eb409c635c Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 01:15:28 +0800 Subject: [PATCH 02/14] feat: implement modelSlug in backend --- .../api/chat/create_conversation_message.go | 15 ++++++++--- .../create_conversation_message_stream.go | 4 ++- internal/models/conversation.go | 1 + internal/services/chat.go | 3 ++- .../services/toolkit/client/completion.go | 10 +++---- .../toolkit/client/get_conversation_title.go | 2 +- internal/services/toolkit/client/utils.go | 27 +++++++++---------- internal/services/toolkit/handler/stream.go | 24 ++++++++++++----- 8 files changed, 55 insertions(+), 31 deletions(-) diff --git a/internal/api/chat/create_conversation_message.go b/internal/api/chat/create_conversation_message.go index 9f78a2ac..c3f9aeb6 100644 --- a/internal/api/chat/create_conversation_message.go +++ b/internal/api/chat/create_conversation_message.go @@ -116,6 +116,7 @@ func (s *ChatServer) createConversation( userMessage string, userSelectedText string, languageModel models.LanguageModel, + modelSlug string, 
conversationType chatv1.ConversationType, ) (*models.Conversation, error) { systemPrompt, err := s.chatService.GetSystemPrompt(ctx, latexFullSource, projectInstructions, userInstructions, conversationType) @@ -135,7 +136,7 @@ func (s *ChatServer) createConversation( } return s.chatService.InsertConversationToDB( - ctx, userId, projectId, languageModel, messages, oaiHistory.OfInputItemList, + ctx, userId, projectId, languageModel, modelSlug, messages, oaiHistory.OfInputItemList, ) } @@ -180,7 +181,7 @@ func (s *ChatServer) appendConversationMessage( // 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 // conversationType 可以在一次 conversation 中多次切换 -func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, languageModel models.LanguageModel, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { +func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, languageModel models.LanguageModel, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { actor, err := contextutil.GetActor(ctx) if err != nil { return ctx, nil, nil, err @@ -224,6 +225,7 @@ func (s *ChatServer) prepare(ctx context.Context, projectId string, conversation userMessage, userSelectedText, languageModel, + modelSlug, conversationType, ) } else { @@ -258,6 +260,12 @@ func (s *ChatServer) CreateConversationMessage( req *chatv1.CreateConversationMessageRequest, ) (*chatv1.CreateConversationMessageResponse, error) { languageModel := models.LanguageModel(req.GetLanguageModel()) + modelSlug := req.GetModelSlug() + // still using old api + if modelSlug == "" { + modelSlug = languageModel.Name() + } + ctx, conversation, settings, err := s.prepare( ctx, req.GetProjectId(), @@ -265,6 +273,7 @@ func (s *ChatServer) CreateConversationMessage( 
req.GetUserMessage(), req.GetUserSelectedText(), languageModel, + modelSlug, req.GetConversationType(), ) if err != nil { @@ -275,7 +284,7 @@ func (s *ChatServer) CreateConversationMessage( Endpoint: s.cfg.OpenAIBaseURL, APIKey: settings.OpenAIAPIKey, } - openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletion(ctx, languageModel, conversation.OpenaiChatHistory, llmProvider) + openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletion(ctx, modelSlug, languageModel, conversation.OpenaiChatHistory, llmProvider) if err != nil { return nil, err } diff --git a/internal/api/chat/create_conversation_message_stream.go b/internal/api/chat/create_conversation_message_stream.go index 0e659a28..d635ccf7 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -26,6 +26,7 @@ func (s *ChatServer) CreateConversationMessageStream( ctx := stream.Context() languageModel := models.LanguageModel(req.GetLanguageModel()) + modelSlug := req.GetModelSlug() ctx, conversation, settings, err := s.prepare( ctx, req.GetProjectId(), @@ -33,6 +34,7 @@ func (s *ChatServer) CreateConversationMessageStream( req.GetUserMessage(), req.GetUserSelectedText(), languageModel, + modelSlug, req.GetConversationType(), ) if err != nil { @@ -45,7 +47,7 @@ func (s *ChatServer) CreateConversationMessageStream( APIKey: settings.OpenAIAPIKey, } - openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletionStream(ctx, stream, conversation.ID.Hex(), languageModel, conversation.OpenaiChatHistory, llmProvider) + openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletionStream(ctx, stream, conversation.ID.Hex(), modelSlug, &languageModel, conversation.OpenaiChatHistory, llmProvider) if err != nil { return s.sendStreamError(stream, err) } diff --git a/internal/models/conversation.go b/internal/models/conversation.go index 23b0e2b3..70d48300 100644 --- a/internal/models/conversation.go +++ 
b/internal/models/conversation.go @@ -11,6 +11,7 @@ type Conversation struct { ProjectID string `bson:"project_id"` Title string `bson:"title"` LanguageModel LanguageModel `bson:"language_model"` + ModelSlug string `bson:"model_slug"` InappChatHistory []bson.M `bson:"inapp_chat_history"` // Store as raw BSON to avoid protobuf decoding issues OpenaiChatHistory responses.ResponseInputParam `bson:"openai_chat_history"` // 实际上发给 GPT 的聊天历史 diff --git a/internal/services/chat.go b/internal/services/chat.go index 131be4d9..ebd0fef1 100644 --- a/internal/services/chat.go +++ b/internal/services/chat.go @@ -92,7 +92,7 @@ func (s *ChatService) GetPrompt(ctx context.Context, content string, selectedTex return strings.TrimSpace(userPromptBuffer.String()), nil } -func (s *ChatService) InsertConversationToDB(ctx context.Context, userID bson.ObjectID, projectID string, languageModel models.LanguageModel, inappChatHistory []*chatv1.Message, openaiChatHistory responses.ResponseInputParam) (*models.Conversation, error) { +func (s *ChatService) InsertConversationToDB(ctx context.Context, userID bson.ObjectID, projectID string, languageModel models.LanguageModel, modelSlug string, inappChatHistory []*chatv1.Message, openaiChatHistory responses.ResponseInputParam) (*models.Conversation, error) { // Convert protobuf messages to BSON bsonMessages := make([]bson.M, len(inappChatHistory)) for i := range inappChatHistory { @@ -117,6 +117,7 @@ func (s *ChatService) InsertConversationToDB(ctx context.Context, userID bson.Ob ProjectID: projectID, Title: DefaultConversationTitle, LanguageModel: languageModel, + ModelSlug: modelSlug, InappChatHistory: bsonMessages, OpenaiChatHistory: openaiChatHistory, } diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index 6bc73b88..e9d7d6e9 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -21,8 +21,8 @@ import ( // 1. 
The full chat history sent to the language model (including any tool call results). // 2. The incremental chat history visible to the user (including tool call results and assistant responses). // 3. An error, if any occurred during the process. -func (a *AIClient) ChatCompletion(ctx context.Context, languageModel models.LanguageModel, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { - openaiChatHistory, inappChatHistory, err := a.ChatCompletionStream(ctx, nil, "", languageModel, messages, llmProvider) +func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, languageModel models.LanguageModel, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { + openaiChatHistory, inappChatHistory, err := a.ChatCompletionStream(ctx, nil, "", modelSlug, &languageModel, messages, llmProvider) if err != nil { return nil, nil, err } @@ -50,11 +50,11 @@ func (a *AIClient) ChatCompletion(ctx context.Context, languageModel models.Lang // - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop. // - If no tool calls are needed, it appends the assistant's response and exits the loop. // - Finally, it returns the updated chat histories and any error encountered. 
-func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, languageModel models.LanguageModel, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { +func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, languageModel *models.LanguageModel, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { openaiChatHistory := responses.ResponseNewParamsInputUnion{OfInputItemList: messages} inappChatHistory := []chatv1.Message{} - streamHandler := handler.NewStreamHandler(callbackStream, conversationId, languageModel) + streamHandler := handler.NewStreamHandler(callbackStream, conversationId, modelSlug, languageModel) streamHandler.SendInitialization() defer func() { @@ -62,7 +62,7 @@ func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chat }() oaiClient := a.GetOpenAIClient(llmProvider) - params := getDefaultParams(languageModel, openaiChatHistory, a.toolCallHandler.Registry) + params := getDefaultParams(modelSlug, openaiChatHistory, a.toolCallHandler.Registry) for { params.Input = openaiChatHistory diff --git a/internal/services/toolkit/client/get_conversation_title.go b/internal/services/toolkit/client/get_conversation_title.go index f956bf0d..f2b26a00 100644 --- a/internal/services/toolkit/client/get_conversation_title.go +++ b/internal/services/toolkit/client/get_conversation_title.go @@ -29,7 +29,7 @@ func (a *AIClient) GetConversationTitle(ctx context.Context, inappChatHistory [] message := strings.Join(messages, "\n") message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the 
discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. Give me the title only, no other text including any other words.", message) - _, resp, err := a.ChatCompletion(ctx, models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), responses.ResponseInputParam{ + _, resp, err := a.ChatCompletion(ctx, "gpt-4.1-mini", models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), responses.ResponseInputParam{ { OfInputMessage: &responses.ResponseInputItemMessageParam{ Role: "system", diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index d2b4d4c1..f9c0269f 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -6,7 +6,6 @@ This file contains utility functions for the client package. (Mainly miscellaneo It is used to append assistant responses to both OpenAI and in-app chat histories, and to create response items for chat interactions. */ import ( - "paperdebugger/internal/models" "paperdebugger/internal/services/toolkit/registry" chatv1 "paperdebugger/pkg/gen/api/chat/v1" @@ -43,26 +42,26 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI // getDefaultParams constructs the default parameters for a chat completion request. // The tool registry is managed centrally by the registry package. // The chat history is constructed manually, so Store must be set to false. 
-func getDefaultParams(languageModel models.LanguageModel, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { - if languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_MINI) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_NANO) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_CHAT_LATEST) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O4_MINI) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O3_MINI) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O3) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O1_MINI) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O1) || - languageModel == models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_CODEX_MINI_LATEST) { +func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { + if modelSlug == "gpt-5" || + modelSlug == "gpt-5-mini" || + modelSlug == "gpt-5-nano" || + modelSlug == "gpt-5-chat-latest" || + modelSlug == "o4-mini" || + modelSlug == "o3-mini" || + modelSlug == "o3" || + modelSlug == "o1-mini" || + modelSlug == "o1" || + modelSlug == "codex-mini-latest" { return responses.ResponseNewParams{ - Model: languageModel.Name(), + Model: modelSlug, Tools: toolRegistry.GetTools(), Input: chatHistory, Store: openai.Bool(false), } } return responses.ResponseNewParams{ - Model: languageModel.Name(), + Model: modelSlug, Temperature: openai.Float(0.7), MaxOutputTokens: openai.Int(4000), // DEBUG POINT: change this to test the frontend handler Tools: 
toolRegistry.GetTools(), // 工具注册由 registry 统一管理 diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go index 78eb9e27..94116a62 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -10,17 +10,20 @@ import ( type StreamHandler struct { callbackStream chatv1.ChatService_CreateConversationMessageStreamServer conversationId string - languageModel models.LanguageModel + modelSlug string + languageModel *models.LanguageModel } func NewStreamHandler( callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, - languageModel models.LanguageModel, + modelSlug string, + languageModel *models.LanguageModel, ) *StreamHandler { return &StreamHandler{ callbackStream: callbackStream, conversationId: conversationId, + modelSlug: modelSlug, languageModel: languageModel, } } @@ -29,12 +32,21 @@ func (h *StreamHandler) SendInitialization() { if h.callbackStream == nil { return } + streamInit := &chatv1.StreamInitialization{ + ConversationId: h.conversationId, + } + if h.languageModel != nil { + streamInit.Model = &chatv1.StreamInitialization_LanguageModel{ + LanguageModel: chatv1.LanguageModel(*h.languageModel), + } + } else { + streamInit.Model = &chatv1.StreamInitialization_ModelSlug{ + ModelSlug: h.modelSlug, + } + } h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamInitialization{ - StreamInitialization: &chatv1.StreamInitialization{ - ConversationId: h.conversationId, - LanguageModel: chatv1.LanguageModel(h.languageModel), - }, + StreamInitialization: streamInit, }, }) } From 1bee9c0a0959c6c69f28ba57a25dea969bcb860a Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 01:19:42 +0800 Subject: [PATCH 03/14] chore: chat.proto --- internal/api/chat/create_conversation_message_stream.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) 
diff --git a/internal/api/chat/create_conversation_message_stream.go b/internal/api/chat/create_conversation_message_stream.go index d635ccf7..912b29f5 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -43,8 +43,7 @@ func (s *ChatServer) CreateConversationMessageStream( // 用法跟 ChatCompletion 一样,只是传递了 stream 参数 llmProvider := &models.LLMProviderConfig{ - Endpoint: s.cfg.OpenAIBaseURL, - APIKey: settings.OpenAIAPIKey, + APIKey: settings.OpenAIAPIKey, } openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletionStream(ctx, stream, conversation.ID.Hex(), modelSlug, &languageModel, conversation.OpenaiChatHistory, llmProvider) From d1ca145df3f25275c1fe98290c63fc0db37ae2df Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 01:28:49 +0800 Subject: [PATCH 04/14] fix: compatibility --- internal/services/toolkit/client/completion.go | 2 +- internal/services/toolkit/client/utils.go | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index e9d7d6e9..13a0f07a 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -62,7 +62,7 @@ func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chat }() oaiClient := a.GetOpenAIClient(llmProvider) - params := getDefaultParams(modelSlug, openaiChatHistory, a.toolCallHandler.Registry) + params := getDefaultParams(*languageModel, modelSlug, openaiChatHistory, a.toolCallHandler.Registry) for { params.Input = openaiChatHistory diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index f9c0269f..780badcb 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -6,6 +6,7 @@ This file contains utility functions for the client package. 
(Mainly miscellaneo It is used to append assistant responses to both OpenAI and in-app chat histories, and to create response items for chat interactions. */ import ( + "paperdebugger/internal/models" "paperdebugger/internal/services/toolkit/registry" chatv1 "paperdebugger/pkg/gen/api/chat/v1" @@ -42,7 +43,11 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI // getDefaultParams constructs the default parameters for a chat completion request. // The tool registry is managed centrally by the registry package. // The chat history is constructed manually, so Store must be set to false. -func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { +func getDefaultParams(languageModel models.LanguageModel, modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { + if modelSlug == "" { + modelSlug = languageModel.Name() + } + if modelSlug == "gpt-5" || modelSlug == "gpt-5-mini" || modelSlug == "gpt-5-nano" || From 3b19704b7ef8bb32a379850c8266418a5df7bab1 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 01:31:16 +0800 Subject: [PATCH 05/14] docs: comments --- internal/api/chat/create_conversation_message.go | 2 +- internal/services/toolkit/client/utils.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/api/chat/create_conversation_message.go b/internal/api/chat/create_conversation_message.go index c3f9aeb6..34565f37 100644 --- a/internal/api/chat/create_conversation_message.go +++ b/internal/api/chat/create_conversation_message.go @@ -261,7 +261,7 @@ func (s *ChatServer) CreateConversationMessage( ) (*chatv1.CreateConversationMessageResponse, error) { languageModel := models.LanguageModel(req.GetLanguageModel()) modelSlug := req.GetModelSlug() - // still using old api + // still using old api.
This backward compatibility fallback converts the deprecated LanguageModel enum to a model slug. if modelSlug == "" { modelSlug = languageModel.Name() } diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index 780badcb..f1141f9f 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -44,6 +44,7 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI // The tool registry is managed centrally by the registry package. // The chat history is constructed manually, so Store must be set to false. func getDefaultParams(languageModel models.LanguageModel, modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { + // This backward compatibility fallback converts the deprecated LanguageModel enum to a model slug. if modelSlug == "" { modelSlug = languageModel.Name() } From 236b9b1a4f5873aa91c90b49d386afdf78524afc Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 12:59:31 +0800 Subject: [PATCH 06/14] fix: remove language model inside services --- .../create_conversation_message_stream.go | 2 +- .../services/toolkit/client/completion.go | 8 +- internal/services/toolkit/client/utils.go | 8 +- internal/services/toolkit/handler/stream.go | 29 +-- pkg/gen/api/chat/v1/chat.pb.go | 189 ++++-------------- proto/chat/v1/chat.proto | 19 +- webapp/_webapp/src/hooks/useLanguageModels.ts | 77 +------ .../_webapp/src/hooks/useSendMessageStream.ts | 5 +- .../src/pkg/gen/apiclient/chat/v1/chat_pb.ts | 89 ++++----- .../stores/conversation/conversation-store.ts | 4 +- .../stores/conversation/handlers/converter.ts | 6 +- .../handlers/handleStreamInitialization.ts | 2 +- .../chat/footer/toolbar/model-selection.tsx | 9 +- 13 files changed, 122 insertions(+), 325 deletions(-) diff --git a/internal/api/chat/create_conversation_message_stream.go 
b/internal/api/chat/create_conversation_message_stream.go index 912b29f5..2de51a1e 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -46,7 +46,7 @@ func (s *ChatServer) CreateConversationMessageStream( APIKey: settings.OpenAIAPIKey, } - openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletionStream(ctx, stream, conversation.ID.Hex(), modelSlug, &languageModel, conversation.OpenaiChatHistory, llmProvider) + openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletionStream(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistory, llmProvider) if err != nil { return s.sendStreamError(stream, err) } diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index 13a0f07a..447beca3 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -22,7 +22,7 @@ import ( // 2. The incremental chat history visible to the user (including tool call results and assistant responses). // 3. An error, if any occurred during the process. func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, languageModel models.LanguageModel, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { - openaiChatHistory, inappChatHistory, err := a.ChatCompletionStream(ctx, nil, "", modelSlug, &languageModel, messages, llmProvider) + openaiChatHistory, inappChatHistory, err := a.ChatCompletionStream(ctx, nil, "", modelSlug, messages, llmProvider) if err != nil { return nil, nil, err } @@ -50,11 +50,11 @@ func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, languag // - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop. 
// - If no tool calls are needed, it appends the assistant's response and exits the loop. // - Finally, it returns the updated chat histories and any error encountered. -func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, languageModel *models.LanguageModel, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { +func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { openaiChatHistory := responses.ResponseNewParamsInputUnion{OfInputItemList: messages} inappChatHistory := []chatv1.Message{} - streamHandler := handler.NewStreamHandler(callbackStream, conversationId, modelSlug, languageModel) + streamHandler := handler.NewStreamHandler(callbackStream, conversationId, modelSlug) streamHandler.SendInitialization() defer func() { @@ -62,7 +62,7 @@ func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chat }() oaiClient := a.GetOpenAIClient(llmProvider) - params := getDefaultParams(*languageModel, modelSlug, openaiChatHistory, a.toolCallHandler.Registry) + params := getDefaultParams(modelSlug, openaiChatHistory, a.toolCallHandler.Registry) for { params.Input = openaiChatHistory diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index f1141f9f..f9c0269f 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -6,7 +6,6 @@ This file contains utility functions for the client package. 
(Mainly miscellaneo It is used to append assistant responses to both OpenAI and in-app chat histories, and to create response items for chat interactions. */ import ( - "paperdebugger/internal/models" "paperdebugger/internal/services/toolkit/registry" chatv1 "paperdebugger/pkg/gen/api/chat/v1" @@ -43,12 +42,7 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI // getDefaultParams constructs the default parameters for a chat completion request. // The tool registry is managed centrally by the registry package. // The chat history is constructed manually, so Store must be set to false. -func getDefaultParams(languageModel models.LanguageModel, modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { - // This backward compatibility fallback converts the deprecated LanguageModel enum to a model slug. - if modelSlug == "" { - modelSlug = languageModel.Name() - } - +func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { if modelSlug == "gpt-5" || modelSlug == "gpt-5-mini" || modelSlug == "gpt-5-nano" || diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go index 94116a62..e0c3298e 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -1,7 +1,6 @@ package handler import ( - "paperdebugger/internal/models" chatv1 "paperdebugger/pkg/gen/api/chat/v1" "github.com/openai/openai-go/v2/responses" @@ -11,20 +10,29 @@ type StreamHandler struct { callbackStream chatv1.ChatService_CreateConversationMessageStreamServer conversationId string modelSlug string - languageModel *models.LanguageModel } func NewStreamHandler( callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, - languageModel *models.LanguageModel, 
) *StreamHandler { return &StreamHandler{ callbackStream: callbackStream, conversationId: conversationId, modelSlug: modelSlug, - languageModel: languageModel, + } +} + +func (h *StreamHandler) ConvertSlugToLanguageModel() chatv1.LanguageModel { + // TODO: finish this. + switch h.modelSlug { + case "gpt-4o": + return chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT4O + case "gpt-4.1-mini": + return chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI + default: + return chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41 } } @@ -32,18 +40,13 @@ func (h *StreamHandler) SendInitialization() { if h.callbackStream == nil { return } + streamInit := &chatv1.StreamInitialization{ ConversationId: h.conversationId, + ModelSlug: h.modelSlug, + LanguageModel: h.ConvertSlugToLanguageModel(), } - if h.languageModel != nil { - streamInit.Model = &chatv1.StreamInitialization_LanguageModel{ - LanguageModel: chatv1.LanguageModel(*h.languageModel), - } - } else { - streamInit.Model = &chatv1.StreamInitialization_ModelSlug{ - ModelSlug: h.modelSlug, - } - } + h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamInitialization{ StreamInitialization: streamInit, diff --git a/pkg/gen/api/chat/v1/chat.pb.go b/pkg/gen/api/chat/v1/chat.pb.go index ba97e54f..78e1997d 100644 --- a/pkg/gen/api/chat/v1/chat.pb.go +++ b/pkg/gen/api/chat/v1/chat.pb.go @@ -912,15 +912,12 @@ type CreateConversationMessageRequest struct { ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` // If conversation_id is not provided, // a new conversation will be created and the id will be returned. 
- ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` - // Types that are valid to be assigned to Model: - // - // *CreateConversationMessageRequest_LanguageModel - // *CreateConversationMessageRequest_ModelSlug - Model isCreateConversationMessageRequest_Model `protobuf_oneof:"model"` - UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` - UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` - ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` + ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` + LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` // deprecated: use model_slug instead + ModelSlug string `protobuf:"bytes,7,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` // new: model slug string + UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` + UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` + ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -969,27 +966,16 @@ func (x *CreateConversationMessageRequest) GetConversationId() string { return "" } -func (x *CreateConversationMessageRequest) GetModel() 
isCreateConversationMessageRequest_Model { - if x != nil { - return x.Model - } - return nil -} - func (x *CreateConversationMessageRequest) GetLanguageModel() LanguageModel { if x != nil { - if x, ok := x.Model.(*CreateConversationMessageRequest_LanguageModel); ok { - return x.LanguageModel - } + return x.LanguageModel } return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } func (x *CreateConversationMessageRequest) GetModelSlug() string { if x != nil { - if x, ok := x.Model.(*CreateConversationMessageRequest_ModelSlug); ok { - return x.ModelSlug - } + return x.ModelSlug } return "" } @@ -1015,22 +1001,6 @@ func (x *CreateConversationMessageRequest) GetConversationType() ConversationTyp return ConversationType_CONVERSATION_TYPE_UNSPECIFIED } -type isCreateConversationMessageRequest_Model interface { - isCreateConversationMessageRequest_Model() -} - -type CreateConversationMessageRequest_LanguageModel struct { - LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel,oneof"` // deprecated: use model_slug instead -} - -type CreateConversationMessageRequest_ModelSlug struct { - ModelSlug string `protobuf:"bytes,7,opt,name=model_slug,json=modelSlug,proto3,oneof"` // new: model slug string -} - -func (*CreateConversationMessageRequest_LanguageModel) isCreateConversationMessageRequest_Model() {} - -func (*CreateConversationMessageRequest_ModelSlug) isCreateConversationMessageRequest_Model() {} - type CreateConversationMessageResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"` @@ -1387,13 +1357,10 @@ func (x *ListSupportedModelsResponse) GetModels() []*SupportedModel { type StreamInitialization struct { state protoimpl.MessageState `protogen:"open.v1"` ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` - // 
Types that are valid to be assigned to Model: - // - // *StreamInitialization_LanguageModel - // *StreamInitialization_ModelSlug - Model isStreamInitialization_Model `protobuf_oneof:"model"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + LanguageModel LanguageModel `protobuf:"varint,5,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` // deprecated: use model_slug instead + ModelSlug string `protobuf:"bytes,6,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` // new: model slug string + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StreamInitialization) Reset() { @@ -1433,47 +1400,20 @@ func (x *StreamInitialization) GetConversationId() string { return "" } -func (x *StreamInitialization) GetModel() isStreamInitialization_Model { - if x != nil { - return x.Model - } - return nil -} - func (x *StreamInitialization) GetLanguageModel() LanguageModel { if x != nil { - if x, ok := x.Model.(*StreamInitialization_LanguageModel); ok { - return x.LanguageModel - } + return x.LanguageModel } return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } func (x *StreamInitialization) GetModelSlug() string { if x != nil { - if x, ok := x.Model.(*StreamInitialization_ModelSlug); ok { - return x.ModelSlug - } + return x.ModelSlug } return "" } -type isStreamInitialization_Model interface { - isStreamInitialization_Model() -} - -type StreamInitialization_LanguageModel struct { - LanguageModel LanguageModel `protobuf:"varint,5,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel,oneof"` // deprecated: use model_slug instead -} - -type StreamInitialization_ModelSlug struct { - ModelSlug string `protobuf:"bytes,6,opt,name=model_slug,json=modelSlug,proto3,oneof"` // new: model slug string -} - -func (*StreamInitialization_LanguageModel) isStreamInitialization_Model() {} - -func (*StreamInitialization_ModelSlug) 
isStreamInitialization_Model() {} - // Designed as StreamPartBegin and StreamPartEnd to // handle the case where assistant and tool are called at the same time. // @@ -1784,17 +1724,14 @@ func (x *StreamError) GetErrorMessage() string { // // the conversation will be created and returned. type CreateConversationMessageStreamRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` - // Types that are valid to be assigned to Model: - // - // *CreateConversationMessageStreamRequest_LanguageModel - // *CreateConversationMessageStreamRequest_ModelSlug - Model isCreateConversationMessageStreamRequest_Model `protobuf_oneof:"model"` - UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` - UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` - ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` + LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` // deprecated: use model_slug instead + ModelSlug string `protobuf:"bytes,7,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` // new: model slug string + UserMessage string 
`protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` + UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` + ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v1.ConversationType,oneof" json:"conversation_type,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1843,27 +1780,16 @@ func (x *CreateConversationMessageStreamRequest) GetConversationId() string { return "" } -func (x *CreateConversationMessageStreamRequest) GetModel() isCreateConversationMessageStreamRequest_Model { - if x != nil { - return x.Model - } - return nil -} - func (x *CreateConversationMessageStreamRequest) GetLanguageModel() LanguageModel { if x != nil { - if x, ok := x.Model.(*CreateConversationMessageStreamRequest_LanguageModel); ok { - return x.LanguageModel - } + return x.LanguageModel } return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } func (x *CreateConversationMessageStreamRequest) GetModelSlug() string { if x != nil { - if x, ok := x.Model.(*CreateConversationMessageStreamRequest_ModelSlug); ok { - return x.ModelSlug - } + return x.ModelSlug } return "" } @@ -1889,24 +1815,6 @@ func (x *CreateConversationMessageStreamRequest) GetConversationType() Conversat return ConversationType_CONVERSATION_TYPE_UNSPECIFIED } -type isCreateConversationMessageStreamRequest_Model interface { - isCreateConversationMessageStreamRequest_Model() -} - -type CreateConversationMessageStreamRequest_LanguageModel struct { - LanguageModel LanguageModel `protobuf:"varint,3,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel,oneof"` // deprecated: use model_slug instead -} - -type CreateConversationMessageStreamRequest_ModelSlug struct { - ModelSlug string `protobuf:"bytes,7,opt,name=model_slug,json=modelSlug,proto3,oneof"` // new: model slug string 
-} - -func (*CreateConversationMessageStreamRequest_LanguageModel) isCreateConversationMessageStreamRequest_Model() { -} - -func (*CreateConversationMessageStreamRequest_ModelSlug) isCreateConversationMessageStreamRequest_Model() { -} - // Response for streaming a message within an existing conversation type CreateConversationMessageStreamResponse struct { state protoimpl.MessageState `protogen:"open.v1"` @@ -2129,18 +2037,17 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\x16GetConversationRequest\x12'\n" + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"T\n" + "\x17GetConversationResponse\x129\n" + - "\fconversation\x18\x01 \x01(\v2\x15.chat.v1.ConversationR\fconversation\"\xbe\x03\n" + + "\fconversation\x18\x01 \x01(\v2\x15.chat.v1.ConversationR\fconversation\"\xb1\x03\n" + " CreateConversationMessageRequest\x12\x1d\n" + "\n" + "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" + - "\x0fconversation_id\x18\x02 \x01(\tH\x01R\x0econversationId\x88\x01\x01\x12?\n" + - "\x0elanguage_model\x18\x03 \x01(\x0e2\x16.chat.v1.LanguageModelH\x00R\rlanguageModel\x12\x1f\n" + + "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12=\n" + + "\x0elanguage_model\x18\x03 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\x1d\n" + "\n" + - "model_slug\x18\a \x01(\tH\x00R\tmodelSlug\x12!\n" + + "model_slug\x18\a \x01(\tR\tmodelSlug\x12!\n" + "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" + - "\x12user_selected_text\x18\x05 \x01(\tH\x02R\x10userSelectedText\x88\x01\x01\x12K\n" + - "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x03R\x10conversationType\x88\x01\x01B\a\n" + - "\x05modelB\x12\n" + + "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" + + "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x02R\x10conversationType\x88\x01\x01B\x12\n" + "\x10_conversation_idB\x15\n" + "\x13_user_selected_textB\x14\n" + "\x12_conversation_type\"^\n" + @@ -2159,13 
+2066,12 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\x04slug\x18\x02 \x01(\tR\x04slug\"\x1c\n" + "\x1aListSupportedModelsRequest\"N\n" + "\x1bListSupportedModelsResponse\x12/\n" + - "\x06models\x18\x01 \x03(\v2\x17.chat.v1.SupportedModelR\x06models\"\xaa\x01\n" + + "\x06models\x18\x01 \x03(\v2\x17.chat.v1.SupportedModelR\x06models\"\x9d\x01\n" + "\x14StreamInitialization\x12'\n" + - "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12?\n" + - "\x0elanguage_model\x18\x05 \x01(\x0e2\x16.chat.v1.LanguageModelH\x00R\rlanguageModel\x12\x1f\n" + + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12=\n" + + "\x0elanguage_model\x18\x05 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\x1d\n" + "\n" + - "model_slug\x18\x06 \x01(\tH\x00R\tmodelSlugB\a\n" + - "\x05model\"c\n" + + "model_slug\x18\x06 \x01(\tR\tmodelSlug\"c\n" + "\x0fStreamPartBegin\x12\x1d\n" + "\n" + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + @@ -2185,18 +2091,17 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\x12StreamFinalization\x12'\n" + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"2\n" + "\vStreamError\x12#\n" + - "\rerror_message\x18\x01 \x01(\tR\ferrorMessage\"\xc4\x03\n" + + "\rerror_message\x18\x01 \x01(\tR\ferrorMessage\"\xb7\x03\n" + "&CreateConversationMessageStreamRequest\x12\x1d\n" + "\n" + "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" + - "\x0fconversation_id\x18\x02 \x01(\tH\x01R\x0econversationId\x88\x01\x01\x12?\n" + - "\x0elanguage_model\x18\x03 \x01(\x0e2\x16.chat.v1.LanguageModelH\x00R\rlanguageModel\x12\x1f\n" + + "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12=\n" + + "\x0elanguage_model\x18\x03 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\x1d\n" + "\n" + - "model_slug\x18\a \x01(\tH\x00R\tmodelSlug\x12!\n" + + "model_slug\x18\a \x01(\tR\tmodelSlug\x12!\n" + "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" + - "\x12user_selected_text\x18\x05 
\x01(\tH\x02R\x10userSelectedText\x88\x01\x01\x12K\n" + - "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x03R\x10conversationType\x88\x01\x01B\a\n" + - "\x05modelB\x12\n" + + "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" + + "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v1.ConversationTypeH\x02R\x10conversationType\x88\x01\x01B\x12\n" + "\x10_conversation_idB\x15\n" + "\x13_user_selected_textB\x14\n" + "\x12_conversation_type\"\xb9\x04\n" + @@ -2353,18 +2258,8 @@ func file_chat_v1_chat_proto_init() { } file_chat_v1_chat_proto_msgTypes[8].OneofWrappers = []any{} file_chat_v1_chat_proto_msgTypes[9].OneofWrappers = []any{} - file_chat_v1_chat_proto_msgTypes[13].OneofWrappers = []any{ - (*CreateConversationMessageRequest_LanguageModel)(nil), - (*CreateConversationMessageRequest_ModelSlug)(nil), - } - file_chat_v1_chat_proto_msgTypes[22].OneofWrappers = []any{ - (*StreamInitialization_LanguageModel)(nil), - (*StreamInitialization_ModelSlug)(nil), - } - file_chat_v1_chat_proto_msgTypes[29].OneofWrappers = []any{ - (*CreateConversationMessageStreamRequest_LanguageModel)(nil), - (*CreateConversationMessageStreamRequest_ModelSlug)(nil), - } + file_chat_v1_chat_proto_msgTypes[13].OneofWrappers = []any{} + file_chat_v1_chat_proto_msgTypes[29].OneofWrappers = []any{} file_chat_v1_chat_proto_msgTypes[30].OneofWrappers = []any{ (*CreateConversationMessageStreamResponse_StreamInitialization)(nil), (*CreateConversationMessageStreamResponse_StreamPartBegin)(nil), diff --git a/proto/chat/v1/chat.proto b/proto/chat/v1/chat.proto index 2e4bee9f..553ec394 100644 --- a/proto/chat/v1/chat.proto +++ b/proto/chat/v1/chat.proto @@ -136,11 +136,8 @@ message CreateConversationMessageRequest { // If conversation_id is not provided, // a new conversation will be created and the id will be returned. 
optional string conversation_id = 2; - oneof model { - LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string - } - + LanguageModel language_model = 3; // deprecated: use model_slug instead + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; @@ -177,10 +174,8 @@ message ListSupportedModelsResponse { repeated SupportedModel models = 1; } // Information sent once at the beginning of a new conversation stream message StreamInitialization { string conversation_id = 1; - oneof model { - LanguageModel language_model = 5; // deprecated: use model_slug instead - string model_slug = 6; // new: model slug string - } + LanguageModel language_model = 5; // deprecated: use model_slug instead + string model_slug = 6; // new: model slug string } // Designed as StreamPartBegin and StreamPartEnd to @@ -239,10 +234,8 @@ enum ConversationType { message CreateConversationMessageStreamRequest { string project_id = 1; optional string conversation_id = 2; - oneof model { - LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string - } + LanguageModel language_model = 3; // deprecated: use model_slug instead + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; diff --git a/webapp/_webapp/src/hooks/useLanguageModels.ts b/webapp/_webapp/src/hooks/useLanguageModels.ts index 5985e479..918d9010 100644 --- a/webapp/_webapp/src/hooks/useLanguageModels.ts +++ b/webapp/_webapp/src/hooks/useLanguageModels.ts @@ -1,78 +1,11 @@ import { useCallback, useMemo } from "react"; -import { LanguageModel, SupportedModel } from "../pkg/gen/apiclient/chat/v1/chat_pb"; +import { SupportedModel } from "../pkg/gen/apiclient/chat/v1/chat_pb"; import { 
useConversationStore } from "../stores/conversation/conversation-store"; import { useListSupportedModelsQuery } from "../query"; export type Model = { name: string; slug: string; - languageModel: LanguageModel; -}; - -const slugToLanguageModel = (slug: string) => { - switch (slug) { - case "gpt-4.1": - return LanguageModel.OPENAI_GPT41; - case "gpt-4o": - return LanguageModel.OPENAI_GPT4O; - case "gpt-4.1-mini": - return LanguageModel.OPENAI_GPT41_MINI; - case "gpt-5": - return LanguageModel.OPENAI_GPT5; - case "gpt-5-mini": - return LanguageModel.OPENAI_GPT5_MINI; - case "gpt-5-nano": - return LanguageModel.OPENAI_GPT5_NANO; - case "gpt-5-chat-latest": - return LanguageModel.OPENAI_GPT5_CHAT_LATEST; - case "o1": - return LanguageModel.OPENAI_O1; - case "o1-mini": - return LanguageModel.OPENAI_O1_MINI; - case "o3": - return LanguageModel.OPENAI_O3; - case "o3-mini": - return LanguageModel.OPENAI_O3_MINI; - case "o4-mini": - return LanguageModel.OPENAI_O4_MINI; - case "codex-mini-latest": - return LanguageModel.OPENAI_CODEX_MINI_LATEST; - default: - return LanguageModel.OPENAI_GPT41; - } -}; - -const languageModelToSlug = (languageModel: LanguageModel) => { - switch (languageModel) { - case LanguageModel.OPENAI_GPT41: - return "gpt-4.1"; - case LanguageModel.OPENAI_GPT4O: - return "gpt-4o"; - case LanguageModel.OPENAI_GPT41_MINI: - return "gpt-4.1-mini"; - case LanguageModel.OPENAI_GPT5: - return "gpt-5"; - case LanguageModel.OPENAI_GPT5_MINI: - return "gpt-5-mini"; - case LanguageModel.OPENAI_GPT5_NANO: - return "gpt-5-nano"; - case LanguageModel.OPENAI_GPT5_CHAT_LATEST: - return "gpt-5-chat-latest"; - case LanguageModel.OPENAI_O1: - return "o1"; - case LanguageModel.OPENAI_O1_MINI: - return "o1-mini"; - case LanguageModel.OPENAI_O3: - return "o3"; - case LanguageModel.OPENAI_O3_MINI: - return "o3-mini"; - case LanguageModel.OPENAI_O4_MINI: - return "o4-mini"; - case LanguageModel.OPENAI_CODEX_MINI_LATEST: - return "codex-mini-latest"; - default: - return 
"gpt-4.1"; - } }; // Fallback models in case the API fails @@ -80,14 +13,12 @@ const fallbackModels: Model[] = [ { name: "GPT-4.1", slug: "gpt-4.1", - languageModel: LanguageModel.OPENAI_GPT41, }, ]; const mapSupportedModelToModel = (supportedModel: SupportedModel): Model => ({ name: supportedModel.name, slug: supportedModel.slug, - languageModel: slugToLanguageModel(supportedModel.slug), }); export const useLanguageModels = () => { @@ -102,15 +33,15 @@ export const useLanguageModels = () => { }, [supportedModelsResponse]); const currentModel = useMemo(() => { - const model = models.find((m) => m.slug === languageModelToSlug(currentConversation.languageModel)); + const model = models.find((m) => m.slug === currentConversation.modelSlug); return model || models[0]; - }, [models, currentConversation.languageModel]); + }, [models, currentConversation.modelSlug]); const setModel = useCallback( (model: Model) => { setCurrentConversation({ ...currentConversation, - languageModel: slugToLanguageModel(model.slug), + modelSlug: model.slug, }); }, [setCurrentConversation, currentConversation], diff --git a/webapp/_webapp/src/hooks/useSendMessageStream.ts b/webapp/_webapp/src/hooks/useSendMessageStream.ts index 4026237e..9ec51912 100644 --- a/webapp/_webapp/src/hooks/useSendMessageStream.ts +++ b/webapp/_webapp/src/hooks/useSendMessageStream.ts @@ -73,7 +73,10 @@ export function useSendMessageStream() { const request: PlainMessage = { projectId: getProjectId(), conversationId: currentConversation.id, - languageModel: currentConversation.languageModel, + model: { + case: "modelSlug", + value: currentConversation.modelSlug!, + }, userMessage: message, userSelectedText: selectedText, conversationType: conversationMode === "debug" ? 
ConversationType.DEBUG : ConversationType.UNSPECIFIED, diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts index aed8f7dd..7876665c 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts @@ -11,7 +11,7 @@ import type { Message as Message$1 } from "@bufbuild/protobuf"; * Describes the file chat/v1/chat.proto. */ export const file_chat_v1_chat: GenFile = /*@__PURE__*/ - fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCKlAQoMQ29udmVyc2F0aW9uEgoKAmlkGAEgASgJEg0KBXRpdGxlGAMgASgJEi4KDmxhbmd1YWdlX21vZGVsGAIgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhcKCm1vZGVsX3NsdWcYBSABKAlIAIgBARIiCghtZXNzYWdlcxgEIAMoCzIQLmNoYXQudjEuTWVzc2FnZUINCgtfbW9kZWxfc2x1ZyJCChhMaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QSFwoKcHJvamVjdF9pZBgBIAEoCUgAiAEBQg0KC19wcm9qZWN0X2lkIkkKGUxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2USLAoNY29udmVyc2F0aW9ucxgBIAMo
CzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjEKFkdldENvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIkYKF0dldENvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uItgCCiBDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgBiAEBEjAKDmxhbmd1YWdlX21vZGVsGAMgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsSAASFAoKbW9kZWxfc2x1ZxgHIAEoCUgAEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAogBARI5ChFjb252ZXJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjEuQ29udmVyc2F0aW9uVHlwZUgDiAEBQgcKBW1vZGVsQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUiUAohQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIkMKGVVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEg0KBXRpdGxlGAIgASgJIkkKGlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjQKGURlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIhwKGkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIiwKDlN1cHBvcnRlZE1vZGVsEgwKBG5hbWUYASABKAkSDAoEc2x1ZxgCIAEoCSIcChpMaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdCJGChtMaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2USJwoGbW9kZWxzGAEgAygLMhcuY2hhdC52MS5TdXBwb3J0ZWRNb2RlbCKAAQoUU3RyZWFtSW5pdGlhbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEjAKDmxhbmd1YWdlX21vZGVsGAUgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsSAASFAoKbW9kZWxfc2x1ZxgGIAEoCUgAQgcKBW1vZGVsIk8KD1N0cmVhbVBhcnRCZWdpbhISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYxLk1lc3NhZ2VQYXlsb2FkIjEKDE1lc3NhZ2VDaHVuaxISCgptZXNzYWdlX2lkGAEgASgJEg0KBWRlbHRhGAIgASgJIjoKE0luY29tcGxldGVJbmRpY2F0b3ISDgoGcmVhc29uGAEgASgJEhMKC3Jlc3BvbnNlX2lkGAIgASgJIk0KDVN0cmVhbVBhcnRFbmQSEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCItChJTdHJlYW1GaW5hbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIiQKC1N0cmVhbUVycm9yEhUKDWVycm9yX21lc3NhZ2UYASABKAki3gIKJkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0EhIKCnByb2plY3RfaWQYASAB
KAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSAGIAQESMAoObGFuZ3VhZ2VfbW9kZWwYAyABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWxIABIUCgptb2RlbF9zbHVnGAcgASgJSAASFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgCiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52MS5Db252ZXJzYXRpb25UeXBlSAOIAQFCBwoFbW9kZWxCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSK/AwonQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlEj4KFXN0cmVhbV9pbml0aWFsaXphdGlvbhgBIAEoCzIdLmNoYXQudjEuU3RyZWFtSW5pdGlhbGl6YXRpb25IABI1ChFzdHJlYW1fcGFydF9iZWdpbhgCIAEoCzIYLmNoYXQudjEuU3RyZWFtUGFydEJlZ2luSAASLgoNbWVzc2FnZV9jaHVuaxgDIAEoCzIVLmNoYXQudjEuTWVzc2FnZUNodW5rSAASPAoUaW5jb21wbGV0ZV9pbmRpY2F0b3IYBCABKAsyHC5jaGF0LnYxLkluY29tcGxldGVJbmRpY2F0b3JIABIxCg9zdHJlYW1fcGFydF9lbmQYBSABKAsyFi5jaGF0LnYxLlN0cmVhbVBhcnRFbmRIABI6ChNzdHJlYW1fZmluYWxpemF0aW9uGAYgASgLMhsuY2hhdC52MS5TdHJlYW1GaW5hbGl6YXRpb25IABIsCgxzdHJlYW1fZXJyb3IYByABKAsyFC5jaGF0LnYxLlN0cmVhbUVycm9ySABCEgoQcmVzcG9uc2VfcGF5bG9hZCr/AwoNTGFuZ3VhZ2VNb2RlbBIeChpMQU5HVUFHRV9NT0RFTF9VTlNQRUNJRklFRBAAEh8KG0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0TxABEiQKIExBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0MV9NSU5JEAISHwobTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDQxEAQSHgoaTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDUQBxIjCh9MQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9NSU5JEAgSIwofTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfTkFOTxAJEioKJkxBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X0NIQVRfTEFURVNUEAoSHAoYTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xEAsSIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xX01JTkkQDBIcChhMQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzMQDRIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzNfTUlOSRAOEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PNF9NSU5JEA8SKwonTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0NPREVYX01JTklfTEFURVNUEBAqUgoQQ29udmVyc2F0aW9uVHlwZRIhCh1DT05WRVJTQVRJT05fVFlQRV9VTlNQRUNJRklFRBAAEhsKF0NPTlZFUlNBVElPTl9UWVBFX0RFQlVHEAEy0ggKC0NoYXRTZXJ2aWNlEoMBChFMaXN0Q29udmVyc2F0aW9ucxIhLmNoYXQudjEuTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0GiIuY2hhdC52MS5MaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMSjwEKD0dldENv
bnZlcnNhdGlvbhIfLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVxdWVzdBogLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMxIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKnAQoZQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZRIpLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QaKi5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZSIzgtPkkwItOgEqIigvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzEsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52MS5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjEuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YxL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MUIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MTtjaGF0djGiAgNDWFiqAgdDaGF0LlYxygIHQ2hhdFxWMeICE0NoYXRcVjFcR1BCTWV0YWRhdGHqAghDaGF0OjpWMWIGcHJvdG8z", [file_google_api_annotations]); + 
fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCKlAQoMQ29udmVyc2F0aW9uEgoKAmlkGAEgASgJEg0KBXRpdGxlGAMgASgJEi4KDmxhbmd1YWdlX21vZGVsGAIgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhcKCm1vZGVsX3NsdWcYBSABKAlIAIgBARIiCghtZXNzYWdlcxgEIAMoCzIQLmNoYXQudjEuTWVzc2FnZUINCgtfbW9kZWxfc2x1ZyJCChhMaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QSFwoKcHJvamVjdF9pZBgBIAEoCUgAiAEBQg0KC19wcm9qZWN0X2lkIkkKGUxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2USLAoNY29udmVyc2F0aW9ucxgBIAMoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjEKFkdldENvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIkYKF0dldENvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIssCCiBDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgAiAEBEi4KDmxhbmd1YWdlX21vZGVsGAMgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhIKCm1vZGVsX3NsdWcYByABKAkSFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgBiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC
52MS5Db252ZXJzYXRpb25UeXBlSAKIAQFCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSJQCiFDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52MS5Db252ZXJzYXRpb24iQwoZVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkSDQoFdGl0bGUYAiABKAkiSQoaVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52MS5Db252ZXJzYXRpb24iNAoZRGVsZXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiHAoaRGVsZXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiLAoOU3VwcG9ydGVkTW9kZWwSDAoEbmFtZRgBIAEoCRIMCgRzbHVnGAIgASgJIhwKGkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0IkYKG0xpc3RTdXBwb3J0ZWRNb2RlbHNSZXNwb25zZRInCgZtb2RlbHMYASADKAsyFy5jaGF0LnYxLlN1cHBvcnRlZE1vZGVsInMKFFN0cmVhbUluaXRpYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCRIuCg5sYW5ndWFnZV9tb2RlbBgFIAEoDjIWLmNoYXQudjEuTGFuZ3VhZ2VNb2RlbBISCgptb2RlbF9zbHVnGAYgASgJIk8KD1N0cmVhbVBhcnRCZWdpbhISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYxLk1lc3NhZ2VQYXlsb2FkIjEKDE1lc3NhZ2VDaHVuaxISCgptZXNzYWdlX2lkGAEgASgJEg0KBWRlbHRhGAIgASgJIjoKE0luY29tcGxldGVJbmRpY2F0b3ISDgoGcmVhc29uGAEgASgJEhMKC3Jlc3BvbnNlX2lkGAIgASgJIk0KDVN0cmVhbVBhcnRFbmQSEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCItChJTdHJlYW1GaW5hbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIiQKC1N0cmVhbUVycm9yEhUKDWVycm9yX21lc3NhZ2UYASABKAki0QIKJkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSACIAQESLgoObGFuZ3VhZ2VfbW9kZWwYAyABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwSEgoKbW9kZWxfc2x1ZxgHIAEoCRIUCgx1c2VyX21lc3NhZ2UYBCABKAkSHwoSdXNlcl9zZWxlY3RlZF90ZXh0GAUgASgJSAGIAQESOQoRY29udmVyc2F0aW9uX3R5cGUYBiABKA4yGS5jaGF0LnYxLkNvbnZlcnNhdGlvblR5cGVIAogBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE191c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlIr8DCidDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVzcG9uc2USPgoVc3RyZWFtX2luaXRpYWxpemF0aW9uGAEgASgLMh0uY2hhdC52MS5TdHJlYW1Jbml0aWFsaXphdGlvbkgAEjUKEXN0cmVhbV9wYXJ0X2JlZ2luGAIgASgLMhguY2
hhdC52MS5TdHJlYW1QYXJ0QmVnaW5IABIuCg1tZXNzYWdlX2NodW5rGAMgASgLMhUuY2hhdC52MS5NZXNzYWdlQ2h1bmtIABI8ChRpbmNvbXBsZXRlX2luZGljYXRvchgEIAEoCzIcLmNoYXQudjEuSW5jb21wbGV0ZUluZGljYXRvckgAEjEKD3N0cmVhbV9wYXJ0X2VuZBgFIAEoCzIWLmNoYXQudjEuU3RyZWFtUGFydEVuZEgAEjoKE3N0cmVhbV9maW5hbGl6YXRpb24YBiABKAsyGy5jaGF0LnYxLlN0cmVhbUZpbmFsaXphdGlvbkgAEiwKDHN0cmVhbV9lcnJvchgHIAEoCzIULmNoYXQudjEuU3RyZWFtRXJyb3JIAEISChByZXNwb25zZV9wYXlsb2FkKv8DCg1MYW5ndWFnZU1vZGVsEh4KGkxBTkdVQUdFX01PREVMX1VOU1BFQ0lGSUVEEAASHwobTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDRPEAESJAogTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDQxX01JTkkQAhIfChtMQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNDEQBBIeChpMQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNRAHEiMKH0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X01JTkkQCBIjCh9MQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9OQU5PEAkSKgomTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfQ0hBVF9MQVRFU1QQChIcChhMQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzEQCxIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzFfTUlOSRAMEhwKGExBTkdVQUdFX01PREVMX09QRU5BSV9PMxANEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PM19NSU5JEA4SIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX080X01JTkkQDxIrCidMQU5HVUFHRV9NT0RFTF9PUEVOQUlfQ09ERVhfTUlOSV9MQVRFU1QQECpSChBDb252ZXJzYXRpb25UeXBlEiEKHUNPTlZFUlNBVElPTl9UWVBFX1VOU1BFQ0lGSUVEEAASGwoXQ09OVkVSU0FUSU9OX1RZUEVfREVCVUcQATLSCAoLQ2hhdFNlcnZpY2USgwEKEUxpc3RDb252ZXJzYXRpb25zEiEuY2hhdC52MS5MaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QaIi5jaGF0LnYxLkxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2UiJ4LT5JMCIRIfL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucxKPAQoPR2V0Q29udmVyc2F0aW9uEh8uY2hhdC52MS5HZXRDb252ZXJzYXRpb25SZXF1ZXN0GiAuY2hhdC52MS5HZXRDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzEjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EqcBChlDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlEikuY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVxdWVzdBoqLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlc3BvbnNlIjOC0+STAi06ASoiKC9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMSwgEKH0NyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW0SLy5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0GjAuY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZW
FtUmVzcG9uc2UiOoLT5JMCNDoBKiIvL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy9tZXNzYWdlcy9zdHJlYW0wARKbAQoSVXBkYXRlQ29udmVyc2F0aW9uEiIuY2hhdC52MS5VcGRhdGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52MS5VcGRhdGVDb252ZXJzYXRpb25SZXNwb25zZSI8gtPkkwI2OgEqMjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EpgBChJEZWxldGVDb252ZXJzYXRpb24SIi5jaGF0LnYxLkRlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QaIy5jaGF0LnYxLkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIjmC0+STAjMqMS9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SggEKE0xpc3RTdXBwb3J0ZWRNb2RlbHMSIy5jaGF0LnYxLkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0GiQuY2hhdC52MS5MaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2UiIILT5JMCGhIYL19wZC9hcGkvdjEvY2hhdHMvbW9kZWxzQn8KC2NvbS5jaGF0LnYxQglDaGF0UHJvdG9QAVoocGFwZXJkZWJ1Z2dlci9wa2cvZ2VuL2FwaS9jaGF0L3YxO2NoYXR2MaICA0NYWKoCB0NoYXQuVjHKAgdDaGF0XFYx4gITQ2hhdFxWMVxHUEJNZXRhZGF0YeoCCENoYXQ6OlYxYgZwcm90bzM", [file_google_api_annotations]); /** * @generated from message chat.v1.MessageTypeToolCall @@ -354,25 +354,18 @@ export type CreateConversationMessageRequest = Message$1<"chat.v1.CreateConversa conversationId?: string; /** - * @generated from oneof chat.v1.CreateConversationMessageRequest.model + * deprecated: use model_slug instead + * + * @generated from field: chat.v1.LanguageModel language_model = 3; */ - model: { - /** - * deprecated: use model_slug instead - * - * @generated from field: chat.v1.LanguageModel language_model = 3; - */ - value: LanguageModel; - case: "languageModel"; - } | { - /** - * new: model slug string - * - * @generated from field: string model_slug = 7; - */ - value: string; - case: "modelSlug"; - } | { case: undefined; value?: undefined }; + languageModel: LanguageModel; + + /** + * new: model slug string + * + * @generated from field: string model_slug = 7; + */ + modelSlug: string; /** * @generated from field: string user_message = 4; @@ -551,25 +544,18 @@ export type StreamInitialization = Message$1<"chat.v1.StreamInitialization"> & { conversationId: string; /** - * @generated 
from oneof chat.v1.StreamInitialization.model + * deprecated: use model_slug instead + * + * @generated from field: chat.v1.LanguageModel language_model = 5; */ - model: { - /** - * deprecated: use model_slug instead - * - * @generated from field: chat.v1.LanguageModel language_model = 5; - */ - value: LanguageModel; - case: "languageModel"; - } | { - /** - * new: model slug string - * - * @generated from field: string model_slug = 6; - */ - value: string; - case: "modelSlug"; - } | { case: undefined; value?: undefined }; + languageModel: LanguageModel; + + /** + * new: model slug string + * + * @generated from field: string model_slug = 6; + */ + modelSlug: string; }; /** @@ -740,25 +726,18 @@ export type CreateConversationMessageStreamRequest = Message$1<"chat.v1.CreateCo conversationId?: string; /** - * @generated from oneof chat.v1.CreateConversationMessageStreamRequest.model + * deprecated: use model_slug instead + * + * @generated from field: chat.v1.LanguageModel language_model = 3; */ - model: { - /** - * deprecated: use model_slug instead - * - * @generated from field: chat.v1.LanguageModel language_model = 3; - */ - value: LanguageModel; - case: "languageModel"; - } | { - /** - * new: model slug string - * - * @generated from field: string model_slug = 7; - */ - value: string; - case: "modelSlug"; - } | { case: undefined; value?: undefined }; + languageModel: LanguageModel; + + /** + * new: model slug string + * + * @generated from field: string model_slug = 7; + */ + modelSlug: string; /** * @generated from field: string user_message = 4; diff --git a/webapp/_webapp/src/stores/conversation/conversation-store.ts b/webapp/_webapp/src/stores/conversation/conversation-store.ts index 76392711..5e213448 100644 --- a/webapp/_webapp/src/stores/conversation/conversation-store.ts +++ b/webapp/_webapp/src/stores/conversation/conversation-store.ts @@ -1,5 +1,5 @@ import { create } from "zustand"; -import { Conversation, ConversationSchema, LanguageModel } from 
"../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Conversation, ConversationSchema } from "../../pkg/gen/apiclient/chat/v1/chat_pb"; import { fromJson } from "@bufbuild/protobuf"; interface ConversationStore { @@ -24,7 +24,7 @@ export const useConversationStore = create((set, get) => ({ export function newConversation(): Conversation { return fromJson(ConversationSchema, { id: "", - languageModel: LanguageModel.OPENAI_GPT41, + modelSlug: "gpt-4.1", title: "New Conversation", messages: [], }); diff --git a/webapp/_webapp/src/stores/conversation/handlers/converter.ts b/webapp/_webapp/src/stores/conversation/handlers/converter.ts index bc70ccf4..6d289445 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/converter.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/converter.ts @@ -1,5 +1,5 @@ import { fromJson } from "@bufbuild/protobuf"; -import { Conversation, LanguageModel, Message, MessageSchema } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Conversation, Message, MessageSchema } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; import { MessageEntry, MessageEntryStatus } from "../types"; import { useStreamingMessageStore } from "../../streaming-message-store"; import { flushSync } from "react-dom"; @@ -41,7 +41,7 @@ export const convertMessageEntryToMessage = (messageEntry: MessageEntry): Messag return undefined; }; -export const flushStreamingMessageToConversation = (conversationId?: string, languageModel?: LanguageModel) => { +export const flushStreamingMessageToConversation = (conversationId?: string, modelSlug?: string) => { const flushMessages = useStreamingMessageStore .getState() .streamingMessage.parts.map((part) => { @@ -59,7 +59,7 @@ export const flushStreamingMessageToConversation = (conversationId?: string, lan useConversationStore.getState().updateCurrentConversation((prev: Conversation) => ({ ...prev, id: conversationId ?? prev.id, - languageModel: languageModel ?? prev.languageModel, + modelSlug: modelSlug ?? 
prev.modelSlug, messages: [...prev.messages, ...flushMessages], })); }); diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts index b5427833..61831c6c 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts @@ -24,6 +24,6 @@ export function handleStreamInitialization(streamInit: StreamInitialization, ref logWarn("Streaming message parts length is not 1, this may indicate some stale messages in the store"); } - flushStreamingMessageToConversation(streamInit.conversationId, streamInit.languageModel); + flushStreamingMessageToConversation(streamInit.conversationId, streamInit.model.case === "modelSlug" ? streamInit.model.value : undefined); refetchConversationList(); // Here we refetch conversation list because user may send chat message and immediately open history to view. 
} diff --git a/webapp/_webapp/src/views/chat/footer/toolbar/model-selection.tsx b/webapp/_webapp/src/views/chat/footer/toolbar/model-selection.tsx index 4c1d48d4..2d4f17fc 100644 --- a/webapp/_webapp/src/views/chat/footer/toolbar/model-selection.tsx +++ b/webapp/_webapp/src/views/chat/footer/toolbar/model-selection.tsx @@ -1,7 +1,6 @@ import { useCallback, useMemo } from "react"; import { SelectionItem, Selection } from "./selection"; import { useLanguageModels } from "../../../../hooks/useLanguageModels"; -import { LanguageModel } from "../../../../pkg/gen/apiclient/chat/v1/chat_pb"; import { useConversationUiStore } from "../../../../stores/conversation/conversation-ui-store"; type ModelSelectionProps = { @@ -11,17 +10,17 @@ type ModelSelectionProps = { export function ModelSelection({ onSelectModel }: ModelSelectionProps) { const { inputRef } = useConversationUiStore(); const { models, setModel } = useLanguageModels(); - const items: SelectionItem[] = useMemo(() => { + const items: SelectionItem[] = useMemo(() => { return models.map((model) => ({ title: model.name, subtitle: model.slug, - value: model.languageModel, + value: model.slug, })); }, [models]); const onSelect = useCallback( - (item: SelectionItem) => { - setModel(models.find((m) => m.languageModel === item.value)!); + (item: SelectionItem) => { + setModel(models.find((m) => m.slug === item.value)!); onSelectModel(); inputRef.current?.focus(); }, From 7b4ed62f24ea40b714b11dbfc0d05dd03bfc0a63 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 13:02:39 +0800 Subject: [PATCH 07/14] internal services --- .../api/chat/create_conversation_message.go | 79 +------------------ .../create_conversation_message_stream.go | 6 +- internal/services/chat.go | 3 +- .../services/toolkit/client/completion.go | 4 +- .../toolkit/client/get_conversation_title.go | 2 +- 5 files changed, 10 insertions(+), 84 deletions(-) diff --git a/internal/api/chat/create_conversation_message.go 
b/internal/api/chat/create_conversation_message.go index 34565f37..cfe7730d 100644 --- a/internal/api/chat/create_conversation_message.go +++ b/internal/api/chat/create_conversation_message.go @@ -3,7 +3,6 @@ package chat import ( "context" - "paperdebugger/internal/api/mapper" "paperdebugger/internal/libs/contextutil" "paperdebugger/internal/libs/shared" "paperdebugger/internal/models" @@ -115,7 +114,6 @@ func (s *ChatServer) createConversation( userInstructions string, userMessage string, userSelectedText string, - languageModel models.LanguageModel, modelSlug string, conversationType chatv1.ConversationType, ) (*models.Conversation, error) { @@ -136,7 +134,7 @@ func (s *ChatServer) createConversation( } return s.chatService.InsertConversationToDB( - ctx, userId, projectId, languageModel, modelSlug, messages, oaiHistory.OfInputItemList, + ctx, userId, projectId, modelSlug, messages, oaiHistory.OfInputItemList, ) } @@ -181,7 +179,7 @@ func (s *ChatServer) appendConversationMessage( // 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 // conversationType 可以在一次 conversation 中多次切换 -func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, languageModel models.LanguageModel, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { +func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { actor, err := contextutil.GetActor(ctx) if err != nil { return ctx, nil, nil, err @@ -224,7 +222,6 @@ func (s *ChatServer) prepare(ctx context.Context, projectId string, conversation userInstructions, userMessage, userSelectedText, - languageModel, modelSlug, conversationType, ) @@ -253,75 +250,3 @@ func (s *ChatServer) prepare(ctx 
context.Context, projectId string, conversation return ctx, conversation, settings, nil } - -// Deprecated: Use CreateConversationMessageStream instead. -func (s *ChatServer) CreateConversationMessage( - ctx context.Context, - req *chatv1.CreateConversationMessageRequest, -) (*chatv1.CreateConversationMessageResponse, error) { - languageModel := models.LanguageModel(req.GetLanguageModel()) - modelSlug := req.GetModelSlug() - // still using old api. This backward compatibility fallback converts the deprecated LanguageModel enum to a model slug. - if modelSlug == "" { - modelSlug = languageModel.Name() - } - - ctx, conversation, settings, err := s.prepare( - ctx, - req.GetProjectId(), - req.GetConversationId(), - req.GetUserMessage(), - req.GetUserSelectedText(), - languageModel, - modelSlug, - req.GetConversationType(), - ) - if err != nil { - return nil, err - } - - llmProvider := &models.LLMProviderConfig{ - Endpoint: s.cfg.OpenAIBaseURL, - APIKey: settings.OpenAIAPIKey, - } - openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletion(ctx, modelSlug, languageModel, conversation.OpenaiChatHistory, llmProvider) - if err != nil { - return nil, err - } - - bsonMessages := make([]bson.M, len(inappChatHistory)) - for i := range inappChatHistory { - bsonMsg, err := convertToBSON(&inappChatHistory[i]) - if err != nil { - return nil, err - } - bsonMessages[i] = bsonMsg - } - conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMessages...) 
- conversation.OpenaiChatHistory = openaiChatHistory - - if err := s.chatService.UpdateConversation(conversation); err != nil { - return nil, err - } - - go func() { - protoMessages := make([]*chatv1.Message, len(conversation.InappChatHistory)) - for i, bsonMsg := range conversation.InappChatHistory { - protoMessages[i] = mapper.BSONToChatMessage(bsonMsg) - } - title, err := s.aiClient.GetConversationTitle(ctx, protoMessages, llmProvider) - if err != nil { - s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex()) - return - } - conversation.Title = title - if err := s.chatService.UpdateConversation(conversation); err != nil { - s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex()) - return - } - }() - - return &chatv1.CreateConversationMessageResponse{ - Conversation: mapper.MapModelConversationToProto(conversation), - }, nil -} diff --git a/internal/api/chat/create_conversation_message_stream.go b/internal/api/chat/create_conversation_message_stream.go index 2de51a1e..e996d3a5 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -25,15 +25,17 @@ func (s *ChatServer) CreateConversationMessageStream( ) error { ctx := stream.Context() - languageModel := models.LanguageModel(req.GetLanguageModel()) modelSlug := req.GetModelSlug() + if modelSlug == "" { + modelSlug = models.LanguageModel(req.GetLanguageModel()).Name() + } + ctx, conversation, settings, err := s.prepare( ctx, req.GetProjectId(), req.GetConversationId(), req.GetUserMessage(), req.GetUserSelectedText(), - languageModel, modelSlug, req.GetConversationType(), ) diff --git a/internal/services/chat.go b/internal/services/chat.go index ebd0fef1..825eecfc 100644 --- a/internal/services/chat.go +++ b/internal/services/chat.go @@ -92,7 +92,7 @@ func (s *ChatService) GetPrompt(ctx context.Context, content string, 
selectedTex return strings.TrimSpace(userPromptBuffer.String()), nil } -func (s *ChatService) InsertConversationToDB(ctx context.Context, userID bson.ObjectID, projectID string, languageModel models.LanguageModel, modelSlug string, inappChatHistory []*chatv1.Message, openaiChatHistory responses.ResponseInputParam) (*models.Conversation, error) { +func (s *ChatService) InsertConversationToDB(ctx context.Context, userID bson.ObjectID, projectID string, modelSlug string, inappChatHistory []*chatv1.Message, openaiChatHistory responses.ResponseInputParam) (*models.Conversation, error) { // Convert protobuf messages to BSON bsonMessages := make([]bson.M, len(inappChatHistory)) for i := range inappChatHistory { @@ -116,7 +116,6 @@ func (s *ChatService) InsertConversationToDB(ctx context.Context, userID bson.Ob UserID: userID, ProjectID: projectID, Title: DefaultConversationTitle, - LanguageModel: languageModel, ModelSlug: modelSlug, InappChatHistory: bsonMessages, OpenaiChatHistory: openaiChatHistory, diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index 447beca3..f4c13259 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -14,14 +14,14 @@ import ( // Parameters: // // ctx: The context for controlling cancellation and deadlines. -// languageModel: The language model to use for completion (e.g., GPT-3.5, GPT-4). +// modelSlug: The language model to use for completion (e.g., GPT-3.5, GPT-4). // messages: The full chat history (as input) to send to the language model. // // Returns: // 1. The full chat history sent to the language model (including any tool call results). // 2. The incremental chat history visible to the user (including tool call results and assistant responses). // 3. An error, if any occurred during the process. 
-func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, languageModel models.LanguageModel, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { +func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { openaiChatHistory, inappChatHistory, err := a.ChatCompletionStream(ctx, nil, "", modelSlug, messages, llmProvider) if err != nil { return nil, nil, err diff --git a/internal/services/toolkit/client/get_conversation_title.go b/internal/services/toolkit/client/get_conversation_title.go index f2b26a00..283e689f 100644 --- a/internal/services/toolkit/client/get_conversation_title.go +++ b/internal/services/toolkit/client/get_conversation_title.go @@ -29,7 +29,7 @@ func (a *AIClient) GetConversationTitle(ctx context.Context, inappChatHistory [] message := strings.Join(messages, "\n") message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. 
Give me the title only, no other text including any other words.", message) - _, resp, err := a.ChatCompletion(ctx, "gpt-4.1-mini", models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), responses.ResponseInputParam{ + _, resp, err := a.ChatCompletion(ctx, "gpt-4.1-mini", responses.ResponseInputParam{ { OfInputMessage: &responses.ResponseInputItemMessageParam{ Role: "system", From 28c689f3d192a3d8d20abfb0d51b75bdc4fc002d Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 13:13:03 +0800 Subject: [PATCH 08/14] fix --- internal/api/mapper/conversation.go | 7 ++++ internal/models/language_model.go | 33 +++++++++++++++++++ internal/services/toolkit/handler/stream.go | 15 ++------- pkg/gen/api/chat/v1/chat.pb.go | 16 ++++----- proto/chat/v1/chat.proto | 2 +- .../src/pkg/gen/apiclient/chat/v1/chat_pb.ts | 6 ++-- 6 files changed, 53 insertions(+), 26 deletions(-) diff --git a/internal/api/mapper/conversation.go b/internal/api/mapper/conversation.go index 129dabd2..807839d3 100644 --- a/internal/api/mapper/conversation.go +++ b/internal/api/mapper/conversation.go @@ -32,10 +32,17 @@ func MapModelConversationToProto(conversation *models.Conversation) *chatv1.Conv return msg.GetPayload().GetMessageType() != &chatv1.MessagePayload_System{} }) + // backward compatibility + modelSlug := conversation.ModelSlug + if modelSlug == "" { + modelSlug = chatv1.LanguageModel(conversation.LanguageModel).String() + } + return &chatv1.Conversation{ Id: conversation.ID.Hex(), Title: conversation.Title, LanguageModel: chatv1.LanguageModel(conversation.LanguageModel), + ModelSlug: modelSlug, Messages: filteredMessages, } } diff --git a/internal/models/language_model.go b/internal/models/language_model.go index 7f1e8df0..d15fa939 100644 --- a/internal/models/language_model.go +++ b/internal/models/language_model.go @@ -56,3 +56,36 @@ func (x LanguageModel) Name() string { return openai.ChatModelGPT5 } } + +func LanguageModelFromSlug(slug string) LanguageModel 
{ + switch slug { + case "gpt-4o": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT4O) + case "gpt-4.1": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41) + case "gpt-4.1-mini": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI) + case "gpt-5": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5) + case "gpt-5-mini": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_MINI) + case "gpt-5-nano": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_NANO) + case "gpt-5-chat-latest": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_CHAT_LATEST) + case "o1": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O1) + case "o1-mini": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O1_MINI) + case "o3": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O3) + case "o3-mini": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O3_MINI) + case "o4-mini": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O4_MINI) + case "codex-mini-latest": + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_CODEX_MINI_LATEST) + default: + return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_UNSPECIFIED) + } +} diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go index e0c3298e..d1ec497a 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -1,6 +1,7 @@ package handler import ( + "paperdebugger/internal/models" chatv1 "paperdebugger/pkg/gen/api/chat/v1" "github.com/openai/openai-go/v2/responses" @@ -24,18 +25,6 @@ func NewStreamHandler( } } -func (h *StreamHandler) ConvertSlugToLanguageModel() chatv1.LanguageModel { - // TODO: finish this. 
- switch h.modelSlug { - case "gpt-4o": - return chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT4O - case "gpt-4.1-mini": - return chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI - default: - return chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41 - } -} - func (h *StreamHandler) SendInitialization() { if h.callbackStream == nil { return @@ -44,7 +33,7 @@ func (h *StreamHandler) SendInitialization() { streamInit := &chatv1.StreamInitialization{ ConversationId: h.conversationId, ModelSlug: h.modelSlug, - LanguageModel: h.ConvertSlugToLanguageModel(), + LanguageModel: chatv1.LanguageModel(models.LanguageModelFromSlug(h.modelSlug)), // compatible with old code } h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{ diff --git a/pkg/gen/api/chat/v1/chat.pb.go b/pkg/gen/api/chat/v1/chat.pb.go index 78e1997d..33751f72 100644 --- a/pkg/gen/api/chat/v1/chat.pb.go +++ b/pkg/gen/api/chat/v1/chat.pb.go @@ -658,7 +658,7 @@ type Conversation struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` LanguageModel LanguageModel `protobuf:"varint,2,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` // deprecated: use model_slug instead - ModelSlug *string `protobuf:"bytes,5,opt,name=model_slug,json=modelSlug,proto3,oneof" json:"model_slug,omitempty"` // new: model slug string + ModelSlug string `protobuf:"bytes,5,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` // new: model slug string // If list conversations, then messages length is 0. 
Messages []*Message `protobuf:"bytes,4,rep,name=messages,proto3" json:"messages,omitempty"` unknownFields protoimpl.UnknownFields @@ -717,8 +717,8 @@ func (x *Conversation) GetLanguageModel() LanguageModel { } func (x *Conversation) GetModelSlug() string { - if x != nil && x.ModelSlug != nil { - return *x.ModelSlug + if x != nil { + return x.ModelSlug } return "" } @@ -2019,15 +2019,14 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\aMessage\x12\x1d\n" + "\n" + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + - "\apayload\x18\x03 \x01(\v2\x17.chat.v1.MessagePayloadR\apayload\"\xd4\x01\n" + + "\apayload\x18\x03 \x01(\v2\x17.chat.v1.MessagePayloadR\apayload\"\xc0\x01\n" + "\fConversation\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + "\x05title\x18\x03 \x01(\tR\x05title\x12=\n" + - "\x0elanguage_model\x18\x02 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\"\n" + + "\x0elanguage_model\x18\x02 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\x1d\n" + "\n" + - "model_slug\x18\x05 \x01(\tH\x00R\tmodelSlug\x88\x01\x01\x12,\n" + - "\bmessages\x18\x04 \x03(\v2\x10.chat.v1.MessageR\bmessagesB\r\n" + - "\v_model_slug\"M\n" + + "model_slug\x18\x05 \x01(\tR\tmodelSlug\x12,\n" + + "\bmessages\x18\x04 \x03(\v2\x10.chat.v1.MessageR\bmessages\"M\n" + "\x18ListConversationsRequest\x12\"\n" + "\n" + "project_id\x18\x01 \x01(\tH\x00R\tprojectId\x88\x01\x01B\r\n" + @@ -2256,7 +2255,6 @@ func file_chat_v1_chat_proto_init() { (*MessagePayload_ToolCall)(nil), (*MessagePayload_Unknown)(nil), } - file_chat_v1_chat_proto_msgTypes[8].OneofWrappers = []any{} file_chat_v1_chat_proto_msgTypes[9].OneofWrappers = []any{} file_chat_v1_chat_proto_msgTypes[13].OneofWrappers = []any{} file_chat_v1_chat_proto_msgTypes[29].OneofWrappers = []any{} diff --git a/proto/chat/v1/chat.proto b/proto/chat/v1/chat.proto index 553ec394..7451e7bf 100644 --- a/proto/chat/v1/chat.proto +++ b/proto/chat/v1/chat.proto @@ -115,7 +115,7 @@ message Conversation { string id = 1; string title 
= 3; LanguageModel language_model = 2; // deprecated: use model_slug instead - optional string model_slug = 5; // new: model slug string + string model_slug = 5; // new: model slug string // If list conversations, then messages length is 0. repeated Message messages = 4; } diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts index 7876665c..a9c99b12 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts @@ -11,7 +11,7 @@ import type { Message as Message$1 } from "@bufbuild/protobuf"; * Describes the file chat/v1/chat.proto. */ export const file_chat_v1_chat: GenFile = /*@__PURE__*/ - fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCKlAQoMQ29udmVyc2F0aW9uEgoKAmlkGAEgASgJEg0KBXRpdGxlGAMgASgJEi4KDmxhbmd1YWdlX21vZGVsGAIgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhcKCm1vZGVsX3NsdWcYBSABKAlIAIgBARIiCghtZ
XNzYWdlcxgEIAMoCzIQLmNoYXQudjEuTWVzc2FnZUINCgtfbW9kZWxfc2x1ZyJCChhMaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QSFwoKcHJvamVjdF9pZBgBIAEoCUgAiAEBQg0KC19wcm9qZWN0X2lkIkkKGUxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2USLAoNY29udmVyc2F0aW9ucxgBIAMoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjEKFkdldENvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIkYKF0dldENvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIssCCiBDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgAiAEBEi4KDmxhbmd1YWdlX21vZGVsGAMgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhIKCm1vZGVsX3NsdWcYByABKAkSFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgBiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52MS5Db252ZXJzYXRpb25UeXBlSAKIAQFCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSJQCiFDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52MS5Db252ZXJzYXRpb24iQwoZVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkSDQoFdGl0bGUYAiABKAkiSQoaVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52MS5Db252ZXJzYXRpb24iNAoZRGVsZXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiHAoaRGVsZXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiLAoOU3VwcG9ydGVkTW9kZWwSDAoEbmFtZRgBIAEoCRIMCgRzbHVnGAIgASgJIhwKGkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0IkYKG0xpc3RTdXBwb3J0ZWRNb2RlbHNSZXNwb25zZRInCgZtb2RlbHMYASADKAsyFy5jaGF0LnYxLlN1cHBvcnRlZE1vZGVsInMKFFN0cmVhbUluaXRpYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCRIuCg5sYW5ndWFnZV9tb2RlbBgFIAEoDjIWLmNoYXQudjEuTGFuZ3VhZ2VNb2RlbBISCgptb2RlbF9zbHVnGAYgASgJIk8KD1N0cmVhbVBhcnRCZWdpbhISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYxLk1lc3NhZ2VQYXlsb2FkIjEKDE1lc3NhZ2VDaHVuaxISCgptZXNzYWdlX2lkGAEgASgJEg0KBWRlbHRhGAIgASgJIjoKE0luY29tcGxldGVJbmRpY2F0b3ISDgoGcmVhc29uGAEgASgJEhMKC3Jlc3BvbnNlX2lkGAIgASgJIk0KDVN0cmVhbVBhcnRFbmQSEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCItChJTdHJlY
W1GaW5hbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIiQKC1N0cmVhbUVycm9yEhUKDWVycm9yX21lc3NhZ2UYASABKAki0QIKJkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSACIAQESLgoObGFuZ3VhZ2VfbW9kZWwYAyABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwSEgoKbW9kZWxfc2x1ZxgHIAEoCRIUCgx1c2VyX21lc3NhZ2UYBCABKAkSHwoSdXNlcl9zZWxlY3RlZF90ZXh0GAUgASgJSAGIAQESOQoRY29udmVyc2F0aW9uX3R5cGUYBiABKA4yGS5jaGF0LnYxLkNvbnZlcnNhdGlvblR5cGVIAogBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE191c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlIr8DCidDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVzcG9uc2USPgoVc3RyZWFtX2luaXRpYWxpemF0aW9uGAEgASgLMh0uY2hhdC52MS5TdHJlYW1Jbml0aWFsaXphdGlvbkgAEjUKEXN0cmVhbV9wYXJ0X2JlZ2luGAIgASgLMhguY2hhdC52MS5TdHJlYW1QYXJ0QmVnaW5IABIuCg1tZXNzYWdlX2NodW5rGAMgASgLMhUuY2hhdC52MS5NZXNzYWdlQ2h1bmtIABI8ChRpbmNvbXBsZXRlX2luZGljYXRvchgEIAEoCzIcLmNoYXQudjEuSW5jb21wbGV0ZUluZGljYXRvckgAEjEKD3N0cmVhbV9wYXJ0X2VuZBgFIAEoCzIWLmNoYXQudjEuU3RyZWFtUGFydEVuZEgAEjoKE3N0cmVhbV9maW5hbGl6YXRpb24YBiABKAsyGy5jaGF0LnYxLlN0cmVhbUZpbmFsaXphdGlvbkgAEiwKDHN0cmVhbV9lcnJvchgHIAEoCzIULmNoYXQudjEuU3RyZWFtRXJyb3JIAEISChByZXNwb25zZV9wYXlsb2FkKv8DCg1MYW5ndWFnZU1vZGVsEh4KGkxBTkdVQUdFX01PREVMX1VOU1BFQ0lGSUVEEAASHwobTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDRPEAESJAogTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDQxX01JTkkQAhIfChtMQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNDEQBBIeChpMQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNRAHEiMKH0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X01JTkkQCBIjCh9MQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9OQU5PEAkSKgomTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfQ0hBVF9MQVRFU1QQChIcChhMQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzEQCxIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzFfTUlOSRAMEhwKGExBTkdVQUdFX01PREVMX09QRU5BSV9PMxANEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PM19NSU5JEA4SIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX080X01JTkkQDxIrCidMQU5HVUFHRV9NT0RFTF9PUEVOQUlfQ09ERVhfTUlOSV9MQVRFU1QQECpSChBDb252ZXJzYXRpb25UeXBlEiEKHUNPTlZFUlNBVElPTl9UWVBFX1VOU1BFQ0lGSUVEEAASGwoXQ09OVkVSU0FUSU9OX1RZUEVfREVCVUcQATLSCAoLQ2hhdFNlcnZpY2USgwEKEUxpc3RDb252ZXJzYXRpb
25zEiEuY2hhdC52MS5MaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QaIi5jaGF0LnYxLkxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2UiJ4LT5JMCIRIfL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucxKPAQoPR2V0Q29udmVyc2F0aW9uEh8uY2hhdC52MS5HZXRDb252ZXJzYXRpb25SZXF1ZXN0GiAuY2hhdC52MS5HZXRDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzEjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EqcBChlDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlEikuY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVxdWVzdBoqLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlc3BvbnNlIjOC0+STAi06ASoiKC9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMSwgEKH0NyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW0SLy5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0GjAuY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVzcG9uc2UiOoLT5JMCNDoBKiIvL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy9tZXNzYWdlcy9zdHJlYW0wARKbAQoSVXBkYXRlQ29udmVyc2F0aW9uEiIuY2hhdC52MS5VcGRhdGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52MS5VcGRhdGVDb252ZXJzYXRpb25SZXNwb25zZSI8gtPkkwI2OgEqMjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EpgBChJEZWxldGVDb252ZXJzYXRpb24SIi5jaGF0LnYxLkRlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QaIy5jaGF0LnYxLkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIjmC0+STAjMqMS9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SggEKE0xpc3RTdXBwb3J0ZWRNb2RlbHMSIy5jaGF0LnYxLkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0GiQuY2hhdC52MS5MaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2UiIILT5JMCGhIYL19wZC9hcGkvdjEvY2hhdHMvbW9kZWxzQn8KC2NvbS5jaGF0LnYxQglDaGF0UHJvdG9QAVoocGFwZXJkZWJ1Z2dlci9wa2cvZ2VuL2FwaS9jaGF0L3YxO2NoYXR2MaICA0NYWKoCB0NoYXQuVjHKAgdDaGF0XFYx4gITQ2hhdFxWMVxHUEJNZXRhZGF0YeoCCENoYXQ6OlYxYgZwcm90bzM", [file_google_api_annotations]); + 
fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCKRAQoMQ29udmVyc2F0aW9uEgoKAmlkGAEgASgJEg0KBXRpdGxlGAMgASgJEi4KDmxhbmd1YWdlX21vZGVsGAIgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhIKCm1vZGVsX3NsdWcYBSABKAkSIgoIbWVzc2FnZXMYBCADKAsyEC5jaGF0LnYxLk1lc3NhZ2UiQgoYTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0EhcKCnByb2plY3RfaWQYASABKAlIAIgBAUINCgtfcHJvamVjdF9pZCJJChlMaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlEiwKDWNvbnZlcnNhdGlvbnMYASADKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiIxChZHZXRDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSJGChdHZXRDb252ZXJzYXRpb25SZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiLLAgogQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QSEgoKcHJvamVjdF9pZBgBIAEoCRIcCg9jb252ZXJzYXRpb25faWQYAiABKAlIAIgBARIuCg5sYW5ndWFnZV9tb2RlbBgDIAEoDjIWLmNoYXQudjEuTGFuZ3VhZ2VNb2RlbBISCgptb2RlbF9zbHVnGAcgASgJEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAYgBARI5ChFjb252ZXJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjEuQ29udmVyc2F0aW9uVHlwZU
gCiAEBQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUiUAohQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIkMKGVVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEg0KBXRpdGxlGAIgASgJIkkKGlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjQKGURlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIhwKGkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIiwKDlN1cHBvcnRlZE1vZGVsEgwKBG5hbWUYASABKAkSDAoEc2x1ZxgCIAEoCSIcChpMaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdCJGChtMaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2USJwoGbW9kZWxzGAEgAygLMhcuY2hhdC52MS5TdXBwb3J0ZWRNb2RlbCJzChRTdHJlYW1Jbml0aWFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkSLgoObGFuZ3VhZ2VfbW9kZWwYBSABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwSEgoKbW9kZWxfc2x1ZxgGIAEoCSJPCg9TdHJlYW1QYXJ0QmVnaW4SEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCIxCgxNZXNzYWdlQ2h1bmsSEgoKbWVzc2FnZV9pZBgBIAEoCRINCgVkZWx0YRgCIAEoCSI6ChNJbmNvbXBsZXRlSW5kaWNhdG9yEg4KBnJlYXNvbhgBIAEoCRITCgtyZXNwb25zZV9pZBgCIAEoCSJNCg1TdHJlYW1QYXJ0RW5kEhIKCm1lc3NhZ2VfaWQYASABKAkSKAoHcGF5bG9hZBgDIAEoCzIXLmNoYXQudjEuTWVzc2FnZVBheWxvYWQiLQoSU3RyZWFtRmluYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSIkCgtTdHJlYW1FcnJvchIVCg1lcnJvcl9tZXNzYWdlGAEgASgJItECCiZDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgAiAEBEi4KDmxhbmd1YWdlX21vZGVsGAMgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhIKCm1vZGVsX3NsdWcYByABKAkSFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgBiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52MS5Db252ZXJzYXRpb25UeXBlSAKIAQFCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSK/AwonQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlEj4KFXN0cmVhbV9pbml0aWFsaXphdGlvbhgBIAEoCzIdLmNoYXQudjEuU3RyZWFtSW5pdGlhbGl6YXRpb25IABI1ChFzdHJlYW1fcGFydF9iZWdpbhgCIAEoCzIYLmNoYXQudjEuU3RyZWFtUGFydEJlZ2
luSAASLgoNbWVzc2FnZV9jaHVuaxgDIAEoCzIVLmNoYXQudjEuTWVzc2FnZUNodW5rSAASPAoUaW5jb21wbGV0ZV9pbmRpY2F0b3IYBCABKAsyHC5jaGF0LnYxLkluY29tcGxldGVJbmRpY2F0b3JIABIxCg9zdHJlYW1fcGFydF9lbmQYBSABKAsyFi5jaGF0LnYxLlN0cmVhbVBhcnRFbmRIABI6ChNzdHJlYW1fZmluYWxpemF0aW9uGAYgASgLMhsuY2hhdC52MS5TdHJlYW1GaW5hbGl6YXRpb25IABIsCgxzdHJlYW1fZXJyb3IYByABKAsyFC5jaGF0LnYxLlN0cmVhbUVycm9ySABCEgoQcmVzcG9uc2VfcGF5bG9hZCr/AwoNTGFuZ3VhZ2VNb2RlbBIeChpMQU5HVUFHRV9NT0RFTF9VTlNQRUNJRklFRBAAEh8KG0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0TxABEiQKIExBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0MV9NSU5JEAISHwobTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDQxEAQSHgoaTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDUQBxIjCh9MQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9NSU5JEAgSIwofTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfTkFOTxAJEioKJkxBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X0NIQVRfTEFURVNUEAoSHAoYTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xEAsSIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xX01JTkkQDBIcChhMQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzMQDRIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzNfTUlOSRAOEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PNF9NSU5JEA8SKwonTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0NPREVYX01JTklfTEFURVNUEBAqUgoQQ29udmVyc2F0aW9uVHlwZRIhCh1DT05WRVJTQVRJT05fVFlQRV9VTlNQRUNJRklFRBAAEhsKF0NPTlZFUlNBVElPTl9UWVBFX0RFQlVHEAEy0ggKC0NoYXRTZXJ2aWNlEoMBChFMaXN0Q29udmVyc2F0aW9ucxIhLmNoYXQudjEuTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0GiIuY2hhdC52MS5MaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMSjwEKD0dldENvbnZlcnNhdGlvbhIfLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVxdWVzdBogLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMxIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKnAQoZQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZRIpLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QaKi5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZSIzgtPkkwItOgEqIigvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzEsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6AS
oiLy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52MS5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjEuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YxL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MUIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MTtjaGF0djGiAgNDWFiqAgdDaGF0LlYxygIHQ2hhdFxWMeICE0NoYXRcVjFcR1BCTWV0YWRhdGHqAghDaGF0OjpWMWIGcHJvdG8z", [file_google_api_annotations]); /** * @generated from message chat.v1.MessageTypeToolCall @@ -247,9 +247,9 @@ export type Conversation = Message$1<"chat.v1.Conversation"> & { /** * new: model slug string * - * @generated from field: optional string model_slug = 5; + * @generated from field: string model_slug = 5; */ - modelSlug?: string; + modelSlug: string; /** * If list conversations, then messages length is 0. 
From 335dada2c567ce05c01483b464666b42109a6e39 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 13:21:49 +0800 Subject: [PATCH 09/14] =?UTF-8?q?fix:=20=E7=9F=B3=E5=B1=B1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/api/mapper/conversation.go | 3 +-- internal/services/toolkit/client/utils.go | 25 ++++++++++++--------- internal/services/toolkit/handler/stream.go | 12 +++++----- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/internal/api/mapper/conversation.go b/internal/api/mapper/conversation.go index 807839d3..b8463653 100644 --- a/internal/api/mapper/conversation.go +++ b/internal/api/mapper/conversation.go @@ -32,7 +32,6 @@ func MapModelConversationToProto(conversation *models.Conversation) *chatv1.Conv return msg.GetPayload().GetMessageType() != &chatv1.MessagePayload_System{} }) - // backward compatibility modelSlug := conversation.ModelSlug if modelSlug == "" { modelSlug = chatv1.LanguageModel(conversation.LanguageModel).String() @@ -41,7 +40,7 @@ func MapModelConversationToProto(conversation *models.Conversation) *chatv1.Conv return &chatv1.Conversation{ Id: conversation.ID.Hex(), Title: conversation.Title, - LanguageModel: chatv1.LanguageModel(conversation.LanguageModel), + LanguageModel: chatv1.LanguageModel(conversation.LanguageModel), // backward compatibility ModelSlug: modelSlug, Messages: filteredMessages, } diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index f9c0269f..39b24d87 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -11,6 +11,7 @@ import ( "github.com/openai/openai-go/v2" "github.com/openai/openai-go/v2/responses" + "github.com/samber/lo" ) // appendAssistantTextResponse appends the assistant's response to both OpenAI and in-app chat histories. 
@@ -43,16 +44,19 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI // The tool registry is managed centrally by the registry package. // The chat history is constructed manually, so Store must be set to false. func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { - if modelSlug == "gpt-5" || - modelSlug == "gpt-5-mini" || - modelSlug == "gpt-5-nano" || - modelSlug == "gpt-5-chat-latest" || - modelSlug == "o4-mini" || - modelSlug == "o3-mini" || - modelSlug == "o3" || - modelSlug == "o1-mini" || - modelSlug == "o1" || - modelSlug == "codex-mini-latest" { + var reasoningModels = []string{ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o4-mini", + "o3-mini", + "o3", + "o1-mini", + "o1", + "codex-mini-latest", + } + if lo.Contains(reasoningModels, modelSlug) { return responses.ResponseNewParams{ Model: modelSlug, Tools: toolRegistry.GetTools(), @@ -60,6 +64,7 @@ func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsI Store: openai.Bool(false), } } + return responses.ResponseNewParams{ Model: modelSlug, Temperature: openai.Float(0.7), diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go index d1ec497a..641c16f7 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -30,15 +30,13 @@ func (h *StreamHandler) SendInitialization() { return } - streamInit := &chatv1.StreamInitialization{ - ConversationId: h.conversationId, - ModelSlug: h.modelSlug, - LanguageModel: chatv1.LanguageModel(models.LanguageModelFromSlug(h.modelSlug)), // compatible with old code - } - h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamInitialization{ - StreamInitialization: streamInit, + StreamInitialization: 
&chatv1.StreamInitialization{ + ConversationId: h.conversationId, + ModelSlug: h.modelSlug, + LanguageModel: chatv1.LanguageModel(models.LanguageModelFromSlug(h.modelSlug)), // compatible with old code + }, }, }) } From 88028c2c66dfcc969b0daa381b21c7f9ffdc1ea8 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 13:25:23 +0800 Subject: [PATCH 10/14] frontend --- webapp/_webapp/src/hooks/useSendMessageStream.ts | 7 +++---- .../conversation/handlers/handleStreamInitialization.ts | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/webapp/_webapp/src/hooks/useSendMessageStream.ts b/webapp/_webapp/src/hooks/useSendMessageStream.ts index 9ec51912..a1329535 100644 --- a/webapp/_webapp/src/hooks/useSendMessageStream.ts +++ b/webapp/_webapp/src/hooks/useSendMessageStream.ts @@ -3,6 +3,7 @@ import { ConversationType, CreateConversationMessageStreamRequest, IncompleteIndicator, + LanguageModel, StreamFinalization, } from "../pkg/gen/apiclient/chat/v1/chat_pb"; import { PlainMessage } from "../query/types"; @@ -73,10 +74,8 @@ export function useSendMessageStream() { const request: PlainMessage = { projectId: getProjectId(), conversationId: currentConversation.id, - model: { - case: "modelSlug", - value: currentConversation.modelSlug!, - }, + languageModel: LanguageModel.UNSPECIFIED, // backward compatibility + modelSlug: currentConversation.modelSlug, userMessage: message, userSelectedText: selectedText, conversationType: conversationMode === "debug" ? 
ConversationType.DEBUG : ConversationType.UNSPECIFIED, diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts index 61831c6c..1970eac7 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts @@ -24,6 +24,6 @@ export function handleStreamInitialization(streamInit: StreamInitialization, ref logWarn("Streaming message parts length is not 1, this may indicate some stale messages in the store"); } - flushStreamingMessageToConversation(streamInit.conversationId, streamInit.model.case === "modelSlug" ? streamInit.model.value : undefined); + flushStreamingMessageToConversation(streamInit.conversationId, streamInit.modelSlug); refetchConversationList(); // Here we refetch conversation list because user may send chat message and immediately open history to view. } From a35d23b9652d5af6688aac08d922f0cdfc436e61 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 13:41:42 +0800 Subject: [PATCH 11/14] fix --- internal/api/mapper/conversation.go | 9 ++++++-- internal/models/language_model.go | 33 +++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/internal/api/mapper/conversation.go b/internal/api/mapper/conversation.go index b8463653..23bc7f11 100644 --- a/internal/api/mapper/conversation.go +++ b/internal/api/mapper/conversation.go @@ -34,13 +34,18 @@ func MapModelConversationToProto(conversation *models.Conversation) *chatv1.Conv modelSlug := conversation.ModelSlug if modelSlug == "" { - modelSlug = chatv1.LanguageModel(conversation.LanguageModel).String() + modelSlug = models.SlugFromLanguageModel(models.LanguageModel(conversation.LanguageModel)) + } + + languageModel := chatv1.LanguageModel(conversation.LanguageModel) + if languageModel == chatv1.LanguageModel_LANGUAGE_MODEL_UNSPECIFIED { + 
languageModel = chatv1.LanguageModel(models.LanguageModelFromSlug(modelSlug)) } return &chatv1.Conversation{ Id: conversation.ID.Hex(), Title: conversation.Title, - LanguageModel: chatv1.LanguageModel(conversation.LanguageModel), // backward compatibility + LanguageModel: languageModel, // backward compatibility ModelSlug: modelSlug, Messages: filteredMessages, } diff --git a/internal/models/language_model.go b/internal/models/language_model.go index d15fa939..73c94d25 100644 --- a/internal/models/language_model.go +++ b/internal/models/language_model.go @@ -89,3 +89,36 @@ func LanguageModelFromSlug(slug string) LanguageModel { return LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_UNSPECIFIED) } } + +func SlugFromLanguageModel(languageModel LanguageModel) string { + switch languageModel { + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT4O): + return "gpt-4o" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41): + return "gpt-4.1" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI): + return "gpt-4.1-mini" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5): + return "gpt-5" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_MINI): + return "gpt-5-mini" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_NANO): + return "gpt-5-nano" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT5_CHAT_LATEST): + return "gpt-5-chat-latest" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O1): + return "o1" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O1_MINI): + return "o1-mini" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O3): + return "o3" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O3_MINI): + return "o3-mini" + case LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_O4_MINI): + return "o4-mini" + case 
LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_CODEX_MINI_LATEST): + return "codex-mini-latest" + default: + return "unknown" + } +} From 5e9ba3f31bb33541408b0b8f495bb291c28fd053 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 13:48:55 +0800 Subject: [PATCH 12/14] format --- proto/chat/v1/chat.proto | 109 ++++++++++-------- webapp/_webapp/package.json | 2 +- .../message-entry-container/tools/jsonrpc.tsx | 1 - .../tools/review-paper.tsx | 6 +- .../message-entry-container/tools/tools.tsx | 12 +- webapp/_webapp/src/main.tsx | 10 +- webapp/_webapp/vite.config.ts | 29 +++-- 7 files changed, 86 insertions(+), 83 deletions(-) diff --git a/proto/chat/v1/chat.proto b/proto/chat/v1/chat.proto index 7451e7bf..c662416e 100644 --- a/proto/chat/v1/chat.proto +++ b/proto/chat/v1/chat.proto @@ -7,50 +7,35 @@ import "google/api/annotations.proto"; option go_package = "paperdebugger/pkg/gen/api/chat/v1;chatv1"; service ChatService { - rpc ListConversations(ListConversationsRequest) - returns (ListConversationsResponse) { - option (google.api.http) = { - get : "/_pd/api/v1/chats/conversations" - }; + rpc ListConversations(ListConversationsRequest) returns (ListConversationsResponse) { + option (google.api.http) = {get: "/_pd/api/v1/chats/conversations"}; } - rpc GetConversation(GetConversationRequest) - returns (GetConversationResponse) { - option (google.api.http) = { - get : "/_pd/api/v1/chats/conversations/{conversation_id}" - }; + rpc GetConversation(GetConversationRequest) returns (GetConversationResponse) { + option (google.api.http) = {get: "/_pd/api/v1/chats/conversations/{conversation_id}"}; } - rpc CreateConversationMessage(CreateConversationMessageRequest) - returns (CreateConversationMessageResponse) { + rpc CreateConversationMessage(CreateConversationMessageRequest) returns (CreateConversationMessageResponse) { option (google.api.http) = { - post : "/_pd/api/v1/chats/conversations/messages" - body : "*" + post: 
"/_pd/api/v1/chats/conversations/messages" + body: "*" }; } - rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) - returns (stream CreateConversationMessageStreamResponse) { + rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) returns (stream CreateConversationMessageStreamResponse) { option (google.api.http) = { - post : "/_pd/api/v1/chats/conversations/messages/stream" - body : "*" + post: "/_pd/api/v1/chats/conversations/messages/stream" + body: "*" }; } - rpc UpdateConversation(UpdateConversationRequest) - returns (UpdateConversationResponse) { + rpc UpdateConversation(UpdateConversationRequest) returns (UpdateConversationResponse) { option (google.api.http) = { - patch : "/_pd/api/v1/chats/conversations/{conversation_id}" - body : "*" + patch: "/_pd/api/v1/chats/conversations/{conversation_id}" + body: "*" }; } - rpc DeleteConversation(DeleteConversationRequest) - returns (DeleteConversationResponse) { - option (google.api.http) = { - delete : "/_pd/api/v1/chats/conversations/{conversation_id}" - }; + rpc DeleteConversation(DeleteConversationRequest) returns (DeleteConversationResponse) { + option (google.api.http) = {delete: "/_pd/api/v1/chats/conversations/{conversation_id}"}; } - rpc ListSupportedModels(ListSupportedModelsRequest) - returns (ListSupportedModelsResponse) { - option (google.api.http) = { - get : "/_pd/api/v1/chats/models" - }; + rpc ListSupportedModels(ListSupportedModelsRequest) returns (ListSupportedModelsResponse) { + option (google.api.http) = {get: "/_pd/api/v1/chats/models"}; } } @@ -74,9 +59,9 @@ enum LanguageModel { message MessageTypeToolCall { string name = 1; - string args = 2; // Json string + string args = 2; // Json string string result = 3; // Json string - string error = 4; // Json string + string error = 4; // Json string } message MessageTypeToolCallPrepareArguments { @@ -84,16 +69,22 @@ message MessageTypeToolCallPrepareArguments { string args = 2; // Json string } -message 
MessageTypeSystem { string content = 1; } +message MessageTypeSystem { + string content = 1; +} -message MessageTypeAssistant { string content = 1; } +message MessageTypeAssistant { + string content = 1; +} message MessageTypeUser { string content = 1; optional string selected_text = 2; } -message MessageTypeUnknown { string description = 1; } +message MessageTypeUnknown { + string description = 1; +} message MessagePayload { oneof message_type { @@ -115,21 +106,27 @@ message Conversation { string id = 1; string title = 3; LanguageModel language_model = 2; // deprecated: use model_slug instead - string model_slug = 5; // new: model slug string + string model_slug = 5; // new: model slug string // If list conversations, then messages length is 0. repeated Message messages = 4; } -message ListConversationsRequest { optional string project_id = 1; } +message ListConversationsRequest { + optional string project_id = 1; +} message ListConversationsResponse { // In this response, the length of conversations[i].messages should be 0. repeated Conversation conversations = 1; } -message GetConversationRequest { string conversation_id = 1; } +message GetConversationRequest { + string conversation_id = 1; +} -message GetConversationResponse { Conversation conversation = 1; } +message GetConversationResponse { + Conversation conversation = 1; +} message CreateConversationMessageRequest { string project_id = 1; @@ -137,22 +134,28 @@ message CreateConversationMessageRequest { // a new conversation will be created and the id will be returned. 
optional string conversation_id = 2; LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; } -message CreateConversationMessageResponse { Conversation conversation = 1; } +message CreateConversationMessageResponse { + Conversation conversation = 1; +} message UpdateConversationRequest { string conversation_id = 1; string title = 2; } -message UpdateConversationResponse { Conversation conversation = 1; } +message UpdateConversationResponse { + Conversation conversation = 1; +} -message DeleteConversationRequest { string conversation_id = 1; } +message DeleteConversationRequest { + string conversation_id = 1; +} message DeleteConversationResponse { // explicitly empty @@ -167,7 +170,9 @@ message ListSupportedModelsRequest { // explicitly empty } -message ListSupportedModelsResponse { repeated SupportedModel models = 1; } +message ListSupportedModelsResponse { + repeated SupportedModel models = 1; +} // ============================== Streaming Messages @@ -175,7 +180,7 @@ message ListSupportedModelsResponse { repeated SupportedModel models = 1; } message StreamInitialization { string conversation_id = 1; LanguageModel language_model = 5; // deprecated: use model_slug instead - string model_slug = 6; // new: model slug string + string model_slug = 6; // new: model slug string } // Designed as StreamPartBegin and StreamPartEnd to @@ -192,7 +197,7 @@ message StreamPartBegin { // and the StreamPartEnd can be directly called when the result is ready. 
message MessageChunk { string message_id = 1; // The id of the message that this chunk belongs to - string delta = 2; // The small piece of text + string delta = 2; // The small piece of text } message IncompleteIndicator { @@ -214,7 +219,9 @@ message StreamFinalization { // it should be called after the entire API call is finished. } -message StreamError { string error_message = 1; } +message StreamError { + string error_message = 1; +} // Currently, we inject two types of messages: // 1. System message @@ -223,7 +230,7 @@ message StreamError { string error_message = 1; } enum ConversationType { CONVERSATION_TYPE_UNSPECIFIED = 0; CONVERSATION_TYPE_DEBUG = 1; // does not contain any customized messages, the - // inapp_history and openai_history are synced. + // inapp_history and openai_history are synced. // CONVERSATION_TYPE_NO_SYSTEM_MESSAGE_INJECTION = 2; // CONVERSATION_TYPE_NO_USER_MESSAGE_INJECTION = 3; } @@ -235,7 +242,7 @@ message CreateConversationMessageStreamRequest { string project_id = 1; optional string conversation_id = 2; LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; diff --git a/webapp/_webapp/package.json b/webapp/_webapp/package.json index 8b08c29e..42fb6315 100644 --- a/webapp/_webapp/package.json +++ b/webapp/_webapp/package.json @@ -74,4 +74,4 @@ "typescript-eslint": "^8.33.0", "vite": "^6.3.5" } -} \ No newline at end of file +} diff --git a/webapp/_webapp/src/components/message-entry-container/tools/jsonrpc.tsx b/webapp/_webapp/src/components/message-entry-container/tools/jsonrpc.tsx index 6ab12bb2..86474e2e 100644 --- a/webapp/_webapp/src/components/message-entry-container/tools/jsonrpc.tsx +++ b/webapp/_webapp/src/components/message-entry-container/tools/jsonrpc.tsx @@ -8,7 +8,6 @@ type JsonRpcProps = 
{ }; export const JsonRpc = ({ functionName, preparing, animated }: JsonRpcProps) => { - if (preparing) { return (
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/review-paper.tsx b/webapp/_webapp/src/components/message-entry-container/tools/review-paper.tsx index c0cabbc7..bd5d4f96 100644 --- a/webapp/_webapp/src/components/message-entry-container/tools/review-paper.tsx +++ b/webapp/_webapp/src/components/message-entry-container/tools/review-paper.tsx @@ -58,9 +58,9 @@ export const ReviewPaperCard = ({ jsonRpcResult, preparing, animated }: ReviewPa {jsonRpcResult.result && (
- ℹ️ Review paper is currently scaled back to balance cost. Presently it identifies issues in Title, Abstract, and Introduction. - We are working to support the full review flow again. - If you find the input might not be properly passed, try highlighting the relevant sections and adding to chat. + ℹ️ Review paper is currently scaled back to balance cost. Presently it identifies issues in Title, + Abstract, and Introduction. We are working to support the full review flow again. If you find the input + might not be properly passed, try highlighting the relevant sections and adding to chat.
)} diff --git a/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx b/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx index de9899b8..7b01a7c7 100644 --- a/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx +++ b/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx @@ -57,20 +57,12 @@ export default function Tools({ messageId, functionName, message, error, prepari /> ); } else if (XTRA_MCP_TOOL_NAMES.includes(functionName)) { - return ( - - ); + return ; } // fallback to unknown tool card if the json rpc result is not defined if (jsonRpcResult) { - return ( - - ); + return ; } else { return ; } diff --git a/webapp/_webapp/src/main.tsx b/webapp/_webapp/src/main.tsx index 291773d3..bdec8369 100644 --- a/webapp/_webapp/src/main.tsx +++ b/webapp/_webapp/src/main.tsx @@ -21,20 +21,20 @@ import { DevTools } from "./views/devtools"; import { usePromptLibraryStore } from "./stores/prompt-library-store"; import { TopMenuButton } from "./components/top-menu-button"; import { Logo } from "./components/logo"; -import { getWebInstrumentations, initializeFaro } from '@grafana/faro-web-sdk'; -import { TracingInstrumentation } from '@grafana/faro-web-tracing'; +import { getWebInstrumentations, initializeFaro } from "@grafana/faro-web-sdk"; +import { TracingInstrumentation } from "@grafana/faro-web-tracing"; import { getManifest } from "./libs/manifest"; initializeFaro({ - url: 'https://faro-collector-prod-ap-southeast-1.grafana.net/collect/79c7648395df4df8b58c228fad42af57', + url: "https://faro-collector-prod-ap-southeast-1.grafana.net/collect/79c7648395df4df8b58c228fad42af57", app: { name: getManifest().name, version: getManifest().version, - environment: 'production' + environment: "production", }, sessionTracking: { samplingRate: 1, - persistent: true + persistent: true, }, instrumentations: [ // Mandatory, omits default instrumentations otherwise. 
diff --git a/webapp/_webapp/vite.config.ts b/webapp/_webapp/vite.config.ts index e560b798..ed3b3931 100644 --- a/webapp/_webapp/vite.config.ts +++ b/webapp/_webapp/vite.config.ts @@ -1,4 +1,4 @@ -import faroUploader from '@grafana/faro-rollup-plugin'; +import faroUploader from "@grafana/faro-rollup-plugin"; import react from "@vitejs/plugin-react-swc"; import fs from "fs"; import { produce } from "immer"; @@ -15,17 +15,22 @@ function generateConfig( return produce( { base: "/_pd/webapp", - plugins: [react(), faroUploader({ - appName: 'PaperDebugger', - endpoint: 'https://faro-api-prod-ap-southeast-1.grafana.net/faro/api/v1', - appId: '921', - stackId: '1466738', - verbose: true, - // instructions on how to obtain your API key are in the documentation - // https://grafana.com/docs/grafana-cloud/monitor-applications/frontend-observability/sourcemap-upload-plugins/#obtain-an-api-key - apiKey: process.env.GRAFANA_API_KEY || "glc_eyJvIjoiMTYxNTMzNCIsIm4iOiJwYXBlcmRlYnVnZ2VyLXNvdXJjZW1hcC1hY2Nlc3MtcG9saWN5LWNocm9tZS1leHRlbnNpb24iLCJrIjoiMzc4MnUzUDY1WjgyaVlpaGhEdUl0d0wxIiwibSI6eyJyIjoicHJvZC1hcC1zb3V0aGVhc3QtMSJ9fQ==", - gzipContents: true, - })], + plugins: [ + react(), + faroUploader({ + appName: "PaperDebugger", + endpoint: "https://faro-api-prod-ap-southeast-1.grafana.net/faro/api/v1", + appId: "921", + stackId: "1466738", + verbose: true, + // instructions on how to obtain your API key are in the documentation + // https://grafana.com/docs/grafana-cloud/monitor-applications/frontend-observability/sourcemap-upload-plugins/#obtain-an-api-key + apiKey: + process.env.GRAFANA_API_KEY || + "glc_eyJvIjoiMTYxNTMzNCIsIm4iOiJwYXBlcmRlYnVnZ2VyLXNvdXJjZW1hcC1hY2Nlc3MtcG9saWN5LWNocm9tZS1leHRlbnNpb24iLCJrIjoiMzc4MnUzUDY1WjgyaVlpaGhEdUl0d0wxIiwibSI6eyJyIjoicHJvZC1hcC1zb3V0aGVhc3QtMSJ9fQ==", + gzipContents: true, + }), + ], esbuild: { charset: "ascii", }, From 77faa530a8732afdf6c81c46bd11c456a415918b Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 14:33:32 +0800 
Subject: [PATCH 13/14] remove modelslug --- internal/api/mapper/conversation.go | 4 +- internal/services/toolkit/handler/stream.go | 4 +- pkg/gen/api/chat/v1/chat.pb.go | 29 +---- proto/chat/v1/chat.proto | 107 ++++++++---------- .../src/pkg/gen/apiclient/chat/v1/chat_pb.ts | 17 +-- 5 files changed, 61 insertions(+), 100 deletions(-) diff --git a/internal/api/mapper/conversation.go b/internal/api/mapper/conversation.go index 23bc7f11..88ca05a0 100644 --- a/internal/api/mapper/conversation.go +++ b/internal/api/mapper/conversation.go @@ -46,7 +46,7 @@ func MapModelConversationToProto(conversation *models.Conversation) *chatv1.Conv Id: conversation.ID.Hex(), Title: conversation.Title, LanguageModel: languageModel, // backward compatibility - ModelSlug: modelSlug, - Messages: filteredMessages, + // ModelSlug: modelSlug, + Messages: filteredMessages, } } diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go index 641c16f7..96ca6668 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -34,8 +34,8 @@ func (h *StreamHandler) SendInitialization() { ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamInitialization{ StreamInitialization: &chatv1.StreamInitialization{ ConversationId: h.conversationId, - ModelSlug: h.modelSlug, - LanguageModel: chatv1.LanguageModel(models.LanguageModelFromSlug(h.modelSlug)), // compatible with old code + // ModelSlug: h.modelSlug, + LanguageModel: chatv1.LanguageModel(models.LanguageModelFromSlug(h.modelSlug)), // compatible with old code }, }, }) diff --git a/pkg/gen/api/chat/v1/chat.pb.go b/pkg/gen/api/chat/v1/chat.pb.go index 33751f72..662833e0 100644 --- a/pkg/gen/api/chat/v1/chat.pb.go +++ b/pkg/gen/api/chat/v1/chat.pb.go @@ -658,7 +658,7 @@ type Conversation struct { Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` 
LanguageModel LanguageModel `protobuf:"varint,2,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` // deprecated: use model_slug instead - ModelSlug string `protobuf:"bytes,5,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` // new: model slug string + // string model_slug = 5; // new: model slug string // If list conversations, then messages length is 0. Messages []*Message `protobuf:"bytes,4,rep,name=messages,proto3" json:"messages,omitempty"` unknownFields protoimpl.UnknownFields @@ -716,13 +716,6 @@ func (x *Conversation) GetLanguageModel() LanguageModel { return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } -func (x *Conversation) GetModelSlug() string { - if x != nil { - return x.ModelSlug - } - return "" -} - func (x *Conversation) GetMessages() []*Message { if x != nil { return x.Messages @@ -1358,7 +1351,6 @@ type StreamInitialization struct { state protoimpl.MessageState `protogen:"open.v1"` ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` LanguageModel LanguageModel `protobuf:"varint,5,opt,name=language_model,json=languageModel,proto3,enum=chat.v1.LanguageModel" json:"language_model,omitempty"` // deprecated: use model_slug instead - ModelSlug string `protobuf:"bytes,6,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` // new: model slug string unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1407,13 +1399,6 @@ func (x *StreamInitialization) GetLanguageModel() LanguageModel { return LanguageModel_LANGUAGE_MODEL_UNSPECIFIED } -func (x *StreamInitialization) GetModelSlug() string { - if x != nil { - return x.ModelSlug - } - return "" -} - // Designed as StreamPartBegin and StreamPartEnd to // handle the case where assistant and tool are called at the same time. 
// @@ -2019,13 +2004,11 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\aMessage\x12\x1d\n" + "\n" + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + - "\apayload\x18\x03 \x01(\v2\x17.chat.v1.MessagePayloadR\apayload\"\xc0\x01\n" + + "\apayload\x18\x03 \x01(\v2\x17.chat.v1.MessagePayloadR\apayload\"\xa1\x01\n" + "\fConversation\x12\x0e\n" + "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + "\x05title\x18\x03 \x01(\tR\x05title\x12=\n" + - "\x0elanguage_model\x18\x02 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\x1d\n" + - "\n" + - "model_slug\x18\x05 \x01(\tR\tmodelSlug\x12,\n" + + "\x0elanguage_model\x18\x02 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12,\n" + "\bmessages\x18\x04 \x03(\v2\x10.chat.v1.MessageR\bmessages\"M\n" + "\x18ListConversationsRequest\x12\"\n" + "\n" + @@ -2065,12 +2048,10 @@ const file_chat_v1_chat_proto_rawDesc = "" + "\x04slug\x18\x02 \x01(\tR\x04slug\"\x1c\n" + "\x1aListSupportedModelsRequest\"N\n" + "\x1bListSupportedModelsResponse\x12/\n" + - "\x06models\x18\x01 \x03(\v2\x17.chat.v1.SupportedModelR\x06models\"\x9d\x01\n" + + "\x06models\x18\x01 \x03(\v2\x17.chat.v1.SupportedModelR\x06models\"~\n" + "\x14StreamInitialization\x12'\n" + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12=\n" + - "\x0elanguage_model\x18\x05 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\x12\x1d\n" + - "\n" + - "model_slug\x18\x06 \x01(\tR\tmodelSlug\"c\n" + + "\x0elanguage_model\x18\x05 \x01(\x0e2\x16.chat.v1.LanguageModelR\rlanguageModel\"c\n" + "\x0fStreamPartBegin\x12\x1d\n" + "\n" + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + diff --git a/proto/chat/v1/chat.proto b/proto/chat/v1/chat.proto index c662416e..d52b3a1c 100644 --- a/proto/chat/v1/chat.proto +++ b/proto/chat/v1/chat.proto @@ -7,35 +7,50 @@ import "google/api/annotations.proto"; option go_package = "paperdebugger/pkg/gen/api/chat/v1;chatv1"; service ChatService { - rpc ListConversations(ListConversationsRequest) returns (ListConversationsResponse) { - option 
(google.api.http) = {get: "/_pd/api/v1/chats/conversations"}; + rpc ListConversations(ListConversationsRequest) + returns (ListConversationsResponse) { + option (google.api.http) = { + get : "/_pd/api/v1/chats/conversations" + }; } - rpc GetConversation(GetConversationRequest) returns (GetConversationResponse) { - option (google.api.http) = {get: "/_pd/api/v1/chats/conversations/{conversation_id}"}; + rpc GetConversation(GetConversationRequest) + returns (GetConversationResponse) { + option (google.api.http) = { + get : "/_pd/api/v1/chats/conversations/{conversation_id}" + }; } - rpc CreateConversationMessage(CreateConversationMessageRequest) returns (CreateConversationMessageResponse) { + rpc CreateConversationMessage(CreateConversationMessageRequest) + returns (CreateConversationMessageResponse) { option (google.api.http) = { - post: "/_pd/api/v1/chats/conversations/messages" - body: "*" + post : "/_pd/api/v1/chats/conversations/messages" + body : "*" }; } - rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) returns (stream CreateConversationMessageStreamResponse) { + rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) + returns (stream CreateConversationMessageStreamResponse) { option (google.api.http) = { - post: "/_pd/api/v1/chats/conversations/messages/stream" - body: "*" + post : "/_pd/api/v1/chats/conversations/messages/stream" + body : "*" }; } - rpc UpdateConversation(UpdateConversationRequest) returns (UpdateConversationResponse) { + rpc UpdateConversation(UpdateConversationRequest) + returns (UpdateConversationResponse) { option (google.api.http) = { - patch: "/_pd/api/v1/chats/conversations/{conversation_id}" - body: "*" + patch : "/_pd/api/v1/chats/conversations/{conversation_id}" + body : "*" }; } - rpc DeleteConversation(DeleteConversationRequest) returns (DeleteConversationResponse) { - option (google.api.http) = {delete: "/_pd/api/v1/chats/conversations/{conversation_id}"}; + rpc 
DeleteConversation(DeleteConversationRequest) + returns (DeleteConversationResponse) { + option (google.api.http) = { + delete : "/_pd/api/v1/chats/conversations/{conversation_id}" + }; } - rpc ListSupportedModels(ListSupportedModelsRequest) returns (ListSupportedModelsResponse) { - option (google.api.http) = {get: "/_pd/api/v1/chats/models"}; + rpc ListSupportedModels(ListSupportedModelsRequest) + returns (ListSupportedModelsResponse) { + option (google.api.http) = { + get : "/_pd/api/v1/chats/models" + }; } } @@ -59,9 +74,9 @@ enum LanguageModel { message MessageTypeToolCall { string name = 1; - string args = 2; // Json string + string args = 2; // Json string string result = 3; // Json string - string error = 4; // Json string + string error = 4; // Json string } message MessageTypeToolCallPrepareArguments { @@ -69,22 +84,16 @@ message MessageTypeToolCallPrepareArguments { string args = 2; // Json string } -message MessageTypeSystem { - string content = 1; -} +message MessageTypeSystem { string content = 1; } -message MessageTypeAssistant { - string content = 1; -} +message MessageTypeAssistant { string content = 1; } message MessageTypeUser { string content = 1; optional string selected_text = 2; } -message MessageTypeUnknown { - string description = 1; -} +message MessageTypeUnknown { string description = 1; } message MessagePayload { oneof message_type { @@ -106,27 +115,21 @@ message Conversation { string id = 1; string title = 3; LanguageModel language_model = 2; // deprecated: use model_slug instead - string model_slug = 5; // new: model slug string + // string model_slug = 5; // new: model slug string // If list conversations, then messages length is 0. repeated Message messages = 4; } -message ListConversationsRequest { - optional string project_id = 1; -} +message ListConversationsRequest { optional string project_id = 1; } message ListConversationsResponse { // In this response, the length of conversations[i].messages should be 0. 
repeated Conversation conversations = 1; } -message GetConversationRequest { - string conversation_id = 1; -} +message GetConversationRequest { string conversation_id = 1; } -message GetConversationResponse { - Conversation conversation = 1; -} +message GetConversationResponse { Conversation conversation = 1; } message CreateConversationMessageRequest { string project_id = 1; @@ -134,28 +137,22 @@ message CreateConversationMessageRequest { // a new conversation will be created and the id will be returned. optional string conversation_id = 2; LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; } -message CreateConversationMessageResponse { - Conversation conversation = 1; -} +message CreateConversationMessageResponse { Conversation conversation = 1; } message UpdateConversationRequest { string conversation_id = 1; string title = 2; } -message UpdateConversationResponse { - Conversation conversation = 1; -} +message UpdateConversationResponse { Conversation conversation = 1; } -message DeleteConversationRequest { - string conversation_id = 1; -} +message DeleteConversationRequest { string conversation_id = 1; } message DeleteConversationResponse { // explicitly empty @@ -170,9 +167,7 @@ message ListSupportedModelsRequest { // explicitly empty } -message ListSupportedModelsResponse { - repeated SupportedModel models = 1; -} +message ListSupportedModelsResponse { repeated SupportedModel models = 1; } // ============================== Streaming Messages @@ -180,7 +175,7 @@ message ListSupportedModelsResponse { message StreamInitialization { string conversation_id = 1; LanguageModel language_model = 5; // deprecated: use model_slug instead - string model_slug = 6; // new: model slug string + // string model_slug = 6; // new: model slug 
string } // Designed as StreamPartBegin and StreamPartEnd to @@ -197,7 +192,7 @@ message StreamPartBegin { // and the StreamPartEnd can be directly called when the result is ready. message MessageChunk { string message_id = 1; // The id of the message that this chunk belongs to - string delta = 2; // The small piece of text + string delta = 2; // The small piece of text } message IncompleteIndicator { @@ -219,9 +214,7 @@ message StreamFinalization { // it should be called after the entire API call is finished. } -message StreamError { - string error_message = 1; -} +message StreamError { string error_message = 1; } // Currently, we inject two types of messages: // 1. System message @@ -242,7 +235,7 @@ message CreateConversationMessageStreamRequest { string project_id = 1; optional string conversation_id = 2; LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts index a9c99b12..279b164f 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v1/chat_pb.ts @@ -11,7 +11,7 @@ import type { Message as Message$1 } from "@bufbuild/protobuf"; * Describes the file chat/v1/chat.proto. 
*/ export const file_chat_v1_chat: GenFile = /*@__PURE__*/ - fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCKRAQoMQ29udmVyc2F0aW9uEgoKAmlkGAEgASgJEg0KBXRpdGxlGAMgASgJEi4KDmxhbmd1YWdlX21vZGVsGAIgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhIKCm1vZGVsX3NsdWcYBSABKAkSIgoIbWVzc2FnZXMYBCADKAsyEC5jaGF0LnYxLk1lc3NhZ2UiQgoYTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0EhcKCnByb2plY3RfaWQYASABKAlIAIgBAUINCgtfcHJvamVjdF9pZCJJChlMaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlEiwKDWNvbnZlcnNhdGlvbnMYASADKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiIxChZHZXRDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSJGChdHZXRDb252ZXJzYXRpb25SZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiLLAgogQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QSEgoKcHJvamVjdF9pZBgBIAEoCRIcCg9jb252ZXJzYXRpb25faWQYAiABKAlIAIgBARIuCg5sYW5ndWFnZV9tb2RlbBgDIAEoDjIWLmNoYXQudjEuTGFuZ3VhZ2VNb2RlbBISCgptb2RlbF9zbHVnGAcgASgJEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAYgBARI5ChFjb252Z
XJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjEuQ29udmVyc2F0aW9uVHlwZUgCiAEBQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUiUAohQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIkMKGVVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEg0KBXRpdGxlGAIgASgJIkkKGlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjQKGURlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIhwKGkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIiwKDlN1cHBvcnRlZE1vZGVsEgwKBG5hbWUYASABKAkSDAoEc2x1ZxgCIAEoCSIcChpMaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdCJGChtMaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2USJwoGbW9kZWxzGAEgAygLMhcuY2hhdC52MS5TdXBwb3J0ZWRNb2RlbCJzChRTdHJlYW1Jbml0aWFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkSLgoObGFuZ3VhZ2VfbW9kZWwYBSABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwSEgoKbW9kZWxfc2x1ZxgGIAEoCSJPCg9TdHJlYW1QYXJ0QmVnaW4SEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCIxCgxNZXNzYWdlQ2h1bmsSEgoKbWVzc2FnZV9pZBgBIAEoCRINCgVkZWx0YRgCIAEoCSI6ChNJbmNvbXBsZXRlSW5kaWNhdG9yEg4KBnJlYXNvbhgBIAEoCRITCgtyZXNwb25zZV9pZBgCIAEoCSJNCg1TdHJlYW1QYXJ0RW5kEhIKCm1lc3NhZ2VfaWQYASABKAkSKAoHcGF5bG9hZBgDIAEoCzIXLmNoYXQudjEuTWVzc2FnZVBheWxvYWQiLQoSU3RyZWFtRmluYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSIkCgtTdHJlYW1FcnJvchIVCg1lcnJvcl9tZXNzYWdlGAEgASgJItECCiZDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgAiAEBEi4KDmxhbmd1YWdlX21vZGVsGAMgASgOMhYuY2hhdC52MS5MYW5ndWFnZU1vZGVsEhIKCm1vZGVsX3NsdWcYByABKAkSFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgBiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52MS5Db252ZXJzYXRpb25UeXBlSAKIAQFCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSK/AwonQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlEj4KFXN0cmVhbV9pbml0aWFsaXphdGlvbhgBIAEoCzIdLmNoYXQudjEuU3RyZWFtSW5pdGlhbGl6YXRpb25IABI1ChFzd
HJlYW1fcGFydF9iZWdpbhgCIAEoCzIYLmNoYXQudjEuU3RyZWFtUGFydEJlZ2luSAASLgoNbWVzc2FnZV9jaHVuaxgDIAEoCzIVLmNoYXQudjEuTWVzc2FnZUNodW5rSAASPAoUaW5jb21wbGV0ZV9pbmRpY2F0b3IYBCABKAsyHC5jaGF0LnYxLkluY29tcGxldGVJbmRpY2F0b3JIABIxCg9zdHJlYW1fcGFydF9lbmQYBSABKAsyFi5jaGF0LnYxLlN0cmVhbVBhcnRFbmRIABI6ChNzdHJlYW1fZmluYWxpemF0aW9uGAYgASgLMhsuY2hhdC52MS5TdHJlYW1GaW5hbGl6YXRpb25IABIsCgxzdHJlYW1fZXJyb3IYByABKAsyFC5jaGF0LnYxLlN0cmVhbUVycm9ySABCEgoQcmVzcG9uc2VfcGF5bG9hZCr/AwoNTGFuZ3VhZ2VNb2RlbBIeChpMQU5HVUFHRV9NT0RFTF9VTlNQRUNJRklFRBAAEh8KG0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0TxABEiQKIExBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0MV9NSU5JEAISHwobTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDQxEAQSHgoaTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDUQBxIjCh9MQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9NSU5JEAgSIwofTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfTkFOTxAJEioKJkxBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X0NIQVRfTEFURVNUEAoSHAoYTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xEAsSIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08xX01JTkkQDBIcChhMQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzMQDRIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzNfTUlOSRAOEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PNF9NSU5JEA8SKwonTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0NPREVYX01JTklfTEFURVNUEBAqUgoQQ29udmVyc2F0aW9uVHlwZRIhCh1DT05WRVJTQVRJT05fVFlQRV9VTlNQRUNJRklFRBAAEhsKF0NPTlZFUlNBVElPTl9UWVBFX0RFQlVHEAEy0ggKC0NoYXRTZXJ2aWNlEoMBChFMaXN0Q29udmVyc2F0aW9ucxIhLmNoYXQudjEuTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0GiIuY2hhdC52MS5MaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMSjwEKD0dldENvbnZlcnNhdGlvbhIfLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVxdWVzdBogLmNoYXQudjEuR2V0Q29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMxIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKnAQoZQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZRIpLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QaKi5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZSIzgtPkkwItOgEqIigvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzEsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjEuQ3JlY
XRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjEuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52MS5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52MS5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjEuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YxL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MUIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MTtjaGF0djGiAgNDWFiqAgdDaGF0LlYxygIHQ2hhdFxWMeICE0NoYXRcVjFcR1BCTWV0YWRhdGHqAghDaGF0OjpWMWIGcHJvdG8z", [file_google_api_annotations]); + fileDesc("ChJjaGF0L3YxL2NoYXQucHJvdG8SB2NoYXQudjEiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIicKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkiUAoPTWVzc2FnZVR5cGVVc2VyEg8KB2NvbnRlbnQYASABKAkSGgoNc2VsZWN0ZWRfdGV4dBgCIAEoCUgAiAEBQhAKDl9zZWxlY3RlZF90ZXh0IikKEk1lc3NhZ2VUeXBlVW5rbm93bhITCgtkZXNjcmlwdGlvbhgBIAEoCSLkAgoOTWVzc2FnZVBheWxvYWQSLAoGc3lzdGVtGAEgASgLMhouY2hhdC52MS5NZXNzYWdlVHlwZVN5c3RlbUgAEigKBHVzZXIYAiABKAsyGC5jaGF0LnYxLk1lc3NhZ2VUeXBlVXNlckgAEjIKCWFzc2lzdGFudBgDIAEoCzIdLmNoYXQudjEuTWVzc2FnZVR5cGVBc3Npc3RhbnRIABJTCht0b29sX2NhbGxfcHJlcGFyZV9hcmd1bWVudHMYBCABKAsyLC5jaGF0LnYxLk1lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzSAASMQoJdG9vbF9jYWxsGAUgASgLMhwuY2hhdC52MS5NZXNzYWdlVHlwZVRvb2xDYWxsSAASLgoHdW5rbm93bhgGIAEoCzIbLmNoYXQudjEuTWVzc2FnZVR5cGVVbmtub3duSABCDgoMbWVzc2FnZV90eXBlIkcKB01lc3NhZ2USEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGA
MgASgLMhcuY2hhdC52MS5NZXNzYWdlUGF5bG9hZCJ9CgxDb252ZXJzYXRpb24SCgoCaWQYASABKAkSDQoFdGl0bGUYAyABKAkSLgoObGFuZ3VhZ2VfbW9kZWwYAiABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwSIgoIbWVzc2FnZXMYBCADKAsyEC5jaGF0LnYxLk1lc3NhZ2UiQgoYTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0EhcKCnByb2plY3RfaWQYASABKAlIAIgBAUINCgtfcHJvamVjdF9pZCJJChlMaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlEiwKDWNvbnZlcnNhdGlvbnMYASADKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiIxChZHZXRDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSJGChdHZXRDb252ZXJzYXRpb25SZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYxLkNvbnZlcnNhdGlvbiLLAgogQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QSEgoKcHJvamVjdF9pZBgBIAEoCRIcCg9jb252ZXJzYXRpb25faWQYAiABKAlIAIgBARIuCg5sYW5ndWFnZV9tb2RlbBgDIAEoDjIWLmNoYXQudjEuTGFuZ3VhZ2VNb2RlbBISCgptb2RlbF9zbHVnGAcgASgJEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAYgBARI5ChFjb252ZXJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjEuQ29udmVyc2F0aW9uVHlwZUgCiAEBQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUiUAohQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIkMKGVVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEg0KBXRpdGxlGAIgASgJIkkKGlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjEuQ29udmVyc2F0aW9uIjQKGURlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIhwKGkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIiwKDlN1cHBvcnRlZE1vZGVsEgwKBG5hbWUYASABKAkSDAoEc2x1ZxgCIAEoCSIcChpMaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdCJGChtMaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2USJwoGbW9kZWxzGAEgAygLMhcuY2hhdC52MS5TdXBwb3J0ZWRNb2RlbCJfChRTdHJlYW1Jbml0aWFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkSLgoObGFuZ3VhZ2VfbW9kZWwYBSABKA4yFi5jaGF0LnYxLkxhbmd1YWdlTW9kZWwiTwoPU3RyZWFtUGFydEJlZ2luEhIKCm1lc3NhZ2VfaWQYASABKAkSKAoHcGF5bG9hZBgDIAEoCzIXLmNoYXQudjEuTWVzc2FnZVBheWxvYWQiMQoMTWVzc2FnZUNodW5rEhIKCm1lc3NhZ2VfaWQYASABKAkSDQoFZGVsdGEYAiABKAkiOgoTSW5jb21wbGV0ZUluZGljYXRvchIOCgZyZWFzb24YASABKAkSEwoLcmVzcG9uc2VfaWQYAi
ABKAkiTQoNU3RyZWFtUGFydEVuZBISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYxLk1lc3NhZ2VQYXlsb2FkIi0KElN0cmVhbUZpbmFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkiJAoLU3RyZWFtRXJyb3ISFQoNZXJyb3JfbWVzc2FnZRgBIAEoCSLRAgomQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlcXVlc3QSEgoKcHJvamVjdF9pZBgBIAEoCRIcCg9jb252ZXJzYXRpb25faWQYAiABKAlIAIgBARIuCg5sYW5ndWFnZV9tb2RlbBgDIAEoDjIWLmNoYXQudjEuTGFuZ3VhZ2VNb2RlbBISCgptb2RlbF9zbHVnGAcgASgJEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAYgBARI5ChFjb252ZXJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjEuQ29udmVyc2F0aW9uVHlwZUgCiAEBQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUivwMKJ0NyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXNwb25zZRI+ChVzdHJlYW1faW5pdGlhbGl6YXRpb24YASABKAsyHS5jaGF0LnYxLlN0cmVhbUluaXRpYWxpemF0aW9uSAASNQoRc3RyZWFtX3BhcnRfYmVnaW4YAiABKAsyGC5jaGF0LnYxLlN0cmVhbVBhcnRCZWdpbkgAEi4KDW1lc3NhZ2VfY2h1bmsYAyABKAsyFS5jaGF0LnYxLk1lc3NhZ2VDaHVua0gAEjwKFGluY29tcGxldGVfaW5kaWNhdG9yGAQgASgLMhwuY2hhdC52MS5JbmNvbXBsZXRlSW5kaWNhdG9ySAASMQoPc3RyZWFtX3BhcnRfZW5kGAUgASgLMhYuY2hhdC52MS5TdHJlYW1QYXJ0RW5kSAASOgoTc3RyZWFtX2ZpbmFsaXphdGlvbhgGIAEoCzIbLmNoYXQudjEuU3RyZWFtRmluYWxpemF0aW9uSAASLAoMc3RyZWFtX2Vycm9yGAcgASgLMhQuY2hhdC52MS5TdHJlYW1FcnJvckgAQhIKEHJlc3BvbnNlX3BheWxvYWQq/wMKDUxhbmd1YWdlTW9kZWwSHgoaTEFOR1VBR0VfTU9ERUxfVU5TUEVDSUZJRUQQABIfChtMQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNE8QARIkCiBMQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNDFfTUlOSRACEh8KG0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ0MRAEEh4KGkxBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1EAcSIwofTEFOR1VBR0VfTU9ERUxfT1BFTkFJX0dQVDVfTUlOSRAIEiMKH0xBTkdVQUdFX01PREVMX09QRU5BSV9HUFQ1X05BTk8QCRIqCiZMQU5HVUFHRV9NT0RFTF9PUEVOQUlfR1BUNV9DSEFUX0xBVEVTVBAKEhwKGExBTkdVQUdFX01PREVMX09QRU5BSV9PMRALEiEKHUxBTkdVQUdFX01PREVMX09QRU5BSV9PMV9NSU5JEAwSHAoYTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08zEA0SIQodTEFOR1VBR0VfTU9ERUxfT1BFTkFJX08zX01JTkkQDhIhCh1MQU5HVUFHRV9NT0RFTF9PUEVOQUlfTzRfTUlOSRAPEisKJ0xBTkdVQUdFX01PREVMX09QRU5BSV9DT0RFWF9NSU5JX0xBVEVTVBAQKlIKEENvbnZlcnNhdGlvblR5cGUSIQodQ09OVk
VSU0FUSU9OX1RZUEVfVU5TUEVDSUZJRUQQABIbChdDT05WRVJTQVRJT05fVFlQRV9ERUJVRxABMtIICgtDaGF0U2VydmljZRKDAQoRTGlzdENvbnZlcnNhdGlvbnMSIS5jaGF0LnYxLkxpc3RDb252ZXJzYXRpb25zUmVxdWVzdBoiLmNoYXQudjEuTGlzdENvbnZlcnNhdGlvbnNSZXNwb25zZSIngtPkkwIhEh8vX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zEo8BCg9HZXRDb252ZXJzYXRpb24SHy5jaGF0LnYxLkdldENvbnZlcnNhdGlvblJlcXVlc3QaIC5jaGF0LnYxLkdldENvbnZlcnNhdGlvblJlc3BvbnNlIjmC0+STAjMSMS9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SpwEKGUNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2USKS5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXF1ZXN0GiouY2hhdC52MS5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlUmVzcG9uc2UiM4LT5JMCLToBKiIoL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy9tZXNzYWdlcxLCAQofQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbRIvLmNoYXQudjEuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlcXVlc3QaMC5jaGF0LnYxLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXNwb25zZSI6gtPkkwI0OgEqIi8vX3BkL2FwaS92MS9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzL3N0cmVhbTABEpsBChJVcGRhdGVDb252ZXJzYXRpb24SIi5jaGF0LnYxLlVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QaIy5jaGF0LnYxLlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIjyC0+STAjY6ASoyMS9fcGQvYXBpL3YxL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SmAEKEkRlbGV0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjEuRGVsZXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjEuRGVsZXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMyoxL19wZC9hcGkvdjEvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKCAQoTTGlzdFN1cHBvcnRlZE1vZGVscxIjLmNoYXQudjEuTGlzdFN1cHBvcnRlZE1vZGVsc1JlcXVlc3QaJC5jaGF0LnYxLkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXNwb25zZSIggtPkkwIaEhgvX3BkL2FwaS92MS9jaGF0cy9tb2RlbHNCfwoLY29tLmNoYXQudjFCCUNoYXRQcm90b1ABWihwYXBlcmRlYnVnZ2VyL3BrZy9nZW4vYXBpL2NoYXQvdjE7Y2hhdHYxogIDQ1hYqgIHQ2hhdC5WMcoCB0NoYXRcVjHiAhNDaGF0XFYxXEdQQk1ldGFkYXRh6gIIQ2hhdDo6VjFiBnByb3RvMw", [file_google_api_annotations]); /** * @generated from message chat.v1.MessageTypeToolCall @@ -245,13 +245,7 @@ export type Conversation = Message$1<"chat.v1.Conversation"> & { languageModel: LanguageModel; /** - * new: model slug string - * - * @generated from 
field: string model_slug = 5; - */ - modelSlug: string; - - /** + * string model_slug = 5; // new: model slug string * If list conversations, then messages length is 0. * * @generated from field: repeated chat.v1.Message messages = 4; @@ -549,13 +543,6 @@ export type StreamInitialization = Message$1<"chat.v1.StreamInitialization"> & { * @generated from field: chat.v1.LanguageModel language_model = 5; */ languageModel: LanguageModel; - - /** - * new: model slug string - * - * @generated from field: string model_slug = 6; - */ - modelSlug: string; }; /** From 209b6f4f0bf5a9614518276472568450eea5a1e3 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Wed, 17 Dec 2025 14:36:18 +0800 Subject: [PATCH 14/14] format --- proto/chat/v1/chat.proto | 103 +++++++++++++++++++++------------------ 1 file changed, 55 insertions(+), 48 deletions(-) diff --git a/proto/chat/v1/chat.proto b/proto/chat/v1/chat.proto index d52b3a1c..b8d6f5b9 100644 --- a/proto/chat/v1/chat.proto +++ b/proto/chat/v1/chat.proto @@ -7,50 +7,35 @@ import "google/api/annotations.proto"; option go_package = "paperdebugger/pkg/gen/api/chat/v1;chatv1"; service ChatService { - rpc ListConversations(ListConversationsRequest) - returns (ListConversationsResponse) { - option (google.api.http) = { - get : "/_pd/api/v1/chats/conversations" - }; + rpc ListConversations(ListConversationsRequest) returns (ListConversationsResponse) { + option (google.api.http) = {get: "/_pd/api/v1/chats/conversations"}; } - rpc GetConversation(GetConversationRequest) - returns (GetConversationResponse) { - option (google.api.http) = { - get : "/_pd/api/v1/chats/conversations/{conversation_id}" - }; + rpc GetConversation(GetConversationRequest) returns (GetConversationResponse) { + option (google.api.http) = {get: "/_pd/api/v1/chats/conversations/{conversation_id}"}; } - rpc CreateConversationMessage(CreateConversationMessageRequest) - returns (CreateConversationMessageResponse) { + rpc 
CreateConversationMessage(CreateConversationMessageRequest) returns (CreateConversationMessageResponse) { option (google.api.http) = { - post : "/_pd/api/v1/chats/conversations/messages" - body : "*" + post: "/_pd/api/v1/chats/conversations/messages" + body: "*" }; } - rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) - returns (stream CreateConversationMessageStreamResponse) { + rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) returns (stream CreateConversationMessageStreamResponse) { option (google.api.http) = { - post : "/_pd/api/v1/chats/conversations/messages/stream" - body : "*" + post: "/_pd/api/v1/chats/conversations/messages/stream" + body: "*" }; } - rpc UpdateConversation(UpdateConversationRequest) - returns (UpdateConversationResponse) { + rpc UpdateConversation(UpdateConversationRequest) returns (UpdateConversationResponse) { option (google.api.http) = { - patch : "/_pd/api/v1/chats/conversations/{conversation_id}" - body : "*" + patch: "/_pd/api/v1/chats/conversations/{conversation_id}" + body: "*" }; } - rpc DeleteConversation(DeleteConversationRequest) - returns (DeleteConversationResponse) { - option (google.api.http) = { - delete : "/_pd/api/v1/chats/conversations/{conversation_id}" - }; + rpc DeleteConversation(DeleteConversationRequest) returns (DeleteConversationResponse) { + option (google.api.http) = {delete: "/_pd/api/v1/chats/conversations/{conversation_id}"}; } - rpc ListSupportedModels(ListSupportedModelsRequest) - returns (ListSupportedModelsResponse) { - option (google.api.http) = { - get : "/_pd/api/v1/chats/models" - }; + rpc ListSupportedModels(ListSupportedModelsRequest) returns (ListSupportedModelsResponse) { + option (google.api.http) = {get: "/_pd/api/v1/chats/models"}; } } @@ -74,9 +59,9 @@ enum LanguageModel { message MessageTypeToolCall { string name = 1; - string args = 2; // Json string + string args = 2; // Json string string result = 3; // Json string - string error = 4; 
// Json string + string error = 4; // Json string } message MessageTypeToolCallPrepareArguments { @@ -84,16 +69,22 @@ message MessageTypeToolCallPrepareArguments { string args = 2; // Json string } -message MessageTypeSystem { string content = 1; } +message MessageTypeSystem { + string content = 1; +} -message MessageTypeAssistant { string content = 1; } +message MessageTypeAssistant { + string content = 1; +} message MessageTypeUser { string content = 1; optional string selected_text = 2; } -message MessageTypeUnknown { string description = 1; } +message MessageTypeUnknown { + string description = 1; +} message MessagePayload { oneof message_type { @@ -120,16 +111,22 @@ message Conversation { repeated Message messages = 4; } -message ListConversationsRequest { optional string project_id = 1; } +message ListConversationsRequest { + optional string project_id = 1; +} message ListConversationsResponse { // In this response, the length of conversations[i].messages should be 0. repeated Conversation conversations = 1; } -message GetConversationRequest { string conversation_id = 1; } +message GetConversationRequest { + string conversation_id = 1; +} -message GetConversationResponse { Conversation conversation = 1; } +message GetConversationResponse { + Conversation conversation = 1; +} message CreateConversationMessageRequest { string project_id = 1; @@ -137,22 +134,28 @@ message CreateConversationMessageRequest { // a new conversation will be created and the id will be returned. 
optional string conversation_id = 2; LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6; } -message CreateConversationMessageResponse { Conversation conversation = 1; } +message CreateConversationMessageResponse { + Conversation conversation = 1; +} message UpdateConversationRequest { string conversation_id = 1; string title = 2; } -message UpdateConversationResponse { Conversation conversation = 1; } +message UpdateConversationResponse { + Conversation conversation = 1; +} -message DeleteConversationRequest { string conversation_id = 1; } +message DeleteConversationRequest { + string conversation_id = 1; +} message DeleteConversationResponse { // explicitly empty @@ -167,7 +170,9 @@ message ListSupportedModelsRequest { // explicitly empty } -message ListSupportedModelsResponse { repeated SupportedModel models = 1; } +message ListSupportedModelsResponse { + repeated SupportedModel models = 1; +} // ============================== Streaming Messages @@ -192,7 +197,7 @@ message StreamPartBegin { // and the StreamPartEnd can be directly called when the result is ready. message MessageChunk { string message_id = 1; // The id of the message that this chunk belongs to - string delta = 2; // The small piece of text + string delta = 2; // The small piece of text } message IncompleteIndicator { @@ -214,7 +219,9 @@ message StreamFinalization { // it should be called after the entire API call is finished. } -message StreamError { string error_message = 1; } +message StreamError { + string error_message = 1; +} // Currently, we inject two types of messages: // 1. 
System message @@ -235,7 +242,7 @@ message CreateConversationMessageStreamRequest { string project_id = 1; optional string conversation_id = 2; LanguageModel language_model = 3; // deprecated: use model_slug instead - string model_slug = 7; // new: model slug string + string model_slug = 7; // new: model slug string string user_message = 4; optional string user_selected_text = 5; optional ConversationType conversation_type = 6;