From 81489a100147e2eba148283bdf237b393a9ed9c2 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 15:55:32 +0800 Subject: [PATCH 01/28] proto: v2 --- pkg/gen/api/chat/v2/chat.pb.go | 2158 +++++++++++++++++ pkg/gen/api/chat/v2/chat.pb.gw.go | 580 +++++ pkg/gen/api/chat/v2/chat_grpc.pb.go | 353 +++ proto/chat/v2/chat.proto | 241 ++ .../src/pkg/gen/apiclient/chat/v2/chat_pb.ts | 880 +++++++ 5 files changed, 4212 insertions(+) create mode 100644 pkg/gen/api/chat/v2/chat.pb.go create mode 100644 pkg/gen/api/chat/v2/chat.pb.gw.go create mode 100644 pkg/gen/api/chat/v2/chat_grpc.pb.go create mode 100644 proto/chat/v2/chat.proto create mode 100644 webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts diff --git a/pkg/gen/api/chat/v2/chat.pb.go b/pkg/gen/api/chat/v2/chat.pb.go new file mode 100644 index 00000000..f92367eb --- /dev/null +++ b/pkg/gen/api/chat/v2/chat.pb.go @@ -0,0 +1,2158 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.11 +// protoc (unknown) +// source: chat/v2/chat.proto + +package chatv2 + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ConversationType int32 + +const ( + ConversationType_CONVERSATION_TYPE_UNSPECIFIED ConversationType = 0 + ConversationType_CONVERSATION_TYPE_DEBUG ConversationType = 1 // does not contain any customized messages, the +) + +// Enum value maps for ConversationType. 
+var ( + ConversationType_name = map[int32]string{ + 0: "CONVERSATION_TYPE_UNSPECIFIED", + 1: "CONVERSATION_TYPE_DEBUG", + } + ConversationType_value = map[string]int32{ + "CONVERSATION_TYPE_UNSPECIFIED": 0, + "CONVERSATION_TYPE_DEBUG": 1, + } +) + +func (x ConversationType) Enum() *ConversationType { + p := new(ConversationType) + *p = x + return p +} + +func (x ConversationType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConversationType) Descriptor() protoreflect.EnumDescriptor { + return file_chat_v2_chat_proto_enumTypes[0].Descriptor() +} + +func (ConversationType) Type() protoreflect.EnumType { + return &file_chat_v2_chat_proto_enumTypes[0] +} + +func (x ConversationType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConversationType.Descriptor instead. +func (ConversationType) EnumDescriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{0} +} + +type MessageTypeToolCall struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Args string `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` // Json string + Result string `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` // Json string + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` // Json string + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageTypeToolCall) Reset() { + *x = MessageTypeToolCall{} + mi := &file_chat_v2_chat_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageTypeToolCall) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTypeToolCall) ProtoMessage() {} + +func (x *MessageTypeToolCall) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[0] + 
if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTypeToolCall.ProtoReflect.Descriptor instead. +func (*MessageTypeToolCall) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{0} +} + +func (x *MessageTypeToolCall) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MessageTypeToolCall) GetArgs() string { + if x != nil { + return x.Args + } + return "" +} + +func (x *MessageTypeToolCall) GetResult() string { + if x != nil { + return x.Result + } + return "" +} + +func (x *MessageTypeToolCall) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type MessageTypeToolCallPrepareArguments struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Args string `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` // Json string + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageTypeToolCallPrepareArguments) Reset() { + *x = MessageTypeToolCallPrepareArguments{} + mi := &file_chat_v2_chat_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageTypeToolCallPrepareArguments) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTypeToolCallPrepareArguments) ProtoMessage() {} + +func (x *MessageTypeToolCallPrepareArguments) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTypeToolCallPrepareArguments.ProtoReflect.Descriptor instead. 
+func (*MessageTypeToolCallPrepareArguments) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{1} +} + +func (x *MessageTypeToolCallPrepareArguments) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *MessageTypeToolCallPrepareArguments) GetArgs() string { + if x != nil { + return x.Args + } + return "" +} + +type MessageTypeSystem struct { + state protoimpl.MessageState `protogen:"open.v1"` + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageTypeSystem) Reset() { + *x = MessageTypeSystem{} + mi := &file_chat_v2_chat_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageTypeSystem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTypeSystem) ProtoMessage() {} + +func (x *MessageTypeSystem) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTypeSystem.ProtoReflect.Descriptor instead. 
+func (*MessageTypeSystem) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{2} +} + +func (x *MessageTypeSystem) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +type MessageTypeAssistant struct { + state protoimpl.MessageState `protogen:"open.v1"` + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + ModelSlug string `protobuf:"bytes,2,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageTypeAssistant) Reset() { + *x = MessageTypeAssistant{} + mi := &file_chat_v2_chat_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageTypeAssistant) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTypeAssistant) ProtoMessage() {} + +func (x *MessageTypeAssistant) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTypeAssistant.ProtoReflect.Descriptor instead. 
+func (*MessageTypeAssistant) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{3} +} + +func (x *MessageTypeAssistant) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *MessageTypeAssistant) GetModelSlug() string { + if x != nil { + return x.ModelSlug + } + return "" +} + +type MessageTypeUser struct { + state protoimpl.MessageState `protogen:"open.v1"` + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + SelectedText *string `protobuf:"bytes,2,opt,name=selected_text,json=selectedText,proto3,oneof" json:"selected_text,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageTypeUser) Reset() { + *x = MessageTypeUser{} + mi := &file_chat_v2_chat_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageTypeUser) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTypeUser) ProtoMessage() {} + +func (x *MessageTypeUser) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTypeUser.ProtoReflect.Descriptor instead. 
+func (*MessageTypeUser) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{4} +} + +func (x *MessageTypeUser) GetContent() string { + if x != nil { + return x.Content + } + return "" +} + +func (x *MessageTypeUser) GetSelectedText() string { + if x != nil && x.SelectedText != nil { + return *x.SelectedText + } + return "" +} + +type MessageTypeUnknown struct { + state protoimpl.MessageState `protogen:"open.v1"` + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageTypeUnknown) Reset() { + *x = MessageTypeUnknown{} + mi := &file_chat_v2_chat_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageTypeUnknown) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTypeUnknown) ProtoMessage() {} + +func (x *MessageTypeUnknown) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTypeUnknown.ProtoReflect.Descriptor instead. 
+func (*MessageTypeUnknown) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{5} +} + +func (x *MessageTypeUnknown) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +type MessagePayload struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to MessageType: + // + // *MessagePayload_System + // *MessagePayload_User + // *MessagePayload_Assistant + // *MessagePayload_ToolCallPrepareArguments + // *MessagePayload_ToolCall + // *MessagePayload_Unknown + MessageType isMessagePayload_MessageType `protobuf_oneof:"message_type"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessagePayload) Reset() { + *x = MessagePayload{} + mi := &file_chat_v2_chat_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessagePayload) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessagePayload) ProtoMessage() {} + +func (x *MessagePayload) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessagePayload.ProtoReflect.Descriptor instead. 
+func (*MessagePayload) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{6} +} + +func (x *MessagePayload) GetMessageType() isMessagePayload_MessageType { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *MessagePayload) GetSystem() *MessageTypeSystem { + if x != nil { + if x, ok := x.MessageType.(*MessagePayload_System); ok { + return x.System + } + } + return nil +} + +func (x *MessagePayload) GetUser() *MessageTypeUser { + if x != nil { + if x, ok := x.MessageType.(*MessagePayload_User); ok { + return x.User + } + } + return nil +} + +func (x *MessagePayload) GetAssistant() *MessageTypeAssistant { + if x != nil { + if x, ok := x.MessageType.(*MessagePayload_Assistant); ok { + return x.Assistant + } + } + return nil +} + +func (x *MessagePayload) GetToolCallPrepareArguments() *MessageTypeToolCallPrepareArguments { + if x != nil { + if x, ok := x.MessageType.(*MessagePayload_ToolCallPrepareArguments); ok { + return x.ToolCallPrepareArguments + } + } + return nil +} + +func (x *MessagePayload) GetToolCall() *MessageTypeToolCall { + if x != nil { + if x, ok := x.MessageType.(*MessagePayload_ToolCall); ok { + return x.ToolCall + } + } + return nil +} + +func (x *MessagePayload) GetUnknown() *MessageTypeUnknown { + if x != nil { + if x, ok := x.MessageType.(*MessagePayload_Unknown); ok { + return x.Unknown + } + } + return nil +} + +type isMessagePayload_MessageType interface { + isMessagePayload_MessageType() +} + +type MessagePayload_System struct { + System *MessageTypeSystem `protobuf:"bytes,1,opt,name=system,proto3,oneof"` +} + +type MessagePayload_User struct { + User *MessageTypeUser `protobuf:"bytes,2,opt,name=user,proto3,oneof"` +} + +type MessagePayload_Assistant struct { + Assistant *MessageTypeAssistant `protobuf:"bytes,3,opt,name=assistant,proto3,oneof"` +} + +type MessagePayload_ToolCallPrepareArguments struct { + ToolCallPrepareArguments *MessageTypeToolCallPrepareArguments 
`protobuf:"bytes,4,opt,name=tool_call_prepare_arguments,json=toolCallPrepareArguments,proto3,oneof"` +} + +type MessagePayload_ToolCall struct { + ToolCall *MessageTypeToolCall `protobuf:"bytes,5,opt,name=tool_call,json=toolCall,proto3,oneof"` +} + +type MessagePayload_Unknown struct { + Unknown *MessageTypeUnknown `protobuf:"bytes,6,opt,name=unknown,proto3,oneof"` +} + +func (*MessagePayload_System) isMessagePayload_MessageType() {} + +func (*MessagePayload_User) isMessagePayload_MessageType() {} + +func (*MessagePayload_Assistant) isMessagePayload_MessageType() {} + +func (*MessagePayload_ToolCallPrepareArguments) isMessagePayload_MessageType() {} + +func (*MessagePayload_ToolCall) isMessagePayload_MessageType() {} + +func (*MessagePayload_Unknown) isMessagePayload_MessageType() {} + +type Message struct { + state protoimpl.MessageState `protogen:"open.v1"` + MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + Payload *MessagePayload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Message) Reset() { + *x = Message{} + mi := &file_chat_v2_chat_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. 
+func (*Message) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{7} +} + +func (x *Message) GetMessageId() string { + if x != nil { + return x.MessageId + } + return "" +} + +func (x *Message) GetPayload() *MessagePayload { + if x != nil { + return x.Payload + } + return nil +} + +type Conversation struct { + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + ModelSlug string `protobuf:"bytes,3,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` + // If list conversations, then messages length is 0. + Messages []*Message `protobuf:"bytes,4,rep,name=messages,proto3" json:"messages,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Conversation) Reset() { + *x = Conversation{} + mi := &file_chat_v2_chat_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Conversation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Conversation) ProtoMessage() {} + +func (x *Conversation) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Conversation.ProtoReflect.Descriptor instead. 
+func (*Conversation) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{8} +} + +func (x *Conversation) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Conversation) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Conversation) GetModelSlug() string { + if x != nil { + return x.ModelSlug + } + return "" +} + +func (x *Conversation) GetMessages() []*Message { + if x != nil { + return x.Messages + } + return nil +} + +type ListConversationsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ProjectId *string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3,oneof" json:"project_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListConversationsRequest) Reset() { + *x = ListConversationsRequest{} + mi := &file_chat_v2_chat_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListConversationsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListConversationsRequest) ProtoMessage() {} + +func (x *ListConversationsRequest) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListConversationsRequest.ProtoReflect.Descriptor instead. +func (*ListConversationsRequest) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{9} +} + +func (x *ListConversationsRequest) GetProjectId() string { + if x != nil && x.ProjectId != nil { + return *x.ProjectId + } + return "" +} + +type ListConversationsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // In this response, the length of conversations[i].messages should be 0. 
+ Conversations []*Conversation `protobuf:"bytes,1,rep,name=conversations,proto3" json:"conversations,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListConversationsResponse) Reset() { + *x = ListConversationsResponse{} + mi := &file_chat_v2_chat_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListConversationsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListConversationsResponse) ProtoMessage() {} + +func (x *ListConversationsResponse) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListConversationsResponse.ProtoReflect.Descriptor instead. +func (*ListConversationsResponse) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{10} +} + +func (x *ListConversationsResponse) GetConversations() []*Conversation { + if x != nil { + return x.Conversations + } + return nil +} + +type GetConversationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConversationRequest) Reset() { + *x = GetConversationRequest{} + mi := &file_chat_v2_chat_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConversationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConversationRequest) ProtoMessage() {} + +func (x *GetConversationRequest) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[11] + if x != nil 
{ + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConversationRequest.ProtoReflect.Descriptor instead. +func (*GetConversationRequest) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{11} +} + +func (x *GetConversationRequest) GetConversationId() string { + if x != nil { + return x.ConversationId + } + return "" +} + +type GetConversationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConversationResponse) Reset() { + *x = GetConversationResponse{} + mi := &file_chat_v2_chat_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConversationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConversationResponse) ProtoMessage() {} + +func (x *GetConversationResponse) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConversationResponse.ProtoReflect.Descriptor instead. 
+func (*GetConversationResponse) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{12} +} + +func (x *GetConversationResponse) GetConversation() *Conversation { + if x != nil { + return x.Conversation + } + return nil +} + +type CreateConversationMessageRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // If conversation_id is not provided, + // a new conversation will be created and the id will be returned. + ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` + ModelSlug string `protobuf:"bytes,3,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` + UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` + UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` + ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v2.ConversationType,oneof" json:"conversation_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateConversationMessageRequest) Reset() { + *x = CreateConversationMessageRequest{} + mi := &file_chat_v2_chat_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateConversationMessageRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateConversationMessageRequest) ProtoMessage() {} + +func (x *CreateConversationMessageRequest) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateConversationMessageRequest.ProtoReflect.Descriptor instead. +func (*CreateConversationMessageRequest) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{13} +} + +func (x *CreateConversationMessageRequest) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *CreateConversationMessageRequest) GetConversationId() string { + if x != nil && x.ConversationId != nil { + return *x.ConversationId + } + return "" +} + +func (x *CreateConversationMessageRequest) GetModelSlug() string { + if x != nil { + return x.ModelSlug + } + return "" +} + +func (x *CreateConversationMessageRequest) GetUserMessage() string { + if x != nil { + return x.UserMessage + } + return "" +} + +func (x *CreateConversationMessageRequest) GetUserSelectedText() string { + if x != nil && x.UserSelectedText != nil { + return *x.UserSelectedText + } + return "" +} + +func (x *CreateConversationMessageRequest) GetConversationType() ConversationType { + if x != nil && x.ConversationType != nil { + return *x.ConversationType + } + return ConversationType_CONVERSATION_TYPE_UNSPECIFIED +} + +type CreateConversationMessageResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateConversationMessageResponse) Reset() { + *x = CreateConversationMessageResponse{} + mi := &file_chat_v2_chat_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateConversationMessageResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateConversationMessageResponse) ProtoMessage() {} + +func (x *CreateConversationMessageResponse) ProtoReflect() 
protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateConversationMessageResponse.ProtoReflect.Descriptor instead. +func (*CreateConversationMessageResponse) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{14} +} + +func (x *CreateConversationMessageResponse) GetConversation() *Conversation { + if x != nil { + return x.Conversation + } + return nil +} + +type UpdateConversationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConversationRequest) Reset() { + *x = UpdateConversationRequest{} + mi := &file_chat_v2_chat_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConversationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConversationRequest) ProtoMessage() {} + +func (x *UpdateConversationRequest) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConversationRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateConversationRequest) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{15} +} + +func (x *UpdateConversationRequest) GetConversationId() string { + if x != nil { + return x.ConversationId + } + return "" +} + +func (x *UpdateConversationRequest) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +type UpdateConversationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConversationResponse) Reset() { + *x = UpdateConversationResponse{} + mi := &file_chat_v2_chat_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConversationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConversationResponse) ProtoMessage() {} + +func (x *UpdateConversationResponse) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConversationResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateConversationResponse) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{16} +} + +func (x *UpdateConversationResponse) GetConversation() *Conversation { + if x != nil { + return x.Conversation + } + return nil +} + +type DeleteConversationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteConversationRequest) Reset() { + *x = DeleteConversationRequest{} + mi := &file_chat_v2_chat_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteConversationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteConversationRequest) ProtoMessage() {} + +func (x *DeleteConversationRequest) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteConversationRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteConversationRequest) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{17} +} + +func (x *DeleteConversationRequest) GetConversationId() string { + if x != nil { + return x.ConversationId + } + return "" +} + +type DeleteConversationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteConversationResponse) Reset() { + *x = DeleteConversationResponse{} + mi := &file_chat_v2_chat_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteConversationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteConversationResponse) ProtoMessage() {} + +func (x *DeleteConversationResponse) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteConversationResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteConversationResponse) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{18} +} + +type SupportedModel struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Slug string `protobuf:"bytes,2,opt,name=slug,proto3" json:"slug,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SupportedModel) Reset() { + *x = SupportedModel{} + mi := &file_chat_v2_chat_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SupportedModel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SupportedModel) ProtoMessage() {} + +func (x *SupportedModel) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SupportedModel.ProtoReflect.Descriptor instead. 
+func (*SupportedModel) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{19} +} + +func (x *SupportedModel) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SupportedModel) GetSlug() string { + if x != nil { + return x.Slug + } + return "" +} + +type ListSupportedModelsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSupportedModelsRequest) Reset() { + *x = ListSupportedModelsRequest{} + mi := &file_chat_v2_chat_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSupportedModelsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSupportedModelsRequest) ProtoMessage() {} + +func (x *ListSupportedModelsRequest) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSupportedModelsRequest.ProtoReflect.Descriptor instead. 
+func (*ListSupportedModelsRequest) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{20} +} + +type ListSupportedModelsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Models []*SupportedModel `protobuf:"bytes,1,rep,name=models,proto3" json:"models,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSupportedModelsResponse) Reset() { + *x = ListSupportedModelsResponse{} + mi := &file_chat_v2_chat_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSupportedModelsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSupportedModelsResponse) ProtoMessage() {} + +func (x *ListSupportedModelsResponse) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSupportedModelsResponse.ProtoReflect.Descriptor instead. 
+func (*ListSupportedModelsResponse) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{21} +} + +func (x *ListSupportedModelsResponse) GetModels() []*SupportedModel { + if x != nil { + return x.Models + } + return nil +} + +// Information sent once at the beginning of a new conversation stream +type StreamInitialization struct { + state protoimpl.MessageState `protogen:"open.v1"` + ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` + ModelSlug string `protobuf:"bytes,2,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamInitialization) Reset() { + *x = StreamInitialization{} + mi := &file_chat_v2_chat_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamInitialization) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInitialization) ProtoMessage() {} + +func (x *StreamInitialization) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInitialization.ProtoReflect.Descriptor instead. +func (*StreamInitialization) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{22} +} + +func (x *StreamInitialization) GetConversationId() string { + if x != nil { + return x.ConversationId + } + return "" +} + +func (x *StreamInitialization) GetModelSlug() string { + if x != nil { + return x.ModelSlug + } + return "" +} + +// Designed as StreamPartBegin and StreamPartEnd to +// handle the case where assistant and tool are called at the same time. 
+// +// User: Please answer me "Ok I will do that", then call "get_weather" +// Assistant: Ok I will do that + Tool: get_weather +type StreamPartBegin struct { + state protoimpl.MessageState `protogen:"open.v1"` + MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + Payload *MessagePayload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamPartBegin) Reset() { + *x = StreamPartBegin{} + mi := &file_chat_v2_chat_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamPartBegin) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamPartBegin) ProtoMessage() {} + +func (x *StreamPartBegin) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamPartBegin.ProtoReflect.Descriptor instead. +func (*StreamPartBegin) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{23} +} + +func (x *StreamPartBegin) GetMessageId() string { + if x != nil { + return x.MessageId + } + return "" +} + +func (x *StreamPartBegin) GetPayload() *MessagePayload { + if x != nil { + return x.Payload + } + return nil +} + +// Note: After the StreamPartBegin of tool_call, there can be no MessageChunk, +// +// and the StreamPartEnd can be directly called when the result is ready. 
+type MessageChunk struct { + state protoimpl.MessageState `protogen:"open.v1"` + MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` // The id of the message that this chunk belongs to + Delta string `protobuf:"bytes,2,opt,name=delta,proto3" json:"delta,omitempty"` // The small piece of text + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MessageChunk) Reset() { + *x = MessageChunk{} + mi := &file_chat_v2_chat_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MessageChunk) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageChunk) ProtoMessage() {} + +func (x *MessageChunk) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageChunk.ProtoReflect.Descriptor instead. 
+func (*MessageChunk) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{24} +} + +func (x *MessageChunk) GetMessageId() string { + if x != nil { + return x.MessageId + } + return "" +} + +func (x *MessageChunk) GetDelta() string { + if x != nil { + return x.Delta + } + return "" +} + +type IncompleteIndicator struct { + state protoimpl.MessageState `protogen:"open.v1"` + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + ResponseId string `protobuf:"bytes,2,opt,name=response_id,json=responseId,proto3" json:"response_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IncompleteIndicator) Reset() { + *x = IncompleteIndicator{} + mi := &file_chat_v2_chat_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IncompleteIndicator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IncompleteIndicator) ProtoMessage() {} + +func (x *IncompleteIndicator) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IncompleteIndicator.ProtoReflect.Descriptor instead. 
+func (*IncompleteIndicator) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{25} +} + +func (x *IncompleteIndicator) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *IncompleteIndicator) GetResponseId() string { + if x != nil { + return x.ResponseId + } + return "" +} + +type StreamPartEnd struct { + state protoimpl.MessageState `protogen:"open.v1"` + MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + Payload *MessagePayload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamPartEnd) Reset() { + *x = StreamPartEnd{} + mi := &file_chat_v2_chat_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamPartEnd) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamPartEnd) ProtoMessage() {} + +func (x *StreamPartEnd) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamPartEnd.ProtoReflect.Descriptor instead. 
+func (*StreamPartEnd) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{26} +} + +func (x *StreamPartEnd) GetMessageId() string { + if x != nil { + return x.MessageId + } + return "" +} + +func (x *StreamPartEnd) GetPayload() *MessagePayload { + if x != nil { + return x.Payload + } + return nil +} + +// Sent when the current AI response is fully streamed +type StreamFinalization struct { + state protoimpl.MessageState `protogen:"open.v1"` + ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamFinalization) Reset() { + *x = StreamFinalization{} + mi := &file_chat_v2_chat_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamFinalization) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamFinalization) ProtoMessage() {} + +func (x *StreamFinalization) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamFinalization.ProtoReflect.Descriptor instead. 
+func (*StreamFinalization) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{27} +} + +func (x *StreamFinalization) GetConversationId() string { + if x != nil { + return x.ConversationId + } + return "" +} + +type StreamError struct { + state protoimpl.MessageState `protogen:"open.v1"` + ErrorMessage string `protobuf:"bytes,1,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamError) Reset() { + *x = StreamError{} + mi := &file_chat_v2_chat_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamError) ProtoMessage() {} + +func (x *StreamError) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamError.ProtoReflect.Descriptor instead. +func (*StreamError) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{28} +} + +func (x *StreamError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +// This message should be the same as CreateConversationMessageRequest +// Note: If conversation_id is provided, +// +// the conversation will be created and returned. 
+type CreateConversationMessageStreamRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` + ModelSlug string `protobuf:"bytes,3,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` + UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` + UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` + ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v2.ConversationType,oneof" json:"conversation_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateConversationMessageStreamRequest) Reset() { + *x = CreateConversationMessageStreamRequest{} + mi := &file_chat_v2_chat_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateConversationMessageStreamRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateConversationMessageStreamRequest) ProtoMessage() {} + +func (x *CreateConversationMessageStreamRequest) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateConversationMessageStreamRequest.ProtoReflect.Descriptor instead. 
+func (*CreateConversationMessageStreamRequest) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{29} +} + +func (x *CreateConversationMessageStreamRequest) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *CreateConversationMessageStreamRequest) GetConversationId() string { + if x != nil && x.ConversationId != nil { + return *x.ConversationId + } + return "" +} + +func (x *CreateConversationMessageStreamRequest) GetModelSlug() string { + if x != nil { + return x.ModelSlug + } + return "" +} + +func (x *CreateConversationMessageStreamRequest) GetUserMessage() string { + if x != nil { + return x.UserMessage + } + return "" +} + +func (x *CreateConversationMessageStreamRequest) GetUserSelectedText() string { + if x != nil && x.UserSelectedText != nil { + return *x.UserSelectedText + } + return "" +} + +func (x *CreateConversationMessageStreamRequest) GetConversationType() ConversationType { + if x != nil && x.ConversationType != nil { + return *x.ConversationType + } + return ConversationType_CONVERSATION_TYPE_UNSPECIFIED +} + +// Response for streaming a message within an existing conversation +type CreateConversationMessageStreamResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to ResponsePayload: + // + // *CreateConversationMessageStreamResponse_StreamInitialization + // *CreateConversationMessageStreamResponse_StreamPartBegin + // *CreateConversationMessageStreamResponse_MessageChunk + // *CreateConversationMessageStreamResponse_IncompleteIndicator + // *CreateConversationMessageStreamResponse_StreamPartEnd + // *CreateConversationMessageStreamResponse_StreamFinalization + // *CreateConversationMessageStreamResponse_StreamError + ResponsePayload isCreateConversationMessageStreamResponse_ResponsePayload `protobuf_oneof:"response_payload"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x 
*CreateConversationMessageStreamResponse) Reset() { + *x = CreateConversationMessageStreamResponse{} + mi := &file_chat_v2_chat_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateConversationMessageStreamResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateConversationMessageStreamResponse) ProtoMessage() {} + +func (x *CreateConversationMessageStreamResponse) ProtoReflect() protoreflect.Message { + mi := &file_chat_v2_chat_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateConversationMessageStreamResponse.ProtoReflect.Descriptor instead. +func (*CreateConversationMessageStreamResponse) Descriptor() ([]byte, []int) { + return file_chat_v2_chat_proto_rawDescGZIP(), []int{30} +} + +func (x *CreateConversationMessageStreamResponse) GetResponsePayload() isCreateConversationMessageStreamResponse_ResponsePayload { + if x != nil { + return x.ResponsePayload + } + return nil +} + +func (x *CreateConversationMessageStreamResponse) GetStreamInitialization() *StreamInitialization { + if x != nil { + if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamInitialization); ok { + return x.StreamInitialization + } + } + return nil +} + +func (x *CreateConversationMessageStreamResponse) GetStreamPartBegin() *StreamPartBegin { + if x != nil { + if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamPartBegin); ok { + return x.StreamPartBegin + } + } + return nil +} + +func (x *CreateConversationMessageStreamResponse) GetMessageChunk() *MessageChunk { + if x != nil { + if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_MessageChunk); ok { + return x.MessageChunk + } + } + return nil +} + +func (x *CreateConversationMessageStreamResponse) 
GetIncompleteIndicator() *IncompleteIndicator { + if x != nil { + if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_IncompleteIndicator); ok { + return x.IncompleteIndicator + } + } + return nil +} + +func (x *CreateConversationMessageStreamResponse) GetStreamPartEnd() *StreamPartEnd { + if x != nil { + if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamPartEnd); ok { + return x.StreamPartEnd + } + } + return nil +} + +func (x *CreateConversationMessageStreamResponse) GetStreamFinalization() *StreamFinalization { + if x != nil { + if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamFinalization); ok { + return x.StreamFinalization + } + } + return nil +} + +func (x *CreateConversationMessageStreamResponse) GetStreamError() *StreamError { + if x != nil { + if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamError); ok { + return x.StreamError + } + } + return nil +} + +type isCreateConversationMessageStreamResponse_ResponsePayload interface { + isCreateConversationMessageStreamResponse_ResponsePayload() +} + +type CreateConversationMessageStreamResponse_StreamInitialization struct { + StreamInitialization *StreamInitialization `protobuf:"bytes,1,opt,name=stream_initialization,json=streamInitialization,proto3,oneof"` +} + +type CreateConversationMessageStreamResponse_StreamPartBegin struct { + StreamPartBegin *StreamPartBegin `protobuf:"bytes,2,opt,name=stream_part_begin,json=streamPartBegin,proto3,oneof"` +} + +type CreateConversationMessageStreamResponse_MessageChunk struct { + MessageChunk *MessageChunk `protobuf:"bytes,3,opt,name=message_chunk,json=messageChunk,proto3,oneof"` +} + +type CreateConversationMessageStreamResponse_IncompleteIndicator struct { + IncompleteIndicator *IncompleteIndicator `protobuf:"bytes,4,opt,name=incomplete_indicator,json=incompleteIndicator,proto3,oneof"` +} + +type CreateConversationMessageStreamResponse_StreamPartEnd struct { + 
StreamPartEnd *StreamPartEnd `protobuf:"bytes,5,opt,name=stream_part_end,json=streamPartEnd,proto3,oneof"` +} + +type CreateConversationMessageStreamResponse_StreamFinalization struct { + StreamFinalization *StreamFinalization `protobuf:"bytes,6,opt,name=stream_finalization,json=streamFinalization,proto3,oneof"` +} + +type CreateConversationMessageStreamResponse_StreamError struct { + StreamError *StreamError `protobuf:"bytes,7,opt,name=stream_error,json=streamError,proto3,oneof"` +} + +func (*CreateConversationMessageStreamResponse_StreamInitialization) isCreateConversationMessageStreamResponse_ResponsePayload() { +} + +func (*CreateConversationMessageStreamResponse_StreamPartBegin) isCreateConversationMessageStreamResponse_ResponsePayload() { +} + +func (*CreateConversationMessageStreamResponse_MessageChunk) isCreateConversationMessageStreamResponse_ResponsePayload() { +} + +func (*CreateConversationMessageStreamResponse_IncompleteIndicator) isCreateConversationMessageStreamResponse_ResponsePayload() { +} + +func (*CreateConversationMessageStreamResponse_StreamPartEnd) isCreateConversationMessageStreamResponse_ResponsePayload() { +} + +func (*CreateConversationMessageStreamResponse_StreamFinalization) isCreateConversationMessageStreamResponse_ResponsePayload() { +} + +func (*CreateConversationMessageStreamResponse_StreamError) isCreateConversationMessageStreamResponse_ResponsePayload() { +} + +var File_chat_v2_chat_proto protoreflect.FileDescriptor + +const file_chat_v2_chat_proto_rawDesc = "" + + "\n" + + "\x12chat/v2/chat.proto\x12\achat.v2\x1a\x1cgoogle/api/annotations.proto\"k\n" + + "\x13MessageTypeToolCall\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" + + "\x04args\x18\x02 \x01(\tR\x04args\x12\x16\n" + + "\x06result\x18\x03 \x01(\tR\x06result\x12\x14\n" + + "\x05error\x18\x04 \x01(\tR\x05error\"M\n" + + "#MessageTypeToolCallPrepareArguments\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" + + "\x04args\x18\x02 
\x01(\tR\x04args\"-\n" + + "\x11MessageTypeSystem\x12\x18\n" + + "\acontent\x18\x01 \x01(\tR\acontent\"O\n" + + "\x14MessageTypeAssistant\x12\x18\n" + + "\acontent\x18\x01 \x01(\tR\acontent\x12\x1d\n" + + "\n" + + "model_slug\x18\x02 \x01(\tR\tmodelSlug\"g\n" + + "\x0fMessageTypeUser\x12\x18\n" + + "\acontent\x18\x01 \x01(\tR\acontent\x12(\n" + + "\rselected_text\x18\x02 \x01(\tH\x00R\fselectedText\x88\x01\x01B\x10\n" + + "\x0e_selected_text\"6\n" + + "\x12MessageTypeUnknown\x12 \n" + + "\vdescription\x18\x01 \x01(\tR\vdescription\"\xaa\x03\n" + + "\x0eMessagePayload\x124\n" + + "\x06system\x18\x01 \x01(\v2\x1a.chat.v2.MessageTypeSystemH\x00R\x06system\x12.\n" + + "\x04user\x18\x02 \x01(\v2\x18.chat.v2.MessageTypeUserH\x00R\x04user\x12=\n" + + "\tassistant\x18\x03 \x01(\v2\x1d.chat.v2.MessageTypeAssistantH\x00R\tassistant\x12m\n" + + "\x1btool_call_prepare_arguments\x18\x04 \x01(\v2,.chat.v2.MessageTypeToolCallPrepareArgumentsH\x00R\x18toolCallPrepareArguments\x12;\n" + + "\ttool_call\x18\x05 \x01(\v2\x1c.chat.v2.MessageTypeToolCallH\x00R\btoolCall\x127\n" + + "\aunknown\x18\x06 \x01(\v2\x1b.chat.v2.MessageTypeUnknownH\x00R\aunknownB\x0e\n" + + "\fmessage_type\"[\n" + + "\aMessage\x12\x1d\n" + + "\n" + + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + + "\apayload\x18\x03 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\"\x81\x01\n" + + "\fConversation\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + + "\x05title\x18\x02 \x01(\tR\x05title\x12\x1d\n" + + "\n" + + "model_slug\x18\x03 \x01(\tR\tmodelSlug\x12,\n" + + "\bmessages\x18\x04 \x03(\v2\x10.chat.v2.MessageR\bmessages\"M\n" + + "\x18ListConversationsRequest\x12\"\n" + + "\n" + + "project_id\x18\x01 \x01(\tH\x00R\tprojectId\x88\x01\x01B\r\n" + + "\v_project_id\"X\n" + + "\x19ListConversationsResponse\x12;\n" + + "\rconversations\x18\x01 \x03(\v2\x15.chat.v2.ConversationR\rconversations\"A\n" + + "\x16GetConversationRequest\x12'\n" + + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"T\n" + + 
"\x17GetConversationResponse\x129\n" + + "\fconversation\x18\x01 \x01(\v2\x15.chat.v2.ConversationR\fconversation\"\xf2\x02\n" + + " CreateConversationMessageRequest\x12\x1d\n" + + "\n" + + "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" + + "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12\x1d\n" + + "\n" + + "model_slug\x18\x03 \x01(\tR\tmodelSlug\x12!\n" + + "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" + + "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" + + "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v2.ConversationTypeH\x02R\x10conversationType\x88\x01\x01B\x12\n" + + "\x10_conversation_idB\x15\n" + + "\x13_user_selected_textB\x14\n" + + "\x12_conversation_type\"^\n" + + "!CreateConversationMessageResponse\x129\n" + + "\fconversation\x18\x01 \x01(\v2\x15.chat.v2.ConversationR\fconversation\"Z\n" + + "\x19UpdateConversationRequest\x12'\n" + + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12\x14\n" + + "\x05title\x18\x02 \x01(\tR\x05title\"W\n" + + "\x1aUpdateConversationResponse\x129\n" + + "\fconversation\x18\x01 \x01(\v2\x15.chat.v2.ConversationR\fconversation\"D\n" + + "\x19DeleteConversationRequest\x12'\n" + + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"\x1c\n" + + "\x1aDeleteConversationResponse\"8\n" + + "\x0eSupportedModel\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" + + "\x04slug\x18\x02 \x01(\tR\x04slug\"\x1c\n" + + "\x1aListSupportedModelsRequest\"N\n" + + "\x1bListSupportedModelsResponse\x12/\n" + + "\x06models\x18\x01 \x03(\v2\x17.chat.v2.SupportedModelR\x06models\"^\n" + + "\x14StreamInitialization\x12'\n" + + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12\x1d\n" + + "\n" + + "model_slug\x18\x02 \x01(\tR\tmodelSlug\"c\n" + + "\x0fStreamPartBegin\x12\x1d\n" + + "\n" + + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + + "\apayload\x18\x03 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\"C\n" + + 
"\fMessageChunk\x12\x1d\n" + + "\n" + + "message_id\x18\x01 \x01(\tR\tmessageId\x12\x14\n" + + "\x05delta\x18\x02 \x01(\tR\x05delta\"N\n" + + "\x13IncompleteIndicator\x12\x16\n" + + "\x06reason\x18\x01 \x01(\tR\x06reason\x12\x1f\n" + + "\vresponse_id\x18\x02 \x01(\tR\n" + + "responseId\"a\n" + + "\rStreamPartEnd\x12\x1d\n" + + "\n" + + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + + "\apayload\x18\x03 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\"=\n" + + "\x12StreamFinalization\x12'\n" + + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"2\n" + + "\vStreamError\x12#\n" + + "\rerror_message\x18\x01 \x01(\tR\ferrorMessage\"\xf8\x02\n" + + "&CreateConversationMessageStreamRequest\x12\x1d\n" + + "\n" + + "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" + + "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12\x1d\n" + + "\n" + + "model_slug\x18\x03 \x01(\tR\tmodelSlug\x12!\n" + + "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" + + "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" + + "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v2.ConversationTypeH\x02R\x10conversationType\x88\x01\x01B\x12\n" + + "\x10_conversation_idB\x15\n" + + "\x13_user_selected_textB\x14\n" + + "\x12_conversation_type\"\xb9\x04\n" + + "'CreateConversationMessageStreamResponse\x12T\n" + + "\x15stream_initialization\x18\x01 \x01(\v2\x1d.chat.v2.StreamInitializationH\x00R\x14streamInitialization\x12F\n" + + "\x11stream_part_begin\x18\x02 \x01(\v2\x18.chat.v2.StreamPartBeginH\x00R\x0fstreamPartBegin\x12<\n" + + "\rmessage_chunk\x18\x03 \x01(\v2\x15.chat.v2.MessageChunkH\x00R\fmessageChunk\x12Q\n" + + "\x14incomplete_indicator\x18\x04 \x01(\v2\x1c.chat.v2.IncompleteIndicatorH\x00R\x13incompleteIndicator\x12@\n" + + "\x0fstream_part_end\x18\x05 \x01(\v2\x16.chat.v2.StreamPartEndH\x00R\rstreamPartEnd\x12N\n" + + "\x13stream_finalization\x18\x06 \x01(\v2\x1b.chat.v2.StreamFinalizationH\x00R\x12streamFinalization\x129\n" + 
+ "\fstream_error\x18\a \x01(\v2\x14.chat.v2.StreamErrorH\x00R\vstreamErrorB\x12\n" + + "\x10response_payload*R\n" + + "\x10ConversationType\x12!\n" + + "\x1dCONVERSATION_TYPE_UNSPECIFIED\x10\x00\x12\x1b\n" + + "\x17CONVERSATION_TYPE_DEBUG\x10\x012\xd2\b\n" + + "\vChatService\x12\x83\x01\n" + + "\x11ListConversations\x12!.chat.v2.ListConversationsRequest\x1a\".chat.v2.ListConversationsResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/_pd/api/v2/chats/conversations\x12\x8f\x01\n" + + "\x0fGetConversation\x12\x1f.chat.v2.GetConversationRequest\x1a .chat.v2.GetConversationResponse\"9\x82\xd3\xe4\x93\x023\x121/_pd/api/v2/chats/conversations/{conversation_id}\x12\xa7\x01\n" + + "\x19CreateConversationMessage\x12).chat.v2.CreateConversationMessageRequest\x1a*.chat.v2.CreateConversationMessageResponse\"3\x82\xd3\xe4\x93\x02-:\x01*\"(/_pd/api/v2/chats/conversations/messages\x12\xc2\x01\n" + + "\x1fCreateConversationMessageStream\x12/.chat.v2.CreateConversationMessageStreamRequest\x1a0.chat.v2.CreateConversationMessageStreamResponse\":\x82\xd3\xe4\x93\x024:\x01*\"//_pd/api/v2/chats/conversations/messages/stream0\x01\x12\x9b\x01\n" + + "\x12UpdateConversation\x12\".chat.v2.UpdateConversationRequest\x1a#.chat.v2.UpdateConversationResponse\"<\x82\xd3\xe4\x93\x026:\x01*21/_pd/api/v2/chats/conversations/{conversation_id}\x12\x98\x01\n" + + "\x12DeleteConversation\x12\".chat.v2.DeleteConversationRequest\x1a#.chat.v2.DeleteConversationResponse\"9\x82\xd3\xe4\x93\x023*1/_pd/api/v2/chats/conversations/{conversation_id}\x12\x82\x01\n" + + "\x13ListSupportedModels\x12#.chat.v2.ListSupportedModelsRequest\x1a$.chat.v2.ListSupportedModelsResponse\" \x82\xd3\xe4\x93\x02\x1a\x12\x18/_pd/api/v2/chats/modelsB\x7f\n" + + "\vcom.chat.v2B\tChatProtoP\x01Z(paperdebugger/pkg/gen/api/chat/v2;chatv2\xa2\x02\x03CXX\xaa\x02\aChat.V2\xca\x02\aChat\\V2\xe2\x02\x13Chat\\V2\\GPBMetadata\xea\x02\bChat::V2b\x06proto3" + +var ( + file_chat_v2_chat_proto_rawDescOnce sync.Once + file_chat_v2_chat_proto_rawDescData 
[]byte +) + +func file_chat_v2_chat_proto_rawDescGZIP() []byte { + file_chat_v2_chat_proto_rawDescOnce.Do(func() { + file_chat_v2_chat_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_chat_v2_chat_proto_rawDesc), len(file_chat_v2_chat_proto_rawDesc))) + }) + return file_chat_v2_chat_proto_rawDescData +} + +var file_chat_v2_chat_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_chat_v2_chat_proto_msgTypes = make([]protoimpl.MessageInfo, 31) +var file_chat_v2_chat_proto_goTypes = []any{ + (ConversationType)(0), // 0: chat.v2.ConversationType + (*MessageTypeToolCall)(nil), // 1: chat.v2.MessageTypeToolCall + (*MessageTypeToolCallPrepareArguments)(nil), // 2: chat.v2.MessageTypeToolCallPrepareArguments + (*MessageTypeSystem)(nil), // 3: chat.v2.MessageTypeSystem + (*MessageTypeAssistant)(nil), // 4: chat.v2.MessageTypeAssistant + (*MessageTypeUser)(nil), // 5: chat.v2.MessageTypeUser + (*MessageTypeUnknown)(nil), // 6: chat.v2.MessageTypeUnknown + (*MessagePayload)(nil), // 7: chat.v2.MessagePayload + (*Message)(nil), // 8: chat.v2.Message + (*Conversation)(nil), // 9: chat.v2.Conversation + (*ListConversationsRequest)(nil), // 10: chat.v2.ListConversationsRequest + (*ListConversationsResponse)(nil), // 11: chat.v2.ListConversationsResponse + (*GetConversationRequest)(nil), // 12: chat.v2.GetConversationRequest + (*GetConversationResponse)(nil), // 13: chat.v2.GetConversationResponse + (*CreateConversationMessageRequest)(nil), // 14: chat.v2.CreateConversationMessageRequest + (*CreateConversationMessageResponse)(nil), // 15: chat.v2.CreateConversationMessageResponse + (*UpdateConversationRequest)(nil), // 16: chat.v2.UpdateConversationRequest + (*UpdateConversationResponse)(nil), // 17: chat.v2.UpdateConversationResponse + (*DeleteConversationRequest)(nil), // 18: chat.v2.DeleteConversationRequest + (*DeleteConversationResponse)(nil), // 19: chat.v2.DeleteConversationResponse + (*SupportedModel)(nil), // 20: 
chat.v2.SupportedModel + (*ListSupportedModelsRequest)(nil), // 21: chat.v2.ListSupportedModelsRequest + (*ListSupportedModelsResponse)(nil), // 22: chat.v2.ListSupportedModelsResponse + (*StreamInitialization)(nil), // 23: chat.v2.StreamInitialization + (*StreamPartBegin)(nil), // 24: chat.v2.StreamPartBegin + (*MessageChunk)(nil), // 25: chat.v2.MessageChunk + (*IncompleteIndicator)(nil), // 26: chat.v2.IncompleteIndicator + (*StreamPartEnd)(nil), // 27: chat.v2.StreamPartEnd + (*StreamFinalization)(nil), // 28: chat.v2.StreamFinalization + (*StreamError)(nil), // 29: chat.v2.StreamError + (*CreateConversationMessageStreamRequest)(nil), // 30: chat.v2.CreateConversationMessageStreamRequest + (*CreateConversationMessageStreamResponse)(nil), // 31: chat.v2.CreateConversationMessageStreamResponse +} +var file_chat_v2_chat_proto_depIdxs = []int32{ + 3, // 0: chat.v2.MessagePayload.system:type_name -> chat.v2.MessageTypeSystem + 5, // 1: chat.v2.MessagePayload.user:type_name -> chat.v2.MessageTypeUser + 4, // 2: chat.v2.MessagePayload.assistant:type_name -> chat.v2.MessageTypeAssistant + 2, // 3: chat.v2.MessagePayload.tool_call_prepare_arguments:type_name -> chat.v2.MessageTypeToolCallPrepareArguments + 1, // 4: chat.v2.MessagePayload.tool_call:type_name -> chat.v2.MessageTypeToolCall + 6, // 5: chat.v2.MessagePayload.unknown:type_name -> chat.v2.MessageTypeUnknown + 7, // 6: chat.v2.Message.payload:type_name -> chat.v2.MessagePayload + 8, // 7: chat.v2.Conversation.messages:type_name -> chat.v2.Message + 9, // 8: chat.v2.ListConversationsResponse.conversations:type_name -> chat.v2.Conversation + 9, // 9: chat.v2.GetConversationResponse.conversation:type_name -> chat.v2.Conversation + 0, // 10: chat.v2.CreateConversationMessageRequest.conversation_type:type_name -> chat.v2.ConversationType + 9, // 11: chat.v2.CreateConversationMessageResponse.conversation:type_name -> chat.v2.Conversation + 9, // 12: chat.v2.UpdateConversationResponse.conversation:type_name -> 
chat.v2.Conversation + 20, // 13: chat.v2.ListSupportedModelsResponse.models:type_name -> chat.v2.SupportedModel + 7, // 14: chat.v2.StreamPartBegin.payload:type_name -> chat.v2.MessagePayload + 7, // 15: chat.v2.StreamPartEnd.payload:type_name -> chat.v2.MessagePayload + 0, // 16: chat.v2.CreateConversationMessageStreamRequest.conversation_type:type_name -> chat.v2.ConversationType + 23, // 17: chat.v2.CreateConversationMessageStreamResponse.stream_initialization:type_name -> chat.v2.StreamInitialization + 24, // 18: chat.v2.CreateConversationMessageStreamResponse.stream_part_begin:type_name -> chat.v2.StreamPartBegin + 25, // 19: chat.v2.CreateConversationMessageStreamResponse.message_chunk:type_name -> chat.v2.MessageChunk + 26, // 20: chat.v2.CreateConversationMessageStreamResponse.incomplete_indicator:type_name -> chat.v2.IncompleteIndicator + 27, // 21: chat.v2.CreateConversationMessageStreamResponse.stream_part_end:type_name -> chat.v2.StreamPartEnd + 28, // 22: chat.v2.CreateConversationMessageStreamResponse.stream_finalization:type_name -> chat.v2.StreamFinalization + 29, // 23: chat.v2.CreateConversationMessageStreamResponse.stream_error:type_name -> chat.v2.StreamError + 10, // 24: chat.v2.ChatService.ListConversations:input_type -> chat.v2.ListConversationsRequest + 12, // 25: chat.v2.ChatService.GetConversation:input_type -> chat.v2.GetConversationRequest + 14, // 26: chat.v2.ChatService.CreateConversationMessage:input_type -> chat.v2.CreateConversationMessageRequest + 30, // 27: chat.v2.ChatService.CreateConversationMessageStream:input_type -> chat.v2.CreateConversationMessageStreamRequest + 16, // 28: chat.v2.ChatService.UpdateConversation:input_type -> chat.v2.UpdateConversationRequest + 18, // 29: chat.v2.ChatService.DeleteConversation:input_type -> chat.v2.DeleteConversationRequest + 21, // 30: chat.v2.ChatService.ListSupportedModels:input_type -> chat.v2.ListSupportedModelsRequest + 11, // 31: chat.v2.ChatService.ListConversations:output_type -> 
chat.v2.ListConversationsResponse + 13, // 32: chat.v2.ChatService.GetConversation:output_type -> chat.v2.GetConversationResponse + 15, // 33: chat.v2.ChatService.CreateConversationMessage:output_type -> chat.v2.CreateConversationMessageResponse + 31, // 34: chat.v2.ChatService.CreateConversationMessageStream:output_type -> chat.v2.CreateConversationMessageStreamResponse + 17, // 35: chat.v2.ChatService.UpdateConversation:output_type -> chat.v2.UpdateConversationResponse + 19, // 36: chat.v2.ChatService.DeleteConversation:output_type -> chat.v2.DeleteConversationResponse + 22, // 37: chat.v2.ChatService.ListSupportedModels:output_type -> chat.v2.ListSupportedModelsResponse + 31, // [31:38] is the sub-list for method output_type + 24, // [24:31] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_chat_v2_chat_proto_init() } +func file_chat_v2_chat_proto_init() { + if File_chat_v2_chat_proto != nil { + return + } + file_chat_v2_chat_proto_msgTypes[4].OneofWrappers = []any{} + file_chat_v2_chat_proto_msgTypes[6].OneofWrappers = []any{ + (*MessagePayload_System)(nil), + (*MessagePayload_User)(nil), + (*MessagePayload_Assistant)(nil), + (*MessagePayload_ToolCallPrepareArguments)(nil), + (*MessagePayload_ToolCall)(nil), + (*MessagePayload_Unknown)(nil), + } + file_chat_v2_chat_proto_msgTypes[9].OneofWrappers = []any{} + file_chat_v2_chat_proto_msgTypes[13].OneofWrappers = []any{} + file_chat_v2_chat_proto_msgTypes[29].OneofWrappers = []any{} + file_chat_v2_chat_proto_msgTypes[30].OneofWrappers = []any{ + (*CreateConversationMessageStreamResponse_StreamInitialization)(nil), + (*CreateConversationMessageStreamResponse_StreamPartBegin)(nil), + (*CreateConversationMessageStreamResponse_MessageChunk)(nil), + (*CreateConversationMessageStreamResponse_IncompleteIndicator)(nil), + 
(*CreateConversationMessageStreamResponse_StreamPartEnd)(nil), + (*CreateConversationMessageStreamResponse_StreamFinalization)(nil), + (*CreateConversationMessageStreamResponse_StreamError)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_chat_v2_chat_proto_rawDesc), len(file_chat_v2_chat_proto_rawDesc)), + NumEnums: 1, + NumMessages: 31, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_chat_v2_chat_proto_goTypes, + DependencyIndexes: file_chat_v2_chat_proto_depIdxs, + EnumInfos: file_chat_v2_chat_proto_enumTypes, + MessageInfos: file_chat_v2_chat_proto_msgTypes, + }.Build() + File_chat_v2_chat_proto = out.File + file_chat_v2_chat_proto_goTypes = nil + file_chat_v2_chat_proto_depIdxs = nil +} diff --git a/pkg/gen/api/chat/v2/chat.pb.gw.go b/pkg/gen/api/chat/v2/chat.pb.gw.go new file mode 100644 index 00000000..4d33af53 --- /dev/null +++ b/pkg/gen/api/chat/v2/chat.pb.gw.go @@ -0,0 +1,580 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: chat/v2/chat.proto + +/* +Package chatv2 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package chatv2 + +import ( + "context" + "errors" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// Suppress "imported and not used" errors +var ( + _ codes.Code + _ io.Reader + _ status.Status + _ = errors.New + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join +) + +var filter_ChatService_ListConversations_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} + +func request_ChatService_ListConversations_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListConversationsRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ChatService_ListConversations_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := client.ListConversations(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ChatService_ListConversations_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListConversationsRequest + metadata runtime.ServerMetadata + ) + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ChatService_ListConversations_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.ListConversations(ctx, &protoReq) + return msg, metadata, err +} + +func request_ChatService_GetConversation_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq GetConversationRequest + metadata runtime.ServerMetadata + err error + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["conversation_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id") + } + protoReq.ConversationId, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err) + } + msg, err := client.GetConversation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ChatService_GetConversation_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq GetConversationRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["conversation_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id") + } + protoReq.ConversationId, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err) + } + msg, err := server.GetConversation(ctx, &protoReq) + return msg, metadata, err +} + +func request_ChatService_CreateConversationMessage_0(ctx 
context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateConversationMessageRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + msg, err := client.CreateConversationMessage(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ChatService_CreateConversationMessage_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq CreateConversationMessageRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + msg, err := server.CreateConversationMessage(ctx, &protoReq) + return msg, metadata, err +} + +func request_ChatService_CreateConversationMessageStream_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (ChatService_CreateConversationMessageStreamClient, runtime.ServerMetadata, error) { + var ( + protoReq CreateConversationMessageStreamRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + stream, err := client.CreateConversationMessageStream(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + 
header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +func request_ChatService_UpdateConversation_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq UpdateConversationRequest + metadata runtime.ServerMetadata + err error + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["conversation_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id") + } + protoReq.ConversationId, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err) + } + msg, err := client.UpdateConversation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ChatService_UpdateConversation_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq UpdateConversationRequest + metadata runtime.ServerMetadata + err error + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + val, ok := pathParams["conversation_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id") + } + protoReq.ConversationId, err = runtime.String(val) + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err) + } + msg, err := server.UpdateConversation(ctx, &protoReq) + return msg, metadata, err +} + +func request_ChatService_DeleteConversation_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq DeleteConversationRequest + metadata runtime.ServerMetadata + err error + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + val, ok := pathParams["conversation_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id") + } + protoReq.ConversationId, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err) + } + msg, err := client.DeleteConversation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ChatService_DeleteConversation_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq DeleteConversationRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["conversation_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id") + } + protoReq.ConversationId, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err) + } + msg, err := server.DeleteConversation(ctx, &protoReq) + return msg, metadata, err +} + +func request_ChatService_ListSupportedModels_0(ctx context.Context, marshaler runtime.Marshaler, client 
ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListSupportedModelsRequest + metadata runtime.ServerMetadata + ) + if req.Body != nil { + _, _ = io.Copy(io.Discard, req.Body) + } + msg, err := client.ListSupportedModels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_ChatService_ListSupportedModels_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListSupportedModelsRequest + metadata runtime.ServerMetadata + ) + msg, err := server.ListSupportedModels(ctx, &protoReq) + return msg, metadata, err +} + +// RegisterChatServiceHandlerServer registers the http handlers for service ChatService to "mux". +// UnaryRPC :call ChatServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterChatServiceHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. 
+func RegisterChatServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ChatServiceServer) error { + mux.Handle(http.MethodGet, pattern_ChatService_ListConversations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/ListConversations", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ChatService_ListConversations_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_ListConversations_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ChatService_GetConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/GetConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ChatService_GetConversation_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_GetConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/CreateConversationMessage", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/messages")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ChatService_CreateConversationMessage_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_CreateConversationMessage_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + + mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessageStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") + _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + }) + mux.Handle(http.MethodPatch, pattern_ChatService_UpdateConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/UpdateConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ChatService_UpdateConversation_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_UpdateConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodDelete, pattern_ChatService_DeleteConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/DeleteConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ChatService_DeleteConversation_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_DeleteConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ChatService_ListSupportedModels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/ListSupportedModels", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/models")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_ChatService_ListSupportedModels_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_ListSupportedModels_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) + + return nil +} + +// RegisterChatServiceHandlerFromEndpoint is same as RegisterChatServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterChatServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.NewClient(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + return RegisterChatServiceHandler(ctx, mux, conn) +} + +// RegisterChatServiceHandler registers the http handlers for service ChatService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterChatServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterChatServiceHandlerClient(ctx, mux, NewChatServiceClient(conn)) +} + +// RegisterChatServiceHandlerClient registers the http handlers for service ChatService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ChatServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ChatServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ChatServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares. 
+func RegisterChatServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ChatServiceClient) error { + mux.Handle(http.MethodGet, pattern_ChatService_ListConversations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/ListConversations", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ChatService_ListConversations_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_ListConversations_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ChatService_GetConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/GetConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ChatService_GetConversation_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_GetConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/CreateConversationMessage", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/messages")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ChatService_CreateConversationMessage_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_CreateConversationMessage_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessageStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/CreateConversationMessageStream", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/messages/stream")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ChatService_CreateConversationMessageStream_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_CreateConversationMessageStream_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodPatch, pattern_ChatService_UpdateConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/UpdateConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ChatService_UpdateConversation_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_UpdateConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodDelete, pattern_ChatService_DeleteConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/DeleteConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ChatService_DeleteConversation_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_DeleteConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_ChatService_ListSupportedModels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/ListSupportedModels", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/models")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_ChatService_ListSupportedModels_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_ChatService_ListSupportedModels_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ }) + return nil +} + +var ( + pattern_ChatService_ListConversations_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"_pd", "api", "v2", "chats", "conversations"}, "")) + pattern_ChatService_GetConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, "")) + pattern_ChatService_CreateConversationMessage_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "messages"}, "")) + pattern_ChatService_CreateConversationMessageStream_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5, 2, 6}, []string{"_pd", "api", "v2", "chats", "conversations", "messages", "stream"}, "")) + pattern_ChatService_UpdateConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, "")) + pattern_ChatService_DeleteConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, "")) + pattern_ChatService_ListSupportedModels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"_pd", "api", "v2", "chats", "models"}, "")) +) + +var ( + forward_ChatService_ListConversations_0 = runtime.ForwardResponseMessage + forward_ChatService_GetConversation_0 = runtime.ForwardResponseMessage + forward_ChatService_CreateConversationMessage_0 = runtime.ForwardResponseMessage + forward_ChatService_CreateConversationMessageStream_0 = runtime.ForwardResponseStream + forward_ChatService_UpdateConversation_0 = runtime.ForwardResponseMessage + forward_ChatService_DeleteConversation_0 = runtime.ForwardResponseMessage + 
forward_ChatService_ListSupportedModels_0 = runtime.ForwardResponseMessage +) diff --git a/pkg/gen/api/chat/v2/chat_grpc.pb.go b/pkg/gen/api/chat/v2/chat_grpc.pb.go new file mode 100644 index 00000000..3226480a --- /dev/null +++ b/pkg/gen/api/chat/v2/chat_grpc.pb.go @@ -0,0 +1,353 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.0 +// - protoc (unknown) +// source: chat/v2/chat.proto + +package chatv2 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + ChatService_ListConversations_FullMethodName = "/chat.v2.ChatService/ListConversations" + ChatService_GetConversation_FullMethodName = "/chat.v2.ChatService/GetConversation" + ChatService_CreateConversationMessage_FullMethodName = "/chat.v2.ChatService/CreateConversationMessage" + ChatService_CreateConversationMessageStream_FullMethodName = "/chat.v2.ChatService/CreateConversationMessageStream" + ChatService_UpdateConversation_FullMethodName = "/chat.v2.ChatService/UpdateConversation" + ChatService_DeleteConversation_FullMethodName = "/chat.v2.ChatService/DeleteConversation" + ChatService_ListSupportedModels_FullMethodName = "/chat.v2.ChatService/ListSupportedModels" +) + +// ChatServiceClient is the client API for ChatService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ChatServiceClient interface { + ListConversations(ctx context.Context, in *ListConversationsRequest, opts ...grpc.CallOption) (*ListConversationsResponse, error) + GetConversation(ctx context.Context, in *GetConversationRequest, opts ...grpc.CallOption) (*GetConversationResponse, error) + CreateConversationMessage(ctx context.Context, in *CreateConversationMessageRequest, opts ...grpc.CallOption) (*CreateConversationMessageResponse, error) + CreateConversationMessageStream(ctx context.Context, in *CreateConversationMessageStreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateConversationMessageStreamResponse], error) + UpdateConversation(ctx context.Context, in *UpdateConversationRequest, opts ...grpc.CallOption) (*UpdateConversationResponse, error) + DeleteConversation(ctx context.Context, in *DeleteConversationRequest, opts ...grpc.CallOption) (*DeleteConversationResponse, error) + ListSupportedModels(ctx context.Context, in *ListSupportedModelsRequest, opts ...grpc.CallOption) (*ListSupportedModelsResponse, error) +} + +type chatServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewChatServiceClient(cc grpc.ClientConnInterface) ChatServiceClient { + return &chatServiceClient{cc} +} + +func (c *chatServiceClient) ListConversations(ctx context.Context, in *ListConversationsRequest, opts ...grpc.CallOption) (*ListConversationsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListConversationsResponse) + err := c.cc.Invoke(ctx, ChatService_ListConversations_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *chatServiceClient) GetConversation(ctx context.Context, in *GetConversationRequest, opts ...grpc.CallOption) (*GetConversationResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(GetConversationResponse) + err := c.cc.Invoke(ctx, ChatService_GetConversation_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *chatServiceClient) CreateConversationMessage(ctx context.Context, in *CreateConversationMessageRequest, opts ...grpc.CallOption) (*CreateConversationMessageResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateConversationMessageResponse) + err := c.cc.Invoke(ctx, ChatService_CreateConversationMessage_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *chatServiceClient) CreateConversationMessageStream(ctx context.Context, in *CreateConversationMessageStreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateConversationMessageStreamResponse], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ChatService_ServiceDesc.Streams[0], ChatService_CreateConversationMessageStream_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[CreateConversationMessageStreamRequest, CreateConversationMessageStreamResponse]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ChatService_CreateConversationMessageStreamClient = grpc.ServerStreamingClient[CreateConversationMessageStreamResponse] + +func (c *chatServiceClient) UpdateConversation(ctx context.Context, in *UpdateConversationRequest, opts ...grpc.CallOption) (*UpdateConversationResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(UpdateConversationResponse) + err := c.cc.Invoke(ctx, ChatService_UpdateConversation_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *chatServiceClient) DeleteConversation(ctx context.Context, in *DeleteConversationRequest, opts ...grpc.CallOption) (*DeleteConversationResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DeleteConversationResponse) + err := c.cc.Invoke(ctx, ChatService_DeleteConversation_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *chatServiceClient) ListSupportedModels(ctx context.Context, in *ListSupportedModelsRequest, opts ...grpc.CallOption) (*ListSupportedModelsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListSupportedModelsResponse) + err := c.cc.Invoke(ctx, ChatService_ListSupportedModels_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ChatServiceServer is the server API for ChatService service. +// All implementations must embed UnimplementedChatServiceServer +// for forward compatibility. 
+type ChatServiceServer interface { + ListConversations(context.Context, *ListConversationsRequest) (*ListConversationsResponse, error) + GetConversation(context.Context, *GetConversationRequest) (*GetConversationResponse, error) + CreateConversationMessage(context.Context, *CreateConversationMessageRequest) (*CreateConversationMessageResponse, error) + CreateConversationMessageStream(*CreateConversationMessageStreamRequest, grpc.ServerStreamingServer[CreateConversationMessageStreamResponse]) error + UpdateConversation(context.Context, *UpdateConversationRequest) (*UpdateConversationResponse, error) + DeleteConversation(context.Context, *DeleteConversationRequest) (*DeleteConversationResponse, error) + ListSupportedModels(context.Context, *ListSupportedModelsRequest) (*ListSupportedModelsResponse, error) + mustEmbedUnimplementedChatServiceServer() +} + +// UnimplementedChatServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedChatServiceServer struct{} + +func (UnimplementedChatServiceServer) ListConversations(context.Context, *ListConversationsRequest) (*ListConversationsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListConversations not implemented") +} +func (UnimplementedChatServiceServer) GetConversation(context.Context, *GetConversationRequest) (*GetConversationResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetConversation not implemented") +} +func (UnimplementedChatServiceServer) CreateConversationMessage(context.Context, *CreateConversationMessageRequest) (*CreateConversationMessageResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateConversationMessage not implemented") +} +func (UnimplementedChatServiceServer) CreateConversationMessageStream(*CreateConversationMessageStreamRequest, grpc.ServerStreamingServer[CreateConversationMessageStreamResponse]) error { + return status.Error(codes.Unimplemented, "method CreateConversationMessageStream not implemented") +} +func (UnimplementedChatServiceServer) UpdateConversation(context.Context, *UpdateConversationRequest) (*UpdateConversationResponse, error) { + return nil, status.Error(codes.Unimplemented, "method UpdateConversation not implemented") +} +func (UnimplementedChatServiceServer) DeleteConversation(context.Context, *DeleteConversationRequest) (*DeleteConversationResponse, error) { + return nil, status.Error(codes.Unimplemented, "method DeleteConversation not implemented") +} +func (UnimplementedChatServiceServer) ListSupportedModels(context.Context, *ListSupportedModelsRequest) (*ListSupportedModelsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListSupportedModels not implemented") +} +func (UnimplementedChatServiceServer) mustEmbedUnimplementedChatServiceServer() {} +func (UnimplementedChatServiceServer) testEmbeddedByValue() {} + +// UnsafeChatServiceServer may be embedded to opt out of 
forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ChatServiceServer will +// result in compilation errors. +type UnsafeChatServiceServer interface { + mustEmbedUnimplementedChatServiceServer() +} + +func RegisterChatServiceServer(s grpc.ServiceRegistrar, srv ChatServiceServer) { + // If the following call panics, it indicates UnimplementedChatServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&ChatService_ServiceDesc, srv) +} + +func _ChatService_ListConversations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListConversationsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ChatServiceServer).ListConversations(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ChatService_ListConversations_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ChatServiceServer).ListConversations(ctx, req.(*ListConversationsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ChatService_GetConversation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConversationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ChatServiceServer).GetConversation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ChatService_GetConversation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(ChatServiceServer).GetConversation(ctx, req.(*GetConversationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ChatService_CreateConversationMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateConversationMessageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ChatServiceServer).CreateConversationMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ChatService_CreateConversationMessage_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ChatServiceServer).CreateConversationMessage(ctx, req.(*CreateConversationMessageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ChatService_CreateConversationMessageStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(CreateConversationMessageStreamRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ChatServiceServer).CreateConversationMessageStream(m, &grpc.GenericServerStream[CreateConversationMessageStreamRequest, CreateConversationMessageStreamResponse]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. 
+type ChatService_CreateConversationMessageStreamServer = grpc.ServerStreamingServer[CreateConversationMessageStreamResponse] + +func _ChatService_UpdateConversation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConversationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ChatServiceServer).UpdateConversation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ChatService_UpdateConversation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ChatServiceServer).UpdateConversation(ctx, req.(*UpdateConversationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ChatService_DeleteConversation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteConversationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ChatServiceServer).DeleteConversation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ChatService_DeleteConversation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ChatServiceServer).DeleteConversation(ctx, req.(*DeleteConversationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ChatService_ListSupportedModels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSupportedModelsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ChatServiceServer).ListSupportedModels(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
ChatService_ListSupportedModels_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ChatServiceServer).ListSupportedModels(ctx, req.(*ListSupportedModelsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ChatService_ServiceDesc is the grpc.ServiceDesc for ChatService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ChatService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "chat.v2.ChatService", + HandlerType: (*ChatServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListConversations", + Handler: _ChatService_ListConversations_Handler, + }, + { + MethodName: "GetConversation", + Handler: _ChatService_GetConversation_Handler, + }, + { + MethodName: "CreateConversationMessage", + Handler: _ChatService_CreateConversationMessage_Handler, + }, + { + MethodName: "UpdateConversation", + Handler: _ChatService_UpdateConversation_Handler, + }, + { + MethodName: "DeleteConversation", + Handler: _ChatService_DeleteConversation_Handler, + }, + { + MethodName: "ListSupportedModels", + Handler: _ChatService_ListSupportedModels_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "CreateConversationMessageStream", + Handler: _ChatService_CreateConversationMessageStream_Handler, + ServerStreams: true, + }, + }, + Metadata: "chat/v2/chat.proto", +} diff --git a/proto/chat/v2/chat.proto b/proto/chat/v2/chat.proto new file mode 100644 index 00000000..a6399dcd --- /dev/null +++ b/proto/chat/v2/chat.proto @@ -0,0 +1,241 @@ +syntax = "proto3"; + +package chat.v2; + +import "google/api/annotations.proto"; + +option go_package = "paperdebugger/pkg/gen/api/chat/v2;chatv2"; + +service ChatService { + rpc ListConversations(ListConversationsRequest) returns (ListConversationsResponse) { + option (google.api.http) = {get: "/_pd/api/v2/chats/conversations"}; + } + rpc 
GetConversation(GetConversationRequest) returns (GetConversationResponse) { + option (google.api.http) = {get: "/_pd/api/v2/chats/conversations/{conversation_id}"}; + } + rpc CreateConversationMessage(CreateConversationMessageRequest) returns (CreateConversationMessageResponse) { + option (google.api.http) = { + post: "/_pd/api/v2/chats/conversations/messages" + body: "*" + }; + } + rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) returns (stream CreateConversationMessageStreamResponse) { + option (google.api.http) = { + post: "/_pd/api/v2/chats/conversations/messages/stream" + body: "*" + }; + } + rpc UpdateConversation(UpdateConversationRequest) returns (UpdateConversationResponse) { + option (google.api.http) = { + patch: "/_pd/api/v2/chats/conversations/{conversation_id}" + body: "*" + }; + } + rpc DeleteConversation(DeleteConversationRequest) returns (DeleteConversationResponse) { + option (google.api.http) = {delete: "/_pd/api/v2/chats/conversations/{conversation_id}"}; + } + rpc ListSupportedModels(ListSupportedModelsRequest) returns (ListSupportedModelsResponse) { + option (google.api.http) = {get: "/_pd/api/v2/chats/models"}; + } +} + +message MessageTypeToolCall { + string name = 1; + string args = 2; // Json string + string result = 3; // Json string + string error = 4; // Json string +} + +message MessageTypeToolCallPrepareArguments { + string name = 1; + string args = 2; // Json string +} + +message MessageTypeSystem { + string content = 1; +} + +message MessageTypeAssistant { + string content = 1; + string model_slug = 2; +} + +message MessageTypeUser { + string content = 1; + optional string selected_text = 2; +} + +message MessageTypeUnknown { + string description = 1; +} + +message MessagePayload { + oneof message_type { + MessageTypeSystem system = 1; + MessageTypeUser user = 2; + MessageTypeAssistant assistant = 3; + MessageTypeToolCallPrepareArguments tool_call_prepare_arguments = 4; + MessageTypeToolCall tool_call = 
5; + MessageTypeUnknown unknown = 6; + } +} + +message Message { + string message_id = 1; + MessagePayload payload = 3; +} + +message Conversation { + string id = 1; + string title = 2; + string model_slug = 3; + // If list conversations, then messages length is 0. + repeated Message messages = 4; +} + +message ListConversationsRequest { + optional string project_id = 1; +} + +message ListConversationsResponse { + // In this response, the length of conversations[i].messages should be 0. + repeated Conversation conversations = 1; +} + +message GetConversationRequest { + string conversation_id = 1; +} + +message GetConversationResponse { + Conversation conversation = 1; +} + +message CreateConversationMessageRequest { + string project_id = 1; + // If conversation_id is not provided, + // a new conversation will be created and the id will be returned. + optional string conversation_id = 2; + string model_slug = 3; + string user_message = 4; + optional string user_selected_text = 5; + optional ConversationType conversation_type = 6; +} + +message CreateConversationMessageResponse { + Conversation conversation = 1; +} + +message UpdateConversationRequest { + string conversation_id = 1; + string title = 2; +} + +message UpdateConversationResponse { + Conversation conversation = 1; +} + +message DeleteConversationRequest { + string conversation_id = 1; +} + +message DeleteConversationResponse { + // explicitly empty +} + +message SupportedModel { + string name = 1; + string slug = 2; +} + +message ListSupportedModelsRequest { + // explicitly empty +} + +message ListSupportedModelsResponse { + repeated SupportedModel models = 1; +} + +// ============================== Streaming Messages + +// Information sent once at the beginning of a new conversation stream +message StreamInitialization { + string conversation_id = 1; + string model_slug = 2; +} + +// Designed as StreamPartBegin and StreamPartEnd to +// handle the case where assistant and tool are called at the same 
 time.
+//
+// User: Please answer me "Ok I will do that", then call "get_weather"
+// Assistant: Ok I will do that + Tool: get_weather
+message StreamPartBegin {
+  string message_id = 1;
+  MessagePayload payload = 3;
+}
+
+// Note: After the StreamPartBegin of tool_call, there can be no MessageChunk,
+// and the StreamPartEnd can be directly called when the result is ready.
+message MessageChunk {
+  string message_id = 1; // The id of the message that this chunk belongs to
+  string delta = 2; // The small piece of text
+}
+
+message IncompleteIndicator {
+  string reason = 1;
+  string response_id = 2;
+}
+
+message StreamPartEnd {
+  string message_id = 1;
+  MessagePayload payload = 3;
+}
+
+// Sent when the current AI response is fully streamed
+message StreamFinalization {
+  string conversation_id = 1;
+  // Do not return the full Conversation here.
+  // If the user wants, they can call the GetConversation API themselves.
+  // Note: Do not call GetConversation when receiving streamFinalization,
+  // it should be called after the entire API call is finished.
+}
+
+message StreamError {
+  string error_message = 1;
+}
+
+// Currently, we inject two types of messages:
+// 1. System message
+// 2. User message
+
+enum ConversationType {
+  CONVERSATION_TYPE_UNSPECIFIED = 0;
+  CONVERSATION_TYPE_DEBUG = 1; // does not contain any customized messages; the
+  // inapp_history and openai_history are synced.
+  // CONVERSATION_TYPE_NO_SYSTEM_MESSAGE_INJECTION = 2;
+  // CONVERSATION_TYPE_NO_USER_MESSAGE_INJECTION = 3;
+}
+
+// This message should be the same as CreateConversationMessageRequest
+// Note: If conversation_id is not provided,
+// a new conversation will be created and its id will be returned.
+message CreateConversationMessageStreamRequest { + string project_id = 1; + optional string conversation_id = 2; + string model_slug = 3; + string user_message = 4; + optional string user_selected_text = 5; + optional ConversationType conversation_type = 6; +} + +// Response for streaming a message within an existing conversation +message CreateConversationMessageStreamResponse { + oneof response_payload { + StreamInitialization stream_initialization = 1; + StreamPartBegin stream_part_begin = 2; + MessageChunk message_chunk = 3; + IncompleteIndicator incomplete_indicator = 4; + StreamPartEnd stream_part_end = 5; + StreamFinalization stream_finalization = 6; + StreamError stream_error = 7; + } +} diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts new file mode 100644 index 00000000..dbbd6824 --- /dev/null +++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts @@ -0,0 +1,880 @@ +// @generated by protoc-gen-es v2.10.2 with parameter "target=ts" +// @generated from file chat/v2/chat.proto (package chat.v2, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2"; +import { enumDesc, fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2"; +import { file_google_api_annotations } from "@buf/googleapis_googleapis.bufbuild_es/google/api/annotations_pb"; +import type { Message as Message$1 } from "@bufbuild/protobuf"; + +/** + * Describes the file chat/v2/chat.proto. 
+ */ +export const file_chat_v2_chat: GenFile = /*@__PURE__*/ + fileDesc("ChJjaGF0L3YyL2NoYXQucHJvdG8SB2NoYXQudjIiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIjsKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJQCg9NZXNzYWdlVHlwZVVzZXISDwoHY29udGVudBgBIAEoCRIaCg1zZWxlY3RlZF90ZXh0GAIgASgJSACIAQFCEAoOX3NlbGVjdGVkX3RleHQiKQoSTWVzc2FnZVR5cGVVbmtub3duEhMKC2Rlc2NyaXB0aW9uGAEgASgJIuQCCg5NZXNzYWdlUGF5bG9hZBIsCgZzeXN0ZW0YASABKAsyGi5jaGF0LnYyLk1lc3NhZ2VUeXBlU3lzdGVtSAASKAoEdXNlchgCIAEoCzIYLmNoYXQudjIuTWVzc2FnZVR5cGVVc2VySAASMgoJYXNzaXN0YW50GAMgASgLMh0uY2hhdC52Mi5NZXNzYWdlVHlwZUFzc2lzdGFudEgAElMKG3Rvb2xfY2FsbF9wcmVwYXJlX2FyZ3VtZW50cxgEIAEoCzIsLmNoYXQudjIuTWVzc2FnZVR5cGVUb29sQ2FsbFByZXBhcmVBcmd1bWVudHNIABIxCgl0b29sX2NhbGwYBSABKAsyHC5jaGF0LnYyLk1lc3NhZ2VUeXBlVG9vbENhbGxIABIuCgd1bmtub3duGAYgASgLMhsuY2hhdC52Mi5NZXNzYWdlVHlwZVVua25vd25IAEIOCgxtZXNzYWdlX3R5cGUiRwoHTWVzc2FnZRISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkImEKDENvbnZlcnNhdGlvbhIKCgJpZBgBIAEoCRINCgV0aXRsZRgCIAEoCRISCgptb2RlbF9zbHVnGAMgASgJEiIKCG1lc3NhZ2VzGAQgAygLMhAuY2hhdC52Mi5NZXNzYWdlIkIKGExpc3RDb252ZXJzYXRpb25zUmVxdWVzdBIXCgpwcm9qZWN0X2lkGAEgASgJSACIAQFCDQoLX3Byb2plY3RfaWQiSQoZTGlzdENvbnZlcnNhdGlvbnNSZXNwb25zZRIsCg1jb252ZXJzYXRpb25zGAEgAygLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iMQoWR2V0Q29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiRgoXR2V0Q29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24imwIKIENyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSACIAQESEgoKbW9kZWxfc2x1ZxgDIAEoCRIUCgx1c2VyX21lc3NhZ2UYBCABKAkSHwoSdXNlcl9zZWxlY3RlZF90ZXh0GAUgASgJSAGIAQESOQoRY29udmVyc2F0aW9uX3R5cGUYBiABKA4yGS5jaGF0LnYyLkNvbnZlcnNhdGlvblR5cGVIAogBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE1
91c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlIlAKIUNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYyLkNvbnZlcnNhdGlvbiJDChlVcGRhdGVDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCRINCgV0aXRsZRgCIAEoCSJJChpVcGRhdGVDb252ZXJzYXRpb25SZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYyLkNvbnZlcnNhdGlvbiI0ChlEZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSIcChpEZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSIsCg5TdXBwb3J0ZWRNb2RlbBIMCgRuYW1lGAEgASgJEgwKBHNsdWcYAiABKAkiHAoaTGlzdFN1cHBvcnRlZE1vZGVsc1JlcXVlc3QiRgobTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlEicKBm1vZGVscxgBIAMoCzIXLmNoYXQudjIuU3VwcG9ydGVkTW9kZWwiQwoUU3RyZWFtSW5pdGlhbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEhIKCm1vZGVsX3NsdWcYAiABKAkiTwoPU3RyZWFtUGFydEJlZ2luEhIKCm1lc3NhZ2VfaWQYASABKAkSKAoHcGF5bG9hZBgDIAEoCzIXLmNoYXQudjIuTWVzc2FnZVBheWxvYWQiMQoMTWVzc2FnZUNodW5rEhIKCm1lc3NhZ2VfaWQYASABKAkSDQoFZGVsdGEYAiABKAkiOgoTSW5jb21wbGV0ZUluZGljYXRvchIOCgZyZWFzb24YASABKAkSEwoLcmVzcG9uc2VfaWQYAiABKAkiTQoNU3RyZWFtUGFydEVuZBISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkIi0KElN0cmVhbUZpbmFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkiJAoLU3RyZWFtRXJyb3ISFQoNZXJyb3JfbWVzc2FnZRgBIAEoCSKhAgomQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlcXVlc3QSEgoKcHJvamVjdF9pZBgBIAEoCRIcCg9jb252ZXJzYXRpb25faWQYAiABKAlIAIgBARISCgptb2RlbF9zbHVnGAMgASgJEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAYgBARI5ChFjb252ZXJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjIuQ29udmVyc2F0aW9uVHlwZUgCiAEBQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUivwMKJ0NyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXNwb25zZRI+ChVzdHJlYW1faW5pdGlhbGl6YXRpb24YASABKAsyHS5jaGF0LnYyLlN0cmVhbUluaXRpYWxpemF0aW9uSAASNQoRc3RyZWFtX3BhcnRfYmVnaW4YAiABKAsyGC5jaGF0LnYyLlN0cmVhbVBhcnRCZWdpbkgAEi4KDW1lc3NhZ2VfY2h1bmsYAyABKAsyFS5jaGF0LnYyLk1lc3NhZ2VDaHVua0gAEjwKFGluY29tcGxldGVfaW5kaWNhdG9yGAQgASgLMhwuY2hhdC52Mi5JbmNvbXBsZXRlSW5kaWNhdG9ySAASMQoPc3RyZWFtX3
BhcnRfZW5kGAUgASgLMhYuY2hhdC52Mi5TdHJlYW1QYXJ0RW5kSAASOgoTc3RyZWFtX2ZpbmFsaXphdGlvbhgGIAEoCzIbLmNoYXQudjIuU3RyZWFtRmluYWxpemF0aW9uSAASLAoMc3RyZWFtX2Vycm9yGAcgASgLMhQuY2hhdC52Mi5TdHJlYW1FcnJvckgAQhIKEHJlc3BvbnNlX3BheWxvYWQqUgoQQ29udmVyc2F0aW9uVHlwZRIhCh1DT05WRVJTQVRJT05fVFlQRV9VTlNQRUNJRklFRBAAEhsKF0NPTlZFUlNBVElPTl9UWVBFX0RFQlVHEAEy0ggKC0NoYXRTZXJ2aWNlEoMBChFMaXN0Q29udmVyc2F0aW9ucxIhLmNoYXQudjIuTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0GiIuY2hhdC52Mi5MaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMSjwEKD0dldENvbnZlcnNhdGlvbhIfLmNoYXQudjIuR2V0Q29udmVyc2F0aW9uUmVxdWVzdBogLmNoYXQudjIuR2V0Q29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMxIxL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKnAQoZQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZRIpLmNoYXQudjIuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QaKi5jaGF0LnYyLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZSIzgtPkkwItOgEqIigvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzEsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52Mi5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjIuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52Mi5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjIuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YyL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MkIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MjtjaGF0djKiAgNDWFiqAgdDaGF0LlYyygIHQ2hhdFxWMuICE0NoYXRcVjJcR1BCTWV0YWRhdGHqAghDaGF0OjpWMmIGcHJvdG8z", 
[file_google_api_annotations]); + +/** + * @generated from message chat.v2.MessageTypeToolCall + */ +export type MessageTypeToolCall = Message$1<"chat.v2.MessageTypeToolCall"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * Json string + * + * @generated from field: string args = 2; + */ + args: string; + + /** + * Json string + * + * @generated from field: string result = 3; + */ + result: string; + + /** + * Json string + * + * @generated from field: string error = 4; + */ + error: string; +}; + +/** + * Describes the message chat.v2.MessageTypeToolCall. + * Use `create(MessageTypeToolCallSchema)` to create a new message. + */ +export const MessageTypeToolCallSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 0); + +/** + * @generated from message chat.v2.MessageTypeToolCallPrepareArguments + */ +export type MessageTypeToolCallPrepareArguments = Message$1<"chat.v2.MessageTypeToolCallPrepareArguments"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * Json string + * + * @generated from field: string args = 2; + */ + args: string; +}; + +/** + * Describes the message chat.v2.MessageTypeToolCallPrepareArguments. + * Use `create(MessageTypeToolCallPrepareArgumentsSchema)` to create a new message. + */ +export const MessageTypeToolCallPrepareArgumentsSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 1); + +/** + * @generated from message chat.v2.MessageTypeSystem + */ +export type MessageTypeSystem = Message$1<"chat.v2.MessageTypeSystem"> & { + /** + * @generated from field: string content = 1; + */ + content: string; +}; + +/** + * Describes the message chat.v2.MessageTypeSystem. + * Use `create(MessageTypeSystemSchema)` to create a new message. 
+ */ +export const MessageTypeSystemSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 2); + +/** + * @generated from message chat.v2.MessageTypeAssistant + */ +export type MessageTypeAssistant = Message$1<"chat.v2.MessageTypeAssistant"> & { + /** + * @generated from field: string content = 1; + */ + content: string; + + /** + * @generated from field: string model_slug = 2; + */ + modelSlug: string; +}; + +/** + * Describes the message chat.v2.MessageTypeAssistant. + * Use `create(MessageTypeAssistantSchema)` to create a new message. + */ +export const MessageTypeAssistantSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 3); + +/** + * @generated from message chat.v2.MessageTypeUser + */ +export type MessageTypeUser = Message$1<"chat.v2.MessageTypeUser"> & { + /** + * @generated from field: string content = 1; + */ + content: string; + + /** + * @generated from field: optional string selected_text = 2; + */ + selectedText?: string; +}; + +/** + * Describes the message chat.v2.MessageTypeUser. + * Use `create(MessageTypeUserSchema)` to create a new message. + */ +export const MessageTypeUserSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 4); + +/** + * @generated from message chat.v2.MessageTypeUnknown + */ +export type MessageTypeUnknown = Message$1<"chat.v2.MessageTypeUnknown"> & { + /** + * @generated from field: string description = 1; + */ + description: string; +}; + +/** + * Describes the message chat.v2.MessageTypeUnknown. + * Use `create(MessageTypeUnknownSchema)` to create a new message. 
+ */ +export const MessageTypeUnknownSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 5); + +/** + * @generated from message chat.v2.MessagePayload + */ +export type MessagePayload = Message$1<"chat.v2.MessagePayload"> & { + /** + * @generated from oneof chat.v2.MessagePayload.message_type + */ + messageType: { + /** + * @generated from field: chat.v2.MessageTypeSystem system = 1; + */ + value: MessageTypeSystem; + case: "system"; + } | { + /** + * @generated from field: chat.v2.MessageTypeUser user = 2; + */ + value: MessageTypeUser; + case: "user"; + } | { + /** + * @generated from field: chat.v2.MessageTypeAssistant assistant = 3; + */ + value: MessageTypeAssistant; + case: "assistant"; + } | { + /** + * @generated from field: chat.v2.MessageTypeToolCallPrepareArguments tool_call_prepare_arguments = 4; + */ + value: MessageTypeToolCallPrepareArguments; + case: "toolCallPrepareArguments"; + } | { + /** + * @generated from field: chat.v2.MessageTypeToolCall tool_call = 5; + */ + value: MessageTypeToolCall; + case: "toolCall"; + } | { + /** + * @generated from field: chat.v2.MessageTypeUnknown unknown = 6; + */ + value: MessageTypeUnknown; + case: "unknown"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message chat.v2.MessagePayload. + * Use `create(MessagePayloadSchema)` to create a new message. + */ +export const MessagePayloadSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 6); + +/** + * @generated from message chat.v2.Message + */ +export type Message = Message$1<"chat.v2.Message"> & { + /** + * @generated from field: string message_id = 1; + */ + messageId: string; + + /** + * @generated from field: chat.v2.MessagePayload payload = 3; + */ + payload?: MessagePayload; +}; + +/** + * Describes the message chat.v2.Message. + * Use `create(MessageSchema)` to create a new message. 
+ */ +export const MessageSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 7); + +/** + * @generated from message chat.v2.Conversation + */ +export type Conversation = Message$1<"chat.v2.Conversation"> & { + /** + * @generated from field: string id = 1; + */ + id: string; + + /** + * @generated from field: string title = 2; + */ + title: string; + + /** + * @generated from field: string model_slug = 3; + */ + modelSlug: string; + + /** + * If list conversations, then messages length is 0. + * + * @generated from field: repeated chat.v2.Message messages = 4; + */ + messages: Message[]; +}; + +/** + * Describes the message chat.v2.Conversation. + * Use `create(ConversationSchema)` to create a new message. + */ +export const ConversationSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 8); + +/** + * @generated from message chat.v2.ListConversationsRequest + */ +export type ListConversationsRequest = Message$1<"chat.v2.ListConversationsRequest"> & { + /** + * @generated from field: optional string project_id = 1; + */ + projectId?: string; +}; + +/** + * Describes the message chat.v2.ListConversationsRequest. + * Use `create(ListConversationsRequestSchema)` to create a new message. + */ +export const ListConversationsRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 9); + +/** + * @generated from message chat.v2.ListConversationsResponse + */ +export type ListConversationsResponse = Message$1<"chat.v2.ListConversationsResponse"> & { + /** + * In this response, the length of conversations[i].messages should be 0. + * + * @generated from field: repeated chat.v2.Conversation conversations = 1; + */ + conversations: Conversation[]; +}; + +/** + * Describes the message chat.v2.ListConversationsResponse. + * Use `create(ListConversationsResponseSchema)` to create a new message. 
+ */ +export const ListConversationsResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 10); + +/** + * @generated from message chat.v2.GetConversationRequest + */ +export type GetConversationRequest = Message$1<"chat.v2.GetConversationRequest"> & { + /** + * @generated from field: string conversation_id = 1; + */ + conversationId: string; +}; + +/** + * Describes the message chat.v2.GetConversationRequest. + * Use `create(GetConversationRequestSchema)` to create a new message. + */ +export const GetConversationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 11); + +/** + * @generated from message chat.v2.GetConversationResponse + */ +export type GetConversationResponse = Message$1<"chat.v2.GetConversationResponse"> & { + /** + * @generated from field: chat.v2.Conversation conversation = 1; + */ + conversation?: Conversation; +}; + +/** + * Describes the message chat.v2.GetConversationResponse. + * Use `create(GetConversationResponseSchema)` to create a new message. + */ +export const GetConversationResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 12); + +/** + * @generated from message chat.v2.CreateConversationMessageRequest + */ +export type CreateConversationMessageRequest = Message$1<"chat.v2.CreateConversationMessageRequest"> & { + /** + * @generated from field: string project_id = 1; + */ + projectId: string; + + /** + * If conversation_id is not provided, + * a new conversation will be created and the id will be returned. 
+ * + * @generated from field: optional string conversation_id = 2; + */ + conversationId?: string; + + /** + * @generated from field: string model_slug = 3; + */ + modelSlug: string; + + /** + * @generated from field: string user_message = 4; + */ + userMessage: string; + + /** + * @generated from field: optional string user_selected_text = 5; + */ + userSelectedText?: string; + + /** + * @generated from field: optional chat.v2.ConversationType conversation_type = 6; + */ + conversationType?: ConversationType; +}; + +/** + * Describes the message chat.v2.CreateConversationMessageRequest. + * Use `create(CreateConversationMessageRequestSchema)` to create a new message. + */ +export const CreateConversationMessageRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 13); + +/** + * @generated from message chat.v2.CreateConversationMessageResponse + */ +export type CreateConversationMessageResponse = Message$1<"chat.v2.CreateConversationMessageResponse"> & { + /** + * @generated from field: chat.v2.Conversation conversation = 1; + */ + conversation?: Conversation; +}; + +/** + * Describes the message chat.v2.CreateConversationMessageResponse. + * Use `create(CreateConversationMessageResponseSchema)` to create a new message. + */ +export const CreateConversationMessageResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 14); + +/** + * @generated from message chat.v2.UpdateConversationRequest + */ +export type UpdateConversationRequest = Message$1<"chat.v2.UpdateConversationRequest"> & { + /** + * @generated from field: string conversation_id = 1; + */ + conversationId: string; + + /** + * @generated from field: string title = 2; + */ + title: string; +}; + +/** + * Describes the message chat.v2.UpdateConversationRequest. + * Use `create(UpdateConversationRequestSchema)` to create a new message. 
+ */ +export const UpdateConversationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 15); + +/** + * @generated from message chat.v2.UpdateConversationResponse + */ +export type UpdateConversationResponse = Message$1<"chat.v2.UpdateConversationResponse"> & { + /** + * @generated from field: chat.v2.Conversation conversation = 1; + */ + conversation?: Conversation; +}; + +/** + * Describes the message chat.v2.UpdateConversationResponse. + * Use `create(UpdateConversationResponseSchema)` to create a new message. + */ +export const UpdateConversationResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 16); + +/** + * @generated from message chat.v2.DeleteConversationRequest + */ +export type DeleteConversationRequest = Message$1<"chat.v2.DeleteConversationRequest"> & { + /** + * @generated from field: string conversation_id = 1; + */ + conversationId: string; +}; + +/** + * Describes the message chat.v2.DeleteConversationRequest. + * Use `create(DeleteConversationRequestSchema)` to create a new message. + */ +export const DeleteConversationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 17); + +/** + * explicitly empty + * + * @generated from message chat.v2.DeleteConversationResponse + */ +export type DeleteConversationResponse = Message$1<"chat.v2.DeleteConversationResponse"> & { +}; + +/** + * Describes the message chat.v2.DeleteConversationResponse. + * Use `create(DeleteConversationResponseSchema)` to create a new message. + */ +export const DeleteConversationResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 18); + +/** + * @generated from message chat.v2.SupportedModel + */ +export type SupportedModel = Message$1<"chat.v2.SupportedModel"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * @generated from field: string slug = 2; + */ + slug: string; +}; + +/** + * Describes the message chat.v2.SupportedModel. 
+ * Use `create(SupportedModelSchema)` to create a new message. + */ +export const SupportedModelSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 19); + +/** + * explicitly empty + * + * @generated from message chat.v2.ListSupportedModelsRequest + */ +export type ListSupportedModelsRequest = Message$1<"chat.v2.ListSupportedModelsRequest"> & { +}; + +/** + * Describes the message chat.v2.ListSupportedModelsRequest. + * Use `create(ListSupportedModelsRequestSchema)` to create a new message. + */ +export const ListSupportedModelsRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 20); + +/** + * @generated from message chat.v2.ListSupportedModelsResponse + */ +export type ListSupportedModelsResponse = Message$1<"chat.v2.ListSupportedModelsResponse"> & { + /** + * @generated from field: repeated chat.v2.SupportedModel models = 1; + */ + models: SupportedModel[]; +}; + +/** + * Describes the message chat.v2.ListSupportedModelsResponse. + * Use `create(ListSupportedModelsResponseSchema)` to create a new message. + */ +export const ListSupportedModelsResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 21); + +/** + * Information sent once at the beginning of a new conversation stream + * + * @generated from message chat.v2.StreamInitialization + */ +export type StreamInitialization = Message$1<"chat.v2.StreamInitialization"> & { + /** + * @generated from field: string conversation_id = 1; + */ + conversationId: string; + + /** + * @generated from field: string model_slug = 2; + */ + modelSlug: string; +}; + +/** + * Describes the message chat.v2.StreamInitialization. + * Use `create(StreamInitializationSchema)` to create a new message. + */ +export const StreamInitializationSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 22); + +/** + * Designed as StreamPartBegin and StreamPartEnd to + * handle the case where assistant and tool are called at the same time. 
+ * + * User: Please answer me "Ok I will do that", then call "get_weather" + * Assistant: Ok I will do that + Tool: get_weather + * + * @generated from message chat.v2.StreamPartBegin + */ +export type StreamPartBegin = Message$1<"chat.v2.StreamPartBegin"> & { + /** + * @generated from field: string message_id = 1; + */ + messageId: string; + + /** + * @generated from field: chat.v2.MessagePayload payload = 3; + */ + payload?: MessagePayload; +}; + +/** + * Describes the message chat.v2.StreamPartBegin. + * Use `create(StreamPartBeginSchema)` to create a new message. + */ +export const StreamPartBeginSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 23); + +/** + * Note: After the StreamPartBegin of tool_call, there can be no MessageChunk, + * and the StreamPartEnd can be directly called when the result is ready. + * + * @generated from message chat.v2.MessageChunk + */ +export type MessageChunk = Message$1<"chat.v2.MessageChunk"> & { + /** + * The id of the message that this chunk belongs to + * + * @generated from field: string message_id = 1; + */ + messageId: string; + + /** + * The small piece of text + * + * @generated from field: string delta = 2; + */ + delta: string; +}; + +/** + * Describes the message chat.v2.MessageChunk. + * Use `create(MessageChunkSchema)` to create a new message. + */ +export const MessageChunkSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 24); + +/** + * @generated from message chat.v2.IncompleteIndicator + */ +export type IncompleteIndicator = Message$1<"chat.v2.IncompleteIndicator"> & { + /** + * @generated from field: string reason = 1; + */ + reason: string; + + /** + * @generated from field: string response_id = 2; + */ + responseId: string; +}; + +/** + * Describes the message chat.v2.IncompleteIndicator. + * Use `create(IncompleteIndicatorSchema)` to create a new message. 
+ */ +export const IncompleteIndicatorSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 25); + +/** + * @generated from message chat.v2.StreamPartEnd + */ +export type StreamPartEnd = Message$1<"chat.v2.StreamPartEnd"> & { + /** + * @generated from field: string message_id = 1; + */ + messageId: string; + + /** + * @generated from field: chat.v2.MessagePayload payload = 3; + */ + payload?: MessagePayload; +}; + +/** + * Describes the message chat.v2.StreamPartEnd. + * Use `create(StreamPartEndSchema)` to create a new message. + */ +export const StreamPartEndSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 26); + +/** + * Sent when the current AI response is fully streamed + * + * @generated from message chat.v2.StreamFinalization + */ +export type StreamFinalization = Message$1<"chat.v2.StreamFinalization"> & { + /** + * Do not return the full Conversation here. + * If the user wants, they can call the GetConversation API themselves. + * Note: Do not call GetConversation when receiving streamFinalization, + * it should be called after the entire API call is finished. + * + * @generated from field: string conversation_id = 1; + */ + conversationId: string; +}; + +/** + * Describes the message chat.v2.StreamFinalization. + * Use `create(StreamFinalizationSchema)` to create a new message. + */ +export const StreamFinalizationSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 27); + +/** + * @generated from message chat.v2.StreamError + */ +export type StreamError = Message$1<"chat.v2.StreamError"> & { + /** + * @generated from field: string error_message = 1; + */ + errorMessage: string; +}; + +/** + * Describes the message chat.v2.StreamError. + * Use `create(StreamErrorSchema)` to create a new message. 
+ */ +export const StreamErrorSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 28); + +/** + * This message should be the same as CreateConversationMessageRequest + * Note: If conversation_id is provided, + * the conversation will be created and returned. + * + * @generated from message chat.v2.CreateConversationMessageStreamRequest + */ +export type CreateConversationMessageStreamRequest = Message$1<"chat.v2.CreateConversationMessageStreamRequest"> & { + /** + * @generated from field: string project_id = 1; + */ + projectId: string; + + /** + * @generated from field: optional string conversation_id = 2; + */ + conversationId?: string; + + /** + * @generated from field: string model_slug = 3; + */ + modelSlug: string; + + /** + * @generated from field: string user_message = 4; + */ + userMessage: string; + + /** + * @generated from field: optional string user_selected_text = 5; + */ + userSelectedText?: string; + + /** + * @generated from field: optional chat.v2.ConversationType conversation_type = 6; + */ + conversationType?: ConversationType; +}; + +/** + * Describes the message chat.v2.CreateConversationMessageStreamRequest. + * Use `create(CreateConversationMessageStreamRequestSchema)` to create a new message. 
+ */ +export const CreateConversationMessageStreamRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 29); + +/** + * Response for streaming a message within an existing conversation + * + * @generated from message chat.v2.CreateConversationMessageStreamResponse + */ +export type CreateConversationMessageStreamResponse = Message$1<"chat.v2.CreateConversationMessageStreamResponse"> & { + /** + * @generated from oneof chat.v2.CreateConversationMessageStreamResponse.response_payload + */ + responsePayload: { + /** + * @generated from field: chat.v2.StreamInitialization stream_initialization = 1; + */ + value: StreamInitialization; + case: "streamInitialization"; + } | { + /** + * @generated from field: chat.v2.StreamPartBegin stream_part_begin = 2; + */ + value: StreamPartBegin; + case: "streamPartBegin"; + } | { + /** + * @generated from field: chat.v2.MessageChunk message_chunk = 3; + */ + value: MessageChunk; + case: "messageChunk"; + } | { + /** + * @generated from field: chat.v2.IncompleteIndicator incomplete_indicator = 4; + */ + value: IncompleteIndicator; + case: "incompleteIndicator"; + } | { + /** + * @generated from field: chat.v2.StreamPartEnd stream_part_end = 5; + */ + value: StreamPartEnd; + case: "streamPartEnd"; + } | { + /** + * @generated from field: chat.v2.StreamFinalization stream_finalization = 6; + */ + value: StreamFinalization; + case: "streamFinalization"; + } | { + /** + * @generated from field: chat.v2.StreamError stream_error = 7; + */ + value: StreamError; + case: "streamError"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message chat.v2.CreateConversationMessageStreamResponse. + * Use `create(CreateConversationMessageStreamResponseSchema)` to create a new message. 
+ */ +export const CreateConversationMessageStreamResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_chat_v2_chat, 30); + +/** + * @generated from enum chat.v2.ConversationType + */ +export enum ConversationType { + /** + * @generated from enum value: CONVERSATION_TYPE_UNSPECIFIED = 0; + */ + UNSPECIFIED = 0, + + /** + * does not contain any customized messages, the + * + * @generated from enum value: CONVERSATION_TYPE_DEBUG = 1; + */ + DEBUG = 1, +} + +/** + * Describes the enum chat.v2.ConversationType. + */ +export const ConversationTypeSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_chat_v2_chat, 0); + +/** + * @generated from service chat.v2.ChatService + */ +export const ChatService: GenService<{ + /** + * @generated from rpc chat.v2.ChatService.ListConversations + */ + listConversations: { + methodKind: "unary"; + input: typeof ListConversationsRequestSchema; + output: typeof ListConversationsResponseSchema; + }, + /** + * @generated from rpc chat.v2.ChatService.GetConversation + */ + getConversation: { + methodKind: "unary"; + input: typeof GetConversationRequestSchema; + output: typeof GetConversationResponseSchema; + }, + /** + * @generated from rpc chat.v2.ChatService.CreateConversationMessage + */ + createConversationMessage: { + methodKind: "unary"; + input: typeof CreateConversationMessageRequestSchema; + output: typeof CreateConversationMessageResponseSchema; + }, + /** + * @generated from rpc chat.v2.ChatService.CreateConversationMessageStream + */ + createConversationMessageStream: { + methodKind: "server_streaming"; + input: typeof CreateConversationMessageStreamRequestSchema; + output: typeof CreateConversationMessageStreamResponseSchema; + }, + /** + * @generated from rpc chat.v2.ChatService.UpdateConversation + */ + updateConversation: { + methodKind: "unary"; + input: typeof UpdateConversationRequestSchema; + output: typeof UpdateConversationResponseSchema; + }, + /** + * @generated from rpc chat.v2.ChatService.DeleteConversation + 
*/ + deleteConversation: { + methodKind: "unary"; + input: typeof DeleteConversationRequestSchema; + output: typeof DeleteConversationResponseSchema; + }, + /** + * @generated from rpc chat.v2.ChatService.ListSupportedModels + */ + listSupportedModels: { + methodKind: "unary"; + input: typeof ListSupportedModelsRequestSchema; + output: typeof ListSupportedModelsResponseSchema; + }, +}> = /*@__PURE__*/ + serviceDesc(file_chat_v2_chat, 0); + From bb810311cccfd797bcfa59387931b7aa628c2701 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 19:54:14 +0800 Subject: [PATCH 02/28] chore: pure rename v1 --- .../create_conversation_message_stream.go | 2 +- internal/api/chat/delete_conversation.go | 5 ++- internal/api/chat/get_conversation.go | 5 ++- internal/api/chat/list_conversations.go | 5 ++- internal/api/chat/list_supported_models.go | 2 +- internal/api/chat/server.go | 38 ++++++++++++++----- internal/api/chat/update_conversation.go | 2 +- .../toolkit/client/get_conversation_title.go | 4 +- internal/services/toolkit/handler/stream.go | 35 +++++++++++------ internal/services/toolkit/handler/toolcall.go | 2 +- 10 files changed, 68 insertions(+), 32 deletions(-) diff --git a/internal/api/chat/create_conversation_message_stream.go b/internal/api/chat/create_conversation_message_stream.go index e996d3a5..4323e5ff 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -19,7 +19,7 @@ func (s *ChatServer) sendStreamError(stream chatv1.ChatService_CreateConversatio }) } -func (s *ChatServer) CreateConversationMessageStream( +func (s *ChatServerV1) CreateConversationMessageStream( req *chatv1.CreateConversationMessageStreamRequest, stream chatv1.ChatService_CreateConversationMessageStreamServer, ) error { diff --git a/internal/api/chat/delete_conversation.go b/internal/api/chat/delete_conversation.go index 824db1ba..b759b069 100644 --- a/internal/api/chat/delete_conversation.go +++ 
b/internal/api/chat/delete_conversation.go @@ -3,12 +3,13 @@ package chat import ( "context" - "go.mongodb.org/mongo-driver/v2/bson" "paperdebugger/internal/libs/contextutil" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + + "go.mongodb.org/mongo-driver/v2/bson" ) -func (s *ChatServer) DeleteConversation( +func (s *ChatServerV1) DeleteConversation( ctx context.Context, req *chatv1.DeleteConversationRequest, ) (*chatv1.DeleteConversationResponse, error) { diff --git a/internal/api/chat/get_conversation.go b/internal/api/chat/get_conversation.go index 3b960b99..b75223e0 100644 --- a/internal/api/chat/get_conversation.go +++ b/internal/api/chat/get_conversation.go @@ -3,13 +3,14 @@ package chat import ( "context" - "go.mongodb.org/mongo-driver/v2/bson" "paperdebugger/internal/api/mapper" "paperdebugger/internal/libs/contextutil" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + + "go.mongodb.org/mongo-driver/v2/bson" ) -func (s *ChatServer) GetConversation( +func (s *ChatServerV1) GetConversation( ctx context.Context, req *chatv1.GetConversationRequest, ) (*chatv1.GetConversationResponse, error) { diff --git a/internal/api/chat/list_conversations.go b/internal/api/chat/list_conversations.go index 9a0f1232..948be113 100644 --- a/internal/api/chat/list_conversations.go +++ b/internal/api/chat/list_conversations.go @@ -3,14 +3,15 @@ package chat import ( "context" - "github.com/samber/lo" "paperdebugger/internal/api/mapper" "paperdebugger/internal/libs/contextutil" "paperdebugger/internal/models" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + + "github.com/samber/lo" ) -func (s *ChatServer) ListConversations( +func (s *ChatServerV1) ListConversations( ctx context.Context, req *chatv1.ListConversationsRequest, ) (*chatv1.ListConversationsResponse, error) { diff --git a/internal/api/chat/list_supported_models.go b/internal/api/chat/list_supported_models.go index cf032b55..17e01e72 100644 --- a/internal/api/chat/list_supported_models.go +++ 
b/internal/api/chat/list_supported_models.go @@ -10,7 +10,7 @@ import ( "github.com/openai/openai-go/v2" ) -func (s *ChatServer) ListSupportedModels( +func (s *ChatServerV1) ListSupportedModels( ctx context.Context, req *chatv1.ListSupportedModelsRequest, ) (*chatv1.ListSupportedModelsResponse, error) { diff --git a/internal/api/chat/server.go b/internal/api/chat/server.go index f12c4646..5c52bff4 100644 --- a/internal/api/chat/server.go +++ b/internal/api/chat/server.go @@ -6,11 +6,10 @@ import ( "paperdebugger/internal/services" aiclient "paperdebugger/internal/services/toolkit/client" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" ) type ChatServer struct { - chatv1.UnimplementedChatServiceServer - aiClient *aiclient.AIClient chatService *services.ChatService projectService *services.ProjectService @@ -19,6 +18,16 @@ type ChatServer struct { cfg *cfg.Cfg } +type ChatServerV1 struct { + chatv1.UnimplementedChatServiceServer + *ChatServer +} + +type ChatServerV2 struct { + chatv2.UnimplementedChatServiceServer + *ChatServer +} + func NewChatServer( aiClient *aiclient.AIClient, chatService *services.ChatService, @@ -27,12 +36,23 @@ func NewChatServer( logger *logger.Logger, cfg *cfg.Cfg, ) chatv1.ChatServiceServer { - return &ChatServer{ - aiClient: aiClient, - chatService: chatService, - projectService: projectService, - userService: userService, - logger: logger, - cfg: cfg, + return &ChatServerV1{ + ChatServer: &ChatServer{ + aiClient: aiClient, + chatService: chatService, + projectService: projectService, + userService: userService, + logger: logger, + cfg: cfg, + }, + } +} + +func NewChatServerV2(v1Server chatv1.ChatServiceServer) chatv2.ChatServiceServer { + if s, ok := v1Server.(*ChatServerV1); ok { + return &ChatServerV2{ + ChatServer: s.ChatServer, + } } + return nil } diff --git a/internal/api/chat/update_conversation.go b/internal/api/chat/update_conversation.go index 02aaec18..eb116428 100644 --- 
a/internal/api/chat/update_conversation.go +++ b/internal/api/chat/update_conversation.go @@ -11,7 +11,7 @@ import ( "go.mongodb.org/mongo-driver/v2/bson" ) -func (s *ChatServer) UpdateConversation( +func (s *ChatServerV1) UpdateConversation( ctx context.Context, req *chatv1.UpdateConversationRequest, ) (*chatv1.UpdateConversationResponse, error) { diff --git a/internal/services/toolkit/client/get_conversation_title.go b/internal/services/toolkit/client/get_conversation_title.go index 283e689f..e6960e81 100644 --- a/internal/services/toolkit/client/get_conversation_title.go +++ b/internal/services/toolkit/client/get_conversation_title.go @@ -7,6 +7,7 @@ import ( "paperdebugger/internal/models" "strings" + "paperdebugger/internal/api/mapper" chatv1 "paperdebugger/pkg/gen/api/chat/v1" "github.com/openai/openai-go/v2/responses" @@ -55,7 +56,8 @@ func (a *AIClient) GetConversationTitle(ctx context.Context, inappChatHistory [] return "Untitled", nil } - title := strings.TrimSpace(resp[0].Payload.GetAssistant().GetContent()) + msg := mapper.BSONToChatMessage(resp[0]) + title := strings.TrimSpace(msg.Payload.GetAssistant().GetContent()) title = strings.TrimLeft(title, "\"") title = strings.TrimRight(title, "\"") title = strings.TrimSpace(title) diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go index 96ca6668..cf8a524e 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -7,25 +7,36 @@ import ( "github.com/openai/openai-go/v2/responses" ) -type StreamHandler struct { +type StreamHandler interface { + SendInitialization() + HandleAddedItem(responses.ResponseStreamEventUnion) + HandleDoneItem(responses.ResponseStreamEventUnion) + HandleTextDelta(responses.ResponseStreamEventUnion) + SendIncompleteIndicator(reason string, responseId string) + SendFinalization() + SendToolCallBegin(toolCall responses.ResponseFunctionToolCall) + SendToolCallEnd(toolCall 
responses.ResponseFunctionToolCall, result string, err error) +} + +type StreamHandlerV1 struct { callbackStream chatv1.ChatService_CreateConversationMessageStreamServer conversationId string modelSlug string } -func NewStreamHandler( +func NewStreamHandlerV1( callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, -) *StreamHandler { - return &StreamHandler{ +) StreamHandler { + return &StreamHandlerV1{ callbackStream: callbackStream, conversationId: conversationId, modelSlug: modelSlug, } } -func (h *StreamHandler) SendInitialization() { +func (h *StreamHandlerV1) SendInitialization() { if h.callbackStream == nil { return } @@ -41,7 +52,7 @@ func (h *StreamHandler) SendInitialization() { }) } -func (h *StreamHandler) HandleAddedItem(chunk responses.ResponseStreamEventUnion) { +func (h *StreamHandlerV1) HandleAddedItem(chunk responses.ResponseStreamEventUnion) { if h.callbackStream == nil { return } @@ -76,7 +87,7 @@ func (h *StreamHandler) HandleAddedItem(chunk responses.ResponseStreamEventUnion } } -func (h *StreamHandler) HandleDoneItem(chunk responses.ResponseStreamEventUnion) { +func (h *StreamHandlerV1) HandleDoneItem(chunk responses.ResponseStreamEventUnion) { if h.callbackStream == nil { return } @@ -131,7 +142,7 @@ func (h *StreamHandler) HandleDoneItem(chunk responses.ResponseStreamEventUnion) } } -func (h *StreamHandler) HandleTextDelta(chunk responses.ResponseStreamEventUnion) { +func (h *StreamHandlerV1) HandleTextDelta(chunk responses.ResponseStreamEventUnion) { if h.callbackStream == nil { return } @@ -145,7 +156,7 @@ func (h *StreamHandler) HandleTextDelta(chunk responses.ResponseStreamEventUnion }) } -func (h *StreamHandler) SendIncompleteIndicator(reason string, responseId string) { +func (h *StreamHandlerV1) SendIncompleteIndicator(reason string, responseId string) { if h.callbackStream == nil { return } @@ -159,7 +170,7 @@ func (h *StreamHandler) SendIncompleteIndicator(reason string, 
responseId string }) } -func (h *StreamHandler) SendFinalization() { +func (h *StreamHandlerV1) SendFinalization() { if h.callbackStream == nil { return } @@ -172,7 +183,7 @@ func (h *StreamHandler) SendFinalization() { }) } -func (h *StreamHandler) SendToolCallBegin(toolCall responses.ResponseFunctionToolCall) { +func (h *StreamHandlerV1) SendToolCallBegin(toolCall responses.ResponseFunctionToolCall) { if h.callbackStream == nil { return } @@ -193,7 +204,7 @@ func (h *StreamHandler) SendToolCallBegin(toolCall responses.ResponseFunctionToo }) } -func (h *StreamHandler) SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error) { +func (h *StreamHandlerV1) SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error) { if h.callbackStream == nil { return } diff --git a/internal/services/toolkit/handler/toolcall.go b/internal/services/toolkit/handler/toolcall.go index 8cead912..2b3db2a2 100644 --- a/internal/services/toolkit/handler/toolcall.go +++ b/internal/services/toolkit/handler/toolcall.go @@ -38,7 +38,7 @@ func NewToolCallHandler(toolRegistry *registry.ToolRegistry) *ToolCallHandler { // - openaiChatHistory: The OpenAI-compatible chat history including tool call and output items. // - inappChatHistory: The in-app chat history as a slice of chatv1.Message, reflecting tool call events. // - error: Any error encountered during processing (always nil in current implementation). 
-func (h *ToolCallHandler) HandleToolCalls(ctx context.Context, outputs []responses.ResponseOutputItemUnion, streamHandler *StreamHandler) (responses.ResponseNewParamsInputUnion, []chatv1.Message, error) { +func (h *ToolCallHandler) HandleToolCalls(ctx context.Context, outputs []responses.ResponseOutputItemUnion, streamHandler StreamHandler) (responses.ResponseNewParamsInputUnion, []chatv1.Message, error) { openaiChatHistory := responses.ResponseNewParamsInputUnion{} // Accumulates OpenAI chat history items inappChatHistory := []chatv1.Message{} // Accumulates in-app chat history messages From 3f1742a1c2a450c6bf7af64186911befc596e303 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 19:58:29 +0800 Subject: [PATCH 03/28] remove CreateConversationMessage --- pkg/gen/api/chat/v2/chat.pb.go | 350 ++++++++-------------------- pkg/gen/api/chat/v2/chat.pb.gw.go | 66 ------ pkg/gen/api/chat/v2/chat_grpc.pb.go | 38 --- proto/chat/v2/chat.proto | 21 -- 4 files changed, 99 insertions(+), 376 deletions(-) diff --git a/pkg/gen/api/chat/v2/chat.pb.go b/pkg/gen/api/chat/v2/chat.pb.go index f92367eb..b3843c18 100644 --- a/pkg/gen/api/chat/v2/chat.pb.go +++ b/pkg/gen/api/chat/v2/chat.pb.go @@ -824,136 +824,6 @@ func (x *GetConversationResponse) GetConversation() *Conversation { return nil } -type CreateConversationMessageRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // If conversation_id is not provided, - // a new conversation will be created and the id will be returned. 
- ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"` - ModelSlug string `protobuf:"bytes,3,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"` - UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"` - UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"` - ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v2.ConversationType,oneof" json:"conversation_type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateConversationMessageRequest) Reset() { - *x = CreateConversationMessageRequest{} - mi := &file_chat_v2_chat_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateConversationMessageRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateConversationMessageRequest) ProtoMessage() {} - -func (x *CreateConversationMessageRequest) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateConversationMessageRequest.ProtoReflect.Descriptor instead. 
-func (*CreateConversationMessageRequest) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{13} -} - -func (x *CreateConversationMessageRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -func (x *CreateConversationMessageRequest) GetConversationId() string { - if x != nil && x.ConversationId != nil { - return *x.ConversationId - } - return "" -} - -func (x *CreateConversationMessageRequest) GetModelSlug() string { - if x != nil { - return x.ModelSlug - } - return "" -} - -func (x *CreateConversationMessageRequest) GetUserMessage() string { - if x != nil { - return x.UserMessage - } - return "" -} - -func (x *CreateConversationMessageRequest) GetUserSelectedText() string { - if x != nil && x.UserSelectedText != nil { - return *x.UserSelectedText - } - return "" -} - -func (x *CreateConversationMessageRequest) GetConversationType() ConversationType { - if x != nil && x.ConversationType != nil { - return *x.ConversationType - } - return ConversationType_CONVERSATION_TYPE_UNSPECIFIED -} - -type CreateConversationMessageResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateConversationMessageResponse) Reset() { - *x = CreateConversationMessageResponse{} - mi := &file_chat_v2_chat_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateConversationMessageResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateConversationMessageResponse) ProtoMessage() {} - -func (x *CreateConversationMessageResponse) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == 
nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateConversationMessageResponse.ProtoReflect.Descriptor instead. -func (*CreateConversationMessageResponse) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{14} -} - -func (x *CreateConversationMessageResponse) GetConversation() *Conversation { - if x != nil { - return x.Conversation - } - return nil -} - type UpdateConversationRequest struct { state protoimpl.MessageState `protogen:"open.v1"` ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` @@ -964,7 +834,7 @@ type UpdateConversationRequest struct { func (x *UpdateConversationRequest) Reset() { *x = UpdateConversationRequest{} - mi := &file_chat_v2_chat_proto_msgTypes[15] + mi := &file_chat_v2_chat_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -976,7 +846,7 @@ func (x *UpdateConversationRequest) String() string { func (*UpdateConversationRequest) ProtoMessage() {} func (x *UpdateConversationRequest) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[15] + mi := &file_chat_v2_chat_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -989,7 +859,7 @@ func (x *UpdateConversationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateConversationRequest.ProtoReflect.Descriptor instead. 
func (*UpdateConversationRequest) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{15} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{13} } func (x *UpdateConversationRequest) GetConversationId() string { @@ -1015,7 +885,7 @@ type UpdateConversationResponse struct { func (x *UpdateConversationResponse) Reset() { *x = UpdateConversationResponse{} - mi := &file_chat_v2_chat_proto_msgTypes[16] + mi := &file_chat_v2_chat_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1027,7 +897,7 @@ func (x *UpdateConversationResponse) String() string { func (*UpdateConversationResponse) ProtoMessage() {} func (x *UpdateConversationResponse) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[16] + mi := &file_chat_v2_chat_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1040,7 +910,7 @@ func (x *UpdateConversationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateConversationResponse.ProtoReflect.Descriptor instead. 
func (*UpdateConversationResponse) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{16} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{14} } func (x *UpdateConversationResponse) GetConversation() *Conversation { @@ -1059,7 +929,7 @@ type DeleteConversationRequest struct { func (x *DeleteConversationRequest) Reset() { *x = DeleteConversationRequest{} - mi := &file_chat_v2_chat_proto_msgTypes[17] + mi := &file_chat_v2_chat_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1071,7 +941,7 @@ func (x *DeleteConversationRequest) String() string { func (*DeleteConversationRequest) ProtoMessage() {} func (x *DeleteConversationRequest) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[17] + mi := &file_chat_v2_chat_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1084,7 +954,7 @@ func (x *DeleteConversationRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteConversationRequest.ProtoReflect.Descriptor instead. 
func (*DeleteConversationRequest) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{17} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{15} } func (x *DeleteConversationRequest) GetConversationId() string { @@ -1102,7 +972,7 @@ type DeleteConversationResponse struct { func (x *DeleteConversationResponse) Reset() { *x = DeleteConversationResponse{} - mi := &file_chat_v2_chat_proto_msgTypes[18] + mi := &file_chat_v2_chat_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1114,7 +984,7 @@ func (x *DeleteConversationResponse) String() string { func (*DeleteConversationResponse) ProtoMessage() {} func (x *DeleteConversationResponse) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[18] + mi := &file_chat_v2_chat_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1127,7 +997,7 @@ func (x *DeleteConversationResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteConversationResponse.ProtoReflect.Descriptor instead. 
func (*DeleteConversationResponse) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{18} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{16} } type SupportedModel struct { @@ -1140,7 +1010,7 @@ type SupportedModel struct { func (x *SupportedModel) Reset() { *x = SupportedModel{} - mi := &file_chat_v2_chat_proto_msgTypes[19] + mi := &file_chat_v2_chat_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1152,7 +1022,7 @@ func (x *SupportedModel) String() string { func (*SupportedModel) ProtoMessage() {} func (x *SupportedModel) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[19] + mi := &file_chat_v2_chat_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1165,7 +1035,7 @@ func (x *SupportedModel) ProtoReflect() protoreflect.Message { // Deprecated: Use SupportedModel.ProtoReflect.Descriptor instead. 
func (*SupportedModel) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{19} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{17} } func (x *SupportedModel) GetName() string { @@ -1190,7 +1060,7 @@ type ListSupportedModelsRequest struct { func (x *ListSupportedModelsRequest) Reset() { *x = ListSupportedModelsRequest{} - mi := &file_chat_v2_chat_proto_msgTypes[20] + mi := &file_chat_v2_chat_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1202,7 +1072,7 @@ func (x *ListSupportedModelsRequest) String() string { func (*ListSupportedModelsRequest) ProtoMessage() {} func (x *ListSupportedModelsRequest) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[20] + mi := &file_chat_v2_chat_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1215,7 +1085,7 @@ func (x *ListSupportedModelsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSupportedModelsRequest.ProtoReflect.Descriptor instead. 
func (*ListSupportedModelsRequest) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{20} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{18} } type ListSupportedModelsResponse struct { @@ -1227,7 +1097,7 @@ type ListSupportedModelsResponse struct { func (x *ListSupportedModelsResponse) Reset() { *x = ListSupportedModelsResponse{} - mi := &file_chat_v2_chat_proto_msgTypes[21] + mi := &file_chat_v2_chat_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1239,7 +1109,7 @@ func (x *ListSupportedModelsResponse) String() string { func (*ListSupportedModelsResponse) ProtoMessage() {} func (x *ListSupportedModelsResponse) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[21] + mi := &file_chat_v2_chat_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1252,7 +1122,7 @@ func (x *ListSupportedModelsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSupportedModelsResponse.ProtoReflect.Descriptor instead. 
func (*ListSupportedModelsResponse) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{21} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{19} } func (x *ListSupportedModelsResponse) GetModels() []*SupportedModel { @@ -1273,7 +1143,7 @@ type StreamInitialization struct { func (x *StreamInitialization) Reset() { *x = StreamInitialization{} - mi := &file_chat_v2_chat_proto_msgTypes[22] + mi := &file_chat_v2_chat_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1285,7 +1155,7 @@ func (x *StreamInitialization) String() string { func (*StreamInitialization) ProtoMessage() {} func (x *StreamInitialization) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[22] + mi := &file_chat_v2_chat_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1298,7 +1168,7 @@ func (x *StreamInitialization) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamInitialization.ProtoReflect.Descriptor instead. 
func (*StreamInitialization) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{22} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{20} } func (x *StreamInitialization) GetConversationId() string { @@ -1330,7 +1200,7 @@ type StreamPartBegin struct { func (x *StreamPartBegin) Reset() { *x = StreamPartBegin{} - mi := &file_chat_v2_chat_proto_msgTypes[23] + mi := &file_chat_v2_chat_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1342,7 +1212,7 @@ func (x *StreamPartBegin) String() string { func (*StreamPartBegin) ProtoMessage() {} func (x *StreamPartBegin) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[23] + mi := &file_chat_v2_chat_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1355,7 +1225,7 @@ func (x *StreamPartBegin) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamPartBegin.ProtoReflect.Descriptor instead. 
func (*StreamPartBegin) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{23} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{21} } func (x *StreamPartBegin) GetMessageId() string { @@ -1385,7 +1255,7 @@ type MessageChunk struct { func (x *MessageChunk) Reset() { *x = MessageChunk{} - mi := &file_chat_v2_chat_proto_msgTypes[24] + mi := &file_chat_v2_chat_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1397,7 +1267,7 @@ func (x *MessageChunk) String() string { func (*MessageChunk) ProtoMessage() {} func (x *MessageChunk) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[24] + mi := &file_chat_v2_chat_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1410,7 +1280,7 @@ func (x *MessageChunk) ProtoReflect() protoreflect.Message { // Deprecated: Use MessageChunk.ProtoReflect.Descriptor instead. 
func (*MessageChunk) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{24} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{22} } func (x *MessageChunk) GetMessageId() string { @@ -1437,7 +1307,7 @@ type IncompleteIndicator struct { func (x *IncompleteIndicator) Reset() { *x = IncompleteIndicator{} - mi := &file_chat_v2_chat_proto_msgTypes[25] + mi := &file_chat_v2_chat_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1449,7 +1319,7 @@ func (x *IncompleteIndicator) String() string { func (*IncompleteIndicator) ProtoMessage() {} func (x *IncompleteIndicator) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[25] + mi := &file_chat_v2_chat_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1462,7 +1332,7 @@ func (x *IncompleteIndicator) ProtoReflect() protoreflect.Message { // Deprecated: Use IncompleteIndicator.ProtoReflect.Descriptor instead. 
func (*IncompleteIndicator) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{25} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{23} } func (x *IncompleteIndicator) GetReason() string { @@ -1489,7 +1359,7 @@ type StreamPartEnd struct { func (x *StreamPartEnd) Reset() { *x = StreamPartEnd{} - mi := &file_chat_v2_chat_proto_msgTypes[26] + mi := &file_chat_v2_chat_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1501,7 +1371,7 @@ func (x *StreamPartEnd) String() string { func (*StreamPartEnd) ProtoMessage() {} func (x *StreamPartEnd) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[26] + mi := &file_chat_v2_chat_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1514,7 +1384,7 @@ func (x *StreamPartEnd) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamPartEnd.ProtoReflect.Descriptor instead. 
func (*StreamPartEnd) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{26} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{24} } func (x *StreamPartEnd) GetMessageId() string { @@ -1541,7 +1411,7 @@ type StreamFinalization struct { func (x *StreamFinalization) Reset() { *x = StreamFinalization{} - mi := &file_chat_v2_chat_proto_msgTypes[27] + mi := &file_chat_v2_chat_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1553,7 +1423,7 @@ func (x *StreamFinalization) String() string { func (*StreamFinalization) ProtoMessage() {} func (x *StreamFinalization) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[27] + mi := &file_chat_v2_chat_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1566,7 +1436,7 @@ func (x *StreamFinalization) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamFinalization.ProtoReflect.Descriptor instead. 
func (*StreamFinalization) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{27} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{25} } func (x *StreamFinalization) GetConversationId() string { @@ -1585,7 +1455,7 @@ type StreamError struct { func (x *StreamError) Reset() { *x = StreamError{} - mi := &file_chat_v2_chat_proto_msgTypes[28] + mi := &file_chat_v2_chat_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1597,7 +1467,7 @@ func (x *StreamError) String() string { func (*StreamError) ProtoMessage() {} func (x *StreamError) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[28] + mi := &file_chat_v2_chat_proto_msgTypes[26] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1610,7 +1480,7 @@ func (x *StreamError) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamError.ProtoReflect.Descriptor instead. 
func (*StreamError) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{28} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{26} } func (x *StreamError) GetErrorMessage() string { @@ -1638,7 +1508,7 @@ type CreateConversationMessageStreamRequest struct { func (x *CreateConversationMessageStreamRequest) Reset() { *x = CreateConversationMessageStreamRequest{} - mi := &file_chat_v2_chat_proto_msgTypes[29] + mi := &file_chat_v2_chat_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1650,7 +1520,7 @@ func (x *CreateConversationMessageStreamRequest) String() string { func (*CreateConversationMessageStreamRequest) ProtoMessage() {} func (x *CreateConversationMessageStreamRequest) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[29] + mi := &file_chat_v2_chat_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1663,7 +1533,7 @@ func (x *CreateConversationMessageStreamRequest) ProtoReflect() protoreflect.Mes // Deprecated: Use CreateConversationMessageStreamRequest.ProtoReflect.Descriptor instead. 
func (*CreateConversationMessageStreamRequest) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{29} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{27} } func (x *CreateConversationMessageStreamRequest) GetProjectId() string { @@ -1727,7 +1597,7 @@ type CreateConversationMessageStreamResponse struct { func (x *CreateConversationMessageStreamResponse) Reset() { *x = CreateConversationMessageStreamResponse{} - mi := &file_chat_v2_chat_proto_msgTypes[30] + mi := &file_chat_v2_chat_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1739,7 +1609,7 @@ func (x *CreateConversationMessageStreamResponse) String() string { func (*CreateConversationMessageStreamResponse) ProtoMessage() {} func (x *CreateConversationMessageStreamResponse) ProtoReflect() protoreflect.Message { - mi := &file_chat_v2_chat_proto_msgTypes[30] + mi := &file_chat_v2_chat_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1752,7 +1622,7 @@ func (x *CreateConversationMessageStreamResponse) ProtoReflect() protoreflect.Me // Deprecated: Use CreateConversationMessageStreamResponse.ProtoReflect.Descriptor instead. 
func (*CreateConversationMessageStreamResponse) Descriptor() ([]byte, []int) { - return file_chat_v2_chat_proto_rawDescGZIP(), []int{30} + return file_chat_v2_chat_proto_rawDescGZIP(), []int{28} } func (x *CreateConversationMessageStreamResponse) GetResponsePayload() isCreateConversationMessageStreamResponse_ResponsePayload { @@ -1930,20 +1800,6 @@ const file_chat_v2_chat_proto_rawDesc = "" + "\x16GetConversationRequest\x12'\n" + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"T\n" + "\x17GetConversationResponse\x129\n" + - "\fconversation\x18\x01 \x01(\v2\x15.chat.v2.ConversationR\fconversation\"\xf2\x02\n" + - " CreateConversationMessageRequest\x12\x1d\n" + - "\n" + - "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" + - "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12\x1d\n" + - "\n" + - "model_slug\x18\x03 \x01(\tR\tmodelSlug\x12!\n" + - "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" + - "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" + - "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v2.ConversationTypeH\x02R\x10conversationType\x88\x01\x01B\x12\n" + - "\x10_conversation_idB\x15\n" + - "\x13_user_selected_textB\x14\n" + - "\x12_conversation_type\"^\n" + - "!CreateConversationMessageResponse\x129\n" + "\fconversation\x18\x01 \x01(\v2\x15.chat.v2.ConversationR\fconversation\"Z\n" + "\x19UpdateConversationRequest\x12'\n" + "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12\x14\n" + @@ -2006,11 +1862,10 @@ const file_chat_v2_chat_proto_rawDesc = "" + "\x10response_payload*R\n" + "\x10ConversationType\x12!\n" + "\x1dCONVERSATION_TYPE_UNSPECIFIED\x10\x00\x12\x1b\n" + - "\x17CONVERSATION_TYPE_DEBUG\x10\x012\xd2\b\n" + + "\x17CONVERSATION_TYPE_DEBUG\x10\x012\xa8\a\n" + "\vChatService\x12\x83\x01\n" + "\x11ListConversations\x12!.chat.v2.ListConversationsRequest\x1a\".chat.v2.ListConversationsResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/_pd/api/v2/chats/conversations\x12\x8f\x01\n" 
+ - "\x0fGetConversation\x12\x1f.chat.v2.GetConversationRequest\x1a .chat.v2.GetConversationResponse\"9\x82\xd3\xe4\x93\x023\x121/_pd/api/v2/chats/conversations/{conversation_id}\x12\xa7\x01\n" + - "\x19CreateConversationMessage\x12).chat.v2.CreateConversationMessageRequest\x1a*.chat.v2.CreateConversationMessageResponse\"3\x82\xd3\xe4\x93\x02-:\x01*\"(/_pd/api/v2/chats/conversations/messages\x12\xc2\x01\n" + + "\x0fGetConversation\x12\x1f.chat.v2.GetConversationRequest\x1a .chat.v2.GetConversationResponse\"9\x82\xd3\xe4\x93\x023\x121/_pd/api/v2/chats/conversations/{conversation_id}\x12\xc2\x01\n" + "\x1fCreateConversationMessageStream\x12/.chat.v2.CreateConversationMessageStreamRequest\x1a0.chat.v2.CreateConversationMessageStreamResponse\":\x82\xd3\xe4\x93\x024:\x01*\"//_pd/api/v2/chats/conversations/messages/stream0\x01\x12\x9b\x01\n" + "\x12UpdateConversation\x12\".chat.v2.UpdateConversationRequest\x1a#.chat.v2.UpdateConversationResponse\"<\x82\xd3\xe4\x93\x026:\x01*21/_pd/api/v2/chats/conversations/{conversation_id}\x12\x98\x01\n" + "\x12DeleteConversation\x12\".chat.v2.DeleteConversationRequest\x1a#.chat.v2.DeleteConversationResponse\"9\x82\xd3\xe4\x93\x023*1/_pd/api/v2/chats/conversations/{conversation_id}\x12\x82\x01\n" + @@ -2030,7 +1885,7 @@ func file_chat_v2_chat_proto_rawDescGZIP() []byte { } var file_chat_v2_chat_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_chat_v2_chat_proto_msgTypes = make([]protoimpl.MessageInfo, 31) +var file_chat_v2_chat_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_chat_v2_chat_proto_goTypes = []any{ (ConversationType)(0), // 0: chat.v2.ConversationType (*MessageTypeToolCall)(nil), // 1: chat.v2.MessageTypeToolCall @@ -2046,24 +1901,22 @@ var file_chat_v2_chat_proto_goTypes = []any{ (*ListConversationsResponse)(nil), // 11: chat.v2.ListConversationsResponse (*GetConversationRequest)(nil), // 12: chat.v2.GetConversationRequest (*GetConversationResponse)(nil), // 13: chat.v2.GetConversationResponse - 
(*CreateConversationMessageRequest)(nil), // 14: chat.v2.CreateConversationMessageRequest - (*CreateConversationMessageResponse)(nil), // 15: chat.v2.CreateConversationMessageResponse - (*UpdateConversationRequest)(nil), // 16: chat.v2.UpdateConversationRequest - (*UpdateConversationResponse)(nil), // 17: chat.v2.UpdateConversationResponse - (*DeleteConversationRequest)(nil), // 18: chat.v2.DeleteConversationRequest - (*DeleteConversationResponse)(nil), // 19: chat.v2.DeleteConversationResponse - (*SupportedModel)(nil), // 20: chat.v2.SupportedModel - (*ListSupportedModelsRequest)(nil), // 21: chat.v2.ListSupportedModelsRequest - (*ListSupportedModelsResponse)(nil), // 22: chat.v2.ListSupportedModelsResponse - (*StreamInitialization)(nil), // 23: chat.v2.StreamInitialization - (*StreamPartBegin)(nil), // 24: chat.v2.StreamPartBegin - (*MessageChunk)(nil), // 25: chat.v2.MessageChunk - (*IncompleteIndicator)(nil), // 26: chat.v2.IncompleteIndicator - (*StreamPartEnd)(nil), // 27: chat.v2.StreamPartEnd - (*StreamFinalization)(nil), // 28: chat.v2.StreamFinalization - (*StreamError)(nil), // 29: chat.v2.StreamError - (*CreateConversationMessageStreamRequest)(nil), // 30: chat.v2.CreateConversationMessageStreamRequest - (*CreateConversationMessageStreamResponse)(nil), // 31: chat.v2.CreateConversationMessageStreamResponse + (*UpdateConversationRequest)(nil), // 14: chat.v2.UpdateConversationRequest + (*UpdateConversationResponse)(nil), // 15: chat.v2.UpdateConversationResponse + (*DeleteConversationRequest)(nil), // 16: chat.v2.DeleteConversationRequest + (*DeleteConversationResponse)(nil), // 17: chat.v2.DeleteConversationResponse + (*SupportedModel)(nil), // 18: chat.v2.SupportedModel + (*ListSupportedModelsRequest)(nil), // 19: chat.v2.ListSupportedModelsRequest + (*ListSupportedModelsResponse)(nil), // 20: chat.v2.ListSupportedModelsResponse + (*StreamInitialization)(nil), // 21: chat.v2.StreamInitialization + (*StreamPartBegin)(nil), // 22: chat.v2.StreamPartBegin 
+ (*MessageChunk)(nil), // 23: chat.v2.MessageChunk + (*IncompleteIndicator)(nil), // 24: chat.v2.IncompleteIndicator + (*StreamPartEnd)(nil), // 25: chat.v2.StreamPartEnd + (*StreamFinalization)(nil), // 26: chat.v2.StreamFinalization + (*StreamError)(nil), // 27: chat.v2.StreamError + (*CreateConversationMessageStreamRequest)(nil), // 28: chat.v2.CreateConversationMessageStreamRequest + (*CreateConversationMessageStreamResponse)(nil), // 29: chat.v2.CreateConversationMessageStreamResponse } var file_chat_v2_chat_proto_depIdxs = []int32{ 3, // 0: chat.v2.MessagePayload.system:type_name -> chat.v2.MessageTypeSystem @@ -2076,39 +1929,35 @@ var file_chat_v2_chat_proto_depIdxs = []int32{ 8, // 7: chat.v2.Conversation.messages:type_name -> chat.v2.Message 9, // 8: chat.v2.ListConversationsResponse.conversations:type_name -> chat.v2.Conversation 9, // 9: chat.v2.GetConversationResponse.conversation:type_name -> chat.v2.Conversation - 0, // 10: chat.v2.CreateConversationMessageRequest.conversation_type:type_name -> chat.v2.ConversationType - 9, // 11: chat.v2.CreateConversationMessageResponse.conversation:type_name -> chat.v2.Conversation - 9, // 12: chat.v2.UpdateConversationResponse.conversation:type_name -> chat.v2.Conversation - 20, // 13: chat.v2.ListSupportedModelsResponse.models:type_name -> chat.v2.SupportedModel - 7, // 14: chat.v2.StreamPartBegin.payload:type_name -> chat.v2.MessagePayload - 7, // 15: chat.v2.StreamPartEnd.payload:type_name -> chat.v2.MessagePayload - 0, // 16: chat.v2.CreateConversationMessageStreamRequest.conversation_type:type_name -> chat.v2.ConversationType - 23, // 17: chat.v2.CreateConversationMessageStreamResponse.stream_initialization:type_name -> chat.v2.StreamInitialization - 24, // 18: chat.v2.CreateConversationMessageStreamResponse.stream_part_begin:type_name -> chat.v2.StreamPartBegin - 25, // 19: chat.v2.CreateConversationMessageStreamResponse.message_chunk:type_name -> chat.v2.MessageChunk - 26, // 20: 
chat.v2.CreateConversationMessageStreamResponse.incomplete_indicator:type_name -> chat.v2.IncompleteIndicator - 27, // 21: chat.v2.CreateConversationMessageStreamResponse.stream_part_end:type_name -> chat.v2.StreamPartEnd - 28, // 22: chat.v2.CreateConversationMessageStreamResponse.stream_finalization:type_name -> chat.v2.StreamFinalization - 29, // 23: chat.v2.CreateConversationMessageStreamResponse.stream_error:type_name -> chat.v2.StreamError - 10, // 24: chat.v2.ChatService.ListConversations:input_type -> chat.v2.ListConversationsRequest - 12, // 25: chat.v2.ChatService.GetConversation:input_type -> chat.v2.GetConversationRequest - 14, // 26: chat.v2.ChatService.CreateConversationMessage:input_type -> chat.v2.CreateConversationMessageRequest - 30, // 27: chat.v2.ChatService.CreateConversationMessageStream:input_type -> chat.v2.CreateConversationMessageStreamRequest - 16, // 28: chat.v2.ChatService.UpdateConversation:input_type -> chat.v2.UpdateConversationRequest - 18, // 29: chat.v2.ChatService.DeleteConversation:input_type -> chat.v2.DeleteConversationRequest - 21, // 30: chat.v2.ChatService.ListSupportedModels:input_type -> chat.v2.ListSupportedModelsRequest - 11, // 31: chat.v2.ChatService.ListConversations:output_type -> chat.v2.ListConversationsResponse - 13, // 32: chat.v2.ChatService.GetConversation:output_type -> chat.v2.GetConversationResponse - 15, // 33: chat.v2.ChatService.CreateConversationMessage:output_type -> chat.v2.CreateConversationMessageResponse - 31, // 34: chat.v2.ChatService.CreateConversationMessageStream:output_type -> chat.v2.CreateConversationMessageStreamResponse - 17, // 35: chat.v2.ChatService.UpdateConversation:output_type -> chat.v2.UpdateConversationResponse - 19, // 36: chat.v2.ChatService.DeleteConversation:output_type -> chat.v2.DeleteConversationResponse - 22, // 37: chat.v2.ChatService.ListSupportedModels:output_type -> chat.v2.ListSupportedModelsResponse - 31, // [31:38] is the sub-list for method output_type - 24, // 
[24:31] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 9, // 10: chat.v2.UpdateConversationResponse.conversation:type_name -> chat.v2.Conversation + 18, // 11: chat.v2.ListSupportedModelsResponse.models:type_name -> chat.v2.SupportedModel + 7, // 12: chat.v2.StreamPartBegin.payload:type_name -> chat.v2.MessagePayload + 7, // 13: chat.v2.StreamPartEnd.payload:type_name -> chat.v2.MessagePayload + 0, // 14: chat.v2.CreateConversationMessageStreamRequest.conversation_type:type_name -> chat.v2.ConversationType + 21, // 15: chat.v2.CreateConversationMessageStreamResponse.stream_initialization:type_name -> chat.v2.StreamInitialization + 22, // 16: chat.v2.CreateConversationMessageStreamResponse.stream_part_begin:type_name -> chat.v2.StreamPartBegin + 23, // 17: chat.v2.CreateConversationMessageStreamResponse.message_chunk:type_name -> chat.v2.MessageChunk + 24, // 18: chat.v2.CreateConversationMessageStreamResponse.incomplete_indicator:type_name -> chat.v2.IncompleteIndicator + 25, // 19: chat.v2.CreateConversationMessageStreamResponse.stream_part_end:type_name -> chat.v2.StreamPartEnd + 26, // 20: chat.v2.CreateConversationMessageStreamResponse.stream_finalization:type_name -> chat.v2.StreamFinalization + 27, // 21: chat.v2.CreateConversationMessageStreamResponse.stream_error:type_name -> chat.v2.StreamError + 10, // 22: chat.v2.ChatService.ListConversations:input_type -> chat.v2.ListConversationsRequest + 12, // 23: chat.v2.ChatService.GetConversation:input_type -> chat.v2.GetConversationRequest + 28, // 24: chat.v2.ChatService.CreateConversationMessageStream:input_type -> chat.v2.CreateConversationMessageStreamRequest + 14, // 25: chat.v2.ChatService.UpdateConversation:input_type -> chat.v2.UpdateConversationRequest + 16, // 26: chat.v2.ChatService.DeleteConversation:input_type -> chat.v2.DeleteConversationRequest 
+ 19, // 27: chat.v2.ChatService.ListSupportedModels:input_type -> chat.v2.ListSupportedModelsRequest + 11, // 28: chat.v2.ChatService.ListConversations:output_type -> chat.v2.ListConversationsResponse + 13, // 29: chat.v2.ChatService.GetConversation:output_type -> chat.v2.GetConversationResponse + 29, // 30: chat.v2.ChatService.CreateConversationMessageStream:output_type -> chat.v2.CreateConversationMessageStreamResponse + 15, // 31: chat.v2.ChatService.UpdateConversation:output_type -> chat.v2.UpdateConversationResponse + 17, // 32: chat.v2.ChatService.DeleteConversation:output_type -> chat.v2.DeleteConversationResponse + 20, // 33: chat.v2.ChatService.ListSupportedModels:output_type -> chat.v2.ListSupportedModelsResponse + 28, // [28:34] is the sub-list for method output_type + 22, // [22:28] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name } func init() { file_chat_v2_chat_proto_init() } @@ -2126,9 +1975,8 @@ func file_chat_v2_chat_proto_init() { (*MessagePayload_Unknown)(nil), } file_chat_v2_chat_proto_msgTypes[9].OneofWrappers = []any{} - file_chat_v2_chat_proto_msgTypes[13].OneofWrappers = []any{} - file_chat_v2_chat_proto_msgTypes[29].OneofWrappers = []any{} - file_chat_v2_chat_proto_msgTypes[30].OneofWrappers = []any{ + file_chat_v2_chat_proto_msgTypes[27].OneofWrappers = []any{} + file_chat_v2_chat_proto_msgTypes[28].OneofWrappers = []any{ (*CreateConversationMessageStreamResponse_StreamInitialization)(nil), (*CreateConversationMessageStreamResponse_StreamPartBegin)(nil), (*CreateConversationMessageStreamResponse_MessageChunk)(nil), @@ -2143,7 +1991,7 @@ func file_chat_v2_chat_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_chat_v2_chat_proto_rawDesc), len(file_chat_v2_chat_proto_rawDesc)), NumEnums: 1, - NumMessages: 31, + NumMessages: 29, 
NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/gen/api/chat/v2/chat.pb.gw.go b/pkg/gen/api/chat/v2/chat.pb.gw.go index 4d33af53..81f7e4e6 100644 --- a/pkg/gen/api/chat/v2/chat.pb.gw.go +++ b/pkg/gen/api/chat/v2/chat.pb.gw.go @@ -109,33 +109,6 @@ func local_request_ChatService_GetConversation_0(ctx context.Context, marshaler return msg, metadata, err } -func request_ChatService_CreateConversationMessage_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq CreateConversationMessageRequest - metadata runtime.ServerMetadata - ) - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if req.Body != nil { - _, _ = io.Copy(io.Discard, req.Body) - } - msg, err := client.CreateConversationMessage(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err -} - -func local_request_ChatService_CreateConversationMessage_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var ( - protoReq CreateConversationMessageRequest - metadata runtime.ServerMetadata - ) - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - msg, err := server.CreateConversationMessage(ctx, &protoReq) - return msg, metadata, err -} - func request_ChatService_CreateConversationMessageStream_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (ChatService_CreateConversationMessageStreamClient, runtime.ServerMetadata, error) { var ( protoReq 
CreateConversationMessageStreamRequest @@ -310,26 +283,6 @@ func RegisterChatServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux } forward_ChatService_GetConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/CreateConversationMessage", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/messages")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_ChatService_CreateConversationMessage_0(annotatedContext, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - forward_ChatService_CreateConversationMessage_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessageStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") @@ -471,23 +424,6 @@ func RegisterChatServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux } forward_ChatService_GetConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessage_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/CreateConversationMessage", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/messages")) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_ChatService_CreateConversationMessage_0(annotatedContext, inboundMarshaler, client, req, pathParams) - annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) - if err != nil { - runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) - return - } - forward_ChatService_CreateConversationMessage_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- }) mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessageStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() @@ -562,7 +498,6 @@ func RegisterChatServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux var ( pattern_ChatService_ListConversations_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"_pd", "api", "v2", "chats", "conversations"}, "")) pattern_ChatService_GetConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, "")) - pattern_ChatService_CreateConversationMessage_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "messages"}, "")) pattern_ChatService_CreateConversationMessageStream_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5, 2, 6}, []string{"_pd", "api", "v2", "chats", "conversations", "messages", "stream"}, "")) pattern_ChatService_UpdateConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, "")) pattern_ChatService_DeleteConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, "")) @@ -572,7 +507,6 @@ var ( var ( forward_ChatService_ListConversations_0 = runtime.ForwardResponseMessage forward_ChatService_GetConversation_0 = runtime.ForwardResponseMessage - forward_ChatService_CreateConversationMessage_0 = runtime.ForwardResponseMessage forward_ChatService_CreateConversationMessageStream_0 = runtime.ForwardResponseStream 
forward_ChatService_UpdateConversation_0 = runtime.ForwardResponseMessage forward_ChatService_DeleteConversation_0 = runtime.ForwardResponseMessage diff --git a/pkg/gen/api/chat/v2/chat_grpc.pb.go b/pkg/gen/api/chat/v2/chat_grpc.pb.go index 3226480a..8303a8a8 100644 --- a/pkg/gen/api/chat/v2/chat_grpc.pb.go +++ b/pkg/gen/api/chat/v2/chat_grpc.pb.go @@ -21,7 +21,6 @@ const _ = grpc.SupportPackageIsVersion9 const ( ChatService_ListConversations_FullMethodName = "/chat.v2.ChatService/ListConversations" ChatService_GetConversation_FullMethodName = "/chat.v2.ChatService/GetConversation" - ChatService_CreateConversationMessage_FullMethodName = "/chat.v2.ChatService/CreateConversationMessage" ChatService_CreateConversationMessageStream_FullMethodName = "/chat.v2.ChatService/CreateConversationMessageStream" ChatService_UpdateConversation_FullMethodName = "/chat.v2.ChatService/UpdateConversation" ChatService_DeleteConversation_FullMethodName = "/chat.v2.ChatService/DeleteConversation" @@ -34,7 +33,6 @@ const ( type ChatServiceClient interface { ListConversations(ctx context.Context, in *ListConversationsRequest, opts ...grpc.CallOption) (*ListConversationsResponse, error) GetConversation(ctx context.Context, in *GetConversationRequest, opts ...grpc.CallOption) (*GetConversationResponse, error) - CreateConversationMessage(ctx context.Context, in *CreateConversationMessageRequest, opts ...grpc.CallOption) (*CreateConversationMessageResponse, error) CreateConversationMessageStream(ctx context.Context, in *CreateConversationMessageStreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateConversationMessageStreamResponse], error) UpdateConversation(ctx context.Context, in *UpdateConversationRequest, opts ...grpc.CallOption) (*UpdateConversationResponse, error) DeleteConversation(ctx context.Context, in *DeleteConversationRequest, opts ...grpc.CallOption) (*DeleteConversationResponse, error) @@ -69,16 +67,6 @@ func (c *chatServiceClient) GetConversation(ctx 
context.Context, in *GetConversa return out, nil } -func (c *chatServiceClient) CreateConversationMessage(ctx context.Context, in *CreateConversationMessageRequest, opts ...grpc.CallOption) (*CreateConversationMessageResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CreateConversationMessageResponse) - err := c.cc.Invoke(ctx, ChatService_CreateConversationMessage_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - func (c *chatServiceClient) CreateConversationMessageStream(ctx context.Context, in *CreateConversationMessageStreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateConversationMessageStreamResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &ChatService_ServiceDesc.Streams[0], ChatService_CreateConversationMessageStream_FullMethodName, cOpts...) @@ -134,7 +122,6 @@ func (c *chatServiceClient) ListSupportedModels(ctx context.Context, in *ListSup type ChatServiceServer interface { ListConversations(context.Context, *ListConversationsRequest) (*ListConversationsResponse, error) GetConversation(context.Context, *GetConversationRequest) (*GetConversationResponse, error) - CreateConversationMessage(context.Context, *CreateConversationMessageRequest) (*CreateConversationMessageResponse, error) CreateConversationMessageStream(*CreateConversationMessageStreamRequest, grpc.ServerStreamingServer[CreateConversationMessageStreamResponse]) error UpdateConversation(context.Context, *UpdateConversationRequest) (*UpdateConversationResponse, error) DeleteConversation(context.Context, *DeleteConversationRequest) (*DeleteConversationResponse, error) @@ -155,9 +142,6 @@ func (UnimplementedChatServiceServer) ListConversations(context.Context, *ListCo func (UnimplementedChatServiceServer) GetConversation(context.Context, *GetConversationRequest) (*GetConversationResponse, error) { return nil, 
status.Error(codes.Unimplemented, "method GetConversation not implemented") } -func (UnimplementedChatServiceServer) CreateConversationMessage(context.Context, *CreateConversationMessageRequest) (*CreateConversationMessageResponse, error) { - return nil, status.Error(codes.Unimplemented, "method CreateConversationMessage not implemented") -} func (UnimplementedChatServiceServer) CreateConversationMessageStream(*CreateConversationMessageStreamRequest, grpc.ServerStreamingServer[CreateConversationMessageStreamResponse]) error { return status.Error(codes.Unimplemented, "method CreateConversationMessageStream not implemented") } @@ -227,24 +211,6 @@ func _ChatService_GetConversation_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _ChatService_CreateConversationMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateConversationMessageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ChatServiceServer).CreateConversationMessage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: ChatService_CreateConversationMessage_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ChatServiceServer).CreateConversationMessage(ctx, req.(*CreateConversationMessageRequest)) - } - return interceptor(ctx, in, info, handler) -} - func _ChatService_CreateConversationMessageStream_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(CreateConversationMessageStreamRequest) if err := stream.RecvMsg(m); err != nil { @@ -325,10 +291,6 @@ var ChatService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetConversation", Handler: _ChatService_GetConversation_Handler, }, - { - MethodName: "CreateConversationMessage", - Handler: _ChatService_CreateConversationMessage_Handler, - }, { MethodName: 
"UpdateConversation", Handler: _ChatService_UpdateConversation_Handler, diff --git a/proto/chat/v2/chat.proto b/proto/chat/v2/chat.proto index a6399dcd..62ae9d02 100644 --- a/proto/chat/v2/chat.proto +++ b/proto/chat/v2/chat.proto @@ -13,12 +13,6 @@ service ChatService { rpc GetConversation(GetConversationRequest) returns (GetConversationResponse) { option (google.api.http) = {get: "/_pd/api/v2/chats/conversations/{conversation_id}"}; } - rpc CreateConversationMessage(CreateConversationMessageRequest) returns (CreateConversationMessageResponse) { - option (google.api.http) = { - post: "/_pd/api/v2/chats/conversations/messages" - body: "*" - }; - } rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) returns (stream CreateConversationMessageStreamResponse) { option (google.api.http) = { post: "/_pd/api/v2/chats/conversations/messages/stream" @@ -110,21 +104,6 @@ message GetConversationResponse { Conversation conversation = 1; } -message CreateConversationMessageRequest { - string project_id = 1; - // If conversation_id is not provided, - // a new conversation will be created and the id will be returned. 
- optional string conversation_id = 2; - string model_slug = 3; - string user_message = 4; - optional string user_selected_text = 5; - optional ConversationType conversation_type = 6; -} - -message CreateConversationMessageResponse { - Conversation conversation = 1; -} - message UpdateConversationRequest { string conversation_id = 1; string title = 2; From 0ae5adfbbec1cd5b0f90e05cd8efa8a84ddae08f Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 20:38:29 +0800 Subject: [PATCH 04/28] v2 --- .../api/chat/create_conversation_message.go | 251 ------------- .../create_conversation_message_stream.go | 251 ++++++++++++- .../create_conversation_message_stream_v2.go | 331 ++++++++++++++++++ internal/api/chat/delete_conversation.go | 2 +- internal/api/chat/delete_conversation_v2.go | 31 ++ internal/api/chat/get_conversation.go | 2 +- internal/api/chat/get_conversation_v2.go | 35 ++ internal/api/chat/list_conversations.go | 2 +- internal/api/chat/list_conversations_v2.go | 33 ++ internal/api/chat/list_supported_models.go | 49 +++ internal/api/chat/server.go | 18 +- internal/api/chat/update_conversation.go | 4 +- internal/api/chat/update_conversation_v2.go | 46 +++ internal/api/mapper/conversation_v2.go | 46 +++ internal/services/chat_v2.go | 206 +++++++++++ internal/services/toolkit/client/client.go | 4 +- internal/services/toolkit/client/client_v2.go | 108 ++++++ .../services/toolkit/client/completion.go | 8 +- .../services/toolkit/client/completion_v2.go | 125 +++++++ .../toolkit/client/get_conversation_title.go | 6 +- .../client/get_conversation_title_v2.go | 67 ++++ internal/services/toolkit/client/utils.go | 25 ++ .../services/toolkit/handler/stream_v2.go | 223 ++++++++++++ .../services/toolkit/handler/toolcall_v2.go | 103 ++++++ internal/wire_gen.go | 7 +- .../src/pkg/gen/apiclient/chat/v2/chat_pb.ts | 104 +----- 26 files changed, 1722 insertions(+), 365 deletions(-) create mode 100644 internal/api/chat/create_conversation_message_stream_v2.go create mode 
100644 internal/api/chat/delete_conversation_v2.go create mode 100644 internal/api/chat/get_conversation_v2.go create mode 100644 internal/api/chat/list_conversations_v2.go create mode 100644 internal/api/chat/update_conversation_v2.go create mode 100644 internal/api/mapper/conversation_v2.go create mode 100644 internal/services/chat_v2.go create mode 100644 internal/services/toolkit/client/client_v2.go create mode 100644 internal/services/toolkit/client/completion_v2.go create mode 100644 internal/services/toolkit/client/get_conversation_title_v2.go create mode 100644 internal/services/toolkit/handler/stream_v2.go create mode 100644 internal/services/toolkit/handler/toolcall_v2.go diff --git a/internal/api/chat/create_conversation_message.go b/internal/api/chat/create_conversation_message.go index cfe7730d..5c2cd9a8 100644 --- a/internal/api/chat/create_conversation_message.go +++ b/internal/api/chat/create_conversation_message.go @@ -1,252 +1 @@ package chat - -import ( - "context" - - "paperdebugger/internal/libs/contextutil" - "paperdebugger/internal/libs/shared" - "paperdebugger/internal/models" - chatv1 "paperdebugger/pkg/gen/api/chat/v1" - - "github.com/google/uuid" - "github.com/openai/openai-go/v2/responses" - "go.mongodb.org/mongo-driver/v2/bson" - "go.mongodb.org/mongo-driver/v2/mongo" - "google.golang.org/protobuf/encoding/protojson" -) - -// 设计理念: -// 发送给 GPT 之前,消息列表已经构造进 Conversation 对象中(也保存在数据库里) -// 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList) - -// buildUserMessage constructs both the user-facing message and the OpenAI input message -func (s *ChatServer) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv1.ConversationType) (*chatv1.Message, *responses.ResponseInputItemUnionParam, error) { - userPrompt, err := s.chatService.GetPrompt(ctx, userMessage, userSelectedText, conversationType) - if err != nil { - return nil, nil, err - } - - var inappMessage *chatv1.Message - switch 
conversationType { - case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG: - inappMessage = &chatv1.Message{ - MessageId: "pd_msg_user_" + uuid.New().String(), - Payload: &chatv1.MessagePayload{ - MessageType: &chatv1.MessagePayload_User{ - User: &chatv1.MessageTypeUser{ - Content: userPrompt, - }, - }, - }, - } - default: - inappMessage = &chatv1.Message{ - MessageId: "pd_msg_user_" + uuid.New().String(), - Payload: &chatv1.MessagePayload{ - MessageType: &chatv1.MessagePayload_User{ - User: &chatv1.MessageTypeUser{ - Content: userMessage, - SelectedText: &userSelectedText, - }, - }, - }, - } - } - - openaiMessage := &responses.ResponseInputItemUnionParam{ - OfInputMessage: &responses.ResponseInputItemMessageParam{ - Role: "user", - Content: responses.ResponseInputMessageContentListParam{ - responses.ResponseInputContentParamOfInputText(userPrompt), - }, - }, - } - - return inappMessage, openaiMessage, nil -} - -// buildSystemMessage constructs both the user-facing system message and the OpenAI input message -func (s *ChatServer) buildSystemMessage(systemPrompt string) (*chatv1.Message, *responses.ResponseInputItemUnionParam) { - inappMessage := &chatv1.Message{ - MessageId: "pd_msg_system_" + uuid.New().String(), - Payload: &chatv1.MessagePayload{ - MessageType: &chatv1.MessagePayload_System{ - System: &chatv1.MessageTypeSystem{ - Content: systemPrompt, - }, - }, - }, - } - - openaiMessage := &responses.ResponseInputItemUnionParam{ - OfInputMessage: &responses.ResponseInputItemMessageParam{ - Role: "system", - Content: responses.ResponseInputMessageContentListParam{ - responses.ResponseInputContentParamOfInputText(systemPrompt), - }, - }, - } - - return inappMessage, openaiMessage -} - -// convertToBSON converts a protobuf message to BSON -func convertToBSON(msg *chatv1.Message) (bson.M, error) { - jsonBytes, err := protojson.Marshal(msg) - if err != nil { - return nil, err - } - var bsonMsg bson.M - if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err 
!= nil { - return nil, err - } - return bsonMsg, nil -} - -// 创建对话并写入数据库 -// 返回 Conversation 对象 -func (s *ChatServer) createConversation( - ctx context.Context, - userId bson.ObjectID, - projectId string, - latexFullSource string, - projectInstructions string, - userInstructions string, - userMessage string, - userSelectedText string, - modelSlug string, - conversationType chatv1.ConversationType, -) (*models.Conversation, error) { - systemPrompt, err := s.chatService.GetSystemPrompt(ctx, latexFullSource, projectInstructions, userInstructions, conversationType) - if err != nil { - return nil, err - } - - _, openaiSystemMsg := s.buildSystemMessage(systemPrompt) - inappUserMsg, openaiUserMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType) - if err != nil { - return nil, err - } - - messages := []*chatv1.Message{inappUserMsg} - oaiHistory := responses.ResponseNewParamsInputUnion{ - OfInputItemList: responses.ResponseInputParam{*openaiSystemMsg, *openaiUserMsg}, - } - - return s.chatService.InsertConversationToDB( - ctx, userId, projectId, modelSlug, messages, oaiHistory.OfInputItemList, - ) -} - -// 追加消息到对话并写入数据库 -// 返回 Conversation 对象 -func (s *ChatServer) appendConversationMessage( - ctx context.Context, - userId bson.ObjectID, - conversationId string, - userMessage string, - userSelectedText string, - conversationType chatv1.ConversationType, -) (*models.Conversation, error) { - objectID, err := bson.ObjectIDFromHex(conversationId) - if err != nil { - return nil, err - } - - conversation, err := s.chatService.GetConversation(ctx, userId, objectID) - if err != nil { - return nil, err - } - - userMsg, userOaiMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType) - if err != nil { - return nil, err - } - - bsonMsg, err := convertToBSON(userMsg) - if err != nil { - return nil, err - } - conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMsg) - conversation.OpenaiChatHistory = 
append(conversation.OpenaiChatHistory, *userOaiMsg) - - if err := s.chatService.UpdateConversation(conversation); err != nil { - return nil, err - } - - return conversation, nil -} - -// 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 -// conversationType 可以在一次 conversation 中多次切换 -func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { - actor, err := contextutil.GetActor(ctx) - if err != nil { - return ctx, nil, nil, err - } - - project, err := s.projectService.GetProject(ctx, actor.ID, projectId) - if err != nil && err != mongo.ErrNoDocuments { - return ctx, nil, nil, err - } - - userInstructions, err := s.userService.GetUserInstructions(ctx, actor.ID) - if err != nil { - return ctx, nil, nil, err - } - - var latexFullSource string - switch conversationType { - case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG: - latexFullSource = "latex_full_source is not available in debug mode" - default: - if project == nil || project.IsOutOfDate() { - return ctx, nil, nil, shared.ErrProjectOutOfDate("project is out of date") - } - - latexFullSource, err = project.GetFullContent() - if err != nil { - return ctx, nil, nil, err - } - } - - var conversation *models.Conversation - - if conversationId == "" { - conversation, err = s.createConversation( - ctx, - actor.ID, - projectId, - latexFullSource, - project.Instructions, - userInstructions, - userMessage, - userSelectedText, - modelSlug, - conversationType, - ) - } else { - conversation, err = s.appendConversationMessage( - ctx, - actor.ID, - conversationId, - userMessage, - userSelectedText, - conversationType, - ) - } - - if err != nil { - return ctx, nil, nil, err - } - - ctx = contextutil.SetProjectID(ctx, conversation.ProjectID) - ctx = contextutil.SetConversationID(ctx, conversation.ID.Hex()) - - settings, err := 
s.userService.GetUserSettings(ctx, actor.ID) - if err != nil { - return ctx, conversation, nil, err - } - - return ctx, conversation, settings, nil -} diff --git a/internal/api/chat/create_conversation_message_stream.go b/internal/api/chat/create_conversation_message_stream.go index 4323e5ff..01149e1e 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -1,12 +1,19 @@ package chat import ( + "context" "paperdebugger/internal/api/mapper" + "paperdebugger/internal/libs/contextutil" + "paperdebugger/internal/libs/shared" "paperdebugger/internal/models" "paperdebugger/internal/services" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + "github.com/google/uuid" + "github.com/openai/openai-go/v2/responses" "go.mongodb.org/mongo-driver/v2/bson" + "go.mongodb.org/mongo-driver/v2/mongo" + "google.golang.org/protobuf/encoding/protojson" ) func (s *ChatServer) sendStreamError(stream chatv1.ChatService_CreateConversationMessageStreamServer, err error) error { @@ -19,6 +26,242 @@ func (s *ChatServer) sendStreamError(stream chatv1.ChatService_CreateConversatio }) } +// 设计理念: +// 发送给 GPT 之前,消息列表已经构造进 Conversation 对象中(也保存在数据库里) +// 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList) + +// buildUserMessage constructs both the user-facing message and the OpenAI input message +func (s *ChatServer) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv1.ConversationType) (*chatv1.Message, *responses.ResponseInputItemUnionParam, error) { + userPrompt, err := s.chatServiceV1.GetPrompt(ctx, userMessage, userSelectedText, conversationType) + if err != nil { + return nil, nil, err + } + + var inappMessage *chatv1.Message + switch conversationType { + case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG: + inappMessage = &chatv1.Message{ + MessageId: "pd_msg_user_" + uuid.New().String(), + Payload: &chatv1.MessagePayload{ + MessageType: &chatv1.MessagePayload_User{ + 
User: &chatv1.MessageTypeUser{ + Content: userPrompt, + }, + }, + }, + } + default: + inappMessage = &chatv1.Message{ + MessageId: "pd_msg_user_" + uuid.New().String(), + Payload: &chatv1.MessagePayload{ + MessageType: &chatv1.MessagePayload_User{ + User: &chatv1.MessageTypeUser{ + Content: userMessage, + SelectedText: &userSelectedText, + }, + }, + }, + } + } + + openaiMessage := &responses.ResponseInputItemUnionParam{ + OfInputMessage: &responses.ResponseInputItemMessageParam{ + Role: "user", + Content: responses.ResponseInputMessageContentListParam{ + responses.ResponseInputContentParamOfInputText(userPrompt), + }, + }, + } + + return inappMessage, openaiMessage, nil +} + +// buildSystemMessage constructs both the user-facing system message and the OpenAI input message +func (s *ChatServer) buildSystemMessage(systemPrompt string) (*chatv1.Message, *responses.ResponseInputItemUnionParam) { + inappMessage := &chatv1.Message{ + MessageId: "pd_msg_system_" + uuid.New().String(), + Payload: &chatv1.MessagePayload{ + MessageType: &chatv1.MessagePayload_System{ + System: &chatv1.MessageTypeSystem{ + Content: systemPrompt, + }, + }, + }, + } + + openaiMessage := &responses.ResponseInputItemUnionParam{ + OfInputMessage: &responses.ResponseInputItemMessageParam{ + Role: "system", + Content: responses.ResponseInputMessageContentListParam{ + responses.ResponseInputContentParamOfInputText(systemPrompt), + }, + }, + } + + return inappMessage, openaiMessage +} + +// convertToBSON converts a protobuf message to BSON +func convertToBSON(msg *chatv1.Message) (bson.M, error) { + jsonBytes, err := protojson.Marshal(msg) + if err != nil { + return nil, err + } + var bsonMsg bson.M + if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err != nil { + return nil, err + } + return bsonMsg, nil +} + +// 创建对话并写入数据库 +// 返回 Conversation 对象 +func (s *ChatServer) createConversation( + ctx context.Context, + userId bson.ObjectID, + projectId string, + latexFullSource string, + 
projectInstructions string, + userInstructions string, + userMessage string, + userSelectedText string, + modelSlug string, + conversationType chatv1.ConversationType, +) (*models.Conversation, error) { + systemPrompt, err := s.chatServiceV1.GetSystemPrompt(ctx, latexFullSource, projectInstructions, userInstructions, conversationType) + if err != nil { + return nil, err + } + + _, openaiSystemMsg := s.buildSystemMessage(systemPrompt) + inappUserMsg, openaiUserMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType) + if err != nil { + return nil, err + } + + messages := []*chatv1.Message{inappUserMsg} + oaiHistory := responses.ResponseNewParamsInputUnion{ + OfInputItemList: responses.ResponseInputParam{*openaiSystemMsg, *openaiUserMsg}, + } + + return s.chatServiceV1.InsertConversationToDB( + ctx, userId, projectId, modelSlug, messages, oaiHistory.OfInputItemList, + ) +} + +// 追加消息到对话并写入数据库 +// 返回 Conversation 对象 +func (s *ChatServer) appendConversationMessage( + ctx context.Context, + userId bson.ObjectID, + conversationId string, + userMessage string, + userSelectedText string, + conversationType chatv1.ConversationType, +) (*models.Conversation, error) { + objectID, err := bson.ObjectIDFromHex(conversationId) + if err != nil { + return nil, err + } + + conversation, err := s.chatServiceV1.GetConversation(ctx, userId, objectID) + if err != nil { + return nil, err + } + + userMsg, userOaiMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType) + if err != nil { + return nil, err + } + + bsonMsg, err := convertToBSON(userMsg) + if err != nil { + return nil, err + } + conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMsg) + conversation.OpenaiChatHistory = append(conversation.OpenaiChatHistory, *userOaiMsg) + + if err := s.chatServiceV1.UpdateConversation(conversation); err != nil { + return nil, err + } + + return conversation, nil +} + +// 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 +// 
conversationType 可以在一次 conversation 中多次切换 +func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return ctx, nil, nil, err + } + + project, err := s.projectService.GetProject(ctx, actor.ID, projectId) + if err != nil && err != mongo.ErrNoDocuments { + return ctx, nil, nil, err + } + + userInstructions, err := s.userService.GetUserInstructions(ctx, actor.ID) + if err != nil { + return ctx, nil, nil, err + } + + var latexFullSource string + switch conversationType { + case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG: + latexFullSource = "latex_full_source is not available in debug mode" + default: + if project == nil || project.IsOutOfDate() { + return ctx, nil, nil, shared.ErrProjectOutOfDate("project is out of date") + } + + latexFullSource, err = project.GetFullContent() + if err != nil { + return ctx, nil, nil, err + } + } + + var conversation *models.Conversation + + if conversationId == "" { + conversation, err = s.createConversation( + ctx, + actor.ID, + projectId, + latexFullSource, + project.Instructions, + userInstructions, + userMessage, + userSelectedText, + modelSlug, + conversationType, + ) + } else { + conversation, err = s.appendConversationMessage( + ctx, + actor.ID, + conversationId, + userMessage, + userSelectedText, + conversationType, + ) + } + + if err != nil { + return ctx, nil, nil, err + } + + ctx = contextutil.SetProjectID(ctx, conversation.ProjectID) + ctx = contextutil.SetConversationID(ctx, conversation.ID.Hex()) + + settings, err := s.userService.GetUserSettings(ctx, actor.ID) + if err != nil { + return ctx, conversation, nil, err + } + + return ctx, conversation, settings, nil +} + func (s *ChatServerV1) CreateConversationMessageStream( req 
*chatv1.CreateConversationMessageStreamRequest, stream chatv1.ChatService_CreateConversationMessageStreamServer, @@ -48,7 +291,7 @@ func (s *ChatServerV1) CreateConversationMessageStream( APIKey: settings.OpenAIAPIKey, } - openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletionStream(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistory, llmProvider) + openaiChatHistory, inappChatHistory, err := s.aiClientV1.ChatCompletionStreamV1(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistory, llmProvider) if err != nil { return s.sendStreamError(stream, err) } @@ -64,7 +307,7 @@ func (s *ChatServerV1) CreateConversationMessageStream( } conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMessages...) conversation.OpenaiChatHistory = openaiChatHistory - if err := s.chatService.UpdateConversation(conversation); err != nil { + if err := s.chatServiceV1.UpdateConversation(conversation); err != nil { return s.sendStreamError(stream, err) } @@ -74,13 +317,13 @@ func (s *ChatServerV1) CreateConversationMessageStream( for i, bsonMsg := range conversation.InappChatHistory { protoMessages[i] = mapper.BSONToChatMessage(bsonMsg) } - title, err := s.aiClient.GetConversationTitle(ctx, protoMessages, llmProvider) + title, err := s.aiClientV1.GetConversationTitle(ctx, protoMessages, llmProvider) if err != nil { s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex()) return } conversation.Title = title - if err := s.chatService.UpdateConversation(conversation); err != nil { + if err := s.chatServiceV1.UpdateConversation(conversation); err != nil { s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex()) return } diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go new file mode 100644 index 00000000..1b6cc9c9 --- 
/dev/null +++ b/internal/api/chat/create_conversation_message_stream_v2.go @@ -0,0 +1,331 @@ +package chat + +import ( + "context" + "paperdebugger/internal/api/mapper" + "paperdebugger/internal/libs/contextutil" + "paperdebugger/internal/libs/shared" + "paperdebugger/internal/models" + "paperdebugger/internal/services" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/google/uuid" + "github.com/openai/openai-go/v2/responses" + "go.mongodb.org/mongo-driver/v2/bson" + "go.mongodb.org/mongo-driver/v2/mongo" + "google.golang.org/protobuf/encoding/protojson" +) + +func (s *ChatServerV2) sendStreamError(stream chatv2.ChatService_CreateConversationMessageStreamServer, err error) error { + return stream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamError{ + StreamError: &chatv2.StreamError{ + ErrorMessage: err.Error(), + }, + }, + }) +} + +// 设计理念: +// 发送给 GPT 之前,消息列表已经构造进 Conversation 对象中(也保存在数据库里) +// 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList) + +// buildUserMessage constructs both the user-facing message and the OpenAI input message +func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv2.ConversationType) (*chatv2.Message, *responses.ResponseInputItemUnionParam, error) { + userPrompt, err := s.chatServiceV2.GetPrompt(ctx, userMessage, userSelectedText, conversationType) + if err != nil { + return nil, nil, err + } + + var inappMessage *chatv2.Message + switch conversationType { + case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG: + inappMessage = &chatv2.Message{ + MessageId: "pd_msg_user_" + uuid.New().String(), + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_User{ + User: &chatv2.MessageTypeUser{ + Content: userPrompt, + }, + }, + }, + } + default: + inappMessage = &chatv2.Message{ + MessageId: "pd_msg_user_" + uuid.New().String(), + Payload: &chatv2.MessagePayload{ + 
MessageType: &chatv2.MessagePayload_User{ + User: &chatv2.MessageTypeUser{ + Content: userMessage, + SelectedText: &userSelectedText, + }, + }, + }, + } + } + + openaiMessage := &responses.ResponseInputItemUnionParam{ + OfInputMessage: &responses.ResponseInputItemMessageParam{ + Role: "user", + Content: responses.ResponseInputMessageContentListParam{ + responses.ResponseInputContentParamOfInputText(userPrompt), + }, + }, + } + + return inappMessage, openaiMessage, nil +} + +// buildSystemMessage constructs both the user-facing system message and the OpenAI input message +func (s *ChatServerV2) buildSystemMessage(systemPrompt string) (*chatv2.Message, *responses.ResponseInputItemUnionParam) { + inappMessage := &chatv2.Message{ + MessageId: "pd_msg_system_" + uuid.New().String(), + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_System{ + System: &chatv2.MessageTypeSystem{ + Content: systemPrompt, + }, + }, + }, + } + + openaiMessage := &responses.ResponseInputItemUnionParam{ + OfInputMessage: &responses.ResponseInputItemMessageParam{ + Role: "system", + Content: responses.ResponseInputMessageContentListParam{ + responses.ResponseInputContentParamOfInputText(systemPrompt), + }, + }, + } + + return inappMessage, openaiMessage +} + +// convertToBSON converts a protobuf message to BSON +func convertToBSONV2(msg *chatv2.Message) (bson.M, error) { + jsonBytes, err := protojson.Marshal(msg) + if err != nil { + return nil, err + } + var bsonMsg bson.M + if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err != nil { + return nil, err + } + return bsonMsg, nil +} + +// 创建对话并写入数据库 +// 返回 Conversation 对象 +func (s *ChatServerV2) createConversation( + ctx context.Context, + userId bson.ObjectID, + projectId string, + latexFullSource string, + projectInstructions string, + userInstructions string, + userMessage string, + userSelectedText string, + modelSlug string, + conversationType chatv2.ConversationType, +) (*models.Conversation, error) { + 
systemPrompt, err := s.chatServiceV2.GetSystemPromptV2(ctx, latexFullSource, projectInstructions, userInstructions, conversationType) + if err != nil { + return nil, err + } + + _, openaiSystemMsg := s.buildSystemMessage(systemPrompt) + inappUserMsg, openaiUserMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType) + if err != nil { + return nil, err + } + + messages := []*chatv2.Message{inappUserMsg} + oaiHistory := responses.ResponseNewParamsInputUnion{ + OfInputItemList: responses.ResponseInputParam{*openaiSystemMsg, *openaiUserMsg}, + } + + return s.chatServiceV2.InsertConversationToDBV2( + ctx, userId, projectId, modelSlug, messages, oaiHistory.OfInputItemList, + ) +} + +// 追加消息到对话并写入数据库 +// 返回 Conversation 对象 +func (s *ChatServerV2) appendConversationMessage( + ctx context.Context, + userId bson.ObjectID, + conversationId string, + userMessage string, + userSelectedText string, + conversationType chatv2.ConversationType, +) (*models.Conversation, error) { + objectID, err := bson.ObjectIDFromHex(conversationId) + if err != nil { + return nil, err + } + + conversation, err := s.chatServiceV2.GetConversationV2(ctx, userId, objectID) + if err != nil { + return nil, err + } + + userMsg, userOaiMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType) + if err != nil { + return nil, err + } + + bsonMsg, err := convertToBSONV2(userMsg) + if err != nil { + return nil, err + } + conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMsg) + conversation.OpenaiChatHistory = append(conversation.OpenaiChatHistory, *userOaiMsg) + + if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { + return nil, err + } + + return conversation, nil +} + +// 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 +// conversationType 可以在一次 conversation 中多次切换 +func (s *ChatServerV2) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug 
string, conversationType chatv2.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return ctx, nil, nil, err + } + + project, err := s.projectService.GetProject(ctx, actor.ID, projectId) + if err != nil && err != mongo.ErrNoDocuments { + return ctx, nil, nil, err + } + + userInstructions, err := s.userService.GetUserInstructions(ctx, actor.ID) + if err != nil { + return ctx, nil, nil, err + } + + var latexFullSource string + switch conversationType { + case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG: + latexFullSource = "latex_full_source is not available in debug mode" + default: + if project == nil || project.IsOutOfDate() { + return ctx, nil, nil, shared.ErrProjectOutOfDate("project is out of date") + } + + latexFullSource, err = project.GetFullContent() + if err != nil { + return ctx, nil, nil, err + } + } + + var conversation *models.Conversation + + if conversationId == "" { + conversation, err = s.createConversation( + ctx, + actor.ID, + projectId, + latexFullSource, + project.Instructions, + userInstructions, + userMessage, + userSelectedText, + modelSlug, + conversationType, + ) + } else { + conversation, err = s.appendConversationMessage( + ctx, + actor.ID, + conversationId, + userMessage, + userSelectedText, + conversationType, + ) + } + + if err != nil { + return ctx, nil, nil, err + } + + ctx = contextutil.SetProjectID(ctx, conversation.ProjectID) + ctx = contextutil.SetConversationID(ctx, conversation.ID.Hex()) + + settings, err := s.userService.GetUserSettings(ctx, actor.ID) + if err != nil { + return ctx, conversation, nil, err + } + + return ctx, conversation, settings, nil +} + +func (s *ChatServerV2) CreateConversationMessageStream( + req *chatv2.CreateConversationMessageStreamRequest, + stream chatv2.ChatService_CreateConversationMessageStreamServer, +) error { + ctx := stream.Context() + + modelSlug := req.GetModelSlug() + ctx, conversation, 
settings, err := s.prepare( + ctx, + req.GetProjectId(), + req.GetConversationId(), + req.GetUserMessage(), + req.GetUserSelectedText(), + modelSlug, + req.GetConversationType(), + ) + if err != nil { + return s.sendStreamError(stream, err) + } + + // 用法跟 ChatCompletion 一样,只是传递了 stream 参数 + llmProvider := &models.LLMProviderConfig{ + APIKey: settings.OpenAIAPIKey, + } + + openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistory, llmProvider) + if err != nil { + return s.sendStreamError(stream, err) + } + + // 附加消息到对话 + bsonMessages := make([]bson.M, len(inappChatHistory)) + for i := range inappChatHistory { + bsonMsg, err := convertToBSONV2(&inappChatHistory[i]) + if err != nil { + return s.sendStreamError(stream, err) + } + bsonMessages[i] = bsonMsg + } + conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMessages...) + conversation.OpenaiChatHistory = openaiChatHistory + if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { + return s.sendStreamError(stream, err) + } + + if conversation.Title == services.DefaultConversationTitle { + go func() { + protoMessages := make([]*chatv2.Message, len(conversation.InappChatHistory)) + for i, bsonMsg := range conversation.InappChatHistory { + protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg) + } + title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider) + if err != nil { + s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex()) + return + } + conversation.Title = title + if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { + s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex()) + return + } + }() + } + + // The final conversation object is NOT returned + return nil +} diff --git 
a/internal/api/chat/delete_conversation.go b/internal/api/chat/delete_conversation.go index b759b069..bb3463dd 100644 --- a/internal/api/chat/delete_conversation.go +++ b/internal/api/chat/delete_conversation.go @@ -23,7 +23,7 @@ func (s *ChatServerV1) DeleteConversation( return nil, err } - err = s.chatService.DeleteConversation(ctx, actor.ID, conversationID) + err = s.chatServiceV1.DeleteConversation(ctx, actor.ID, conversationID) if err != nil { return nil, err } diff --git a/internal/api/chat/delete_conversation_v2.go b/internal/api/chat/delete_conversation_v2.go new file mode 100644 index 00000000..ee16c222 --- /dev/null +++ b/internal/api/chat/delete_conversation_v2.go @@ -0,0 +1,31 @@ +package chat + +import ( + "context" + + "paperdebugger/internal/libs/contextutil" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "go.mongodb.org/mongo-driver/v2/bson" +) + +func (s *ChatServerV2) DeleteConversation( + ctx context.Context, + req *chatv2.DeleteConversationRequest, +) (*chatv2.DeleteConversationResponse, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return nil, err + } + + objectID, err := bson.ObjectIDFromHex(req.GetConversationId()) + if err != nil { + return nil, err + } + + if err := s.chatServiceV2.DeleteConversationV2(ctx, actor.ID, objectID); err != nil { + return nil, err + } + + return &chatv2.DeleteConversationResponse{}, nil +} diff --git a/internal/api/chat/get_conversation.go b/internal/api/chat/get_conversation.go index b75223e0..de1463c6 100644 --- a/internal/api/chat/get_conversation.go +++ b/internal/api/chat/get_conversation.go @@ -24,7 +24,7 @@ func (s *ChatServerV1) GetConversation( return nil, err } - conversation, err := s.chatService.GetConversation(ctx, actor.ID, conversationID) + conversation, err := s.chatServiceV1.GetConversation(ctx, actor.ID, conversationID) if err != nil { return nil, err } diff --git a/internal/api/chat/get_conversation_v2.go b/internal/api/chat/get_conversation_v2.go new file mode 100644 
index 00000000..2d69920e --- /dev/null +++ b/internal/api/chat/get_conversation_v2.go @@ -0,0 +1,35 @@ +package chat + +import ( + "context" + + "paperdebugger/internal/api/mapper" + "paperdebugger/internal/libs/contextutil" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "go.mongodb.org/mongo-driver/v2/bson" +) + +func (s *ChatServerV2) GetConversation( + ctx context.Context, + req *chatv2.GetConversationRequest, +) (*chatv2.GetConversationResponse, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return nil, err + } + + conversationID, err := bson.ObjectIDFromHex(req.GetConversationId()) + if err != nil { + return nil, err + } + + conversation, err := s.chatServiceV2.GetConversationV2(ctx, actor.ID, conversationID) + if err != nil { + return nil, err + } + + return &chatv2.GetConversationResponse{ + Conversation: mapper.MapModelConversationToProtoV2(conversation), + }, nil +} diff --git a/internal/api/chat/list_conversations.go b/internal/api/chat/list_conversations.go index 948be113..4db2b65f 100644 --- a/internal/api/chat/list_conversations.go +++ b/internal/api/chat/list_conversations.go @@ -20,7 +20,7 @@ func (s *ChatServerV1) ListConversations( return nil, err } - conversations, err := s.chatService.ListConversations(ctx, actor.ID, req.GetProjectId()) + conversations, err := s.chatServiceV1.ListConversations(ctx, actor.ID, req.GetProjectId()) if err != nil { return nil, err } diff --git a/internal/api/chat/list_conversations_v2.go b/internal/api/chat/list_conversations_v2.go new file mode 100644 index 00000000..2b6fbf2e --- /dev/null +++ b/internal/api/chat/list_conversations_v2.go @@ -0,0 +1,33 @@ +package chat + +import ( + "context" + + "paperdebugger/internal/api/mapper" + "paperdebugger/internal/libs/contextutil" + "paperdebugger/internal/models" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/samber/lo" +) + +func (s *ChatServerV2) ListConversations( + ctx context.Context, + req *chatv2.ListConversationsRequest, +) 
(*chatv2.ListConversationsResponse, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return nil, err + } + + conversations, err := s.chatServiceV2.ListConversationsV2(ctx, actor.ID, req.GetProjectId()) + if err != nil { + return nil, err + } + + return &chatv2.ListConversationsResponse{ + Conversations: lo.Map(conversations, func(conversation *models.Conversation, _ int) *chatv2.Conversation { + return mapper.MapModelConversationToProtoV2(conversation) + }), + }, nil +} diff --git a/internal/api/chat/list_supported_models.go b/internal/api/chat/list_supported_models.go index 17e01e72..6e4db6d7 100644 --- a/internal/api/chat/list_supported_models.go +++ b/internal/api/chat/list_supported_models.go @@ -6,6 +6,7 @@ import ( "paperdebugger/internal/libs/contextutil" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" "github.com/openai/openai-go/v2" ) @@ -102,3 +103,51 @@ func (s *ChatServerV1) ListSupportedModels( Models: models, }, nil } + +func (s *ChatServerV2) ListSupportedModels( + ctx context.Context, + req *chatv2.ListSupportedModelsRequest, +) (*chatv2.ListSupportedModelsResponse, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return nil, err + } + + settings, err := s.userService.GetUserSettings(ctx, actor.ID) + if err != nil { + return nil, err + } + + var models []*chatv2.SupportedModel + // Copied from V1 logic + if strings.TrimSpace(settings.OpenAIAPIKey) == "" { + models = []*chatv2.SupportedModel{ + {Name: "GPT-4o", Slug: openai.ChatModelGPT4o}, + {Name: "GPT-4.1", Slug: openai.ChatModelGPT4_1}, + {Name: "GPT-4.1-mini", Slug: openai.ChatModelGPT4_1Mini}, + } + } else { + models = []*chatv2.SupportedModel{ + {Name: "GPT 4o", Slug: openai.ChatModelGPT4o}, + {Name: "GPT 4.1", Slug: openai.ChatModelGPT4_1}, + {Name: "GPT 4.1 mini", Slug: openai.ChatModelGPT4_1Mini}, + {Name: "GPT 5", Slug: openai.ChatModelGPT5}, + {Name: "GPT 5 mini", Slug: openai.ChatModelGPT5Mini}, + {Name: 
"GPT 5 nano", Slug: openai.ChatModelGPT5Nano}, + {Name: "GPT 5 Chat Latest", Slug: openai.ChatModelGPT5ChatLatest}, + {Name: "o1", Slug: openai.ChatModelO1}, + {Name: "o1 mini", Slug: openai.ChatModelO1Mini}, + {Name: "o3", Slug: openai.ChatModelO3}, + {Name: "o3 mini", Slug: openai.ChatModelO3Mini}, + {Name: "o4 mini", Slug: openai.ChatModelO4Mini}, + {Name: "Codex Mini Latest", Slug: openai.ChatModelCodexMiniLatest}, + } + } + + return &chatv2.ListSupportedModelsResponse{ + Models: models, + }, nil +} + +// CreateConversationMessageStream is more complex as it involves streaming response mapping. +// I'll implement it separately or in the same file. diff --git a/internal/api/chat/server.go b/internal/api/chat/server.go index 5c52bff4..e45621fe 100644 --- a/internal/api/chat/server.go +++ b/internal/api/chat/server.go @@ -10,8 +10,10 @@ import ( ) type ChatServer struct { - aiClient *aiclient.AIClient - chatService *services.ChatService + aiClientV1 *aiclient.AIClient + aiClientV2 *aiclient.AIClientV2 + chatServiceV1 *services.ChatService + chatServiceV2 *services.ChatServiceV2 projectService *services.ProjectService userService *services.UserService logger *logger.Logger @@ -29,8 +31,10 @@ type ChatServerV2 struct { } func NewChatServer( - aiClient *aiclient.AIClient, + aiClientV1 *aiclient.AIClient, + aiClientV2 *aiclient.AIClientV2, chatService *services.ChatService, + chatServiceV2 *services.ChatServiceV2, projectService *services.ProjectService, userService *services.UserService, logger *logger.Logger, @@ -38,17 +42,19 @@ func NewChatServer( ) chatv1.ChatServiceServer { return &ChatServerV1{ ChatServer: &ChatServer{ - aiClient: aiClient, - chatService: chatService, + aiClientV1: aiClientV1, + aiClientV2: aiClientV2, projectService: projectService, userService: userService, logger: logger, + chatServiceV1: chatService, + chatServiceV2: chatServiceV2, cfg: cfg, }, } } -func NewChatServerV2(v1Server chatv1.ChatServiceServer) chatv2.ChatServiceServer { +func 
NewChatServerV2(v1Server chatv1.ChatServiceServer, chatService *services.ChatServiceV2) chatv2.ChatServiceServer { if s, ok := v1Server.(*ChatServerV1); ok { return &ChatServerV2{ ChatServer: s.ChatServer, diff --git a/internal/api/chat/update_conversation.go b/internal/api/chat/update_conversation.go index eb116428..4503d4f9 100644 --- a/internal/api/chat/update_conversation.go +++ b/internal/api/chat/update_conversation.go @@ -25,7 +25,7 @@ func (s *ChatServerV1) UpdateConversation( return nil, err } - conversation, err := s.chatService.GetConversation(ctx, actor.ID, conversationID) + conversation, err := s.chatServiceV1.GetConversation(ctx, actor.ID, conversationID) if err != nil { return nil, err } @@ -35,7 +35,7 @@ func (s *ChatServerV1) UpdateConversation( } conversation.Title = req.GetTitle() - err = s.chatService.UpdateConversation(conversation) + err = s.chatServiceV1.UpdateConversation(conversation) if err != nil { return nil, err } diff --git a/internal/api/chat/update_conversation_v2.go b/internal/api/chat/update_conversation_v2.go new file mode 100644 index 00000000..d6855de3 --- /dev/null +++ b/internal/api/chat/update_conversation_v2.go @@ -0,0 +1,46 @@ +package chat + +import ( + "context" + + "paperdebugger/internal/api/mapper" + "paperdebugger/internal/libs/contextutil" + "paperdebugger/internal/libs/shared" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "go.mongodb.org/mongo-driver/v2/bson" +) + +func (s *ChatServerV2) UpdateConversation( + ctx context.Context, + req *chatv2.UpdateConversationRequest, +) (*chatv2.UpdateConversationResponse, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return nil, err + } + + conversationID, err := bson.ObjectIDFromHex(req.GetConversationId()) + if err != nil { + return nil, err + } + + conversation, err := s.chatServiceV2.GetConversationV2(ctx, actor.ID, conversationID) + if err != nil { + return nil, err + } + + if req.GetTitle() == "" { + return nil, shared.ErrBadRequest("title is 
required") + } + + conversation.Title = req.GetTitle() + err = s.chatServiceV2.UpdateConversationV2(conversation) + if err != nil { + return nil, err + } + + return &chatv2.UpdateConversationResponse{ + Conversation: mapper.MapModelConversationToProtoV2(conversation), + }, nil +} diff --git a/internal/api/mapper/conversation_v2.go b/internal/api/mapper/conversation_v2.go new file mode 100644 index 00000000..23c10fae --- /dev/null +++ b/internal/api/mapper/conversation_v2.go @@ -0,0 +1,46 @@ +package mapper + +import ( + "paperdebugger/internal/models" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/samber/lo" + "go.mongodb.org/mongo-driver/v2/bson" + "google.golang.org/protobuf/encoding/protojson" +) + +func BSONToChatMessageV2(msg bson.M) *chatv2.Message { + jsonBytes, err := bson.MarshalExtJSON(msg, true, true) + if err != nil { + return nil + } + + m := &chatv2.Message{} + if err := protojson.Unmarshal(jsonBytes, m); err != nil { + return nil + } + return m +} + +func MapModelConversationToProtoV2(conversation *models.Conversation) *chatv2.Conversation { + // Convert BSON messages back to protobuf messages + filteredMessages := lo.Map(conversation.InappChatHistory, func(msg bson.M, _ int) *chatv2.Message { + return BSONToChatMessageV2(msg) + }) + + filteredMessages = lo.Filter(filteredMessages, func(msg *chatv2.Message, _ int) bool { + return msg.GetPayload().GetMessageType() != &chatv2.MessagePayload_System{} + }) + + modelSlug := conversation.ModelSlug + if modelSlug == "" { + modelSlug = models.SlugFromLanguageModel(models.LanguageModel(conversation.LanguageModel)) + } + + return &chatv2.Conversation{ + Id: conversation.ID.Hex(), + Title: conversation.Title, + ModelSlug: modelSlug, + Messages: filteredMessages, + } +} diff --git a/internal/services/chat_v2.go b/internal/services/chat_v2.go new file mode 100644 index 00000000..5eac89ba --- /dev/null +++ b/internal/services/chat_v2.go @@ -0,0 +1,206 @@ +package services + +import ( + "bytes" + 
"context" + _ "embed" + "strings" + "text/template" + "time" + + "paperdebugger/internal/libs/cfg" + "paperdebugger/internal/libs/db" + "paperdebugger/internal/libs/logger" + "paperdebugger/internal/models" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v2/responses" + "go.mongodb.org/mongo-driver/v2/bson" + "go.mongodb.org/mongo-driver/v2/mongo" + "go.mongodb.org/mongo-driver/v2/mongo/options" + "google.golang.org/protobuf/encoding/protojson" +) + +//go:embed system_prompt_default.tmpl +var systemPromptDefaultTemplateV2 string + +//go:embed system_prompt_debug.tmpl +var systemPromptDebugTemplateV2 string + +//go:embed user_prompt_default.tmpl +var userPromptDefaultTemplateV2 string + +//go:embed user_prompt_debug.tmpl +var userPromptDebugTemplateV2 string + +type ChatServiceV2 struct { + BaseService + conversationCollection *mongo.Collection +} + +// define default conversation title +const DefaultConversationTitleV2 = "New Conversation ." + +func NewChatServiceV2(db *db.DB, cfg *cfg.Cfg, logger *logger.Logger) *ChatServiceV2 { + base := NewBaseService(db, cfg, logger) + return &ChatServiceV2{ + BaseService: base, + conversationCollection: base.db.Collection((models.Conversation{}).CollectionName()), + } +} + +func (s *ChatServiceV2) GetSystemPromptV2(ctx context.Context, fullContent string, projectInstructions string, userInstructions string, conversationType chatv2.ConversationType) (string, error) { + var systemPromptString string + switch conversationType { + case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG: + systemPromptString = systemPromptDebugTemplateV2 + default: + systemPromptString = systemPromptDefaultTemplateV2 + } + + tmpl := template.Must(template.New("system_prompt").Parse(systemPromptString)) + + var systemPromptBuffer bytes.Buffer + if err := tmpl.Execute(&systemPromptBuffer, map[string]string{ + "FullContent": fullContent, + "ProjectInstructions": projectInstructions, + "UserInstructions": userInstructions, + 
}); err != nil { + return "", err + } + return strings.TrimSpace(systemPromptBuffer.String()), nil +} + +func (s *ChatServiceV2) GetPrompt(ctx context.Context, content string, selectedText string, conversationType chatv2.ConversationType) (string, error) { + var userPromptString string + switch conversationType { + case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG: + userPromptString = userPromptDebugTemplateV2 + default: + userPromptString = userPromptDefaultTemplateV2 + } + + tmpl := template.Must(template.New("user_prompt").Parse(userPromptString)) + + var userPromptBuffer bytes.Buffer + if err := tmpl.Execute(&userPromptBuffer, map[string]string{ + "UserInput": content, + "SelectedText": selectedText, + }); err != nil { + return "", err + } + return strings.TrimSpace(userPromptBuffer.String()), nil +} + +func (s *ChatServiceV2) InsertConversationToDBV2(ctx context.Context, userID bson.ObjectID, projectID string, modelSlug string, inappChatHistory []*chatv2.Message, openaiChatHistory responses.ResponseInputParam) (*models.Conversation, error) { + // Convert protobuf messages to BSON + bsonMessages := make([]bson.M, len(inappChatHistory)) + for i := range inappChatHistory { + jsonBytes, err := protojson.Marshal(inappChatHistory[i]) + if err != nil { + return nil, err + } + var bsonMsg bson.M + if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err != nil { + return nil, err + } + bsonMessages[i] = bsonMsg + } + + conversation := &models.Conversation{ + BaseModel: models.BaseModel{ + ID: bson.NewObjectID(), + CreatedAt: bson.NewDateTimeFromTime(time.Now()), + UpdatedAt: bson.NewDateTimeFromTime(time.Now()), + }, + UserID: userID, + ProjectID: projectID, + Title: DefaultConversationTitleV2, + ModelSlug: modelSlug, + InappChatHistory: bsonMessages, + OpenaiChatHistory: openaiChatHistory, + } + _, err := s.conversationCollection.InsertOne(ctx, conversation) + if err != nil { + return nil, err + } + return conversation, nil +} + +func (s *ChatServiceV2) 
ListConversationsV2(ctx context.Context, userID bson.ObjectID, projectID string) ([]*models.Conversation, error) { + filter := bson.M{ + "user_id": userID, + "project_id": projectID, + "$or": []bson.M{ + {"deleted_at": nil}, + {"deleted_at": bson.M{"$exists": false}}, + }, + } + opts := options.Find(). + SetProjection(bson.M{ + "inapp_chat_history": 0, + "openai_chat_history": 0, + }). + SetSort(bson.M{"updated_at": -1}). + SetLimit(50) + cursor, err := s.conversationCollection.Find(ctx, filter, opts) + if err != nil { + return nil, err + } + + var conversations []*models.Conversation + err = cursor.All(ctx, &conversations) + if err != nil { + return nil, err + } + return conversations, nil +} + +func (s *ChatServiceV2) GetConversationV2(ctx context.Context, userID bson.ObjectID, conversationID bson.ObjectID) (*models.Conversation, error) { + conversation := &models.Conversation{} + err := s.conversationCollection.FindOne(ctx, bson.M{ + "_id": conversationID, + "user_id": userID, + "$or": []bson.M{ + {"deleted_at": nil}, + {"deleted_at": bson.M{"$exists": false}}, + }, + }).Decode(conversation) + if err != nil { + return nil, err + } + return conversation, nil +} + +func (s *ChatServiceV2) UpdateConversationV2(conversation *models.Conversation) error { + conversation.UpdatedAt = bson.NewDateTimeFromTime(time.Now()) + _, err := s.conversationCollection.UpdateOne( + context.Background(), + bson.M{ + "_id": conversation.ID, + "$or": []bson.M{ + {"deleted_at": nil}, + {"deleted_at": bson.M{"$exists": false}}, + }, + }, + bson.M{"$set": conversation}, + ) + return err +} + +func (s *ChatServiceV2) DeleteConversationV2(ctx context.Context, userID bson.ObjectID, conversationID bson.ObjectID) error { + now := bson.NewDateTimeFromTime(time.Now()) + _, err := s.conversationCollection.UpdateOne( + ctx, + bson.M{ + "_id": conversationID, + "user_id": userID, + "$or": []bson.M{ + {"deleted_at": nil}, + {"deleted_at": bson.M{"$exists": false}}, + }, + }, + bson.M{"$set": 
bson.M{"deleted_at": now, "updated_at": now}}, + ) + return err +} diff --git a/internal/services/toolkit/client/client.go b/internal/services/toolkit/client/client.go index 68599397..ee01ccd9 100644 --- a/internal/services/toolkit/client/client.go +++ b/internal/services/toolkit/client/client.go @@ -17,8 +17,8 @@ import ( ) type AIClient struct { - toolCallHandler *handler.ToolCallHandler - + toolCallHandler *handler.ToolCallHandler + toolCallHandlerV2 *handler.ToolCallHandlerV2 db *mongo.Database functionCallCollection *mongo.Collection diff --git a/internal/services/toolkit/client/client_v2.go b/internal/services/toolkit/client/client_v2.go new file mode 100644 index 00000000..1bcb901e --- /dev/null +++ b/internal/services/toolkit/client/client_v2.go @@ -0,0 +1,108 @@ +package client + +import ( + "paperdebugger/internal/libs/cfg" + "paperdebugger/internal/libs/db" + "paperdebugger/internal/libs/logger" + "paperdebugger/internal/models" + "paperdebugger/internal/services" + "paperdebugger/internal/services/toolkit/handler" + "paperdebugger/internal/services/toolkit/registry" + "paperdebugger/internal/services/toolkit/tools/xtramcp" + + "github.com/openai/openai-go/v2" + "github.com/openai/openai-go/v2/option" + "go.mongodb.org/mongo-driver/v2/mongo" +) + +type AIClientV2 struct { + toolCallHandler *handler.ToolCallHandlerV2 + db *mongo.Database + functionCallCollection *mongo.Collection + + reverseCommentService *services.ReverseCommentService + projectService *services.ProjectService + cfg *cfg.Cfg + logger *logger.Logger +} + +// SetOpenAIClient sets the appropriate OpenAI client based on the LLM provider config. +// If the config specifies a custom endpoint and API key, a new client is created for that endpoint. 
+func (a *AIClientV2) GetOpenAIClient(llmConfig *models.LLMProviderConfig) *openai.Client { + var Endpoint string = llmConfig.Endpoint + var APIKey string = llmConfig.APIKey + + if Endpoint == "" { + Endpoint = a.cfg.OpenAIBaseURL + } + + if APIKey == "" { + APIKey = a.cfg.OpenAIAPIKey + } + + opts := []option.RequestOption{ + option.WithAPIKey(APIKey), + option.WithBaseURL(Endpoint), + } + + client := openai.NewClient(opts...) + return &client +} + +func NewAIClientV2( + db *db.DB, + + reverseCommentService *services.ReverseCommentService, + projectService *services.ProjectService, + cfg *cfg.Cfg, + logger *logger.Logger, +) *AIClientV2 { + database := db.Database("paperdebugger") + oaiClient := openai.NewClient( + option.WithBaseURL(cfg.OpenAIBaseURL), + option.WithAPIKey(cfg.OpenAIAPIKey), + ) + CheckOpenAIWorks(oaiClient, logger) + // toolPaperScore := tools.NewPaperScoreTool(db, projectService) + // toolPaperScoreComment := tools.NewPaperScoreCommentTool(db, projectService, reverseCommentService) + + toolRegistry := registry.NewToolRegistry() + + // toolRegistry.Register("always_exception", tools.AlwaysExceptionToolDescription, tools.AlwaysExceptionTool) + // toolRegistry.Register("greeting", tools.GreetingToolDescription, tools.GreetingTool) + + // Load tools dynamically from backend + xtraMCPLoader := xtramcp.NewXtraMCPLoader(db, projectService, cfg.XtraMCPURI) + + // initialize MCP session first and log session ID + sessionID, err := xtraMCPLoader.InitializeMCP() + if err != nil { + logger.Errorf("[AI Client] Failed to initialize XtraMCP session: %v", err) + // TODO: Fallback to static tools or exit? 
+ } else { + logger.Info("[AI Client] XtraMCP session initialized", "sessionID", sessionID) + + // dynamically load all tools from XtraMCP backend + err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry) + if err != nil { + logger.Errorf("[AI Client] Failed to load XtraMCP tools: %v", err) + } else { + logger.Info("[AI Client] Successfully loaded XtraMCP tools") + } + } + + toolCallHandler := handler.NewToolCallHandlerV2(toolRegistry) + client := &AIClientV2{ + toolCallHandler: toolCallHandler, + + db: database, + functionCallCollection: database.Collection((models.FunctionCall{}).CollectionName()), + + reverseCommentService: reverseCommentService, + projectService: projectService, + cfg: cfg, + logger: logger, + } + + return client +} diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index f4c13259..7c90cc32 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -21,8 +21,8 @@ import ( // 1. The full chat history sent to the language model (including any tool call results). // 2. The incremental chat history visible to the user (including tool call results and assistant responses). // 3. An error, if any occurred during the process. 
-func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { - openaiChatHistory, inappChatHistory, err := a.ChatCompletionStream(ctx, nil, "", modelSlug, messages, llmProvider) +func (a *AIClient) ChatCompletionV1(ctx context.Context, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { + openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV1(ctx, nil, "", modelSlug, messages, llmProvider) if err != nil { return nil, nil, err } @@ -50,11 +50,11 @@ func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, message // - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop. // - If no tool calls are needed, it appends the assistant's response and exits the loop. // - Finally, it returns the updated chat histories and any error encountered. 
-func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { +func (a *AIClient) ChatCompletionStreamV1(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) { openaiChatHistory := responses.ResponseNewParamsInputUnion{OfInputItemList: messages} inappChatHistory := []chatv1.Message{} - streamHandler := handler.NewStreamHandler(callbackStream, conversationId, modelSlug) + streamHandler := handler.NewStreamHandlerV1(callbackStream, conversationId, modelSlug) streamHandler.SendInitialization() defer func() { diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go new file mode 100644 index 00000000..f6dcaa51 --- /dev/null +++ b/internal/services/toolkit/client/completion_v2.go @@ -0,0 +1,125 @@ +package client + +import ( + "context" + "paperdebugger/internal/models" + "paperdebugger/internal/services/toolkit/handler" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v2/responses" +) + +// ChatCompletion orchestrates a chat completion process with a language model (e.g., GPT), handling tool calls and message history management. +// +// Parameters: +// +// ctx: The context for controlling cancellation and deadlines. +// modelSlug: The language model to use for completion (e.g., GPT-3.5, GPT-4). +// messages: The full chat history (as input) to send to the language model. +// +// Returns: +// 1. The full chat history sent to the language model (including any tool call results). +// 2. 
The incremental chat history visible to the user (including tool call results and assistant responses). +// 3. An error, if any occurred during the process. +func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv2.Message, error) { + openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider) + if err != nil { + return nil, nil, err + } + return openaiChatHistory, inappChatHistory, nil +} + +// ChatCompletionStream orchestrates a streaming chat completion process with a language model (e.g., GPT), handling tool calls, message history management, and real-time streaming of responses to the client. +// +// Parameters: +// +// ctx: The context for controlling cancellation and deadlines. +// callbackStream: The gRPC stream to which incremental responses are sent in real time. +// conversationId: The unique identifier for the conversation session in PaperDebugger. +// languageModel: The language model to use for completion (e.g., GPT-3.5, GPT-4). +// messages: The full chat history (as input) to send to the language model. +// +// Returns: (same as ChatCompletion) +// 1. The full chat history sent to the language model (including any tool call results). +// 2. The incremental chat history visible to the user (including tool call results and assistant responses). +// 3. An error, if any occurred during the process. (However, in the streaming mode, the error is not returned, but sending by callbackStream) +// +// This function works as follows: (same as ChatCompletion) +// - It initializes the chat history for the language model and the user, and sets up a stream handler for real-time updates. +// - It repeatedly sends the current chat history to the language model, receives streaming responses, and forwards them to the client as they arrive. 
+// - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop. +// - If no tool calls are needed, it appends the assistant's response and exits the loop. +// - Finally, it returns the updated chat histories and any error encountered. +func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv2.Message, error) { + openaiChatHistory := responses.ResponseNewParamsInputUnion{OfInputItemList: messages} + inappChatHistory := []chatv2.Message{} + + streamHandler := handler.NewStreamHandlerV2(callbackStream, conversationId, modelSlug) + + streamHandler.SendInitialization() + defer func() { + streamHandler.SendFinalization() + }() + + oaiClient := a.GetOpenAIClient(llmProvider) + params := getDefaultParams(modelSlug, openaiChatHistory, a.toolCallHandler.Registry) + + for { + params.Input = openaiChatHistory + var openaiOutput []responses.ResponseOutputItemUnion + stream := oaiClient.Responses.NewStreaming(context.Background(), params) + + for stream.Next() { + // time.Sleep(200 * time.Millisecond) // DEBUG POINT: change this to test in a slow mode + chunk := stream.Current() + switch chunk.Type { + case "response.output_item.added": + streamHandler.HandleAddedItem(chunk) + case "response.output_item.done": + streamHandler.HandleDoneItem(chunk) // send part end + case "response.incomplete": + // incomplete happens after "output_item.done" (if it happens) + // It's an indicator that the response is incomplete. 
+ openaiOutput = chunk.Response.Output + streamHandler.SendIncompleteIndicator(chunk.Response.IncompleteDetails.Reason, chunk.Response.ID) + case "response.completed": + openaiOutput = chunk.Response.Output + case "response.output_text.delta": + streamHandler.HandleTextDelta(chunk) + } + } + + if err := stream.Err(); err != nil { + return nil, nil, err + } + + // 把 openai 的 response 记录下来,然后执行调用(如果有) + for _, item := range openaiOutput { + if item.Type == "message" && item.Role == "assistant" { + appendAssistantTextResponseV2(&openaiChatHistory, &inappChatHistory, item) + } + } + + // 执行调用(如果有),返回增量数据 + openaiToolHistory, inappToolHistory, err := a.toolCallHandler.HandleToolCallsV2(ctx, openaiOutput, streamHandler) + if err != nil { + return nil, nil, err + } + + // 把工具调用结果记录下来 + if len(openaiToolHistory.OfInputItemList) > 0 { + openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, openaiToolHistory.OfInputItemList...) + inappChatHistory = append(inappChatHistory, inappToolHistory...) 
+ } else { + // response stream is finished, if there is no tool call, then break + break + } + } + + ptrChatHistory := make([]*chatv2.Message, len(inappChatHistory)) + for i := range inappChatHistory { + ptrChatHistory[i] = &inappChatHistory[i] + } + + return openaiChatHistory.OfInputItemList, inappChatHistory, nil +} diff --git a/internal/services/toolkit/client/get_conversation_title.go b/internal/services/toolkit/client/get_conversation_title.go index e6960e81..bd89040f 100644 --- a/internal/services/toolkit/client/get_conversation_title.go +++ b/internal/services/toolkit/client/get_conversation_title.go @@ -7,7 +7,6 @@ import ( "paperdebugger/internal/models" "strings" - "paperdebugger/internal/api/mapper" chatv1 "paperdebugger/pkg/gen/api/chat/v1" "github.com/openai/openai-go/v2/responses" @@ -30,7 +29,7 @@ func (a *AIClient) GetConversationTitle(ctx context.Context, inappChatHistory [] message := strings.Join(messages, "\n") message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. 
Give me the title only, no other text including any other words.", message) - _, resp, err := a.ChatCompletion(ctx, "gpt-4.1-mini", responses.ResponseInputParam{ + _, resp, err := a.ChatCompletionV1(ctx, "gpt-4.1-mini", responses.ResponseInputParam{ { OfInputMessage: &responses.ResponseInputItemMessageParam{ Role: "system", @@ -56,8 +55,7 @@ func (a *AIClient) GetConversationTitle(ctx context.Context, inappChatHistory [] return "Untitled", nil } - msg := mapper.BSONToChatMessage(resp[0]) - title := strings.TrimSpace(msg.Payload.GetAssistant().GetContent()) + title := strings.TrimSpace(resp[0].Payload.GetAssistant().GetContent()) title = strings.TrimLeft(title, "\"") title = strings.TrimRight(title, "\"") title = strings.TrimSpace(title) diff --git a/internal/services/toolkit/client/get_conversation_title_v2.go b/internal/services/toolkit/client/get_conversation_title_v2.go new file mode 100644 index 00000000..2d56231f --- /dev/null +++ b/internal/services/toolkit/client/get_conversation_title_v2.go @@ -0,0 +1,67 @@ +package client + +// TODO: This file should not place in the client package. 
+import ( + "context" + "fmt" + "paperdebugger/internal/models" + "strings" + + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v2/responses" + "github.com/samber/lo" +) + +func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig) (string, error) { + messages := lo.Map(inappChatHistory, func(message *chatv2.Message, _ int) string { + if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_Assistant); ok { + return fmt.Sprintf("Assistant: %s", message.Payload.GetAssistant().GetContent()) + } + if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_User); ok { + return fmt.Sprintf("User: %s", message.Payload.GetUser().GetContent()) + } + if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_ToolCall); ok { + return fmt.Sprintf("Tool '%s' called", message.Payload.GetToolCall().GetName()) + } + return "" + }) + message := strings.Join(messages, "\n") + message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. 
Give me the title only, no other text including any other words.", message) + + _, resp, err := a.ChatCompletionV2(ctx, "gpt-4.1-mini", responses.ResponseInputParam{ + { + OfInputMessage: &responses.ResponseInputItemMessageParam{ + Role: "system", + Content: responses.ResponseInputMessageContentListParam{ + responses.ResponseInputContentParamOfInputText(`You are a helpful assistant that generates a title for a conversation.`), + }, + }, + }, + { + OfInputMessage: &responses.ResponseInputItemMessageParam{ + Role: "user", + Content: responses.ResponseInputMessageContentListParam{ + responses.ResponseInputContentParamOfInputText(message), + }, + }, + }, + }, llmProvider) + if err != nil { + return "", err + } + + if len(resp) == 0 { + return "Untitled", nil + } + + title := strings.TrimSpace(resp[0].Payload.GetAssistant().GetContent()) + title = strings.TrimLeft(title, "\"") + title = strings.TrimRight(title, "\"") + title = strings.TrimSpace(title) + if title == "" { + return "Untitled", nil + } + + return title, nil +} diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index 39b24d87..8915b2c9 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -8,6 +8,7 @@ It is used to append assistant responses to both OpenAI and in-app chat historie import ( "paperdebugger/internal/services/toolkit/registry" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" "github.com/openai/openai-go/v2" "github.com/openai/openai-go/v2/responses" @@ -40,6 +41,30 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI }) } +func appendAssistantTextResponseV2(openaiChatHistory *responses.ResponseNewParamsInputUnion, inappChatHistory *[]chatv2.Message, item responses.ResponseOutputItemUnion) { + text := item.Content[0].Text + response := responses.ResponseInputItemUnionParam{ + OfOutputMessage: 
&responses.ResponseOutputMessageParam{ + Content: []responses.ResponseOutputMessageContentUnionParam{ + { + OfOutputText: &responses.ResponseOutputTextParam{Text: text}, + }, + }, + }, + } + openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, response) + *inappChatHistory = append(*inappChatHistory, chatv2.Message{ + MessageId: "openai_" + item.ID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_Assistant{ + Assistant: &chatv2.MessageTypeAssistant{ + Content: text, + }, + }, + }, + }) +} + // getDefaultParams constructs the default parameters for a chat completion request. // The tool registry is managed centrally by the registry package. // The chat history is constructed manually, so Store must be set to false. diff --git a/internal/services/toolkit/handler/stream_v2.go b/internal/services/toolkit/handler/stream_v2.go new file mode 100644 index 00000000..18aaa6d6 --- /dev/null +++ b/internal/services/toolkit/handler/stream_v2.go @@ -0,0 +1,223 @@ +package handler + +import ( + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v2/responses" +) + +type StreamHandlerV2 struct { + callbackStream chatv2.ChatService_CreateConversationMessageStreamServer + conversationId string + modelSlug string +} + +func NewStreamHandlerV2( + callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, + conversationId string, + modelSlug string, +) StreamHandler { + return &StreamHandlerV2{ + callbackStream: callbackStream, + conversationId: conversationId, + modelSlug: modelSlug, + } +} + +func (h *StreamHandlerV2) SendInitialization() { + if h.callbackStream == nil { + return + } + + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamInitialization{ + StreamInitialization: &chatv2.StreamInitialization{ + ConversationId: h.conversationId, + ModelSlug: h.modelSlug, + }, + }, + }) +} + +func (h 
*StreamHandlerV2) HandleAddedItem(chunk responses.ResponseStreamEventUnion) { + if h.callbackStream == nil { + return + } + if chunk.Item.Type == "message" { + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ + StreamPartBegin: &chatv2.StreamPartBegin{ + MessageId: "openai_" + chunk.Item.ID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_Assistant{ + Assistant: &chatv2.MessageTypeAssistant{ + ModelSlug: h.modelSlug, + }, + }, + }, + }, + }, + }) + } else if chunk.Item.Type == "function_call" { + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ + StreamPartBegin: &chatv2.StreamPartBegin{ + MessageId: "openai_" + chunk.Item.ID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{ + ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{ + Name: chunk.Item.Name, + }, + }, + }, + }, + }, + }) + } +} + +func (h *StreamHandlerV2) HandleDoneItem(chunk responses.ResponseStreamEventUnion) { + if h.callbackStream == nil { + return + } + item := chunk.Item + switch item.Type { + case "message": + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ + StreamPartEnd: &chatv2.StreamPartEnd{ + MessageId: "openai_" + item.ID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_Assistant{ + Assistant: &chatv2.MessageTypeAssistant{ + Content: item.Content[0].Text, + ModelSlug: h.modelSlug, + }, + }, + }, + }, + }, + }) + case "function_call": + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ + StreamPartEnd: &chatv2.StreamPartEnd{ + MessageId: "openai_" + item.ID, + 
Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{ + ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{ + Name: item.Name, + Args: item.Arguments, + }, + }, + }, + }, + }, + }) + default: + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ + StreamPartEnd: &chatv2.StreamPartEnd{ + MessageId: "openai_" + item.ID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_Unknown{ + Unknown: &chatv2.MessageTypeUnknown{ + Description: "Unknown message type: " + item.Type, + }, + }, + }, + }, + }, + }) + } +} + +func (h *StreamHandlerV2) HandleTextDelta(chunk responses.ResponseStreamEventUnion) { + if h.callbackStream == nil { + return + } + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_MessageChunk{ + MessageChunk: &chatv2.MessageChunk{ + MessageId: "openai_" + chunk.ItemID, + Delta: chunk.Delta, + }, + }, + }) +} + +func (h *StreamHandlerV2) SendIncompleteIndicator(reason string, responseId string) { + if h.callbackStream == nil { + return + } + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_IncompleteIndicator{ + IncompleteIndicator: &chatv2.IncompleteIndicator{ + Reason: reason, + ResponseId: responseId, + }, + }, + }) +} + +func (h *StreamHandlerV2) SendFinalization() { + if h.callbackStream == nil { + return + } + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamFinalization{ + StreamFinalization: &chatv2.StreamFinalization{ + ConversationId: h.conversationId, + }, + }, + }) +} + +func (h *StreamHandlerV2) SendToolCallBegin(toolCall responses.ResponseFunctionToolCall) { + if h.callbackStream == nil { + return + 
} + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ + StreamPartBegin: &chatv2.StreamPartBegin{ + MessageId: "openai_" + toolCall.CallID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCall{ + ToolCall: &chatv2.MessageTypeToolCall{ + Name: toolCall.Name, + Args: toolCall.Arguments, + }, + }, + }, + }, + }, + }) +} + +func (h *StreamHandlerV2) SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error) { + if h.callbackStream == nil { + return + } + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ + StreamPartEnd: &chatv2.StreamPartEnd{ + MessageId: "openai_" + toolCall.CallID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCall{ + ToolCall: &chatv2.MessageTypeToolCall{ + Name: toolCall.Name, + Args: toolCall.Arguments, + Result: result, + Error: func() string { + if err != nil { + return err.Error() + } + return "" + }(), + }, + }, + }, + }, + }, + }) +} diff --git a/internal/services/toolkit/handler/toolcall_v2.go b/internal/services/toolkit/handler/toolcall_v2.go new file mode 100644 index 00000000..9c442c19 --- /dev/null +++ b/internal/services/toolkit/handler/toolcall_v2.go @@ -0,0 +1,103 @@ +package handler + +import ( + "context" + "paperdebugger/internal/services/toolkit/registry" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v2/responses" +) + +const ( + messageTypeFunctionCallV2 = "function_call" + messageTypeMessageV2 = "message" + roleAssistantV2 = "assistant" +) + +// ToolCallHandler is responsible for handling tool calls by dispatching them to the appropriate tool registry +// and managing the chat history for both OpenAI and in-app chat systems. 
+type ToolCallHandlerV2 struct { + Registry *registry.ToolRegistry // Registry containing available tools for function calls +} + +func NewToolCallHandlerV2(toolRegistry *registry.ToolRegistry) *ToolCallHandlerV2 { + return &ToolCallHandlerV2{ + Registry: toolRegistry, + } +} + +// HandleToolCalls processes a list of tool call outputs, invokes the corresponding tools, and constructs +// both OpenAI and in-app chat histories reflecting the tool call and its result. +// +// Parameters: +// ctx: The context for cancellation and deadlines. +// outputs: A slice of ResponseOutputItemUnion representing outputs from the model, possibly containing tool calls. +// streamHandler: Optional handler for streaming tool call events (can be nil). +// +// Returns: +// - openaiChatHistory: The OpenAI-compatible chat history including tool call and output items. +// - inappChatHistory: The in-app chat history as a slice of chatv2.Message, reflecting tool call events. +// - error: Any error encountered during processing (always nil in current implementation). +func (h *ToolCallHandlerV2) HandleToolCallsV2(ctx context.Context, outputs []responses.ResponseOutputItemUnion, streamHandler StreamHandler) (responses.ResponseNewParamsInputUnion, []chatv2.Message, error) { + openaiChatHistory := responses.ResponseNewParamsInputUnion{} // Accumulates OpenAI chat history items + inappChatHistory := []chatv2.Message{} // Accumulates in-app chat history messages + + // Iterate over each output item to process tool calls + for _, output := range outputs { + if output.Type == messageTypeFunctionCallV2 { + toolCall := output.AsFunctionCall() + + // According to OpenAI, function_call and function_call_output must appear in pairs in the chat history. + // Add the function call to the OpenAI chat history. 
+ openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, responses.ResponseInputItemParamOfFunctionCall( + toolCall.Arguments, + toolCall.CallID, + toolCall.Name, + )) + + // Notify the stream handler that a tool call is beginning. + if streamHandler != nil { + streamHandler.SendToolCallBegin(toolCall) + } + result, err := h.Registry.Call(ctx, toolCall.CallID, toolCall.Name, []byte(toolCall.Arguments)) + if streamHandler != nil { + streamHandler.SendToolCallEnd(toolCall, result, err) + } + + if err != nil { + // If there was an error, append an error output to OpenAI chat history and in-app chat history. + openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, responses.ResponseInputItemParamOfFunctionCallOutput(toolCall.CallID, "Error: "+err.Error())) + inappChatHistory = append(inappChatHistory, chatv2.Message{ + MessageId: "openai_" + toolCall.CallID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCall{ + ToolCall: &chatv2.MessageTypeToolCall{ + Name: toolCall.Name, + Args: toolCall.Arguments, + Error: err.Error(), + }, + }, + }, + }) + } else { + // On success, append the result to both OpenAI and in-app chat histories. 
+ openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, responses.ResponseInputItemParamOfFunctionCallOutput(toolCall.CallID, result)) + inappChatHistory = append(inappChatHistory, chatv2.Message{ + MessageId: "openai_" + toolCall.CallID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCall{ + ToolCall: &chatv2.MessageTypeToolCall{ + Name: toolCall.Name, + Args: toolCall.Arguments, + Result: result, + }, + }, + }, + }) + } + } + } + + // Return both chat histories and nil error (no error aggregation in this implementation) + return openaiChatHistory, inappChatHistory, nil +} diff --git a/internal/wire_gen.go b/internal/wire_gen.go index 726f8f87..d467b70c 100644 --- a/internal/wire_gen.go +++ b/internal/wire_gen.go @@ -7,7 +7,6 @@ package internal import ( - "github.com/google/wire" "paperdebugger/internal/api" "paperdebugger/internal/api/auth" "paperdebugger/internal/api/chat" @@ -19,6 +18,8 @@ import ( "paperdebugger/internal/libs/logger" "paperdebugger/internal/services" "paperdebugger/internal/services/toolkit/client" + + "github.com/google/wire" ) // Injectors from wire.go: @@ -36,8 +37,10 @@ func InitializeApp() (*api.Server, error) { projectService := services.NewProjectService(dbDB, cfgCfg, loggerLogger) reverseCommentService := services.NewReverseCommentService(dbDB, cfgCfg, loggerLogger, projectService) aiClient := client.NewAIClient(dbDB, reverseCommentService, projectService, cfgCfg, loggerLogger) + aiClientV2 := client.NewAIClientV2(dbDB, reverseCommentService, projectService, cfgCfg, loggerLogger) chatService := services.NewChatService(dbDB, cfgCfg, loggerLogger) - chatServiceServer := chat.NewChatServer(aiClient, chatService, projectService, userService, loggerLogger, cfgCfg) + chatServiceV2 := services.NewChatServiceV2(dbDB, cfgCfg, loggerLogger) + chatServiceServer := chat.NewChatServer(aiClient, aiClientV2, chatService, chatServiceV2, projectService, userService, loggerLogger, cfgCfg) 
promptService := services.NewPromptService(dbDB, cfgCfg, loggerLogger) userServiceServer := user.NewUserServer(userService, promptService, cfgCfg, loggerLogger) projectServiceServer := project.NewProjectServer(projectService, loggerLogger, cfgCfg) diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts index dbbd6824..f4c56bf8 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts @@ -11,7 +11,7 @@ import type { Message as Message$1 } from "@bufbuild/protobuf"; * Describes the file chat/v2/chat.proto. */ export const file_chat_v2_chat: GenFile = /*@__PURE__*/ - fileDesc("ChJjaGF0L3YyL2NoYXQucHJvdG8SB2NoYXQudjIiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIjsKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJQCg9NZXNzYWdlVHlwZVVzZXISDwoHY29udGVudBgBIAEoCRIaCg1zZWxlY3RlZF90ZXh0GAIgASgJSACIAQFCEAoOX3NlbGVjdGVkX3RleHQiKQoSTWVzc2FnZVR5cGVVbmtub3duEhMKC2Rlc2NyaXB0aW9uGAEgASgJIuQCCg5NZXNzYWdlUGF5bG9hZBIsCgZzeXN0ZW0YASABKAsyGi5jaGF0LnYyLk1lc3NhZ2VUeXBlU3lzdGVtSAASKAoEdXNlchgCIAEoCzIYLmNoYXQudjIuTWVzc2FnZVR5cGVVc2VySAASMgoJYXNzaXN0YW50GAMgASgLMh0uY2hhdC52Mi5NZXNzYWdlVHlwZUFzc2lzdGFudEgAElMKG3Rvb2xfY2FsbF9wcmVwYXJlX2FyZ3VtZW50cxgEIAEoCzIsLmNoYXQudjIuTWVzc2FnZVR5cGVUb29sQ2FsbFByZXBhcmVBcmd1bWVudHNIABIxCgl0b29sX2NhbGwYBSABKAsyHC5jaGF0LnYyLk1lc3NhZ2VUeXBlVG9vbENhbGxIABIuCgd1bmtub3duGAYgASgLMhsuY2hhdC52Mi5NZXNzYWdlVHlwZVVua25vd25IAEIOCgxtZXNzYWdlX3R5cGUiRwoHTWVzc2FnZRISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkImEKDENvbnZlcnNhdGlvbhIKCgJpZBgBIAEoCRINCgV0aXRsZRgCIAEoCRISCgptb2RlbF9zbHVnGAMgASgJEiIKCG1lc3NhZ2VzGAQgAygLMhAuY2hhdC52Mi5NZXNzYWdlIkIKGExpc3RDb252ZXJzYXRpb25zUmV
xdWVzdBIXCgpwcm9qZWN0X2lkGAEgASgJSACIAQFCDQoLX3Byb2plY3RfaWQiSQoZTGlzdENvbnZlcnNhdGlvbnNSZXNwb25zZRIsCg1jb252ZXJzYXRpb25zGAEgAygLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iMQoWR2V0Q29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiRgoXR2V0Q29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24imwIKIENyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSACIAQESEgoKbW9kZWxfc2x1ZxgDIAEoCRIUCgx1c2VyX21lc3NhZ2UYBCABKAkSHwoSdXNlcl9zZWxlY3RlZF90ZXh0GAUgASgJSAGIAQESOQoRY29udmVyc2F0aW9uX3R5cGUYBiABKA4yGS5jaGF0LnYyLkNvbnZlcnNhdGlvblR5cGVIAogBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE191c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlIlAKIUNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYyLkNvbnZlcnNhdGlvbiJDChlVcGRhdGVDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCRINCgV0aXRsZRgCIAEoCSJJChpVcGRhdGVDb252ZXJzYXRpb25SZXNwb25zZRIrCgxjb252ZXJzYXRpb24YASABKAsyFS5jaGF0LnYyLkNvbnZlcnNhdGlvbiI0ChlEZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0EhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSIcChpEZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSIsCg5TdXBwb3J0ZWRNb2RlbBIMCgRuYW1lGAEgASgJEgwKBHNsdWcYAiABKAkiHAoaTGlzdFN1cHBvcnRlZE1vZGVsc1JlcXVlc3QiRgobTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlEicKBm1vZGVscxgBIAMoCzIXLmNoYXQudjIuU3VwcG9ydGVkTW9kZWwiQwoUU3RyZWFtSW5pdGlhbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEhIKCm1vZGVsX3NsdWcYAiABKAkiTwoPU3RyZWFtUGFydEJlZ2luEhIKCm1lc3NhZ2VfaWQYASABKAkSKAoHcGF5bG9hZBgDIAEoCzIXLmNoYXQudjIuTWVzc2FnZVBheWxvYWQiMQoMTWVzc2FnZUNodW5rEhIKCm1lc3NhZ2VfaWQYASABKAkSDQoFZGVsdGEYAiABKAkiOgoTSW5jb21wbGV0ZUluZGljYXRvchIOCgZyZWFzb24YASABKAkSEwoLcmVzcG9uc2VfaWQYAiABKAkiTQoNU3RyZWFtUGFydEVuZBISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkIi0KElN0cmVhbUZpbmFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkiJAoLU3RyZWFtRXJyb3ISFQoNZXJyb3JfbWVzc2FnZRgBIAEoCSKhAgomQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlcXVlc3QSEgoKcHJvamVjdF9pZBgBIAEoCRIcCg9jb252ZXJzYXRpb25faWQYAiABKAl
IAIgBARISCgptb2RlbF9zbHVnGAMgASgJEhQKDHVzZXJfbWVzc2FnZRgEIAEoCRIfChJ1c2VyX3NlbGVjdGVkX3RleHQYBSABKAlIAYgBARI5ChFjb252ZXJzYXRpb25fdHlwZRgGIAEoDjIZLmNoYXQudjIuQ29udmVyc2F0aW9uVHlwZUgCiAEBQhIKEF9jb252ZXJzYXRpb25faWRCFQoTX3VzZXJfc2VsZWN0ZWRfdGV4dEIUChJfY29udmVyc2F0aW9uX3R5cGUivwMKJ0NyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXNwb25zZRI+ChVzdHJlYW1faW5pdGlhbGl6YXRpb24YASABKAsyHS5jaGF0LnYyLlN0cmVhbUluaXRpYWxpemF0aW9uSAASNQoRc3RyZWFtX3BhcnRfYmVnaW4YAiABKAsyGC5jaGF0LnYyLlN0cmVhbVBhcnRCZWdpbkgAEi4KDW1lc3NhZ2VfY2h1bmsYAyABKAsyFS5jaGF0LnYyLk1lc3NhZ2VDaHVua0gAEjwKFGluY29tcGxldGVfaW5kaWNhdG9yGAQgASgLMhwuY2hhdC52Mi5JbmNvbXBsZXRlSW5kaWNhdG9ySAASMQoPc3RyZWFtX3BhcnRfZW5kGAUgASgLMhYuY2hhdC52Mi5TdHJlYW1QYXJ0RW5kSAASOgoTc3RyZWFtX2ZpbmFsaXphdGlvbhgGIAEoCzIbLmNoYXQudjIuU3RyZWFtRmluYWxpemF0aW9uSAASLAoMc3RyZWFtX2Vycm9yGAcgASgLMhQuY2hhdC52Mi5TdHJlYW1FcnJvckgAQhIKEHJlc3BvbnNlX3BheWxvYWQqUgoQQ29udmVyc2F0aW9uVHlwZRIhCh1DT05WRVJTQVRJT05fVFlQRV9VTlNQRUNJRklFRBAAEhsKF0NPTlZFUlNBVElPTl9UWVBFX0RFQlVHEAEy0ggKC0NoYXRTZXJ2aWNlEoMBChFMaXN0Q29udmVyc2F0aW9ucxIhLmNoYXQudjIuTGlzdENvbnZlcnNhdGlvbnNSZXF1ZXN0GiIuY2hhdC52Mi5MaXN0Q29udmVyc2F0aW9uc1Jlc3BvbnNlIieC0+STAiESHy9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMSjwEKD0dldENvbnZlcnNhdGlvbhIfLmNoYXQudjIuR2V0Q29udmVyc2F0aW9uUmVxdWVzdBogLmNoYXQudjIuR2V0Q29udmVyc2F0aW9uUmVzcG9uc2UiOYLT5JMCMxIxL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKnAQoZQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZRIpLmNoYXQudjIuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVJlcXVlc3QaKi5jaGF0LnYyLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VSZXNwb25zZSIzgtPkkwItOgEqIigvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL21lc3NhZ2VzEsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52Mi5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjIuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPIL
T5JMCNjoBKjIxL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52Mi5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjIuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YyL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MkIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MjtjaGF0djKiAgNDWFiqAgdDaGF0LlYyygIHQ2hhdFxWMuICE0NoYXRcVjJcR1BCTWV0YWRhdGHqAghDaGF0OjpWMmIGcHJvdG8z", [file_google_api_annotations]); + fileDesc("ChJjaGF0L3YyL2NoYXQucHJvdG8SB2NoYXQudjIiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIjsKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJQCg9NZXNzYWdlVHlwZVVzZXISDwoHY29udGVudBgBIAEoCRIaCg1zZWxlY3RlZF90ZXh0GAIgASgJSACIAQFCEAoOX3NlbGVjdGVkX3RleHQiKQoSTWVzc2FnZVR5cGVVbmtub3duEhMKC2Rlc2NyaXB0aW9uGAEgASgJIuQCCg5NZXNzYWdlUGF5bG9hZBIsCgZzeXN0ZW0YASABKAsyGi5jaGF0LnYyLk1lc3NhZ2VUeXBlU3lzdGVtSAASKAoEdXNlchgCIAEoCzIYLmNoYXQudjIuTWVzc2FnZVR5cGVVc2VySAASMgoJYXNzaXN0YW50GAMgASgLMh0uY2hhdC52Mi5NZXNzYWdlVHlwZUFzc2lzdGFudEgAElMKG3Rvb2xfY2FsbF9wcmVwYXJlX2FyZ3VtZW50cxgEIAEoCzIsLmNoYXQudjIuTWVzc2FnZVR5cGVUb29sQ2FsbFByZXBhcmVBcmd1bWVudHNIABIxCgl0b29sX2NhbGwYBSABKAsyHC5jaGF0LnYyLk1lc3NhZ2VUeXBlVG9vbENhbGxIABIuCgd1bmtub3duGAYgASgLMhsuY2hhdC52Mi5NZXNzYWdlVHlwZVVua25vd25IAEIOCgxtZXNzYWdlX3R5cGUiRwoHTWVzc2FnZRISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkImEKDENvbnZlcnNhdGlvbhIKCgJpZBgBIAEoCRINCgV0aXRsZRgCIAEoCRISCgptb2RlbF9zbHVnGAMgASgJEiIKCG1lc3NhZ2VzGAQgAygLMhAuY2hhdC52Mi5NZXNzYWdlIkIKGExpc3RDb252ZXJzYXRpb25zUmVxdWVzdBIXCgpwcm9qZWN0X2lkGAEgASgJ
SACIAQFCDQoLX3Byb2plY3RfaWQiSQoZTGlzdENvbnZlcnNhdGlvbnNSZXNwb25zZRIsCg1jb252ZXJzYXRpb25zGAEgAygLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iMQoWR2V0Q29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiRgoXR2V0Q29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iQwoZVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkSDQoFdGl0bGUYAiABKAkiSQoaVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iNAoZRGVsZXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiHAoaRGVsZXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiLAoOU3VwcG9ydGVkTW9kZWwSDAoEbmFtZRgBIAEoCRIMCgRzbHVnGAIgASgJIhwKGkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0IkYKG0xpc3RTdXBwb3J0ZWRNb2RlbHNSZXNwb25zZRInCgZtb2RlbHMYASADKAsyFy5jaGF0LnYyLlN1cHBvcnRlZE1vZGVsIkMKFFN0cmVhbUluaXRpYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCRISCgptb2RlbF9zbHVnGAIgASgJIk8KD1N0cmVhbVBhcnRCZWdpbhISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkIjEKDE1lc3NhZ2VDaHVuaxISCgptZXNzYWdlX2lkGAEgASgJEg0KBWRlbHRhGAIgASgJIjoKE0luY29tcGxldGVJbmRpY2F0b3ISDgoGcmVhc29uGAEgASgJEhMKC3Jlc3BvbnNlX2lkGAIgASgJIk0KDVN0cmVhbVBhcnRFbmQSEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52Mi5NZXNzYWdlUGF5bG9hZCItChJTdHJlYW1GaW5hbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIiQKC1N0cmVhbUVycm9yEhUKDWVycm9yX21lc3NhZ2UYASABKAkioQIKJkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSACIAQESEgoKbW9kZWxfc2x1ZxgDIAEoCRIUCgx1c2VyX21lc3NhZ2UYBCABKAkSHwoSdXNlcl9zZWxlY3RlZF90ZXh0GAUgASgJSAGIAQESOQoRY29udmVyc2F0aW9uX3R5cGUYBiABKA4yGS5jaGF0LnYyLkNvbnZlcnNhdGlvblR5cGVIAogBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE191c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlIr8DCidDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVzcG9uc2USPgoVc3RyZWFtX2luaXRpYWxpemF0aW9uGAEgASgLMh0uY2hhdC52Mi5TdHJlYW1Jbml0aWFsaXphdGlvbkgAEjUKEXN0cmVhbV9wYXJ0X2JlZ2luGAIgASgLMhguY2hhdC52Mi5TdHJlYW1QYXJ0QmVnaW5IABIuCg1tZXNzYWdlX2NodW5rGAMgASgLMhUu
Y2hhdC52Mi5NZXNzYWdlQ2h1bmtIABI8ChRpbmNvbXBsZXRlX2luZGljYXRvchgEIAEoCzIcLmNoYXQudjIuSW5jb21wbGV0ZUluZGljYXRvckgAEjEKD3N0cmVhbV9wYXJ0X2VuZBgFIAEoCzIWLmNoYXQudjIuU3RyZWFtUGFydEVuZEgAEjoKE3N0cmVhbV9maW5hbGl6YXRpb24YBiABKAsyGy5jaGF0LnYyLlN0cmVhbUZpbmFsaXphdGlvbkgAEiwKDHN0cmVhbV9lcnJvchgHIAEoCzIULmNoYXQudjIuU3RyZWFtRXJyb3JIAEISChByZXNwb25zZV9wYXlsb2FkKlIKEENvbnZlcnNhdGlvblR5cGUSIQodQ09OVkVSU0FUSU9OX1RZUEVfVU5TUEVDSUZJRUQQABIbChdDT05WRVJTQVRJT05fVFlQRV9ERUJVRxABMqgHCgtDaGF0U2VydmljZRKDAQoRTGlzdENvbnZlcnNhdGlvbnMSIS5jaGF0LnYyLkxpc3RDb252ZXJzYXRpb25zUmVxdWVzdBoiLmNoYXQudjIuTGlzdENvbnZlcnNhdGlvbnNSZXNwb25zZSIngtPkkwIhEh8vX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zEo8BCg9HZXRDb252ZXJzYXRpb24SHy5jaGF0LnYyLkdldENvbnZlcnNhdGlvblJlcXVlc3QaIC5jaGF0LnYyLkdldENvbnZlcnNhdGlvblJlc3BvbnNlIjmC0+STAjMSMS9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SwgEKH0NyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW0SLy5jaGF0LnYyLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0GjAuY2hhdC52Mi5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVzcG9uc2UiOoLT5JMCNDoBKiIvL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy9tZXNzYWdlcy9zdHJlYW0wARKbAQoSVXBkYXRlQ29udmVyc2F0aW9uEiIuY2hhdC52Mi5VcGRhdGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52Mi5VcGRhdGVDb252ZXJzYXRpb25SZXNwb25zZSI8gtPkkwI2OgEqMjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EpgBChJEZWxldGVDb252ZXJzYXRpb24SIi5jaGF0LnYyLkRlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QaIy5jaGF0LnYyLkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIjmC0+STAjMqMS9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SggEKE0xpc3RTdXBwb3J0ZWRNb2RlbHMSIy5jaGF0LnYyLkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0GiQuY2hhdC52Mi5MaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2UiIILT5JMCGhIYL19wZC9hcGkvdjIvY2hhdHMvbW9kZWxzQn8KC2NvbS5jaGF0LnYyQglDaGF0UHJvdG9QAVoocGFwZXJkZWJ1Z2dlci9wa2cvZ2VuL2FwaS9jaGF0L3YyO2NoYXR2MqICA0NYWKoCB0NoYXQuVjLKAgdDaGF0XFYy4gITQ2hhdFxWMlxHUEJNZXRhZGF0YeoCCENoYXQ6OlYyYgZwcm90bzM", [file_google_api_annotations]); /** * @generated from message chat.v2.MessageTypeToolCall @@ -332,68 
+332,6 @@ export type GetConversationResponse = Message$1<"chat.v2.GetConversationResponse export const GetConversationResponseSchema: GenMessage = /*@__PURE__*/ messageDesc(file_chat_v2_chat, 12); -/** - * @generated from message chat.v2.CreateConversationMessageRequest - */ -export type CreateConversationMessageRequest = Message$1<"chat.v2.CreateConversationMessageRequest"> & { - /** - * @generated from field: string project_id = 1; - */ - projectId: string; - - /** - * If conversation_id is not provided, - * a new conversation will be created and the id will be returned. - * - * @generated from field: optional string conversation_id = 2; - */ - conversationId?: string; - - /** - * @generated from field: string model_slug = 3; - */ - modelSlug: string; - - /** - * @generated from field: string user_message = 4; - */ - userMessage: string; - - /** - * @generated from field: optional string user_selected_text = 5; - */ - userSelectedText?: string; - - /** - * @generated from field: optional chat.v2.ConversationType conversation_type = 6; - */ - conversationType?: ConversationType; -}; - -/** - * Describes the message chat.v2.CreateConversationMessageRequest. - * Use `create(CreateConversationMessageRequestSchema)` to create a new message. - */ -export const CreateConversationMessageRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 13); - -/** - * @generated from message chat.v2.CreateConversationMessageResponse - */ -export type CreateConversationMessageResponse = Message$1<"chat.v2.CreateConversationMessageResponse"> & { - /** - * @generated from field: chat.v2.Conversation conversation = 1; - */ - conversation?: Conversation; -}; - -/** - * Describes the message chat.v2.CreateConversationMessageResponse. - * Use `create(CreateConversationMessageResponseSchema)` to create a new message. 
- */ -export const CreateConversationMessageResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 14); - /** * @generated from message chat.v2.UpdateConversationRequest */ @@ -414,7 +352,7 @@ export type UpdateConversationRequest = Message$1<"chat.v2.UpdateConversationReq * Use `create(UpdateConversationRequestSchema)` to create a new message. */ export const UpdateConversationRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 15); + messageDesc(file_chat_v2_chat, 13); /** * @generated from message chat.v2.UpdateConversationResponse @@ -431,7 +369,7 @@ export type UpdateConversationResponse = Message$1<"chat.v2.UpdateConversationRe * Use `create(UpdateConversationResponseSchema)` to create a new message. */ export const UpdateConversationResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 16); + messageDesc(file_chat_v2_chat, 14); /** * @generated from message chat.v2.DeleteConversationRequest @@ -448,7 +386,7 @@ export type DeleteConversationRequest = Message$1<"chat.v2.DeleteConversationReq * Use `create(DeleteConversationRequestSchema)` to create a new message. */ export const DeleteConversationRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 17); + messageDesc(file_chat_v2_chat, 15); /** * explicitly empty @@ -463,7 +401,7 @@ export type DeleteConversationResponse = Message$1<"chat.v2.DeleteConversationRe * Use `create(DeleteConversationResponseSchema)` to create a new message. */ export const DeleteConversationResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 18); + messageDesc(file_chat_v2_chat, 16); /** * @generated from message chat.v2.SupportedModel @@ -485,7 +423,7 @@ export type SupportedModel = Message$1<"chat.v2.SupportedModel"> & { * Use `create(SupportedModelSchema)` to create a new message. 
*/ export const SupportedModelSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 19); + messageDesc(file_chat_v2_chat, 17); /** * explicitly empty @@ -500,7 +438,7 @@ export type ListSupportedModelsRequest = Message$1<"chat.v2.ListSupportedModelsR * Use `create(ListSupportedModelsRequestSchema)` to create a new message. */ export const ListSupportedModelsRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 20); + messageDesc(file_chat_v2_chat, 18); /** * @generated from message chat.v2.ListSupportedModelsResponse @@ -517,7 +455,7 @@ export type ListSupportedModelsResponse = Message$1<"chat.v2.ListSupportedModels * Use `create(ListSupportedModelsResponseSchema)` to create a new message. */ export const ListSupportedModelsResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 21); + messageDesc(file_chat_v2_chat, 19); /** * Information sent once at the beginning of a new conversation stream @@ -541,7 +479,7 @@ export type StreamInitialization = Message$1<"chat.v2.StreamInitialization"> & { * Use `create(StreamInitializationSchema)` to create a new message. */ export const StreamInitializationSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 22); + messageDesc(file_chat_v2_chat, 20); /** * Designed as StreamPartBegin and StreamPartEnd to @@ -569,7 +507,7 @@ export type StreamPartBegin = Message$1<"chat.v2.StreamPartBegin"> & { * Use `create(StreamPartBeginSchema)` to create a new message. */ export const StreamPartBeginSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 23); + messageDesc(file_chat_v2_chat, 21); /** * Note: After the StreamPartBegin of tool_call, there can be no MessageChunk, @@ -598,7 +536,7 @@ export type MessageChunk = Message$1<"chat.v2.MessageChunk"> & { * Use `create(MessageChunkSchema)` to create a new message. 
*/ export const MessageChunkSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 24); + messageDesc(file_chat_v2_chat, 22); /** * @generated from message chat.v2.IncompleteIndicator @@ -620,7 +558,7 @@ export type IncompleteIndicator = Message$1<"chat.v2.IncompleteIndicator"> & { * Use `create(IncompleteIndicatorSchema)` to create a new message. */ export const IncompleteIndicatorSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 25); + messageDesc(file_chat_v2_chat, 23); /** * @generated from message chat.v2.StreamPartEnd @@ -642,7 +580,7 @@ export type StreamPartEnd = Message$1<"chat.v2.StreamPartEnd"> & { * Use `create(StreamPartEndSchema)` to create a new message. */ export const StreamPartEndSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 26); + messageDesc(file_chat_v2_chat, 24); /** * Sent when the current AI response is fully streamed @@ -666,7 +604,7 @@ export type StreamFinalization = Message$1<"chat.v2.StreamFinalization"> & { * Use `create(StreamFinalizationSchema)` to create a new message. */ export const StreamFinalizationSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 27); + messageDesc(file_chat_v2_chat, 25); /** * @generated from message chat.v2.StreamError @@ -683,7 +621,7 @@ export type StreamError = Message$1<"chat.v2.StreamError"> & { * Use `create(StreamErrorSchema)` to create a new message. */ export const StreamErrorSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 28); + messageDesc(file_chat_v2_chat, 26); /** * This message should be the same as CreateConversationMessageRequest @@ -729,7 +667,7 @@ export type CreateConversationMessageStreamRequest = Message$1<"chat.v2.CreateCo * Use `create(CreateConversationMessageStreamRequestSchema)` to create a new message. 
*/ export const CreateConversationMessageStreamRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 29); + messageDesc(file_chat_v2_chat, 27); /** * Response for streaming a message within an existing conversation @@ -790,7 +728,7 @@ export type CreateConversationMessageStreamResponse = Message$1<"chat.v2.CreateC * Use `create(CreateConversationMessageStreamResponseSchema)` to create a new message. */ export const CreateConversationMessageStreamResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_chat_v2_chat, 30); + messageDesc(file_chat_v2_chat, 28); /** * @generated from enum chat.v2.ConversationType @@ -835,14 +773,6 @@ export const ChatService: GenService<{ input: typeof GetConversationRequestSchema; output: typeof GetConversationResponseSchema; }, - /** - * @generated from rpc chat.v2.ChatService.CreateConversationMessage - */ - createConversationMessage: { - methodKind: "unary"; - input: typeof CreateConversationMessageRequestSchema; - output: typeof CreateConversationMessageResponseSchema; - }, /** * @generated from rpc chat.v2.ChatService.CreateConversationMessageStream */ From 9a418e434bbe1c13299d6f4f793ac6d65cf167f1 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 21:53:50 +0800 Subject: [PATCH 05/28] v2 api new files --- internal/services/toolkit/client/client.go | 48 +---------------- internal/services/toolkit/client/client_v2.go | 31 +---------- .../services/toolkit/client/completion.go | 5 -- .../services/toolkit/client/completion_v2.go | 5 -- internal/services/toolkit/client/utils.go | 54 +++++++++++++++++++ internal/services/toolkit/handler/stream.go | 3 ++ .../services/toolkit/handler/stream_v2.go | 3 ++ 7 files changed, 64 insertions(+), 85 deletions(-) diff --git a/internal/services/toolkit/client/client.go b/internal/services/toolkit/client/client.go index ee01ccd9..d8642ef3 100644 --- a/internal/services/toolkit/client/client.go +++ b/internal/services/toolkit/client/client.go @@ -1,15 +1,12 @@ 
package client import ( - "context" "paperdebugger/internal/libs/cfg" "paperdebugger/internal/libs/db" "paperdebugger/internal/libs/logger" "paperdebugger/internal/models" "paperdebugger/internal/services" "paperdebugger/internal/services/toolkit/handler" - "paperdebugger/internal/services/toolkit/registry" - "paperdebugger/internal/services/toolkit/tools/xtramcp" "github.com/openai/openai-go/v2" "github.com/openai/openai-go/v2/option" @@ -18,7 +15,6 @@ import ( type AIClient struct { toolCallHandler *handler.ToolCallHandler - toolCallHandlerV2 *handler.ToolCallHandlerV2 db *mongo.Database functionCallCollection *mongo.Collection @@ -65,35 +61,10 @@ func NewAIClient( option.WithAPIKey(cfg.OpenAIAPIKey), ) CheckOpenAIWorks(oaiClient, logger) - // toolPaperScore := tools.NewPaperScoreTool(db, projectService) - // toolPaperScoreComment := tools.NewPaperScoreCommentTool(db, projectService, reverseCommentService) - - toolRegistry := registry.NewToolRegistry() - - // toolRegistry.Register("always_exception", tools.AlwaysExceptionToolDescription, tools.AlwaysExceptionTool) - // toolRegistry.Register("greeting", tools.GreetingToolDescription, tools.GreetingTool) - - // Load tools dynamically from backend - xtraMCPLoader := xtramcp.NewXtraMCPLoader(db, projectService, cfg.XtraMCPURI) - - // initialize MCP session first and log session ID - sessionID, err := xtraMCPLoader.InitializeMCP() - if err != nil { - logger.Errorf("[AI Client] Failed to initialize XtraMCP session: %v", err) - // TODO: Fallback to static tools or exit? 
- } else { - logger.Info("[AI Client] XtraMCP session initialized", "sessionID", sessionID) - - // dynamically load all tools from XtraMCP backend - err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry) - if err != nil { - logger.Errorf("[AI Client] Failed to load XtraMCP tools: %v", err) - } else { - logger.Info("[AI Client] Successfully loaded XtraMCP tools") - } - } + toolRegistry := initializeToolkit(db, projectService, cfg, logger) toolCallHandler := handler.NewToolCallHandler(toolRegistry) + client := &AIClient{ toolCallHandler: toolCallHandler, @@ -108,18 +79,3 @@ func NewAIClient( return client } - -func CheckOpenAIWorks(oaiClient openai.Client, logger *logger.Logger) { - logger.Info("[AI Client] checking if openai client works") - chatCompletion, err := oaiClient.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{ - Messages: []openai.ChatCompletionMessageParamUnion{ - openai.UserMessage("Say 'openai client works'"), - }, - Model: openai.ChatModelGPT4o, - }) - if err != nil { - logger.Errorf("[AI Client] openai client does not work: %v", err) - return - } - logger.Info("[AI Client] openai client works", "response", chatCompletion.Choices[0].Message.Content) -} diff --git a/internal/services/toolkit/client/client_v2.go b/internal/services/toolkit/client/client_v2.go index 1bcb901e..89169544 100644 --- a/internal/services/toolkit/client/client_v2.go +++ b/internal/services/toolkit/client/client_v2.go @@ -7,8 +7,6 @@ import ( "paperdebugger/internal/models" "paperdebugger/internal/services" "paperdebugger/internal/services/toolkit/handler" - "paperdebugger/internal/services/toolkit/registry" - "paperdebugger/internal/services/toolkit/tools/xtramcp" "github.com/openai/openai-go/v2" "github.com/openai/openai-go/v2/option" @@ -63,35 +61,10 @@ func NewAIClientV2( option.WithAPIKey(cfg.OpenAIAPIKey), ) CheckOpenAIWorks(oaiClient, logger) - // toolPaperScore := tools.NewPaperScoreTool(db, projectService) - // toolPaperScoreComment := 
tools.NewPaperScoreCommentTool(db, projectService, reverseCommentService) - - toolRegistry := registry.NewToolRegistry() - - // toolRegistry.Register("always_exception", tools.AlwaysExceptionToolDescription, tools.AlwaysExceptionTool) - // toolRegistry.Register("greeting", tools.GreetingToolDescription, tools.GreetingTool) - - // Load tools dynamically from backend - xtraMCPLoader := xtramcp.NewXtraMCPLoader(db, projectService, cfg.XtraMCPURI) - - // initialize MCP session first and log session ID - sessionID, err := xtraMCPLoader.InitializeMCP() - if err != nil { - logger.Errorf("[AI Client] Failed to initialize XtraMCP session: %v", err) - // TODO: Fallback to static tools or exit? - } else { - logger.Info("[AI Client] XtraMCP session initialized", "sessionID", sessionID) - - // dynamically load all tools from XtraMCP backend - err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry) - if err != nil { - logger.Errorf("[AI Client] Failed to load XtraMCP tools: %v", err) - } else { - logger.Info("[AI Client] Successfully loaded XtraMCP tools") - } - } + toolRegistry := initializeToolkit(db, projectService, cfg, logger) toolCallHandler := handler.NewToolCallHandlerV2(toolRegistry) + client := &AIClientV2{ toolCallHandler: toolCallHandler, diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index 7c90cc32..862d7af8 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -116,10 +116,5 @@ func (a *AIClient) ChatCompletionStreamV1(ctx context.Context, callbackStream ch } } - ptrChatHistory := make([]*chatv1.Message, len(inappChatHistory)) - for i := range inappChatHistory { - ptrChatHistory[i] = &inappChatHistory[i] - } - return openaiChatHistory.OfInputItemList, inappChatHistory, nil } diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go index f6dcaa51..a1b73134 100644 --- 
a/internal/services/toolkit/client/completion_v2.go +++ b/internal/services/toolkit/client/completion_v2.go @@ -116,10 +116,5 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream } } - ptrChatHistory := make([]*chatv2.Message, len(inappChatHistory)) - for i := range inappChatHistory { - ptrChatHistory[i] = &inappChatHistory[i] - } - return openaiChatHistory.OfInputItemList, inappChatHistory, nil } diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index 8915b2c9..73d1e56f 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -6,7 +6,13 @@ This file contains utility functions for the client package. (Mainly miscellaneo It is used to append assistant responses to both OpenAI and in-app chat histories, and to create response items for chat interactions. */ import ( + "context" + "paperdebugger/internal/libs/cfg" + "paperdebugger/internal/libs/db" + "paperdebugger/internal/libs/logger" + "paperdebugger/internal/services" "paperdebugger/internal/services/toolkit/registry" + "paperdebugger/internal/services/toolkit/tools/xtramcp" chatv1 "paperdebugger/pkg/gen/api/chat/v1" chatv2 "paperdebugger/pkg/gen/api/chat/v2" @@ -99,3 +105,51 @@ func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsI Store: openai.Bool(false), // Must set to false, because we are construct our own chat history. 
} } + +func CheckOpenAIWorks(oaiClient openai.Client, logger *logger.Logger) { + logger.Info("[AI Client] checking if openai client works") + chatCompletion, err := oaiClient.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{ + Messages: []openai.ChatCompletionMessageParamUnion{ + openai.UserMessage("Say 'openai client works'"), + }, + Model: openai.ChatModelGPT4o, + }) + if err != nil { + logger.Errorf("[AI Client] openai client does not work: %v", err) + return + } + logger.Info("[AI Client] openai client works", "response", chatCompletion.Choices[0].Message.Content) +} + +// initializeToolkit creates and initializes the tool registry with XtraMCP tools. +// This is shared between AIClient and AIClientV2 to avoid code duplication. +func initializeToolkit( + db *db.DB, + projectService *services.ProjectService, + cfg *cfg.Cfg, + logger *logger.Logger, +) *registry.ToolRegistry { + toolRegistry := registry.NewToolRegistry() + + // Load tools dynamically from backend + xtraMCPLoader := xtramcp.NewXtraMCPLoader(db, projectService, cfg.XtraMCPURI) + + // initialize MCP session first and log session ID + sessionID, err := xtraMCPLoader.InitializeMCP() + if err != nil { + logger.Errorf("[AI Client] Failed to initialize XtraMCP session: %v", err) + // TODO: Fallback to static tools or exit? 
+ } else { + logger.Info("[AI Client] XtraMCP session initialized", "sessionID", sessionID) + + // dynamically load all tools from XtraMCP backend + err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry) + if err != nil { + logger.Errorf("[AI Client] Failed to load XtraMCP tools: %v", err) + } else { + logger.Info("[AI Client] Successfully loaded XtraMCP tools") + } + } + + return toolRegistry +} diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go index cf8a524e..1a19bc35 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -18,6 +18,9 @@ type StreamHandler interface { SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error) } +// Compile-time check: ensure StreamHandlerV1 implements StreamHandler interface +var _ StreamHandler = (*StreamHandlerV1)(nil) + type StreamHandlerV1 struct { callbackStream chatv1.ChatService_CreateConversationMessageStreamServer conversationId string diff --git a/internal/services/toolkit/handler/stream_v2.go b/internal/services/toolkit/handler/stream_v2.go index 18aaa6d6..ab9d048b 100644 --- a/internal/services/toolkit/handler/stream_v2.go +++ b/internal/services/toolkit/handler/stream_v2.go @@ -6,6 +6,9 @@ import ( "github.com/openai/openai-go/v2/responses" ) +// Compile-time check: ensure StreamHandlerV2 implements StreamHandler interface +var _ StreamHandler = (*StreamHandlerV2)(nil) + type StreamHandlerV2 struct { callbackStream chatv2.ChatService_CreateConversationMessageStreamServer conversationId string From f3b757d4520a964849ce93b25d0ae8635bf9a4e7 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 22:48:43 +0800 Subject: [PATCH 06/28] v2 migration --- .../create_conversation_message_stream_v2.go | 4 +- internal/models/conversation.go | 7 +- internal/services/toolkit/client/client_v2.go | 8 +- .../services/toolkit/client/completion.go | 2 +- 
.../services/toolkit/client/completion_v2.go | 136 ++++++++--- .../client/get_conversation_title_v2.go | 22 +- internal/services/toolkit/client/utils.go | 38 +--- internal/services/toolkit/client/utils_v2.go | 129 +++++++++++ .../services/toolkit/handler/stream_v2.go | 143 ++++++------ .../services/toolkit/handler/toolcall_v2.go | 146 +++++++----- .../services/toolkit/registry/registry_v2.go | 49 ++++ .../toolkit/tools/xtramcp/loader_v2.go | 211 ++++++++++++++++++ .../services/toolkit/tools/xtramcp/tool_v2.go | 164 ++++++++++++++ 13 files changed, 836 insertions(+), 223 deletions(-) create mode 100644 internal/services/toolkit/client/utils_v2.go create mode 100644 internal/services/toolkit/registry/registry_v2.go create mode 100644 internal/services/toolkit/tools/xtramcp/loader_v2.go create mode 100644 internal/services/toolkit/tools/xtramcp/tool_v2.go diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go index 1b6cc9c9..15406886 100644 --- a/internal/api/chat/create_conversation_message_stream_v2.go +++ b/internal/api/chat/create_conversation_message_stream_v2.go @@ -287,7 +287,7 @@ func (s *ChatServerV2) CreateConversationMessageStream( APIKey: settings.OpenAIAPIKey, } - openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistory, llmProvider) + openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider) if err != nil { return s.sendStreamError(stream, err) } @@ -302,7 +302,7 @@ func (s *ChatServerV2) CreateConversationMessageStream( bsonMessages[i] = bsonMsg } conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMessages...) 
- conversation.OpenaiChatHistory = openaiChatHistory + conversation.OpenaiChatHistoryCompletion = openaiChatHistory if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { return s.sendStreamError(stream, err) } diff --git a/internal/models/conversation.go b/internal/models/conversation.go index 70d48300..6f1ebdf5 100644 --- a/internal/models/conversation.go +++ b/internal/models/conversation.go @@ -2,6 +2,7 @@ package models import ( "github.com/openai/openai-go/v2/responses" + "github.com/openai/openai-go/v3" "go.mongodb.org/mongo-driver/v2/bson" ) @@ -14,8 +15,10 @@ type Conversation struct { ModelSlug string `bson:"model_slug"` InappChatHistory []bson.M `bson:"inapp_chat_history"` // Store as raw BSON to avoid protobuf decoding issues - OpenaiChatHistory responses.ResponseInputParam `bson:"openai_chat_history"` // 实际上发给 GPT 的聊天历史 - OpenaiChatParams responses.ResponseNewParams `bson:"openai_chat_params"` // 对话的参数,比如 temperature, etc. + OpenaiChatHistory responses.ResponseInputParam `bson:"openai_chat_history"` // 实际上发给 GPT 的聊天历史 + OpenaiChatParams responses.ResponseNewParams `bson:"openai_chat_params"` // 对话的参数,比如 temperature, etc. 
+ OpenaiChatHistoryCompletion []openai.ChatCompletionMessageParamUnion `bson:"openai_chat_history_completion"` + OpenaiChatParamsCompletion openai.ChatCompletionNewParams `bson:"openai_chat_params_completion"` } func (c Conversation) CollectionName() string { diff --git a/internal/services/toolkit/client/client_v2.go b/internal/services/toolkit/client/client_v2.go index 89169544..60a8a57b 100644 --- a/internal/services/toolkit/client/client_v2.go +++ b/internal/services/toolkit/client/client_v2.go @@ -8,8 +8,8 @@ import ( "paperdebugger/internal/services" "paperdebugger/internal/services/toolkit/handler" - "github.com/openai/openai-go/v2" - "github.com/openai/openai-go/v2/option" + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/option" "go.mongodb.org/mongo-driver/v2/mongo" ) @@ -60,9 +60,9 @@ func NewAIClientV2( option.WithBaseURL(cfg.OpenAIBaseURL), option.WithAPIKey(cfg.OpenAIAPIKey), ) - CheckOpenAIWorks(oaiClient, logger) + CheckOpenAIWorksV2(oaiClient, logger) - toolRegistry := initializeToolkit(db, projectService, cfg, logger) + toolRegistry := initializeToolkitV2(db, projectService, cfg, logger) toolCallHandler := handler.NewToolCallHandlerV2(toolRegistry) client := &AIClientV2{ diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index 862d7af8..88716217 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -62,7 +62,7 @@ func (a *AIClient) ChatCompletionStreamV1(ctx context.Context, callbackStream ch }() oaiClient := a.GetOpenAIClient(llmProvider) - params := getDefaultParams(modelSlug, openaiChatHistory, a.toolCallHandler.Registry) + params := getDefaultParams(modelSlug, a.toolCallHandler.Registry) for { params.Input = openaiChatHistory diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go index a1b73134..fe3b1770 100644 --- 
a/internal/services/toolkit/client/completion_v2.go +++ b/internal/services/toolkit/client/completion_v2.go @@ -2,13 +2,19 @@ package client import ( "context" + "encoding/json" "paperdebugger/internal/models" "paperdebugger/internal/services/toolkit/handler" chatv2 "paperdebugger/pkg/gen/api/chat/v2" - "github.com/openai/openai-go/v2/responses" + "github.com/openai/openai-go/v3" ) +// define []openai.ChatCompletionMessageParamUnion as OpenAIChatHistory + +type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion +type AppChatHistory []chatv2.Message + // ChatCompletion orchestrates a chat completion process with a language model (e.g., GPT), handling tool calls and message history management. // // Parameters: @@ -21,7 +27,7 @@ import ( // 1. The full chat history sent to the language model (including any tool call results). // 2. The incremental chat history visible to the user (including tool call results and assistant responses). // 3. An error, if any occurred during the process. -func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv2.Message, error) { +func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) { openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider) if err != nil { return nil, nil, err @@ -50,9 +56,9 @@ func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, mes // - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop. // - If no tool calls are needed, it appends the assistant's response and exits the loop. // - Finally, it returns the updated chat histories and any error encountered. 
-func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv2.Message, error) { - openaiChatHistory := responses.ResponseNewParamsInputUnion{OfInputItemList: messages} - inappChatHistory := []chatv2.Message{} +func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) { + openaiChatHistory := messages + inappChatHistory := AppChatHistory{} streamHandler := handler.NewStreamHandlerV2(callbackStream, conversationId, modelSlug) @@ -62,30 +68,95 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream }() oaiClient := a.GetOpenAIClient(llmProvider) - params := getDefaultParams(modelSlug, openaiChatHistory, a.toolCallHandler.Registry) + params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry) for { - params.Input = openaiChatHistory - var openaiOutput []responses.ResponseOutputItemUnion - stream := oaiClient.Responses.NewStreaming(context.Background(), params) + params.Messages = openaiChatHistory + // var openaiOutput OpenAIChatHistory + stream := oaiClient.Chat.Completions.NewStreaming(context.Background(), params) + reasoning_content := "" + answer_content := "" + answer_content_id := "" + is_answering := false + tool_info := map[int]map[string]string{} + toolCalls := []openai.FinishedChatCompletionToolCall{} for stream.Next() { - // time.Sleep(200 * time.Millisecond) // DEBUG POINT: change this to test in a slow mode + // time.Sleep(5000 * time.Millisecond) // DEBUG POINT: change this to test in a slow mode chunk := stream.Current() - switch chunk.Type { - case 
"response.output_item.added": - streamHandler.HandleAddedItem(chunk) - case "response.output_item.done": - streamHandler.HandleDoneItem(chunk) // send part end - case "response.incomplete": - // incomplete happens after "output_item.done" (if it happens) - // It's an indicator that the response is incomplete. - openaiOutput = chunk.Response.Output - streamHandler.SendIncompleteIndicator(chunk.Response.IncompleteDetails.Reason, chunk.Response.ID) - case "response.completed": - openaiOutput = chunk.Response.Output - case "response.output_text.delta": - streamHandler.HandleTextDelta(chunk) + + if len(chunk.Choices) == 0 { + // 处理用量信息 + // fmt.Printf("Usage: %+v\n", chunk.Usage) + continue + } + + if chunk.Choices[0].FinishReason != "" { + // fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason) + streamHandler.HandleTextDoneItem(chunk, answer_content) + break + } + + delta := chunk.Choices[0].Delta + + if field, ok := delta.JSON.ExtraFields["reasoning_content"]; ok && field.Raw() != "null" { + var s string + err := json.Unmarshal([]byte(field.Raw()), &s) + if err != nil { + // fmt.Println(err) + } + reasoning_content += s + // fmt.Print(s) + } else { + if !is_answering { + is_answering = true + // fmt.Println("\n\n========== 回答内容 ==========") + streamHandler.HandleAddedItem(chunk) + } + + if delta.Content != "" { + answer_content += delta.Content + answer_content_id = chunk.ID + streamHandler.HandleTextDelta(chunk) + } + + if len(delta.ToolCalls) > 0 { + for _, toolCall := range delta.ToolCalls { + index := int(toolCall.Index) + + // haskey(tool_info, index) + if _, ok := tool_info[index]; !ok { + // fmt.Printf("Prepare tool %s\n", toolCall.Function.Name) + tool_info[index] = map[string]string{} + streamHandler.HandleAddedItem(chunk) + } + + if toolCall.ID != "" { + tool_info[index]["id"] = tool_info[index]["id"] + toolCall.ID + } + + if toolCall.Function.Name != "" { + tool_info[index]["name"] = tool_info[index]["name"] + toolCall.Function.Name + } + + if 
toolCall.Function.Arguments != "" { + tool_info[index]["arguments"] = tool_info[index]["arguments"] + toolCall.Function.Arguments + // check if arguments can be unmarshaled, if not, means the arguments are not ready + var dummy map[string]any + if err := json.Unmarshal([]byte(tool_info[index]["arguments"]), &dummy); err == nil { + streamHandler.HandleToolArgPreparedDoneItem(index, tool_info[index]["id"], tool_info[index]["name"], tool_info[index]["arguments"]) + toolCalls = append(toolCalls, openai.FinishedChatCompletionToolCall{ + Index: index, + ID: tool_info[index]["id"], + ChatCompletionMessageFunctionToolCallFunction: openai.ChatCompletionMessageFunctionToolCallFunction{ + Name: tool_info[index]["name"], + Arguments: tool_info[index]["arguments"], + }, + }) + } + } + } + } } } @@ -93,22 +164,19 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream return nil, nil, err } - // 把 openai 的 response 记录下来,然后执行调用(如果有) - for _, item := range openaiOutput { - if item.Type == "message" && item.Role == "assistant" { - appendAssistantTextResponseV2(&openaiChatHistory, &inappChatHistory, item) - } + if answer_content != "" { + appendAssistantTextResponseV2(&openaiChatHistory, &inappChatHistory, answer_content, answer_content_id) } // 执行调用(如果有),返回增量数据 - openaiToolHistory, inappToolHistory, err := a.toolCallHandler.HandleToolCallsV2(ctx, openaiOutput, streamHandler) + openaiToolHistory, inappToolHistory, err := a.toolCallHandler.HandleToolCallsV2(ctx, toolCalls, streamHandler) if err != nil { return nil, nil, err } - // 把工具调用结果记录下来 - if len(openaiToolHistory.OfInputItemList) > 0 { - openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, openaiToolHistory.OfInputItemList...) + // // 把工具调用结果记录下来 + if len(openaiToolHistory) > 0 { + openaiChatHistory = append(openaiChatHistory, openaiToolHistory...) inappChatHistory = append(inappChatHistory, inappToolHistory...) 
} else { // response stream is finished, if there is no tool call, then break @@ -116,5 +184,5 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream } } - return openaiChatHistory.OfInputItemList, inappChatHistory, nil + return openaiChatHistory, inappChatHistory, nil } diff --git a/internal/services/toolkit/client/get_conversation_title_v2.go b/internal/services/toolkit/client/get_conversation_title_v2.go index 2d56231f..90630f3a 100644 --- a/internal/services/toolkit/client/get_conversation_title_v2.go +++ b/internal/services/toolkit/client/get_conversation_title_v2.go @@ -9,7 +9,7 @@ import ( chatv2 "paperdebugger/pkg/gen/api/chat/v2" - "github.com/openai/openai-go/v2/responses" + "github.com/openai/openai-go/v3" "github.com/samber/lo" ) @@ -29,23 +29,9 @@ func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistor message := strings.Join(messages, "\n") message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. 
Give me the title only, no other text including any other words.", message) - _, resp, err := a.ChatCompletionV2(ctx, "gpt-4.1-mini", responses.ResponseInputParam{ - { - OfInputMessage: &responses.ResponseInputItemMessageParam{ - Role: "system", - Content: responses.ResponseInputMessageContentListParam{ - responses.ResponseInputContentParamOfInputText(`You are a helpful assistant that generates a title for a conversation.`), - }, - }, - }, - { - OfInputMessage: &responses.ResponseInputItemMessageParam{ - Role: "user", - Content: responses.ResponseInputMessageContentListParam{ - responses.ResponseInputContentParamOfInputText(message), - }, - }, - }, + _, resp, err := a.ChatCompletionV2(ctx, openai.ChatModelGPT4_1Mini, OpenAIChatHistory{ + openai.SystemMessage("You are a helpful assistant that generates a title for a conversation."), + openai.UserMessage(message), }, llmProvider) if err != nil { return "", err diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index 73d1e56f..89e5cc89 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -14,9 +14,9 @@ import ( "paperdebugger/internal/services/toolkit/registry" "paperdebugger/internal/services/toolkit/tools/xtramcp" chatv1 "paperdebugger/pkg/gen/api/chat/v1" - chatv2 "paperdebugger/pkg/gen/api/chat/v2" "github.com/openai/openai-go/v2" + openaiv2 "github.com/openai/openai-go/v2" "github.com/openai/openai-go/v2/responses" "github.com/samber/lo" ) @@ -47,34 +47,10 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI }) } -func appendAssistantTextResponseV2(openaiChatHistory *responses.ResponseNewParamsInputUnion, inappChatHistory *[]chatv2.Message, item responses.ResponseOutputItemUnion) { - text := item.Content[0].Text - response := responses.ResponseInputItemUnionParam{ - OfOutputMessage: &responses.ResponseOutputMessageParam{ - Content: []responses.ResponseOutputMessageContentUnionParam{ 
- { - OfOutputText: &responses.ResponseOutputTextParam{Text: text}, - }, - }, - }, - } - openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, response) - *inappChatHistory = append(*inappChatHistory, chatv2.Message{ - MessageId: "openai_" + item.ID, - Payload: &chatv2.MessagePayload{ - MessageType: &chatv2.MessagePayload_Assistant{ - Assistant: &chatv2.MessageTypeAssistant{ - Content: text, - }, - }, - }, - }) -} - // getDefaultParams constructs the default parameters for a chat completion request. // The tool registry is managed centrally by the registry package. // The chat history is constructed manually, so Store must be set to false. -func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { +func getDefaultParams(modelSlug string, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams { var reasoningModels = []string{ "gpt-5", "gpt-5-mini", @@ -91,18 +67,16 @@ func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsI return responses.ResponseNewParams{ Model: modelSlug, Tools: toolRegistry.GetTools(), - Input: chatHistory, - Store: openai.Bool(false), + Store: openaiv2.Bool(false), } } return responses.ResponseNewParams{ Model: modelSlug, - Temperature: openai.Float(0.7), - MaxOutputTokens: openai.Int(4000), // DEBUG POINT: change this to test the frontend handler + Temperature: openaiv2.Float(0.7), + MaxOutputTokens: openaiv2.Int(4000), // DEBUG POINT: change this to test the frontend handler Tools: toolRegistry.GetTools(), // 工具注册由 registry 统一管理 - Input: chatHistory, - Store: openai.Bool(false), // Must set to false, because we are construct our own chat history. + Store: openaiv2.Bool(false), // Must set to false, because we are construct our own chat history. 
} } diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go new file mode 100644 index 00000000..ee685d15 --- /dev/null +++ b/internal/services/toolkit/client/utils_v2.go @@ -0,0 +1,129 @@ +package client + +/* +This file contains utility functions for the client package. (Mainly miscellaneous helpers) + +It is used to append assistant responses to both OpenAI and in-app chat histories, and to create response items for chat interactions. +*/ +import ( + "context" + "fmt" + "paperdebugger/internal/libs/cfg" + "paperdebugger/internal/libs/db" + "paperdebugger/internal/libs/logger" + "paperdebugger/internal/services" + "paperdebugger/internal/services/toolkit/registry" + "paperdebugger/internal/services/toolkit/tools/xtramcp" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + openaiv3 "github.com/openai/openai-go/v3" + "github.com/samber/lo" +) + +func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappChatHistory *AppChatHistory, content string, contentId string) { + *openaiChatHistory = append(*openaiChatHistory, openaiv3.ChatCompletionMessageParamUnion{ + OfAssistant: &openaiv3.ChatCompletionAssistantMessageParam{ + Role: "assistant", + Content: openaiv3.ChatCompletionAssistantMessageParamContentUnion{ + OfArrayOfContentParts: []openaiv3.ChatCompletionAssistantMessageParamContentArrayOfContentPartUnion{ + { + OfText: &openaiv3.ChatCompletionContentPartTextParam{ + Type: "text", + Text: content, + }, + }, + }, + }, + }, + }) + + *inappChatHistory = append(*inappChatHistory, chatv2.Message{ + MessageId: fmt.Sprintf("openai_%s", contentId), + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_Assistant{ + Assistant: &chatv2.MessageTypeAssistant{ + Content: content, + }, + }, + }, + }) +} + +func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2) openaiv3.ChatCompletionNewParams { + var reasoningModels = []string{ + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", 
+ "gpt-5-chat-latest", + "o4-mini", + "o3-mini", + "o3", + "o1-mini", + "o1", + "codex-mini-latest", + } + if lo.Contains(reasoningModels, modelSlug) { + return openaiv3.ChatCompletionNewParams{ + Model: modelSlug, + MaxCompletionTokens: openaiv3.Int(4000), + Tools: toolRegistry.GetTools(), + ParallelToolCalls: openaiv3.Bool(true), + Store: openaiv3.Bool(false), + } + } + + return openaiv3.ChatCompletionNewParams{ + Model: modelSlug, + Temperature: openaiv3.Float(0.7), + MaxCompletionTokens: openaiv3.Int(4000), // DEBUG POINT: change this to test the frontend handler + Tools: toolRegistry.GetTools(), // 工具注册由 registry 统一管理 + ParallelToolCalls: openaiv3.Bool(true), + Store: openaiv3.Bool(false), // Must set to false, because we are construct our own chat history. + } +} + +func CheckOpenAIWorksV2(oaiClient openaiv3.Client, logger *logger.Logger) { + logger.Info("[AI Client] checking if openai client works") + chatCompletion, err := oaiClient.Chat.Completions.New(context.TODO(), openaiv3.ChatCompletionNewParams{ + Messages: []openaiv3.ChatCompletionMessageParamUnion{ + openaiv3.UserMessage("Say 'openai client works'"), + }, + Model: openaiv3.ChatModelGPT4o, + }) + if err != nil { + logger.Errorf("[AI Client] openai client does not work: %v", err) + return + } + logger.Info("[AI Client] openai client works", "response", chatCompletion.Choices[0].Message.Content) +} + +func initializeToolkitV2( + db *db.DB, + projectService *services.ProjectService, + cfg *cfg.Cfg, + logger *logger.Logger, +) *registry.ToolRegistryV2 { + toolRegistry := registry.NewToolRegistryV2() + + // Load tools dynamically from backend + xtraMCPLoader := xtramcp.NewXtraMCPLoaderV2(db, projectService, cfg.XtraMCPURI) + + // initialize MCP session first and log session ID + sessionID, err := xtraMCPLoader.InitializeMCP() + if err != nil { + logger.Errorf("[AI Client V2] Failed to initialize XtraMCP session: %v", err) + // TODO: Fallback to static tools or exit? 
+ } else { + logger.Info("[AI Client V2] XtraMCP session initialized", "sessionID", sessionID) + + // dynamically load all tools from XtraMCP backend + err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry) + if err != nil { + logger.Errorf("[AI Client V2] Failed to load XtraMCP tools: %v", err) + } else { + logger.Info("[AI Client V2] Successfully loaded XtraMCP tools") + } + } + + return toolRegistry +} diff --git a/internal/services/toolkit/handler/stream_v2.go b/internal/services/toolkit/handler/stream_v2.go index ab9d048b..ab9819e2 100644 --- a/internal/services/toolkit/handler/stream_v2.go +++ b/internal/services/toolkit/handler/stream_v2.go @@ -1,14 +1,12 @@ package handler import ( + "fmt" chatv2 "paperdebugger/pkg/gen/api/chat/v2" - "github.com/openai/openai-go/v2/responses" + "github.com/openai/openai-go/v3" ) -// Compile-time check: ensure StreamHandlerV2 implements StreamHandler interface -var _ StreamHandler = (*StreamHandlerV2)(nil) - type StreamHandlerV2 struct { callbackStream chatv2.ChatService_CreateConversationMessageStreamServer conversationId string @@ -19,7 +17,7 @@ func NewStreamHandlerV2( callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, -) StreamHandler { +) *StreamHandlerV2 { return &StreamHandlerV2{ callbackStream: callbackStream, conversationId: conversationId, @@ -31,45 +29,66 @@ func (h *StreamHandlerV2) SendInitialization() { if h.callbackStream == nil { return } + streamInit := &chatv2.StreamInitialization{ + ConversationId: h.conversationId, + ModelSlug: h.modelSlug, + } h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamInitialization{ - StreamInitialization: &chatv2.StreamInitialization{ - ConversationId: h.conversationId, - ModelSlug: h.modelSlug, - }, + StreamInitialization: streamInit, }, }) } -func (h *StreamHandlerV2) HandleAddedItem(chunk 
responses.ResponseStreamEventUnion) { +func (h *StreamHandlerV2) HandleAddedItem(chunk openai.ChatCompletionChunk) { if h.callbackStream == nil { return } - if chunk.Item.Type == "message" { + switch chunk.Choices[0].Delta.Role { + case "assistant": h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv2.StreamPartBegin{ - MessageId: "openai_" + chunk.Item.ID, + MessageId: "openai_" + chunk.ID, Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_Assistant{ - Assistant: &chatv2.MessageTypeAssistant{ - ModelSlug: h.modelSlug, - }, + Assistant: &chatv2.MessageTypeAssistant{}, }, }, }, }, }) - } else if chunk.Item.Type == "function_call" { + // default: + // h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + // ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ + // StreamPartBegin: &chatv2.StreamPartBegin{ + // MessageId: "openai_" + chunk.ID, + // Payload: &chatv2.MessagePayload{ + // MessageType: &chatv2.MessagePayload_Unknown{ + // Unknown: &chatv2.MessageTypeUnknown{ + // Description: fmt.Sprintf("%v", chunk.Choices[0].Delta.Role), + // }, + // }, + // }, + // }, + // }, + // }) + } + toolCalls := chunk.Choices[0].Delta.ToolCalls + for _, toolCall := range toolCalls { + if toolCall.Function.Name == "" { + continue + } h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv2.StreamPartBegin{ - MessageId: "openai_" + chunk.Item.ID, + MessageId: fmt.Sprintf("openai_toolCallPrepareArguments[%d]_%s", toolCall.Index, toolCall.ID), Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{ ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{ - Name: chunk.Item.Name, + Name: toolCall.Function.Name, + Args: "", 
}, }, }, @@ -79,71 +98,59 @@ func (h *StreamHandlerV2) HandleAddedItem(chunk responses.ResponseStreamEventUni } } -func (h *StreamHandlerV2) HandleDoneItem(chunk responses.ResponseStreamEventUnion) { +func (h *StreamHandlerV2) HandleTextDoneItem(chunk openai.ChatCompletionChunk, content string) { if h.callbackStream == nil { return } - item := chunk.Item - switch item.Type { - case "message": - h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ - ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ - StreamPartEnd: &chatv2.StreamPartEnd{ - MessageId: "openai_" + item.ID, - Payload: &chatv2.MessagePayload{ - MessageType: &chatv2.MessagePayload_Assistant{ - Assistant: &chatv2.MessageTypeAssistant{ - Content: item.Content[0].Text, - ModelSlug: h.modelSlug, - }, - }, - }, - }, - }, - }) - case "function_call": - h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ - ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ - StreamPartEnd: &chatv2.StreamPartEnd{ - MessageId: "openai_" + item.ID, - Payload: &chatv2.MessagePayload{ - MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{ - ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{ - Name: item.Name, - Args: item.Arguments, - }, + if chunk.Choices[0].Delta.Role != "" && chunk.Choices[0].Delta.Content != "" { + return + } + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ + StreamPartEnd: &chatv2.StreamPartEnd{ + MessageId: "openai_" + chunk.ID, + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_Assistant{ + Assistant: &chatv2.MessageTypeAssistant{ + Content: content, }, }, }, }, - }) - default: - h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ - ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ - StreamPartEnd: 
&chatv2.StreamPartEnd{ - MessageId: "openai_" + item.ID, - Payload: &chatv2.MessagePayload{ - MessageType: &chatv2.MessagePayload_Unknown{ - Unknown: &chatv2.MessageTypeUnknown{ - Description: "Unknown message type: " + item.Type, - }, + }, + }) +} + +func (h *StreamHandlerV2) HandleToolArgPreparedDoneItem(index int, id string, name string, args string) { + if h.callbackStream == nil { + return + } + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ + ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ + StreamPartEnd: &chatv2.StreamPartEnd{ + MessageId: fmt.Sprintf("openai_toolCallPrepareArguments[%d]_%s", index, id), + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{ + ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{ + Name: name, + Args: args, }, }, }, }, - }) - } + }, + }) } -func (h *StreamHandlerV2) HandleTextDelta(chunk responses.ResponseStreamEventUnion) { +func (h *StreamHandlerV2) HandleTextDelta(chunk openai.ChatCompletionChunk) { if h.callbackStream == nil { return } h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_MessageChunk{ MessageChunk: &chatv2.MessageChunk{ - MessageId: "openai_" + chunk.ItemID, - Delta: chunk.Delta, + MessageId: "openai_" + chunk.ID, + Delta: chunk.Choices[0].Delta.Content, }, }, }) @@ -176,14 +183,14 @@ func (h *StreamHandlerV2) SendFinalization() { }) } -func (h *StreamHandlerV2) SendToolCallBegin(toolCall responses.ResponseFunctionToolCall) { +func (h *StreamHandlerV2) SendToolCallBegin(toolCall openai.FinishedChatCompletionToolCall) { if h.callbackStream == nil { return } h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv2.StreamPartBegin{ - MessageId: "openai_" + toolCall.CallID, + MessageId: 
fmt.Sprintf("openai_tool[%d]_%s", toolCall.Index, toolCall.ID), Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_ToolCall{ ToolCall: &chatv2.MessageTypeToolCall{ @@ -197,14 +204,14 @@ func (h *StreamHandlerV2) SendToolCallBegin(toolCall responses.ResponseFunctionT }) } -func (h *StreamHandlerV2) SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error) { +func (h *StreamHandlerV2) SendToolCallEnd(toolCall openai.FinishedChatCompletionToolCall, result string, err error) { if h.callbackStream == nil { return } h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ StreamPartEnd: &chatv2.StreamPartEnd{ - MessageId: "openai_" + toolCall.CallID, + MessageId: fmt.Sprintf("openai_tool[%d]_%s", toolCall.Index, toolCall.ID), Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_ToolCall{ ToolCall: &chatv2.MessageTypeToolCall{ diff --git a/internal/services/toolkit/handler/toolcall_v2.go b/internal/services/toolkit/handler/toolcall_v2.go index 9c442c19..317f32a7 100644 --- a/internal/services/toolkit/handler/toolcall_v2.go +++ b/internal/services/toolkit/handler/toolcall_v2.go @@ -2,30 +2,28 @@ package handler import ( "context" + "fmt" "paperdebugger/internal/services/toolkit/registry" chatv2 "paperdebugger/pkg/gen/api/chat/v2" - "github.com/openai/openai-go/v2/responses" -) - -const ( - messageTypeFunctionCallV2 = "function_call" - messageTypeMessageV2 = "message" - roleAssistantV2 = "assistant" + "github.com/openai/openai-go/v3" ) // ToolCallHandler is responsible for handling tool calls by dispatching them to the appropriate tool registry // and managing the chat history for both OpenAI and in-app chat systems. 
type ToolCallHandlerV2 struct { - Registry *registry.ToolRegistry // Registry containing available tools for function calls + Registry *registry.ToolRegistryV2 // Registry containing available tools for function calls } -func NewToolCallHandlerV2(toolRegistry *registry.ToolRegistry) *ToolCallHandlerV2 { +func NewToolCallHandlerV2(toolRegistry *registry.ToolRegistryV2) *ToolCallHandlerV2 { return &ToolCallHandlerV2{ Registry: toolRegistry, } } +type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion +type AppChatHistory []chatv2.Message + // HandleToolCalls processes a list of tool call outputs, invokes the corresponding tools, and constructs // both OpenAI and in-app chat histories reflecting the tool call and its result. // @@ -38,64 +36,88 @@ func NewToolCallHandlerV2(toolRegistry *registry.ToolRegistry) *ToolCallHandlerV // - openaiChatHistory: The OpenAI-compatible chat history including tool call and output items. // - inappChatHistory: The in-app chat history as a slice of chatv2.Message, reflecting tool call events. // - error: Any error encountered during processing (always nil in current implementation). 
-func (h *ToolCallHandlerV2) HandleToolCallsV2(ctx context.Context, outputs []responses.ResponseOutputItemUnion, streamHandler StreamHandler) (responses.ResponseNewParamsInputUnion, []chatv2.Message, error) { - openaiChatHistory := responses.ResponseNewParamsInputUnion{} // Accumulates OpenAI chat history items - inappChatHistory := []chatv2.Message{} // Accumulates in-app chat history messages +func (h *ToolCallHandlerV2) HandleToolCallsV2(ctx context.Context, toolCalls []openai.FinishedChatCompletionToolCall, streamHandler *StreamHandlerV2) (OpenAIChatHistory, AppChatHistory, error) { + if len(toolCalls) == 0 { + return nil, nil, nil + } + + openaiChatHistory := []openai.ChatCompletionMessageParamUnion{} // Accumulates OpenAI chat history items + inappChatHistory := []chatv2.Message{} // Accumulates in-app chat history messages + + toolCallsParam := make([]openai.ChatCompletionMessageToolCallUnionParam, len(toolCalls)) + for i, toolCall := range toolCalls { + toolCallsParam[i] = openai.ChatCompletionMessageToolCallUnionParam{ + OfFunction: &openai.ChatCompletionMessageFunctionToolCallParam{ + ID: toolCall.ID, + Type: "function", + Function: openai.ChatCompletionMessageFunctionToolCallFunctionParam{ + Name: toolCall.Name, + Arguments: toolCall.Arguments, + }, + }, + } + } + + openaiChatHistory = append(openaiChatHistory, openai.ChatCompletionMessageParamUnion{ + OfAssistant: &openai.ChatCompletionAssistantMessageParam{ + ToolCalls: toolCallsParam, + }, + }) // Iterate over each output item to process tool calls - for _, output := range outputs { - if output.Type == messageTypeFunctionCallV2 { - toolCall := output.AsFunctionCall() - - // According to OpenAI, function_call and function_call_output must appear in pairs in the chat history. - // Add the function call to the OpenAI chat history. 
- openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, responses.ResponseInputItemParamOfFunctionCall( - toolCall.Arguments, - toolCall.CallID, - toolCall.Name, - )) - - // Notify the stream handler that a tool call is beginning. - if streamHandler != nil { - streamHandler.SendToolCallBegin(toolCall) - } - result, err := h.Registry.Call(ctx, toolCall.CallID, toolCall.Name, []byte(toolCall.Arguments)) - if streamHandler != nil { - streamHandler.SendToolCallEnd(toolCall, result, err) - } - - if err != nil { - // If there was an error, append an error output to OpenAI chat history and in-app chat history. - openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, responses.ResponseInputItemParamOfFunctionCallOutput(toolCall.CallID, "Error: "+err.Error())) - inappChatHistory = append(inappChatHistory, chatv2.Message{ - MessageId: "openai_" + toolCall.CallID, - Payload: &chatv2.MessagePayload{ - MessageType: &chatv2.MessagePayload_ToolCall{ - ToolCall: &chatv2.MessageTypeToolCall{ - Name: toolCall.Name, - Args: toolCall.Arguments, - Error: err.Error(), - }, - }, - }, - }) - } else { - // On success, append the result to both OpenAI and in-app chat histories. 
- openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, responses.ResponseInputItemParamOfFunctionCallOutput(toolCall.CallID, result)) - inappChatHistory = append(inappChatHistory, chatv2.Message{ - MessageId: "openai_" + toolCall.CallID, - Payload: &chatv2.MessagePayload{ - MessageType: &chatv2.MessagePayload_ToolCall{ - ToolCall: &chatv2.MessageTypeToolCall{ - Name: toolCall.Name, - Args: toolCall.Arguments, - Result: result, - }, + for _, toolCall := range toolCalls { + if streamHandler != nil { + streamHandler.SendToolCallBegin(toolCall) + } + + toolResult, err := h.Registry.Call(ctx, toolCall.ID, toolCall.Name, []byte(toolCall.Arguments)) + + if streamHandler != nil { + streamHandler.SendToolCallEnd(toolCall, toolResult, err) + } + + resultStr := toolResult + if err != nil { + resultStr = "Error: " + err.Error() + } + + openaiChatHistory = append(openaiChatHistory, openai.ChatCompletionMessageParamUnion{ + OfTool: &openai.ChatCompletionToolMessageParam{ + Role: "tool", + ToolCallID: toolCall.ID, + Content: openai.ChatCompletionToolMessageParamContentUnion{ + OfArrayOfContentParts: []openai.ChatCompletionContentPartTextParam{ + { + Type: "text", + Text: resultStr, }, + // { + // Type: "image_url", + // ImageURL: "xxx" + // }, }, - }) - } + }, + }, + }) + + toolCallMsg := &chatv2.MessageTypeToolCall{ + Name: toolCall.Name, + Args: toolCall.Arguments, } + if err != nil { + toolCallMsg.Error = err.Error() + } else { + toolCallMsg.Result = resultStr + } + + inappChatHistory = append(inappChatHistory, chatv2.Message{ + MessageId: fmt.Sprintf("openai_toolCall[%d]_%s", toolCall.Index, toolCall.ID), + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_ToolCall{ + ToolCall: toolCallMsg, + }, + }, + }) } // Return both chat histories and nil error (no error aggregation in this implementation) diff --git a/internal/services/toolkit/registry/registry_v2.go b/internal/services/toolkit/registry/registry_v2.go new file mode 100644 
index 00000000..6c129fe8 --- /dev/null +++ b/internal/services/toolkit/registry/registry_v2.go @@ -0,0 +1,49 @@ +package registry + +import ( + "context" + "encoding/json" + "fmt" + "paperdebugger/internal/services/toolkit" + + "github.com/openai/openai-go/v3" + "github.com/samber/lo" +) + +type ToolRegistryV2 struct { + tools map[string]toolkit.ToolHandler + description map[string]openai.ChatCompletionToolUnionParam +} + +func NewToolRegistryV2() *ToolRegistryV2 { + return &ToolRegistryV2{ + tools: make(map[string]toolkit.ToolHandler), + description: make(map[string]openai.ChatCompletionToolUnionParam), + } +} + +func (r *ToolRegistryV2) Register(name string, description openai.ChatCompletionToolUnionParam, handler toolkit.ToolHandler) { + r.tools[name] = handler + r.description[name] = description +} + +func (r *ToolRegistryV2) Call(ctx context.Context, toolCallId string, toolCallName string, toolCallArgs json.RawMessage) (result string, err error) { + handler, ok := r.tools[toolCallName] + if !ok { + return "", fmt.Errorf("unknown tool: %s", toolCallName) + } + result, furtherInstruction, err := handler(ctx, toolCallId, toolCallArgs) + if err != nil { + return result, err + } + + if furtherInstruction == "" { + return result, nil + } else { + return fmt.Sprintf(`%s\n%s`, result, furtherInstruction), nil + } +} + +func (r *ToolRegistryV2) GetTools() []openai.ChatCompletionToolUnionParam { + return lo.Values(r.description) +} diff --git a/internal/services/toolkit/tools/xtramcp/loader_v2.go b/internal/services/toolkit/tools/xtramcp/loader_v2.go new file mode 100644 index 00000000..b7662c4c --- /dev/null +++ b/internal/services/toolkit/tools/xtramcp/loader_v2.go @@ -0,0 +1,211 @@ +package xtramcp + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "paperdebugger/internal/libs/db" + "paperdebugger/internal/services" + "paperdebugger/internal/services/toolkit/registry" +) + +// MCPListToolsResponse represents the JSON-RPC response from tools/list 
method +type MCPListToolsResponseV2 struct { + JSONRPC string `json:"jsonrpc"` + ID int `json:"id"` + Result struct { + Tools []ToolSchemaV2 `json:"tools"` + } `json:"result"` +} + +// loads tools dynamically from backend +type XtraMCPLoaderV2 struct { + db *db.DB + projectService *services.ProjectService + baseURL string + client *http.Client + sessionID string // Store the MCP session ID after initialization for re-use +} + +// NewXtraMCPLoader creates a new dynamic XtraMCP loader +func NewXtraMCPLoaderV2(db *db.DB, projectService *services.ProjectService, baseURL string) *XtraMCPLoaderV2 { + return &XtraMCPLoaderV2{ + db: db, + projectService: projectService, + baseURL: baseURL, + client: &http.Client{}, + } +} + +// LoadToolsFromBackend fetches tool schemas from backend and registers them +func (loader *XtraMCPLoaderV2) LoadToolsFromBackend(toolRegistry *registry.ToolRegistryV2) error { + if loader.sessionID == "" { + return fmt.Errorf("MCP session not initialized - call InitializeMCP first") + } + + // Fetch tools from backend using the established session + toolSchemas, err := loader.fetchAvailableTools() + if err != nil { + return fmt.Errorf("failed to fetch tools from backend: %w", err) + } + + // Register each tool dynamically, passing the session ID + for _, toolSchema := range toolSchemas { + dynamicTool := NewDynamicToolV2(loader.db, loader.projectService, toolSchema, loader.baseURL, loader.sessionID) + + // Register the tool with the registry + toolRegistry.Register(toolSchema.Name, dynamicTool.Description, dynamicTool.Call) + + fmt.Printf("Registered dynamic tool: %s\n", toolSchema.Name) + } + + return nil +} + +// InitializeMCP performs the full MCP initialization handshake, stores session ID, and returns it +func (loader *XtraMCPLoaderV2) InitializeMCP() (string, error) { + // Step 1: Initialize + sessionID, err := loader.performInitialize() + if err != nil { + return "", fmt.Errorf("step 1 - initialize failed: %w", err) + } + + // Step 2: Send 
notifications/initialized + err = loader.sendInitializedNotification(sessionID) + if err != nil { + return "", fmt.Errorf("step 2 - notifications/initialized failed: %w", err) + } + + // Store session ID for future use and return it + loader.sessionID = sessionID + + return sessionID, nil +} + +// performInitialize performs MCP initialization (1. establish connection) +func (loader *XtraMCPLoaderV2) performInitialize() (string, error) { + initReq := map[string]interface{}{ + "jsonrpc": "2.0", + "method": "initialize", + "id": 1, + "params": map[string]interface{}{ + "protocolVersion": "2024-11-05", + "capabilities": map[string]interface{}{}, + "clientInfo": map[string]interface{}{ + "name": "paperdebugger-client", + "version": "1.0.0", + }, + }, + } + + jsonData, err := json.Marshal(initReq) + if err != nil { + return "", fmt.Errorf("failed to marshal initialize request: %w", err) + } + + req, err := http.NewRequest("POST", loader.baseURL, bytes.NewBuffer(jsonData)) + if err != nil { + return "", fmt.Errorf("failed to create initialize request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json, text/event-stream") + + resp, err := loader.client.Do(req) + if err != nil { + return "", fmt.Errorf("failed to make initialize request: %w", err) + } + defer resp.Body.Close() + + // Extract session ID from response headers + sessionID := resp.Header.Get("mcp-session-id") + if sessionID == "" { + return "", fmt.Errorf("no session ID returned from initialize") + } + + return sessionID, nil +} + +// sendInitializedNotification completes MCP initialization (acknowledges initialization) +func (loader *XtraMCPLoaderV2) sendInitializedNotification(sessionID string) error { + notifyReq := map[string]interface{}{ + "jsonrpc": "2.0", + "method": "notifications/initialized", + "params": map[string]interface{}{}, + } + + jsonData, err := json.Marshal(notifyReq) + if err != nil { + return fmt.Errorf("failed to marshal 
notification: %w", err) + } + + req, err := http.NewRequest("POST", loader.baseURL, bytes.NewBuffer(jsonData)) + if err != nil { + return fmt.Errorf("failed to create notification request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json, text/event-stream") + req.Header.Set("mcp-session-id", sessionID) + + resp, err := loader.client.Do(req) + if err != nil { + return fmt.Errorf("failed to send notification: %w", err) + } + defer resp.Body.Close() + + return nil +} + +// fetchAvailableTools makes a request to get available tools from backend +func (loader *XtraMCPLoaderV2) fetchAvailableTools() ([]ToolSchemaV2, error) { + // List all tools using the established session + requestBody := map[string]interface{}{ + "jsonrpc": "2.0", + "method": "tools/list", + "params": map[string]interface{}{}, + "id": 2, + } + + jsonData, err := json.Marshal(requestBody) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + req, err := http.NewRequest("POST", loader.baseURL, bytes.NewBuffer(jsonData)) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json, text/event-stream") + req.Header.Set("mcp-session-id", loader.sessionID) + + resp, err := loader.client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to make request: %w", err) + } + defer resp.Body.Close() + + // Read the raw response body (SSE format) for debugging + bodyBytes, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("failed to read response body: %w", err) + } + + extractedJSON, err := parseSSEResponse(bodyBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse SSE response: %w", err) + } + + // Parse the extracted JSON + var mcpResponse MCPListToolsResponseV2 + err = json.Unmarshal([]byte(extractedJSON), &mcpResponse) + if err != nil { + return 
nil, fmt.Errorf("failed to parse JSON from SSE data: %w. JSON data: %s", err, extractedJSON) + } + + return mcpResponse.Result.Tools, nil +} diff --git a/internal/services/toolkit/tools/xtramcp/tool_v2.go b/internal/services/toolkit/tools/xtramcp/tool_v2.go new file mode 100644 index 00000000..a63a5a34 --- /dev/null +++ b/internal/services/toolkit/tools/xtramcp/tool_v2.go @@ -0,0 +1,164 @@ +package xtramcp + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "paperdebugger/internal/libs/db" + "paperdebugger/internal/services" + toolCallRecordDB "paperdebugger/internal/services/toolkit/db" + "time" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +// ToolSchema represents the schema from your backend +type ToolSchemaV2 struct { + Name string `json:"name"` + Description string `json:"description"` + InputSchema map[string]interface{} `json:"inputSchema"` + OutputSchema map[string]interface{} `json:"outputSchema"` +} + +// MCPRequest represents the JSON-RPC request structure +type MCPRequestV2 struct { + JSONRPC string `json:"jsonrpc"` + Method string `json:"method"` + ID int `json:"id"` + Params MCPParamsV2 `json:"params"` +} + +// MCPParams represents the parameters for the MCP request +type MCPParamsV2 struct { + Name string `json:"name"` + Arguments map[string]interface{} `json:"arguments"` +} + +// DynamicTool represents a generic tool that can handle any schema +type DynamicToolV2 struct { + Name string + Description openai.ChatCompletionToolUnionParam + toolCallRecordDB *toolCallRecordDB.ToolCallRecordDB + projectService *services.ProjectService + coolDownTime time.Duration + baseURL string + client *http.Client + schema map[string]interface{} + sessionID string // Reuse the session ID from initialization +} + +// NewDynamicTool creates a new dynamic tool from a schema +func NewDynamicToolV2(db *db.DB, projectService *services.ProjectService, toolSchema ToolSchemaV2, baseURL string, 
sessionID string) *DynamicToolV2 { + // Create tool description with the schema + description := openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: toolSchema.Name, + Description: param.NewOpt(toolSchema.Description), + Parameters: openai.FunctionParameters(toolSchema.InputSchema), + }, + }, + } + + toolCallRecordDB := toolCallRecordDB.NewToolCallRecordDB(db) + return &DynamicToolV2{ + Name: toolSchema.Name, + Description: description, + toolCallRecordDB: toolCallRecordDB, + projectService: projectService, + coolDownTime: 5 * time.Minute, + baseURL: baseURL, + client: &http.Client{}, + schema: toolSchema.InputSchema, + sessionID: sessionID, // Store the session ID for reuse + } +} + +// Call handles the tool execution (generic for any tool) +func (t *DynamicToolV2) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + // Parse arguments as generic map since we don't know the structure + var argsMap map[string]interface{} + err := json.Unmarshal(args, &argsMap) + if err != nil { + return "", "", err + } + + // Create function call record + record, err := t.toolCallRecordDB.Create(ctx, toolCallId, t.Name, argsMap) + if err != nil { + return "", "", err + } + + // Execute the tool via MCP + respStr, err := t.executeTool(argsMap) + if err != nil { + err = fmt.Errorf("failed to execute tool %s: %v", t.Name, err) + t.toolCallRecordDB.OnError(ctx, record, err) + return "", "", err + } + + rawJson, err := json.Marshal(respStr) + if err != nil { + err = fmt.Errorf("failed to marshal tool result: %v", err) + t.toolCallRecordDB.OnError(ctx, record, err) + return "", "", err + } + t.toolCallRecordDB.OnSuccess(ctx, record, string(rawJson)) + + return respStr, "", nil +} + +// executeTool makes the MCP request (generic for any tool) +func (t *DynamicToolV2) executeTool(args map[string]interface{}) (string, error) { + + request := MCPRequest{ + 
JSONRPC: "2.0", + Method: "tools/call", + ID: int(time.Now().Unix()), // to ensure unique ID; TODO: consider better ID generation + Params: MCPParams{ + Name: t.Name, + Arguments: args, + }, + } + + // Marshal request to JSON + jsonData, err := json.Marshal(request) + if err != nil { + return "", fmt.Errorf("failed to marshal MCP request: %w", err) + } + + // Create HTTP request + req, err := http.NewRequest("POST", t.baseURL, bytes.NewBuffer(jsonData)) + if err != nil { + return "", fmt.Errorf("failed to create HTTP request: %w", err) + } + + // Set headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json, text/event-stream") + req.Header.Set("mcp-session-id", t.sessionID) // Use the stored session ID + + // Make the request + resp, err := t.client.Do(req) + if err != nil { + return "", fmt.Errorf("failed to make request: %w", err) + } + defer resp.Body.Close() + + // Read response + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("failed to read response: %w", err) + } + + extractedJSON, err := parseSSEResponse(body) + if err != nil { + return "", fmt.Errorf("failed to parse SSE response: %w", err) + } + + return extractedJSON, nil +} From 68742ff95f5189e7440289e3b14a674418f58f7a Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 23:07:24 +0800 Subject: [PATCH 07/28] fix: wire --- .../chat/create_conversation_message_stream_v2.go | 4 ++++ internal/services/toolkit/client/utils_v2.go | 2 ++ internal/services/toolkit/handler/toolcall_v2.go | 2 ++ internal/wire.go | 2 ++ internal/wire_gen.go | 5 ++--- pkg/gen/api/chat/v2/chat.pb.go | 15 ++++++++++++--- proto/chat/v2/chat.proto | 3 ++- .../src/pkg/gen/apiclient/chat/v2/chat_pb.ts | 9 +++++++-- 8 files changed, 33 insertions(+), 9 deletions(-) diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go index 15406886..d1f672c4 100644 --- 
a/internal/api/chat/create_conversation_message_stream_v2.go +++ b/internal/api/chat/create_conversation_message_stream_v2.go @@ -8,6 +8,7 @@ import ( "paperdebugger/internal/models" "paperdebugger/internal/services" chatv2 "paperdebugger/pkg/gen/api/chat/v2" + "time" "github.com/google/uuid" "github.com/openai/openai-go/v2/responses" @@ -49,6 +50,7 @@ func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSe }, }, }, + Timestamp: time.Now().Unix(), } default: inappMessage = &chatv2.Message{ @@ -61,6 +63,7 @@ func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSe }, }, }, + Timestamp: time.Now().Unix(), } } @@ -87,6 +90,7 @@ func (s *ChatServerV2) buildSystemMessage(systemPrompt string) (*chatv2.Message, }, }, }, + Timestamp: time.Now().Unix(), } openaiMessage := &responses.ResponseInputItemUnionParam{ diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go index ee685d15..ddf35429 100644 --- a/internal/services/toolkit/client/utils_v2.go +++ b/internal/services/toolkit/client/utils_v2.go @@ -15,6 +15,7 @@ import ( "paperdebugger/internal/services/toolkit/registry" "paperdebugger/internal/services/toolkit/tools/xtramcp" chatv2 "paperdebugger/pkg/gen/api/chat/v2" + "time" openaiv3 "github.com/openai/openai-go/v3" "github.com/samber/lo" @@ -46,6 +47,7 @@ func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappCh }, }, }, + Timestamp: time.Now().Unix(), }) } diff --git a/internal/services/toolkit/handler/toolcall_v2.go b/internal/services/toolkit/handler/toolcall_v2.go index 317f32a7..b7e62fa1 100644 --- a/internal/services/toolkit/handler/toolcall_v2.go +++ b/internal/services/toolkit/handler/toolcall_v2.go @@ -5,6 +5,7 @@ import ( "fmt" "paperdebugger/internal/services/toolkit/registry" chatv2 "paperdebugger/pkg/gen/api/chat/v2" + "time" "github.com/openai/openai-go/v3" ) @@ -117,6 +118,7 @@ func (h *ToolCallHandlerV2) HandleToolCallsV2(ctx 
context.Context, toolCalls []o ToolCall: toolCallMsg, }, }, + Timestamp: time.Now().Unix(), }) } diff --git a/internal/wire.go b/internal/wire.go index afad8b9c..43674538 100644 --- a/internal/wire.go +++ b/internal/wire.go @@ -33,8 +33,10 @@ var Set = wire.NewSet( comment.NewCommentServer, aiclient.NewAIClient, + aiclient.NewAIClientV2, services.NewReverseCommentService, services.NewChatService, + services.NewChatServiceV2, services.NewTokenService, services.NewUserService, services.NewProjectService, diff --git a/internal/wire_gen.go b/internal/wire_gen.go index d467b70c..6268e274 100644 --- a/internal/wire_gen.go +++ b/internal/wire_gen.go @@ -7,6 +7,7 @@ package internal import ( + "github.com/google/wire" "paperdebugger/internal/api" "paperdebugger/internal/api/auth" "paperdebugger/internal/api/chat" @@ -18,8 +19,6 @@ import ( "paperdebugger/internal/libs/logger" "paperdebugger/internal/services" "paperdebugger/internal/services/toolkit/client" - - "github.com/google/wire" ) // Injectors from wire.go: @@ -55,4 +54,4 @@ func InitializeApp() (*api.Server, error) { // wire.go: -var Set = wire.NewSet(api.NewServer, api.NewGrpcServer, api.NewGinServer, auth.NewOAuthHandler, auth.NewAuthServer, chat.NewChatServer, user.NewUserServer, project.NewProjectServer, comment.NewCommentServer, client.NewAIClient, services.NewReverseCommentService, services.NewChatService, services.NewTokenService, services.NewUserService, services.NewProjectService, services.NewPromptService, services.NewOAuthService, cfg.GetCfg, logger.GetLogger, db.NewDB) +var Set = wire.NewSet(api.NewServer, api.NewGrpcServer, api.NewGinServer, auth.NewOAuthHandler, auth.NewAuthServer, chat.NewChatServer, user.NewUserServer, project.NewProjectServer, comment.NewCommentServer, client.NewAIClient, client.NewAIClientV2, services.NewReverseCommentService, services.NewChatService, services.NewChatServiceV2, services.NewTokenService, services.NewUserService, services.NewProjectService, 
services.NewPromptService, services.NewOAuthService, cfg.GetCfg, logger.GetLogger, db.NewDB) diff --git a/pkg/gen/api/chat/v2/chat.pb.go b/pkg/gen/api/chat/v2/chat.pb.go index b3843c18..6842955e 100644 --- a/pkg/gen/api/chat/v2/chat.pb.go +++ b/pkg/gen/api/chat/v2/chat.pb.go @@ -529,7 +529,8 @@ func (*MessagePayload_Unknown) isMessagePayload_MessageType() {} type Message struct { state protoimpl.MessageState `protogen:"open.v1"` MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` - Payload *MessagePayload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + Payload *MessagePayload `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -578,6 +579,13 @@ func (x *Message) GetPayload() *MessagePayload { return nil } +func (x *Message) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + type Conversation struct { state protoimpl.MessageState `protogen:"open.v1"` Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` @@ -1780,11 +1788,12 @@ const file_chat_v2_chat_proto_rawDesc = "" + "\x1btool_call_prepare_arguments\x18\x04 \x01(\v2,.chat.v2.MessageTypeToolCallPrepareArgumentsH\x00R\x18toolCallPrepareArguments\x12;\n" + "\ttool_call\x18\x05 \x01(\v2\x1c.chat.v2.MessageTypeToolCallH\x00R\btoolCall\x127\n" + "\aunknown\x18\x06 \x01(\v2\x1b.chat.v2.MessageTypeUnknownH\x00R\aunknownB\x0e\n" + - "\fmessage_type\"[\n" + + "\fmessage_type\"y\n" + "\aMessage\x12\x1d\n" + "\n" + "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" + - "\apayload\x18\x03 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\"\x81\x01\n" + + "\apayload\x18\x02 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\x12\x1c\n" + + "\ttimestamp\x18\x03 \x01(\x03R\ttimestamp\"\x81\x01\n" + "\fConversation\x12\x0e\n" + 
"\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" + "\x05title\x18\x02 \x01(\tR\x05title\x12\x1d\n" + diff --git a/proto/chat/v2/chat.proto b/proto/chat/v2/chat.proto index 62ae9d02..51b5a47e 100644 --- a/proto/chat/v2/chat.proto +++ b/proto/chat/v2/chat.proto @@ -76,7 +76,8 @@ message MessagePayload { message Message { string message_id = 1; - MessagePayload payload = 3; + MessagePayload payload = 2; + int64 timestamp = 3; } message Conversation { diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts index f4c56bf8..cc69ad25 100644 --- a/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts +++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts @@ -11,7 +11,7 @@ import type { Message as Message$1 } from "@bufbuild/protobuf"; * Describes the file chat/v2/chat.proto. */ export const file_chat_v2_chat: GenFile = /*@__PURE__*/ - fileDesc("ChJjaGF0L3YyL2NoYXQucHJvdG8SB2NoYXQudjIiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIjsKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJQCg9NZXNzYWdlVHlwZVVzZXISDwoHY29udGVudBgBIAEoCRIaCg1zZWxlY3RlZF90ZXh0GAIgASgJSACIAQFCEAoOX3NlbGVjdGVkX3RleHQiKQoSTWVzc2FnZVR5cGVVbmtub3duEhMKC2Rlc2NyaXB0aW9uGAEgASgJIuQCCg5NZXNzYWdlUGF5bG9hZBIsCgZzeXN0ZW0YASABKAsyGi5jaGF0LnYyLk1lc3NhZ2VUeXBlU3lzdGVtSAASKAoEdXNlchgCIAEoCzIYLmNoYXQudjIuTWVzc2FnZVR5cGVVc2VySAASMgoJYXNzaXN0YW50GAMgASgLMh0uY2hhdC52Mi5NZXNzYWdlVHlwZUFzc2lzdGFudEgAElMKG3Rvb2xfY2FsbF9wcmVwYXJlX2FyZ3VtZW50cxgEIAEoCzIsLmNoYXQudjIuTWVzc2FnZVR5cGVUb29sQ2FsbFByZXBhcmVBcmd1bWVudHNIABIxCgl0b29sX2NhbGwYBSABKAsyHC5jaGF0LnYyLk1lc3NhZ2VUeXBlVG9vbENhbGxIABIuCgd1bmtub3duGAYgASgLMhsuY2hhdC52Mi5NZXNzYWdlVHlwZVVua25vd25IAEIOCgxtZXNzYWdlX3R5cGUiRwoHTWVzc2FnZRISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKA
syFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkImEKDENvbnZlcnNhdGlvbhIKCgJpZBgBIAEoCRINCgV0aXRsZRgCIAEoCRISCgptb2RlbF9zbHVnGAMgASgJEiIKCG1lc3NhZ2VzGAQgAygLMhAuY2hhdC52Mi5NZXNzYWdlIkIKGExpc3RDb252ZXJzYXRpb25zUmVxdWVzdBIXCgpwcm9qZWN0X2lkGAEgASgJSACIAQFCDQoLX3Byb2plY3RfaWQiSQoZTGlzdENvbnZlcnNhdGlvbnNSZXNwb25zZRIsCg1jb252ZXJzYXRpb25zGAEgAygLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iMQoWR2V0Q29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiRgoXR2V0Q29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iQwoZVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkSDQoFdGl0bGUYAiABKAkiSQoaVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2USKwoMY29udmVyc2F0aW9uGAEgASgLMhUuY2hhdC52Mi5Db252ZXJzYXRpb24iNAoZRGVsZXRlQ29udmVyc2F0aW9uUmVxdWVzdBIXCg9jb252ZXJzYXRpb25faWQYASABKAkiHAoaRGVsZXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiLAoOU3VwcG9ydGVkTW9kZWwSDAoEbmFtZRgBIAEoCRIMCgRzbHVnGAIgASgJIhwKGkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0IkYKG0xpc3RTdXBwb3J0ZWRNb2RlbHNSZXNwb25zZRInCgZtb2RlbHMYASADKAsyFy5jaGF0LnYyLlN1cHBvcnRlZE1vZGVsIkMKFFN0cmVhbUluaXRpYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCRISCgptb2RlbF9zbHVnGAIgASgJIk8KD1N0cmVhbVBhcnRCZWdpbhISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAyABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkIjEKDE1lc3NhZ2VDaHVuaxISCgptZXNzYWdlX2lkGAEgASgJEg0KBWRlbHRhGAIgASgJIjoKE0luY29tcGxldGVJbmRpY2F0b3ISDgoGcmVhc29uGAEgASgJEhMKC3Jlc3BvbnNlX2lkGAIgASgJIk0KDVN0cmVhbVBhcnRFbmQSEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52Mi5NZXNzYWdlUGF5bG9hZCItChJTdHJlYW1GaW5hbGl6YXRpb24SFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIiQKC1N0cmVhbUVycm9yEhUKDWVycm9yX21lc3NhZ2UYASABKAkioQIKJkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0EhIKCnByb2plY3RfaWQYASABKAkSHAoPY29udmVyc2F0aW9uX2lkGAIgASgJSACIAQESEgoKbW9kZWxfc2x1ZxgDIAEoCRIUCgx1c2VyX21lc3NhZ2UYBCABKAkSHwoSdXNlcl9zZWxlY3RlZF90ZXh0GAUgASgJSAGIAQESOQoRY29udmVyc2F0aW9uX3R5cGUYBiABKA4yGS5jaGF0LnYyLkNvbnZlcnNhdGlvblR5cGVIAogBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE191c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlIr8DCidDcmVhdGVDb252ZX
JzYXRpb25NZXNzYWdlU3RyZWFtUmVzcG9uc2USPgoVc3RyZWFtX2luaXRpYWxpemF0aW9uGAEgASgLMh0uY2hhdC52Mi5TdHJlYW1Jbml0aWFsaXphdGlvbkgAEjUKEXN0cmVhbV9wYXJ0X2JlZ2luGAIgASgLMhguY2hhdC52Mi5TdHJlYW1QYXJ0QmVnaW5IABIuCg1tZXNzYWdlX2NodW5rGAMgASgLMhUuY2hhdC52Mi5NZXNzYWdlQ2h1bmtIABI8ChRpbmNvbXBsZXRlX2luZGljYXRvchgEIAEoCzIcLmNoYXQudjIuSW5jb21wbGV0ZUluZGljYXRvckgAEjEKD3N0cmVhbV9wYXJ0X2VuZBgFIAEoCzIWLmNoYXQudjIuU3RyZWFtUGFydEVuZEgAEjoKE3N0cmVhbV9maW5hbGl6YXRpb24YBiABKAsyGy5jaGF0LnYyLlN0cmVhbUZpbmFsaXphdGlvbkgAEiwKDHN0cmVhbV9lcnJvchgHIAEoCzIULmNoYXQudjIuU3RyZWFtRXJyb3JIAEISChByZXNwb25zZV9wYXlsb2FkKlIKEENvbnZlcnNhdGlvblR5cGUSIQodQ09OVkVSU0FUSU9OX1RZUEVfVU5TUEVDSUZJRUQQABIbChdDT05WRVJTQVRJT05fVFlQRV9ERUJVRxABMqgHCgtDaGF0U2VydmljZRKDAQoRTGlzdENvbnZlcnNhdGlvbnMSIS5jaGF0LnYyLkxpc3RDb252ZXJzYXRpb25zUmVxdWVzdBoiLmNoYXQudjIuTGlzdENvbnZlcnNhdGlvbnNSZXNwb25zZSIngtPkkwIhEh8vX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zEo8BCg9HZXRDb252ZXJzYXRpb24SHy5jaGF0LnYyLkdldENvbnZlcnNhdGlvblJlcXVlc3QaIC5jaGF0LnYyLkdldENvbnZlcnNhdGlvblJlc3BvbnNlIjmC0+STAjMSMS9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SwgEKH0NyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW0SLy5jaGF0LnYyLkNyZWF0ZUNvbnZlcnNhdGlvbk1lc3NhZ2VTdHJlYW1SZXF1ZXN0GjAuY2hhdC52Mi5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVzcG9uc2UiOoLT5JMCNDoBKiIvL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy9tZXNzYWdlcy9zdHJlYW0wARKbAQoSVXBkYXRlQ29udmVyc2F0aW9uEiIuY2hhdC52Mi5VcGRhdGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52Mi5VcGRhdGVDb252ZXJzYXRpb25SZXNwb25zZSI8gtPkkwI2OgEqMjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EpgBChJEZWxldGVDb252ZXJzYXRpb24SIi5jaGF0LnYyLkRlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QaIy5jaGF0LnYyLkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIjmC0+STAjMqMS9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMve2NvbnZlcnNhdGlvbl9pZH0SggEKE0xpc3RTdXBwb3J0ZWRNb2RlbHMSIy5jaGF0LnYyLkxpc3RTdXBwb3J0ZWRNb2RlbHNSZXF1ZXN0GiQuY2hhdC52Mi5MaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2UiIILT5JMCGhIYL19wZC9hcGkvdjIvY2hhdHMvbW9kZWxzQn8KC2NvbS5jaGF0LnYyQglDaGF0UHJvdG9QAVoocGFwZXJkZWJ1Z2dlci
9wa2cvZ2VuL2FwaS9jaGF0L3YyO2NoYXR2MqICA0NYWKoCB0NoYXQuVjLKAgdDaGF0XFYy4gITQ2hhdFxWMlxHUEJNZXRhZGF0YeoCCENoYXQ6OlYyYgZwcm90bzM", [file_google_api_annotations]); + fileDesc("ChJjaGF0L3YyL2NoYXQucHJvdG8SB2NoYXQudjIiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIjsKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJQCg9NZXNzYWdlVHlwZVVzZXISDwoHY29udGVudBgBIAEoCRIaCg1zZWxlY3RlZF90ZXh0GAIgASgJSACIAQFCEAoOX3NlbGVjdGVkX3RleHQiKQoSTWVzc2FnZVR5cGVVbmtub3duEhMKC2Rlc2NyaXB0aW9uGAEgASgJIuQCCg5NZXNzYWdlUGF5bG9hZBIsCgZzeXN0ZW0YASABKAsyGi5jaGF0LnYyLk1lc3NhZ2VUeXBlU3lzdGVtSAASKAoEdXNlchgCIAEoCzIYLmNoYXQudjIuTWVzc2FnZVR5cGVVc2VySAASMgoJYXNzaXN0YW50GAMgASgLMh0uY2hhdC52Mi5NZXNzYWdlVHlwZUFzc2lzdGFudEgAElMKG3Rvb2xfY2FsbF9wcmVwYXJlX2FyZ3VtZW50cxgEIAEoCzIsLmNoYXQudjIuTWVzc2FnZVR5cGVUb29sQ2FsbFByZXBhcmVBcmd1bWVudHNIABIxCgl0b29sX2NhbGwYBSABKAsyHC5jaGF0LnYyLk1lc3NhZ2VUeXBlVG9vbENhbGxIABIuCgd1bmtub3duGAYgASgLMhsuY2hhdC52Mi5NZXNzYWdlVHlwZVVua25vd25IAEIOCgxtZXNzYWdlX3R5cGUiWgoHTWVzc2FnZRISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAiABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkEhEKCXRpbWVzdGFtcBgDIAEoAyJhCgxDb252ZXJzYXRpb24SCgoCaWQYASABKAkSDQoFdGl0bGUYAiABKAkSEgoKbW9kZWxfc2x1ZxgDIAEoCRIiCghtZXNzYWdlcxgEIAMoCzIQLmNoYXQudjIuTWVzc2FnZSJCChhMaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QSFwoKcHJvamVjdF9pZBgBIAEoCUgAiAEBQg0KC19wcm9qZWN0X2lkIkkKGUxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2USLAoNY29udmVyc2F0aW9ucxgBIAMoCzIVLmNoYXQudjIuQ29udmVyc2F0aW9uIjEKFkdldENvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIkYKF0dldENvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjIuQ29udmVyc2F0aW9uIkMKGVVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEg0KBXRpdGxlGAIgASgJIkkKGlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjIuQ29udmVyc2F0aW9uIjQKGURlbGV0
ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIhwKGkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIiwKDlN1cHBvcnRlZE1vZGVsEgwKBG5hbWUYASABKAkSDAoEc2x1ZxgCIAEoCSIcChpMaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdCJGChtMaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2USJwoGbW9kZWxzGAEgAygLMhcuY2hhdC52Mi5TdXBwb3J0ZWRNb2RlbCJDChRTdHJlYW1Jbml0aWFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJPCg9TdHJlYW1QYXJ0QmVnaW4SEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52Mi5NZXNzYWdlUGF5bG9hZCIxCgxNZXNzYWdlQ2h1bmsSEgoKbWVzc2FnZV9pZBgBIAEoCRINCgVkZWx0YRgCIAEoCSI6ChNJbmNvbXBsZXRlSW5kaWNhdG9yEg4KBnJlYXNvbhgBIAEoCRITCgtyZXNwb25zZV9pZBgCIAEoCSJNCg1TdHJlYW1QYXJ0RW5kEhIKCm1lc3NhZ2VfaWQYASABKAkSKAoHcGF5bG9hZBgDIAEoCzIXLmNoYXQudjIuTWVzc2FnZVBheWxvYWQiLQoSU3RyZWFtRmluYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSIkCgtTdHJlYW1FcnJvchIVCg1lcnJvcl9tZXNzYWdlGAEgASgJIqECCiZDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgAiAEBEhIKCm1vZGVsX3NsdWcYAyABKAkSFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgBiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52Mi5Db252ZXJzYXRpb25UeXBlSAKIAQFCEgoQX2NvbnZlcnNhdGlvbl9pZEIVChNfdXNlcl9zZWxlY3RlZF90ZXh0QhQKEl9jb252ZXJzYXRpb25fdHlwZSK/AwonQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlEj4KFXN0cmVhbV9pbml0aWFsaXphdGlvbhgBIAEoCzIdLmNoYXQudjIuU3RyZWFtSW5pdGlhbGl6YXRpb25IABI1ChFzdHJlYW1fcGFydF9iZWdpbhgCIAEoCzIYLmNoYXQudjIuU3RyZWFtUGFydEJlZ2luSAASLgoNbWVzc2FnZV9jaHVuaxgDIAEoCzIVLmNoYXQudjIuTWVzc2FnZUNodW5rSAASPAoUaW5jb21wbGV0ZV9pbmRpY2F0b3IYBCABKAsyHC5jaGF0LnYyLkluY29tcGxldGVJbmRpY2F0b3JIABIxCg9zdHJlYW1fcGFydF9lbmQYBSABKAsyFi5jaGF0LnYyLlN0cmVhbVBhcnRFbmRIABI6ChNzdHJlYW1fZmluYWxpemF0aW9uGAYgASgLMhsuY2hhdC52Mi5TdHJlYW1GaW5hbGl6YXRpb25IABIsCgxzdHJlYW1fZXJyb3IYByABKAsyFC5jaGF0LnYyLlN0cmVhbUVycm9ySABCEgoQcmVzcG9uc2VfcGF5bG9hZCpSChBDb252ZXJzYXRpb25UeXBlEiEKHUNPTlZFUlNBVElPTl9UWVBFX1VOU1BFQ0lGSUVEEAASGwoXQ09OVkVSU0FUSU9OX1RZUEVfREVCVUcQATKoBwoLQ2hhdFNlcnZpY2USgwEKEUxpc3RD
b252ZXJzYXRpb25zEiEuY2hhdC52Mi5MaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QaIi5jaGF0LnYyLkxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2UiJ4LT5JMCIRIfL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucxKPAQoPR2V0Q29udmVyc2F0aW9uEh8uY2hhdC52Mi5HZXRDb252ZXJzYXRpb25SZXF1ZXN0GiAuY2hhdC52Mi5HZXRDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzEjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52Mi5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjIuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52Mi5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjIuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YyL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MkIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MjtjaGF0djKiAgNDWFiqAgdDaGF0LlYyygIHQ2hhdFxWMuICE0NoYXRcVjJcR1BCTWV0YWRhdGHqAghDaGF0OjpWMmIGcHJvdG8z", [file_google_api_annotations]); /** * @generated from message chat.v2.MessageTypeToolCall @@ -216,9 +216,14 @@ export type Message = Message$1<"chat.v2.Message"> & { messageId: string; /** - * @generated from field: chat.v2.MessagePayload payload = 3; + * @generated from field: chat.v2.MessagePayload payload = 2; */ payload?: MessagePayload; + + /** + * @generated from field: int64 timestamp = 3; + */ + timestamp: bigint; }; /** From 0fb970a6e54c490806cd3e6d682fb277bbd92379 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 23:19:14 +0800 Subject: [PATCH 08/28] web: remove v1, 
introduce v2 --- webapp/_webapp/src/hooks/useLanguageModels.ts | 2 +- .../_webapp/src/hooks/useSendMessageStream.ts | 6 ++--- webapp/_webapp/src/query/api.ts | 17 ++++---------- webapp/_webapp/src/query/index.ts | 13 ++--------- .../stores/conversation/conversation-store.ts | 2 +- .../stores/conversation/handlers/converter.ts | 2 +- .../handlers/handleIncompleteIndicator.ts | 2 +- .../handlers/handleMessageChunk.ts | 2 +- .../handlers/handleStreamError.ts | 2 +- .../handlers/handleStreamFinalization.ts | 2 +- .../handlers/handleStreamInitialization.ts | 2 +- .../handlers/handleStreamPartBegin.ts | 2 +- .../handlers/handleStreamPartEnd.ts | 2 +- .../_webapp/src/stores/conversation/types.ts | 2 +- .../src/stores/streaming-message-store.ts | 2 +- webapp/_webapp/src/views/chat/body/index.tsx | 2 +- .../src/views/chat/body/status-indicator.tsx | 2 +- .../views/chat/header/chat-history-modal.tsx | 2 +- .../_webapp/src/views/chat/header/index.tsx | 2 +- webapp/_webapp/src/views/chat/helper.ts | 2 +- webapp/_webapp/src/views/devtools/index.tsx | 23 ++++++++++--------- 21 files changed, 37 insertions(+), 56 deletions(-) diff --git a/webapp/_webapp/src/hooks/useLanguageModels.ts b/webapp/_webapp/src/hooks/useLanguageModels.ts index 918d9010..c0f2b98a 100644 --- a/webapp/_webapp/src/hooks/useLanguageModels.ts +++ b/webapp/_webapp/src/hooks/useLanguageModels.ts @@ -1,5 +1,5 @@ import { useCallback, useMemo } from "react"; -import { SupportedModel } from "../pkg/gen/apiclient/chat/v1/chat_pb"; +import { SupportedModel } from "../pkg/gen/apiclient/chat/v2/chat_pb"; import { useConversationStore } from "../stores/conversation/conversation-store"; import { useListSupportedModelsQuery } from "../query"; diff --git a/webapp/_webapp/src/hooks/useSendMessageStream.ts b/webapp/_webapp/src/hooks/useSendMessageStream.ts index a1329535..9fd65c7c 100644 --- a/webapp/_webapp/src/hooks/useSendMessageStream.ts +++ b/webapp/_webapp/src/hooks/useSendMessageStream.ts @@ -3,9 +3,8 @@ import { 
ConversationType, CreateConversationMessageStreamRequest, IncompleteIndicator, - LanguageModel, StreamFinalization, -} from "../pkg/gen/apiclient/chat/v1/chat_pb"; +} from "../pkg/gen/apiclient/chat/v2/chat_pb"; import { PlainMessage } from "../query/types"; import { useStreamingMessageStore } from "../stores/streaming-message-store"; import { getProjectId } from "../libs/helpers"; @@ -24,7 +23,7 @@ import { StreamInitialization, StreamPartBegin, StreamPartEnd, -} from "../pkg/gen/apiclient/chat/v1/chat_pb"; +} from "../pkg/gen/apiclient/chat/v2/chat_pb"; import { MessageEntry, MessageEntryStatus } from "../stores/conversation/types"; import { fromJson } from "@bufbuild/protobuf"; import { useConversationStore } from "../stores/conversation/conversation-store"; @@ -74,7 +73,6 @@ export function useSendMessageStream() { const request: PlainMessage = { projectId: getProjectId(), conversationId: currentConversation.id, - languageModel: LanguageModel.UNSPECIFIED, // backward compatibility modelSlug: currentConversation.modelSlug, userMessage: message, userSelectedText: selectedText, diff --git a/webapp/_webapp/src/query/api.ts b/webapp/_webapp/src/query/api.ts index 55ed5aa2..143aa2b8 100644 --- a/webapp/_webapp/src/query/api.ts +++ b/webapp/_webapp/src/query/api.ts @@ -1,4 +1,4 @@ -import apiclient, { RequestOptions } from "../libs/apiclient"; +import apiclient from "../libs/apiclient"; import { LoginByGoogleRequest, LoginByGoogleResponseSchema, @@ -10,8 +10,7 @@ import { RefreshTokenResponseSchema, } from "../pkg/gen/apiclient/auth/v1/auth_pb"; import { - CreateConversationMessageRequest, - CreateConversationMessageResponseSchema, + CreateConversationMessageStreamRequest, CreateConversationMessageStreamResponse, CreateConversationMessageStreamResponseSchema, DeleteConversationRequest, @@ -24,7 +23,7 @@ import { ListSupportedModelsResponseSchema, UpdateConversationRequest, UpdateConversationResponseSchema, -} from "../pkg/gen/apiclient/chat/v1/chat_pb"; +} from 
"../pkg/gen/apiclient/chat/v2/chat_pb"; import { GetProjectRequest, GetProjectResponseSchema, @@ -130,16 +129,8 @@ export const getConversation = async (data: PlainMessage return fromJson(GetConversationResponseSchema, response); }; -export const createConversationMessage = async ( - data: PlainMessage, - options?: RequestOptions, -) => { - const response = await apiclient.post(`/chats/conversations/messages`, data, options); - return fromJson(CreateConversationMessageResponseSchema, response); -}; - export const createConversationMessageStream = async ( - data: PlainMessage, + data: PlainMessage, onMessage: (chunk: CreateConversationMessageStreamResponse) => void, ) => { const stream = await apiclient.postStream(`/chats/conversations/messages/stream`, data); diff --git a/webapp/_webapp/src/query/index.ts b/webapp/_webapp/src/query/index.ts index ca60f155..d4e8b729 100644 --- a/webapp/_webapp/src/query/index.ts +++ b/webapp/_webapp/src/query/index.ts @@ -1,15 +1,13 @@ import { useMutation, useQuery } from "@tanstack/react-query"; import { - CreateConversationMessageResponse, DeleteConversationResponse, GetConversationResponse, ListConversationsResponse, ListSupportedModelsResponse, UpdateConversationResponse, -} from "../pkg/gen/apiclient/chat/v1/chat_pb"; +} from "../pkg/gen/apiclient/chat/v2/chat_pb"; import { UseMutationOptionsOverride, UseQueryOptionsOverride } from "./types"; import { - createConversationMessage, createPrompt, deleteConversation, deletePrompt, @@ -138,14 +136,7 @@ export const useGetConversationQuery = ( }); }; -export const useCreateConversationMessageMutation = ( - opts?: UseMutationOptionsOverride, -) => { - return useMutation({ - mutationFn: createConversationMessage, - ...opts, - }); -}; +// Removed: useCreateConversationMessageMutation - use streaming API instead export const useUpdateConversationMutation = (opts?: UseMutationOptionsOverride) => { return useMutation({ diff --git 
a/webapp/_webapp/src/stores/conversation/conversation-store.ts b/webapp/_webapp/src/stores/conversation/conversation-store.ts index 5e213448..fd80db8e 100644 --- a/webapp/_webapp/src/stores/conversation/conversation-store.ts +++ b/webapp/_webapp/src/stores/conversation/conversation-store.ts @@ -1,5 +1,5 @@ import { create } from "zustand"; -import { Conversation, ConversationSchema } from "../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Conversation, ConversationSchema } from "../../pkg/gen/apiclient/chat/v2/chat_pb"; import { fromJson } from "@bufbuild/protobuf"; interface ConversationStore { diff --git a/webapp/_webapp/src/stores/conversation/handlers/converter.ts b/webapp/_webapp/src/stores/conversation/handlers/converter.ts index 6d289445..1994c573 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/converter.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/converter.ts @@ -1,5 +1,5 @@ import { fromJson } from "@bufbuild/protobuf"; -import { Conversation, Message, MessageSchema } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Conversation, Message, MessageSchema } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { MessageEntry, MessageEntryStatus } from "../types"; import { useStreamingMessageStore } from "../../streaming-message-store"; import { flushSync } from "react-dom"; diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts b/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts index 83998dde..57513d9f 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts @@ -1,4 +1,4 @@ -import { IncompleteIndicator } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { IncompleteIndicator } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { useStreamingMessageStore } from "../../streaming-message-store"; export function 
handleIncompleteIndicator(incompleteIndicator: IncompleteIndicator) { diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts b/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts index 470f38ec..020cfb13 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts @@ -1,5 +1,5 @@ import { logError } from "../../../libs/logger"; -import { MessageChunk, MessageTypeAssistant } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { MessageChunk, MessageTypeAssistant } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { StreamingMessage } from "../../streaming-message-store"; import { MessageEntry, MessageEntryStatus } from "../types"; diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts index bd02109a..6bc4bc32 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts @@ -1,4 +1,4 @@ -import { MessageTypeAssistantSchema, StreamError } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { MessageTypeAssistantSchema, StreamError } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { errorToast } from "../../../libs/toasts"; import { OverleafAuthentication, OverleafVersionedDoc } from "../../../libs/overleaf-socket"; import { getProjectId } from "../../../libs/helpers"; diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts index 630ab4ee..be08d272 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts @@ -1,4 +1,4 @@ -import { StreamFinalization } from 
"../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { StreamFinalization } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { flushStreamingMessageToConversation } from "./converter"; export function handleStreamFinalization(_finalization: StreamFinalization) { diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts index 1970eac7..c6b84eff 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts @@ -1,4 +1,4 @@ -import { StreamInitialization } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { StreamInitialization } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { useStreamingMessageStore } from "../../streaming-message-store"; import { MessageEntryStatus } from "../types"; import { logWarn } from "../../../libs/logger"; diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts index e7d457a9..7806b276 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts @@ -1,4 +1,4 @@ -import { StreamPartBegin } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { StreamPartBegin } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { StreamingMessage } from "../../streaming-message-store"; import { MessageEntry, MessageEntryStatus } from "../types"; import { logError } from "../../../libs/logger"; diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts index 7e845f21..46e6bb10 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts +++ 
b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts @@ -3,7 +3,7 @@ import { MessageTypeToolCall, MessageTypeToolCallPrepareArguments, StreamPartEnd, -} from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +} from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { StreamingMessage } from "../../streaming-message-store"; import { logError } from "../../../libs/logger"; import { MessageEntryStatus } from "../types"; diff --git a/webapp/_webapp/src/stores/conversation/types.ts b/webapp/_webapp/src/stores/conversation/types.ts index 2a619611..273f291f 100644 --- a/webapp/_webapp/src/stores/conversation/types.ts +++ b/webapp/_webapp/src/stores/conversation/types.ts @@ -4,7 +4,7 @@ import { MessageTypeToolCallPrepareArguments, MessageTypeUnknown, MessageTypeUser, -} from "../../pkg/gen/apiclient/chat/v1/chat_pb"; +} from "../../pkg/gen/apiclient/chat/v2/chat_pb"; export enum MessageEntryStatus { PREPARING = "PREPARING", diff --git a/webapp/_webapp/src/stores/streaming-message-store.ts b/webapp/_webapp/src/stores/streaming-message-store.ts index 3518f55d..a7c12f08 100644 --- a/webapp/_webapp/src/stores/streaming-message-store.ts +++ b/webapp/_webapp/src/stores/streaming-message-store.ts @@ -3,7 +3,7 @@ import { create } from "zustand"; import { MessageEntry } from "./conversation/types"; import { flushSync } from "react-dom"; -import { IncompleteIndicator } from "../pkg/gen/apiclient/chat/v1/chat_pb"; +import { IncompleteIndicator } from "../pkg/gen/apiclient/chat/v2/chat_pb"; import { SetterResetterStore } from "./types"; export type StreamingMessage = { diff --git a/webapp/_webapp/src/views/chat/body/index.tsx b/webapp/_webapp/src/views/chat/body/index.tsx index 85dcbb69..ecc492d6 100644 --- a/webapp/_webapp/src/views/chat/body/index.tsx +++ b/webapp/_webapp/src/views/chat/body/index.tsx @@ -1,6 +1,6 @@ import { useEffect, useRef, useState } from "react"; import { MessageCard } from "../../../components/message-card"; -import { Conversation } from 
"../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Conversation } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { filterVisibleMessages, getPrevUserMessage, isEmptyConversation, messageToMessageEntry } from "../helper"; import { StatusIndicator } from "./status-indicator"; import { EmptyView } from "./empty-view"; diff --git a/webapp/_webapp/src/views/chat/body/status-indicator.tsx b/webapp/_webapp/src/views/chat/body/status-indicator.tsx index 92b9c740..046f0049 100644 --- a/webapp/_webapp/src/views/chat/body/status-indicator.tsx +++ b/webapp/_webapp/src/views/chat/body/status-indicator.tsx @@ -1,6 +1,6 @@ import { LoadingIndicator } from "../../../components/loading-indicator"; import { UnknownEntryMessageContainer } from "../../../components/message-entry-container/unknown-entry"; -import { Conversation } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Conversation } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { MessageEntryStatus } from "../../../stores/conversation/types"; import { useSocketStore } from "../../../stores/socket-store"; import { useStreamingMessageStore } from "../../../stores/streaming-message-store"; diff --git a/webapp/_webapp/src/views/chat/header/chat-history-modal.tsx b/webapp/_webapp/src/views/chat/header/chat-history-modal.tsx index 75c46b77..1bd6659a 100644 --- a/webapp/_webapp/src/views/chat/header/chat-history-modal.tsx +++ b/webapp/_webapp/src/views/chat/header/chat-history-modal.tsx @@ -1,7 +1,7 @@ import { Input, Listbox, ListboxItem, ListboxSection, Tooltip } from "@heroui/react"; import { Icon } from "@iconify/react"; import { useEffect, useRef, useState } from "react"; -import { Conversation } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Conversation } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { getConversation, updateConversation } from "../../../query/api"; import { errorToast } from "../../../libs/toasts"; import { useDeleteConversationMutation, 
useListConversationsQuery } from "../../../query"; diff --git a/webapp/_webapp/src/views/chat/header/index.tsx b/webapp/_webapp/src/views/chat/header/index.tsx index 6efa3297..d3cc907f 100644 --- a/webapp/_webapp/src/views/chat/header/index.tsx +++ b/webapp/_webapp/src/views/chat/header/index.tsx @@ -5,7 +5,7 @@ import { useConversationStore } from "../../../stores/conversation/conversation- import { flushSync } from "react-dom"; import { useStreamingMessageStore } from "../../../stores/streaming-message-store"; import { useConversationUiStore } from "../../../stores/conversation/conversation-ui-store"; -import { Message } from "../../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { Message } from "../../../pkg/gen/apiclient/chat/v2/chat_pb"; import { ChatHistoryModal } from "./chat-history-modal"; export const NewConversation = () => { diff --git a/webapp/_webapp/src/views/chat/helper.ts b/webapp/_webapp/src/views/chat/helper.ts index 118876c7..0bfd32ac 100644 --- a/webapp/_webapp/src/views/chat/helper.ts +++ b/webapp/_webapp/src/views/chat/helper.ts @@ -6,7 +6,7 @@ import { MessageTypeToolCallPrepareArguments, MessageTypeUnknown, MessageTypeUser, -} from "../../pkg/gen/apiclient/chat/v1/chat_pb"; +} from "../../pkg/gen/apiclient/chat/v2/chat_pb"; import { useConversationStore } from "../../stores/conversation/conversation-store"; import { MessageEntry, MessageEntryStatus } from "../../stores/conversation/types"; import { useStreamingMessageStore } from "../../stores/streaming-message-store"; diff --git a/webapp/_webapp/src/views/devtools/index.tsx b/webapp/_webapp/src/views/devtools/index.tsx index 40a267a4..6ee477f4 100644 --- a/webapp/_webapp/src/views/devtools/index.tsx +++ b/webapp/_webapp/src/views/devtools/index.tsx @@ -5,7 +5,7 @@ import { useStreamingMessageStore } from "../../stores/streaming-message-store"; import { MessageEntry, MessageEntryStatus } from "../../stores/conversation/types"; import { useConversationStore } from 
"../../stores/conversation/conversation-store"; import { fromJson } from "@bufbuild/protobuf"; -import { MessageSchema } from "../../pkg/gen/apiclient/chat/v1/chat_pb"; +import { MessageSchema } from "../../pkg/gen/apiclient/chat/v2/chat_pb"; import { isEmptyConversation } from "../chat/helper"; import { useState } from "react"; @@ -117,7 +117,7 @@ export const DevTools = () => { user: { content: "User Message Preparing", selectedText: selectedText ?? "", - $typeName: "chat.v1.MessageTypeUser", + $typeName: "chat.v2.MessageTypeUser", }, }; setStreamingMessage({ ...streamingMessage, parts: [...streamingMessage.parts, messageEntry] }); @@ -126,7 +126,7 @@ export const DevTools = () => { part.messageId === messageEntry.messageId ? { ...part, - user: { ...part.user, content: "User Message Prepared", $typeName: "chat.v1.MessageTypeUser" }, + user: { ...part.user, content: "User Message Prepared", $typeName: "chat.v2.MessageTypeUser" }, status: part.status === MessageEntryStatus.PREPARING ? MessageEntryStatus.FINALIZED : part.status, } : part, @@ -141,7 +141,7 @@ export const DevTools = () => { toolCallPrepareArguments: { name: "paper_score", args: JSON.stringify({ paper_id: "123" }), - $typeName: "chat.v1.MessageTypeToolCallPrepareArguments", + $typeName: "chat.v2.MessageTypeToolCallPrepareArguments", }, }; updateStreamingMessage((prev) => ({ ...prev, parts: [...prev.parts, messageEntry] })); @@ -154,7 +154,7 @@ export const DevTools = () => { toolCallPrepareArguments: { name: "paper_score", args: JSON.stringify({ paper_id: "123" }), - $typeName: "chat.v1.MessageTypeToolCallPrepareArguments", + $typeName: "chat.v2.MessageTypeToolCallPrepareArguments", }, } : part, @@ -173,14 +173,14 @@ export const DevTools = () => { args: JSON.stringify({ name: "Junyi" }), result: "preparing", error: "", - $typeName: "chat.v1.MessageTypeToolCall", + $typeName: "chat.v2.MessageTypeToolCall", } : { name: "paper_score", args: JSON.stringify({ paper_id: "123" }), result: '{ "percentile": 
0.74829 }123', error: "", - $typeName: "chat.v1.MessageTypeToolCall", + $typeName: "chat.v2.MessageTypeToolCall", }, }; updateStreamingMessage((prev) => ({ ...prev, parts: [...prev.parts, messageEntry] })); @@ -191,8 +191,8 @@ export const DevTools = () => { ...part, status: part.status === MessageEntryStatus.PREPARING ? MessageEntryStatus.FINALIZED : part.status, toolCall: isGreeting - ? { ...part.toolCall, result: "Hello, Junyi!", $typeName: "chat.v1.MessageTypeToolCall" } - : { ...part.toolCall, $typeName: "chat.v1.MessageTypeToolCall" }, + ? { ...part.toolCall, result: "Hello, Junyi!", $typeName: "chat.v2.MessageTypeToolCall" } + : { ...part.toolCall, $typeName: "chat.v2.MessageTypeToolCall" }, } : part, ) as MessageEntry[]; @@ -203,7 +203,7 @@ export const DevTools = () => { const messageEntry: MessageEntry = { messageId: randomUUID(), status: MessageEntryStatus.PREPARING, - assistant: { content: "Assistant Response Preparing " + randomText(), $typeName: "chat.v1.MessageTypeAssistant" }, + assistant: { content: "Assistant Response Preparing " + randomText(), modelSlug: "gpt-4.1", $typeName: "chat.v2.MessageTypeAssistant" }, }; updateStreamingMessage((prev) => ({ ...prev, parts: [...prev.parts, messageEntry] })); withDelay(() => { @@ -215,7 +215,8 @@ export const DevTools = () => { assistant: { ...part.assistant, content: "Assistant Response Finalized " + randomText(), - $typeName: "chat.v1.MessageTypeAssistant", + modelSlug: "gpt-4.1", + $typeName: "chat.v2.MessageTypeAssistant", }, } : part, From cc60ae6e758e60e539a277737cb72e455a84bcb8 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Thu, 18 Dec 2025 23:30:05 +0800 Subject: [PATCH 09/28] lint + format --- webapp/_webapp/src/background.ts | 2 ++ .../tools/paper-score-comment/index.tsx | 3 +-- .../message-entry-container/tools/utils/common.tsx | 4 +++- webapp/_webapp/src/intermediate.ts | 3 ++- webapp/_webapp/src/libs/apiclient.ts | 3 +-- webapp/_webapp/src/libs/google-analytics.ts | 10 ++++------ 
webapp/_webapp/src/libs/permissions.ts | 4 ++++ webapp/_webapp/src/main.tsx | 14 +++++++++----- webapp/_webapp/src/stores/selection-store.ts | 5 +++-- webapp/_webapp/src/views/devtools/index.tsx | 6 +++++- .../HostPermissionWidget/useHostPermissionStore.ts | 3 ++- .../_webapp/src/views/login/login-with-apple.tsx | 3 +-- .../_webapp/src/views/login/login-with-google.tsx | 11 ++++++++++- .../src/views/login/login-with-overleaf.tsx | 11 ++++++++++- .../src/views/settings/setting-text-input.tsx | 6 +++--- 15 files changed, 60 insertions(+), 28 deletions(-) diff --git a/webapp/_webapp/src/background.ts b/webapp/_webapp/src/background.ts index 74847df2..959a456f 100644 --- a/webapp/_webapp/src/background.ts +++ b/webapp/_webapp/src/background.ts @@ -83,11 +83,13 @@ const registerContentScriptsIfPermitted = async () => { try { const { origins = [] } = await chrome.permissions.getAll(); if (!origins.length) { + // eslint-disable-next-line no-console console.log("[PaperDebugger] No origins found, skipping content script registration"); return; } await registerContentScripts(origins); } catch (error) { + // eslint-disable-next-line no-console console.error("[PaperDebugger] Unable to register content scripts", error); } }; diff --git a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx b/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx index 6c97e37d..473ad0fb 100644 --- a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx +++ b/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx @@ -49,8 +49,7 @@ export const PaperScoreCommentCard = ({ messageId, message, preparing, animated ); setSelectedComments(new Set(allCommentIds)); } - } catch (error) { - // eslint-disable-line @typescript-eslint/no-unused-vars + } catch { // Ignore parsing errors here, they'll be handled in the render } } diff --git 
a/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx b/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx index 92e09f2c..58cdc8ea 100644 --- a/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx +++ b/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx @@ -22,6 +22,7 @@ export const UNKNOWN_JSONRPC_RESULT: JsonRpcResult = { }, }; +// eslint-disable-next-line @typescript-eslint/no-explicit-any const isValidJsonRpcResult = (obj: any): obj is JsonRpcResult => { // Check if obj is an object and not null if (typeof obj !== "object" || obj === null) { @@ -85,7 +86,8 @@ export const parseJsonRpcResult = (message: string): JsonRpcResult | undefined = } return undefined; - } catch (error) { + } catch { + // Error parsing JSONRPC result return undefined; } }; diff --git a/webapp/_webapp/src/intermediate.ts b/webapp/_webapp/src/intermediate.ts index 70d2b617..c9becc73 100644 --- a/webapp/_webapp/src/intermediate.ts +++ b/webapp/_webapp/src/intermediate.ts @@ -90,7 +90,8 @@ function makeFunction(handlerName: string, opts?: MakeFunctionOpts): (args let getCookies: (domain: string) => Promise<{ session: string; gclb: string }>; if (import.meta.env.DEV) { - getCookies = async (_: string) => { + // eslint-disable-next-line @typescript-eslint/no-unused-vars + getCookies = async (_domain: string) => { return { session: localStorage.getItem("pd.auth.overleafSession") ?? "", gclb: localStorage.getItem("pd.auth.gclb") ?? 
"", diff --git a/webapp/_webapp/src/libs/apiclient.ts b/webapp/_webapp/src/libs/apiclient.ts index f02e932e..bfbb0818 100644 --- a/webapp/_webapp/src/libs/apiclient.ts +++ b/webapp/_webapp/src/libs/apiclient.ts @@ -29,7 +29,6 @@ class ApiClient { updateBaseURL(baseURL: string): void { this.axiosInstance.defaults.baseURL = baseURL; localStorage.setItem(LOCAL_STORAGE_KEY, baseURL); - console.log("apiclient baseURL updated to", baseURL); } addListener(event: "tokenRefreshed", listener: (args: { token: string; refreshToken: string }) => void): void { @@ -194,7 +193,7 @@ const LOCAL_STORAGE_KEY = "pd.devtool.endpoint"; export const getEndpointFromLocalStorage = () => { try { return localStorage.getItem(LOCAL_STORAGE_KEY) || DEFAULT_ENDPOINT; - } catch (error) { + } catch { // Fallback if localStorage is not available (e.g., in SSR) return DEFAULT_ENDPOINT; } diff --git a/webapp/_webapp/src/libs/google-analytics.ts b/webapp/_webapp/src/libs/google-analytics.ts index 9abbdb47..2085f8c2 100644 --- a/webapp/_webapp/src/libs/google-analytics.ts +++ b/webapp/_webapp/src/libs/google-analytics.ts @@ -53,17 +53,15 @@ class Analytics { ], }), }, - ).catch((_) => { - // eslint-disable-line @typescript-eslint/no-unused-vars - // logInfo("Google Analytics request failed with an exception", e); + ).catch(() => { + // logInfo("Google Analytics request failed with an exception"); }); if (!this.debug) { return; } - } catch (e) { - // eslint-disable-line @typescript-eslint/no-unused-vars - // logInfo("Google Analytics request failed with an exception", e); + } catch { + // logInfo("Google Analytics request failed with an exception"); } } diff --git a/webapp/_webapp/src/libs/permissions.ts b/webapp/_webapp/src/libs/permissions.ts index fc2d5eeb..5eeea3a4 100644 --- a/webapp/_webapp/src/libs/permissions.ts +++ b/webapp/_webapp/src/libs/permissions.ts @@ -3,12 +3,14 @@ export async function registerContentScripts(origins?: string[]) { try { const resolvedOrigins = origins ?? 
(await chrome.permissions.getAll()).origins ?? []; if (resolvedOrigins.length === 0) { + // eslint-disable-next-line no-console console.log("[PaperDebugger] No origins found, skipping content script registration"); return; } const scriptIds = (await chrome.scripting.getRegisteredContentScripts()).map((script) => script.id); if (scriptIds.length > 0) { + // eslint-disable-next-line no-console console.log("[PaperDebugger] Unregistering dynamic content scripts", scriptIds); await chrome.scripting.unregisterContentScripts({ ids: scriptIds }); } @@ -30,8 +32,10 @@ export async function registerContentScripts(origins?: string[]) { }, ]); + // eslint-disable-next-line no-console console.log("[PaperDebugger] Registration complete", resolvedOrigins); } catch (error) { + // eslint-disable-next-line no-console console.error("[PaperDebugger] Failed to register content scripts", error); } } diff --git a/webapp/_webapp/src/main.tsx b/webapp/_webapp/src/main.tsx index bdec8369..1e8a0586 100644 --- a/webapp/_webapp/src/main.tsx +++ b/webapp/_webapp/src/main.tsx @@ -74,12 +74,10 @@ export const Main = () => { if (disableLineWrap) { onElementAppeared(".cm-lineWrapping", (editor) => { editor.classList.remove("cm-lineWrapping"); - console.log("disable line wrap"); }); } else { onElementAppeared(".cm-content", (editor) => { editor.classList.add("cm-lineWrapping"); - console.log("enable line wrap"); }); } }, [disableLineWrap]); @@ -111,7 +109,15 @@ export const Main = () => { setSelectionRange(lastSelectionRange); setIsOpen(true); clearOverleafSelection(); - }, [setSelectedText, setSelectionRange, setIsOpen, lastSelectedText, lastSelectionRange, clearOverleafSelection]); + }, [ + setActiveTab, + setSelectedText, + setSelectionRange, + setIsOpen, + lastSelectedText, + lastSelectionRange, + clearOverleafSelection, + ]); useEffect(() => { const handleKeyDown = (event: KeyboardEvent) => { @@ -179,8 +185,6 @@ export const Main = () => { ); }; -console.log("[PaperDebugger] PaperDebugger 
injected, find toolbar-left or ide-redesign-toolbar-menu-bar to add button"); - if (!import.meta.env.DEV) { onElementAppeared(".toolbar-left .toolbar-item, .ide-redesign-toolbar-menu-bar", () => { logInfo("initializing"); diff --git a/webapp/_webapp/src/stores/selection-store.ts b/webapp/_webapp/src/stores/selection-store.ts index ec5a859a..84a02c43 100644 --- a/webapp/_webapp/src/stores/selection-store.ts +++ b/webapp/_webapp/src/stores/selection-store.ts @@ -37,12 +37,13 @@ export const useSelectionStore = create((set) => ({ set({ selectedText: null, selectionRange: null }); }, clearOverleafSelection: () => { - let cmContentElement = document.querySelector(".cm-content"); + const cmContentElement = document.querySelector(".cm-content"); if (!cmContentElement) { return; } - let editorViewInstance = (cmContentElement as any).cmView.view as EditorView; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const editorViewInstance = (cmContentElement as any).cmView.view as EditorView; if (!editorViewInstance) { return; } diff --git a/webapp/_webapp/src/views/devtools/index.tsx b/webapp/_webapp/src/views/devtools/index.tsx index 6ee477f4..279c2e35 100644 --- a/webapp/_webapp/src/views/devtools/index.tsx +++ b/webapp/_webapp/src/views/devtools/index.tsx @@ -203,7 +203,11 @@ export const DevTools = () => { const messageEntry: MessageEntry = { messageId: randomUUID(), status: MessageEntryStatus.PREPARING, - assistant: { content: "Assistant Response Preparing " + randomText(), modelSlug: "gpt-4.1", $typeName: "chat.v2.MessageTypeAssistant" }, + assistant: { + content: "Assistant Response Preparing " + randomText(), + modelSlug: "gpt-4.1", + $typeName: "chat.v2.MessageTypeAssistant", + }, }; updateStreamingMessage((prev) => ({ ...prev, parts: [...prev.parts, messageEntry] })); withDelay(() => { diff --git a/webapp/_webapp/src/views/extension-settings/components/HostPermissionWidget/useHostPermissionStore.ts 
b/webapp/_webapp/src/views/extension-settings/components/HostPermissionWidget/useHostPermissionStore.ts index 9e387cd6..4ac7f926 100644 --- a/webapp/_webapp/src/views/extension-settings/components/HostPermissionWidget/useHostPermissionStore.ts +++ b/webapp/_webapp/src/views/extension-settings/components/HostPermissionWidget/useHostPermissionStore.ts @@ -35,7 +35,7 @@ const normalizeWildcardPattern = (url: string) => { return { valid: false as const, error: "URL must start with http://, https://, or *://" }; } return { valid: true as const, origin: `${urlObj.protocol}//${urlObj.host}/*` }; - } catch (e) { + } catch { return { valid: false as const, error: @@ -57,6 +57,7 @@ interface HostPermissionState { } const handleError = (error: unknown, defaultMessage: string): string => { + // eslint-disable-next-line no-console console.error(defaultMessage, error); return error instanceof Error ? error.message : defaultMessage; }; diff --git a/webapp/_webapp/src/views/login/login-with-apple.tsx b/webapp/_webapp/src/views/login/login-with-apple.tsx index 740d07af..8b348210 100644 --- a/webapp/_webapp/src/views/login/login-with-apple.tsx +++ b/webapp/_webapp/src/views/login/login-with-apple.tsx @@ -9,7 +9,6 @@ interface LoginWithAppleProps { } export default function LoginWithApple({ isLoginLoading, setIsLoginLoading, setErrorMessage }: LoginWithAppleProps) { - // eslint-disable-next-line @typescript-eslint/no-unused-vars const onAppleLogin = useCallback(async () => { try { setErrorMessage(""); @@ -21,7 +20,7 @@ export default function LoginWithApple({ isLoginLoading, setIsLoginLoading, setE } finally { setIsLoginLoading(false); } - }, []); + }, [setErrorMessage, setIsLoginLoading]); return (
(settingKey: K) { setValue(stringValue); setOriginalValue(stringValue); } - }, [settings, settingKey]); + }, [settings]); // settingKey is an outer scope value, not a dependency const valueChanged = value !== originalValue; @@ -47,7 +47,7 @@ export function createSettingsTextInput(settingKey: K) { await updateSettings({ [settingKey]: value.trim() } as Partial>); setOriginalValue(value.trim()); setIsEditing(false); - }, [value, updateSettings, settingKey]); + }, [value, updateSettings]); // settingKey is an outer scope value, not a dependency const handleEdit = useCallback(() => { setIsEditing(true); @@ -71,7 +71,7 @@ export function createSettingsTextInput(settingKey: K) { handleCancel(); } }, - [valueChanged, isUpdating, settingKey, saveSettings, handleCancel], + [valueChanged, isUpdating, saveSettings, handleCancel], // settingKey is an outer scope value, not a dependency ); const inputClassName = cn( From 89b307adbd413b9ed78adb2c69b0130e906588f5 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 01:39:22 +0800 Subject: [PATCH 10/28] fix: types --- .../create_conversation_message_stream.go | 12 +- internal/api/chat/list_supported_models.go | 49 --------- internal/api/chat/list_supported_models_v2.go | 104 ++++++++++++++++++ internal/api/chat/server.go | 43 ++------ internal/api/chat/server_v2.go | 37 +++++++ internal/api/grpc.go | 3 + internal/api/server.go | 6 + internal/wire.go | 1 + internal/wire_gen.go | 9 +- webapp/_webapp/src/libs/apiclient.ts | 56 +++++++--- webapp/_webapp/src/main.tsx | 5 +- webapp/_webapp/src/query/api.ts | 14 +-- webapp/_webapp/src/stores/auth-store.ts | 4 +- .../src/views/login/advanced-settings.tsx | 5 +- .../sections/user-developer-tools.tsx | 5 +- 15 files changed, 232 insertions(+), 121 deletions(-) create mode 100644 internal/api/chat/list_supported_models_v2.go create mode 100644 internal/api/chat/server_v2.go diff --git a/internal/api/chat/create_conversation_message_stream.go 
b/internal/api/chat/create_conversation_message_stream.go index 01149e1e..8916f8d3 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -16,7 +16,7 @@ import ( "google.golang.org/protobuf/encoding/protojson" ) -func (s *ChatServer) sendStreamError(stream chatv1.ChatService_CreateConversationMessageStreamServer, err error) error { +func (s *ChatServerV1) sendStreamError(stream chatv1.ChatService_CreateConversationMessageStreamServer, err error) error { return stream.Send(&chatv1.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamError{ StreamError: &chatv1.StreamError{ @@ -31,7 +31,7 @@ func (s *ChatServer) sendStreamError(stream chatv1.ChatService_CreateConversatio // 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList) // buildUserMessage constructs both the user-facing message and the OpenAI input message -func (s *ChatServer) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv1.ConversationType) (*chatv1.Message, *responses.ResponseInputItemUnionParam, error) { +func (s *ChatServerV1) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv1.ConversationType) (*chatv1.Message, *responses.ResponseInputItemUnionParam, error) { userPrompt, err := s.chatServiceV1.GetPrompt(ctx, userMessage, userSelectedText, conversationType) if err != nil { return nil, nil, err @@ -77,7 +77,7 @@ func (s *ChatServer) buildUserMessage(ctx context.Context, userMessage, userSele } // buildSystemMessage constructs both the user-facing system message and the OpenAI input message -func (s *ChatServer) buildSystemMessage(systemPrompt string) (*chatv1.Message, *responses.ResponseInputItemUnionParam) { +func (s *ChatServerV1) buildSystemMessage(systemPrompt string) (*chatv1.Message, *responses.ResponseInputItemUnionParam) { inappMessage := &chatv1.Message{ 
MessageId: "pd_msg_system_" + uuid.New().String(), Payload: &chatv1.MessagePayload{ @@ -116,7 +116,7 @@ func convertToBSON(msg *chatv1.Message) (bson.M, error) { // 创建对话并写入数据库 // 返回 Conversation 对象 -func (s *ChatServer) createConversation( +func (s *ChatServerV1) createConversation( ctx context.Context, userId bson.ObjectID, projectId string, @@ -151,7 +151,7 @@ func (s *ChatServer) createConversation( // 追加消息到对话并写入数据库 // 返回 Conversation 对象 -func (s *ChatServer) appendConversationMessage( +func (s *ChatServerV1) appendConversationMessage( ctx context.Context, userId bson.ObjectID, conversationId string, @@ -190,7 +190,7 @@ func (s *ChatServer) appendConversationMessage( // 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 // conversationType 可以在一次 conversation 中多次切换 -func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { +func (s *ChatServerV1) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { actor, err := contextutil.GetActor(ctx) if err != nil { return ctx, nil, nil, err diff --git a/internal/api/chat/list_supported_models.go b/internal/api/chat/list_supported_models.go index 6e4db6d7..17e01e72 100644 --- a/internal/api/chat/list_supported_models.go +++ b/internal/api/chat/list_supported_models.go @@ -6,7 +6,6 @@ import ( "paperdebugger/internal/libs/contextutil" chatv1 "paperdebugger/pkg/gen/api/chat/v1" - chatv2 "paperdebugger/pkg/gen/api/chat/v2" "github.com/openai/openai-go/v2" ) @@ -103,51 +102,3 @@ func (s *ChatServerV1) ListSupportedModels( Models: models, }, nil } - -func (s *ChatServerV2) ListSupportedModels( - ctx context.Context, - req 
*chatv2.ListSupportedModelsRequest, -) (*chatv2.ListSupportedModelsResponse, error) { - actor, err := contextutil.GetActor(ctx) - if err != nil { - return nil, err - } - - settings, err := s.userService.GetUserSettings(ctx, actor.ID) - if err != nil { - return nil, err - } - - var models []*chatv2.SupportedModel - // Copied from V1 logic - if strings.TrimSpace(settings.OpenAIAPIKey) == "" { - models = []*chatv2.SupportedModel{ - {Name: "GPT-4o", Slug: openai.ChatModelGPT4o}, - {Name: "GPT-4.1", Slug: openai.ChatModelGPT4_1}, - {Name: "GPT-4.1-mini", Slug: openai.ChatModelGPT4_1Mini}, - } - } else { - models = []*chatv2.SupportedModel{ - {Name: "GPT 4o", Slug: openai.ChatModelGPT4o}, - {Name: "GPT 4.1", Slug: openai.ChatModelGPT4_1}, - {Name: "GPT 4.1 mini", Slug: openai.ChatModelGPT4_1Mini}, - {Name: "GPT 5", Slug: openai.ChatModelGPT5}, - {Name: "GPT 5 mini", Slug: openai.ChatModelGPT5Mini}, - {Name: "GPT 5 nano", Slug: openai.ChatModelGPT5Nano}, - {Name: "GPT 5 Chat Latest", Slug: openai.ChatModelGPT5ChatLatest}, - {Name: "o1", Slug: openai.ChatModelO1}, - {Name: "o1 mini", Slug: openai.ChatModelO1Mini}, - {Name: "o3", Slug: openai.ChatModelO3}, - {Name: "o3 mini", Slug: openai.ChatModelO3Mini}, - {Name: "o4 mini", Slug: openai.ChatModelO4Mini}, - {Name: "Codex Mini Latest", Slug: openai.ChatModelCodexMiniLatest}, - } - } - - return &chatv2.ListSupportedModelsResponse{ - Models: models, - }, nil -} - -// CreateConversationMessageStream is more complex as it involves streaming response mapping. -// I'll implement it separately or in the same file. 
diff --git a/internal/api/chat/list_supported_models_v2.go b/internal/api/chat/list_supported_models_v2.go new file mode 100644 index 00000000..00a607e9 --- /dev/null +++ b/internal/api/chat/list_supported_models_v2.go @@ -0,0 +1,104 @@ +package chat + +import ( + "context" + "strings" + + "paperdebugger/internal/libs/contextutil" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v3" +) + +func (s *ChatServerV2) ListSupportedModels( + ctx context.Context, + req *chatv2.ListSupportedModelsRequest, +) (*chatv2.ListSupportedModelsResponse, error) { + actor, err := contextutil.GetActor(ctx) + if err != nil { + return nil, err + } + + settings, err := s.userService.GetUserSettings(ctx, actor.ID) + if err != nil { + return nil, err + } + + var models []*chatv2.SupportedModel + if strings.TrimSpace(settings.OpenAIAPIKey) == "" { + models = []*chatv2.SupportedModel{ + { + + Name: "GPT-4o", + Slug: openai.ChatModelGPT4o, + }, + { + Name: "GPT-4.1", + Slug: openai.ChatModelGPT4_1, + }, + { + Name: "GPT-4.1-mini", + Slug: openai.ChatModelGPT4_1Mini, + }, + } + } else { + models = []*chatv2.SupportedModel{ + { + Name: "GPT 4o", + Slug: openai.ChatModelGPT4o, + }, + { + Name: "GPT 4.1", + Slug: openai.ChatModelGPT4_1, + }, + { + Name: "GPT 4.1 mini", + Slug: openai.ChatModelGPT4_1Mini, + }, + { + Name: "GPT 5", + Slug: openai.ChatModelGPT5, + }, + { + Name: "GPT 5 mini", + Slug: openai.ChatModelGPT5Mini, + }, + { + Name: "GPT 5 nano", + Slug: openai.ChatModelGPT5Nano, + }, + { + Name: "GPT 5 Chat Latest", + Slug: openai.ChatModelGPT5ChatLatest, + }, + { + Name: "o1", + Slug: openai.ChatModelO1, + }, + { + Name: "o1 mini", + Slug: openai.ChatModelO1Mini, + }, + { + Name: "o3", + Slug: openai.ChatModelO3, + }, + { + Name: "o3 mini", + Slug: openai.ChatModelO3Mini, + }, + { + Name: "o4 mini", + Slug: openai.ChatModelO4Mini, + }, + { + Name: "Codex Mini Latest", + Slug: openai.ChatModelCodexMiniLatest, + }, + } + } + + return 
&chatv2.ListSupportedModelsResponse{ + Models: models, + }, nil +} diff --git a/internal/api/chat/server.go b/internal/api/chat/server.go index e45621fe..332ca437 100644 --- a/internal/api/chat/server.go +++ b/internal/api/chat/server.go @@ -6,59 +6,32 @@ import ( "paperdebugger/internal/services" aiclient "paperdebugger/internal/services/toolkit/client" chatv1 "paperdebugger/pkg/gen/api/chat/v1" - chatv2 "paperdebugger/pkg/gen/api/chat/v2" ) -type ChatServer struct { +type ChatServerV1 struct { + chatv1.UnimplementedChatServiceServer aiClientV1 *aiclient.AIClient - aiClientV2 *aiclient.AIClientV2 chatServiceV1 *services.ChatService - chatServiceV2 *services.ChatServiceV2 projectService *services.ProjectService userService *services.UserService logger *logger.Logger cfg *cfg.Cfg } -type ChatServerV1 struct { - chatv1.UnimplementedChatServiceServer - *ChatServer -} - -type ChatServerV2 struct { - chatv2.UnimplementedChatServiceServer - *ChatServer -} - func NewChatServer( aiClientV1 *aiclient.AIClient, - aiClientV2 *aiclient.AIClientV2, chatService *services.ChatService, - chatServiceV2 *services.ChatServiceV2, projectService *services.ProjectService, userService *services.UserService, logger *logger.Logger, cfg *cfg.Cfg, ) chatv1.ChatServiceServer { return &ChatServerV1{ - ChatServer: &ChatServer{ - aiClientV1: aiClientV1, - aiClientV2: aiClientV2, - projectService: projectService, - userService: userService, - logger: logger, - chatServiceV1: chatService, - chatServiceV2: chatServiceV2, - cfg: cfg, - }, - } -} - -func NewChatServerV2(v1Server chatv1.ChatServiceServer, chatService *services.ChatServiceV2) chatv2.ChatServiceServer { - if s, ok := v1Server.(*ChatServerV1); ok { - return &ChatServerV2{ - ChatServer: s.ChatServer, - } + aiClientV1: aiClientV1, + projectService: projectService, + userService: userService, + logger: logger, + chatServiceV1: chatService, + cfg: cfg, } - return nil } diff --git a/internal/api/chat/server_v2.go 
b/internal/api/chat/server_v2.go new file mode 100644 index 00000000..be3cd379 --- /dev/null +++ b/internal/api/chat/server_v2.go @@ -0,0 +1,37 @@ +package chat + +import ( + "paperdebugger/internal/libs/cfg" + "paperdebugger/internal/libs/logger" + "paperdebugger/internal/services" + aiclient "paperdebugger/internal/services/toolkit/client" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" +) + +type ChatServerV2 struct { + chatv2.UnimplementedChatServiceServer + aiClientV2 *aiclient.AIClientV2 + chatServiceV2 *services.ChatServiceV2 + projectService *services.ProjectService + userService *services.UserService + logger *logger.Logger + cfg *cfg.Cfg +} + +func NewChatServerV2( + aiClientV2 *aiclient.AIClientV2, + chatServiceV2 *services.ChatServiceV2, + projectService *services.ProjectService, + userService *services.UserService, + logger *logger.Logger, + cfg *cfg.Cfg, +) chatv2.ChatServiceServer { + return &ChatServerV2{ + aiClientV2: aiClientV2, + projectService: projectService, + userService: userService, + logger: logger, + chatServiceV2: chatServiceV2, + cfg: cfg, + } +} diff --git a/internal/api/grpc.go b/internal/api/grpc.go index 1b8443b2..ed9dc2b0 100644 --- a/internal/api/grpc.go +++ b/internal/api/grpc.go @@ -12,6 +12,7 @@ import ( "paperdebugger/internal/services" authv1 "paperdebugger/pkg/gen/api/auth/v1" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" commentv1 "paperdebugger/pkg/gen/api/comment/v1" projectv1 "paperdebugger/pkg/gen/api/project/v1" userv1 "paperdebugger/pkg/gen/api/user/v1" @@ -101,6 +102,7 @@ func NewGrpcServer( cfg *cfg.Cfg, authServer authv1.AuthServiceServer, chatServer chatv1.ChatServiceServer, + chatServerV2 chatv2.ChatServiceServer, userServer userv1.UserServiceServer, projectServer projectv1.ProjectServiceServer, commentServer commentv1.CommentServiceServer, @@ -115,6 +117,7 @@ func NewGrpcServer( authv1.RegisterAuthServiceServer(grpcServer.Server, authServer) 
chatv1.RegisterChatServiceServer(grpcServer.Server, chatServer) + chatv2.RegisterChatServiceServer(grpcServer.Server, chatServerV2) userv1.RegisterUserServiceServer(grpcServer.Server, userServer) projectv1.RegisterProjectServiceServer(grpcServer.Server, projectServer) commentv1.RegisterCommentServiceServer(grpcServer.Server, commentServer) diff --git a/internal/api/server.go b/internal/api/server.go index 3203148b..405ec61b 100644 --- a/internal/api/server.go +++ b/internal/api/server.go @@ -13,6 +13,7 @@ import ( "paperdebugger/internal/libs/shared" authv1 "paperdebugger/pkg/gen/api/auth/v1" chatv1 "paperdebugger/pkg/gen/api/chat/v1" + chatv2 "paperdebugger/pkg/gen/api/chat/v2" commentv1 "paperdebugger/pkg/gen/api/comment/v1" projectv1 "paperdebugger/pkg/gen/api/project/v1" sharedv1 "paperdebugger/pkg/gen/api/shared/v1" @@ -84,6 +85,11 @@ func (s *Server) Run(addr string) { s.logger.Fatalf("failed to register chat service grpc gateway: %v", err) return } + err = chatv2.RegisterChatServiceHandler(context.Background(), mux, client) + if err != nil { + s.logger.Fatalf("failed to register chat v2 service grpc gateway: %v", err) + return + } err = userv1.RegisterUserServiceHandler(context.Background(), mux, client) if err != nil { s.logger.Fatalf("failed to register user service grpc gateway: %v", err) diff --git a/internal/wire.go b/internal/wire.go index 43674538..f823bc2e 100644 --- a/internal/wire.go +++ b/internal/wire.go @@ -28,6 +28,7 @@ var Set = wire.NewSet( auth.NewOAuthHandler, auth.NewAuthServer, chat.NewChatServer, + chat.NewChatServerV2, user.NewUserServer, project.NewProjectServer, comment.NewCommentServer, diff --git a/internal/wire_gen.go b/internal/wire_gen.go index 6268e274..75c4e91a 100644 --- a/internal/wire_gen.go +++ b/internal/wire_gen.go @@ -36,15 +36,16 @@ func InitializeApp() (*api.Server, error) { projectService := services.NewProjectService(dbDB, cfgCfg, loggerLogger) reverseCommentService := services.NewReverseCommentService(dbDB, cfgCfg, 
loggerLogger, projectService) aiClient := client.NewAIClient(dbDB, reverseCommentService, projectService, cfgCfg, loggerLogger) - aiClientV2 := client.NewAIClientV2(dbDB, reverseCommentService, projectService, cfgCfg, loggerLogger) chatService := services.NewChatService(dbDB, cfgCfg, loggerLogger) + chatServiceServer := chat.NewChatServer(aiClient, chatService, projectService, userService, loggerLogger, cfgCfg) + aiClientV2 := client.NewAIClientV2(dbDB, reverseCommentService, projectService, cfgCfg, loggerLogger) chatServiceV2 := services.NewChatServiceV2(dbDB, cfgCfg, loggerLogger) - chatServiceServer := chat.NewChatServer(aiClient, aiClientV2, chatService, chatServiceV2, projectService, userService, loggerLogger, cfgCfg) + chatv2ChatServiceServer := chat.NewChatServerV2(aiClientV2, chatServiceV2, projectService, userService, loggerLogger, cfgCfg) promptService := services.NewPromptService(dbDB, cfgCfg, loggerLogger) userServiceServer := user.NewUserServer(userService, promptService, cfgCfg, loggerLogger) projectServiceServer := project.NewProjectServer(projectService, loggerLogger, cfgCfg) commentServiceServer := comment.NewCommentServer(projectService, chatService, reverseCommentService, loggerLogger, cfgCfg) - grpcServer := api.NewGrpcServer(userService, cfgCfg, authServiceServer, chatServiceServer, userServiceServer, projectServiceServer, commentServiceServer) + grpcServer := api.NewGrpcServer(userService, cfgCfg, authServiceServer, chatServiceServer, chatv2ChatServiceServer, userServiceServer, projectServiceServer, commentServiceServer) oAuthService := services.NewOAuthService(dbDB, cfgCfg, loggerLogger) oAuthHandler := auth.NewOAuthHandler(oAuthService) ginServer := api.NewGinServer(cfgCfg, oAuthHandler) @@ -54,4 +55,4 @@ func InitializeApp() (*api.Server, error) { // wire.go: -var Set = wire.NewSet(api.NewServer, api.NewGrpcServer, api.NewGinServer, auth.NewOAuthHandler, auth.NewAuthServer, chat.NewChatServer, user.NewUserServer, project.NewProjectServer, 
comment.NewCommentServer, client.NewAIClient, client.NewAIClientV2, services.NewReverseCommentService, services.NewChatService, services.NewChatServiceV2, services.NewTokenService, services.NewUserService, services.NewProjectService, services.NewPromptService, services.NewOAuthService, cfg.GetCfg, logger.GetLogger, db.NewDB) +var Set = wire.NewSet(api.NewServer, api.NewGrpcServer, api.NewGinServer, auth.NewOAuthHandler, auth.NewAuthServer, chat.NewChatServer, chat.NewChatServerV2, user.NewUserServer, project.NewProjectServer, comment.NewCommentServer, client.NewAIClient, client.NewAIClientV2, services.NewReverseCommentService, services.NewChatService, services.NewChatServiceV2, services.NewTokenService, services.NewUserService, services.NewProjectService, services.NewPromptService, services.NewOAuthService, cfg.GetCfg, logger.GetLogger, db.NewDB) diff --git a/webapp/_webapp/src/libs/apiclient.ts b/webapp/_webapp/src/libs/apiclient.ts index bfbb0818..261613e4 100644 --- a/webapp/_webapp/src/libs/apiclient.ts +++ b/webapp/_webapp/src/libs/apiclient.ts @@ -6,18 +6,31 @@ import { EventEmitter } from "events"; import { ErrorCode, ErrorSchema } from "../pkg/gen/apiclient/shared/v1/shared_pb"; import { errorToast } from "./toasts"; +// Exhaustive type check helper - will cause compile error if a case is not handled +const assertNever = (x: never): never => { + throw new Error("Unexpected api version: " + x); +}; + export type RequestOptions = { ignoreErrorToast?: boolean; }; +export type ApiVersion = "v1" | "v2"; + +// Storage key mapping for each API version - add new versions here +const API_VERSION_STORAGE_KEYS: Record = { + v1: "pd.devtool.endpoint", + v2: "pd.devtool.endpoint.v2", +} as const; + class ApiClient { private axiosInstance: AxiosInstance; private refreshToken: string | null; private onTokenRefreshedEventEmitter: EventEmitter; - constructor(baseURL: string) { + constructor(baseURL: string, apiVersion: ApiVersion) { this.axiosInstance = axios.create({ - 
baseURL, + baseURL: `${baseURL}/_pd/api/${apiVersion}`, headers: { "Content-Type": "application/json", }, @@ -26,9 +39,18 @@ class ApiClient { this.onTokenRefreshedEventEmitter = new EventEmitter(); } - updateBaseURL(baseURL: string): void { - this.axiosInstance.defaults.baseURL = baseURL; - localStorage.setItem(LOCAL_STORAGE_KEY, baseURL); + updateBaseURL(baseURL: string, apiVersion: ApiVersion): void { + this.axiosInstance.defaults.baseURL = `${baseURL}/_pd/api/${apiVersion}`; + switch (apiVersion) { + case "v1": + localStorage.setItem(API_VERSION_STORAGE_KEYS.v1, this.axiosInstance.defaults.baseURL); + break; + case "v2": + localStorage.setItem(API_VERSION_STORAGE_KEYS.v2, this.axiosInstance.defaults.baseURL); + break; + default: + assertNever(apiVersion); // Compile error if a new version is added but not handled + } } addListener(event: "tokenRefreshed", listener: (args: { token: string; refreshToken: string }) => void): void { @@ -99,7 +121,7 @@ class ApiClient { const errorPayload = fromJson(ErrorSchema, errorData); if (!options?.ignoreErrorToast) { const message = errorPayload.message.replace(/^rpc error: code = Code\(\d+\) desc = /, ""); - errorToast(message, `Request Failed: ${ErrorCode[errorPayload.code]}`); + errorToast(message + ` (${config.url})`, `Request Failed: ${ErrorCode[errorPayload.code]}`); } throw errorPayload; } @@ -187,22 +209,30 @@ class ApiClient { } } -const DEFAULT_ENDPOINT = `${process.env.PD_API_ENDPOINT || "http://localhost:3000"}/_pd/api/v1`; -const LOCAL_STORAGE_KEY = "pd.devtool.endpoint"; +const DEFAULT_ENDPOINT = `${process.env.PD_API_ENDPOINT || "http://localhost:3000"}`; +const LOCAL_STORAGE_KEY_V1 = "pd.devtool.endpoint"; +const LOCAL_STORAGE_KEY_V2 = "pd.devtool.endpoint.v2"; + // Create apiclient instance with endpoint from localStorage or default export const getEndpointFromLocalStorage = () => { + var endpoint = ""; try { - return localStorage.getItem(LOCAL_STORAGE_KEY) || DEFAULT_ENDPOINT; + endpoint = 
localStorage.getItem(LOCAL_STORAGE_KEY_V1) || DEFAULT_ENDPOINT; } catch { // Fallback if localStorage is not available (e.g., in SSR) - return DEFAULT_ENDPOINT; + endpoint = DEFAULT_ENDPOINT; } + + return endpoint.replace("/_pd/api/v1", "").replace("/_pd/api/v2", ""); // compatible with old endpoint }; export const resetApiClientEndpoint = () => { - localStorage.removeItem(LOCAL_STORAGE_KEY); - apiclient.updateBaseURL(getEndpointFromLocalStorage()); + localStorage.removeItem(LOCAL_STORAGE_KEY_V1); + localStorage.removeItem(LOCAL_STORAGE_KEY_V2); + apiclient.updateBaseURL(getEndpointFromLocalStorage(), "v1"); + apiclientV2.updateBaseURL(getEndpointFromLocalStorage(), "v2"); }; -const apiclient = new ApiClient(getEndpointFromLocalStorage()); +const apiclient = new ApiClient(getEndpointFromLocalStorage(), "v1"); +export const apiclientV2 = new ApiClient(getEndpointFromLocalStorage(), "v2"); export default apiclient; diff --git a/webapp/_webapp/src/main.tsx b/webapp/_webapp/src/main.tsx index 1e8a0586..c8272f7e 100644 --- a/webapp/_webapp/src/main.tsx +++ b/webapp/_webapp/src/main.tsx @@ -9,7 +9,7 @@ import googleAnalytics from "./libs/google-analytics"; import { generateSHA1Hash, onElementAdded, onElementAppeared } from "./libs/helpers"; import { OverleafCodeMirror, completion, createSuggestionExtension } from "./libs/inline-suggestion"; import { logInfo } from "./libs/logger"; -import apiclient, { getEndpointFromLocalStorage } from "./libs/apiclient"; +import apiclient, { apiclientV2, getEndpointFromLocalStorage } from "./libs/apiclient"; import { Providers } from "./providers"; import { useAuthStore } from "./stores/auth-store"; import { useConversationUiStore } from "./stores/conversation/conversation-ui-store"; @@ -64,7 +64,8 @@ export const Main = () => { const { loadPrompts } = usePromptLibraryStore(); useEffect(() => { - apiclient.updateBaseURL(getEndpointFromLocalStorage()); + apiclient.updateBaseURL(getEndpointFromLocalStorage(), "v1"); + 
apiclientV2.updateBaseURL(getEndpointFromLocalStorage(), "v2"); login(); loadSettings(); loadPrompts(); diff --git a/webapp/_webapp/src/query/api.ts b/webapp/_webapp/src/query/api.ts index 143aa2b8..82760a65 100644 --- a/webapp/_webapp/src/query/api.ts +++ b/webapp/_webapp/src/query/api.ts @@ -1,4 +1,4 @@ -import apiclient from "../libs/apiclient"; +import apiclient, { apiclientV2 } from "../libs/apiclient"; import { LoginByGoogleRequest, LoginByGoogleResponseSchema, @@ -115,17 +115,17 @@ export const resetSettings = async () => { }; export const listConversations = async (data: PlainMessage) => { - const response = await apiclient.get("/chats/conversations", data); + const response = await apiclientV2.get("/chats/conversations", data); return fromJson(ListConversationsResponseSchema, response); }; export const listSupportedModels = async (data: PlainMessage) => { - const response = await apiclient.get("/chats/models", data); + const response = await apiclientV2.get("/chats/models", data); return fromJson(ListSupportedModelsResponseSchema, response); }; export const getConversation = async (data: PlainMessage) => { - const response = await apiclient.get(`/chats/conversations/${data.conversationId}`); + const response = await apiclientV2.get(`/chats/conversations/${data.conversationId}`); return fromJson(GetConversationResponseSchema, response); }; @@ -133,17 +133,17 @@ export const createConversationMessageStream = async ( data: PlainMessage, onMessage: (chunk: CreateConversationMessageStreamResponse) => void, ) => { - const stream = await apiclient.postStream(`/chats/conversations/messages/stream`, data); + const stream = await apiclientV2.postStream(`/chats/conversations/messages/stream`, data); await processStream(stream, CreateConversationMessageStreamResponseSchema, onMessage); }; export const deleteConversation = async (data: PlainMessage) => { - const response = await apiclient.delete(`/chats/conversations/${data.conversationId}`); + const response = await 
apiclientV2.delete(`/chats/conversations/${data.conversationId}`); return fromJson(DeleteConversationResponseSchema, response); }; export const updateConversation = async (data: PlainMessage) => { - const response = await apiclient.patch(`/chats/conversations/${data.conversationId}`, data); + const response = await apiclientV2.patch(`/chats/conversations/${data.conversationId}`, data); return fromJson(UpdateConversationResponseSchema, response); }; diff --git a/webapp/_webapp/src/stores/auth-store.ts b/webapp/_webapp/src/stores/auth-store.ts index c13d4b14..8bd82949 100644 --- a/webapp/_webapp/src/stores/auth-store.ts +++ b/webapp/_webapp/src/stores/auth-store.ts @@ -1,7 +1,7 @@ import { create } from "zustand"; import { PlainMessage } from "../query/types"; import { User } from "../pkg/gen/apiclient/user/v1/user_pb"; -import apiclient from "../libs/apiclient"; +import apiclient, { apiclientV2 } from "../libs/apiclient"; import { logout as apiLogout, getUser } from "../query/api"; import { logInfo } from "../libs/logger"; @@ -37,6 +37,7 @@ export const useAuthStore = create((set, get) => ({ login: async () => { const { token, refreshToken } = get(); apiclient.setTokens(token, refreshToken); + apiclientV2.setTokens(token, refreshToken); getUser() .then((resp) => { @@ -59,6 +60,7 @@ export const useAuthStore = create((set, get) => ({ // ignored } apiclient.clearTokens(); + apiclientV2.clearTokens(); set({ user: null, token: "", refreshToken: "" }); }, diff --git a/webapp/_webapp/src/views/login/advanced-settings.tsx b/webapp/_webapp/src/views/login/advanced-settings.tsx index bda16f26..f68b5817 100644 --- a/webapp/_webapp/src/views/login/advanced-settings.tsx +++ b/webapp/_webapp/src/views/login/advanced-settings.tsx @@ -1,12 +1,13 @@ import { useEffect, useState } from "react"; import { SettingItemInput } from "../settings/setting-item-input"; -import apiclient, { getEndpointFromLocalStorage, resetApiClientEndpoint } from "../../libs/apiclient"; +import apiclient, { 
apiclientV2, getEndpointFromLocalStorage, resetApiClientEndpoint } from "../../libs/apiclient"; export default function AdvancedSettings() { const [endpoint, setEndpoint] = useState(getEndpointFromLocalStorage()); useEffect(() => { - apiclient.updateBaseURL(endpoint); + apiclient.updateBaseURL(endpoint, "v1"); + apiclientV2.updateBaseURL(endpoint, "v2"); }, [endpoint]); return ( diff --git a/webapp/_webapp/src/views/settings/sections/user-developer-tools.tsx b/webapp/_webapp/src/views/settings/sections/user-developer-tools.tsx index d775bb43..ff7e19c3 100644 --- a/webapp/_webapp/src/views/settings/sections/user-developer-tools.tsx +++ b/webapp/_webapp/src/views/settings/sections/user-developer-tools.tsx @@ -3,14 +3,15 @@ import { SettingItemSelect } from "../setting-item-select"; import { useSettingStore } from "../../../stores/setting-store"; import { SettingItemInput } from "../setting-item-input"; import { useEffect, useState } from "react"; -import apiclient, { getEndpointFromLocalStorage, resetApiClientEndpoint } from "../../../libs/apiclient"; +import apiclient, { apiclientV2, getEndpointFromLocalStorage, resetApiClientEndpoint } from "../../../libs/apiclient"; export const UserDeveloperTools = () => { const { conversationMode, setConversationMode } = useSettingStore(); const [endpoint, setEndpoint] = useState(getEndpointFromLocalStorage()); useEffect(() => { - apiclient.updateBaseURL(endpoint); + apiclient.updateBaseURL(endpoint, "v1"); + apiclientV2.updateBaseURL(endpoint, "v2"); }, [endpoint]); return ( From 88252e7495c579bb6b910f5033df6535e35bc117 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 02:00:48 +0800 Subject: [PATCH 11/28] fix --- .../create_conversation_message_stream_v2.go | 71 +++++++------------ internal/api/chat/types.go | 10 +++ internal/services/chat_v2.go | 16 ++--- .../services/toolkit/client/completion_v2.go | 5 +- internal/services/toolkit/client/types.go | 10 +++ internal/services/toolkit/client/utils_v2.go | 5 +- 
internal/services/toolkit/handler/stream.go | 5 +- .../services/toolkit/handler/stream_v2.go | 3 +- 8 files changed, 63 insertions(+), 62 deletions(-) create mode 100644 internal/api/chat/types.go create mode 100644 internal/services/toolkit/client/types.go diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go index d1f672c4..ac907347 100644 --- a/internal/api/chat/create_conversation_message_stream_v2.go +++ b/internal/api/chat/create_conversation_message_stream_v2.go @@ -8,10 +8,9 @@ import ( "paperdebugger/internal/models" "paperdebugger/internal/services" chatv2 "paperdebugger/pkg/gen/api/chat/v2" - "time" "github.com/google/uuid" - "github.com/openai/openai-go/v2/responses" + "github.com/openai/openai-go/v3" "go.mongodb.org/mongo-driver/v2/bson" "go.mongodb.org/mongo-driver/v2/mongo" "google.golang.org/protobuf/encoding/protojson" @@ -32,10 +31,27 @@ func (s *ChatServerV2) sendStreamError(stream chatv2.ChatService_CreateConversat // 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList) // buildUserMessage constructs both the user-facing message and the OpenAI input message -func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv2.ConversationType) (*chatv2.Message, *responses.ResponseInputItemUnionParam, error) { +func (s *ChatServerV2) buildSystemMessage(systemPrompt string) (*chatv2.Message, openai.ChatCompletionMessageParamUnion) { + inappMessage := &chatv2.Message{ + MessageId: "pd_msg_system_" + uuid.New().String(), + Payload: &chatv2.MessagePayload{ + MessageType: &chatv2.MessagePayload_System{ + System: &chatv2.MessageTypeSystem{ + Content: systemPrompt, + }, + }, + }, + } + + openaiMessage := openai.SystemMessage(systemPrompt) + + return inappMessage, openaiMessage +} + +func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType 
chatv2.ConversationType) (*chatv2.Message, openai.ChatCompletionMessageParamUnion, error) { userPrompt, err := s.chatServiceV2.GetPrompt(ctx, userMessage, userSelectedText, conversationType) if err != nil { - return nil, nil, err + return nil, openai.ChatCompletionMessageParamUnion{}, err } var inappMessage *chatv2.Message @@ -50,7 +66,6 @@ func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSe }, }, }, - Timestamp: time.Now().Unix(), } default: inappMessage = &chatv2.Message{ @@ -63,48 +78,13 @@ func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSe }, }, }, - Timestamp: time.Now().Unix(), } } - openaiMessage := &responses.ResponseInputItemUnionParam{ - OfInputMessage: &responses.ResponseInputItemMessageParam{ - Role: "user", - Content: responses.ResponseInputMessageContentListParam{ - responses.ResponseInputContentParamOfInputText(userPrompt), - }, - }, - } - + openaiMessage := openai.UserMessage(userPrompt) return inappMessage, openaiMessage, nil } -// buildSystemMessage constructs both the user-facing system message and the OpenAI input message -func (s *ChatServerV2) buildSystemMessage(systemPrompt string) (*chatv2.Message, *responses.ResponseInputItemUnionParam) { - inappMessage := &chatv2.Message{ - MessageId: "pd_msg_system_" + uuid.New().String(), - Payload: &chatv2.MessagePayload{ - MessageType: &chatv2.MessagePayload_System{ - System: &chatv2.MessageTypeSystem{ - Content: systemPrompt, - }, - }, - }, - Timestamp: time.Now().Unix(), - } - - openaiMessage := &responses.ResponseInputItemUnionParam{ - OfInputMessage: &responses.ResponseInputItemMessageParam{ - Role: "system", - Content: responses.ResponseInputMessageContentListParam{ - responses.ResponseInputContentParamOfInputText(systemPrompt), - }, - }, - } - - return inappMessage, openaiMessage -} - // convertToBSON converts a protobuf message to BSON func convertToBSONV2(msg *chatv2.Message) (bson.M, error) { jsonBytes, err := protojson.Marshal(msg) @@ 
-144,12 +124,13 @@ func (s *ChatServerV2) createConversation( } messages := []*chatv2.Message{inappUserMsg} - oaiHistory := responses.ResponseNewParamsInputUnion{ - OfInputItemList: responses.ResponseInputParam{*openaiSystemMsg, *openaiUserMsg}, + oaiHistory := []openai.ChatCompletionMessageParamUnion{ + openaiSystemMsg, + openaiUserMsg, } return s.chatServiceV2.InsertConversationToDBV2( - ctx, userId, projectId, modelSlug, messages, oaiHistory.OfInputItemList, + ctx, userId, projectId, modelSlug, messages, oaiHistory, ) } @@ -183,7 +164,7 @@ func (s *ChatServerV2) appendConversationMessage( return nil, err } conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMsg) - conversation.OpenaiChatHistory = append(conversation.OpenaiChatHistory, *userOaiMsg) + conversation.OpenaiChatHistoryCompletion = append(conversation.OpenaiChatHistoryCompletion, userOaiMsg) if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { return nil, err diff --git a/internal/api/chat/types.go b/internal/api/chat/types.go new file mode 100644 index 00000000..2c4515f5 --- /dev/null +++ b/internal/api/chat/types.go @@ -0,0 +1,10 @@ +package chat + +import ( + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v2" +) + +type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion +type AppChatHistory []chatv2.Message diff --git a/internal/services/chat_v2.go b/internal/services/chat_v2.go index 5eac89ba..3a9f3ef8 100644 --- a/internal/services/chat_v2.go +++ b/internal/services/chat_v2.go @@ -14,7 +14,7 @@ import ( "paperdebugger/internal/models" chatv2 "paperdebugger/pkg/gen/api/chat/v2" - "github.com/openai/openai-go/v2/responses" + "github.com/openai/openai-go/v3" "go.mongodb.org/mongo-driver/v2/bson" "go.mongodb.org/mongo-driver/v2/mongo" "go.mongodb.org/mongo-driver/v2/mongo/options" @@ -92,7 +92,7 @@ func (s *ChatServiceV2) GetPrompt(ctx context.Context, content string, selectedT return 
strings.TrimSpace(userPromptBuffer.String()), nil } -func (s *ChatServiceV2) InsertConversationToDBV2(ctx context.Context, userID bson.ObjectID, projectID string, modelSlug string, inappChatHistory []*chatv2.Message, openaiChatHistory responses.ResponseInputParam) (*models.Conversation, error) { +func (s *ChatServiceV2) InsertConversationToDBV2(ctx context.Context, userID bson.ObjectID, projectID string, modelSlug string, inappChatHistory []*chatv2.Message, openaiChatHistory []openai.ChatCompletionMessageParamUnion) (*models.Conversation, error) { // Convert protobuf messages to BSON bsonMessages := make([]bson.M, len(inappChatHistory)) for i := range inappChatHistory { @@ -113,12 +113,12 @@ func (s *ChatServiceV2) InsertConversationToDBV2(ctx context.Context, userID bso CreatedAt: bson.NewDateTimeFromTime(time.Now()), UpdatedAt: bson.NewDateTimeFromTime(time.Now()), }, - UserID: userID, - ProjectID: projectID, - Title: DefaultConversationTitleV2, - ModelSlug: modelSlug, - InappChatHistory: bsonMessages, - OpenaiChatHistory: openaiChatHistory, + UserID: userID, + ProjectID: projectID, + Title: DefaultConversationTitleV2, + ModelSlug: modelSlug, + InappChatHistory: bsonMessages, + OpenaiChatHistoryCompletion: openaiChatHistory, } _, err := s.conversationCollection.InsertOne(ctx, conversation) if err != nil { diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go index fe3b1770..cd810d57 100644 --- a/internal/services/toolkit/client/completion_v2.go +++ b/internal/services/toolkit/client/completion_v2.go @@ -12,9 +12,6 @@ import ( // define []openai.ChatCompletionMessageParamUnion as OpenAIChatHistory -type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion -type AppChatHistory []chatv2.Message - // ChatCompletion orchestrates a chat completion process with a language model (e.g., GPT), handling tool calls and message history management. 
// // Parameters: @@ -165,7 +162,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream } if answer_content != "" { - appendAssistantTextResponseV2(&openaiChatHistory, &inappChatHistory, answer_content, answer_content_id) + appendAssistantTextResponseV2(&openaiChatHistory, &inappChatHistory, answer_content, answer_content_id, modelSlug) } // 执行调用(如果有),返回增量数据 diff --git a/internal/services/toolkit/client/types.go b/internal/services/toolkit/client/types.go new file mode 100644 index 00000000..933cc186 --- /dev/null +++ b/internal/services/toolkit/client/types.go @@ -0,0 +1,10 @@ +package client + +import ( + chatv2 "paperdebugger/pkg/gen/api/chat/v2" + + "github.com/openai/openai-go/v3" +) + +type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion +type AppChatHistory []chatv2.Message diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go index ddf35429..b2880685 100644 --- a/internal/services/toolkit/client/utils_v2.go +++ b/internal/services/toolkit/client/utils_v2.go @@ -21,7 +21,7 @@ import ( "github.com/samber/lo" ) -func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappChatHistory *AppChatHistory, content string, contentId string) { +func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappChatHistory *AppChatHistory, content string, contentId string, modelSlug string) { *openaiChatHistory = append(*openaiChatHistory, openaiv3.ChatCompletionMessageParamUnion{ OfAssistant: &openaiv3.ChatCompletionAssistantMessageParam{ Role: "assistant", @@ -43,7 +43,8 @@ func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappCh Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_Assistant{ Assistant: &chatv2.MessageTypeAssistant{ - Content: content, + Content: content, + ModelSlug: modelSlug, }, }, }, diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go 
index 1a19bc35..87328dad 100644 --- a/internal/services/toolkit/handler/stream.go +++ b/internal/services/toolkit/handler/stream.go @@ -59,7 +59,8 @@ func (h *StreamHandlerV1) HandleAddedItem(chunk responses.ResponseStreamEventUni if h.callbackStream == nil { return } - if chunk.Item.Type == "message" { + switch chunk.Item.Type { + case "message": h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv1.StreamPartBegin{ @@ -72,7 +73,7 @@ func (h *StreamHandlerV1) HandleAddedItem(chunk responses.ResponseStreamEventUni }, }, }) - } else if chunk.Item.Type == "function_call" { + case "function_call": h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv1.StreamPartBegin{ diff --git a/internal/services/toolkit/handler/stream_v2.go b/internal/services/toolkit/handler/stream_v2.go index ab9819e2..cb46c077 100644 --- a/internal/services/toolkit/handler/stream_v2.go +++ b/internal/services/toolkit/handler/stream_v2.go @@ -112,7 +112,8 @@ func (h *StreamHandlerV2) HandleTextDoneItem(chunk openai.ChatCompletionChunk, c Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_Assistant{ Assistant: &chatv2.MessageTypeAssistant{ - Content: content, + Content: content, + ModelSlug: h.modelSlug, }, }, }, From ccc5cc750c39cd10f8bd1e150cd3be893b18f170 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 02:07:51 +0800 Subject: [PATCH 12/28] feat: webapp: copy user message --- .../_webapp/src/components/message-card.tsx | 1 + .../message-entry-container/assistant.tsx | 6 ++- .../message-entry-container/user.tsx | 48 ++++++++++++++++--- webapp/_webapp/src/index.css | 16 ++++++- 4 files changed, 60 insertions(+), 11 deletions(-) diff --git a/webapp/_webapp/src/components/message-card.tsx 
b/webapp/_webapp/src/components/message-card.tsx index f5582775..221c201a 100644 --- a/webapp/_webapp/src/components/message-card.tsx +++ b/webapp/_webapp/src/components/message-card.tsx @@ -84,6 +84,7 @@ export const MessageCard = memo(({ messageEntry, prevAttachment, animated }: Mes content={messageEntry.user?.content ?? ""} attachment={messageEntry.user?.selectedText ?? ""} stale={messageEntry.status === MessageEntryStatus.STALE} + messageId={messageEntry.messageId} /> ); } diff --git a/webapp/_webapp/src/components/message-entry-container/assistant.tsx b/webapp/_webapp/src/components/message-entry-container/assistant.tsx index 1a01f6da..debd8447 100644 --- a/webapp/_webapp/src/components/message-entry-container/assistant.tsx +++ b/webapp/_webapp/src/components/message-entry-container/assistant.tsx @@ -77,8 +77,10 @@ export const AssistantMessageContainer = ({ {staleComponent}
- - + + + +
diff --git a/webapp/_webapp/src/components/message-entry-container/user.tsx b/webapp/_webapp/src/components/message-entry-container/user.tsx index 249432bf..99231a04 100644 --- a/webapp/_webapp/src/components/message-entry-container/user.tsx +++ b/webapp/_webapp/src/components/message-entry-container/user.tsx @@ -1,16 +1,41 @@ -import { cn } from "@heroui/react"; +import { cn, Tooltip } from "@heroui/react"; +import { useCallback, useState } from "react"; import { AttachmentPopover } from "./attachment-popover"; +import { Icon } from "@iconify/react/dist/iconify.js"; +import googleAnalytics from "../../libs/google-analytics"; +import { getProjectId } from "../../libs/helpers"; +import { useAuthStore } from "../../stores/auth-store"; // import MarkdownComponent from "../markdown"; export const UserMessageContainer = ({ content, attachment, stale, + messageId, }: { content: string; attachment: string; stale: boolean; + messageId: string; }) => { + const { user } = useAuthStore(); + const projectId = getProjectId(); + const [copySuccess, setCopySuccess] = useState(false); + + const handleCopy = useCallback(() => { + if (content) { + googleAnalytics.fireEvent(user?.id, "messagecard_copy_user_message", { + projectId, + messageId: messageId, + }); + navigator.clipboard.writeText(content); + setCopySuccess(true); + setTimeout(() => { + setCopySuccess(false); + }, 2000); + } + }, [user?.id, projectId, content, messageId]); + const staleComponent = stale && (
Connection error.
Please reload this conversation. @@ -19,12 +44,21 @@ export const UserMessageContainer = ({ return ( // Align right
-
- {/* */} -
{content || "Error: No content"}
- {/*
*/} - {attachment && } - {staleComponent} +
+
+ + + + + +
+
+ {/* */} +
{content || "Error: No content"}
+ {/*
*/} + {attachment && } + {staleComponent} +
); diff --git a/webapp/_webapp/src/index.css b/webapp/_webapp/src/index.css index 9ea01888..d7fa21e2 100644 --- a/webapp/_webapp/src/index.css +++ b/webapp/_webapp/src/index.css @@ -324,12 +324,20 @@ body { @apply text-sm text-default-400 dark:text-default-50; } +.chat-message-entry .message-box-user-wrapper { + display: flex; + flex-direction: row; + align-items: center; + justify-content: flex-end; + gap: 4px; + width: 100%; +} + .chat-message-entry .message-box-user { max-width: 70%; - align-self: flex-end; @apply text-sm text-default-800 px-3 py-2 border border-transparent rounded-xl; @apply transition-all duration-500 ease-in-out; - @apply bg-gray-200 self-end my-2; + @apply bg-gray-200 my-2; } .chat-message-entry .message-box-assistant { @@ -350,6 +358,10 @@ body { @apply gap-2 text-gray-400 mt-2 -ml-2 opacity-0 transition-all duration-100; } +.chat-message-entry .actions.actions-left { + @apply mt-0 ml-0 mr-0; +} + .chat-message-entry:hover .actions { /* height: 24px; */ @apply opacity-100; From e0ece0c6599d371ccaeaf9f7cc264f3e6e3093e2 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 02:35:10 +0800 Subject: [PATCH 13/28] tools --- internal/services/toolkit/client/utils_v2.go | 21 +++++ .../toolkit/tools/files/file_create.go | 49 ++++++++++++ .../toolkit/tools/files/file_delete.go | 44 +++++++++++ .../services/toolkit/tools/files/file_read.go | 63 +++++++++++++++ .../toolkit/tools/files/folder_create.go | 44 +++++++++++ .../toolkit/tools/files/folder_delete.go | 54 +++++++++++++ .../toolkit/tools/files/folder_read.go | 74 +++++++++++++++++ .../toolkit/tools/files/string_search.go | 79 +++++++++++++++++++ .../toolkit/tools/latex/document_structure.go | 39 +++++++++ .../toolkit/tools/latex/locate_section.go | 47 +++++++++++ .../tools/latex/read_section_source.go | 47 +++++++++++ .../tools/latex/read_source_line_range.go | 59 ++++++++++++++ .../_webapp/src/components/message-card.tsx | 31 ++++---- .../message-entry-container/assistant.tsx | 6 +- 
.../toolcall-prepare.tsx | 29 ++++++- .../message-entry-container/tools/general.tsx | 73 +++++++++++++++++ 16 files changed, 739 insertions(+), 20 deletions(-) create mode 100644 internal/services/toolkit/tools/files/file_create.go create mode 100644 internal/services/toolkit/tools/files/file_delete.go create mode 100644 internal/services/toolkit/tools/files/file_read.go create mode 100644 internal/services/toolkit/tools/files/folder_create.go create mode 100644 internal/services/toolkit/tools/files/folder_delete.go create mode 100644 internal/services/toolkit/tools/files/folder_read.go create mode 100644 internal/services/toolkit/tools/files/string_search.go create mode 100644 internal/services/toolkit/tools/latex/document_structure.go create mode 100644 internal/services/toolkit/tools/latex/locate_section.go create mode 100644 internal/services/toolkit/tools/latex/read_section_source.go create mode 100644 internal/services/toolkit/tools/latex/read_source_line_range.go create mode 100644 webapp/_webapp/src/components/message-entry-container/tools/general.tsx diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go index b2880685..dad79fec 100644 --- a/internal/services/toolkit/client/utils_v2.go +++ b/internal/services/toolkit/client/utils_v2.go @@ -13,6 +13,8 @@ import ( "paperdebugger/internal/libs/logger" "paperdebugger/internal/services" "paperdebugger/internal/services/toolkit/registry" + filetools "paperdebugger/internal/services/toolkit/tools/files" + latextools "paperdebugger/internal/services/toolkit/tools/latex" "paperdebugger/internal/services/toolkit/tools/xtramcp" chatv2 "paperdebugger/pkg/gen/api/chat/v2" "time" @@ -108,6 +110,25 @@ func initializeToolkitV2( ) *registry.ToolRegistryV2 { toolRegistry := registry.NewToolRegistryV2() + // Register static file tools + toolRegistry.Register("create_file", filetools.CreateFileToolDescriptionV2, filetools.CreateFileTool) + toolRegistry.Register("delete_file", 
filetools.DeleteFileToolDescriptionV2, filetools.DeleteFileTool) + toolRegistry.Register("read_file", filetools.ReadFileToolDescriptionV2, filetools.ReadFileTool) + toolRegistry.Register("create_folder", filetools.CreateFolderToolDescriptionV2, filetools.CreateFolderTool) + toolRegistry.Register("delete_folder", filetools.DeleteFolderToolDescriptionV2, filetools.DeleteFolderTool) + toolRegistry.Register("read_folder", filetools.ReadFolderToolDescriptionV2, filetools.ReadFolderTool) + toolRegistry.Register("search_string", filetools.SearchStringToolDescriptionV2, filetools.SearchStringTool) + + logger.Info("[AI Client V2] Registered static file tools", "count", 7) + + // Register static LaTeX tools + toolRegistry.Register("get_document_structure", latextools.GetDocumentStructureToolDescriptionV2, latextools.GetDocumentStructureTool) + toolRegistry.Register("locate_section", latextools.LocateSectionToolDescriptionV2, latextools.LocateSectionTool) + toolRegistry.Register("read_section_source", latextools.ReadSectionSourceToolDescriptionV2, latextools.ReadSectionSourceTool) + toolRegistry.Register("read_source_line_range", latextools.ReadSourceLineRangeToolDescriptionV2, latextools.ReadSourceLineRangeTool) + + logger.Info("[AI Client V2] Registered static LaTeX tools", "count", 4) + // Load tools dynamically from backend xtraMCPLoader := xtramcp.NewXtraMCPLoaderV2(db, projectService, cfg.XtraMCPURI) diff --git a/internal/services/toolkit/tools/files/file_create.go b/internal/services/toolkit/tools/files/file_create.go new file mode 100644 index 00000000..54f91ecc --- /dev/null +++ b/internal/services/toolkit/tools/files/file_create.go @@ -0,0 +1,49 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var CreateFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: 
openai.FunctionDefinitionParam{ + Name: "create_file", + Description: param.NewOpt("Creates a new file at the specified path with the given content. Returns an error if the file already exists."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "path": map[string]any{ + "type": "string", + "description": "The absolute or relative path where the file should be created.", + }, + "content": map[string]any{ + "type": "string", + "description": "The content to write to the file.", + }, + }, + "required": []string{"path", "content"}, + }, + }, + }, +} + +type CreateFileArgs struct { + Path string `json:"path"` + Content string `json:"content"` +} + +func CreateFileTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs CreateFileArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // TODO: Implement actual file creation logic + return fmt.Sprintf("[DUMMY] File created at: %s (content length: %d bytes)", getArgs.Path, len(getArgs.Content)), "", nil +} diff --git a/internal/services/toolkit/tools/files/file_delete.go b/internal/services/toolkit/tools/files/file_delete.go new file mode 100644 index 00000000..18f78ddd --- /dev/null +++ b/internal/services/toolkit/tools/files/file_delete.go @@ -0,0 +1,44 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var DeleteFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "delete_file", + Description: param.NewOpt("Deletes a file at the specified path. 
Returns an error if the file does not exist or cannot be deleted."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "path": map[string]any{ + "type": "string", + "description": "The absolute or relative path of the file to delete.", + }, + }, + "required": []string{"path"}, + }, + }, + }, +} + +type DeleteFileArgs struct { + Path string `json:"path"` +} + +func DeleteFileTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs DeleteFileArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // TODO: Implement actual file deletion logic + return fmt.Sprintf("[DUMMY] File deleted: %s", getArgs.Path), "", nil +} diff --git a/internal/services/toolkit/tools/files/file_read.go b/internal/services/toolkit/tools/files/file_read.go new file mode 100644 index 00000000..35aec81d --- /dev/null +++ b/internal/services/toolkit/tools/files/file_read.go @@ -0,0 +1,63 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var ReadFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "read_file", + Description: param.NewOpt("Reads the content of a file at the specified path. Supports reading specific line ranges."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "path": map[string]any{ + "type": "string", + "description": "The absolute or relative path of the file to read.", + }, + "start_line": map[string]any{ + "type": "integer", + "description": "Optional. The starting line number (1-indexed) to read from. If not specified, reads from the beginning.", + }, + "end_line": map[string]any{ + "type": "integer", + "description": "Optional. 
The ending line number (1-indexed, inclusive) to read to. If not specified, reads to the end.", + }, + }, + "required": []string{"path"}, + }, + }, + }, +} + +type ReadFileArgs struct { + Path string `json:"path"` + StartLine *int `json:"start_line,omitempty"` + EndLine *int `json:"end_line,omitempty"` +} + +func ReadFileTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs ReadFileArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + lineRange := "all" + if getArgs.StartLine != nil && getArgs.EndLine != nil { + lineRange = fmt.Sprintf("lines %d-%d", *getArgs.StartLine, *getArgs.EndLine) + } else if getArgs.StartLine != nil { + lineRange = fmt.Sprintf("from line %d", *getArgs.StartLine) + } else if getArgs.EndLine != nil { + lineRange = fmt.Sprintf("to line %d", *getArgs.EndLine) + } + + // TODO: Implement actual file reading logic + return fmt.Sprintf("[DUMMY] Read file: %s (%s)", getArgs.Path, lineRange), "", nil +} diff --git a/internal/services/toolkit/tools/files/folder_create.go b/internal/services/toolkit/tools/files/folder_create.go new file mode 100644 index 00000000..4c5a1c4e --- /dev/null +++ b/internal/services/toolkit/tools/files/folder_create.go @@ -0,0 +1,44 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var CreateFolderToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "create_folder", + Description: param.NewOpt("Creates a new folder (directory) at the specified path. 
Creates parent directories if they don't exist (like mkdir -p)."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "path": map[string]any{ + "type": "string", + "description": "The absolute or relative path where the folder should be created.", + }, + }, + "required": []string{"path"}, + }, + }, + }, +} + +type CreateFolderArgs struct { + Path string `json:"path"` +} + +func CreateFolderTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs CreateFolderArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // TODO: Implement actual folder creation logic + return fmt.Sprintf("[DUMMY] Folder created at: %s", getArgs.Path), "", nil +} diff --git a/internal/services/toolkit/tools/files/folder_delete.go b/internal/services/toolkit/tools/files/folder_delete.go new file mode 100644 index 00000000..d18f979e --- /dev/null +++ b/internal/services/toolkit/tools/files/folder_delete.go @@ -0,0 +1,54 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var DeleteFolderToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "delete_folder", + Description: param.NewOpt("Deletes a folder (directory) at the specified path. Can optionally delete recursively including all contents."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "path": map[string]any{ + "type": "string", + "description": "The absolute or relative path of the folder to delete.", + }, + "recursive": map[string]any{ + "type": "boolean", + "description": "If true, delete the folder and all its contents recursively. 
Default is false.", + }, + }, + "required": []string{"path"}, + }, + }, + }, +} + +type DeleteFolderArgs struct { + Path string `json:"path"` + Recursive *bool `json:"recursive,omitempty"` +} + +func DeleteFolderTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs DeleteFolderArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + recursive := false + if getArgs.Recursive != nil { + recursive = *getArgs.Recursive + } + + // TODO: Implement actual folder deletion logic + return fmt.Sprintf("[DUMMY] Folder deleted: %s (recursive: %v)", getArgs.Path, recursive), "", nil +} diff --git a/internal/services/toolkit/tools/files/folder_read.go b/internal/services/toolkit/tools/files/folder_read.go new file mode 100644 index 00000000..e887b9fd --- /dev/null +++ b/internal/services/toolkit/tools/files/folder_read.go @@ -0,0 +1,74 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var ReadFolderToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "read_folder", + Description: param.NewOpt("Lists the contents of a folder (directory) at the specified path. Can optionally list recursively."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "path": map[string]any{ + "type": "string", + "description": "The absolute or relative path of the folder to list.", + }, + "recursive": map[string]any{ + "type": "boolean", + "description": "If true, list contents recursively including all subdirectories. Default is false.", + }, + "max_depth": map[string]any{ + "type": "integer", + "description": "Maximum depth to recurse when recursive is true. 
Default is unlimited.", + }, + "pattern": map[string]any{ + "type": "string", + "description": "Optional glob pattern to filter results (e.g., '*.go', '*.py').", + }, + }, + "required": []string{"path"}, + }, + }, + }, +} + +type ReadFolderArgs struct { + Path string `json:"path"` + Recursive *bool `json:"recursive,omitempty"` + MaxDepth *int `json:"max_depth,omitempty"` + Pattern string `json:"pattern,omitempty"` +} + +func ReadFolderTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs ReadFolderArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + recursive := false + if getArgs.Recursive != nil { + recursive = *getArgs.Recursive + } + + depthStr := "unlimited" + if getArgs.MaxDepth != nil { + depthStr = fmt.Sprintf("%d", *getArgs.MaxDepth) + } + + pattern := "*" + if getArgs.Pattern != "" { + pattern = getArgs.Pattern + } + + // TODO: Implement actual folder listing logic + return fmt.Sprintf("[DUMMY] Listed folder: %s (recursive: %v, max_depth: %s, pattern: %s)", getArgs.Path, recursive, depthStr, pattern), "", nil +} diff --git a/internal/services/toolkit/tools/files/string_search.go b/internal/services/toolkit/tools/files/string_search.go new file mode 100644 index 00000000..bf94b9e5 --- /dev/null +++ b/internal/services/toolkit/tools/files/string_search.go @@ -0,0 +1,79 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var SearchStringToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "search_string", + Description: param.NewOpt("Searches for a string pattern in files within a specified directory. 
Supports regex patterns and can limit results."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "pattern": map[string]any{ + "type": "string", + "description": "The search pattern (string or regex) to look for.", + }, + "path": map[string]any{ + "type": "string", + "description": "The directory path to search within.", + }, + "file_pattern": map[string]any{ + "type": "string", + "description": "Optional glob pattern to filter files (e.g., '*.go', '*.py'). Default is all files.", + }, + "case_sensitive": map[string]any{ + "type": "boolean", + "description": "Whether the search should be case-sensitive. Default is true.", + }, + "max_results": map[string]any{ + "type": "integer", + "description": "Maximum number of results to return. Default is 100.", + }, + }, + "required": []string{"pattern", "path"}, + }, + }, + }, +} + +type SearchStringArgs struct { + Pattern string `json:"pattern"` + Path string `json:"path"` + FilePattern string `json:"file_pattern,omitempty"` + CaseSensitive *bool `json:"case_sensitive,omitempty"` + MaxResults *int `json:"max_results,omitempty"` +} + +func SearchStringTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs SearchStringArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // Default values + caseSensitive := true + if getArgs.CaseSensitive != nil { + caseSensitive = *getArgs.CaseSensitive + } + maxResults := 100 + if getArgs.MaxResults != nil { + maxResults = *getArgs.MaxResults + } + filePattern := "*" + if getArgs.FilePattern != "" { + filePattern = getArgs.FilePattern + } + + // TODO: Implement actual string search logic + return fmt.Sprintf("[DUMMY] Searched for pattern '%s' in '%s' (file_pattern: %s, case_sensitive: %v, max_results: %d). 
No results found.", + getArgs.Pattern, getArgs.Path, filePattern, caseSensitive, maxResults), "", nil +} diff --git a/internal/services/toolkit/tools/latex/document_structure.go b/internal/services/toolkit/tools/latex/document_structure.go new file mode 100644 index 00000000..1b471942 --- /dev/null +++ b/internal/services/toolkit/tools/latex/document_structure.go @@ -0,0 +1,39 @@ +package latex + +import ( + "context" + "encoding/json" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var GetDocumentStructureToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "get_document_structure", + Description: param.NewOpt("获取整个项目的目录大纲 (章节树)。Returns the complete document outline including all sections, subsections, and their hierarchy."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{}, + "required": []string{}, + }, + }, + }, +} + +func GetDocumentStructureTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + // TODO: Implement actual document structure retrieval logic + return `[DUMMY] Document Structure: +1. Introduction + 1.1 Background + 1.2 Motivation +2. Related Work +3. Methodology + 3.1 Problem Definition + 3.2 Proposed Approach +4. Experiments + 4.1 Setup + 4.2 Results +5. 
Conclusion`, "", nil +} diff --git a/internal/services/toolkit/tools/latex/locate_section.go b/internal/services/toolkit/tools/latex/locate_section.go new file mode 100644 index 00000000..1eb2e078 --- /dev/null +++ b/internal/services/toolkit/tools/latex/locate_section.go @@ -0,0 +1,47 @@ +package latex + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var LocateSectionToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "locate_section", + Description: param.NewOpt("根据标题查找特定章节的精确位置 (文件路径 + 行号范围)。Locates a specific section by its title and returns the file path and line number range."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "title": map[string]any{ + "type": "string", + "description": "The title of the section to locate (e.g., 'Introduction', 'Related Work').", + }, + }, + "required": []string{"title"}, + }, + }, + }, +} + +type LocateSectionArgs struct { + Title string `json:"title"` +} + +func LocateSectionTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs LocateSectionArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // TODO: Implement actual section location logic + return fmt.Sprintf(`[DUMMY] Located section '%s': +File: main.tex +Start Line: 42 +End Line: 87`, getArgs.Title), "", nil +} diff --git a/internal/services/toolkit/tools/latex/read_section_source.go b/internal/services/toolkit/tools/latex/read_section_source.go new file mode 100644 index 00000000..f0693f75 --- /dev/null +++ b/internal/services/toolkit/tools/latex/read_section_source.go @@ -0,0 +1,47 @@ +package latex + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + 
"github.com/openai/openai-go/v3/packages/param" +) + +var ReadSectionSourceToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "read_section_source", + Description: param.NewOpt("读取特定章节的完整 LaTeX 源码。Reads the complete LaTeX source code of a specific section by its title."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "title": map[string]any{ + "type": "string", + "description": "The title of the section to read (e.g., 'Introduction', 'Methodology').", + }, + }, + "required": []string{"title"}, + }, + }, + }, +} + +type ReadSectionSourceArgs struct { + Title string `json:"title"` +} + +func ReadSectionSourceTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs ReadSectionSourceArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // TODO: Implement actual section source reading logic + return fmt.Sprintf(`[DUMMY] LaTeX source for section '%s': +\section{%s} +This is a placeholder for the actual LaTeX content of the section. 
+The real implementation will return the complete source code.`, getArgs.Title, getArgs.Title), "", nil +} diff --git a/internal/services/toolkit/tools/latex/read_source_line_range.go b/internal/services/toolkit/tools/latex/read_source_line_range.go new file mode 100644 index 00000000..96cf0970 --- /dev/null +++ b/internal/services/toolkit/tools/latex/read_source_line_range.go @@ -0,0 +1,59 @@ +package latex + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var ReadSourceLineRangeToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "read_source_line_range", + Description: param.NewOpt("(底层兜底) 读取指定文件、指定行号范围的源码。Reads the source code from a specific file within a given line range."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "file_path": map[string]any{ + "type": "string", + "description": "The path to the LaTeX file to read from.", + }, + "start_line": map[string]any{ + "type": "integer", + "description": "The starting line number (1-indexed).", + }, + "end_line": map[string]any{ + "type": "integer", + "description": "The ending line number (1-indexed, inclusive).", + }, + }, + "required": []string{"file_path", "start_line", "end_line"}, + }, + }, + }, +} + +type ReadSourceLineRangeArgs struct { + FilePath string `json:"file_path"` + StartLine int `json:"start_line"` + EndLine int `json:"end_line"` +} + +func ReadSourceLineRangeTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs ReadSourceLineRangeArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // TODO: Implement actual line range reading logic + return fmt.Sprintf(`[DUMMY] Reading file '%s' lines %d-%d: +Line %d: \begin{document} +Line %d: This is 
placeholder content. +Line %d: \end{document}`, + getArgs.FilePath, getArgs.StartLine, getArgs.EndLine, + getArgs.StartLine, (getArgs.StartLine+getArgs.EndLine)/2, getArgs.EndLine), "", nil +} diff --git a/webapp/_webapp/src/components/message-card.tsx b/webapp/_webapp/src/components/message-card.tsx index 221c201a..3afa7d41 100644 --- a/webapp/_webapp/src/components/message-card.tsx +++ b/webapp/_webapp/src/components/message-card.tsx @@ -40,22 +40,22 @@ interface MessageCardProps { } export const MessageCard = memo(({ messageEntry, prevAttachment, animated }: MessageCardProps) => { - if (messageEntry.toolCall !== undefined) { - return ( -
- -
- ); - } - const returnComponent = () => { + if (messageEntry.toolCall !== undefined) { + return ( +
+ +
+ ); + } + if (messageEntry.assistant !== undefined) { return ( diff --git a/webapp/_webapp/src/components/message-entry-container/assistant.tsx b/webapp/_webapp/src/components/message-entry-container/assistant.tsx index debd8447..6429141a 100644 --- a/webapp/_webapp/src/components/message-entry-container/assistant.tsx +++ b/webapp/_webapp/src/components/message-entry-container/assistant.tsx @@ -48,8 +48,10 @@ export const AssistantMessageContainer = ({ }, 2000); } }, [user?.id, projectId, processedMessage, messageId]); + + const showMessage = processedMessage?.length || 0 > 0; const staleComponent = stale &&
This message is stale.
; - const writingIndicator = stale ? null : ( + const writingIndicator = (stale || !showMessage) ? null : ( ); - return ( + return showMessage && (
{/* Message content */} diff --git a/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx b/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx index fdaa749f..856f0d7c 100644 --- a/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx +++ b/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx @@ -1,12 +1,35 @@ import { cn } from "@heroui/react"; import { LoadingIndicator } from "../loading-indicator"; -export const ToolCallPrepareMessageContainer = ({ stale, preparing }: { stale: boolean; preparing: boolean }) => { +export const ToolCallPrepareMessageContainer = ({ functionName, stale, preparing }: { functionName: string; stale: boolean; preparing: boolean }) => { + // When preparing, show minimal UI with just the text + if (preparing && !stale) { + return ( +
+ + Preparing function {functionName}... + +
+ ); + } + + // When prepared or stale, show the full indicator return (
-
+
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/general.tsx b/webapp/_webapp/src/components/message-entry-container/tools/general.tsx new file mode 100644 index 00000000..4698a4e4 --- /dev/null +++ b/webapp/_webapp/src/components/message-entry-container/tools/general.tsx @@ -0,0 +1,73 @@ +import { cn } from "@heroui/react"; +import { useState } from "react"; + +type GeneralToolCardProps = { + functionName: string; + message: string; + animated: boolean; +}; + +const shimmerStyle = { + WebkitTextFillColor: "transparent", + animationDelay: "0.5s", + animationDuration: "3s", + animationIterationCount: "infinite", + animationName: "shimmer", + background: "#cdcdcd -webkit-gradient(linear, 100% 0, 0 0, from(#cdcdcd), color-stop(.5, #1a1a1a), to(#cdcdcd))", + WebkitBackgroundClip: "text", + backgroundRepeat: "no-repeat", + backgroundSize: "50% 200%", + backgroundPositionX: "-100%", +} as const; + +export const GeneralToolCard = ({ functionName, message, animated }: GeneralToolCardProps) => { + const [isCollapsed, setIsCollapsed] = useState(true); + + // When no message, show minimal "Calling tool..." style like Preparing function + if (!message) { + return ( +
+ + Calling tool {functionName}... + +
+ ); + } + + const toggleCollapse = () => { + setIsCollapsed(!isCollapsed); + }; + + // When there is a message, show the compact card with collapsible content + return ( +
+
+ +

{functionName}

+
+ +
+ {message} +
+
+ ); +}; From b90b1372891c3f720d3328080a6e552fb172867c Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 02:48:58 +0800 Subject: [PATCH 14/28] style changes --- internal/services/toolkit/client/utils_v2.go | 3 +- .../toolkit/tools/files/file_search.go | 70 +++++++++++++++++++ .../src/components/loading-indicator.tsx | 2 +- .../message-entry-container/tools/tools.tsx | 3 +- .../message-entry-container/tools/unknown.tsx | 18 ----- webapp/_webapp/src/index.css | 10 ++- .../handlers/handleStreamPartBegin.ts | 42 +++++++---- 7 files changed, 114 insertions(+), 34 deletions(-) create mode 100644 internal/services/toolkit/tools/files/file_search.go delete mode 100644 webapp/_webapp/src/components/message-entry-container/tools/unknown.tsx diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go index dad79fec..38b5360a 100644 --- a/internal/services/toolkit/client/utils_v2.go +++ b/internal/services/toolkit/client/utils_v2.go @@ -118,8 +118,9 @@ func initializeToolkitV2( toolRegistry.Register("delete_folder", filetools.DeleteFolderToolDescriptionV2, filetools.DeleteFolderTool) toolRegistry.Register("read_folder", filetools.ReadFolderToolDescriptionV2, filetools.ReadFolderTool) toolRegistry.Register("search_string", filetools.SearchStringToolDescriptionV2, filetools.SearchStringTool) + toolRegistry.Register("search_file", filetools.SearchFileToolDescriptionV2, filetools.SearchFileTool) - logger.Info("[AI Client V2] Registered static file tools", "count", 7) + logger.Info("[AI Client V2] Registered static file tools", "count", 8) // Register static LaTeX tools toolRegistry.Register("get_document_structure", latextools.GetDocumentStructureToolDescriptionV2, latextools.GetDocumentStructureTool) diff --git a/internal/services/toolkit/tools/files/file_search.go b/internal/services/toolkit/tools/files/file_search.go new file mode 100644 index 00000000..533ebaaf --- /dev/null +++ 
b/internal/services/toolkit/tools/files/file_search.go @@ -0,0 +1,70 @@ +package tools + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openai/openai-go/v3" + "github.com/openai/openai-go/v3/packages/param" +) + +var SearchFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ + OfFunction: &openai.ChatCompletionFunctionToolParam{ + Function: openai.FunctionDefinitionParam{ + Name: "search_file", + Description: param.NewOpt("Searches for files by name or pattern within a specified directory. Returns matching file paths."), + Parameters: openai.FunctionParameters{ + "type": "object", + "properties": map[string]interface{}{ + "path": map[string]any{ + "type": "string", + "description": "The directory path to search within.", + }, + "pattern": map[string]any{ + "type": "string", + "description": "The file name pattern to search for (supports glob patterns like '*.go', 'test_*.py').", + }, + "recursive": map[string]any{ + "type": "boolean", + "description": "If true, search recursively in subdirectories. Default is true.", + }, + "max_results": map[string]any{ + "type": "integer", + "description": "Maximum number of results to return. 
Default is 100.", + }, + }, + "required": []string{"path", "pattern"}, + }, + }, + }, +} + +type SearchFileArgs struct { + Path string `json:"path"` + Pattern string `json:"pattern"` + Recursive *bool `json:"recursive,omitempty"` + MaxResults *int `json:"max_results,omitempty"` +} + +func SearchFileTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) { + var getArgs SearchFileArgs + + if err := json.Unmarshal(args, &getArgs); err != nil { + return "", "", err + } + + // Default values + recursive := true + if getArgs.Recursive != nil { + recursive = *getArgs.Recursive + } + maxResults := 100 + if getArgs.MaxResults != nil { + maxResults = *getArgs.MaxResults + } + + // TODO: Implement actual file search logic + return fmt.Sprintf("[DUMMY] Searched for files matching '%s' in '%s' (recursive: %v, max_results: %d). No files found.", + getArgs.Pattern, getArgs.Path, recursive, maxResults), "", nil +} diff --git a/webapp/_webapp/src/components/loading-indicator.tsx b/webapp/_webapp/src/components/loading-indicator.tsx index 5198ad9a..48d22ac0 100644 --- a/webapp/_webapp/src/components/loading-indicator.tsx +++ b/webapp/_webapp/src/components/loading-indicator.tsx @@ -125,7 +125,7 @@ export const LoadingIndicator = ({ text = "Thinking", estimatedSeconds = 0, erro
{/* Status Text */}
{getStatusMessage()} diff --git a/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx b/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx index 7b01a7c7..fbd8d3e3 100644 --- a/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx +++ b/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx @@ -7,6 +7,7 @@ import { AlwaysExceptionCard } from "./always-exception"; import { JsonRpc } from "./jsonrpc"; import { ReviewPaperCard } from "./review-paper"; import { parseJsonRpcResult, UNKNOWN_JSONRPC_RESULT } from "./utils/common"; +import { GeneralToolCard } from "./general"; type ToolsProps = { messageId: string; @@ -64,6 +65,6 @@ export default function Tools({ messageId, functionName, message, error, prepari if (jsonRpcResult) { return ; } else { - return ; + return ; } } diff --git a/webapp/_webapp/src/components/message-entry-container/tools/unknown.tsx b/webapp/_webapp/src/components/message-entry-container/tools/unknown.tsx deleted file mode 100644 index 51b4cb89..00000000 --- a/webapp/_webapp/src/components/message-entry-container/tools/unknown.tsx +++ /dev/null @@ -1,18 +0,0 @@ -import { cn } from "@heroui/react"; - -type UnknownToolCardProps = { - functionName: string; - message: string; - animated: boolean; -}; - -export const UnknownToolCard = ({ functionName, message, animated }: UnknownToolCardProps) => { - return ( -
-

- Unknown Tool "{functionName}" -

- {message} -
- ); -}; diff --git a/webapp/_webapp/src/index.css b/webapp/_webapp/src/index.css index d7fa21e2..09763393 100644 --- a/webapp/_webapp/src/index.css +++ b/webapp/_webapp/src/index.css @@ -109,6 +109,14 @@ body { @apply font-medium text-gray-500; } +.tool-card.compact { + @apply px-[3px] py-[1px] my-0.5 bg-transparent text-xs border-0; +} + +.tool-card.compact .tool-card-title { + @apply text-[10px]; +} + /* 相邻 tool-card 的样式处理 */ .tool-card + .tool-card { /* 相邻的第二个卡片:移除上边框,调整上圆角,减少上边距,减少上 padding */ @@ -344,7 +352,7 @@ body { align-self: flex-start; @apply text-sm text-default-800 px-2 py-2 border border-transparent rounded-xl; @apply transition-all duration-500 ease-in-out; - @apply my-2; + @apply mb-2; } .chat-message-entry .message-box-assistant:hover { diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts index 7806b276..caa65b19 100644 --- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts +++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts @@ -14,30 +14,48 @@ export function handleStreamPartBegin( status: MessageEntryStatus.PREPARING, assistant: partBegin.payload?.messageType.value, }; - updateStreamingMessage((prev) => ({ - parts: [...prev.parts, newMessageEntry], - sequence: prev.sequence + 1, - })); + updateStreamingMessage((prev) => { + // Skip if entry with same messageId already exists (prevents duplicate keys) + if (prev.parts.some((p) => p.messageId === partBegin.messageId)) { + return prev; + } + return { + parts: [...prev.parts, newMessageEntry], + sequence: prev.sequence + 1, + }; + }); } else if (role === "toolCallPrepareArguments") { const newMessageEntry: MessageEntry = { messageId: partBegin.messageId, status: MessageEntryStatus.PREPARING, toolCallPrepareArguments: partBegin.payload?.messageType.value, }; - updateStreamingMessage((prev) => ({ - parts: [...prev.parts, newMessageEntry], - 
sequence: prev.sequence + 1, - })); + updateStreamingMessage((prev) => { + // Skip if entry with same messageId already exists (prevents duplicate keys) + if (prev.parts.some((p) => p.messageId === partBegin.messageId)) { + return prev; + } + return { + parts: [...prev.parts, newMessageEntry], + sequence: prev.sequence + 1, + }; + }); } else if (role === "toolCall") { const newMessageEntry: MessageEntry = { messageId: partBegin.messageId, status: MessageEntryStatus.PREPARING, toolCall: partBegin.payload?.messageType.value, }; - updateStreamingMessage((prev) => ({ - parts: [...prev.parts, newMessageEntry], - sequence: prev.sequence + 1, - })); + updateStreamingMessage((prev) => { + // Skip if entry with same messageId already exists (prevents duplicate keys) + if (prev.parts.some((p) => p.messageId === partBegin.messageId)) { + return prev; + } + return { + parts: [...prev.parts, newMessageEntry], + sequence: prev.sequence + 1, + }; + }); } else if (role === "system") { // not possible } else if (role === "user") { From 98eeaa1695b69db8799d560163ea01c9fcf8c7b2 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 02:52:43 +0800 Subject: [PATCH 15/28] style --- .../components/message-entry-container/tools/general.tsx | 7 +++++-- webapp/_webapp/src/index.css | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/webapp/_webapp/src/components/message-entry-container/tools/general.tsx b/webapp/_webapp/src/components/message-entry-container/tools/general.tsx index 4698a4e4..c9a4d0f8 100644 --- a/webapp/_webapp/src/components/message-entry-container/tools/general.tsx +++ b/webapp/_webapp/src/components/message-entry-container/tools/general.tsx @@ -37,7 +37,10 @@ export const GeneralToolCard = ({ functionName, message, animated }: GeneralTool const toggleCollapse = () => { setIsCollapsed(!isCollapsed); }; - + const pascalCase = (str: string) => { + const words = str.split("_"); + return words.map((word) => word.charAt(0).toUpperCase() + 
word.slice(1)).join(" "); + }; // When there is a message, show the compact card with collapsible content return (
@@ -57,7 +60,7 @@ export const GeneralToolCard = ({ functionName, message, animated }: GeneralTool -

{functionName}

+

{pascalCase(functionName)}

Date: Fri, 19 Dec 2025 03:04:54 +0800 Subject: [PATCH 16/28] style --- .../src/components/message-entry-container/toolcall-prepare.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx b/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx index 856f0d7c..9c52d853 100644 --- a/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx +++ b/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx @@ -6,7 +6,7 @@ export const ToolCallPrepareMessageContainer = ({ functionName, stale, preparing if (preparing && !stale) { return (
- Date: Fri, 19 Dec 2025 03:42:48 +0800 Subject: [PATCH 17/28] feat: small icon, last used model --- .../create_conversation_message_stream_v2.go | 38 +++++------ internal/api/chat/list_supported_models_v2.go | 66 ++++++------------- internal/libs/cfg/cfg.go | 28 +++++--- internal/services/toolkit/client/client_v2.go | 9 +-- .../services/toolkit/client/completion_v2.go | 16 +++-- .../client/get_conversation_title_v2.go | 2 +- internal/services/toolkit/client/utils_v2.go | 26 ++++---- .../services/toolkit/handler/stream_v2.go | 2 +- webapp/_webapp/src/components/switch-cell.tsx | 1 + webapp/_webapp/src/hooks/useLanguageModels.ts | 5 +- .../stores/conversation/conversation-store.ts | 5 +- .../conversation/conversation-ui-store.ts | 10 +++ 12 files changed, 108 insertions(+), 100 deletions(-) diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go index ac907347..f353b96d 100644 --- a/internal/api/chat/create_conversation_message_stream_v2.go +++ b/internal/api/chat/create_conversation_message_stream_v2.go @@ -2,11 +2,9 @@ package chat import ( "context" - "paperdebugger/internal/api/mapper" "paperdebugger/internal/libs/contextutil" "paperdebugger/internal/libs/shared" "paperdebugger/internal/models" - "paperdebugger/internal/services" chatv2 "paperdebugger/pkg/gen/api/chat/v2" "github.com/google/uuid" @@ -292,24 +290,24 @@ func (s *ChatServerV2) CreateConversationMessageStream( return s.sendStreamError(stream, err) } - if conversation.Title == services.DefaultConversationTitle { - go func() { - protoMessages := make([]*chatv2.Message, len(conversation.InappChatHistory)) - for i, bsonMsg := range conversation.InappChatHistory { - protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg) - } - title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider) - if err != nil { - s.logger.Error("Failed to get conversation title", "error", err, "conversationID", 
conversation.ID.Hex()) - return - } - conversation.Title = title - if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { - s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex()) - return - } - }() - } + // if conversation.Title == services.DefaultConversationTitle { + // go func() { + // protoMessages := make([]*chatv2.Message, len(conversation.InappChatHistory)) + // for i, bsonMsg := range conversation.InappChatHistory { + // protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg) + // } + // title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider) + // if err != nil { + // s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex()) + // return + // } + // conversation.Title = title + // if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { + // s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex()) + // return + // } + // }() + // } // The final conversation object is NOT returned return nil diff --git a/internal/api/chat/list_supported_models_v2.go b/internal/api/chat/list_supported_models_v2.go index 00a607e9..ad7464bd 100644 --- a/internal/api/chat/list_supported_models_v2.go +++ b/internal/api/chat/list_supported_models_v2.go @@ -30,71 +30,45 @@ func (s *ChatServerV2) ListSupportedModels( { Name: "GPT-4o", - Slug: openai.ChatModelGPT4o, + Slug: "openai/gpt-4o", }, { Name: "GPT-4.1", - Slug: openai.ChatModelGPT4_1, + Slug: "openai/gpt-4.1", }, { Name: "GPT-4.1-mini", - Slug: openai.ChatModelGPT4_1Mini, - }, - } - } else { - models = []*chatv2.SupportedModel{ - { - Name: "GPT 4o", - Slug: openai.ChatModelGPT4o, - }, - { - Name: "GPT 4.1", - Slug: openai.ChatModelGPT4_1, - }, - { - Name: "GPT 4.1 mini", - Slug: openai.ChatModelGPT4_1Mini, - }, - { - Name: "GPT 5", - Slug: openai.ChatModelGPT5, + Slug: "openai/gpt-4.1-mini", }, 
{ - Name: "GPT 5 mini", - Slug: openai.ChatModelGPT5Mini, + Name: "Qwen Plus (balanced)", + Slug: "qwen/qwen-plus", }, { - Name: "GPT 5 nano", - Slug: openai.ChatModelGPT5Nano, + Name: "Qwen Turbo (fast)", + Slug: "qwen/qwen-turbo", }, { - Name: "GPT 5 Chat Latest", - Slug: openai.ChatModelGPT5ChatLatest, - }, - { - Name: "o1", - Slug: openai.ChatModelO1, - }, - { - Name: "o1 mini", - Slug: openai.ChatModelO1Mini, - }, - { - Name: "o3", - Slug: openai.ChatModelO3, + Name: "Gemini 2.5 Flash (fast)", + Slug: "google/gemini-2.5-flash", }, + } + } else { + models = []*chatv2.SupportedModel{ { - Name: "o3 mini", - Slug: openai.ChatModelO3Mini, + + Name: "GPT-4o", + Slug: openai.ChatModelGPT4o, }, { - Name: "o4 mini", - Slug: openai.ChatModelO4Mini, + Name: "GPT-4.1", + Slug: openai.ChatModelGPT4_1, }, { - Name: "Codex Mini Latest", - Slug: openai.ChatModelCodexMiniLatest, + Name: "GPT-4.1-mini", + Slug: openai.ChatModelGPT4_1Mini, }, + // TODO: add user custom models } } diff --git a/internal/libs/cfg/cfg.go b/internal/libs/cfg/cfg.go index 1293ea40..ec1af9a7 100644 --- a/internal/libs/cfg/cfg.go +++ b/internal/libs/cfg/cfg.go @@ -7,9 +7,11 @@ import ( ) type Cfg struct { - OpenAIBaseURL string - OpenAIAPIKey string - JwtSigningKey string + OpenAIBaseURL string + OpenAIAPIKey string + InferenceBaseURL string + InferenceAPIKey string + JwtSigningKey string MongoURI string XtraMCPURI string @@ -20,11 +22,13 @@ var cfg *Cfg func GetCfg() *Cfg { _ = godotenv.Load() cfg = &Cfg{ - OpenAIBaseURL: openAIBaseURL(), - OpenAIAPIKey: os.Getenv("OPENAI_API_KEY"), - JwtSigningKey: os.Getenv("JWT_SIGNING_KEY"), - MongoURI: mongoURI(), - XtraMCPURI: xtraMCPURI(), + OpenAIBaseURL: openAIBaseURL(), + OpenAIAPIKey: os.Getenv("OPENAI_API_KEY"), + InferenceBaseURL: inferenceBaseURL(), + InferenceAPIKey: os.Getenv("INFERENCE_API_KEY"), + JwtSigningKey: os.Getenv("JWT_SIGNING_KEY"), + MongoURI: mongoURI(), + XtraMCPURI: xtraMCPURI(), } return cfg @@ -38,6 +42,14 @@ func openAIBaseURL() string 
{ return "https://api.openai.com/v1" } +func inferenceBaseURL() string { + val := os.Getenv("INFERENCE_BASE_URL") + if val != "" { + return val + } + return "https://inference.paperdebugger.workers.dev/openrouter" +} + func xtraMCPURI() string { val := os.Getenv("XTRAMCP_URI") if val != "" { diff --git a/internal/services/toolkit/client/client_v2.go b/internal/services/toolkit/client/client_v2.go index 60a8a57b..6d21c70e 100644 --- a/internal/services/toolkit/client/client_v2.go +++ b/internal/services/toolkit/client/client_v2.go @@ -26,16 +26,17 @@ type AIClientV2 struct { // SetOpenAIClient sets the appropriate OpenAI client based on the LLM provider config. // If the config specifies a custom endpoint and API key, a new client is created for that endpoint. +// V2 uses the inference endpoint by default. func (a *AIClientV2) GetOpenAIClient(llmConfig *models.LLMProviderConfig) *openai.Client { var Endpoint string = llmConfig.Endpoint var APIKey string = llmConfig.APIKey if Endpoint == "" { - Endpoint = a.cfg.OpenAIBaseURL + Endpoint = a.cfg.InferenceBaseURL } if APIKey == "" { - APIKey = a.cfg.OpenAIAPIKey + APIKey = a.cfg.InferenceAPIKey } opts := []option.RequestOption{ @@ -57,8 +58,8 @@ func NewAIClientV2( ) *AIClientV2 { database := db.Database("paperdebugger") oaiClient := openai.NewClient( - option.WithBaseURL(cfg.OpenAIBaseURL), - option.WithAPIKey(cfg.OpenAIAPIKey), + option.WithBaseURL(cfg.InferenceBaseURL), + option.WithAPIKey(cfg.InferenceAPIKey), ) CheckOpenAIWorksV2(oaiClient, logger) diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go index cd810d57..c6228057 100644 --- a/internal/services/toolkit/client/completion_v2.go +++ b/internal/services/toolkit/client/completion_v2.go @@ -3,6 +3,7 @@ package client import ( "context" "encoding/json" + "fmt" "paperdebugger/internal/models" "paperdebugger/internal/services/toolkit/handler" chatv2 "paperdebugger/pkg/gen/api/chat/v2" @@ -84,12 +85,14 
@@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream if len(chunk.Choices) == 0 { // 处理用量信息 - // fmt.Printf("Usage: %+v\n", chunk.Usage) + fmt.Printf("Usage: %+v\n", chunk.Usage) continue } if chunk.Choices[0].FinishReason != "" { - // fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason) + fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason) + answer_content += chunk.Choices[0].Delta.Content + fmt.Printf("answer_content: %s\n", answer_content) streamHandler.HandleTextDoneItem(chunk, answer_content) break } @@ -100,14 +103,14 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream var s string err := json.Unmarshal([]byte(field.Raw()), &s) if err != nil { - // fmt.Println(err) + fmt.Println(err) } reasoning_content += s - // fmt.Print(s) + fmt.Print(s) } else { if !is_answering { is_answering = true - // fmt.Println("\n\n========== 回答内容 ==========") + fmt.Println("\n\n========== 回答内容 ==========") streamHandler.HandleAddedItem(chunk) } @@ -115,6 +118,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream answer_content += delta.Content answer_content_id = chunk.ID streamHandler.HandleTextDelta(chunk) + fmt.Print(delta.Content) } if len(delta.ToolCalls) > 0 { @@ -123,7 +127,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream // haskey(tool_info, index) if _, ok := tool_info[index]; !ok { - // fmt.Printf("Prepare tool %s\n", toolCall.Function.Name) + fmt.Printf("Prepare tool %s\n", toolCall.Function.Name) tool_info[index] = map[string]string{} streamHandler.HandleAddedItem(chunk) } diff --git a/internal/services/toolkit/client/get_conversation_title_v2.go b/internal/services/toolkit/client/get_conversation_title_v2.go index 90630f3a..40c8f9b8 100644 --- a/internal/services/toolkit/client/get_conversation_title_v2.go +++ b/internal/services/toolkit/client/get_conversation_title_v2.go @@ -29,7 +29,7 @@ func (a *AIClientV2) 
GetConversationTitleV2(ctx context.Context, inappChatHistor message := strings.Join(messages, "\n") message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. Give me the title only, no other text including any other words.", message) - _, resp, err := a.ChatCompletionV2(ctx, openai.ChatModelGPT4_1Mini, OpenAIChatHistory{ + _, resp, err := a.ChatCompletionV2(ctx, "gpt-4.1-mini", OpenAIChatHistory{ openai.SystemMessage("You are a helpful assistant that generates a title for a conversation."), openai.UserMessage(message), }, llmProvider) diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go index 38b5360a..4c56b386 100644 --- a/internal/services/toolkit/client/utils_v2.go +++ b/internal/services/toolkit/client/utils_v2.go @@ -17,10 +17,10 @@ import ( latextools "paperdebugger/internal/services/toolkit/tools/latex" "paperdebugger/internal/services/toolkit/tools/xtramcp" chatv2 "paperdebugger/pkg/gen/api/chat/v2" + "strings" "time" openaiv3 "github.com/openai/openai-go/v3" - "github.com/samber/lo" ) func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappChatHistory *AppChatHistory, content string, contentId string, modelSlug string) { @@ -67,13 +67,15 @@ func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2) "o1", "codex-mini-latest", } - if lo.Contains(reasoningModels, modelSlug) { - return openaiv3.ChatCompletionNewParams{ - Model: modelSlug, - MaxCompletionTokens: openaiv3.Int(4000), - Tools: toolRegistry.GetTools(), - ParallelToolCalls: openaiv3.Bool(true), - Store: openaiv3.Bool(false), + for _, model := range reasoningModels { + if strings.Contains(modelSlug, model) { + return 
openaiv3.ChatCompletionNewParams{ + Model: modelSlug, + MaxCompletionTokens: openaiv3.Int(4000), + Tools: toolRegistry.GetTools(), + ParallelToolCalls: openaiv3.Bool(true), + Store: openaiv3.Bool(false), + } } } @@ -88,18 +90,18 @@ func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2) } func CheckOpenAIWorksV2(oaiClient openaiv3.Client, logger *logger.Logger) { - logger.Info("[AI Client] checking if openai client works") + logger.Info("[AI Client V2] checking if openai client works") chatCompletion, err := oaiClient.Chat.Completions.New(context.TODO(), openaiv3.ChatCompletionNewParams{ Messages: []openaiv3.ChatCompletionMessageParamUnion{ openaiv3.UserMessage("Say 'openai client works'"), }, - Model: openaiv3.ChatModelGPT4o, + Model: "openai/gpt-5-nano", }) if err != nil { - logger.Errorf("[AI Client] openai client does not work: %v", err) + logger.Errorf("[AI Client V2] openai client does not work: %v", err) return } - logger.Info("[AI Client] openai client works", "response", chatCompletion.Choices[0].Message.Content) + logger.Info("[AI Client V2] openai client works", "response", chatCompletion.Choices[0].Message.Content) } func initializeToolkitV2( diff --git a/internal/services/toolkit/handler/stream_v2.go b/internal/services/toolkit/handler/stream_v2.go index cb46c077..981dc3c1 100644 --- a/internal/services/toolkit/handler/stream_v2.go +++ b/internal/services/toolkit/handler/stream_v2.go @@ -102,7 +102,7 @@ func (h *StreamHandlerV2) HandleTextDoneItem(chunk openai.ChatCompletionChunk, c if h.callbackStream == nil { return } - if chunk.Choices[0].Delta.Role != "" && chunk.Choices[0].Delta.Content != "" { + if chunk.Choices[0].Delta.Role != "" { return } h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ diff --git a/webapp/_webapp/src/components/switch-cell.tsx b/webapp/_webapp/src/components/switch-cell.tsx index f10d78bc..981cd96c 100644 --- a/webapp/_webapp/src/components/switch-cell.tsx +++ 
b/webapp/_webapp/src/components/switch-cell.tsx @@ -33,6 +33,7 @@ export type SwitchCellProps = Omit & { const SwitchCell = React.forwardRef( ({ label, description, classNames, isLoading, ...props }, ref) => ( ({ export const useLanguageModels = () => { const { currentConversation, setCurrentConversation } = useConversationStore(); + const { setLastUsedModelSlug } = useConversationUiStore(); const { data: supportedModelsResponse } = useListSupportedModelsQuery(); const models: Model[] = useMemo(() => { @@ -39,12 +41,13 @@ export const useLanguageModels = () => { const setModel = useCallback( (model: Model) => { + setLastUsedModelSlug(model.slug); setCurrentConversation({ ...currentConversation, modelSlug: model.slug, }); }, - [setCurrentConversation, currentConversation], + [setCurrentConversation, currentConversation, setLastUsedModelSlug], ); return { models, currentModel, setModel }; diff --git a/webapp/_webapp/src/stores/conversation/conversation-store.ts b/webapp/_webapp/src/stores/conversation/conversation-store.ts index fd80db8e..308f53ed 100644 --- a/webapp/_webapp/src/stores/conversation/conversation-store.ts +++ b/webapp/_webapp/src/stores/conversation/conversation-store.ts @@ -1,6 +1,7 @@ import { create } from "zustand"; import { Conversation, ConversationSchema } from "../../pkg/gen/apiclient/chat/v2/chat_pb"; import { fromJson } from "@bufbuild/protobuf"; +import { getLocalStorage } from "./conversation-ui-store"; interface ConversationStore { isStreaming: boolean; @@ -22,9 +23,11 @@ export const useConversationStore = create((set, get) => ({ })); export function newConversation(): Conversation { + const lastUsedModelSlug = getLocalStorage("lastUsedModelSlug") || "gpt-4.1"; + return fromJson(ConversationSchema, { id: "", - modelSlug: "gpt-4.1", + modelSlug: lastUsedModelSlug, title: "New Conversation", messages: [], }); diff --git a/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts 
b/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts index 87cb5fa5..121568c8 100644 --- a/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts +++ b/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts @@ -22,6 +22,7 @@ const localStorageKey = { activeTab: "pd.layout.activeTab", sidebarCollapsed: "pd.layout.sidebar.collapsed", heightCollapseRequired: "pd.layout.heightCollapseRequired", + lastUsedModelSlug: "pd.chat.lastUsedModelSlug", } as const; export const getLocalStorage = (key: keyof typeof localStorageKey): T | undefined => { @@ -71,6 +72,9 @@ interface ConversationUiStore { heightCollapseRequired: boolean; setHeightCollapseRequired: (heightCollapseRequired: boolean) => void; + lastUsedModelSlug: string; + setLastUsedModelSlug: (lastUsedModelSlug: string) => void; + resetPosition: () => void; } @@ -150,6 +154,12 @@ export const useConversationUiStore = create((set) => ({ set({ heightCollapseRequired }); }, + lastUsedModelSlug: getLocalStorage("lastUsedModelSlug") || "gpt-4.1", + setLastUsedModelSlug: (lastUsedModelSlug: string) => { + localStorage.setItem(localStorageKey.lastUsedModelSlug, JSON.stringify(lastUsedModelSlug)); + set({ lastUsedModelSlug }); + }, + resetPosition: () => { set((state) => { state.setFloatingX(100); From 24147809efb4889c9660afc672112a80800b1232 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 03:55:37 +0800 Subject: [PATCH 18/28] fix: finish reason --- .../services/toolkit/client/completion_v2.go | 16 +++++++-------- .../services/toolkit/handler/stream_v2.go | 20 +++++++++---------- 2 files changed, 17 insertions(+), 19 deletions(-) diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go index c6228057..9577a27e 100644 --- a/internal/services/toolkit/client/completion_v2.go +++ b/internal/services/toolkit/client/completion_v2.go @@ -89,14 +89,6 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream 
continue } - if chunk.Choices[0].FinishReason != "" { - fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason) - answer_content += chunk.Choices[0].Delta.Content - fmt.Printf("answer_content: %s\n", answer_content) - streamHandler.HandleTextDoneItem(chunk, answer_content) - break - } - delta := chunk.Choices[0].Delta if field, ok := delta.JSON.ExtraFields["reasoning_content"]; ok && field.Raw() != "null" { @@ -159,6 +151,14 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream } } } + + if chunk.Choices[0].FinishReason != "" { + fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason) + // answer_content += chunk.Choices[0].Delta.Content + fmt.Printf("answer_content: %s\n", answer_content) + streamHandler.HandleTextDoneItem(chunk, answer_content) + break + } } if err := stream.Err(); err != nil { diff --git a/internal/services/toolkit/handler/stream_v2.go b/internal/services/toolkit/handler/stream_v2.go index 981dc3c1..f95ca82d 100644 --- a/internal/services/toolkit/handler/stream_v2.go +++ b/internal/services/toolkit/handler/stream_v2.go @@ -50,7 +50,7 @@ func (h *StreamHandlerV2) HandleAddedItem(chunk openai.ChatCompletionChunk) { h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv2.StreamPartBegin{ - MessageId: "openai_" + chunk.ID, + MessageId: chunk.ID, Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_Assistant{ Assistant: &chatv2.MessageTypeAssistant{}, @@ -63,7 +63,7 @@ func (h *StreamHandlerV2) HandleAddedItem(chunk openai.ChatCompletionChunk) { // h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ // ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ // StreamPartBegin: &chatv2.StreamPartBegin{ - // MessageId: "openai_" + chunk.ID, + // MessageId: chunk.ID, // Payload: &chatv2.MessagePayload{ // MessageType: 
&chatv2.MessagePayload_Unknown{ // Unknown: &chatv2.MessageTypeUnknown{ @@ -83,7 +83,7 @@ func (h *StreamHandlerV2) HandleAddedItem(chunk openai.ChatCompletionChunk) { h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv2.StreamPartBegin{ - MessageId: fmt.Sprintf("openai_toolCallPrepareArguments[%d]_%s", toolCall.Index, toolCall.ID), + MessageId: fmt.Sprintf("toolCallPrepareArguments[%d]_%s", toolCall.Index, toolCall.ID), Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{ ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{ @@ -102,13 +102,11 @@ func (h *StreamHandlerV2) HandleTextDoneItem(chunk openai.ChatCompletionChunk, c if h.callbackStream == nil { return } - if chunk.Choices[0].Delta.Role != "" { - return - } + h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ StreamPartEnd: &chatv2.StreamPartEnd{ - MessageId: "openai_" + chunk.ID, + MessageId: chunk.ID, Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_Assistant{ Assistant: &chatv2.MessageTypeAssistant{ @@ -129,7 +127,7 @@ func (h *StreamHandlerV2) HandleToolArgPreparedDoneItem(index int, id string, na h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ StreamPartEnd: &chatv2.StreamPartEnd{ - MessageId: fmt.Sprintf("openai_toolCallPrepareArguments[%d]_%s", index, id), + MessageId: fmt.Sprintf("toolCallPrepareArguments[%d]_%s", index, id), Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{ ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{ @@ -150,7 +148,7 @@ func (h *StreamHandlerV2) HandleTextDelta(chunk openai.ChatCompletionChunk) { 
h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_MessageChunk{ MessageChunk: &chatv2.MessageChunk{ - MessageId: "openai_" + chunk.ID, + MessageId: chunk.ID, Delta: chunk.Choices[0].Delta.Content, }, }, @@ -191,7 +189,7 @@ func (h *StreamHandlerV2) SendToolCallBegin(toolCall openai.FinishedChatCompleti h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{ StreamPartBegin: &chatv2.StreamPartBegin{ - MessageId: fmt.Sprintf("openai_tool[%d]_%s", toolCall.Index, toolCall.ID), + MessageId: fmt.Sprintf("tool[%d]_%s", toolCall.Index, toolCall.ID), Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_ToolCall{ ToolCall: &chatv2.MessageTypeToolCall{ @@ -212,7 +210,7 @@ func (h *StreamHandlerV2) SendToolCallEnd(toolCall openai.FinishedChatCompletion h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{ StreamPartEnd: &chatv2.StreamPartEnd{ - MessageId: fmt.Sprintf("openai_tool[%d]_%s", toolCall.Index, toolCall.ID), + MessageId: fmt.Sprintf("tool[%d]_%s", toolCall.Index, toolCall.ID), Payload: &chatv2.MessagePayload{ MessageType: &chatv2.MessagePayload_ToolCall{ ToolCall: &chatv2.MessageTypeToolCall{ From be1fa8149449333c5a5c56f3a614f3f93abacd7e Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 04:00:34 +0800 Subject: [PATCH 19/28] remove chinese --- internal/api/auth/auth_test.go | 10 +-- .../create_conversation_message_stream.go | 22 +++---- .../create_conversation_message_stream_v2.go | 60 +++++++++--------- .../services/toolkit/client/completion.go | 6 +- .../services/toolkit/client/completion_v2.go | 23 ++++--- .../client/get_conversation_title_v2.go | 2 +- internal/services/toolkit/client/utils.go | 2 +- internal/services/toolkit/client/utils_v2.go | 2 +- 
internal/services/toolkit/toolkit_test.go | 28 ++++----- proto/comment/v1/comment.proto | 2 +- .../tools/paper-score-comment/README.md | 61 ------------------- webapp/_webapp/src/hooks/useLanguageModels.ts | 11 +++- webapp/_webapp/src/libs/helpers.ts | 8 +-- webapp/_webapp/src/libs/oauth.ts | 4 +- webapp/_webapp/src/query/index.ts | 2 +- webapp/_webapp/src/views/chat/body/index.tsx | 4 +- .../chat/footer/toolbar/chat-actions.tsx | 16 ++++- .../views/chat/footer/toolbar/selection.tsx | 6 +- webapp/_webapp/src/views/devtools/index.tsx | 24 ++++---- .../views/prompts/project-instructions.tsx | 2 +- .../src/views/prompts/user-instructions.tsx | 2 +- .../src/views/settings/setting-text-input.tsx | 2 +- 22 files changed, 131 insertions(+), 168 deletions(-) delete mode 100644 webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/README.md diff --git a/internal/api/auth/auth_test.go b/internal/api/auth/auth_test.go index be5a8ea9..ac640217 100644 --- a/internal/api/auth/auth_test.go +++ b/internal/api/auth/auth_test.go @@ -55,14 +55,14 @@ func TestAuthServer(t *testing.T) { } assert.True(t, token.ExpiresAt.Before(timeNow)) - // 现在 Token 有效期应该是 24 小时前 + // Now Token expiration should be 24 hours ago token, err = tokenService.GetTokenByToken(context.Background(), token.Token) if err != nil { t.Fatalf("Failed to get refresh token: %v", err) } assert.True(t, token.ExpiresAt.Before(timeNow)) - // 这时候 RefreshToken 应该失效 + // At this point RefreshToken should be invalid resp, err := authServer.RefreshToken(context.Background(), &authv1.RefreshTokenRequest{ RefreshToken: token.Token, @@ -71,7 +71,7 @@ func TestAuthServer(t *testing.T) { assert.Error(t, err) assert.Nil(t, resp) - // 更新 Token 有效期 到 24 小时候 + // Update Token expiration to 24 hours later token.ExpiresAt = timeNow.Add(time.Hour * 24) token, err = tokenService.UpdateToken(context.Background(), token) if err != nil { @@ -79,7 +79,7 @@ func TestAuthServer(t *testing.T) { } assert.True(t, 
token.ExpiresAt.After(timeNow)) - // 这时候 RefreshToken 应该有效 + // At this point RefreshToken should be valid resp, err = authServer.RefreshToken(context.Background(), &authv1.RefreshTokenRequest{ RefreshToken: token.Token, @@ -88,7 +88,7 @@ func TestAuthServer(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, resp) - // 刚刚 RefreshToken 之后,有效期应该刷新到一个月后 + // After RefreshToken, expiration should be refreshed to one month later token, err = tokenService.GetTokenByToken(context.Background(), resp.RefreshToken) if err != nil { t.Fatalf("Failed to get refresh token: %v", err) diff --git a/internal/api/chat/create_conversation_message_stream.go b/internal/api/chat/create_conversation_message_stream.go index 8916f8d3..b785c55a 100644 --- a/internal/api/chat/create_conversation_message_stream.go +++ b/internal/api/chat/create_conversation_message_stream.go @@ -26,9 +26,9 @@ func (s *ChatServerV1) sendStreamError(stream chatv1.ChatService_CreateConversat }) } -// 设计理念: -// 发送给 GPT 之前,消息列表已经构造进 Conversation 对象中(也保存在数据库里) -// 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList) +// Design philosophy: +// Before sending to GPT, the message list is already constructed in the Conversation object (also saved in the database) +// What we send to GPT is the content (InputItemList) from the Conversation object retrieved from the database // buildUserMessage constructs both the user-facing message and the OpenAI input message func (s *ChatServerV1) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv1.ConversationType) (*chatv1.Message, *responses.ResponseInputItemUnionParam, error) { @@ -114,8 +114,8 @@ func convertToBSON(msg *chatv1.Message) (bson.M, error) { return bsonMsg, nil } -// 创建对话并写入数据库 -// 返回 Conversation 对象 +// createConversation creates a conversation and writes it to the database +// Returns the Conversation object func (s *ChatServerV1) createConversation( ctx context.Context, userId bson.ObjectID, @@ -149,8 +149,8 
@@ func (s *ChatServerV1) createConversation( ) } -// 追加消息到对话并写入数据库 -// 返回 Conversation 对象 +// appendConversationMessage appends a message to the conversation and writes it to the database +// Returns the Conversation object func (s *ChatServerV1) appendConversationMessage( ctx context.Context, userId bson.ObjectID, @@ -188,8 +188,8 @@ func (s *ChatServerV1) appendConversationMessage( return conversation, nil } -// 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 -// conversationType 可以在一次 conversation 中多次切换 +// prepare creates a new conversation if conversationId is "", otherwise appends a message to the conversation +// conversationType can be switched multiple times within a single conversation func (s *ChatServerV1) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { actor, err := contextutil.GetActor(ctx) if err != nil { @@ -286,7 +286,7 @@ func (s *ChatServerV1) CreateConversationMessageStream( return s.sendStreamError(stream, err) } - // 用法跟 ChatCompletion 一样,只是传递了 stream 参数 + // Usage is the same as ChatCompletion, just passing the stream parameter llmProvider := &models.LLMProviderConfig{ APIKey: settings.OpenAIAPIKey, } @@ -296,7 +296,7 @@ func (s *ChatServerV1) CreateConversationMessageStream( return s.sendStreamError(stream, err) } - // 附加消息到对话 + // Append messages to the conversation bsonMessages := make([]bson.M, len(inappChatHistory)) for i := range inappChatHistory { bsonMsg, err := convertToBSON(&inappChatHistory[i]) diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go index f353b96d..316ce317 100644 --- a/internal/api/chat/create_conversation_message_stream_v2.go +++ b/internal/api/chat/create_conversation_message_stream_v2.go @@ -2,9 +2,11 @@ package chat import ( "context" + 
"paperdebugger/internal/api/mapper" "paperdebugger/internal/libs/contextutil" "paperdebugger/internal/libs/shared" "paperdebugger/internal/models" + "paperdebugger/internal/services" chatv2 "paperdebugger/pkg/gen/api/chat/v2" "github.com/google/uuid" @@ -24,9 +26,9 @@ func (s *ChatServerV2) sendStreamError(stream chatv2.ChatService_CreateConversat }) } -// 设计理念: -// 发送给 GPT 之前,消息列表已经构造进 Conversation 对象中(也保存在数据库里) -// 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList) +// Design philosophy: +// Before sending to GPT, the message list is already constructed in the Conversation object (also saved in the database) +// What we send to GPT is the content (InputItemList) from the Conversation object retrieved from the database // buildUserMessage constructs both the user-facing message and the OpenAI input message func (s *ChatServerV2) buildSystemMessage(systemPrompt string) (*chatv2.Message, openai.ChatCompletionMessageParamUnion) { @@ -96,8 +98,8 @@ func convertToBSONV2(msg *chatv2.Message) (bson.M, error) { return bsonMsg, nil } -// 创建对话并写入数据库 -// 返回 Conversation 对象 +// createConversation creates a conversation and writes it to the database +// Returns the Conversation object func (s *ChatServerV2) createConversation( ctx context.Context, userId bson.ObjectID, @@ -132,8 +134,8 @@ func (s *ChatServerV2) createConversation( ) } -// 追加消息到对话并写入数据库 -// 返回 Conversation 对象 +// appendConversationMessage appends a message to the conversation and writes it to the database +// Returns the Conversation object func (s *ChatServerV2) appendConversationMessage( ctx context.Context, userId bson.ObjectID, @@ -171,8 +173,8 @@ func (s *ChatServerV2) appendConversationMessage( return conversation, nil } -// 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话 -// conversationType 可以在一次 conversation 中多次切换 +// prepare creates a new conversation if conversationId is "", otherwise appends a message to the conversation +// conversationType can be switched multiple times within a single 
conversation func (s *ChatServerV2) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv2.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) { actor, err := contextutil.GetActor(ctx) if err != nil { @@ -265,7 +267,7 @@ func (s *ChatServerV2) CreateConversationMessageStream( return s.sendStreamError(stream, err) } - // 用法跟 ChatCompletion 一样,只是传递了 stream 参数 + // Usage is the same as ChatCompletion, just passing the stream parameter llmProvider := &models.LLMProviderConfig{ APIKey: settings.OpenAIAPIKey, } @@ -275,7 +277,7 @@ func (s *ChatServerV2) CreateConversationMessageStream( return s.sendStreamError(stream, err) } - // 附加消息到对话 + // Append messages to the conversation bsonMessages := make([]bson.M, len(inappChatHistory)) for i := range inappChatHistory { bsonMsg, err := convertToBSONV2(&inappChatHistory[i]) @@ -290,24 +292,24 @@ func (s *ChatServerV2) CreateConversationMessageStream( return s.sendStreamError(stream, err) } - // if conversation.Title == services.DefaultConversationTitle { - // go func() { - // protoMessages := make([]*chatv2.Message, len(conversation.InappChatHistory)) - // for i, bsonMsg := range conversation.InappChatHistory { - // protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg) - // } - // title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider) - // if err != nil { - // s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex()) - // return - // } - // conversation.Title = title - // if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { - // s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex()) - // return - // } - // }() - // } + if conversation.Title == services.DefaultConversationTitle { + go func() { + protoMessages := make([]*chatv2.Message, 
len(conversation.InappChatHistory)) + for i, bsonMsg := range conversation.InappChatHistory { + protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg) + } + title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider) + if err != nil { + s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex()) + return + } + conversation.Title = title + if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil { + s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex()) + return + } + }() + } // The final conversation object is NOT returned return nil diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go index 88716217..94f0c391 100644 --- a/internal/services/toolkit/client/completion.go +++ b/internal/services/toolkit/client/completion.go @@ -93,20 +93,20 @@ func (a *AIClient) ChatCompletionStreamV1(ctx context.Context, callbackStream ch return nil, nil, err } - // 把 openai 的 response 记录下来,然后执行调用(如果有) + // Record the openai response, then execute the calls (if any) for _, item := range openaiOutput { if item.Type == "message" && item.Role == "assistant" { appendAssistantTextResponse(&openaiChatHistory, &inappChatHistory, item) } } - // 执行调用(如果有),返回增量数据 + // Execute the calls (if any), return incremental data openaiToolHistory, inappToolHistory, err := a.toolCallHandler.HandleToolCalls(ctx, openaiOutput, streamHandler) if err != nil { return nil, nil, err } - // 把工具调用结果记录下来 + // Record the tool call results if len(openaiToolHistory.OfInputItemList) > 0 { openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, openaiToolHistory.OfInputItemList...) inappChatHistory = append(inappChatHistory, inappToolHistory...) 
diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go index 9577a27e..e7e5b7b2 100644 --- a/internal/services/toolkit/client/completion_v2.go +++ b/internal/services/toolkit/client/completion_v2.go @@ -3,7 +3,6 @@ package client import ( "context" "encoding/json" - "fmt" "paperdebugger/internal/models" "paperdebugger/internal/services/toolkit/handler" chatv2 "paperdebugger/pkg/gen/api/chat/v2" @@ -84,8 +83,8 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chunk := stream.Current() if len(chunk.Choices) == 0 { - // 处理用量信息 - fmt.Printf("Usage: %+v\n", chunk.Usage) + // Handle usage information + // fmt.Printf("Usage: %+v\n", chunk.Usage) continue } @@ -95,14 +94,14 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream var s string err := json.Unmarshal([]byte(field.Raw()), &s) if err != nil { - fmt.Println(err) + // fmt.Println(err) } reasoning_content += s - fmt.Print(s) + // fmt.Print(s) } else { if !is_answering { is_answering = true - fmt.Println("\n\n========== 回答内容 ==========") + // fmt.Println("\n\n========== Response ==========") streamHandler.HandleAddedItem(chunk) } @@ -110,7 +109,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream answer_content += delta.Content answer_content_id = chunk.ID streamHandler.HandleTextDelta(chunk) - fmt.Print(delta.Content) + // fmt.Print(delta.Content) } if len(delta.ToolCalls) > 0 { @@ -119,7 +118,7 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream // haskey(tool_info, index) if _, ok := tool_info[index]; !ok { - fmt.Printf("Prepare tool %s\n", toolCall.Function.Name) + // fmt.Printf("Prepare tool %s\n", toolCall.Function.Name) tool_info[index] = map[string]string{} streamHandler.HandleAddedItem(chunk) } @@ -153,9 +152,9 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream } if chunk.Choices[0].FinishReason 
!= "" { - fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason) + // fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason) // answer_content += chunk.Choices[0].Delta.Content - fmt.Printf("answer_content: %s\n", answer_content) + // fmt.Printf("answer_content: %s\n", answer_content) streamHandler.HandleTextDoneItem(chunk, answer_content) break } @@ -169,13 +168,13 @@ func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream appendAssistantTextResponseV2(&openaiChatHistory, &inappChatHistory, answer_content, answer_content_id, modelSlug) } - // 执行调用(如果有),返回增量数据 + // Execute the calls (if any), return incremental data openaiToolHistory, inappToolHistory, err := a.toolCallHandler.HandleToolCallsV2(ctx, toolCalls, streamHandler) if err != nil { return nil, nil, err } - // // 把工具调用结果记录下来 + // // Record the tool call results if len(openaiToolHistory) > 0 { openaiChatHistory = append(openaiChatHistory, openaiToolHistory...) inappChatHistory = append(inappChatHistory, inappToolHistory...) diff --git a/internal/services/toolkit/client/get_conversation_title_v2.go b/internal/services/toolkit/client/get_conversation_title_v2.go index 40c8f9b8..6c92f0c2 100644 --- a/internal/services/toolkit/client/get_conversation_title_v2.go +++ b/internal/services/toolkit/client/get_conversation_title_v2.go @@ -29,7 +29,7 @@ func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistor message := strings.Join(messages, "\n") message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. 
Give me the title only, no other text including any other words.", message) - _, resp, err := a.ChatCompletionV2(ctx, "gpt-4.1-mini", OpenAIChatHistory{ + _, resp, err := a.ChatCompletionV2(ctx, "gpt-5-nano", OpenAIChatHistory{ openai.SystemMessage("You are a helpful assistant that generates a title for a conversation."), openai.UserMessage(message), }, llmProvider) diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go index 89e5cc89..9df994fa 100644 --- a/internal/services/toolkit/client/utils.go +++ b/internal/services/toolkit/client/utils.go @@ -75,7 +75,7 @@ func getDefaultParams(modelSlug string, toolRegistry *registry.ToolRegistry) res Model: modelSlug, Temperature: openaiv2.Float(0.7), MaxOutputTokens: openaiv2.Int(4000), // DEBUG POINT: change this to test the frontend handler - Tools: toolRegistry.GetTools(), // 工具注册由 registry 统一管理 + Tools: toolRegistry.GetTools(), // Tool registration is managed centrally by the registry Store: openaiv2.Bool(false), // Must set to false, because we are construct our own chat history. } } diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go index 4c56b386..7541720c 100644 --- a/internal/services/toolkit/client/utils_v2.go +++ b/internal/services/toolkit/client/utils_v2.go @@ -83,7 +83,7 @@ func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2) Model: modelSlug, Temperature: openaiv3.Float(0.7), MaxCompletionTokens: openaiv3.Int(4000), // DEBUG POINT: change this to test the frontend handler - Tools: toolRegistry.GetTools(), // 工具注册由 registry 统一管理 + Tools: toolRegistry.GetTools(), // Tool registration is managed centrally by the registry ParallelToolCalls: openaiv3.Bool(true), Store: openaiv3.Bool(false), // Must set to false, because we are construct our own chat history. 
} diff --git a/internal/services/toolkit/toolkit_test.go b/internal/services/toolkit/toolkit_test.go index 5215b29c..96ce3b4e 100644 --- a/internal/services/toolkit/toolkit_test.go +++ b/internal/services/toolkit/toolkit_test.go @@ -25,12 +25,12 @@ var mockConversationId = "mock-conversation-id" type mockCallbackStream struct { chatv1.ChatService_CreateConversationMessageStreamServer messages []*chatv1.CreateConversationMessageStreamResponse - // 用于跟踪消息状态的栈 + // Stack to track message state messageStack map[string]bool // key: message_id, value: true if begin, false if end - // 用于跟踪流的状态 + // To track stream state hasInitialization bool hasFinalization bool - // 用于跟踪当前活跃的assistant消息 + // To track the currently active assistant message activeAssistantMessageId string } @@ -39,7 +39,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre m.messageStack = make(map[string]bool) } - // 处理流初始化 + // Handle stream initialization if response.GetStreamInitialization() != nil { if m.hasInitialization { return fmt.Errorf("duplicate stream_initialization") @@ -49,7 +49,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre return nil } - // 处理流结束 + // Handle stream finalization if response.GetStreamFinalization() != nil { if !m.hasInitialization { return fmt.Errorf("stream_finalization without stream_initialization") @@ -62,7 +62,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre return nil } - // 获取消息ID + // Get message ID var messageId string switch { case response.GetStreamPartBegin() != nil: @@ -70,7 +70,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre messageId = begin.MessageId m.messageStack[messageId] = true - // 如果是assistant role,记录当前活跃的assistant消息ID + // If it's an assistant role, record the currently active assistant message ID if begin.GetPayload().GetAssistant() != nil { m.activeAssistantMessageId = messageId } @@ -83,7 +83,7 @@ func 
(m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre } delete(m.messageStack, messageId) - // 如果是结束当前活跃的assistant消息,清除活跃ID + // If ending the currently active assistant message, clear the active ID if messageId == m.activeAssistantMessageId { m.activeAssistantMessageId = "" } @@ -194,7 +194,7 @@ func TestChatCompletion_SingleRoundChat_NotCallTool(t *testing.T) { models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), oaiHistory, ) - // 验证流式消息的完整性 + // Verify streaming message integrity assert.NoError(t, tc.streamServer.ValidateMessageStack()) } else { _oai, _inapp, err = aiClient.ChatCompletion( @@ -261,7 +261,7 @@ func TestChatCompletion_TwoRoundChat_NotCallTool(t *testing.T) { models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), oaiHistory, ) - // 验证流式消息的完整性 + // Verify streaming message integrity assert.NoError(t, tc.streamServer.ValidateMessageStack()) } else { _oaiHistory, _appHistory, err = aiClient.ChatCompletion( @@ -288,7 +288,7 @@ func TestChatCompletion_TwoRoundChat_NotCallTool(t *testing.T) { models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), oaiHistory, ) - // 验证流式消息的完整性 + // Verify streaming message integrity assert.NoError(t, tc.streamServer.ValidateMessageStack()) } else { _oaiHistory, _appHistory, err = aiClient.ChatCompletion( @@ -355,7 +355,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_MessageAfterToolCall(t *testing models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), oaiHistory, ) - // 验证流式消息的完整性 + // Verify streaming message integrity assert.NoError(t, tc.streamServer.ValidateMessageStack()) } else { openaiHistory, inappHistory, err = aiClient.ChatCompletion( @@ -370,7 +370,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_MessageAfterToolCall(t *testing appHistory = append(appHistory, inappHistory...) 
assert.Equal(t, len(oaiHistory), 4) - assert.Equal(t, len(appHistory), 3) // app history 只保留 tool_call_result,不保留调用之前的那个 tool_call 请求 + assert.Equal(t, len(appHistory), 3) // app history only keeps tool_call_result, not the tool_call request before the call assert.NotNil(t, oaiHistory[1].OfFunctionCall) assert.Equal(t, oaiHistory[1].OfFunctionCall.Name, "greeting") @@ -384,7 +384,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_MessageAfterToolCall(t *testing } } -// 测试是否可以处理 err 的 message 添加到聊天记录中 +// Test whether error messages can be added to chat history func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) { os.Setenv("PD_MONGO_URI", "mongodb://localhost:27017") var dbInstance, _ = db.NewDB(cfg.GetCfg(), logger.GetLogger()) diff --git a/proto/comment/v1/comment.proto b/proto/comment/v1/comment.proto index bed1077e..65640697 100644 --- a/proto/comment/v1/comment.proto +++ b/proto/comment/v1/comment.proto @@ -19,7 +19,7 @@ message CommentsAcceptedRequest { string project_id = 1; string conversation_id = 2; string message_id = 3; - repeated string comment_ids = 4; // 被 accept 的 comment id 列表 + repeated string comment_ids = 4; // List of accepted comment IDs } message CommentsAcceptedResponse { diff --git a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/README.md b/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/README.md deleted file mode 100644 index 9f3aa063..00000000 --- a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# Paper Score Comment Component - -这个目录包含了Paper Score Comment功能的组件拆分。 - -## 文件结构 - -- `index.tsx` - 主组件,负责整体逻辑和状态管理 -- `types.ts` - 类型定义 -- `utils.ts` - 工具函数(重要性颜色、图标等) -- `comment-item.tsx` - 单个评论项组件 -- `stats-summary.tsx` - 统计摘要组件(显示Critical/High/Medium数量) -- `filter-controls.tsx` - 过滤器控制组件(搜索、重要性过滤) -- `comments-list.tsx` - 评论列表组件(过滤和排序逻辑) -- `add-comments-button.tsx` - 
添加评论到Overleaf的按钮组件 - -## 组件职责 - -### index.tsx - -- 解析消息数据 -- 管理全局状态(cookies、展开状态等) -- 协调各个子组件 - -### comment-item.tsx - -- 渲染单个评论项 -- 处理文本展开/折叠 -- 显示重要性标签和图标 - -### stats-summary.tsx - -- 显示评论统计信息 -- 按重要性分类显示数量 - -### filter-controls.tsx - -- 提供搜索功能 -- 提供重要性过滤 -- 显示过滤结果统计 - -### comments-list.tsx - -- 过滤和排序评论 -- 处理空状态显示 -- 渲染评论列表 - -### add-comments-button.tsx - -- 处理添加评论到Overleaf的逻辑 -- 管理加载状态和错误处理 -- 显示操作结果 - -### utils.ts - -- `getImportanceColor()` - 根据重要性返回颜色类名 -- `getImportanceIcon()` - 根据重要性返回图标名称 -- `cleanCommentText()` - 清理评论文本中的表情符号 - -### types.ts - -- `PaperScoreCommentCardProps` - 主组件的props类型定义 diff --git a/webapp/_webapp/src/hooks/useLanguageModels.ts b/webapp/_webapp/src/hooks/useLanguageModels.ts index b9ae43ac..552a2a17 100644 --- a/webapp/_webapp/src/hooks/useLanguageModels.ts +++ b/webapp/_webapp/src/hooks/useLanguageModels.ts @@ -7,19 +7,28 @@ import { useListSupportedModelsQuery } from "../query"; export type Model = { name: string; slug: string; + provider: string; +}; + +// Extract provider from model slug (e.g., "openai/gpt-4.1" -> "openai") +const extractProvider = (slug: string): string => { + const parts = slug.split("/"); + return parts.length > 1 ? 
parts[0] : "openai"; }; // Fallback models in case the API fails const fallbackModels: Model[] = [ { name: "GPT-4.1", - slug: "gpt-4.1", + slug: "openai/gpt-4.1", + provider: "openai", }, ]; const mapSupportedModelToModel = (supportedModel: SupportedModel): Model => ({ name: supportedModel.name, slug: supportedModel.slug, + provider: extractProvider(supportedModel.slug), }); export const useLanguageModels = () => { diff --git a/webapp/_webapp/src/libs/helpers.ts b/webapp/_webapp/src/libs/helpers.ts index 9de1db10..1b7016a9 100644 --- a/webapp/_webapp/src/libs/helpers.ts +++ b/webapp/_webapp/src/libs/helpers.ts @@ -193,10 +193,10 @@ export function addClickedOverleafComment(projectId: string, messageId: string) if (!projectId || !messageId) return; const key = OVERLEAF_COMMENTS_CLICKED_PREFIX + projectId; let arr = getClickedOverleafComments(projectId); - // 去重 + // Deduplicate arr = arr.filter((id) => id !== messageId); arr.push(messageId); - // 最多 200 条 + // Maximum 200 items if (arr.length > MAX_CLICKED_COMMENTS) { arr = arr.slice(arr.length - MAX_CLICKED_COMMENTS); } @@ -209,7 +209,7 @@ export function hasClickedOverleafComment(projectId: string, messageId: string): return arr.includes(messageId); } -// 经典 debounce,适合事件回调 +// Classic debounce, suitable for event callbacks export function debounce(fn: (...args: unknown[]) => void, wait: number) { let timer: ReturnType | null = null; return function (this: unknown, ...args: unknown[]) { @@ -231,6 +231,6 @@ export function blobToBase64(blob: Blob): Promise { } }; reader.onerror = reject; - reader.readAsDataURL(blob); // 读取为 DataURL 格式(包含 base64) + reader.readAsDataURL(blob); // Read as DataURL format (contains base64) }); } diff --git a/webapp/_webapp/src/libs/oauth.ts b/webapp/_webapp/src/libs/oauth.ts index 0374caba..f034cd24 100644 --- a/webapp/_webapp/src/libs/oauth.ts +++ b/webapp/_webapp/src/libs/oauth.ts @@ -33,9 +33,9 @@ export function appleAuthUrl(state: string) { const url = new 
URL("https://appleid.apple.com/auth/authorize"); url.searchParams.set("redirect_uri", REDIRECT_URI); url.searchParams.set("state", state); - url.searchParams.set("nonce", Math.random().toString(36).substring(2, 15)); // 推荐加 nonce + url.searchParams.set("nonce", Math.random().toString(36).substring(2, 15)); // Recommended to add nonce url.searchParams.set("scope", "name email"); - url.searchParams.set("response_mode", "form_post"); // 或 "form_post" + url.searchParams.set("response_mode", "form_post"); // Or "form_post" url.searchParams.set("client_id", "dev.junyi.PaperDebugger.si"); url.searchParams.set("response_type", "code id_token"); return url.toString(); diff --git a/webapp/_webapp/src/query/index.ts b/webapp/_webapp/src/query/index.ts index d4e8b729..7a78ccae 100644 --- a/webapp/_webapp/src/query/index.ts +++ b/webapp/_webapp/src/query/index.ts @@ -101,7 +101,7 @@ export const useListConversationsQuery = ( projectId: string, opts?: UseQueryOptionsOverride, ) => { - // 如果登录,才获取 + // Only fetch if logged in const { user } = useAuthStore(); return useQuery({ queryKey: queryKeys.conversations.listConversations(projectId).queryKey, diff --git a/webapp/_webapp/src/views/chat/body/index.tsx b/webapp/_webapp/src/views/chat/body/index.tsx index ecc492d6..72e97fa7 100644 --- a/webapp/_webapp/src/views/chat/body/index.tsx +++ b/webapp/_webapp/src/views/chat/body/index.tsx @@ -31,7 +31,7 @@ export const ChatBody = ({ conversation }: ChatBodyProps) => { const { conversationMode } = useSettingStore(); const isDebugMode = conversationMode === "debug"; - // 滚动到最后一条 user 消息顶部 + // Scroll to the top of the last user message useEffect(() => { if (expanderRef.current) { expanderRef.current.style.height = "1000px"; @@ -44,7 +44,7 @@ export const ChatBody = ({ conversation }: ChatBodyProps) => { let expanderHeight: number; if (expanderViewOffset < 0) { - expanderHeight = 0; // expander 的 positoin 是 absolute,和 stream markdown 独立渲染。当 stream markdown 渲染的时候,expander 可能会因为用户滚动滑到 
chatContainer 上面,导致 expander.y < 0。这个时候我们就不需要 expander 了 + expanderHeight = 0; // The expander's position is absolute and renders independently from stream markdown. When stream markdown renders, the expander may scroll above the chatContainer due to user scrolling, causing expander.y < 0. In this case, we don't need the expander. } else { expanderHeight = chatContainerHeight - expanderViewOffset; } diff --git a/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx b/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx index 81263b2f..dc2ce0d6 100644 --- a/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx +++ b/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx @@ -6,6 +6,20 @@ type ChatActionsProps = { onShowModelSelection: () => void; }; +// Map provider names to their respective icons +const getProviderIcon = (provider: string | undefined): string => { + switch (provider) { + case "openai": + return "tabler:brand-openai"; + case "qwen": + return "simple-icons:alibabadotcom"; + case "gemini": + return "simple-icons:googlegemini"; + default: + return "tabler:brain"; + } +}; + export function ChatActions({ onShowModelSelection }: ChatActionsProps) { const { inputRef, setPrompt } = useConversationUiStore(); const { currentModel } = useLanguageModels(); @@ -40,7 +54,7 @@ export function ChatActions({ onShowModelSelection }: ChatActionsProps) {
({ items, onSelect }: SelectionProps) { const children = parent?.getElementsByClassName("prompt-selection-item"); const child = children?.[idx] as HTMLDivElement; if (!parent || !child) return; - // 判断 child 是否在 parent 可视区域内,如果不在则滚动 + // Check if child is visible within parent's viewport, scroll if not const parentRect = parent.getBoundingClientRect(); const childRect = child.getBoundingClientRect(); if (childRect.top < parentRect.top) { - // 元素在上方不可见 + // Element is above visible area parent.scrollTop -= parentRect.top - childRect.top; } else if (childRect.bottom > parentRect.bottom) { - // 元素在下方不可见 + // Element is below visible area parent.scrollTop += childRect.bottom - parentRect.bottom; } }; diff --git a/webapp/_webapp/src/views/devtools/index.tsx b/webapp/_webapp/src/views/devtools/index.tsx index 279c2e35..75a6e598 100644 --- a/webapp/_webapp/src/views/devtools/index.tsx +++ b/webapp/_webapp/src/views/devtools/index.tsx @@ -9,7 +9,7 @@ import { MessageSchema } from "../../pkg/gen/apiclient/chat/v2/chat_pb"; import { isEmptyConversation } from "../chat/helper"; import { useState } from "react"; -// --- 工具函数 --- +// --- Utility functions --- const loremIpsum = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. 
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; const randomText = () => @@ -24,16 +24,16 @@ const randomUUID = () => { return result; }; -// --- DevTools主组件 --- +// --- DevTools main component --- export const DevTools = () => { - // 状态管理 + // State management const { selectedText, setSelectedText, setSelectionRange } = useSelectionStore(); const { streamingMessage, setStreamingMessage, updateStreamingMessage } = useStreamingMessageStore(); const { startFromScratch, currentConversation, setCurrentConversation } = useConversationStore(); const [preparingDelay, setPreparingDelay] = useState(2); - // --- 事件处理函数 --- - // Conversation相关 + // --- Event handlers --- + // Conversation related const handleClearConversation = () => setCurrentConversation({ ...currentConversation, messages: [] }); const handleAddUserMessage = () => setCurrentConversation({ @@ -84,7 +84,7 @@ export const DevTools = () => { setCurrentConversation({ ...currentConversation, messages: newMessages }); }; - // SelectedText相关 + // SelectedText related const handleClearSelectedText = () => { setSelectedText(null); setSelectionRange(null); @@ -94,7 +94,7 @@ export const DevTools = () => { setSelectionRange(new Range()); }; - // StreamingMessage相关 + // StreamingMessage related const handleClearStreamingMessage = () => setStreamingMessage({ ...streamingMessage, parts: [] }); const handleStaleLastStreamingMessage = () => { const newParts = useStreamingMessageStore @@ -104,12 +104,12 @@ export const DevTools = () => { ); setStreamingMessage({ ...streamingMessage, parts: [...newParts] }); }; - // 通用延迟处理 + // Generic delay handler const withDelay = (fn: () => void) => { if (preparingDelay > 0) setTimeout(fn, preparingDelay * 1000); else fn(); }; - // StreamingMessage添加各类消息 + // StreamingMessage add various message types const handleAddStreamingUserMessage = () => { const messageEntry: MessageEntry = { messageId: randomUUID(), @@ -229,7 +229,7 
@@ export const DevTools = () => { }); }; - // --- 渲染 --- + // --- Render --- return ( { >

DevTools

- {/* Conversation 区块 */} + {/* Conversation section */}

Conversation ( @@ -289,7 +289,7 @@ export const DevTools = () => {

- {/* Streaming Message 区块 */} + {/* Streaming Message section */}

Streaming Message diff --git a/webapp/_webapp/src/views/prompts/project-instructions.tsx b/webapp/_webapp/src/views/prompts/project-instructions.tsx index c40ba463..f7a6a0be 100644 --- a/webapp/_webapp/src/views/prompts/project-instructions.tsx +++ b/webapp/_webapp/src/views/prompts/project-instructions.tsx @@ -67,7 +67,7 @@ export function ProjectInstructions() { const handleKeyDown = useCallback( (e: React.KeyboardEvent) => { if ((e.metaKey || e.ctrlKey) && e.key === "s") { - e.preventDefault(); // 阻止浏览器的默认保存行为 + e.preventDefault(); // Prevent browser's default save behavior if (hasProjectChanges && !projectInstructionsMutation.isPending) { handleSaveProjectInstructions(); } diff --git a/webapp/_webapp/src/views/prompts/user-instructions.tsx b/webapp/_webapp/src/views/prompts/user-instructions.tsx index 7af96264..1e059285 100644 --- a/webapp/_webapp/src/views/prompts/user-instructions.tsx +++ b/webapp/_webapp/src/views/prompts/user-instructions.tsx @@ -58,7 +58,7 @@ export function UserInstructions() { const handleKeyDown = useCallback( (e: React.KeyboardEvent) => { if ((e.metaKey || e.ctrlKey) && e.key === "s") { - e.preventDefault(); // 阻止浏览器的默认保存行为 + e.preventDefault(); // Prevent browser's default save behavior if (hasUserChanges && !userInstructionsMutation.isPending) { handleSaveUserInstructions(); } diff --git a/webapp/_webapp/src/views/settings/setting-text-input.tsx b/webapp/_webapp/src/views/settings/setting-text-input.tsx index 8905cdd5..893df676 100644 --- a/webapp/_webapp/src/views/settings/setting-text-input.tsx +++ b/webapp/_webapp/src/views/settings/setting-text-input.tsx @@ -62,7 +62,7 @@ export function createSettingsTextInput(settingKey: K) { const handleKeyDown = useCallback( (e: React.KeyboardEvent) => { if ((e.metaKey || e.ctrlKey) && e.key === "s") { - e.preventDefault(); // 阻止浏览器的默认保存行为 + e.preventDefault(); // Prevent browser's default save behavior if (valueChanged && !isUpdating[settingKey]) { saveSettings(); } From 
6a3dbec78049bb55637cea76c5da57e0a1341228 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 04:08:58 +0800 Subject: [PATCH 20/28] remove chinese --- internal/models/conversation.go | 4 +- internal/models/oauth.go | 2 +- internal/services/token_test.go | 2 +- internal/services/toolkit/toolkit_test.go | 6 +-- .../toolkit/tools/latex/document_structure.go | 2 +- .../toolkit/tools/latex/locate_section.go | 2 +- .../tools/latex/read_section_source.go | 2 +- .../tools/latex/read_source_line_range.go | 2 +- pkg/gen/api/comment/v1/comment.pb.go | 2 +- .../src/components/top-menu-button.tsx | 2 +- .../gen/apiclient/comment/v1/comment_pb.ts | 2 +- .../conversation/conversation-ui-store.ts | 6 +-- webapp/_webapp/src/stores/selection-store.ts | 2 +- webapp/_webapp/src/stores/setting-store.ts | 2 +- .../chat/footer/toolbar/chat-actions.tsx | 10 +++-- .../src/views/settings/sections/footer.tsx | 4 +- webapp/_webapp/vite.config.dev.ts | 4 +- webapp/oauth-landing/src/App.tsx | 44 +++++++++---------- 18 files changed, 52 insertions(+), 48 deletions(-) diff --git a/internal/models/conversation.go b/internal/models/conversation.go index 6f1ebdf5..fdabf859 100644 --- a/internal/models/conversation.go +++ b/internal/models/conversation.go @@ -15,8 +15,8 @@ type Conversation struct { ModelSlug string `bson:"model_slug"` InappChatHistory []bson.M `bson:"inapp_chat_history"` // Store as raw BSON to avoid protobuf decoding issues - OpenaiChatHistory responses.ResponseInputParam `bson:"openai_chat_history"` // 实际上发给 GPT 的聊天历史 - OpenaiChatParams responses.ResponseNewParams `bson:"openai_chat_params"` // 对话的参数,比如 temperature, etc. + OpenaiChatHistory responses.ResponseInputParam `bson:"openai_chat_history"` // The actual chat history sent to GPT + OpenaiChatParams responses.ResponseNewParams `bson:"openai_chat_params"` // Conversation parameters, such as temperature, etc. 
OpenaiChatHistoryCompletion []openai.ChatCompletionMessageParamUnion `bson:"openai_chat_history_completion"` OpenaiChatParamsCompletion openai.ChatCompletionNewParams `bson:"openai_chat_params_completion"` } diff --git a/internal/models/oauth.go b/internal/models/oauth.go index e18554bb..3e9e82eb 100644 --- a/internal/models/oauth.go +++ b/internal/models/oauth.go @@ -2,7 +2,7 @@ package models type OAuth struct { BaseModel `bson:",inline"` - Code string `bson:"code,omitempty"` // OAuth 中的 code(即 authorization code)在 Google 的实现下 是一次性使用且短时间内有效的、临时唯一的。 + Code string `bson:"code,omitempty"` // OAuth code (authorization code) in Google's implementation is single-use, short-lived, and temporarily unique. AccessToken string `bson:"access_token,omitempty"` State string `bson:"state,omitempty"` Used bool `bson:"used,omitempty"` diff --git a/internal/services/token_test.go b/internal/services/token_test.go index 22c4786b..58477308 100644 --- a/internal/services/token_test.go +++ b/internal/services/token_test.go @@ -16,7 +16,7 @@ import ( ) func setupTestTokenService(t *testing.T) *services.TokenService { - os.Setenv("PD_MONGO_URI", "mongodb://localhost:27017") // 确保本地有 MongoDB + os.Setenv("PD_MONGO_URI", "mongodb://localhost:27017") // Ensure MongoDB is running locally dbInstance, err := db.NewDB(cfg.GetCfg(), logger.GetLogger()) if err != nil { t.Fatalf("failed to connect to test db: %v", err) diff --git a/internal/services/toolkit/toolkit_test.go b/internal/services/toolkit/toolkit_test.go index 96ce3b4e..15819beb 100644 --- a/internal/services/toolkit/toolkit_test.go +++ b/internal/services/toolkit/toolkit_test.go @@ -431,7 +431,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) { models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), oaiHistory, ) - // 验证流式消息的完整性 + // Verify streaming message integrity assert.NoError(t, tc.streamServer.ValidateMessageStack()) } else { openaiHistory, inappHistory, err = 
aiClient.ChatCompletion( @@ -459,7 +459,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) { } assert.Equal(t, 4, len(oaiHistory)) - //pd_user, openai_call, openai_msg 或者 pd_user, openai_msg, openai_call, openai_msg + //pd_user, openai_call, openai_msg or pd_user, openai_msg, openai_call, openai_msg assert.Condition(t, func() bool { var firstMsg = appHistory[0].MessageId if !strings.HasPrefix(firstMsg, "pd_user_") { @@ -506,7 +506,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) { models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI), oaiHistory, ) - // 验证流式消息的完整性 + // Verify streaming message integrity assert.NoError(t, tc.streamServer.ValidateMessageStack()) } else { openaiHistory, inappHistory, err = aiClient.ChatCompletion( diff --git a/internal/services/toolkit/tools/latex/document_structure.go b/internal/services/toolkit/tools/latex/document_structure.go index 1b471942..6bb1cb7d 100644 --- a/internal/services/toolkit/tools/latex/document_structure.go +++ b/internal/services/toolkit/tools/latex/document_structure.go @@ -12,7 +12,7 @@ var GetDocumentStructureToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ OfFunction: &openai.ChatCompletionFunctionToolParam{ Function: openai.FunctionDefinitionParam{ Name: "get_document_structure", - Description: param.NewOpt("获取整个项目的目录大纲 (章节树)。Returns the complete document outline including all sections, subsections, and their hierarchy."), + Description: param.NewOpt("Gets the complete project document outline (section tree). 
Returns the complete document outline including all sections, subsections, and their hierarchy."), Parameters: openai.FunctionParameters{ "type": "object", "properties": map[string]interface{}{}, diff --git a/internal/services/toolkit/tools/latex/locate_section.go b/internal/services/toolkit/tools/latex/locate_section.go index 1eb2e078..d0b26830 100644 --- a/internal/services/toolkit/tools/latex/locate_section.go +++ b/internal/services/toolkit/tools/latex/locate_section.go @@ -13,7 +13,7 @@ var LocateSectionToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ OfFunction: &openai.ChatCompletionFunctionToolParam{ Function: openai.FunctionDefinitionParam{ Name: "locate_section", - Description: param.NewOpt("根据标题查找特定章节的精确位置 (文件路径 + 行号范围)。Locates a specific section by its title and returns the file path and line number range."), + Description: param.NewOpt("Locates a specific section by its title and returns the exact position (file path + line number range). Locates a specific section by its title and returns the file path and line number range."), Parameters: openai.FunctionParameters{ "type": "object", "properties": map[string]interface{}{ diff --git a/internal/services/toolkit/tools/latex/read_section_source.go b/internal/services/toolkit/tools/latex/read_section_source.go index f0693f75..c303bb96 100644 --- a/internal/services/toolkit/tools/latex/read_section_source.go +++ b/internal/services/toolkit/tools/latex/read_section_source.go @@ -13,7 +13,7 @@ var ReadSectionSourceToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ OfFunction: &openai.ChatCompletionFunctionToolParam{ Function: openai.FunctionDefinitionParam{ Name: "read_section_source", - Description: param.NewOpt("读取特定章节的完整 LaTeX 源码。Reads the complete LaTeX source code of a specific section by its title."), + Description: param.NewOpt("Reads the complete LaTeX source code of a specific section by its title."), Parameters: openai.FunctionParameters{ "type": "object", "properties": 
map[string]interface{}{ diff --git a/internal/services/toolkit/tools/latex/read_source_line_range.go b/internal/services/toolkit/tools/latex/read_source_line_range.go index 96cf0970..6ec2409d 100644 --- a/internal/services/toolkit/tools/latex/read_source_line_range.go +++ b/internal/services/toolkit/tools/latex/read_source_line_range.go @@ -13,7 +13,7 @@ var ReadSourceLineRangeToolDescriptionV2 = openai.ChatCompletionToolUnionParam{ OfFunction: &openai.ChatCompletionFunctionToolParam{ Function: openai.FunctionDefinitionParam{ Name: "read_source_line_range", - Description: param.NewOpt("(底层兜底) 读取指定文件、指定行号范围的源码。Reads the source code from a specific file within a given line range."), + Description: param.NewOpt("(Fallback) Reads the source code from a specific file within a given line range."), Parameters: openai.FunctionParameters{ "type": "object", "properties": map[string]interface{}{ diff --git a/pkg/gen/api/comment/v1/comment.pb.go b/pkg/gen/api/comment/v1/comment.pb.go index b19607bd..acac3faf 100644 --- a/pkg/gen/api/comment/v1/comment.pb.go +++ b/pkg/gen/api/comment/v1/comment.pb.go @@ -27,7 +27,7 @@ type CommentsAcceptedRequest struct { ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` ConversationId string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"` MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` - CommentIds []string `protobuf:"bytes,4,rep,name=comment_ids,json=commentIds,proto3" json:"comment_ids,omitempty"` // 被 accept 的 comment id 列表 + CommentIds []string `protobuf:"bytes,4,rep,name=comment_ids,json=commentIds,proto3" json:"comment_ids,omitempty"` // List of accepted comment IDs unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } diff --git a/webapp/_webapp/src/components/top-menu-button.tsx b/webapp/_webapp/src/components/top-menu-button.tsx index 
c6b04e15..faa00f09 100644 --- a/webapp/_webapp/src/components/top-menu-button.tsx +++ b/webapp/_webapp/src/components/top-menu-button.tsx @@ -63,7 +63,7 @@ export const TopMenuButton = () => {

- {/* 位置重置菜单 */} + {/* Position reset menu */}
((set) => ({ setSelectedText: (selectedText) => { set({ selectedText }); }, - lastSelectedText: null, // 有一种情况:用户选择了文本,移动了一下 paperdebugger,然后点击 Add to chat。这个时候需要 lastSelectedText 来恢复刚刚选中的文本。 + lastSelectedText: null, // There's a case where user selects text, moves paperdebugger, then clicks Add to chat. In this case lastSelectedText is needed to restore the just-selected text. setLastSelectedText: (lastSelectedText) => { set({ lastSelectedText }); }, diff --git a/webapp/_webapp/src/stores/setting-store.ts b/webapp/_webapp/src/stores/setting-store.ts index 1a327225..313185ea 100644 --- a/webapp/_webapp/src/stores/setting-store.ts +++ b/webapp/_webapp/src/stores/setting-store.ts @@ -13,7 +13,7 @@ export interface SettingStore { updateSettings: (newSettings: Partial>) => Promise; resetSettings: () => Promise; - enableUserDeveloperTools: boolean; // 不是真的 developer tool + enableUserDeveloperTools: boolean; // Not actual developer tools setEnableUserDeveloperTools: (enable: boolean) => void; conversationMode: "debug" | "normal"; diff --git a/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx b/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx index dc2ce0d6..d749f308 100644 --- a/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx +++ b/webapp/_webapp/src/views/chat/footer/toolbar/chat-actions.tsx @@ -12,9 +12,13 @@ const getProviderIcon = (provider: string | undefined): string => { case "openai": return "tabler:brand-openai"; case "qwen": - return "simple-icons:alibabadotcom"; - case "gemini": - return "simple-icons:googlegemini"; + return "hugeicons:qwen"; + case "google": + return "vscode-icons:file-type-gemini"; + case "deepseek": + return "ri:deepseek-fill"; + case "anthropic": + return "ri:anthropic-fill" default: return "tabler:brain"; } diff --git a/webapp/_webapp/src/views/settings/sections/footer.tsx b/webapp/_webapp/src/views/settings/sections/footer.tsx index 330eda62..cf358bd9 100644 --- 
a/webapp/_webapp/src/views/settings/sections/footer.tsx +++ b/webapp/_webapp/src/views/settings/sections/footer.tsx @@ -61,7 +61,7 @@ export const SettingsFooter = () => { text="App Store" alwaysShowText onClick={() => { - alert("我们还没有在 App Store 上架"); + alert("We are not yet listed on the App Store"); }} /> )} @@ -84,7 +84,7 @@ export const SettingsFooter = () => { text="Like Us" alwaysShowText onClick={() => { - alert("现在不能评论!因为应用还未上架!"); + alert("Cannot leave a review now! The app is not yet listed!"); // window.open( // "https://apps.apple.com/cn/app/", // "_blank", diff --git a/webapp/_webapp/vite.config.dev.ts b/webapp/_webapp/vite.config.dev.ts index 9c9a0009..5aa565c8 100644 --- a/webapp/_webapp/vite.config.dev.ts +++ b/webapp/_webapp/vite.config.dev.ts @@ -2,7 +2,7 @@ import { defineConfig } from "vite"; import react from "@vitejs/plugin-react-swc"; import { resolve } from "path"; -// 简化的开发配置 +// Simplified development configuration export default defineConfig({ root: "src/devtool", plugins: [react()], @@ -10,7 +10,7 @@ export default defineConfig({ port: 3000, open: true, proxy: { - // 代理API请求到后端 + // Proxy API requests to the backend "/oauth2": { target: process.env.PD_API_ENDPOINT || "http://localhost:6060", changeOrigin: true, diff --git a/webapp/oauth-landing/src/App.tsx b/webapp/oauth-landing/src/App.tsx index 4e7e36f1..8055ec8f 100644 --- a/webapp/oauth-landing/src/App.tsx +++ b/webapp/oauth-landing/src/App.tsx @@ -4,40 +4,40 @@ import { Logo } from './components/logo'; const colorProfile = { default: { - background: 'linear-gradient(135deg, #f6f7f9 0%, #e2e4ea 100%)', // 低调灰 - cardBorderColor: '#e2e4ea', // 浅灰色边框 - textPrimaryColor: '#222', // 主文字深灰 - textDescColor: '#666', // 描述文字中灰 - textFooterColor: '#aaa', // 脚注浅灰 + background: 'linear-gradient(135deg, #f6f7f9 0%, #e2e4ea 100%)', // Subtle gray + cardBorderColor: '#e2e4ea', // Light gray border + textPrimaryColor: '#222', // Primary text dark gray + textDescColor: '#666', // Description text 
medium gray + textFooterColor: '#aaa', // Footer text light gray }, success: { - background: 'linear-gradient(135deg, #f3fcf7 0%, #b7eacb 100%)', // 低调绿 - cardBorderColor: '#b7eacb', // 绿色边框 - textPrimaryColor: '#217a4a', // 主文字深绿 - textDescColor: '#4ca96b', // 描述文字中绿 - textFooterColor: '#7fd6a3', // 脚注浅绿 + background: 'linear-gradient(135deg, #f3fcf7 0%, #b7eacb 100%)', // Subtle green + cardBorderColor: '#b7eacb', // Green border + textPrimaryColor: '#217a4a', // Primary text dark green + textDescColor: '#4ca96b', // Description text medium green + textFooterColor: '#7fd6a3', // Footer text light green }, error: { - background: 'linear-gradient(135deg, #fdf7f7 0%, #f7d4d4 100%)', // 更浅更低调的红色 - cardBorderColor: '#f7d4d4', // 红色边框 - textPrimaryColor: '#a94442', // 主文字深红 - textDescColor: '#d9534f', // 描述文字中红 - textFooterColor: '#f7bcbc', // 脚注浅红 + background: 'linear-gradient(135deg, #fdf7f7 0%, #f7d4d4 100%)', // Subtle light red + cardBorderColor: '#f7d4d4', // Red border + textPrimaryColor: '#a94442', // Primary text dark red + textDescColor: '#d9534f', // Description text medium red + textFooterColor: '#f7bcbc', // Footer text light red }, requesting: { - background: 'linear-gradient(135deg, #fafaf5 0%, #f5e9be 100%)', // 低调黄 - cardBorderColor: '#f5e9be', // 黄色边框 - textPrimaryColor: '#8a6d3b', // 主文字深黄 - textDescColor: '#c7a94a', // 描述文字中黄 - textFooterColor: '#f5e9be', // 脚注浅黄 + background: 'linear-gradient(135deg, #fafaf5 0%, #f5e9be 100%)', // Subtle yellow + cardBorderColor: '#f5e9be', // Yellow border + textPrimaryColor: '#8a6d3b', // Primary text dark yellow + textDescColor: '#c7a94a', // Description text medium yellow + textFooterColor: '#f5e9be', // Footer text light yellow }, } type Status = 'default' | 'success' | 'error' | 'requesting'; function App() { - const [title, setTitle] = useState('OAuth 登录'); - const [desc, setDesc] = useState('初始界面,可以关闭'); + const [title, setTitle] = useState('OAuth Login'); + const [desc, setDesc] = useState('Initial 
screen, you can close this'); const [footer, setFooter] = useState(''); const [status, setStatus] = useState('default'); // const [status, setStatus] = useState('success'); From 4f48ea6ab68ebe199b857d32ea65c79050bd3992 Mon Sep 17 00:00:00 2001 From: Junyi Hou Date: Fri, 19 Dec 2025 04:17:44 +0800 Subject: [PATCH 21/28] ui enhancement --- .../_webapp/src/views/chat/footer/index.tsx | 12 +++---- .../chat/footer/toolbar/action-selection.tsx | 9 ++++- .../chat/footer/toolbar/model-selection.tsx | 7 +++- .../chat/footer/toolbar/prompt-selection.tsx | 8 ++++- .../views/chat/footer/toolbar/selection.tsx | 35 +++++++++++++++++-- .../src/views/chat/header/chat-button.tsx | 6 ++-- .../settings/sections/api-key-settings.tsx | 4 +-- 7 files changed, 65 insertions(+), 16 deletions(-) diff --git a/webapp/_webapp/src/views/chat/footer/index.tsx b/webapp/_webapp/src/views/chat/footer/index.tsx index ea06cf17..758de2da 100644 --- a/webapp/_webapp/src/views/chat/footer/index.tsx +++ b/webapp/_webapp/src/views/chat/footer/index.tsx @@ -90,21 +90,21 @@ export function PromptInput() { ); return ( -
+
+ {/* Only show one popup at a time - priority: prompts > actions > model selection */} {prompts.length > 0 && } - {actions.length > 0 && } - {showModelSelection && } + {prompts.length === 0 && actions.length > 0 && } + {prompts.length === 0 && actions.length === 0 && showModelSelection && }
- {prompts.length == 0 && actions.length == 0 && !showModelSelection && ( - setShowModelSelection(true)} /> - )} + setShowModelSelection(true)} />
{selectedText && }