diff --git a/internal/api/auth/auth_test.go b/internal/api/auth/auth_test.go
index be5a8ea9..ac640217 100644
--- a/internal/api/auth/auth_test.go
+++ b/internal/api/auth/auth_test.go
@@ -55,14 +55,14 @@ func TestAuthServer(t *testing.T) {
}
assert.True(t, token.ExpiresAt.Before(timeNow))
- // 现在 Token 有效期应该是 24 小时前
+ // Now Token expiration should be 24 hours ago
token, err = tokenService.GetTokenByToken(context.Background(), token.Token)
if err != nil {
t.Fatalf("Failed to get refresh token: %v", err)
}
assert.True(t, token.ExpiresAt.Before(timeNow))
- // 这时候 RefreshToken 应该失效
+ // At this point RefreshToken should be invalid
resp, err := authServer.RefreshToken(context.Background(),
&authv1.RefreshTokenRequest{
RefreshToken: token.Token,
@@ -71,7 +71,7 @@ func TestAuthServer(t *testing.T) {
assert.Error(t, err)
assert.Nil(t, resp)
- // 更新 Token 有效期 到 24 小时候
+ // Update Token expiration to 24 hours later
token.ExpiresAt = timeNow.Add(time.Hour * 24)
token, err = tokenService.UpdateToken(context.Background(), token)
if err != nil {
@@ -79,7 +79,7 @@ func TestAuthServer(t *testing.T) {
}
assert.True(t, token.ExpiresAt.After(timeNow))
- // 这时候 RefreshToken 应该有效
+ // At this point RefreshToken should be valid
resp, err = authServer.RefreshToken(context.Background(),
&authv1.RefreshTokenRequest{
RefreshToken: token.Token,
@@ -88,7 +88,7 @@ func TestAuthServer(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, resp)
- // 刚刚 RefreshToken 之后,有效期应该刷新到一个月后
+ // After RefreshToken, expiration should be refreshed to one month later
token, err = tokenService.GetTokenByToken(context.Background(), resp.RefreshToken)
if err != nil {
t.Fatalf("Failed to get refresh token: %v", err)
diff --git a/internal/api/chat/create_conversation_message.go b/internal/api/chat/create_conversation_message.go
index cfe7730d..5c2cd9a8 100644
--- a/internal/api/chat/create_conversation_message.go
+++ b/internal/api/chat/create_conversation_message.go
@@ -1,252 +1 @@
package chat
-
-import (
- "context"
-
- "paperdebugger/internal/libs/contextutil"
- "paperdebugger/internal/libs/shared"
- "paperdebugger/internal/models"
- chatv1 "paperdebugger/pkg/gen/api/chat/v1"
-
- "github.com/google/uuid"
- "github.com/openai/openai-go/v2/responses"
- "go.mongodb.org/mongo-driver/v2/bson"
- "go.mongodb.org/mongo-driver/v2/mongo"
- "google.golang.org/protobuf/encoding/protojson"
-)
-
-// 设计理念:
-// 发送给 GPT 之前,消息列表已经构造进 Conversation 对象中(也保存在数据库里)
-// 我们发送给 GPT 的就是从数据库里拿到的 Conversation 对象里面的内容(InputItemList)
-
-// buildUserMessage constructs both the user-facing message and the OpenAI input message
-func (s *ChatServer) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv1.ConversationType) (*chatv1.Message, *responses.ResponseInputItemUnionParam, error) {
- userPrompt, err := s.chatService.GetPrompt(ctx, userMessage, userSelectedText, conversationType)
- if err != nil {
- return nil, nil, err
- }
-
- var inappMessage *chatv1.Message
- switch conversationType {
- case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG:
- inappMessage = &chatv1.Message{
- MessageId: "pd_msg_user_" + uuid.New().String(),
- Payload: &chatv1.MessagePayload{
- MessageType: &chatv1.MessagePayload_User{
- User: &chatv1.MessageTypeUser{
- Content: userPrompt,
- },
- },
- },
- }
- default:
- inappMessage = &chatv1.Message{
- MessageId: "pd_msg_user_" + uuid.New().String(),
- Payload: &chatv1.MessagePayload{
- MessageType: &chatv1.MessagePayload_User{
- User: &chatv1.MessageTypeUser{
- Content: userMessage,
- SelectedText: &userSelectedText,
- },
- },
- },
- }
- }
-
- openaiMessage := &responses.ResponseInputItemUnionParam{
- OfInputMessage: &responses.ResponseInputItemMessageParam{
- Role: "user",
- Content: responses.ResponseInputMessageContentListParam{
- responses.ResponseInputContentParamOfInputText(userPrompt),
- },
- },
- }
-
- return inappMessage, openaiMessage, nil
-}
-
-// buildSystemMessage constructs both the user-facing system message and the OpenAI input message
-func (s *ChatServer) buildSystemMessage(systemPrompt string) (*chatv1.Message, *responses.ResponseInputItemUnionParam) {
- inappMessage := &chatv1.Message{
- MessageId: "pd_msg_system_" + uuid.New().String(),
- Payload: &chatv1.MessagePayload{
- MessageType: &chatv1.MessagePayload_System{
- System: &chatv1.MessageTypeSystem{
- Content: systemPrompt,
- },
- },
- },
- }
-
- openaiMessage := &responses.ResponseInputItemUnionParam{
- OfInputMessage: &responses.ResponseInputItemMessageParam{
- Role: "system",
- Content: responses.ResponseInputMessageContentListParam{
- responses.ResponseInputContentParamOfInputText(systemPrompt),
- },
- },
- }
-
- return inappMessage, openaiMessage
-}
-
-// convertToBSON converts a protobuf message to BSON
-func convertToBSON(msg *chatv1.Message) (bson.M, error) {
- jsonBytes, err := protojson.Marshal(msg)
- if err != nil {
- return nil, err
- }
- var bsonMsg bson.M
- if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err != nil {
- return nil, err
- }
- return bsonMsg, nil
-}
-
-// 创建对话并写入数据库
-// 返回 Conversation 对象
-func (s *ChatServer) createConversation(
- ctx context.Context,
- userId bson.ObjectID,
- projectId string,
- latexFullSource string,
- projectInstructions string,
- userInstructions string,
- userMessage string,
- userSelectedText string,
- modelSlug string,
- conversationType chatv1.ConversationType,
-) (*models.Conversation, error) {
- systemPrompt, err := s.chatService.GetSystemPrompt(ctx, latexFullSource, projectInstructions, userInstructions, conversationType)
- if err != nil {
- return nil, err
- }
-
- _, openaiSystemMsg := s.buildSystemMessage(systemPrompt)
- inappUserMsg, openaiUserMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType)
- if err != nil {
- return nil, err
- }
-
- messages := []*chatv1.Message{inappUserMsg}
- oaiHistory := responses.ResponseNewParamsInputUnion{
- OfInputItemList: responses.ResponseInputParam{*openaiSystemMsg, *openaiUserMsg},
- }
-
- return s.chatService.InsertConversationToDB(
- ctx, userId, projectId, modelSlug, messages, oaiHistory.OfInputItemList,
- )
-}
-
-// 追加消息到对话并写入数据库
-// 返回 Conversation 对象
-func (s *ChatServer) appendConversationMessage(
- ctx context.Context,
- userId bson.ObjectID,
- conversationId string,
- userMessage string,
- userSelectedText string,
- conversationType chatv1.ConversationType,
-) (*models.Conversation, error) {
- objectID, err := bson.ObjectIDFromHex(conversationId)
- if err != nil {
- return nil, err
- }
-
- conversation, err := s.chatService.GetConversation(ctx, userId, objectID)
- if err != nil {
- return nil, err
- }
-
- userMsg, userOaiMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType)
- if err != nil {
- return nil, err
- }
-
- bsonMsg, err := convertToBSON(userMsg)
- if err != nil {
- return nil, err
- }
- conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMsg)
- conversation.OpenaiChatHistory = append(conversation.OpenaiChatHistory, *userOaiMsg)
-
- if err := s.chatService.UpdateConversation(conversation); err != nil {
- return nil, err
- }
-
- return conversation, nil
-}
-
-// 如果 conversationId 是 "", 就创建新对话,否则就追加消息到对话
-// conversationType 可以在一次 conversation 中多次切换
-func (s *ChatServer) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) {
- actor, err := contextutil.GetActor(ctx)
- if err != nil {
- return ctx, nil, nil, err
- }
-
- project, err := s.projectService.GetProject(ctx, actor.ID, projectId)
- if err != nil && err != mongo.ErrNoDocuments {
- return ctx, nil, nil, err
- }
-
- userInstructions, err := s.userService.GetUserInstructions(ctx, actor.ID)
- if err != nil {
- return ctx, nil, nil, err
- }
-
- var latexFullSource string
- switch conversationType {
- case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG:
- latexFullSource = "latex_full_source is not available in debug mode"
- default:
- if project == nil || project.IsOutOfDate() {
- return ctx, nil, nil, shared.ErrProjectOutOfDate("project is out of date")
- }
-
- latexFullSource, err = project.GetFullContent()
- if err != nil {
- return ctx, nil, nil, err
- }
- }
-
- var conversation *models.Conversation
-
- if conversationId == "" {
- conversation, err = s.createConversation(
- ctx,
- actor.ID,
- projectId,
- latexFullSource,
- project.Instructions,
- userInstructions,
- userMessage,
- userSelectedText,
- modelSlug,
- conversationType,
- )
- } else {
- conversation, err = s.appendConversationMessage(
- ctx,
- actor.ID,
- conversationId,
- userMessage,
- userSelectedText,
- conversationType,
- )
- }
-
- if err != nil {
- return ctx, nil, nil, err
- }
-
- ctx = contextutil.SetProjectID(ctx, conversation.ProjectID)
- ctx = contextutil.SetConversationID(ctx, conversation.ID.Hex())
-
- settings, err := s.userService.GetUserSettings(ctx, actor.ID)
- if err != nil {
- return ctx, conversation, nil, err
- }
-
- return ctx, conversation, settings, nil
-}
diff --git a/internal/api/chat/create_conversation_message_stream.go b/internal/api/chat/create_conversation_message_stream.go
index e996d3a5..3d6b2faa 100644
--- a/internal/api/chat/create_conversation_message_stream.go
+++ b/internal/api/chat/create_conversation_message_stream.go
@@ -1,15 +1,22 @@
package chat
import (
+ "context"
"paperdebugger/internal/api/mapper"
+ "paperdebugger/internal/libs/contextutil"
+ "paperdebugger/internal/libs/shared"
"paperdebugger/internal/models"
"paperdebugger/internal/services"
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
+ "github.com/google/uuid"
+ "github.com/openai/openai-go/v2/responses"
"go.mongodb.org/mongo-driver/v2/bson"
+ "go.mongodb.org/mongo-driver/v2/mongo"
+ "google.golang.org/protobuf/encoding/protojson"
)
-func (s *ChatServer) sendStreamError(stream chatv1.ChatService_CreateConversationMessageStreamServer, err error) error {
+func (s *ChatServerV1) sendStreamError(stream chatv1.ChatService_CreateConversationMessageStreamServer, err error) error {
return stream.Send(&chatv1.CreateConversationMessageStreamResponse{
ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamError{
StreamError: &chatv1.StreamError{
@@ -19,7 +26,243 @@ func (s *ChatServer) sendStreamError(stream chatv1.ChatService_CreateConversatio
})
}
-func (s *ChatServer) CreateConversationMessageStream(
+// Design philosophy:
+// Before sending to GPT, the message list is already constructed in the Conversation object (also saved in the database)
+// What we send to GPT is the content (InputItemList) from the Conversation object retrieved from the database
+
+// buildUserMessage constructs both the user-facing message and the OpenAI input message
+func (s *ChatServerV1) buildUserMessage(ctx context.Context, userMessage, userSelectedText string, conversationType chatv1.ConversationType) (*chatv1.Message, *responses.ResponseInputItemUnionParam, error) {
+ userPrompt, err := s.chatServiceV1.GetPrompt(ctx, userMessage, userSelectedText, conversationType)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var inappMessage *chatv1.Message
+ switch conversationType {
+ case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG:
+ inappMessage = &chatv1.Message{
+ MessageId: "pd_msg_user_" + uuid.New().String(),
+ Payload: &chatv1.MessagePayload{
+ MessageType: &chatv1.MessagePayload_User{
+ User: &chatv1.MessageTypeUser{
+ Content: userPrompt,
+ },
+ },
+ },
+ }
+ default:
+ inappMessage = &chatv1.Message{
+ MessageId: "pd_msg_user_" + uuid.New().String(),
+ Payload: &chatv1.MessagePayload{
+ MessageType: &chatv1.MessagePayload_User{
+ User: &chatv1.MessageTypeUser{
+ Content: userMessage,
+ SelectedText: &userSelectedText,
+ },
+ },
+ },
+ }
+ }
+
+ openaiMessage := &responses.ResponseInputItemUnionParam{
+ OfInputMessage: &responses.ResponseInputItemMessageParam{
+ Role: "user",
+ Content: responses.ResponseInputMessageContentListParam{
+ responses.ResponseInputContentParamOfInputText(userPrompt),
+ },
+ },
+ }
+
+ return inappMessage, openaiMessage, nil
+}
+
+// buildSystemMessage constructs both the user-facing system message and the OpenAI input message
+func (s *ChatServerV1) buildSystemMessage(systemPrompt string) (*chatv1.Message, *responses.ResponseInputItemUnionParam) {
+ inappMessage := &chatv1.Message{
+ MessageId: "pd_msg_system_" + uuid.New().String(),
+ Payload: &chatv1.MessagePayload{
+ MessageType: &chatv1.MessagePayload_System{
+ System: &chatv1.MessageTypeSystem{
+ Content: systemPrompt,
+ },
+ },
+ },
+ }
+
+ openaiMessage := &responses.ResponseInputItemUnionParam{
+ OfInputMessage: &responses.ResponseInputItemMessageParam{
+ Role: "system",
+ Content: responses.ResponseInputMessageContentListParam{
+ responses.ResponseInputContentParamOfInputText(systemPrompt),
+ },
+ },
+ }
+
+ return inappMessage, openaiMessage
+}
+
+// convertToBSON converts a protobuf message to BSON
+func convertToBSON(msg *chatv1.Message) (bson.M, error) {
+ jsonBytes, err := protojson.Marshal(msg)
+ if err != nil {
+ return nil, err
+ }
+ var bsonMsg bson.M
+ if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err != nil {
+ return nil, err
+ }
+ return bsonMsg, nil
+}
+
+// createConversation creates a conversation and writes it to the database
+// Returns the Conversation object
+func (s *ChatServerV1) createConversation(
+ ctx context.Context,
+ userId bson.ObjectID,
+ projectId string,
+ latexFullSource string,
+ projectInstructions string,
+ userInstructions string,
+ userMessage string,
+ userSelectedText string,
+ modelSlug string,
+ conversationType chatv1.ConversationType,
+) (*models.Conversation, error) {
+ systemPrompt, err := s.chatServiceV1.GetSystemPrompt(ctx, latexFullSource, projectInstructions, userInstructions, conversationType)
+ if err != nil {
+ return nil, err
+ }
+
+ _, openaiSystemMsg := s.buildSystemMessage(systemPrompt)
+ inappUserMsg, openaiUserMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType)
+ if err != nil {
+ return nil, err
+ }
+
+ messages := []*chatv1.Message{inappUserMsg}
+ oaiHistory := responses.ResponseNewParamsInputUnion{
+ OfInputItemList: responses.ResponseInputParam{*openaiSystemMsg, *openaiUserMsg},
+ }
+
+ return s.chatServiceV1.InsertConversationToDB(
+ ctx, userId, projectId, modelSlug, messages, oaiHistory.OfInputItemList,
+ )
+}
+
+// appendConversationMessage appends a message to the conversation and writes it to the database
+// Returns the Conversation object
+func (s *ChatServerV1) appendConversationMessage(
+ ctx context.Context,
+ userId bson.ObjectID,
+ conversationId string,
+ userMessage string,
+ userSelectedText string,
+ conversationType chatv1.ConversationType,
+) (*models.Conversation, error) {
+ objectID, err := bson.ObjectIDFromHex(conversationId)
+ if err != nil {
+ return nil, err
+ }
+
+ conversation, err := s.chatServiceV1.GetConversation(ctx, userId, objectID)
+ if err != nil {
+ return nil, err
+ }
+
+ userMsg, userOaiMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, conversationType)
+ if err != nil {
+ return nil, err
+ }
+
+ bsonMsg, err := convertToBSON(userMsg)
+ if err != nil {
+ return nil, err
+ }
+ conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMsg)
+ conversation.OpenaiChatHistory = append(conversation.OpenaiChatHistory, *userOaiMsg)
+
+ if err := s.chatServiceV1.UpdateConversation(conversation); err != nil {
+ return nil, err
+ }
+
+ return conversation, nil
+}
+
+// prepare creates a new conversation if conversationId is "", otherwise appends a message to the conversation
+// conversationType can be switched multiple times within a single conversation
+func (s *ChatServerV1) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, modelSlug string, conversationType chatv1.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) {
+ actor, err := contextutil.GetActor(ctx)
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+
+ project, err := s.projectService.GetProject(ctx, actor.ID, projectId)
+ if err != nil && err != mongo.ErrNoDocuments {
+ return ctx, nil, nil, err
+ }
+
+ userInstructions, err := s.userService.GetUserInstructions(ctx, actor.ID)
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+
+ var latexFullSource string
+ switch conversationType {
+ case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG:
+ latexFullSource = "latex_full_source is not available in debug mode"
+ default:
+ if project == nil || project.IsOutOfDate() {
+ return ctx, nil, nil, shared.ErrProjectOutOfDate("project is out of date")
+ }
+
+ latexFullSource, err = project.GetFullContent()
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+ }
+
+ var conversation *models.Conversation
+
+ if conversationId == "" {
+ conversation, err = s.createConversation(
+ ctx,
+ actor.ID,
+ projectId,
+ latexFullSource,
+ project.Instructions,
+ userInstructions,
+ userMessage,
+ userSelectedText,
+ modelSlug,
+ conversationType,
+ )
+ } else {
+ conversation, err = s.appendConversationMessage(
+ ctx,
+ actor.ID,
+ conversationId,
+ userMessage,
+ userSelectedText,
+ conversationType,
+ )
+ }
+
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+
+ ctx = contextutil.SetProjectID(ctx, conversation.ProjectID)
+ ctx = contextutil.SetConversationID(ctx, conversation.ID.Hex())
+
+ settings, err := s.userService.GetUserSettings(ctx, actor.ID)
+ if err != nil {
+ return ctx, conversation, nil, err
+ }
+
+ return ctx, conversation, settings, nil
+}
+
+func (s *ChatServerV1) CreateConversationMessageStream(
req *chatv1.CreateConversationMessageStreamRequest,
stream chatv1.ChatService_CreateConversationMessageStreamServer,
) error {
@@ -43,17 +286,17 @@ func (s *ChatServer) CreateConversationMessageStream(
return s.sendStreamError(stream, err)
}
- // 用法跟 ChatCompletion 一样,只是传递了 stream 参数
+ // Usage is the same as ChatCompletion, just passing the stream parameter
llmProvider := &models.LLMProviderConfig{
APIKey: settings.OpenAIAPIKey,
}
- openaiChatHistory, inappChatHistory, err := s.aiClient.ChatCompletionStream(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistory, llmProvider)
+ openaiChatHistory, inappChatHistory, err := s.aiClientV1.ChatCompletionStreamV1(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistory, llmProvider)
if err != nil {
return s.sendStreamError(stream, err)
}
- // 附加消息到对话
+ // Append messages to the conversation
bsonMessages := make([]bson.M, len(inappChatHistory))
for i := range inappChatHistory {
bsonMsg, err := convertToBSON(&inappChatHistory[i])
@@ -64,24 +307,24 @@ func (s *ChatServer) CreateConversationMessageStream(
}
conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMessages...)
conversation.OpenaiChatHistory = openaiChatHistory
- if err := s.chatService.UpdateConversation(conversation); err != nil {
+ if err := s.chatServiceV1.UpdateConversation(conversation); err != nil {
return s.sendStreamError(stream, err)
}
if conversation.Title == services.DefaultConversationTitle {
go func() {
+ conversationID := conversation.ID
protoMessages := make([]*chatv1.Message, len(conversation.InappChatHistory))
for i, bsonMsg := range conversation.InappChatHistory {
protoMessages[i] = mapper.BSONToChatMessage(bsonMsg)
}
- title, err := s.aiClient.GetConversationTitle(ctx, protoMessages, llmProvider)
+ title, err := s.aiClientV1.GetConversationTitle(ctx, protoMessages, llmProvider)
if err != nil {
- s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex())
+ s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversationID.Hex())
return
}
- conversation.Title = title
- if err := s.chatService.UpdateConversation(conversation); err != nil {
- s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex())
+ if err := s.chatServiceV1.UpdateConversationTitle(ctx, conversationID, title); err != nil {
+ s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversationID.Hex())
return
}
}()
diff --git a/internal/api/chat/create_conversation_message_stream_v2.go b/internal/api/chat/create_conversation_message_stream_v2.go
new file mode 100644
index 00000000..2e9f95ba
--- /dev/null
+++ b/internal/api/chat/create_conversation_message_stream_v2.go
@@ -0,0 +1,322 @@
+package chat
+
+import (
+ "context"
+ "paperdebugger/internal/api/mapper"
+ "paperdebugger/internal/libs/contextutil"
+ "paperdebugger/internal/libs/shared"
+ "paperdebugger/internal/models"
+ "paperdebugger/internal/services"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "github.com/google/uuid"
+ "github.com/openai/openai-go/v3"
+ "go.mongodb.org/mongo-driver/v2/bson"
+ "go.mongodb.org/mongo-driver/v2/mongo"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+func (s *ChatServerV2) sendStreamError(stream chatv2.ChatService_CreateConversationMessageStreamServer, err error) error {
+ return stream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamError{
+ StreamError: &chatv2.StreamError{
+ ErrorMessage: err.Error(),
+ },
+ },
+ })
+}
+
+// Design philosophy:
+// Before sending to GPT, the message list is already constructed in the Conversation object (also saved in the database)
+// What we send to GPT is the content (InputItemList) from the Conversation object retrieved from the database
+
+// buildSystemMessage constructs both the user-facing system message and the OpenAI input message
+func (s *ChatServerV2) buildSystemMessage(systemPrompt string) (*chatv2.Message, openai.ChatCompletionMessageParamUnion) {
+ inappMessage := &chatv2.Message{
+ MessageId: "pd_msg_system_" + uuid.New().String(),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_System{
+ System: &chatv2.MessageTypeSystem{
+ Content: systemPrompt,
+ },
+ },
+ },
+ }
+
+ openaiMessage := openai.SystemMessage(systemPrompt)
+
+ return inappMessage, openaiMessage
+}
+
+func (s *ChatServerV2) buildUserMessage(ctx context.Context, userMessage, userSelectedText, surrounding string, conversationType chatv2.ConversationType) (*chatv2.Message, openai.ChatCompletionMessageParamUnion, error) {
+ userPrompt, err := s.chatServiceV2.GetPrompt(ctx, userMessage, userSelectedText, surrounding, conversationType)
+ if err != nil {
+ return nil, openai.ChatCompletionMessageParamUnion{}, err
+ }
+
+ var inappMessage *chatv2.Message
+ switch conversationType {
+ case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG:
+ inappMessage = &chatv2.Message{
+ MessageId: "pd_msg_user_" + uuid.New().String(),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_User{
+ User: &chatv2.MessageTypeUser{
+ Content: userPrompt,
+ },
+ },
+ },
+ }
+ default:
+ inappMessage = &chatv2.Message{
+ MessageId: "pd_msg_user_" + uuid.New().String(),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_User{
+ User: &chatv2.MessageTypeUser{
+ Content: userMessage,
+ SelectedText: &userSelectedText,
+ Surrounding: &surrounding,
+ },
+ },
+ },
+ }
+ }
+
+ openaiMessage := openai.UserMessage(userPrompt)
+ return inappMessage, openaiMessage, nil
+}
+
+// convertToBSONV2 converts a chatv2 protobuf message to BSON
+func convertToBSONV2(msg *chatv2.Message) (bson.M, error) {
+ jsonBytes, err := protojson.Marshal(msg)
+ if err != nil {
+ return nil, err
+ }
+ var bsonMsg bson.M
+ if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err != nil {
+ return nil, err
+ }
+ return bsonMsg, nil
+}
+
+// createConversation creates a conversation and writes it to the database
+// Returns the Conversation object
+func (s *ChatServerV2) createConversation(
+ ctx context.Context,
+ userId bson.ObjectID,
+ projectId string,
+ latexFullSource string,
+ projectInstructions string,
+ userInstructions string,
+ userMessage string,
+ userSelectedText string,
+ surrounding string,
+ modelSlug string,
+ conversationType chatv2.ConversationType,
+) (*models.Conversation, error) {
+ systemPrompt, err := s.chatServiceV2.GetSystemPromptV2(ctx, latexFullSource, projectInstructions, userInstructions, conversationType)
+ if err != nil {
+ return nil, err
+ }
+
+ _, openaiSystemMsg := s.buildSystemMessage(systemPrompt)
+ inappUserMsg, openaiUserMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, surrounding, conversationType)
+ if err != nil {
+ return nil, err
+ }
+
+ messages := []*chatv2.Message{inappUserMsg}
+ oaiHistory := []openai.ChatCompletionMessageParamUnion{
+ openaiSystemMsg,
+ openaiUserMsg,
+ }
+
+ return s.chatServiceV2.InsertConversationToDBV2(
+ ctx, userId, projectId, modelSlug, messages, oaiHistory,
+ )
+}
+
+// appendConversationMessage appends a message to the conversation and writes it to the database
+// Returns the Conversation object
+func (s *ChatServerV2) appendConversationMessage(
+ ctx context.Context,
+ userId bson.ObjectID,
+ conversationId string,
+ userMessage string,
+ userSelectedText string,
+ surrounding string,
+ conversationType chatv2.ConversationType,
+) (*models.Conversation, error) {
+ objectID, err := bson.ObjectIDFromHex(conversationId)
+ if err != nil {
+ return nil, err
+ }
+
+ conversation, err := s.chatServiceV2.GetConversationV2(ctx, userId, objectID)
+ if err != nil {
+ return nil, err
+ }
+
+ userMsg, userOaiMsg, err := s.buildUserMessage(ctx, userMessage, userSelectedText, surrounding, conversationType)
+ if err != nil {
+ return nil, err
+ }
+
+ bsonMsg, err := convertToBSONV2(userMsg)
+ if err != nil {
+ return nil, err
+ }
+ conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMsg)
+ conversation.OpenaiChatHistoryCompletion = append(conversation.OpenaiChatHistoryCompletion, userOaiMsg)
+
+ if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil {
+ return nil, err
+ }
+
+ return conversation, nil
+}
+
+// prepare creates a new conversation if conversationId is "", otherwise appends a message to the conversation
+// conversationType can be switched multiple times within a single conversation
+func (s *ChatServerV2) prepare(ctx context.Context, projectId string, conversationId string, userMessage string, userSelectedText string, surrounding string, modelSlug string, conversationType chatv2.ConversationType) (context.Context, *models.Conversation, *models.Settings, error) {
+ actor, err := contextutil.GetActor(ctx)
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+
+ project, err := s.projectService.GetProject(ctx, actor.ID, projectId)
+ if err != nil && err != mongo.ErrNoDocuments {
+ return ctx, nil, nil, err
+ }
+
+ userInstructions, err := s.userService.GetUserInstructions(ctx, actor.ID)
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+
+ var latexFullSource string
+ switch conversationType {
+ case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG:
+ latexFullSource = "latex_full_source is not available in debug mode"
+ default:
+ if project == nil || project.IsOutOfDate() {
+ return ctx, nil, nil, shared.ErrProjectOutOfDate("project is out of date")
+ }
+
+ latexFullSource, err = project.GetFullContent()
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+ }
+
+ var conversation *models.Conversation
+
+ if conversationId == "" {
+ conversation, err = s.createConversation(
+ ctx,
+ actor.ID,
+ projectId,
+ latexFullSource,
+ project.Instructions,
+ userInstructions,
+ userMessage,
+ userSelectedText,
+ surrounding,
+ modelSlug,
+ conversationType,
+ )
+ } else {
+ conversation, err = s.appendConversationMessage(
+ ctx,
+ actor.ID,
+ conversationId,
+ userMessage,
+ userSelectedText,
+ surrounding,
+ conversationType,
+ )
+ }
+
+ if err != nil {
+ return ctx, nil, nil, err
+ }
+
+ ctx = contextutil.SetProjectID(ctx, conversation.ProjectID)
+ ctx = contextutil.SetConversationID(ctx, conversation.ID.Hex())
+
+ settings, err := s.userService.GetUserSettings(ctx, actor.ID)
+ if err != nil {
+ return ctx, conversation, nil, err
+ }
+
+ return ctx, conversation, settings, nil
+}
+
+func (s *ChatServerV2) CreateConversationMessageStream(
+ req *chatv2.CreateConversationMessageStreamRequest,
+ stream chatv2.ChatService_CreateConversationMessageStreamServer,
+) error {
+ ctx := stream.Context()
+
+ modelSlug := req.GetModelSlug()
+ ctx, conversation, settings, err := s.prepare(
+ ctx,
+ req.GetProjectId(),
+ req.GetConversationId(),
+ req.GetUserMessage(),
+ req.GetUserSelectedText(),
+ req.GetSurrounding(),
+ modelSlug,
+ req.GetConversationType(),
+ )
+ if err != nil {
+ return s.sendStreamError(stream, err)
+ }
+
+ // Usage is the same as ChatCompletion, just passing the stream parameter
+ llmProvider := &models.LLMProviderConfig{
+ APIKey: settings.OpenAIAPIKey,
+ }
+
+ openaiChatHistory, inappChatHistory, err := s.aiClientV2.ChatCompletionStreamV2(ctx, stream, conversation.ID.Hex(), modelSlug, conversation.OpenaiChatHistoryCompletion, llmProvider)
+ if err != nil {
+ return s.sendStreamError(stream, err)
+ }
+
+ // Append messages to the conversation
+ bsonMessages := make([]bson.M, len(inappChatHistory))
+ for i := range inappChatHistory {
+ bsonMsg, err := convertToBSONV2(&inappChatHistory[i])
+ if err != nil {
+ return s.sendStreamError(stream, err)
+ }
+ bsonMessages[i] = bsonMsg
+ }
+ conversation.InappChatHistory = append(conversation.InappChatHistory, bsonMessages...)
+ conversation.OpenaiChatHistoryCompletion = openaiChatHistory
+ if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil {
+ return s.sendStreamError(stream, err)
+ }
+
+ if conversation.Title == services.DefaultConversationTitle {
+ go func() {
+ protoMessages := make([]*chatv2.Message, len(conversation.InappChatHistory))
+ for i, bsonMsg := range conversation.InappChatHistory {
+ protoMessages[i] = mapper.BSONToChatMessageV2(bsonMsg)
+ }
+ title, err := s.aiClientV2.GetConversationTitleV2(ctx, protoMessages, llmProvider)
+ if err != nil {
+ s.logger.Error("Failed to get conversation title", "error", err, "conversationID", conversation.ID.Hex())
+ return
+ }
+ conversation.Title = title
+ if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil {
+ s.logger.Error("Failed to update conversation with new title", "error", err, "conversationID", conversation.ID.Hex())
+ return
+ }
+ }()
+ }
+
+ // The final conversation object is NOT returned
+ return nil
+}
diff --git a/internal/api/chat/delete_conversation.go b/internal/api/chat/delete_conversation.go
index 824db1ba..bb3463dd 100644
--- a/internal/api/chat/delete_conversation.go
+++ b/internal/api/chat/delete_conversation.go
@@ -3,12 +3,13 @@ package chat
import (
"context"
- "go.mongodb.org/mongo-driver/v2/bson"
"paperdebugger/internal/libs/contextutil"
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
+
+ "go.mongodb.org/mongo-driver/v2/bson"
)
-func (s *ChatServer) DeleteConversation(
+func (s *ChatServerV1) DeleteConversation(
ctx context.Context,
req *chatv1.DeleteConversationRequest,
) (*chatv1.DeleteConversationResponse, error) {
@@ -22,7 +23,7 @@ func (s *ChatServer) DeleteConversation(
return nil, err
}
- err = s.chatService.DeleteConversation(ctx, actor.ID, conversationID)
+ err = s.chatServiceV1.DeleteConversation(ctx, actor.ID, conversationID)
if err != nil {
return nil, err
}
diff --git a/internal/api/chat/delete_conversation_v2.go b/internal/api/chat/delete_conversation_v2.go
new file mode 100644
index 00000000..ee16c222
--- /dev/null
+++ b/internal/api/chat/delete_conversation_v2.go
@@ -0,0 +1,31 @@
+package chat
+
+import (
+ "context"
+
+ "paperdebugger/internal/libs/contextutil"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "go.mongodb.org/mongo-driver/v2/bson"
+)
+
+func (s *ChatServerV2) DeleteConversation(
+	ctx context.Context,
+	req *chatv2.DeleteConversationRequest,
+) (*chatv2.DeleteConversationResponse, error) {
+	actor, err := contextutil.GetActor(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	conversationID, err := bson.ObjectIDFromHex(req.GetConversationId())
+	if err != nil {
+		return nil, err
+	}
+
+	if err := s.chatServiceV2.DeleteConversationV2(ctx, actor.ID, conversationID); err != nil {
+		return nil, err
+	}
+
+	return &chatv2.DeleteConversationResponse{}, nil
+}
diff --git a/internal/api/chat/get_conversation.go b/internal/api/chat/get_conversation.go
index 3b960b99..de1463c6 100644
--- a/internal/api/chat/get_conversation.go
+++ b/internal/api/chat/get_conversation.go
@@ -3,13 +3,14 @@ package chat
import (
"context"
- "go.mongodb.org/mongo-driver/v2/bson"
"paperdebugger/internal/api/mapper"
"paperdebugger/internal/libs/contextutil"
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
+
+ "go.mongodb.org/mongo-driver/v2/bson"
)
-func (s *ChatServer) GetConversation(
+func (s *ChatServerV1) GetConversation(
ctx context.Context,
req *chatv1.GetConversationRequest,
) (*chatv1.GetConversationResponse, error) {
@@ -23,7 +24,7 @@ func (s *ChatServer) GetConversation(
return nil, err
}
- conversation, err := s.chatService.GetConversation(ctx, actor.ID, conversationID)
+ conversation, err := s.chatServiceV1.GetConversation(ctx, actor.ID, conversationID)
if err != nil {
return nil, err
}
diff --git a/internal/api/chat/get_conversation_v2.go b/internal/api/chat/get_conversation_v2.go
new file mode 100644
index 00000000..2d69920e
--- /dev/null
+++ b/internal/api/chat/get_conversation_v2.go
@@ -0,0 +1,35 @@
+package chat
+
+import (
+ "context"
+
+ "paperdebugger/internal/api/mapper"
+ "paperdebugger/internal/libs/contextutil"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "go.mongodb.org/mongo-driver/v2/bson"
+)
+
+func (s *ChatServerV2) GetConversation(
+ ctx context.Context,
+ req *chatv2.GetConversationRequest,
+) (*chatv2.GetConversationResponse, error) {
+ actor, err := contextutil.GetActor(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ conversationID, err := bson.ObjectIDFromHex(req.GetConversationId())
+ if err != nil {
+ return nil, err
+ }
+
+ conversation, err := s.chatServiceV2.GetConversationV2(ctx, actor.ID, conversationID)
+ if err != nil {
+ return nil, err
+ }
+
+ return &chatv2.GetConversationResponse{
+ Conversation: mapper.MapModelConversationToProtoV2(conversation),
+ }, nil
+}
diff --git a/internal/api/chat/list_conversations.go b/internal/api/chat/list_conversations.go
index 9a0f1232..4db2b65f 100644
--- a/internal/api/chat/list_conversations.go
+++ b/internal/api/chat/list_conversations.go
@@ -3,14 +3,15 @@ package chat
import (
"context"
- "github.com/samber/lo"
"paperdebugger/internal/api/mapper"
"paperdebugger/internal/libs/contextutil"
"paperdebugger/internal/models"
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
+
+ "github.com/samber/lo"
)
-func (s *ChatServer) ListConversations(
+func (s *ChatServerV1) ListConversations(
ctx context.Context,
req *chatv1.ListConversationsRequest,
) (*chatv1.ListConversationsResponse, error) {
@@ -19,7 +20,7 @@ func (s *ChatServer) ListConversations(
return nil, err
}
- conversations, err := s.chatService.ListConversations(ctx, actor.ID, req.GetProjectId())
+ conversations, err := s.chatServiceV1.ListConversations(ctx, actor.ID, req.GetProjectId())
if err != nil {
return nil, err
}
diff --git a/internal/api/chat/list_conversations_v2.go b/internal/api/chat/list_conversations_v2.go
new file mode 100644
index 00000000..2b6fbf2e
--- /dev/null
+++ b/internal/api/chat/list_conversations_v2.go
@@ -0,0 +1,33 @@
+package chat
+
+import (
+ "context"
+
+ "paperdebugger/internal/api/mapper"
+ "paperdebugger/internal/libs/contextutil"
+ "paperdebugger/internal/models"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "github.com/samber/lo"
+)
+
+func (s *ChatServerV2) ListConversations(
+ ctx context.Context,
+ req *chatv2.ListConversationsRequest,
+) (*chatv2.ListConversationsResponse, error) {
+ actor, err := contextutil.GetActor(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ conversations, err := s.chatServiceV2.ListConversationsV2(ctx, actor.ID, req.GetProjectId())
+ if err != nil {
+ return nil, err
+ }
+
+ return &chatv2.ListConversationsResponse{
+ Conversations: lo.Map(conversations, func(conversation *models.Conversation, _ int) *chatv2.Conversation {
+ return mapper.MapModelConversationToProtoV2(conversation)
+ }),
+ }, nil
+}
diff --git a/internal/api/chat/list_supported_models.go b/internal/api/chat/list_supported_models.go
index cf032b55..17e01e72 100644
--- a/internal/api/chat/list_supported_models.go
+++ b/internal/api/chat/list_supported_models.go
@@ -10,7 +10,7 @@ import (
"github.com/openai/openai-go/v2"
)
-func (s *ChatServer) ListSupportedModels(
+func (s *ChatServerV1) ListSupportedModels(
ctx context.Context,
req *chatv1.ListSupportedModelsRequest,
) (*chatv1.ListSupportedModelsResponse, error) {
diff --git a/internal/api/chat/list_supported_models_v2.go b/internal/api/chat/list_supported_models_v2.go
new file mode 100644
index 00000000..60e00873
--- /dev/null
+++ b/internal/api/chat/list_supported_models_v2.go
@@ -0,0 +1,112 @@
+package chat
+
+import (
+ "context"
+ "strings"
+
+ "paperdebugger/internal/libs/contextutil"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "github.com/openai/openai-go/v3"
+)
+
+func (s *ChatServerV2) ListSupportedModels(
+ ctx context.Context,
+ req *chatv2.ListSupportedModelsRequest,
+) (*chatv2.ListSupportedModelsResponse, error) {
+ actor, err := contextutil.GetActor(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ settings, err := s.userService.GetUserSettings(ctx, actor.ID)
+ if err != nil {
+ return nil, err
+ }
+
+ var models []*chatv2.SupportedModel
+ if strings.TrimSpace(settings.OpenAIAPIKey) == "" {
+ models = []*chatv2.SupportedModel{
+ {
+ Name: "GPT-4o",
+ Slug: "openai/gpt-4o",
+ TotalContext: 128000,
+ MaxOutput: 16400,
+ InputPrice: 250,
+ OutputPrice: 1000,
+ },
+ {
+ Name: "GPT-4.1",
+ Slug: "openai/gpt-4.1",
+ TotalContext: 1050000,
+ MaxOutput: 32800,
+ InputPrice: 200,
+ OutputPrice: 800,
+ },
+ {
+ Name: "GPT-4.1-mini",
+ Slug: "openai/gpt-4.1-mini",
+ TotalContext: 128000,
+ MaxOutput: 16400,
+ InputPrice: 15,
+ OutputPrice: 60,
+ },
+ {
+ Name: "Qwen Plus (balanced)",
+ Slug: "qwen/qwen-plus",
+ TotalContext: 131100,
+ MaxOutput: 8200,
+ InputPrice: 40,
+ OutputPrice: 120,
+ },
+ {
+ Name: "Qwen Turbo (fast)",
+ Slug: "qwen/qwen-turbo",
+ TotalContext: 1000000,
+ MaxOutput: 8200,
+ InputPrice: 5,
+ OutputPrice: 20,
+ },
+ {
+ Name: "Gemini 2.5 Flash (fast)",
+ Slug: "google/gemini-2.5-flash",
+ TotalContext: 1050000,
+ MaxOutput: 65500,
+ InputPrice: 30,
+ OutputPrice: 250,
+ },
+ }
+ } else {
+ models = []*chatv2.SupportedModel{
+ {
+ Name: "GPT-4o",
+ Slug: openai.ChatModelGPT4o,
+ TotalContext: 128000,
+ MaxOutput: 16400,
+ InputPrice: 250,
+ OutputPrice: 1000,
+ },
+ {
+ Name: "GPT-4.1",
+ Slug: openai.ChatModelGPT4_1,
+ TotalContext: 1050000,
+ MaxOutput: 32800,
+ InputPrice: 200,
+ OutputPrice: 800,
+ },
+ {
+ Name: "GPT-4.1-mini",
+ Slug: openai.ChatModelGPT4_1Mini,
+ TotalContext: 128000,
+ MaxOutput: 16400,
+ InputPrice: 15,
+ OutputPrice: 60,
+ },
+ // TODO: add user custom models
+ }
+ }
+
+ return &chatv2.ListSupportedModelsResponse{
+ Models: models,
+ }, nil
+}
diff --git a/internal/api/chat/server.go b/internal/api/chat/server.go
index f12c4646..332ca437 100644
--- a/internal/api/chat/server.go
+++ b/internal/api/chat/server.go
@@ -8,11 +8,10 @@ import (
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
)
-type ChatServer struct {
+type ChatServerV1 struct {
chatv1.UnimplementedChatServiceServer
-
- aiClient *aiclient.AIClient
- chatService *services.ChatService
+ aiClientV1 *aiclient.AIClient
+ chatServiceV1 *services.ChatService
projectService *services.ProjectService
userService *services.UserService
logger *logger.Logger
@@ -20,19 +19,19 @@ type ChatServer struct {
}
func NewChatServer(
- aiClient *aiclient.AIClient,
+ aiClientV1 *aiclient.AIClient,
chatService *services.ChatService,
projectService *services.ProjectService,
userService *services.UserService,
logger *logger.Logger,
cfg *cfg.Cfg,
) chatv1.ChatServiceServer {
- return &ChatServer{
- aiClient: aiClient,
- chatService: chatService,
+ return &ChatServerV1{
+ aiClientV1: aiClientV1,
projectService: projectService,
userService: userService,
logger: logger,
+ chatServiceV1: chatService,
cfg: cfg,
}
}
diff --git a/internal/api/chat/server_v2.go b/internal/api/chat/server_v2.go
new file mode 100644
index 00000000..be3cd379
--- /dev/null
+++ b/internal/api/chat/server_v2.go
@@ -0,0 +1,37 @@
+package chat
+
+import (
+ "paperdebugger/internal/libs/cfg"
+ "paperdebugger/internal/libs/logger"
+ "paperdebugger/internal/services"
+ aiclient "paperdebugger/internal/services/toolkit/client"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+)
+
+type ChatServerV2 struct {
+	chatv2.UnimplementedChatServiceServer
+	aiClientV2     *aiclient.AIClientV2
+	chatServiceV2  *services.ChatServiceV2
+	projectService *services.ProjectService
+	userService    *services.UserService
+	logger         *logger.Logger
+	cfg            *cfg.Cfg
+}
+
+func NewChatServerV2(
+	aiClientV2 *aiclient.AIClientV2,
+	chatServiceV2 *services.ChatServiceV2,
+	projectService *services.ProjectService,
+	userService *services.UserService,
+	logger *logger.Logger,
+	cfg *cfg.Cfg,
+) chatv2.ChatServiceServer {
+	return &ChatServerV2{
+		aiClientV2:     aiClientV2,
+		chatServiceV2:  chatServiceV2,
+		projectService: projectService,
+		userService:    userService,
+		logger:         logger,
+		cfg:            cfg,
+	}
+}
diff --git a/internal/api/chat/types.go b/internal/api/chat/types.go
new file mode 100644
index 00000000..2c4515f5
--- /dev/null
+++ b/internal/api/chat/types.go
@@ -0,0 +1,13 @@
+package chat
+
+import (
+	chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+	"github.com/openai/openai-go/v3"
+)
+
+// OpenAIChatHistory is the provider-side transcript sent to the OpenAI API.
+type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion
+
+// AppChatHistory is the in-app transcript shown to the user.
+type AppChatHistory []*chatv2.Message
diff --git a/internal/api/chat/update_conversation.go b/internal/api/chat/update_conversation.go
index 02aaec18..4503d4f9 100644
--- a/internal/api/chat/update_conversation.go
+++ b/internal/api/chat/update_conversation.go
@@ -11,7 +11,7 @@ import (
"go.mongodb.org/mongo-driver/v2/bson"
)
-func (s *ChatServer) UpdateConversation(
+func (s *ChatServerV1) UpdateConversation(
ctx context.Context,
req *chatv1.UpdateConversationRequest,
) (*chatv1.UpdateConversationResponse, error) {
@@ -25,7 +25,7 @@ func (s *ChatServer) UpdateConversation(
return nil, err
}
- conversation, err := s.chatService.GetConversation(ctx, actor.ID, conversationID)
+ conversation, err := s.chatServiceV1.GetConversation(ctx, actor.ID, conversationID)
if err != nil {
return nil, err
}
@@ -35,7 +35,7 @@ func (s *ChatServer) UpdateConversation(
}
conversation.Title = req.GetTitle()
- err = s.chatService.UpdateConversation(conversation)
+ err = s.chatServiceV1.UpdateConversation(conversation)
if err != nil {
return nil, err
}
diff --git a/internal/api/chat/update_conversation_v2.go b/internal/api/chat/update_conversation_v2.go
new file mode 100644
index 00000000..d6855de3
--- /dev/null
+++ b/internal/api/chat/update_conversation_v2.go
@@ -0,0 +1,46 @@
+package chat
+
+import (
+ "context"
+
+ "paperdebugger/internal/api/mapper"
+ "paperdebugger/internal/libs/contextutil"
+ "paperdebugger/internal/libs/shared"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "go.mongodb.org/mongo-driver/v2/bson"
+)
+
+func (s *ChatServerV2) UpdateConversation(
+	ctx context.Context,
+	req *chatv2.UpdateConversationRequest,
+) (*chatv2.UpdateConversationResponse, error) {
+	actor, err := contextutil.GetActor(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate the request before doing any database work.
+	if req.GetTitle() == "" {
+		return nil, shared.ErrBadRequest("title is required")
+	}
+
+	conversationID, err := bson.ObjectIDFromHex(req.GetConversationId())
+	if err != nil {
+		return nil, err
+	}
+
+	conversation, err := s.chatServiceV2.GetConversationV2(ctx, actor.ID, conversationID)
+	if err != nil {
+		return nil, err
+	}
+
+	conversation.Title = req.GetTitle()
+	if err := s.chatServiceV2.UpdateConversationV2(conversation); err != nil {
+		return nil, err
+	}
+
+	return &chatv2.UpdateConversationResponse{
+		Conversation: mapper.MapModelConversationToProtoV2(conversation),
+	}, nil
+}
diff --git a/internal/api/grpc.go b/internal/api/grpc.go
index 1b8443b2..ed9dc2b0 100644
--- a/internal/api/grpc.go
+++ b/internal/api/grpc.go
@@ -12,6 +12,7 @@ import (
"paperdebugger/internal/services"
authv1 "paperdebugger/pkg/gen/api/auth/v1"
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
commentv1 "paperdebugger/pkg/gen/api/comment/v1"
projectv1 "paperdebugger/pkg/gen/api/project/v1"
userv1 "paperdebugger/pkg/gen/api/user/v1"
@@ -101,6 +102,7 @@ func NewGrpcServer(
cfg *cfg.Cfg,
authServer authv1.AuthServiceServer,
chatServer chatv1.ChatServiceServer,
+ chatServerV2 chatv2.ChatServiceServer,
userServer userv1.UserServiceServer,
projectServer projectv1.ProjectServiceServer,
commentServer commentv1.CommentServiceServer,
@@ -115,6 +117,7 @@ func NewGrpcServer(
authv1.RegisterAuthServiceServer(grpcServer.Server, authServer)
chatv1.RegisterChatServiceServer(grpcServer.Server, chatServer)
+ chatv2.RegisterChatServiceServer(grpcServer.Server, chatServerV2)
userv1.RegisterUserServiceServer(grpcServer.Server, userServer)
projectv1.RegisterProjectServiceServer(grpcServer.Server, projectServer)
commentv1.RegisterCommentServiceServer(grpcServer.Server, commentServer)
diff --git a/internal/api/mapper/conversation_v2.go b/internal/api/mapper/conversation_v2.go
new file mode 100644
index 00000000..23c10fae
--- /dev/null
+++ b/internal/api/mapper/conversation_v2.go
@@ -0,0 +1,59 @@
+package mapper
+
+import (
+	"paperdebugger/internal/models"
+	chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+	"github.com/samber/lo"
+	"go.mongodb.org/mongo-driver/v2/bson"
+	"google.golang.org/protobuf/encoding/protojson"
+)
+
+// BSONToChatMessageV2 converts a raw BSON document from inapp_chat_history
+// back into a chatv2.Message. It returns nil when the document cannot be
+// round-tripped through extended JSON into the protobuf message.
+func BSONToChatMessageV2(msg bson.M) *chatv2.Message {
+	jsonBytes, err := bson.MarshalExtJSON(msg, true, true)
+	if err != nil {
+		return nil
+	}
+
+	m := &chatv2.Message{}
+	if err := protojson.Unmarshal(jsonBytes, m); err != nil {
+		return nil
+	}
+	return m
+}
+
+// MapModelConversationToProtoV2 maps a stored conversation to its API
+// representation, hiding system messages and messages that failed conversion.
+func MapModelConversationToProtoV2(conversation *models.Conversation) *chatv2.Conversation {
+	// Convert BSON messages back to protobuf messages
+	filteredMessages := lo.Map(conversation.InappChatHistory, func(msg bson.M, _ int) *chatv2.Message {
+		return BSONToChatMessageV2(msg)
+	})
+
+	// A oneof comparison like `!= &chatv2.MessagePayload_System{}` is always
+	// true (it compares against a freshly allocated pointer), so system
+	// messages were never filtered out; use a type assertion instead. Also
+	// drop nil entries produced by failed BSON conversion.
+	filteredMessages = lo.Filter(filteredMessages, func(msg *chatv2.Message, _ int) bool {
+		if msg == nil {
+			return false
+		}
+		_, isSystem := msg.GetPayload().GetMessageType().(*chatv2.MessagePayload_System)
+		return !isSystem
+	})
+
+	modelSlug := conversation.ModelSlug
+	if modelSlug == "" {
+		modelSlug = models.SlugFromLanguageModel(models.LanguageModel(conversation.LanguageModel))
+	}
+
+	return &chatv2.Conversation{
+		Id:        conversation.ID.Hex(),
+		Title:     conversation.Title,
+		ModelSlug: modelSlug,
+		Messages:  filteredMessages,
+	}
+}
diff --git a/internal/api/server.go b/internal/api/server.go
index 3203148b..405ec61b 100644
--- a/internal/api/server.go
+++ b/internal/api/server.go
@@ -13,6 +13,7 @@ import (
"paperdebugger/internal/libs/shared"
authv1 "paperdebugger/pkg/gen/api/auth/v1"
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
commentv1 "paperdebugger/pkg/gen/api/comment/v1"
projectv1 "paperdebugger/pkg/gen/api/project/v1"
sharedv1 "paperdebugger/pkg/gen/api/shared/v1"
@@ -84,6 +85,11 @@ func (s *Server) Run(addr string) {
s.logger.Fatalf("failed to register chat service grpc gateway: %v", err)
return
}
+ err = chatv2.RegisterChatServiceHandler(context.Background(), mux, client)
+ if err != nil {
+ s.logger.Fatalf("failed to register chat v2 service grpc gateway: %v", err)
+ return
+ }
err = userv1.RegisterUserServiceHandler(context.Background(), mux, client)
if err != nil {
s.logger.Fatalf("failed to register user service grpc gateway: %v", err)
diff --git a/internal/libs/cfg/cfg.go b/internal/libs/cfg/cfg.go
index 1293ea40..a907f04c 100644
--- a/internal/libs/cfg/cfg.go
+++ b/internal/libs/cfg/cfg.go
@@ -7,12 +7,15 @@ import (
)
type Cfg struct {
- OpenAIBaseURL string
- OpenAIAPIKey string
- JwtSigningKey string
+ OpenAIBaseURL string
+ OpenAIAPIKey string
+ InferenceBaseURL string
+ InferenceAPIKey string
+ JwtSigningKey string
- MongoURI string
- XtraMCPURI string
+ MongoURI string
+ XtraMCPURI string
+ MCPServerURL string
}
var cfg *Cfg
@@ -20,11 +23,14 @@ var cfg *Cfg
func GetCfg() *Cfg {
_ = godotenv.Load()
cfg = &Cfg{
- OpenAIBaseURL: openAIBaseURL(),
- OpenAIAPIKey: os.Getenv("OPENAI_API_KEY"),
- JwtSigningKey: os.Getenv("JWT_SIGNING_KEY"),
- MongoURI: mongoURI(),
- XtraMCPURI: xtraMCPURI(),
+ OpenAIBaseURL: openAIBaseURL(),
+ OpenAIAPIKey: os.Getenv("OPENAI_API_KEY"),
+ InferenceBaseURL: inferenceBaseURL(),
+ InferenceAPIKey: os.Getenv("INFERENCE_API_KEY"),
+ JwtSigningKey: os.Getenv("JWT_SIGNING_KEY"),
+ MongoURI: mongoURI(),
+ XtraMCPURI: xtraMCPURI(),
+ MCPServerURL: mcpServerURL(),
}
return cfg
@@ -38,6 +44,14 @@ func openAIBaseURL() string {
return "https://api.openai.com/v1"
}
+func inferenceBaseURL() string {
+ val := os.Getenv("INFERENCE_BASE_URL")
+ if val != "" {
+ return val
+ }
+ return "https://inference.paperdebugger.workers.dev"
+}
+
func xtraMCPURI() string {
val := os.Getenv("XTRAMCP_URI")
if val != "" {
@@ -54,3 +68,11 @@ func mongoURI() string {
return "mongodb://localhost:27017"
}
+
+func mcpServerURL() string {
+ val := os.Getenv("MCP_SERVER_URL")
+ if val != "" {
+ return val
+ }
+ return "http://paperdebugger-mcp-server:8000"
+}
diff --git a/internal/libs/db/filter.go b/internal/libs/db/filter.go
new file mode 100644
index 00000000..ad54b783
--- /dev/null
+++ b/internal/libs/db/filter.go
@@ -0,0 +1,26 @@
+package db
+
+import "go.mongodb.org/mongo-driver/v2/bson"
+
+// NotDeleted returns a filter that excludes soft-deleted documents.
+// Use with MergeFilters to combine with other query conditions.
+func NotDeleted() bson.M {
+ return bson.M{
+ "$or": []bson.M{
+ {"deleted_at": nil},
+ {"deleted_at": bson.M{"$exists": false}},
+ },
+ }
+}
+
+// MergeFilters combines filters with $and; 0 filters yields an empty filter, 1 is returned as-is.
+func MergeFilters(filters ...bson.M) bson.M {
+	switch len(filters) {
+	case 0:
+		return bson.M{}
+	case 1:
+		return filters[0]
+	default:
+		return bson.M{"$and": filters}
+	}
+}
diff --git a/internal/models/conversation.go b/internal/models/conversation.go
index 70d48300..fdabf859 100644
--- a/internal/models/conversation.go
+++ b/internal/models/conversation.go
@@ -2,6 +2,7 @@ package models
import (
"github.com/openai/openai-go/v2/responses"
+ "github.com/openai/openai-go/v3"
"go.mongodb.org/mongo-driver/v2/bson"
)
@@ -14,8 +15,10 @@ type Conversation struct {
ModelSlug string `bson:"model_slug"`
InappChatHistory []bson.M `bson:"inapp_chat_history"` // Store as raw BSON to avoid protobuf decoding issues
- OpenaiChatHistory responses.ResponseInputParam `bson:"openai_chat_history"` // 实际上发给 GPT 的聊天历史
- OpenaiChatParams responses.ResponseNewParams `bson:"openai_chat_params"` // 对话的参数,比如 temperature, etc.
+ OpenaiChatHistory responses.ResponseInputParam `bson:"openai_chat_history"` // The actual chat history sent to GPT
+ OpenaiChatParams responses.ResponseNewParams `bson:"openai_chat_params"` // Conversation parameters, such as temperature, etc.
+ OpenaiChatHistoryCompletion []openai.ChatCompletionMessageParamUnion `bson:"openai_chat_history_completion"`
+ OpenaiChatParamsCompletion openai.ChatCompletionNewParams `bson:"openai_chat_params_completion"`
}
func (c Conversation) CollectionName() string {
diff --git a/internal/models/oauth.go b/internal/models/oauth.go
index e18554bb..3e9e82eb 100644
--- a/internal/models/oauth.go
+++ b/internal/models/oauth.go
@@ -2,7 +2,7 @@ package models
type OAuth struct {
BaseModel `bson:",inline"`
- Code string `bson:"code,omitempty"` // OAuth 中的 code(即 authorization code)在 Google 的实现下 是一次性使用且短时间内有效的、临时唯一的。
+ Code string `bson:"code,omitempty"` // OAuth code (authorization code) in Google's implementation is single-use, short-lived, and temporarily unique.
AccessToken string `bson:"access_token,omitempty"`
State string `bson:"state,omitempty"`
Used bool `bson:"used,omitempty"`
diff --git a/internal/services/chat.go b/internal/services/chat.go
index 825eecfc..5e5358f0 100644
--- a/internal/services/chat.go
+++ b/internal/services/chat.go
@@ -33,6 +33,21 @@ var userPromptDefaultTemplate string
//go:embed user_prompt_debug.tmpl
var userPromptDebugTemplate string
+// Pre-compiled templates for better performance
+var (
+ systemPromptDefaultTmpl *template.Template
+ systemPromptDebugTmpl *template.Template
+ userPromptDefaultTmpl *template.Template
+ userPromptDebugTmpl *template.Template
+)
+
+func init() {
+ systemPromptDefaultTmpl = template.Must(template.New("system_default").Parse(systemPromptDefaultTemplate))
+ systemPromptDebugTmpl = template.Must(template.New("system_debug").Parse(systemPromptDebugTemplate))
+ userPromptDefaultTmpl = template.Must(template.New("user_default").Parse(userPromptDefaultTemplate))
+ userPromptDebugTmpl = template.Must(template.New("user_debug").Parse(userPromptDebugTemplate))
+}
+
type ChatService struct {
BaseService
conversationCollection *mongo.Collection
@@ -50,16 +65,14 @@ func NewChatService(db *db.DB, cfg *cfg.Cfg, logger *logger.Logger) *ChatService
}
func (s *ChatService) GetSystemPrompt(ctx context.Context, fullContent string, projectInstructions string, userInstructions string, conversationType chatv1.ConversationType) (string, error) {
- var systemPromptString string
+ var tmpl *template.Template
switch conversationType {
case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG:
- systemPromptString = systemPromptDebugTemplate
+ tmpl = systemPromptDebugTmpl
default:
- systemPromptString = systemPromptDefaultTemplate
+ tmpl = systemPromptDefaultTmpl
}
- tmpl := template.Must(template.New("system_prompt").Parse(systemPromptString))
-
var systemPromptBuffer bytes.Buffer
if err := tmpl.Execute(&systemPromptBuffer, map[string]string{
"FullContent": fullContent,
@@ -72,16 +85,14 @@ func (s *ChatService) GetSystemPrompt(ctx context.Context, fullContent string, p
}
func (s *ChatService) GetPrompt(ctx context.Context, content string, selectedText string, conversationType chatv1.ConversationType) (string, error) {
- var userPromptString string
+ var tmpl *template.Template
switch conversationType {
case chatv1.ConversationType_CONVERSATION_TYPE_DEBUG:
- userPromptString = userPromptDebugTemplate
+ tmpl = userPromptDebugTmpl
default:
- userPromptString = userPromptDefaultTemplate
+ tmpl = userPromptDefaultTmpl
}
- tmpl := template.Must(template.New("user_prompt").Parse(userPromptString))
-
var userPromptBuffer bytes.Buffer
if err := tmpl.Execute(&userPromptBuffer, map[string]string{
"UserInput": content,
@@ -128,14 +139,10 @@ func (s *ChatService) InsertConversationToDB(ctx context.Context, userID bson.Ob
}
func (s *ChatService) ListConversations(ctx context.Context, userID bson.ObjectID, projectID string) ([]*models.Conversation, error) {
- filter := bson.M{
- "user_id": userID,
- "project_id": projectID,
- "$or": []bson.M{
- {"deleted_at": nil},
- {"deleted_at": bson.M{"$exists": false}},
- },
- }
+ filter := db.MergeFilters(
+ bson.M{"user_id": userID, "project_id": projectID},
+ db.NotDeleted(),
+ )
opts := options.Find().
SetProjection(bson.M{
"inapp_chat_history": 0,
@@ -158,14 +165,11 @@ func (s *ChatService) ListConversations(ctx context.Context, userID bson.ObjectI
func (s *ChatService) GetConversation(ctx context.Context, userID bson.ObjectID, conversationID bson.ObjectID) (*models.Conversation, error) {
conversation := &models.Conversation{}
- err := s.conversationCollection.FindOne(ctx, bson.M{
- "_id": conversationID,
- "user_id": userID,
- "$or": []bson.M{
- {"deleted_at": nil},
- {"deleted_at": bson.M{"$exists": false}},
- },
- }).Decode(conversation)
+ filter := db.MergeFilters(
+ bson.M{"_id": conversationID, "user_id": userID},
+ db.NotDeleted(),
+ )
+ err := s.conversationCollection.FindOne(ctx, filter).Decode(conversation)
if err != nil {
return nil, err
}
@@ -174,32 +178,41 @@ func (s *ChatService) GetConversation(ctx context.Context, userID bson.ObjectID,
func (s *ChatService) UpdateConversation(conversation *models.Conversation) error {
conversation.UpdatedAt = bson.NewDateTimeFromTime(time.Now())
+ filter := db.MergeFilters(
+ bson.M{"_id": conversation.ID},
+ db.NotDeleted(),
+ )
_, err := s.conversationCollection.UpdateOne(
context.Background(),
- bson.M{
- "_id": conversation.ID,
- "$or": []bson.M{
- {"deleted_at": nil},
- {"deleted_at": bson.M{"$exists": false}},
- },
- },
+ filter,
bson.M{"$set": conversation},
)
return err
}
+func (s *ChatService) UpdateConversationTitle(ctx context.Context, conversationID bson.ObjectID, title string) error {
+ filter := db.MergeFilters(
+ bson.M{"_id": conversationID},
+ db.NotDeleted(),
+ )
+ now := bson.NewDateTimeFromTime(time.Now())
+ _, err := s.conversationCollection.UpdateOne(
+ ctx,
+ filter,
+ bson.M{"$set": bson.M{"title": title, "updated_at": now}},
+ )
+ return err
+}
+
func (s *ChatService) DeleteConversation(ctx context.Context, userID bson.ObjectID, conversationID bson.ObjectID) error {
now := bson.NewDateTimeFromTime(time.Now())
+ filter := db.MergeFilters(
+ bson.M{"_id": conversationID, "user_id": userID},
+ db.NotDeleted(),
+ )
_, err := s.conversationCollection.UpdateOne(
ctx,
- bson.M{
- "_id": conversationID,
- "user_id": userID,
- "$or": []bson.M{
- {"deleted_at": nil},
- {"deleted_at": bson.M{"$exists": false}},
- },
- },
+ filter,
bson.M{"$set": bson.M{"deleted_at": now, "updated_at": now}},
)
return err
diff --git a/internal/services/chat_v2.go b/internal/services/chat_v2.go
new file mode 100644
index 00000000..c5ea74d9
--- /dev/null
+++ b/internal/services/chat_v2.go
@@ -0,0 +1,220 @@
+package services
+
+import (
+ "bytes"
+ "context"
+ _ "embed"
+ "strings"
+ "text/template"
+ "time"
+
+ "paperdebugger/internal/libs/cfg"
+ "paperdebugger/internal/libs/db"
+ "paperdebugger/internal/libs/logger"
+ "paperdebugger/internal/models"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "github.com/openai/openai-go/v3"
+ "go.mongodb.org/mongo-driver/v2/bson"
+ "go.mongodb.org/mongo-driver/v2/mongo"
+ "go.mongodb.org/mongo-driver/v2/mongo/options"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+//go:embed system_prompt_default.tmpl
+var systemPromptDefaultTemplateV2 string
+
+//go:embed system_prompt_debug.tmpl
+var systemPromptDebugTemplateV2 string
+
+//go:embed user_prompt_default.tmpl
+var userPromptDefaultTemplateV2 string
+
+//go:embed user_prompt_debug.tmpl
+var userPromptDebugTemplateV2 string
+
+// Pre-compiled templates for better performance
+var (
+ systemPromptDefaultTmplV2 *template.Template
+ systemPromptDebugTmplV2 *template.Template
+ userPromptDefaultTmplV2 *template.Template
+ userPromptDebugTmplV2 *template.Template
+)
+
+func init() {
+ systemPromptDefaultTmplV2 = template.Must(template.New("system_default_v2").Parse(systemPromptDefaultTemplateV2))
+ systemPromptDebugTmplV2 = template.Must(template.New("system_debug_v2").Parse(systemPromptDebugTemplateV2))
+ userPromptDefaultTmplV2 = template.Must(template.New("user_default_v2").Parse(userPromptDefaultTemplateV2))
+ userPromptDebugTmplV2 = template.Must(template.New("user_debug_v2").Parse(userPromptDebugTemplateV2))
+}
+
+type ChatServiceV2 struct {
+ BaseService
+ conversationCollection *mongo.Collection
+}
+
+// DefaultConversationTitleV2 is the placeholder title assigned to newly created V2 conversations.
+const DefaultConversationTitleV2 = "New Conversation ." // NOTE(review): trailing " ." looks unintentional; handlers compare against services.DefaultConversationTitle (V1) — confirm the two constants match, or auto-titling will never trigger for V2 conversations
+
+func NewChatServiceV2(db *db.DB, cfg *cfg.Cfg, logger *logger.Logger) *ChatServiceV2 {
+ base := NewBaseService(db, cfg, logger)
+ return &ChatServiceV2{
+ BaseService: base,
+ conversationCollection: base.db.Collection((models.Conversation{}).CollectionName()),
+ }
+}
+
+func (s *ChatServiceV2) GetSystemPromptV2(ctx context.Context, fullContent string, projectInstructions string, userInstructions string, conversationType chatv2.ConversationType) (string, error) {
+ var tmpl *template.Template
+ switch conversationType {
+ case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG:
+ tmpl = systemPromptDebugTmplV2
+ default:
+ tmpl = systemPromptDefaultTmplV2
+ }
+
+ var systemPromptBuffer bytes.Buffer
+ if err := tmpl.Execute(&systemPromptBuffer, map[string]string{
+ "FullContent": fullContent,
+ "ProjectInstructions": projectInstructions,
+ "UserInstructions": userInstructions,
+ }); err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(systemPromptBuffer.String()), nil
+}
+
+func (s *ChatServiceV2) GetPrompt(ctx context.Context, content string, selectedText string, surrounding string, conversationType chatv2.ConversationType) (string, error) {
+ var tmpl *template.Template
+ switch conversationType {
+ case chatv2.ConversationType_CONVERSATION_TYPE_DEBUG:
+ tmpl = userPromptDebugTmplV2
+ default:
+ tmpl = userPromptDefaultTmplV2
+ }
+
+ var userPromptBuffer bytes.Buffer
+ if err := tmpl.Execute(&userPromptBuffer, map[string]string{
+ "UserInput": content,
+ "SelectedText": selectedText,
+ "Surrounding": surrounding,
+ }); err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(userPromptBuffer.String()), nil
+}
+
+func (s *ChatServiceV2) InsertConversationToDBV2(ctx context.Context, userID bson.ObjectID, projectID string, modelSlug string, inappChatHistory []*chatv2.Message, openaiChatHistory []openai.ChatCompletionMessageParamUnion) (*models.Conversation, error) {
+ // Convert protobuf messages to BSON
+ bsonMessages := make([]bson.M, len(inappChatHistory))
+ for i := range inappChatHistory {
+ jsonBytes, err := protojson.Marshal(inappChatHistory[i])
+ if err != nil {
+ return nil, err
+ }
+ var bsonMsg bson.M
+ if err := bson.UnmarshalExtJSON(jsonBytes, true, &bsonMsg); err != nil {
+ return nil, err
+ }
+ bsonMessages[i] = bsonMsg
+ }
+
+ conversation := &models.Conversation{
+ BaseModel: models.BaseModel{
+ ID: bson.NewObjectID(),
+ CreatedAt: bson.NewDateTimeFromTime(time.Now()),
+ UpdatedAt: bson.NewDateTimeFromTime(time.Now()),
+ },
+ UserID: userID,
+ ProjectID: projectID,
+ Title: DefaultConversationTitleV2,
+ ModelSlug: modelSlug,
+ InappChatHistory: bsonMessages,
+ OpenaiChatHistoryCompletion: openaiChatHistory,
+ }
+ _, err := s.conversationCollection.InsertOne(ctx, conversation)
+ if err != nil {
+ return nil, err
+ }
+ return conversation, nil
+}
+
+func (s *ChatServiceV2) ListConversationsV2(ctx context.Context, userID bson.ObjectID, projectID string) ([]*models.Conversation, error) {
+	filter := db.MergeFilters(
+		bson.M{"user_id": userID, "project_id": projectID},
+		db.NotDeleted(),
+	)
+	opts := options.Find().
+		SetProjection(bson.M{
+			"inapp_chat_history":             0,
+			"openai_chat_history":            0,
+			"openai_chat_history_completion": 0,
+		}).
+		SetSort(bson.M{"updated_at": -1}).
+		SetLimit(50)
+	cursor, err := s.conversationCollection.Find(ctx, filter, opts)
+	if err != nil {
+		return nil, err
+	}
+
+	var conversations []*models.Conversation
+	if err = cursor.All(ctx, &conversations); err != nil {
+		return nil, err
+	}
+	return conversations, nil
+}
+
+func (s *ChatServiceV2) GetConversationV2(ctx context.Context, userID bson.ObjectID, conversationID bson.ObjectID) (*models.Conversation, error) {
+ conversation := &models.Conversation{}
+ filter := db.MergeFilters(
+ bson.M{"_id": conversationID, "user_id": userID},
+ db.NotDeleted(),
+ )
+ err := s.conversationCollection.FindOne(ctx, filter).Decode(conversation)
+ if err != nil {
+ return nil, err
+ }
+ return conversation, nil
+}
+
+func (s *ChatServiceV2) UpdateConversationV2(conversation *models.Conversation) error {
+	conversation.UpdatedAt = bson.NewDateTimeFromTime(time.Now())
+	filter := db.MergeFilters(
+		bson.M{"_id": conversation.ID},
+		db.NotDeleted(),
+	)
+	_, err := s.conversationCollection.UpdateOne(
+		context.Background(), // NOTE(review): consider accepting a ctx parameter like the other V2 methods; callers currently pass no context
+		filter,
+		bson.M{"$set": conversation},
+	)
+	return err
+}
+
+func (s *ChatServiceV2) UpdateConversationTitleV2(ctx context.Context, conversationID bson.ObjectID, title string) error {
+ filter := db.MergeFilters(
+ bson.M{"_id": conversationID},
+ db.NotDeleted(),
+ )
+ now := bson.NewDateTimeFromTime(time.Now())
+ _, err := s.conversationCollection.UpdateOne(
+ ctx,
+ filter,
+ bson.M{"$set": bson.M{"title": title, "updated_at": now}},
+ )
+ return err
+}
+
+func (s *ChatServiceV2) DeleteConversationV2(ctx context.Context, userID bson.ObjectID, conversationID bson.ObjectID) error {
+ now := bson.NewDateTimeFromTime(time.Now())
+ filter := db.MergeFilters(
+ bson.M{"_id": conversationID, "user_id": userID},
+ db.NotDeleted(),
+ )
+ _, err := s.conversationCollection.UpdateOne(
+ ctx,
+ filter,
+ bson.M{"$set": bson.M{"deleted_at": now, "updated_at": now}},
+ )
+ return err
+}
diff --git a/internal/services/project.go b/internal/services/project.go
index 0ac54a8b..ae8ae89a 100644
--- a/internal/services/project.go
+++ b/internal/services/project.go
@@ -148,7 +148,7 @@ func (s *ProjectService) fetchCategoryFromAPI(latexSource string) (*models.Class
return nil, fmt.Errorf("failed to marshal request body: %w", err)
}
- req, err := http.NewRequest("POST", "http://paperdebugger-mcp-server:8000/classify-paper", bytes.NewBuffer(jsonData))
+ req, err := http.NewRequest("POST", fmt.Sprintf("%s/classify-paper", s.cfg.MCPServerURL), bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
diff --git a/internal/services/token_test.go b/internal/services/token_test.go
index 22c4786b..58477308 100644
--- a/internal/services/token_test.go
+++ b/internal/services/token_test.go
@@ -16,7 +16,7 @@ import (
)
func setupTestTokenService(t *testing.T) *services.TokenService {
- os.Setenv("PD_MONGO_URI", "mongodb://localhost:27017") // 确保本地有 MongoDB
+ os.Setenv("PD_MONGO_URI", "mongodb://localhost:27017") // Ensure MongoDB is running locally
dbInstance, err := db.NewDB(cfg.GetCfg(), logger.GetLogger())
if err != nil {
t.Fatalf("failed to connect to test db: %v", err)
diff --git a/internal/services/toolkit/client/client.go b/internal/services/toolkit/client/client.go
index 68599397..7c986d08 100644
--- a/internal/services/toolkit/client/client.go
+++ b/internal/services/toolkit/client/client.go
@@ -1,15 +1,12 @@
package client
import (
- "context"
"paperdebugger/internal/libs/cfg"
"paperdebugger/internal/libs/db"
"paperdebugger/internal/libs/logger"
"paperdebugger/internal/models"
"paperdebugger/internal/services"
"paperdebugger/internal/services/toolkit/handler"
- "paperdebugger/internal/services/toolkit/registry"
- "paperdebugger/internal/services/toolkit/tools/xtramcp"
"github.com/openai/openai-go/v2"
"github.com/openai/openai-go/v2/option"
@@ -17,8 +14,7 @@ import (
)
type AIClient struct {
- toolCallHandler *handler.ToolCallHandler
-
+ toolCallHandler *handler.ToolCallHandler
db *mongo.Database
functionCallCollection *mongo.Collection
@@ -31,8 +27,13 @@ type AIClient struct {
// SetOpenAIClient sets the appropriate OpenAI client based on the LLM provider config.
// If the config specifies a custom endpoint and API key, a new client is created for that endpoint.
func (a *AIClient) GetOpenAIClient(llmConfig *models.LLMProviderConfig) *openai.Client {
- var Endpoint string = llmConfig.Endpoint
- var APIKey string = llmConfig.APIKey
+ var Endpoint string
+ var APIKey string
+
+ if llmConfig != nil {
+ Endpoint = llmConfig.Endpoint
+ APIKey = llmConfig.APIKey
+ }
if Endpoint == "" {
Endpoint = a.cfg.OpenAIBaseURL
@@ -65,35 +66,10 @@ func NewAIClient(
option.WithAPIKey(cfg.OpenAIAPIKey),
)
CheckOpenAIWorks(oaiClient, logger)
- // toolPaperScore := tools.NewPaperScoreTool(db, projectService)
- // toolPaperScoreComment := tools.NewPaperScoreCommentTool(db, projectService, reverseCommentService)
-
- toolRegistry := registry.NewToolRegistry()
-
- // toolRegistry.Register("always_exception", tools.AlwaysExceptionToolDescription, tools.AlwaysExceptionTool)
- // toolRegistry.Register("greeting", tools.GreetingToolDescription, tools.GreetingTool)
-
- // Load tools dynamically from backend
- xtraMCPLoader := xtramcp.NewXtraMCPLoader(db, projectService, cfg.XtraMCPURI)
-
- // initialize MCP session first and log session ID
- sessionID, err := xtraMCPLoader.InitializeMCP()
- if err != nil {
- logger.Errorf("[AI Client] Failed to initialize XtraMCP session: %v", err)
- // TODO: Fallback to static tools or exit?
- } else {
- logger.Info("[AI Client] XtraMCP session initialized", "sessionID", sessionID)
-
- // dynamically load all tools from XtraMCP backend
- err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry)
- if err != nil {
- logger.Errorf("[AI Client] Failed to load XtraMCP tools: %v", err)
- } else {
- logger.Info("[AI Client] Successfully loaded XtraMCP tools")
- }
- }
+ toolRegistry := initializeToolkit(db, projectService, cfg, logger)
toolCallHandler := handler.NewToolCallHandler(toolRegistry)
+
client := &AIClient{
toolCallHandler: toolCallHandler,
@@ -108,18 +84,3 @@ func NewAIClient(
return client
}
-
-func CheckOpenAIWorks(oaiClient openai.Client, logger *logger.Logger) {
- logger.Info("[AI Client] checking if openai client works")
- chatCompletion, err := oaiClient.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
- Messages: []openai.ChatCompletionMessageParamUnion{
- openai.UserMessage("Say 'openai client works'"),
- },
- Model: openai.ChatModelGPT4o,
- })
- if err != nil {
- logger.Errorf("[AI Client] openai client does not work: %v", err)
- return
- }
- logger.Info("[AI Client] openai client works", "response", chatCompletion.Choices[0].Message.Content)
-}
diff --git a/internal/services/toolkit/client/client_v2.go b/internal/services/toolkit/client/client_v2.go
new file mode 100644
index 00000000..8279b6bc
--- /dev/null
+++ b/internal/services/toolkit/client/client_v2.go
@@ -0,0 +1,88 @@
+package client
+
+import (
+ "paperdebugger/internal/libs/cfg"
+ "paperdebugger/internal/libs/db"
+ "paperdebugger/internal/libs/logger"
+ "paperdebugger/internal/models"
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit/handler"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/option"
+ "go.mongodb.org/mongo-driver/v2/mongo"
+)
+
// AIClientV2 is the v2 OpenAI-backed chat client. It bundles the tool-call
// dispatcher, MongoDB handles, and the services that registered tools and
// completion flows depend on. Construct it with NewAIClientV2.
type AIClientV2 struct {
	toolCallHandler        *handler.ToolCallHandlerV2 // dispatches model tool calls to registered tools
	db                     *mongo.Database
	functionCallCollection *mongo.Collection // collection for models.FunctionCall documents

	reverseCommentService *services.ReverseCommentService
	projectService        *services.ProjectService
	cfg                   *cfg.Cfg
	logger                *logger.Logger
}
+
+// SetOpenAIClient sets the appropriate OpenAI client based on the LLM provider config.
+// If the config specifies a custom endpoint and API key, a new client is created for that endpoint.
+// V2 uses the inference endpoint by default.
+// When a user provides their own API key, use the /openai endpoint instead of /openrouter.
+func (a *AIClientV2) GetOpenAIClient(llmConfig *models.LLMProviderConfig) *openai.Client {
+ var Endpoint string = llmConfig.Endpoint
+ var APIKey string = llmConfig.APIKey
+
+ if Endpoint == "" {
+ if APIKey != "" {
+ // User provided their own API key, use the OpenAI-compatible endpoint
+ Endpoint = a.cfg.InferenceBaseURL + "/openai"
+ } else {
+ Endpoint = a.cfg.InferenceBaseURL + "/openrouter"
+ }
+ }
+
+ if APIKey == "" {
+ APIKey = a.cfg.InferenceAPIKey
+ }
+
+ opts := []option.RequestOption{
+ option.WithAPIKey(APIKey),
+ option.WithBaseURL(Endpoint),
+ }
+
+ client := openai.NewClient(opts...)
+ return &client
+}
+
+func NewAIClientV2(
+ db *db.DB,
+
+ reverseCommentService *services.ReverseCommentService,
+ projectService *services.ProjectService,
+ cfg *cfg.Cfg,
+ logger *logger.Logger,
+) *AIClientV2 {
+ database := db.Database("paperdebugger")
+ oaiClient := openai.NewClient(
+ option.WithBaseURL(cfg.InferenceBaseURL+"/openrouter"),
+ option.WithAPIKey(cfg.InferenceAPIKey),
+ )
+ CheckOpenAIWorksV2(oaiClient, logger)
+
+ toolRegistry := initializeToolkitV2(db, projectService, cfg, logger)
+ toolCallHandler := handler.NewToolCallHandlerV2(toolRegistry)
+
+ client := &AIClientV2{
+ toolCallHandler: toolCallHandler,
+
+ db: database,
+ functionCallCollection: database.Collection((models.FunctionCall{}).CollectionName()),
+
+ reverseCommentService: reverseCommentService,
+ projectService: projectService,
+ cfg: cfg,
+ logger: logger,
+ }
+
+ return client
+}
diff --git a/internal/services/toolkit/client/completion.go b/internal/services/toolkit/client/completion.go
index f4c13259..94f0c391 100644
--- a/internal/services/toolkit/client/completion.go
+++ b/internal/services/toolkit/client/completion.go
@@ -21,8 +21,8 @@ import (
// 1. The full chat history sent to the language model (including any tool call results).
// 2. The incremental chat history visible to the user (including tool call results and assistant responses).
// 3. An error, if any occurred during the process.
-func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) {
- openaiChatHistory, inappChatHistory, err := a.ChatCompletionStream(ctx, nil, "", modelSlug, messages, llmProvider)
+func (a *AIClient) ChatCompletionV1(ctx context.Context, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) {
+ openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV1(ctx, nil, "", modelSlug, messages, llmProvider)
if err != nil {
return nil, nil, err
}
@@ -50,11 +50,11 @@ func (a *AIClient) ChatCompletion(ctx context.Context, modelSlug string, message
// - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop.
// - If no tool calls are needed, it appends the assistant's response and exits the loop.
// - Finally, it returns the updated chat histories and any error encountered.
-func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) {
+func (a *AIClient) ChatCompletionStreamV1(ctx context.Context, callbackStream chatv1.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages responses.ResponseInputParam, llmProvider *models.LLMProviderConfig) (responses.ResponseInputParam, []chatv1.Message, error) {
openaiChatHistory := responses.ResponseNewParamsInputUnion{OfInputItemList: messages}
inappChatHistory := []chatv1.Message{}
- streamHandler := handler.NewStreamHandler(callbackStream, conversationId, modelSlug)
+ streamHandler := handler.NewStreamHandlerV1(callbackStream, conversationId, modelSlug)
streamHandler.SendInitialization()
defer func() {
@@ -62,7 +62,7 @@ func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chat
}()
oaiClient := a.GetOpenAIClient(llmProvider)
- params := getDefaultParams(modelSlug, openaiChatHistory, a.toolCallHandler.Registry)
+ params := getDefaultParams(modelSlug, a.toolCallHandler.Registry)
for {
params.Input = openaiChatHistory
@@ -93,20 +93,20 @@ func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chat
return nil, nil, err
}
- // 把 openai 的 response 记录下来,然后执行调用(如果有)
+ // Record the openai response, then execute the calls (if any)
for _, item := range openaiOutput {
if item.Type == "message" && item.Role == "assistant" {
appendAssistantTextResponse(&openaiChatHistory, &inappChatHistory, item)
}
}
- // 执行调用(如果有),返回增量数据
+ // Execute the calls (if any), return incremental data
openaiToolHistory, inappToolHistory, err := a.toolCallHandler.HandleToolCalls(ctx, openaiOutput, streamHandler)
if err != nil {
return nil, nil, err
}
- // 把工具调用结果记录下来
+ // Record the tool call results
if len(openaiToolHistory.OfInputItemList) > 0 {
openaiChatHistory.OfInputItemList = append(openaiChatHistory.OfInputItemList, openaiToolHistory.OfInputItemList...)
inappChatHistory = append(inappChatHistory, inappToolHistory...)
@@ -116,10 +116,5 @@ func (a *AIClient) ChatCompletionStream(ctx context.Context, callbackStream chat
}
}
- ptrChatHistory := make([]*chatv1.Message, len(inappChatHistory))
- for i := range inappChatHistory {
- ptrChatHistory[i] = &inappChatHistory[i]
- }
-
return openaiChatHistory.OfInputItemList, inappChatHistory, nil
}
diff --git a/internal/services/toolkit/client/completion_v2.go b/internal/services/toolkit/client/completion_v2.go
new file mode 100644
index 00000000..e7e5b7b2
--- /dev/null
+++ b/internal/services/toolkit/client/completion_v2.go
@@ -0,0 +1,188 @@
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "paperdebugger/internal/models"
+ "paperdebugger/internal/services/toolkit/handler"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "github.com/openai/openai-go/v3"
+)
+
+// define []openai.ChatCompletionMessageParamUnion as OpenAIChatHistory
+
+// ChatCompletion orchestrates a chat completion process with a language model (e.g., GPT), handling tool calls and message history management.
+//
+// Parameters:
+//
+// ctx: The context for controlling cancellation and deadlines.
+// modelSlug: The language model to use for completion (e.g., GPT-3.5, GPT-4).
+// messages: The full chat history (as input) to send to the language model.
+//
+// Returns:
+// 1. The full chat history sent to the language model (including any tool call results).
+// 2. The incremental chat history visible to the user (including tool call results and assistant responses).
+// 3. An error, if any occurred during the process.
+func (a *AIClientV2) ChatCompletionV2(ctx context.Context, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
+ openaiChatHistory, inappChatHistory, err := a.ChatCompletionStreamV2(ctx, nil, "", modelSlug, messages, llmProvider)
+ if err != nil {
+ return nil, nil, err
+ }
+ return openaiChatHistory, inappChatHistory, nil
+}
+
+// ChatCompletionStream orchestrates a streaming chat completion process with a language model (e.g., GPT), handling tool calls, message history management, and real-time streaming of responses to the client.
+//
+// Parameters:
+//
+// ctx: The context for controlling cancellation and deadlines.
+// callbackStream: The gRPC stream to which incremental responses are sent in real time.
+// conversationId: The unique identifier for the conversation session in PaperDebugger.
+// languageModel: The language model to use for completion (e.g., GPT-3.5, GPT-4).
+// messages: The full chat history (as input) to send to the language model.
+//
+// Returns: (same as ChatCompletion)
+// 1. The full chat history sent to the language model (including any tool call results).
+// 2. The incremental chat history visible to the user (including tool call results and assistant responses).
+// 3. An error, if any occurred during the process. (However, in the streaming mode, the error is not returned, but sending by callbackStream)
+//
+// This function works as follows: (same as ChatCompletion)
+// - It initializes the chat history for the language model and the user, and sets up a stream handler for real-time updates.
+// - It repeatedly sends the current chat history to the language model, receives streaming responses, and forwards them to the client as they arrive.
+// - If tool calls are required, it handles them and appends the results to the chat history, then continues the loop.
+// - If no tool calls are needed, it appends the assistant's response and exits the loop.
+// - Finally, it returns the updated chat histories and any error encountered.
+func (a *AIClientV2) ChatCompletionStreamV2(ctx context.Context, callbackStream chatv2.ChatService_CreateConversationMessageStreamServer, conversationId string, modelSlug string, messages OpenAIChatHistory, llmProvider *models.LLMProviderConfig) (OpenAIChatHistory, AppChatHistory, error) {
+ openaiChatHistory := messages
+ inappChatHistory := AppChatHistory{}
+
+ streamHandler := handler.NewStreamHandlerV2(callbackStream, conversationId, modelSlug)
+
+ streamHandler.SendInitialization()
+ defer func() {
+ streamHandler.SendFinalization()
+ }()
+
+ oaiClient := a.GetOpenAIClient(llmProvider)
+ params := getDefaultParamsV2(modelSlug, a.toolCallHandler.Registry)
+
+ for {
+ params.Messages = openaiChatHistory
+ // var openaiOutput OpenAIChatHistory
+ stream := oaiClient.Chat.Completions.NewStreaming(context.Background(), params)
+
+ reasoning_content := ""
+ answer_content := ""
+ answer_content_id := ""
+ is_answering := false
+ tool_info := map[int]map[string]string{}
+ toolCalls := []openai.FinishedChatCompletionToolCall{}
+ for stream.Next() {
+ // time.Sleep(5000 * time.Millisecond) // DEBUG POINT: change this to test in a slow mode
+ chunk := stream.Current()
+
+ if len(chunk.Choices) == 0 {
+ // Handle usage information
+ // fmt.Printf("Usage: %+v\n", chunk.Usage)
+ continue
+ }
+
+ delta := chunk.Choices[0].Delta
+
+ if field, ok := delta.JSON.ExtraFields["reasoning_content"]; ok && field.Raw() != "null" {
+ var s string
+ err := json.Unmarshal([]byte(field.Raw()), &s)
+ if err != nil {
+ // fmt.Println(err)
+ }
+ reasoning_content += s
+ // fmt.Print(s)
+ } else {
+ if !is_answering {
+ is_answering = true
+ // fmt.Println("\n\n========== Response ==========")
+ streamHandler.HandleAddedItem(chunk)
+ }
+
+ if delta.Content != "" {
+ answer_content += delta.Content
+ answer_content_id = chunk.ID
+ streamHandler.HandleTextDelta(chunk)
+ // fmt.Print(delta.Content)
+ }
+
+ if len(delta.ToolCalls) > 0 {
+ for _, toolCall := range delta.ToolCalls {
+ index := int(toolCall.Index)
+
+ // haskey(tool_info, index)
+ if _, ok := tool_info[index]; !ok {
+ // fmt.Printf("Prepare tool %s\n", toolCall.Function.Name)
+ tool_info[index] = map[string]string{}
+ streamHandler.HandleAddedItem(chunk)
+ }
+
+ if toolCall.ID != "" {
+ tool_info[index]["id"] = tool_info[index]["id"] + toolCall.ID
+ }
+
+ if toolCall.Function.Name != "" {
+ tool_info[index]["name"] = tool_info[index]["name"] + toolCall.Function.Name
+ }
+
+ if toolCall.Function.Arguments != "" {
+ tool_info[index]["arguments"] = tool_info[index]["arguments"] + toolCall.Function.Arguments
+ // check if arguments can be unmarshaled, if not, means the arguments are not ready
+ var dummy map[string]any
+ if err := json.Unmarshal([]byte(tool_info[index]["arguments"]), &dummy); err == nil {
+ streamHandler.HandleToolArgPreparedDoneItem(index, tool_info[index]["id"], tool_info[index]["name"], tool_info[index]["arguments"])
+ toolCalls = append(toolCalls, openai.FinishedChatCompletionToolCall{
+ Index: index,
+ ID: tool_info[index]["id"],
+ ChatCompletionMessageFunctionToolCallFunction: openai.ChatCompletionMessageFunctionToolCallFunction{
+ Name: tool_info[index]["name"],
+ Arguments: tool_info[index]["arguments"],
+ },
+ })
+ }
+ }
+ }
+ }
+ }
+
+ if chunk.Choices[0].FinishReason != "" {
+ // fmt.Printf("FinishReason: %s\n", chunk.Choices[0].FinishReason)
+ // answer_content += chunk.Choices[0].Delta.Content
+ // fmt.Printf("answer_content: %s\n", answer_content)
+ streamHandler.HandleTextDoneItem(chunk, answer_content)
+ break
+ }
+ }
+
+ if err := stream.Err(); err != nil {
+ return nil, nil, err
+ }
+
+ if answer_content != "" {
+ appendAssistantTextResponseV2(&openaiChatHistory, &inappChatHistory, answer_content, answer_content_id, modelSlug)
+ }
+
+ // Execute the calls (if any), return incremental data
+ openaiToolHistory, inappToolHistory, err := a.toolCallHandler.HandleToolCallsV2(ctx, toolCalls, streamHandler)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // // Record the tool call results
+ if len(openaiToolHistory) > 0 {
+ openaiChatHistory = append(openaiChatHistory, openaiToolHistory...)
+ inappChatHistory = append(inappChatHistory, inappToolHistory...)
+ } else {
+ // response stream is finished, if there is no tool call, then break
+ break
+ }
+ }
+
+ return openaiChatHistory, inappChatHistory, nil
+}
diff --git a/internal/services/toolkit/client/get_conversation_title.go b/internal/services/toolkit/client/get_conversation_title.go
index 283e689f..bd89040f 100644
--- a/internal/services/toolkit/client/get_conversation_title.go
+++ b/internal/services/toolkit/client/get_conversation_title.go
@@ -29,7 +29,7 @@ func (a *AIClient) GetConversationTitle(ctx context.Context, inappChatHistory []
message := strings.Join(messages, "\n")
message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. Give me the title only, no other text including any other words.", message)
- _, resp, err := a.ChatCompletion(ctx, "gpt-4.1-mini", responses.ResponseInputParam{
+ _, resp, err := a.ChatCompletionV1(ctx, "gpt-4.1-mini", responses.ResponseInputParam{
{
OfInputMessage: &responses.ResponseInputItemMessageParam{
Role: "system",
diff --git a/internal/services/toolkit/client/get_conversation_title_test.go b/internal/services/toolkit/client/get_conversation_title_test.go
index 0afb6e0f..1a19f4c5 100644
--- a/internal/services/toolkit/client/get_conversation_title_test.go
+++ b/internal/services/toolkit/client/get_conversation_title_test.go
@@ -61,7 +61,7 @@ func TestGetConversationTitle_Case1(t *testing.T) {
},
},
},
- })
+ }, nil)
fmt.Println("Generated title:", title)
assert.NoError(t, err)
@@ -133,7 +133,7 @@ func TestGetConversationTitle_Case2(t *testing.T) {
},
},
},
- })
+ }, nil)
fmt.Println("Generated title:", title)
assert.NoError(t, err)
@@ -201,7 +201,7 @@ func TestGetConversationTitle_Case3(t *testing.T) {
},
},
},
- })
+ }, nil)
fmt.Println("Generated title:", title)
assert.NoError(t, err)
diff --git a/internal/services/toolkit/client/get_conversation_title_v2.go b/internal/services/toolkit/client/get_conversation_title_v2.go
new file mode 100644
index 00000000..6c92f0c2
--- /dev/null
+++ b/internal/services/toolkit/client/get_conversation_title_v2.go
@@ -0,0 +1,53 @@
+package client
+
+// TODO: This file should not place in the client package.
+import (
+ "context"
+ "fmt"
+ "paperdebugger/internal/models"
+ "strings"
+
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/samber/lo"
+)
+
+func (a *AIClientV2) GetConversationTitleV2(ctx context.Context, inappChatHistory []*chatv2.Message, llmProvider *models.LLMProviderConfig) (string, error) {
+ messages := lo.Map(inappChatHistory, func(message *chatv2.Message, _ int) string {
+ if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_Assistant); ok {
+ return fmt.Sprintf("Assistant: %s", message.Payload.GetAssistant().GetContent())
+ }
+ if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_User); ok {
+ return fmt.Sprintf("User: %s", message.Payload.GetUser().GetContent())
+ }
+ if _, ok := message.Payload.MessageType.(*chatv2.MessagePayload_ToolCall); ok {
+ return fmt.Sprintf("Tool '%s' called", message.Payload.GetToolCall().GetName())
+ }
+ return ""
+ })
+ message := strings.Join(messages, "\n")
+ message = fmt.Sprintf("%s\nBased on above conversation, generate a short, clear, and descriptive title that summarizes the main topic or purpose of the discussion. The title should be concise, specific, and use natural language. Avoid vague or generic titles. Use abbreviation and short words if possible. Use 3-5 words if possible. Give me the title only, no other text including any other words.", message)
+
+ _, resp, err := a.ChatCompletionV2(ctx, "gpt-5-nano", OpenAIChatHistory{
+ openai.SystemMessage("You are a helpful assistant that generates a title for a conversation."),
+ openai.UserMessage(message),
+ }, llmProvider)
+ if err != nil {
+ return "", err
+ }
+
+ if len(resp) == 0 {
+ return "Untitled", nil
+ }
+
+ title := strings.TrimSpace(resp[0].Payload.GetAssistant().GetContent())
+ title = strings.TrimLeft(title, "\"")
+ title = strings.TrimRight(title, "\"")
+ title = strings.TrimSpace(title)
+ if title == "" {
+ return "Untitled", nil
+ }
+
+ return title, nil
+}
diff --git a/internal/services/toolkit/client/types.go b/internal/services/toolkit/client/types.go
new file mode 100644
index 00000000..933cc186
--- /dev/null
+++ b/internal/services/toolkit/client/types.go
@@ -0,0 +1,10 @@
package client

import (
	chatv2 "paperdebugger/pkg/gen/api/chat/v2"

	"github.com/openai/openai-go/v3"
)

// OpenAIChatHistory is the chat transcript in the OpenAI Chat Completions
// wire format, as sent to and received from the model API.
type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion

// AppChatHistory is the in-app (protobuf v2) view of the same transcript,
// as surfaced to PaperDebugger clients.
type AppChatHistory []chatv2.Message
diff --git a/internal/services/toolkit/client/utils.go b/internal/services/toolkit/client/utils.go
index 39b24d87..9df994fa 100644
--- a/internal/services/toolkit/client/utils.go
+++ b/internal/services/toolkit/client/utils.go
@@ -6,10 +6,17 @@ This file contains utility functions for the client package. (Mainly miscellaneo
It is used to append assistant responses to both OpenAI and in-app chat histories, and to create response items for chat interactions.
*/
import (
+ "context"
+ "paperdebugger/internal/libs/cfg"
+ "paperdebugger/internal/libs/db"
+ "paperdebugger/internal/libs/logger"
+ "paperdebugger/internal/services"
"paperdebugger/internal/services/toolkit/registry"
+ "paperdebugger/internal/services/toolkit/tools/xtramcp"
chatv1 "paperdebugger/pkg/gen/api/chat/v1"
"github.com/openai/openai-go/v2"
+ openaiv2 "github.com/openai/openai-go/v2"
"github.com/openai/openai-go/v2/responses"
"github.com/samber/lo"
)
@@ -43,7 +50,7 @@ func appendAssistantTextResponse(openaiChatHistory *responses.ResponseNewParamsI
// getDefaultParams constructs the default parameters for a chat completion request.
// The tool registry is managed centrally by the registry package.
// The chat history is constructed manually, so Store must be set to false.
-func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsInputUnion, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams {
+func getDefaultParams(modelSlug string, toolRegistry *registry.ToolRegistry) responses.ResponseNewParams {
var reasoningModels = []string{
"gpt-5",
"gpt-5-mini",
@@ -60,17 +67,63 @@ func getDefaultParams(modelSlug string, chatHistory responses.ResponseNewParamsI
return responses.ResponseNewParams{
Model: modelSlug,
Tools: toolRegistry.GetTools(),
- Input: chatHistory,
- Store: openai.Bool(false),
+ Store: openaiv2.Bool(false),
}
}
return responses.ResponseNewParams{
Model: modelSlug,
- Temperature: openai.Float(0.7),
- MaxOutputTokens: openai.Int(4000), // DEBUG POINT: change this to test the frontend handler
- Tools: toolRegistry.GetTools(), // 工具注册由 registry 统一管理
- Input: chatHistory,
- Store: openai.Bool(false), // Must set to false, because we are construct our own chat history.
+ Temperature: openaiv2.Float(0.7),
+ MaxOutputTokens: openaiv2.Int(4000), // DEBUG POINT: change this to test the frontend handler
+ Tools: toolRegistry.GetTools(), // Tool registration is managed centrally by the registry
+ Store: openaiv2.Bool(false), // Must set to false, because we are construct our own chat history.
}
}
+
+func CheckOpenAIWorks(oaiClient openai.Client, logger *logger.Logger) {
+ logger.Info("[AI Client] checking if openai client works")
+ chatCompletion, err := oaiClient.Chat.Completions.New(context.TODO(), openai.ChatCompletionNewParams{
+ Messages: []openai.ChatCompletionMessageParamUnion{
+ openai.UserMessage("Say 'openai client works'"),
+ },
+ Model: openai.ChatModelGPT4o,
+ })
+ if err != nil {
+ logger.Errorf("[AI Client] openai client does not work: %v", err)
+ return
+ }
+ logger.Info("[AI Client] openai client works", "response", chatCompletion.Choices[0].Message.Content)
+}
+
// initializeToolkit creates and initializes the tool registry with XtraMCP tools.
// This is shared between AIClient and AIClientV2 to avoid code duplication.
//
// On MCP initialization or load failure the registry is still returned
// (without the dynamically loaded tools), so client startup degrades instead
// of failing outright.
func initializeToolkit(
	db *db.DB,
	projectService *services.ProjectService,
	cfg *cfg.Cfg,
	logger *logger.Logger,
) *registry.ToolRegistry {
	toolRegistry := registry.NewToolRegistry()

	// Load tools dynamically from backend
	xtraMCPLoader := xtramcp.NewXtraMCPLoader(db, projectService, cfg.XtraMCPURI)

	// Initialize the MCP session first and log the session ID.
	sessionID, err := xtraMCPLoader.InitializeMCP()
	if err != nil {
		logger.Errorf("[AI Client] Failed to initialize XtraMCP session: %v", err)
		// TODO: Fallback to static tools or exit?
	} else {
		logger.Info("[AI Client] XtraMCP session initialized", "sessionID", sessionID)

		// Dynamically load all tools from the XtraMCP backend.
		err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry)
		if err != nil {
			logger.Errorf("[AI Client] Failed to load XtraMCP tools: %v", err)
		} else {
			logger.Info("[AI Client] Successfully loaded XtraMCP tools")
		}
	}

	return toolRegistry
}
diff --git a/internal/services/toolkit/client/utils_v2.go b/internal/services/toolkit/client/utils_v2.go
new file mode 100644
index 00000000..cb197db6
--- /dev/null
+++ b/internal/services/toolkit/client/utils_v2.go
@@ -0,0 +1,171 @@
+package client
+
+/*
+This file contains utility functions for the client package. (Mainly miscellaneous helpers)
+
+It is used to append assistant responses to both OpenAI and in-app chat histories, and to create response items for chat interactions.
+*/
+import (
+ "context"
+ "fmt"
+ "paperdebugger/internal/libs/cfg"
+ "paperdebugger/internal/libs/db"
+ "paperdebugger/internal/libs/logger"
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit/registry"
+ filetools "paperdebugger/internal/services/toolkit/tools/files"
+ latextools "paperdebugger/internal/services/toolkit/tools/latex"
+ "paperdebugger/internal/services/toolkit/tools/xtramcp"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+ "strings"
+ "time"
+
+ openaiv3 "github.com/openai/openai-go/v3"
+)
+
+func appendAssistantTextResponseV2(openaiChatHistory *OpenAIChatHistory, inappChatHistory *AppChatHistory, content string, contentId string, modelSlug string) {
+ *openaiChatHistory = append(*openaiChatHistory, openaiv3.ChatCompletionMessageParamUnion{
+ OfAssistant: &openaiv3.ChatCompletionAssistantMessageParam{
+ Role: "assistant",
+ Content: openaiv3.ChatCompletionAssistantMessageParamContentUnion{
+ OfArrayOfContentParts: []openaiv3.ChatCompletionAssistantMessageParamContentArrayOfContentPartUnion{
+ {
+ OfText: &openaiv3.ChatCompletionContentPartTextParam{
+ Type: "text",
+ Text: content,
+ },
+ },
+ },
+ },
+ },
+ })
+
+ *inappChatHistory = append(*inappChatHistory, chatv2.Message{
+ MessageId: fmt.Sprintf("openai_%s", contentId),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_Assistant{
+ Assistant: &chatv2.MessageTypeAssistant{
+ Content: content,
+ ModelSlug: modelSlug,
+ },
+ },
+ },
+ Timestamp: time.Now().Unix(),
+ })
+}
+
+func getDefaultParamsV2(modelSlug string, toolRegistry *registry.ToolRegistryV2) openaiv3.ChatCompletionNewParams {
+ var reasoningModels = []string{
+ "gpt-5",
+ "gpt-5-mini",
+ "gpt-5-nano",
+ "gpt-5-chat-latest",
+ "o4-mini",
+ "o3-mini",
+ "o3",
+ "o1-mini",
+ "o1",
+ "codex-mini-latest",
+ }
+ for _, model := range reasoningModels {
+ if strings.Contains(modelSlug, model) {
+ return openaiv3.ChatCompletionNewParams{
+ Model: modelSlug,
+ MaxCompletionTokens: openaiv3.Int(4000),
+ Tools: toolRegistry.GetTools(),
+ ParallelToolCalls: openaiv3.Bool(true),
+ Store: openaiv3.Bool(false),
+ }
+ }
+ }
+
+ return openaiv3.ChatCompletionNewParams{
+ Model: modelSlug,
+ Temperature: openaiv3.Float(0.7),
+ MaxCompletionTokens: openaiv3.Int(4000), // DEBUG POINT: change this to test the frontend handler
+ Tools: toolRegistry.GetTools(), // Tool registration is managed centrally by the registry
+ ParallelToolCalls: openaiv3.Bool(true),
+ Store: openaiv3.Bool(false), // Must set to false, because we are construct our own chat history.
+ }
+}
+
+func CheckOpenAIWorksV2(oaiClient openaiv3.Client, logger *logger.Logger) {
+ logger.Info("[AI Client V2] checking if openai client works")
+ chatCompletion, err := oaiClient.Chat.Completions.New(context.TODO(), openaiv3.ChatCompletionNewParams{
+ Messages: []openaiv3.ChatCompletionMessageParamUnion{
+ openaiv3.UserMessage("Say 'openai client works'"),
+ },
+ Model: "openai/gpt-5-nano",
+ })
+ if err != nil {
+ logger.Errorf("[AI Client V2] openai client does not work: %v", err)
+ return
+ }
+ logger.Info("[AI Client V2] openai client works", "response", chatCompletion.Choices[0].Message.Content)
+}
+
// initializeToolkitV2 builds the v2 tool registry: static file and LaTeX
// tools are registered first, then additional tools are loaded dynamically
// from the XtraMCP backend. On MCP initialization or load failure the
// registry is still returned with only the static tools, so client startup
// degrades instead of failing outright.
func initializeToolkitV2(
	db *db.DB,
	projectService *services.ProjectService,
	cfg *cfg.Cfg,
	logger *logger.Logger,
) *registry.ToolRegistryV2 {
	toolRegistry := registry.NewToolRegistryV2()

	// Register static file tools (create/delete don't need ProjectService - they're placeholder only)
	toolRegistry.Register("create_file", filetools.CreateFileToolDescriptionV2, filetools.CreateFileTool)
	toolRegistry.Register("delete_file", filetools.DeleteFileToolDescriptionV2, filetools.DeleteFileTool)
	toolRegistry.Register("create_folder", filetools.CreateFolderToolDescriptionV2, filetools.CreateFolderTool)
	toolRegistry.Register("delete_folder", filetools.DeleteFolderToolDescriptionV2, filetools.DeleteFolderTool)

	// Register file tools with ProjectService injection
	readFileTool := filetools.NewReadFileTool(projectService)
	toolRegistry.Register("read_file", filetools.ReadFileToolDescriptionV2, readFileTool.Call)

	readFolderTool := filetools.NewReadFolderTool(projectService)
	toolRegistry.Register("read_folder", filetools.ReadFolderToolDescriptionV2, readFolderTool.Call)

	searchStringTool := filetools.NewSearchStringTool(projectService)
	toolRegistry.Register("search_string", filetools.SearchStringToolDescriptionV2, searchStringTool.Call)

	searchFileTool := filetools.NewSearchFileTool(projectService)
	toolRegistry.Register("search_file", filetools.SearchFileToolDescriptionV2, searchFileTool.Call)

	logger.Info("[AI Client V2] Registered static file tools", "count", 8)

	// Register LaTeX tools with ProjectService injection
	documentStructureTool := latextools.NewDocumentStructureTool(projectService)
	toolRegistry.Register("get_document_structure", latextools.GetDocumentStructureToolDescriptionV2, documentStructureTool.Call)

	toolRegistry.Register("locate_section", latextools.LocateSectionToolDescriptionV2, latextools.LocateSectionTool)

	readSectionSourceTool := latextools.NewReadSectionSourceTool(projectService)
	toolRegistry.Register("read_section_source", latextools.ReadSectionSourceToolDescriptionV2, readSectionSourceTool.Call)

	readSourceLineRangeTool := latextools.NewReadSourceLineRangeTool(projectService)
	toolRegistry.Register("read_source_line_range", latextools.ReadSourceLineRangeToolDescriptionV2, readSourceLineRangeTool.Call)

	logger.Info("[AI Client V2] Registered static LaTeX tools", "count", 4)

	// Load tools dynamically from backend
	xtraMCPLoader := xtramcp.NewXtraMCPLoaderV2(db, projectService, cfg.XtraMCPURI)

	// Initialize the MCP session first and log the session ID.
	sessionID, err := xtraMCPLoader.InitializeMCP()
	if err != nil {
		logger.Errorf("[AI Client V2] Failed to initialize XtraMCP session: %v", err)
		// TODO: Fallback to static tools or exit?
	} else {
		logger.Info("[AI Client V2] XtraMCP session initialized", "sessionID", sessionID)

		// Dynamically load all tools from the XtraMCP backend.
		err = xtraMCPLoader.LoadToolsFromBackend(toolRegistry)
		if err != nil {
			logger.Errorf("[AI Client V2] Failed to load XtraMCP tools: %v", err)
		} else {
			logger.Info("[AI Client V2] Successfully loaded XtraMCP tools")
		}
	}

	return toolRegistry
}
diff --git a/internal/services/toolkit/handler/stream.go b/internal/services/toolkit/handler/stream.go
index 96ca6668..87328dad 100644
--- a/internal/services/toolkit/handler/stream.go
+++ b/internal/services/toolkit/handler/stream.go
@@ -7,25 +7,39 @@ import (
"github.com/openai/openai-go/v2/responses"
)
-type StreamHandler struct {
+type StreamHandler interface {
+ SendInitialization()
+ HandleAddedItem(responses.ResponseStreamEventUnion)
+ HandleDoneItem(responses.ResponseStreamEventUnion)
+ HandleTextDelta(responses.ResponseStreamEventUnion)
+ SendIncompleteIndicator(reason string, responseId string)
+ SendFinalization()
+ SendToolCallBegin(toolCall responses.ResponseFunctionToolCall)
+ SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error)
+}
+
+// Compile-time check: ensure StreamHandlerV1 implements StreamHandler interface
+var _ StreamHandler = (*StreamHandlerV1)(nil)
+
+type StreamHandlerV1 struct {
callbackStream chatv1.ChatService_CreateConversationMessageStreamServer
conversationId string
modelSlug string
}
-func NewStreamHandler(
+func NewStreamHandlerV1(
callbackStream chatv1.ChatService_CreateConversationMessageStreamServer,
conversationId string,
modelSlug string,
-) *StreamHandler {
- return &StreamHandler{
+) StreamHandler {
+ return &StreamHandlerV1{
callbackStream: callbackStream,
conversationId: conversationId,
modelSlug: modelSlug,
}
}
-func (h *StreamHandler) SendInitialization() {
+func (h *StreamHandlerV1) SendInitialization() {
if h.callbackStream == nil {
return
}
@@ -41,11 +55,12 @@ func (h *StreamHandler) SendInitialization() {
})
}
-func (h *StreamHandler) HandleAddedItem(chunk responses.ResponseStreamEventUnion) {
+func (h *StreamHandlerV1) HandleAddedItem(chunk responses.ResponseStreamEventUnion) {
if h.callbackStream == nil {
return
}
- if chunk.Item.Type == "message" {
+ switch chunk.Item.Type {
+ case "message":
h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{
ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamPartBegin{
StreamPartBegin: &chatv1.StreamPartBegin{
@@ -58,7 +73,7 @@ func (h *StreamHandler) HandleAddedItem(chunk responses.ResponseStreamEventUnion
},
},
})
- } else if chunk.Item.Type == "function_call" {
+ case "function_call":
h.callbackStream.Send(&chatv1.CreateConversationMessageStreamResponse{
ResponsePayload: &chatv1.CreateConversationMessageStreamResponse_StreamPartBegin{
StreamPartBegin: &chatv1.StreamPartBegin{
@@ -76,7 +91,7 @@ func (h *StreamHandler) HandleAddedItem(chunk responses.ResponseStreamEventUnion
}
}
-func (h *StreamHandler) HandleDoneItem(chunk responses.ResponseStreamEventUnion) {
+func (h *StreamHandlerV1) HandleDoneItem(chunk responses.ResponseStreamEventUnion) {
if h.callbackStream == nil {
return
}
@@ -131,7 +146,7 @@ func (h *StreamHandler) HandleDoneItem(chunk responses.ResponseStreamEventUnion)
}
}
-func (h *StreamHandler) HandleTextDelta(chunk responses.ResponseStreamEventUnion) {
+func (h *StreamHandlerV1) HandleTextDelta(chunk responses.ResponseStreamEventUnion) {
if h.callbackStream == nil {
return
}
@@ -145,7 +160,7 @@ func (h *StreamHandler) HandleTextDelta(chunk responses.ResponseStreamEventUnion
})
}
-func (h *StreamHandler) SendIncompleteIndicator(reason string, responseId string) {
+func (h *StreamHandlerV1) SendIncompleteIndicator(reason string, responseId string) {
if h.callbackStream == nil {
return
}
@@ -159,7 +174,7 @@ func (h *StreamHandler) SendIncompleteIndicator(reason string, responseId string
})
}
-func (h *StreamHandler) SendFinalization() {
+func (h *StreamHandlerV1) SendFinalization() {
if h.callbackStream == nil {
return
}
@@ -172,7 +187,7 @@ func (h *StreamHandler) SendFinalization() {
})
}
-func (h *StreamHandler) SendToolCallBegin(toolCall responses.ResponseFunctionToolCall) {
+func (h *StreamHandlerV1) SendToolCallBegin(toolCall responses.ResponseFunctionToolCall) {
if h.callbackStream == nil {
return
}
@@ -193,7 +208,7 @@ func (h *StreamHandler) SendToolCallBegin(toolCall responses.ResponseFunctionToo
})
}
-func (h *StreamHandler) SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error) {
+func (h *StreamHandlerV1) SendToolCallEnd(toolCall responses.ResponseFunctionToolCall, result string, err error) {
if h.callbackStream == nil {
return
}
diff --git a/internal/services/toolkit/handler/stream_v2.go b/internal/services/toolkit/handler/stream_v2.go
new file mode 100644
index 00000000..f95ca82d
--- /dev/null
+++ b/internal/services/toolkit/handler/stream_v2.go
@@ -0,0 +1,232 @@
+package handler
+
+import (
+ "fmt"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+
+ "github.com/openai/openai-go/v3"
+)
+
+type StreamHandlerV2 struct {
+ callbackStream chatv2.ChatService_CreateConversationMessageStreamServer
+ conversationId string
+ modelSlug string
+}
+
+func NewStreamHandlerV2(
+ callbackStream chatv2.ChatService_CreateConversationMessageStreamServer,
+ conversationId string,
+ modelSlug string,
+) *StreamHandlerV2 {
+ return &StreamHandlerV2{
+ callbackStream: callbackStream,
+ conversationId: conversationId,
+ modelSlug: modelSlug,
+ }
+}
+
+func (h *StreamHandlerV2) SendInitialization() {
+ if h.callbackStream == nil {
+ return
+ }
+ streamInit := &chatv2.StreamInitialization{
+ ConversationId: h.conversationId,
+ ModelSlug: h.modelSlug,
+ }
+
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamInitialization{
+ StreamInitialization: streamInit,
+ },
+ })
+}
+
+func (h *StreamHandlerV2) HandleAddedItem(chunk openai.ChatCompletionChunk) {
+	if h.callbackStream == nil || len(chunk.Choices) == 0 {
+ return
+ }
+ switch chunk.Choices[0].Delta.Role {
+ case "assistant":
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{
+ StreamPartBegin: &chatv2.StreamPartBegin{
+ MessageId: chunk.ID,
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_Assistant{
+ Assistant: &chatv2.MessageTypeAssistant{},
+ },
+ },
+ },
+ },
+ })
+ // default:
+ // h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ // ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{
+ // StreamPartBegin: &chatv2.StreamPartBegin{
+ // MessageId: chunk.ID,
+ // Payload: &chatv2.MessagePayload{
+ // MessageType: &chatv2.MessagePayload_Unknown{
+ // Unknown: &chatv2.MessageTypeUnknown{
+ // Description: fmt.Sprintf("%v", chunk.Choices[0].Delta.Role),
+ // },
+ // },
+ // },
+ // },
+ // },
+ // })
+ }
+ toolCalls := chunk.Choices[0].Delta.ToolCalls
+ for _, toolCall := range toolCalls {
+ if toolCall.Function.Name == "" {
+ continue
+ }
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{
+ StreamPartBegin: &chatv2.StreamPartBegin{
+ MessageId: fmt.Sprintf("toolCallPrepareArguments[%d]_%s", toolCall.Index, toolCall.ID),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{
+ ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{
+ Name: toolCall.Function.Name,
+ Args: "",
+ },
+ },
+ },
+ },
+ },
+ })
+ }
+}
+
+func (h *StreamHandlerV2) HandleTextDoneItem(chunk openai.ChatCompletionChunk, content string) {
+ if h.callbackStream == nil {
+ return
+ }
+
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{
+ StreamPartEnd: &chatv2.StreamPartEnd{
+ MessageId: chunk.ID,
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_Assistant{
+ Assistant: &chatv2.MessageTypeAssistant{
+ Content: content,
+ ModelSlug: h.modelSlug,
+ },
+ },
+ },
+ },
+ },
+ })
+}
+
+func (h *StreamHandlerV2) HandleToolArgPreparedDoneItem(index int, id string, name string, args string) {
+ if h.callbackStream == nil {
+ return
+ }
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{
+ StreamPartEnd: &chatv2.StreamPartEnd{
+ MessageId: fmt.Sprintf("toolCallPrepareArguments[%d]_%s", index, id),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_ToolCallPrepareArguments{
+ ToolCallPrepareArguments: &chatv2.MessageTypeToolCallPrepareArguments{
+ Name: name,
+ Args: args,
+ },
+ },
+ },
+ },
+ },
+ })
+}
+
+func (h *StreamHandlerV2) HandleTextDelta(chunk openai.ChatCompletionChunk) {
+	if h.callbackStream == nil || len(chunk.Choices) == 0 {
+ return
+ }
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_MessageChunk{
+ MessageChunk: &chatv2.MessageChunk{
+ MessageId: chunk.ID,
+ Delta: chunk.Choices[0].Delta.Content,
+ },
+ },
+ })
+}
+
+func (h *StreamHandlerV2) SendIncompleteIndicator(reason string, responseId string) {
+ if h.callbackStream == nil {
+ return
+ }
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_IncompleteIndicator{
+ IncompleteIndicator: &chatv2.IncompleteIndicator{
+ Reason: reason,
+ ResponseId: responseId,
+ },
+ },
+ })
+}
+
+func (h *StreamHandlerV2) SendFinalization() {
+ if h.callbackStream == nil {
+ return
+ }
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamFinalization{
+ StreamFinalization: &chatv2.StreamFinalization{
+ ConversationId: h.conversationId,
+ },
+ },
+ })
+}
+
+func (h *StreamHandlerV2) SendToolCallBegin(toolCall openai.FinishedChatCompletionToolCall) {
+ if h.callbackStream == nil {
+ return
+ }
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartBegin{
+ StreamPartBegin: &chatv2.StreamPartBegin{
+ MessageId: fmt.Sprintf("tool[%d]_%s", toolCall.Index, toolCall.ID),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_ToolCall{
+ ToolCall: &chatv2.MessageTypeToolCall{
+ Name: toolCall.Name,
+ Args: toolCall.Arguments,
+ },
+ },
+ },
+ },
+ },
+ })
+}
+
+func (h *StreamHandlerV2) SendToolCallEnd(toolCall openai.FinishedChatCompletionToolCall, result string, err error) {
+ if h.callbackStream == nil {
+ return
+ }
+ h.callbackStream.Send(&chatv2.CreateConversationMessageStreamResponse{
+ ResponsePayload: &chatv2.CreateConversationMessageStreamResponse_StreamPartEnd{
+ StreamPartEnd: &chatv2.StreamPartEnd{
+ MessageId: fmt.Sprintf("tool[%d]_%s", toolCall.Index, toolCall.ID),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_ToolCall{
+ ToolCall: &chatv2.MessageTypeToolCall{
+ Name: toolCall.Name,
+ Args: toolCall.Arguments,
+ Result: result,
+ Error: func() string {
+ if err != nil {
+ return err.Error()
+ }
+ return ""
+ }(),
+ },
+ },
+ },
+ },
+ },
+ })
+}
diff --git a/internal/services/toolkit/handler/toolcall.go b/internal/services/toolkit/handler/toolcall.go
index 8cead912..2b3db2a2 100644
--- a/internal/services/toolkit/handler/toolcall.go
+++ b/internal/services/toolkit/handler/toolcall.go
@@ -38,7 +38,7 @@ func NewToolCallHandler(toolRegistry *registry.ToolRegistry) *ToolCallHandler {
// - openaiChatHistory: The OpenAI-compatible chat history including tool call and output items.
// - inappChatHistory: The in-app chat history as a slice of chatv1.Message, reflecting tool call events.
// - error: Any error encountered during processing (always nil in current implementation).
-func (h *ToolCallHandler) HandleToolCalls(ctx context.Context, outputs []responses.ResponseOutputItemUnion, streamHandler *StreamHandler) (responses.ResponseNewParamsInputUnion, []chatv1.Message, error) {
+func (h *ToolCallHandler) HandleToolCalls(ctx context.Context, outputs []responses.ResponseOutputItemUnion, streamHandler StreamHandler) (responses.ResponseNewParamsInputUnion, []chatv1.Message, error) {
openaiChatHistory := responses.ResponseNewParamsInputUnion{} // Accumulates OpenAI chat history items
inappChatHistory := []chatv1.Message{} // Accumulates in-app chat history messages
diff --git a/internal/services/toolkit/handler/toolcall_v2.go b/internal/services/toolkit/handler/toolcall_v2.go
new file mode 100644
index 00000000..b7e62fa1
--- /dev/null
+++ b/internal/services/toolkit/handler/toolcall_v2.go
@@ -0,0 +1,127 @@
+package handler
+
+import (
+ "context"
+ "fmt"
+ "paperdebugger/internal/services/toolkit/registry"
+ chatv2 "paperdebugger/pkg/gen/api/chat/v2"
+ "time"
+
+ "github.com/openai/openai-go/v3"
+)
+
+// ToolCallHandlerV2 is responsible for handling tool calls by dispatching them to the appropriate tool registry
+// and managing the chat history for both OpenAI and in-app chat systems.
+type ToolCallHandlerV2 struct {
+ Registry *registry.ToolRegistryV2 // Registry containing available tools for function calls
+}
+
+func NewToolCallHandlerV2(toolRegistry *registry.ToolRegistryV2) *ToolCallHandlerV2 {
+ return &ToolCallHandlerV2{
+ Registry: toolRegistry,
+ }
+}
+
+type OpenAIChatHistory []openai.ChatCompletionMessageParamUnion
+type AppChatHistory []chatv2.Message
+
+// HandleToolCallsV2 processes a list of finished tool calls, invokes the corresponding tools, and constructs
+// both OpenAI and in-app chat histories reflecting the tool call and its result.
+//
+// Parameters:
+// ctx: The context for cancellation and deadlines.
+//	toolCalls: A slice of FinishedChatCompletionToolCall representing the tool calls requested by the model.
+// streamHandler: Optional handler for streaming tool call events (can be nil).
+//
+// Returns:
+// - openaiChatHistory: The OpenAI-compatible chat history including tool call and output items.
+// - inappChatHistory: The in-app chat history as a slice of chatv2.Message, reflecting tool call events.
+// - error: Any error encountered during processing (always nil in current implementation).
+func (h *ToolCallHandlerV2) HandleToolCallsV2(ctx context.Context, toolCalls []openai.FinishedChatCompletionToolCall, streamHandler *StreamHandlerV2) (OpenAIChatHistory, AppChatHistory, error) {
+ if len(toolCalls) == 0 {
+ return nil, nil, nil
+ }
+
+ openaiChatHistory := []openai.ChatCompletionMessageParamUnion{} // Accumulates OpenAI chat history items
+ inappChatHistory := []chatv2.Message{} // Accumulates in-app chat history messages
+
+ toolCallsParam := make([]openai.ChatCompletionMessageToolCallUnionParam, len(toolCalls))
+ for i, toolCall := range toolCalls {
+ toolCallsParam[i] = openai.ChatCompletionMessageToolCallUnionParam{
+ OfFunction: &openai.ChatCompletionMessageFunctionToolCallParam{
+ ID: toolCall.ID,
+ Type: "function",
+ Function: openai.ChatCompletionMessageFunctionToolCallFunctionParam{
+ Name: toolCall.Name,
+ Arguments: toolCall.Arguments,
+ },
+ },
+ }
+ }
+
+ openaiChatHistory = append(openaiChatHistory, openai.ChatCompletionMessageParamUnion{
+ OfAssistant: &openai.ChatCompletionAssistantMessageParam{
+ ToolCalls: toolCallsParam,
+ },
+ })
+
+ // Iterate over each output item to process tool calls
+ for _, toolCall := range toolCalls {
+ if streamHandler != nil {
+ streamHandler.SendToolCallBegin(toolCall)
+ }
+
+ toolResult, err := h.Registry.Call(ctx, toolCall.ID, toolCall.Name, []byte(toolCall.Arguments))
+
+ if streamHandler != nil {
+ streamHandler.SendToolCallEnd(toolCall, toolResult, err)
+ }
+
+ resultStr := toolResult
+ if err != nil {
+ resultStr = "Error: " + err.Error()
+ }
+
+ openaiChatHistory = append(openaiChatHistory, openai.ChatCompletionMessageParamUnion{
+ OfTool: &openai.ChatCompletionToolMessageParam{
+ Role: "tool",
+ ToolCallID: toolCall.ID,
+ Content: openai.ChatCompletionToolMessageParamContentUnion{
+ OfArrayOfContentParts: []openai.ChatCompletionContentPartTextParam{
+ {
+ Type: "text",
+ Text: resultStr,
+ },
+ // {
+ // Type: "image_url",
+ // ImageURL: "xxx"
+ // },
+ },
+ },
+ },
+ })
+
+ toolCallMsg := &chatv2.MessageTypeToolCall{
+ Name: toolCall.Name,
+ Args: toolCall.Arguments,
+ }
+ if err != nil {
+ toolCallMsg.Error = err.Error()
+ } else {
+ toolCallMsg.Result = resultStr
+ }
+
+ inappChatHistory = append(inappChatHistory, chatv2.Message{
+ MessageId: fmt.Sprintf("openai_toolCall[%d]_%s", toolCall.Index, toolCall.ID),
+ Payload: &chatv2.MessagePayload{
+ MessageType: &chatv2.MessagePayload_ToolCall{
+ ToolCall: toolCallMsg,
+ },
+ },
+ Timestamp: time.Now().Unix(),
+ })
+ }
+
+ // Return both chat histories and nil error (no error aggregation in this implementation)
+ return openaiChatHistory, inappChatHistory, nil
+}
diff --git a/internal/services/toolkit/registry/registry_v2.go b/internal/services/toolkit/registry/registry_v2.go
new file mode 100644
index 00000000..6c129fe8
--- /dev/null
+++ b/internal/services/toolkit/registry/registry_v2.go
@@ -0,0 +1,49 @@
+package registry
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/samber/lo"
+)
+
+type ToolRegistryV2 struct {
+ tools map[string]toolkit.ToolHandler
+ description map[string]openai.ChatCompletionToolUnionParam
+}
+
+func NewToolRegistryV2() *ToolRegistryV2 {
+ return &ToolRegistryV2{
+ tools: make(map[string]toolkit.ToolHandler),
+ description: make(map[string]openai.ChatCompletionToolUnionParam),
+ }
+}
+
+func (r *ToolRegistryV2) Register(name string, description openai.ChatCompletionToolUnionParam, handler toolkit.ToolHandler) {
+ r.tools[name] = handler
+ r.description[name] = description
+}
+
+func (r *ToolRegistryV2) Call(ctx context.Context, toolCallId string, toolCallName string, toolCallArgs json.RawMessage) (result string, err error) {
+ handler, ok := r.tools[toolCallName]
+ if !ok {
+ return "", fmt.Errorf("unknown tool: %s", toolCallName)
+ }
+ result, furtherInstruction, err := handler(ctx, toolCallId, toolCallArgs)
+ if err != nil {
+ return result, err
+ }
+
+ if furtherInstruction == "" {
+ return result, nil
+ } else {
+		return fmt.Sprintf("%s\n%s", result, furtherInstruction), nil
+ }
+}
+
+func (r *ToolRegistryV2) GetTools() []openai.ChatCompletionToolUnionParam {
+ return lo.Values(r.description)
+}
diff --git a/internal/services/toolkit/toolkit_test.go b/internal/services/toolkit/toolkit_test.go
index 5215b29c..01b4a55a 100644
--- a/internal/services/toolkit/toolkit_test.go
+++ b/internal/services/toolkit/toolkit_test.go
@@ -25,12 +25,12 @@ var mockConversationId = "mock-conversation-id"
type mockCallbackStream struct {
chatv1.ChatService_CreateConversationMessageStreamServer
messages []*chatv1.CreateConversationMessageStreamResponse
- // 用于跟踪消息状态的栈
+ // Stack to track message state
messageStack map[string]bool // key: message_id, value: true if begin, false if end
- // 用于跟踪流的状态
+ // To track stream state
hasInitialization bool
hasFinalization bool
- // 用于跟踪当前活跃的assistant消息
+ // To track the currently active assistant message
activeAssistantMessageId string
}
@@ -39,7 +39,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre
m.messageStack = make(map[string]bool)
}
- // 处理流初始化
+ // Handle stream initialization
if response.GetStreamInitialization() != nil {
if m.hasInitialization {
return fmt.Errorf("duplicate stream_initialization")
@@ -49,7 +49,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre
return nil
}
- // 处理流结束
+ // Handle stream finalization
if response.GetStreamFinalization() != nil {
if !m.hasInitialization {
return fmt.Errorf("stream_finalization without stream_initialization")
@@ -62,7 +62,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre
return nil
}
- // 获取消息ID
+ // Get message ID
var messageId string
switch {
case response.GetStreamPartBegin() != nil:
@@ -70,7 +70,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre
messageId = begin.MessageId
m.messageStack[messageId] = true
- // 如果是assistant role,记录当前活跃的assistant消息ID
+ // If it's an assistant role, record the currently active assistant message ID
if begin.GetPayload().GetAssistant() != nil {
m.activeAssistantMessageId = messageId
}
@@ -83,7 +83,7 @@ func (m *mockCallbackStream) Send(response *chatv1.CreateConversationMessageStre
}
delete(m.messageStack, messageId)
- // 如果是结束当前活跃的assistant消息,清除活跃ID
+ // If ending the currently active assistant message, clear the active ID
if messageId == m.activeAssistantMessageId {
m.activeAssistantMessageId = ""
}
@@ -187,20 +187,22 @@ func TestChatCompletion_SingleRoundChat_NotCallTool(t *testing.T) {
var err error
if tc.useStream {
- _oai, _inapp, err = aiClient.ChatCompletionStream(
+ _oai, _inapp, err = aiClient.ChatCompletionStreamV1(
context.Background(),
&tc.streamServer,
tc.conversationId,
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
- // 验证流式消息的完整性
+ // Verify streaming message integrity
assert.NoError(t, tc.streamServer.ValidateMessageStack())
} else {
- _oai, _inapp, err = aiClient.ChatCompletion(
+ _oai, _inapp, err = aiClient.ChatCompletionV1(
context.Background(),
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
}
assert.NoError(t, err)
@@ -254,20 +256,22 @@ func TestChatCompletion_TwoRoundChat_NotCallTool(t *testing.T) {
var err error
if tc.useStream {
- _oaiHistory, _appHistory, err = aiClient.ChatCompletionStream(
+ _oaiHistory, _appHistory, err = aiClient.ChatCompletionStreamV1(
context.Background(),
&tc.streamServer,
tc.conversationId,
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
- // 验证流式消息的完整性
+ // Verify streaming message integrity
assert.NoError(t, tc.streamServer.ValidateMessageStack())
} else {
- _oaiHistory, _appHistory, err = aiClient.ChatCompletion(
+ _oaiHistory, _appHistory, err = aiClient.ChatCompletionV1(
context.Background(),
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
}
assert.NoError(t, err)
@@ -281,20 +285,22 @@ func TestChatCompletion_TwoRoundChat_NotCallTool(t *testing.T) {
appHistory = append(appHistory, createAppUserInputMessage(prompt))
if tc.useStream {
- _oaiHistory, _appHistory, err = aiClient.ChatCompletionStream(
+ _oaiHistory, _appHistory, err = aiClient.ChatCompletionStreamV1(
context.Background(),
&tc.streamServer,
tc.conversationId,
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
- // 验证流式消息的完整性
+ // Verify streaming message integrity
assert.NoError(t, tc.streamServer.ValidateMessageStack())
} else {
- _oaiHistory, _appHistory, err = aiClient.ChatCompletion(
+ _oaiHistory, _appHistory, err = aiClient.ChatCompletionV1(
context.Background(),
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
}
assert.NoError(t, err)
@@ -348,20 +354,22 @@ func TestChatCompletion_OneRoundChat_CallOneTool_MessageAfterToolCall(t *testing
var err error
if tc.useStream {
- openaiHistory, inappHistory, err = aiClient.ChatCompletionStream(
+ openaiHistory, inappHistory, err = aiClient.ChatCompletionStreamV1(
context.Background(),
&tc.streamServer,
tc.conversationId,
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
- // 验证流式消息的完整性
+ // Verify streaming message integrity
assert.NoError(t, tc.streamServer.ValidateMessageStack())
} else {
- openaiHistory, inappHistory, err = aiClient.ChatCompletion(
+ openaiHistory, inappHistory, err = aiClient.ChatCompletionV1(
context.Background(),
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
}
assert.NoError(t, err)
@@ -370,7 +378,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_MessageAfterToolCall(t *testing
appHistory = append(appHistory, inappHistory...)
assert.Equal(t, len(oaiHistory), 4)
- assert.Equal(t, len(appHistory), 3) // app history 只保留 tool_call_result,不保留调用之前的那个 tool_call 请求
+ assert.Equal(t, len(appHistory), 3) // app history only keeps tool_call_result, not the tool_call request before the call
assert.NotNil(t, oaiHistory[1].OfFunctionCall)
assert.Equal(t, oaiHistory[1].OfFunctionCall.Name, "greeting")
@@ -384,7 +392,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_MessageAfterToolCall(t *testing
}
}
-// 测试是否可以处理 err 的 message 添加到聊天记录中
+// Test whether error messages can be added to chat history
func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) {
os.Setenv("PD_MONGO_URI", "mongodb://localhost:27017")
var dbInstance, _ = db.NewDB(cfg.GetCfg(), logger.GetLogger())
@@ -424,20 +432,22 @@ func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) {
var err error
if tc.useStream {
- openaiHistory, inappHistory, err = aiClient.ChatCompletionStream(
+ openaiHistory, inappHistory, err = aiClient.ChatCompletionStreamV1(
context.Background(),
&tc.streamServer,
tc.conversationId,
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
- // 验证流式消息的完整性
+ // Verify streaming message integrity
assert.NoError(t, tc.streamServer.ValidateMessageStack())
} else {
- openaiHistory, inappHistory, err = aiClient.ChatCompletion(
+ openaiHistory, inappHistory, err = aiClient.ChatCompletionV1(
context.Background(),
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
}
assert.NoError(t, err)
@@ -459,7 +469,7 @@ func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) {
}
assert.Equal(t, 4, len(oaiHistory))
- //pd_user, openai_call, openai_msg 或者 pd_user, openai_msg, openai_call, openai_msg
+ //pd_user, openai_call, openai_msg or pd_user, openai_msg, openai_call, openai_msg
assert.Condition(t, func() bool {
var firstMsg = appHistory[0].MessageId
if !strings.HasPrefix(firstMsg, "pd_user_") {
@@ -499,20 +509,22 @@ func TestChatCompletion_OneRoundChat_CallOneTool_AlwaysException(t *testing.T) {
oaiHistory = append(oaiHistory, createOpenaiUserInputMessage(prompt))
appHistory = append(appHistory, createAppUserInputMessage(prompt))
if tc.useStream {
- openaiHistory, inappHistory, err = aiClient.ChatCompletionStream(
+ openaiHistory, inappHistory, err = aiClient.ChatCompletionStreamV1(
context.Background(),
&tc.streamServer,
tc.conversationId,
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
- // 验证流式消息的完整性
+ // Verify streaming message integrity
assert.NoError(t, tc.streamServer.ValidateMessageStack())
} else {
- openaiHistory, inappHistory, err = aiClient.ChatCompletion(
+ openaiHistory, inappHistory, err = aiClient.ChatCompletionV1(
context.Background(),
- models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI),
+ string(models.LanguageModel(chatv1.LanguageModel_LANGUAGE_MODEL_OPENAI_GPT41_MINI)),
oaiHistory,
+ nil,
)
}
assert.NoError(t, err)
diff --git a/internal/services/toolkit/tools/files/file_create.go b/internal/services/toolkit/tools/files/file_create.go
new file mode 100644
index 00000000..54f91ecc
--- /dev/null
+++ b/internal/services/toolkit/tools/files/file_create.go
@@ -0,0 +1,49 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var CreateFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "create_file",
+ Description: param.NewOpt("Creates a new file at the specified path with the given content. Returns an error if the file already exists."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "path": map[string]any{
+ "type": "string",
+ "description": "The absolute or relative path where the file should be created.",
+ },
+ "content": map[string]any{
+ "type": "string",
+ "description": "The content to write to the file.",
+ },
+ },
+ "required": []string{"path", "content"},
+ },
+ },
+ },
+}
+
+type CreateFileArgs struct {
+ Path string `json:"path"`
+ Content string `json:"content"`
+}
+
+func CreateFileTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs CreateFileArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ // TODO: Implement actual file creation logic
+ return fmt.Sprintf("[DUMMY] File created at: %s (content length: %d bytes)", getArgs.Path, len(getArgs.Content)), "", nil
+}
diff --git a/internal/services/toolkit/tools/files/file_delete.go b/internal/services/toolkit/tools/files/file_delete.go
new file mode 100644
index 00000000..18f78ddd
--- /dev/null
+++ b/internal/services/toolkit/tools/files/file_delete.go
@@ -0,0 +1,44 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var DeleteFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "delete_file",
+ Description: param.NewOpt("Deletes a file at the specified path. Returns an error if the file does not exist or cannot be deleted."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "path": map[string]any{
+ "type": "string",
+ "description": "The absolute or relative path of the file to delete.",
+ },
+ },
+ "required": []string{"path"},
+ },
+ },
+ },
+}
+
+type DeleteFileArgs struct {
+ Path string `json:"path"`
+}
+
+func DeleteFileTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs DeleteFileArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ // TODO: Implement actual file deletion logic
+ return fmt.Sprintf("[DUMMY] File deleted: %s", getArgs.Path), "", nil
+}
diff --git a/internal/services/toolkit/tools/files/file_read.go b/internal/services/toolkit/tools/files/file_read.go
new file mode 100644
index 00000000..f1916ebd
--- /dev/null
+++ b/internal/services/toolkit/tools/files/file_read.go
@@ -0,0 +1,168 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var ReadFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "read_file",
+ Description: param.NewOpt("Reads the content of a file at the specified path. Supports reading specific line ranges."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "path": map[string]any{
+ "type": "string",
+ "description": "The absolute or relative path of the file to read.",
+ },
+ "start_line": map[string]any{
+ "type": "integer",
+ "description": "Optional. The starting line number (1-indexed) to read from. If not specified, reads from the beginning.",
+ },
+ "end_line": map[string]any{
+ "type": "integer",
+ "description": "Optional. The ending line number (1-indexed, inclusive) to read to. If not specified, reads to the end.",
+ },
+ },
+ "required": []string{"path"},
+ },
+ },
+ },
+}
+
// ReadFileArgs holds the decoded JSON arguments for the read_file tool.
type ReadFileArgs struct {
	Path      string `json:"path"`                 // file path to read
	StartLine *int   `json:"start_line,omitempty"` // optional 1-indexed first line; nil reads from the start
	EndLine   *int   `json:"end_line,omitempty"`   // optional 1-indexed last line (inclusive); nil reads to the end
}
+
// ReadFileTool serves the read_file tool; it resolves file contents through
// the project service rather than the local filesystem.
type ReadFileTool struct {
	projectService *services.ProjectService // source of project documents
}

// NewReadFileTool returns a ReadFileTool backed by the given project service.
func NewReadFileTool(projectService *services.ProjectService) *ReadFileTool {
	return &ReadFileTool{
		projectService: projectService,
	}
}
+
+func (t *ReadFileTool) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs ReadFileArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ // Get project from context
+ actor, projectId, _ := toolkit.GetActorProjectConversationID(ctx)
+ if actor == nil || projectId == "" {
+ return "", "", fmt.Errorf("failed to get actor or project id from context")
+ }
+
+ project, err := t.projectService.GetProject(ctx, actor.ID, projectId)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to get project: %w", err)
+ }
+
+ // Normalize the path for matching
+ targetPath := normalizePath(getArgs.Path)
+
+ // Find the document by path
+ var foundDoc *struct {
+ Lines []string
+ Filepath string
+ }
+ for _, doc := range project.Docs {
+ docPath := normalizePath(doc.Filepath)
+ if docPath == targetPath || strings.HasSuffix(docPath, "/"+targetPath) || docPath == "/"+targetPath {
+ foundDoc = &struct {
+ Lines []string
+ Filepath string
+ }{Lines: doc.Lines, Filepath: doc.Filepath}
+ break
+ }
+ }
+
+ if foundDoc == nil {
+ return fmt.Sprintf("File not found: %s", getArgs.Path), "", nil
+ }
+
+ lines := foundDoc.Lines
+ totalLines := len(lines)
+
+ // Apply line range filtering
+ startIdx := 0
+ endIdx := totalLines
+
+ if getArgs.StartLine != nil {
+ startIdx = *getArgs.StartLine - 1 // Convert to 0-indexed
+ if startIdx < 0 {
+ startIdx = 0
+ }
+ if startIdx >= totalLines {
+ startIdx = totalLines
+ }
+ }
+
+ if getArgs.EndLine != nil {
+ endIdx = *getArgs.EndLine // EndLine is inclusive, so we use it directly
+ if endIdx > totalLines {
+ endIdx = totalLines
+ }
+ if endIdx < 0 {
+ endIdx = 0
+ }
+ }
+
+ if startIdx >= endIdx {
+ return "No content in the specified line range", "", nil
+ }
+
+ // Build result with line numbers
+ var result strings.Builder
+ result.WriteString(fmt.Sprintf("File: %s (lines %d-%d of %d)\n\n", foundDoc.Filepath, startIdx+1, endIdx, totalLines))
+
+ for i := startIdx; i < endIdx; i++ {
+ result.WriteString(fmt.Sprintf("%4d: %s\n", i+1, lines[i]))
+ }
+
+ return result.String(), "", nil
+}
+
// normalizePath strips leading "/" and "./" prefixes so document paths can be
// compared in a uniform, relative form.
//
// Fix vs. the original single-pass version: it loops until the path is
// stable, so mixed prefixes such as ".//foo" or "/./foo" normalize fully
// instead of leaving a stray leading slash behind.
func normalizePath(path string) string {
	for {
		trimmed := strings.TrimPrefix(path, "/")
		trimmed = strings.TrimPrefix(trimmed, "./")
		if trimmed == path {
			return trimmed
		}
		path = trimmed
	}
}
+
+// ReadFileToolLegacy for backward compatibility (standalone function)
+func ReadFileToolLegacy(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs ReadFileArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ lineRange := "all"
+ if getArgs.StartLine != nil && getArgs.EndLine != nil {
+ lineRange = fmt.Sprintf("lines %d-%d", *getArgs.StartLine, *getArgs.EndLine)
+ } else if getArgs.StartLine != nil {
+ lineRange = fmt.Sprintf("from line %d", *getArgs.StartLine)
+ } else if getArgs.EndLine != nil {
+ lineRange = fmt.Sprintf("to line %d", *getArgs.EndLine)
+ }
+
+ // TODO: This legacy function doesn't have access to ProjectService
+ return fmt.Sprintf("[WARNING] read_file tool not properly initialized. Requested: %s (%s)", getArgs.Path, lineRange), "", nil
+}
diff --git a/internal/services/toolkit/tools/files/file_search.go b/internal/services/toolkit/tools/files/file_search.go
new file mode 100644
index 00000000..790d05f8
--- /dev/null
+++ b/internal/services/toolkit/tools/files/file_search.go
@@ -0,0 +1,163 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var SearchFileToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "search_file",
+ Description: param.NewOpt("Searches for files by name or pattern within a specified directory. Returns matching file paths."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "path": map[string]any{
+ "type": "string",
+ "description": "The directory path to search within.",
+ },
+ "pattern": map[string]any{
+ "type": "string",
+ "description": "The file name pattern to search for (supports glob patterns like '*.go', 'test_*.py').",
+ },
+ "recursive": map[string]any{
+ "type": "boolean",
+ "description": "If true, search recursively in subdirectories. Default is true.",
+ },
+ "max_results": map[string]any{
+ "type": "integer",
+ "description": "Maximum number of results to return. Default is 100.",
+ },
+ },
+ "required": []string{"path", "pattern"},
+ },
+ },
+ },
+}
+
// SearchFileArgs holds the decoded JSON arguments for the search_file tool.
type SearchFileArgs struct {
	Path       string `json:"path"`                  // directory to search within
	Pattern    string `json:"pattern"`               // glob pattern matched against file names
	Recursive  *bool  `json:"recursive,omitempty"`   // nil means default (true)
	MaxResults *int   `json:"max_results,omitempty"` // nil means default (100)
}
+
// SearchFileTool serves the search_file tool; it matches file names over the
// project's document list instead of a real filesystem.
type SearchFileTool struct {
	projectService *services.ProjectService // source of project documents
}

// NewSearchFileTool returns a SearchFileTool backed by the given project service.
func NewSearchFileTool(projectService *services.ProjectService) *SearchFileTool {
	return &SearchFileTool{
		projectService: projectService,
	}
}
+
+func (t *SearchFileTool) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs SearchFileArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ // Default values
+ recursive := true
+ if getArgs.Recursive != nil {
+ recursive = *getArgs.Recursive
+ }
+ maxResults := 100
+ if getArgs.MaxResults != nil {
+ maxResults = *getArgs.MaxResults
+ }
+
+ // Get project from context
+ actor, projectId, _ := toolkit.GetActorProjectConversationID(ctx)
+ if actor == nil || projectId == "" {
+ return "", "", fmt.Errorf("failed to get actor or project id from context")
+ }
+
+ project, err := t.projectService.GetProject(ctx, actor.ID, projectId)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to get project: %w", err)
+ }
+
+ // Normalize search path
+ searchPath := normalizePath(getArgs.Path)
+
+ var matchingFiles []string
+ for _, doc := range project.Docs {
+ docPath := normalizePath(doc.Filepath)
+
+ // Check if file is within the search path
+ if searchPath != "" && searchPath != "." && searchPath != "/" {
+ if !strings.HasPrefix(docPath, searchPath+"/") && docPath != searchPath {
+ dir := filepath.Dir(docPath)
+ if !recursive && dir != searchPath {
+ continue
+ }
+ if recursive && !strings.HasPrefix(docPath, searchPath) {
+ continue
+ }
+ }
+ }
+
+ // Match against the pattern (glob-style)
+ fileName := filepath.Base(docPath)
+ matched, err := filepath.Match(getArgs.Pattern, fileName)
+ if err != nil {
+ // If pattern is invalid, try substring match
+ matched = strings.Contains(strings.ToLower(fileName), strings.ToLower(getArgs.Pattern))
+ }
+
+ if matched {
+ matchingFiles = append(matchingFiles, doc.Filepath)
+ if len(matchingFiles) >= maxResults {
+ break
+ }
+ }
+ }
+
+ if len(matchingFiles) == 0 {
+ return fmt.Sprintf("No files found matching pattern '%s' in '%s'", getArgs.Pattern, getArgs.Path), "", nil
+ }
+
+ var result strings.Builder
+ result.WriteString(fmt.Sprintf("Found %d file(s) matching pattern '%s' in '%s':\n\n", len(matchingFiles), getArgs.Pattern, getArgs.Path))
+ for _, f := range matchingFiles {
+ result.WriteString(fmt.Sprintf(" %s\n", f))
+ }
+
+ return result.String(), "", nil
+}
+
+// SearchFileToolLegacy for backward compatibility (standalone function)
+func SearchFileToolLegacy(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs SearchFileArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ // Default values
+ recursive := true
+ if getArgs.Recursive != nil {
+ recursive = *getArgs.Recursive
+ }
+ maxResults := 100
+ if getArgs.MaxResults != nil {
+ maxResults = *getArgs.MaxResults
+ }
+
+ // TODO: This legacy function doesn't have access to ProjectService
+ return fmt.Sprintf("[WARNING] search_file tool not properly initialized. Requested: pattern '%s' in '%s' (recursive: %v, max_results: %d)",
+ getArgs.Pattern, getArgs.Path, recursive, maxResults), "", nil
+}
diff --git a/internal/services/toolkit/tools/files/folder_create.go b/internal/services/toolkit/tools/files/folder_create.go
new file mode 100644
index 00000000..4c5a1c4e
--- /dev/null
+++ b/internal/services/toolkit/tools/files/folder_create.go
@@ -0,0 +1,44 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var CreateFolderToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "create_folder",
+ Description: param.NewOpt("Creates a new folder (directory) at the specified path. Creates parent directories if they don't exist (like mkdir -p)."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "path": map[string]any{
+ "type": "string",
+ "description": "The absolute or relative path where the folder should be created.",
+ },
+ },
+ "required": []string{"path"},
+ },
+ },
+ },
+}
+
+type CreateFolderArgs struct {
+ Path string `json:"path"`
+}
+
+func CreateFolderTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs CreateFolderArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ // TODO: Implement actual folder creation logic
+ return fmt.Sprintf("[DUMMY] Folder created at: %s", getArgs.Path), "", nil
+}
diff --git a/internal/services/toolkit/tools/files/folder_delete.go b/internal/services/toolkit/tools/files/folder_delete.go
new file mode 100644
index 00000000..d18f979e
--- /dev/null
+++ b/internal/services/toolkit/tools/files/folder_delete.go
@@ -0,0 +1,54 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var DeleteFolderToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "delete_folder",
+ Description: param.NewOpt("Deletes a folder (directory) at the specified path. Can optionally delete recursively including all contents."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "path": map[string]any{
+ "type": "string",
+ "description": "The absolute or relative path of the folder to delete.",
+ },
+ "recursive": map[string]any{
+ "type": "boolean",
+ "description": "If true, delete the folder and all its contents recursively. Default is false.",
+ },
+ },
+ "required": []string{"path"},
+ },
+ },
+ },
+}
+
+type DeleteFolderArgs struct {
+ Path string `json:"path"`
+ Recursive *bool `json:"recursive,omitempty"`
+}
+
+func DeleteFolderTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs DeleteFolderArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ recursive := false
+ if getArgs.Recursive != nil {
+ recursive = *getArgs.Recursive
+ }
+
+ // TODO: Implement actual folder deletion logic
+ return fmt.Sprintf("[DUMMY] Folder deleted: %s (recursive: %v)", getArgs.Path, recursive), "", nil
+}
diff --git a/internal/services/toolkit/tools/files/folder_read.go b/internal/services/toolkit/tools/files/folder_read.go
new file mode 100644
index 00000000..b03e6d78
--- /dev/null
+++ b/internal/services/toolkit/tools/files/folder_read.go
@@ -0,0 +1,239 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var ReadFolderToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "read_folder",
+ Description: param.NewOpt("Lists the contents of a folder (directory) at the specified path. Can optionally list recursively."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "path": map[string]any{
+ "type": "string",
+ "description": "The absolute or relative path of the folder to list.",
+ },
+ "recursive": map[string]any{
+ "type": "boolean",
+ "description": "If true, list contents recursively including all subdirectories. Default is false.",
+ },
+ "max_depth": map[string]any{
+ "type": "integer",
+ "description": "Maximum depth to recurse when recursive is true. Default is unlimited.",
+ },
+ "pattern": map[string]any{
+ "type": "string",
+ "description": "Optional glob pattern to filter results (e.g., '*.go', '*.py').",
+ },
+ },
+ "required": []string{"path"},
+ },
+ },
+ },
+}
+
// ReadFolderArgs holds the decoded JSON arguments for the read_folder tool.
type ReadFolderArgs struct {
	Path      string `json:"path"`                // folder to list
	Recursive *bool  `json:"recursive,omitempty"` // nil means default (false)
	MaxDepth  *int   `json:"max_depth,omitempty"` // nil means unlimited depth
	Pattern   string `json:"pattern,omitempty"`   // optional glob filter on base names
}
+
// ReadFolderTool serves the read_folder tool; it synthesizes directory
// listings from the project's document list.
type ReadFolderTool struct {
	projectService *services.ProjectService // source of project documents
}

// NewReadFolderTool returns a ReadFolderTool backed by the given project service.
func NewReadFolderTool(projectService *services.ProjectService) *ReadFolderTool {
	return &ReadFolderTool{
		projectService: projectService,
	}
}
+
// folderEntry is one row in a synthesized folder listing.
type folderEntry struct {
	path  string // normalized project-relative path
	isDir bool   // true for directory entries inferred from file paths
}
+
// Call executes the read_folder tool. It decodes the arguments, resolves the
// caller's project from the context, and synthesizes a directory listing from
// the project's flat document list — there is no real filesystem, so
// directories are inferred from the "/" segments of file paths.
func (t *ReadFolderTool) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
	var getArgs ReadFolderArgs

	if err := json.Unmarshal(args, &getArgs); err != nil {
		return "", "", err
	}

	// Defaults: non-recursive listing, unlimited depth.
	recursive := false
	if getArgs.Recursive != nil {
		recursive = *getArgs.Recursive
	}

	maxDepth := -1 // unlimited
	if getArgs.MaxDepth != nil {
		maxDepth = *getArgs.MaxDepth
	}

	// Get project from context
	actor, projectId, _ := toolkit.GetActorProjectConversationID(ctx)
	if actor == nil || projectId == "" {
		return "", "", fmt.Errorf("failed to get actor or project id from context")
	}

	project, err := t.projectService.GetProject(ctx, actor.ID, projectId)
	if err != nil {
		return "", "", fmt.Errorf("failed to get project: %w", err)
	}

	// Normalize search path; the empty string denotes the project root.
	searchPath := normalizePath(getArgs.Path)
	if searchPath == "" || searchPath == "." {
		searchPath = ""
	}

	// Collect matching entries and directories. entriesMap deduplicates by
	// path; dirsSet remembers which directory entries were already created.
	entriesMap := make(map[string]folderEntry)
	dirsSet := make(map[string]bool)

	for _, doc := range project.Docs {
		docPath := normalizePath(doc.Filepath)

		// Check if file is within the search path
		var relativePath string
		if searchPath == "" {
			relativePath = docPath
		} else {
			if !strings.HasPrefix(docPath, searchPath+"/") && docPath != searchPath {
				continue
			}
			relativePath = strings.TrimPrefix(docPath, searchPath+"/")
		}

		// Calculate depth relative to search path
		pathDepth := strings.Count(relativePath, "/")

		// Filter by depth
		if !recursive && pathDepth > 0 {
			// Show only direct children (files and immediate subdirectories)
			parts := strings.SplitN(relativePath, "/", 2)
			if len(parts) > 1 {
				// This is a subdirectory: record one directory entry for it
				// instead of listing the nested file itself.
				dirName := parts[0]
				fullDirPath := searchPath
				if fullDirPath != "" {
					fullDirPath += "/"
				}
				fullDirPath += dirName
				if !dirsSet[fullDirPath] {
					dirsSet[fullDirPath] = true
					entriesMap[fullDirPath] = folderEntry{path: fullDirPath, isDir: true}
				}
				continue
			}
		} else if recursive && maxDepth >= 0 && pathDepth > maxDepth {
			continue
		}

		// Apply pattern filter (glob on the base name; an invalid glob falls
		// back to a case-insensitive substring match).
		if getArgs.Pattern != "" {
			fileName := filepath.Base(docPath)
			matched, err := filepath.Match(getArgs.Pattern, fileName)
			if err != nil {
				matched = strings.Contains(strings.ToLower(fileName), strings.ToLower(getArgs.Pattern))
			}
			if !matched {
				continue
			}
		}

		// Add parent directories for recursive listing
		// NOTE(review): HasPrefix without a trailing "/" means a directory
		// merely sharing the search path as a name prefix (e.g. "src2" when
		// listing "src") could be walked here — confirm whether intended.
		if recursive {
			dir := filepath.Dir(docPath)
			for dir != "." && dir != "/" && dir != "" && strings.HasPrefix(dir, searchPath) {
				if !dirsSet[dir] {
					dirsSet[dir] = true
					entriesMap[dir] = folderEntry{path: dir, isDir: true}
				}
				dir = filepath.Dir(dir)
			}
		}

		entriesMap[docPath] = folderEntry{path: docPath, isDir: false}
	}

	if len(entriesMap) == 0 {
		return fmt.Sprintf("Folder '%s' is empty or does not exist", getArgs.Path), "", nil
	}

	// Sort entries
	var entries []folderEntry
	for _, e := range entriesMap {
		entries = append(entries, e)
	}
	sort.Slice(entries, func(i, j int) bool {
		// Directories first, then alphabetically
		if entries[i].isDir != entries[j].isDir {
			return entries[i].isDir
		}
		return entries[i].path < entries[j].path
	})

	var result strings.Builder
	displayPath := getArgs.Path
	if displayPath == "" || displayPath == "." {
		displayPath = "/"
	}
	result.WriteString(fmt.Sprintf("Contents of '%s':\n\n", displayPath))

	for _, entry := range entries {
		// Show entries relative to the search path for readability.
		displayName := entry.path
		if searchPath != "" && strings.HasPrefix(displayName, searchPath+"/") {
			displayName = strings.TrimPrefix(displayName, searchPath+"/")
		}
		if entry.isDir {
			result.WriteString(fmt.Sprintf(" 📁 %s/\n", displayName))
		} else {
			result.WriteString(fmt.Sprintf(" 📄 %s\n", displayName))
		}
	}

	return result.String(), "", nil
}
+
+// ReadFolderToolLegacy for backward compatibility (standalone function)
+func ReadFolderToolLegacy(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs ReadFolderArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ recursive := false
+ if getArgs.Recursive != nil {
+ recursive = *getArgs.Recursive
+ }
+
+ depthStr := "unlimited"
+ if getArgs.MaxDepth != nil {
+ depthStr = fmt.Sprintf("%d", *getArgs.MaxDepth)
+ }
+
+ pattern := "*"
+ if getArgs.Pattern != "" {
+ pattern = getArgs.Pattern
+ }
+
+ // TODO: This legacy function doesn't have access to ProjectService
+ return fmt.Sprintf("[WARNING] read_folder tool not properly initialized. Requested: %s (recursive: %v, max_depth: %s, pattern: %s)", getArgs.Path, recursive, depthStr, pattern), "", nil
+}
diff --git a/internal/services/toolkit/tools/files/string_search.go b/internal/services/toolkit/tools/files/string_search.go
new file mode 100644
index 00000000..8edb0552
--- /dev/null
+++ b/internal/services/toolkit/tools/files/string_search.go
@@ -0,0 +1,226 @@
+package tools
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "regexp"
+ "strings"
+
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var SearchStringToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "search_string",
+ Description: param.NewOpt("Searches for a string pattern in files within a specified directory. Supports regex patterns and can limit results."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "pattern": map[string]any{
+ "type": "string",
+ "description": "The search pattern (string or regex) to look for.",
+ },
+ "path": map[string]any{
+ "type": "string",
+ "description": "The directory path to search within.",
+ },
+ "file_pattern": map[string]any{
+ "type": "string",
+ "description": "Optional glob pattern to filter files (e.g., '*.go', '*.py'). Default is all files.",
+ },
+ "case_sensitive": map[string]any{
+ "type": "boolean",
+ "description": "Whether the search should be case-sensitive. Default is true.",
+ },
+ "max_results": map[string]any{
+ "type": "integer",
+ "description": "Maximum number of results to return. Default is 100.",
+ },
+ },
+ "required": []string{"pattern", "path"},
+ },
+ },
+ },
+}
+
// SearchStringArgs holds the decoded JSON arguments for the search_string tool.
type SearchStringArgs struct {
	Pattern       string `json:"pattern"`                  // regex (preferred) or literal search pattern
	Path          string `json:"path"`                     // directory to search within
	FilePattern   string `json:"file_pattern,omitempty"`   // optional glob filter on file base names
	CaseSensitive *bool  `json:"case_sensitive,omitempty"` // nil means default (true)
	MaxResults    *int   `json:"max_results,omitempty"`    // nil means default (100)
}
+
// SearchStringTool serves the search_string tool; it greps over the lines of
// the project's documents.
type SearchStringTool struct {
	projectService *services.ProjectService // source of project documents
}

// NewSearchStringTool returns a SearchStringTool backed by the given project service.
func NewSearchStringTool(projectService *services.ProjectService) *SearchStringTool {
	return &SearchStringTool{
		projectService: projectService,
	}
}
+
// searchResult is a single matching line found by the search_string tool.
type searchResult struct {
	FilePath   string // original (non-normalized) document path
	LineNumber int    // 1-indexed line number of the match
	Content    string // full text of the matching line
}
+
// Call executes the search_string tool: grep-like search over the project's
// documents. The pattern is first tried as a regular expression; if it does
// not compile, it is matched as a literal substring instead.
func (t *SearchStringTool) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
	var getArgs SearchStringArgs

	if err := json.Unmarshal(args, &getArgs); err != nil {
		return "", "", err
	}

	// Default values
	caseSensitive := true
	if getArgs.CaseSensitive != nil {
		caseSensitive = *getArgs.CaseSensitive
	}
	maxResults := 100
	if getArgs.MaxResults != nil {
		maxResults = *getArgs.MaxResults
	}

	// Get project from context
	actor, projectId, _ := toolkit.GetActorProjectConversationID(ctx)
	if actor == nil || projectId == "" {
		return "", "", fmt.Errorf("failed to get actor or project id from context")
	}

	project, err := t.projectService.GetProject(ctx, actor.ID, projectId)
	if err != nil {
		return "", "", fmt.Errorf("failed to get project: %w", err)
	}

	// Normalize search path
	searchPath := normalizePath(getArgs.Path)

	// Compile regex if possible, otherwise use string matching.
	// "(?i)" makes the compiled regex case-insensitive; the literal fallback
	// below handles case-insensitivity separately via ToLower.
	var regex *regexp.Regexp
	searchPattern := getArgs.Pattern
	if !caseSensitive {
		searchPattern = "(?i)" + searchPattern
	}
	regex, regexErr := regexp.Compile(searchPattern)
	if regexErr != nil {
		// Fall back to literal string matching
		regex = nil
	}

	var results []searchResult
	resultsCount := 0

	for _, doc := range project.Docs {
		if resultsCount >= maxResults {
			break
		}

		docPath := normalizePath(doc.Filepath)

		// Check if file is within the search path
		if searchPath != "" && searchPath != "." && searchPath != "/" {
			if !strings.HasPrefix(docPath, searchPath+"/") && docPath != searchPath {
				continue
			}
		}

		// Apply file pattern filter (glob on the base name; an invalid glob
		// falls back to a case-insensitive substring match).
		if getArgs.FilePattern != "" {
			fileName := filepath.Base(docPath)
			matched, err := filepath.Match(getArgs.FilePattern, fileName)
			if err != nil {
				matched = strings.Contains(strings.ToLower(fileName), strings.ToLower(getArgs.FilePattern))
			}
			if !matched {
				continue
			}
		}

		// Search through lines
		for lineNum, line := range doc.Lines {
			if resultsCount >= maxResults {
				break
			}

			var found bool
			if regex != nil {
				found = regex.MatchString(line)
			} else {
				// Literal string match
				if caseSensitive {
					found = strings.Contains(line, getArgs.Pattern)
				} else {
					found = strings.Contains(strings.ToLower(line), strings.ToLower(getArgs.Pattern))
				}
			}

			if found {
				results = append(results, searchResult{
					FilePath:   doc.Filepath,
					LineNumber: lineNum + 1, // 1-indexed
					Content:    line,
				})
				resultsCount++
			}
		}
	}

	if len(results) == 0 {
		return fmt.Sprintf("No results found for pattern '%s' in '%s'", getArgs.Pattern, getArgs.Path), "", nil
	}

	var sb strings.Builder
	if resultsCount >= maxResults {
		sb.WriteString(fmt.Sprintf("Found %d+ matches for pattern '%s' (showing first %d):\n\n", resultsCount, getArgs.Pattern, maxResults))
	} else {
		sb.WriteString(fmt.Sprintf("Found %d match(es) for pattern '%s':\n\n", len(results), getArgs.Pattern))
	}

	for _, r := range results {
		// Truncate long lines for display
		content := r.Content
		if len(content) > 100 {
			content = content[:100] + "..."
		}
		sb.WriteString(fmt.Sprintf("%s:%d: %s\n", r.FilePath, r.LineNumber, strings.TrimSpace(content)))
	}

	return sb.String(), "", nil
}
+
+// SearchStringToolLegacy for backward compatibility (standalone function)
+func SearchStringToolLegacy(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ var getArgs SearchStringArgs
+
+ if err := json.Unmarshal(args, &getArgs); err != nil {
+ return "", "", err
+ }
+
+ // Default values
+ caseSensitive := true
+ if getArgs.CaseSensitive != nil {
+ caseSensitive = *getArgs.CaseSensitive
+ }
+ maxResults := 100
+ if getArgs.MaxResults != nil {
+ maxResults = *getArgs.MaxResults
+ }
+ filePattern := "*"
+ if getArgs.FilePattern != "" {
+ filePattern = getArgs.FilePattern
+ }
+
+ // TODO: This legacy function doesn't have access to ProjectService
+ return fmt.Sprintf("[WARNING] search_string tool not properly initialized. Requested: pattern '%s' in '%s' (file_pattern: %s, case_sensitive: %v, max_results: %d)",
+ getArgs.Pattern, getArgs.Path, filePattern, caseSensitive, maxResults), "", nil
+}
diff --git a/internal/services/toolkit/tools/latex/document_structure.go b/internal/services/toolkit/tools/latex/document_structure.go
new file mode 100644
index 00000000..e031dbe9
--- /dev/null
+++ b/internal/services/toolkit/tools/latex/document_structure.go
@@ -0,0 +1,158 @@
+package latex
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var GetDocumentStructureToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "get_document_structure",
+ Description: param.NewOpt("Gets the complete project document outline (section tree). Returns the complete document outline including all sections, subsections, and their hierarchy."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{},
+ "required": []string{},
+ },
+ },
+ },
+}
+
// DocumentStructureTool serves the get_document_structure tool; it reads the
// project's expanded LaTeX source through the project service.
type DocumentStructureTool struct {
	projectService *services.ProjectService // source of project content
}

// NewDocumentStructureTool returns a DocumentStructureTool backed by the
// given project service.
func NewDocumentStructureTool(projectService *services.ProjectService) *DocumentStructureTool {
	return &DocumentStructureTool{
		projectService: projectService,
	}
}
+
// sectionEntry is one heading discovered in the expanded LaTeX source.
type sectionEntry struct {
	Level       int    // 0=part, 1=chapter, 2=section, 3=subsection, 4=subsubsection
	Title       string // heading title after LaTeX commands are stripped
	LineNumber  int    // 1-indexed line in the expanded document
	FilePath    string // NOTE(review): never populated by parseLaTeXSections — confirm intent
	FullContent string // The expanded content line
}
+
+func (t *DocumentStructureTool) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ // Get project from context
+ actor, projectId, _ := toolkit.GetActorProjectConversationID(ctx)
+ if actor == nil || projectId == "" {
+ return "", "", fmt.Errorf("failed to get actor or project id from context")
+ }
+
+ project, err := t.projectService.GetProject(ctx, actor.ID, projectId)
+ if err != nil {
+ return "", "", fmt.Errorf("failed to get project: %w", err)
+ }
+
+ // Get the full expanded content
+ fullContent, err := project.GetFullContent()
+ if err != nil {
+ return "", "", fmt.Errorf("failed to get full content: %w", err)
+ }
+
+ // Parse the LaTeX to extract sections
+ sections := parseLaTeXSections(fullContent)
+
+ if len(sections) == 0 {
+ return "No sections found in the document.", "", nil
+ }
+
+ // Build a hierarchical output
+ var result strings.Builder
+ result.WriteString("Document Structure:\n\n")
+
+ for _, sec := range sections {
+ indent := strings.Repeat(" ", sec.Level)
+ levelName := getLevelName(sec.Level)
+ result.WriteString(fmt.Sprintf("%s%s: %s (line %d)\n", indent, levelName, sec.Title, sec.LineNumber))
+ }
+
+ return result.String(), "", nil
+}
+
+// parseLaTeXSections extracts section information from LaTeX content
+func parseLaTeXSections(content string) []sectionEntry {
+ var sections []sectionEntry
+
+ // Regex patterns for different section levels
+ patterns := []struct {
+ level int
+ pattern *regexp.Regexp
+ }{
+ {0, regexp.MustCompile(`(?m)^[^%]*\\part\*?\{([^}]*)\}`)},
+ {1, regexp.MustCompile(`(?m)^[^%]*\\chapter\*?\{([^}]*)\}`)},
+ {2, regexp.MustCompile(`(?m)^[^%]*\\section\*?\{([^}]*)\}`)},
+ {3, regexp.MustCompile(`(?m)^[^%]*\\subsection\*?\{([^}]*)\}`)},
+ {4, regexp.MustCompile(`(?m)^[^%]*\\subsubsection\*?\{([^}]*)\}`)},
+ }
+
+ lines := strings.Split(content, "\n")
+
+ for lineNum, line := range lines {
+ for _, p := range patterns {
+ matches := p.pattern.FindStringSubmatch(line)
+ if matches != nil && len(matches) > 1 {
+ title := strings.TrimSpace(matches[1])
+ // Clean up the title (remove LaTeX commands within)
+ title = cleanLaTeXTitle(title)
+ if title != "" {
+ sections = append(sections, sectionEntry{
+ Level: p.level,
+ Title: title,
+ LineNumber: lineNum + 1, // 1-indexed
+ FullContent: line,
+ })
+ }
+ break // Only match one pattern per line
+ }
+ }
+ }
+
+ return sections
+}
+
// Compiled once at package init instead of on every call (the original
// recompiled both regexps per invocation).
var (
	// latexCmdArgRe matches a command with a braced argument, e.g. \textbf{x}.
	latexCmdArgRe = regexp.MustCompile(`\\[a-zA-Z]+\{([^}]*)\}`)
	// latexCmdRe matches a bare command, e.g. \large.
	latexCmdRe = regexp.MustCompile(`\\[a-zA-Z]+`)
)

// cleanLaTeXTitle reduces a LaTeX section title to plain text: commands with
// an argument are replaced by their argument, bare commands are dropped, and
// surrounding whitespace is trimmed.
func cleanLaTeXTitle(title string) string {
	title = latexCmdArgRe.ReplaceAllString(title, "$1")
	title = latexCmdRe.ReplaceAllString(title, "")
	return strings.TrimSpace(title)
}
+
// getLevelName returns a human-readable name for a section level; levels
// outside the known range fall back to "Section".
func getLevelName(level int) string {
	names := [...]string{"Part", "Chapter", "Section", "Subsection", "Subsubsection"}
	if level >= 0 && level < len(names) {
		return names[level]
	}
	return "Section"
}
+
+// GetDocumentStructureTool for backward compatibility (standalone function)
+func GetDocumentStructureTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+ return `[WARNING] get_document_structure tool not properly initialized. Please ensure ProjectService is available.`, "", nil
+}
diff --git a/internal/services/toolkit/tools/latex/locate_section.go b/internal/services/toolkit/tools/latex/locate_section.go
new file mode 100644
index 00000000..d0b26830
--- /dev/null
+++ b/internal/services/toolkit/tools/latex/locate_section.go
@@ -0,0 +1,47 @@
+package latex
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+// LocateSectionToolDescriptionV2 is the OpenAI function schema for the
+// locate_section tool.
+var LocateSectionToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+	OfFunction: &openai.ChatCompletionFunctionToolParam{
+		Function: openai.FunctionDefinitionParam{
+			Name: "locate_section",
+			// The original description repeated the same sentence twice;
+			// keep a single concise sentence for the model.
+			Description: param.NewOpt("Locates a specific section by its title and returns the exact position (file path + line number range)."),
+			Parameters: openai.FunctionParameters{
+				"type": "object",
+				"properties": map[string]interface{}{
+					"title": map[string]any{
+						"type":        "string",
+						"description": "The title of the section to locate (e.g., 'Introduction', 'Related Work').",
+					},
+				},
+				"required": []string{"title"},
+			},
+		},
+	},
+}
+
+// LocateSectionArgs is the JSON argument payload for the locate_section tool.
+type LocateSectionArgs struct {
+	Title string `json:"title"` // section title to search for, e.g. "Introduction"
+}
+
+// LocateSectionTool handles a locate_section tool call. The location logic
+// is not implemented yet, so it parses the arguments and returns a fixed
+// dummy position.
+func LocateSectionTool(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+	var parsed LocateSectionArgs
+	if err := json.Unmarshal(args, &parsed); err != nil {
+		return "", "", err
+	}
+
+	// TODO: Implement actual section location logic
+	reply := fmt.Sprintf(`[DUMMY] Located section '%s':
+File: main.tex
+Start Line: 42
+End Line: 87`, parsed.Title)
+	return reply, "", nil
+}
diff --git a/internal/services/toolkit/tools/latex/read_section_source.go b/internal/services/toolkit/tools/latex/read_section_source.go
new file mode 100644
index 00000000..0b5e0b43
--- /dev/null
+++ b/internal/services/toolkit/tools/latex/read_section_source.go
@@ -0,0 +1,183 @@
+package latex
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var ReadSectionSourceToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "read_section_source",
+ Description: param.NewOpt("Reads the complete LaTeX source code of a specific section by its title."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "title": map[string]any{
+ "type": "string",
+ "description": "The title of the section to read (e.g., 'Introduction', 'Methodology').",
+ },
+ },
+ "required": []string{"title"},
+ },
+ },
+ },
+}
+
+type ReadSectionSourceArgs struct {
+ Title string `json:"title"`
+}
+
+type ReadSectionSourceTool struct {
+ projectService *services.ProjectService
+}
+
+func NewReadSectionSourceTool(projectService *services.ProjectService) *ReadSectionSourceTool {
+ return &ReadSectionSourceTool{
+ projectService: projectService,
+ }
+}
+
+// Call reads the full LaTeX source of the section whose title best matches
+// the requested title. Matching is case-insensitive and accepts substring
+// matches in either direction. The returned text is prefixed with 1-indexed
+// line numbers so the model can reference exact positions.
+func (t *ReadSectionSourceTool) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+	var getArgs ReadSectionSourceArgs
+
+	if err := json.Unmarshal(args, &getArgs); err != nil {
+		return "", "", err
+	}
+
+	// Get project from context
+	actor, projectId, _ := toolkit.GetActorProjectConversationID(ctx)
+	if actor == nil || projectId == "" {
+		return "", "", fmt.Errorf("failed to get actor or project id from context")
+	}
+
+	project, err := t.projectService.GetProject(ctx, actor.ID, projectId)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to get project: %w", err)
+	}
+
+	// Get the full expanded content
+	fullContent, err := project.GetFullContent()
+	if err != nil {
+		return "", "", fmt.Errorf("failed to get full content: %w", err)
+	}
+
+	// Parse sections to find the requested one
+	sections := parseLaTeXSections(fullContent)
+	lines := strings.Split(fullContent, "\n")
+
+	// Find the section with matching title (fuzzy match)
+	searchTitle := strings.ToLower(strings.TrimSpace(getArgs.Title))
+	var targetSection *sectionEntry
+	var targetIndex int
+
+	for i, sec := range sections {
+		sectionTitle := strings.ToLower(sec.Title)
+		if sectionTitle == searchTitle || strings.Contains(sectionTitle, searchTitle) || strings.Contains(searchTitle, sectionTitle) {
+			// BUGFIX: was "targetSection = §ions[i]" — a mis-encoded
+			// "&sections[i]" ("&sect" collapsed to the section sign),
+			// which does not compile.
+			targetSection = &sections[i]
+			targetIndex = i
+			break
+		}
+	}
+
+	if targetSection == nil {
+		// List available sections as a hint
+		var availableTitles []string
+		for _, sec := range sections {
+			availableTitles = append(availableTitles, sec.Title)
+		}
+		return fmt.Sprintf("Section '%s' not found. Available sections: %s", getArgs.Title, strings.Join(availableTitles, ", ")), "", nil
+	}
+
+	// Determine the end of this section (start of next same-or-higher level section or end of document)
+	startLine := targetSection.LineNumber - 1 // 0-indexed
+	endLine := len(lines)
+
+	for i := targetIndex + 1; i < len(sections); i++ {
+		if sections[i].Level <= targetSection.Level {
+			endLine = sections[i].LineNumber - 1 // End before the next section
+			break
+		}
+	}
+
+	// Clamp indices so the slice below cannot panic
+	if startLine >= len(lines) {
+		startLine = len(lines) - 1
+	}
+	if endLine > len(lines) {
+		endLine = len(lines)
+	}
+
+	sectionLines := lines[startLine:endLine]
+
+	var result strings.Builder
+	result.WriteString(fmt.Sprintf("Section: %s (lines %d-%d)\n\n", targetSection.Title, startLine+1, endLine))
+	for i, line := range sectionLines {
+		result.WriteString(fmt.Sprintf("%4d: %s\n", startLine+1+i, line))
+	}
+
+	return result.String(), "", nil
+}
+
+// parseLaTeXSections is defined in document_structure.go
+
+// ReadSectionSourceToolLegacy for backward compatibility (standalone function).
+// It only parses the arguments and reports that the tool is uninitialized.
+func ReadSectionSourceToolLegacy(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+	var parsed ReadSectionSourceArgs
+	if err := json.Unmarshal(args, &parsed); err != nil {
+		return "", "", err
+	}
+
+	msg := fmt.Sprintf(`[WARNING] read_section_source tool not properly initialized. Requested section: '%s'`, parsed.Title)
+	return msg, "", nil
+}
+
+// parseSectionsForReadSection re-parses sectioning commands from content.
+//
+// NOTE(review): this duplicates parseLaTeXSections (document_structure.go,
+// same package) and appears unused by Call above — consider deleting it in a
+// follow-up. Its body now reuses cleanLaTeXTitle instead of re-compiling the
+// two title-cleanup regexes for every matched line, which was O(lines)
+// regexp.MustCompile calls.
+func parseSectionsForReadSection(content string) []sectionEntry {
+	var sections []sectionEntry
+
+	patterns := []struct {
+		level   int
+		pattern *regexp.Regexp
+	}{
+		{0, regexp.MustCompile(`(?m)^[^%]*\\part\*?\{([^}]*)\}`)},
+		{1, regexp.MustCompile(`(?m)^[^%]*\\chapter\*?\{([^}]*)\}`)},
+		{2, regexp.MustCompile(`(?m)^[^%]*\\section\*?\{([^}]*)\}`)},
+		{3, regexp.MustCompile(`(?m)^[^%]*\\subsection\*?\{([^}]*)\}`)},
+		{4, regexp.MustCompile(`(?m)^[^%]*\\subsubsection\*?\{([^}]*)\}`)},
+	}
+
+	lines := strings.Split(content, "\n")
+
+	for lineNum, line := range lines {
+		for _, p := range patterns {
+			matches := p.pattern.FindStringSubmatch(line)
+			if len(matches) > 1 {
+				// Strip LaTeX commands from the title using the shared,
+				// package-level helper (same transform as before).
+				title := cleanLaTeXTitle(matches[1])
+				if title != "" {
+					sections = append(sections, sectionEntry{
+						Level:       p.level,
+						Title:       title,
+						LineNumber:  lineNum + 1, // 1-indexed
+						FullContent: line,
+					})
+				}
+				break // Only match one pattern per line
+			}
+		}
+	}
+
+	return sections
+}
diff --git a/internal/services/toolkit/tools/latex/read_source_line_range.go b/internal/services/toolkit/tools/latex/read_source_line_range.go
new file mode 100644
index 00000000..4d8e871f
--- /dev/null
+++ b/internal/services/toolkit/tools/latex/read_source_line_range.go
@@ -0,0 +1,154 @@
+package latex
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+var ReadSourceLineRangeToolDescriptionV2 = openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: "read_source_line_range",
+ Description: param.NewOpt("(Fallback) Reads the source code from a specific file within a given line range."),
+ Parameters: openai.FunctionParameters{
+ "type": "object",
+ "properties": map[string]interface{}{
+ "file_path": map[string]any{
+ "type": "string",
+ "description": "The path to the LaTeX file to read from.",
+ },
+ "start_line": map[string]any{
+ "type": "integer",
+ "description": "The starting line number (1-indexed).",
+ },
+ "end_line": map[string]any{
+ "type": "integer",
+ "description": "The ending line number (1-indexed, inclusive).",
+ },
+ },
+ "required": []string{"file_path", "start_line", "end_line"},
+ },
+ },
+ },
+}
+
+type ReadSourceLineRangeArgs struct {
+ FilePath string `json:"file_path"`
+ StartLine int `json:"start_line"`
+ EndLine int `json:"end_line"`
+}
+
+type ReadSourceLineRangeTool struct {
+ projectService *services.ProjectService
+}
+
+func NewReadSourceLineRangeTool(projectService *services.ProjectService) *ReadSourceLineRangeTool {
+ return &ReadSourceLineRangeTool{
+ projectService: projectService,
+ }
+}
+
+// Call returns the requested line range from a project file, prefixed with
+// 1-indexed line numbers. Out-of-range end lines are clamped; user-level
+// problems (bad range, missing file) are reported in the tool output rather
+// than as Go errors so the model can react to them.
+func (t *ReadSourceLineRangeTool) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+	var getArgs ReadSourceLineRangeArgs
+	if err := json.Unmarshal(args, &getArgs); err != nil {
+		return "", "", err
+	}
+
+	// Validate line range
+	if getArgs.StartLine < 1 {
+		return "start_line must be at least 1", "", nil
+	}
+	if getArgs.EndLine < getArgs.StartLine {
+		return "end_line must be greater than or equal to start_line", "", nil
+	}
+
+	// Get project from context
+	actor, projectId, _ := toolkit.GetActorProjectConversationID(ctx)
+	if actor == nil || projectId == "" {
+		return "", "", fmt.Errorf("failed to get actor or project id from context")
+	}
+
+	project, err := t.projectService.GetProject(ctx, actor.ID, projectId)
+	if err != nil {
+		return "", "", fmt.Errorf("failed to get project: %w", err)
+	}
+
+	// Normalize the path for matching
+	targetPath := normalizePath(getArgs.FilePath)
+
+	// Find the document by path; docIdx stays -1 when nothing matches.
+	docIdx := -1
+	for i := range project.Docs {
+		docPath := normalizePath(project.Docs[i].Filepath)
+		if docPath == targetPath || strings.HasSuffix(docPath, "/"+targetPath) || docPath == "/"+targetPath {
+			docIdx = i
+			break
+		}
+	}
+
+	if docIdx == -1 {
+		return fmt.Sprintf("File not found: %s", getArgs.FilePath), "", nil
+	}
+
+	doc := project.Docs[docIdx]
+	lines := doc.Lines
+	totalLines := len(lines)
+
+	// Convert to 0-indexed; EndLine is inclusive, so it already serves as the
+	// exclusive upper bound. StartLine >= 1 was checked above, so startIdx
+	// cannot be negative.
+	startIdx := getArgs.StartLine - 1
+	endIdx := getArgs.EndLine
+
+	if startIdx >= totalLines {
+		return fmt.Sprintf("start_line %d is beyond file length (%d lines)", getArgs.StartLine, totalLines), "", nil
+	}
+	if endIdx > totalLines {
+		endIdx = totalLines
+	}
+
+	// Build result with line numbers
+	var result strings.Builder
+	result.WriteString(fmt.Sprintf("File: %s (lines %d-%d of %d)\n\n", doc.Filepath, startIdx+1, endIdx, totalLines))
+	for i := startIdx; i < endIdx; i++ {
+		result.WriteString(fmt.Sprintf("%4d: %s\n", i+1, lines[i]))
+	}
+
+	return result.String(), "", nil
+}
+
+// normalizePath strips leading "/" and "./" markers so paths can be compared
+// regardless of how the caller spelled them: "/main.tex", "./main.tex" and
+// "main.tex" all normalize to "main.tex". Unlike a single TrimPrefix pair,
+// it loops to a fixed point so mixed or repeated prefixes such as "/./x" or
+// "././x" are fully removed (backward compatible: single-prefix inputs
+// produce the same result as before).
+func normalizePath(path string) string {
+	for {
+		trimmed := strings.TrimPrefix(path, "/")
+		trimmed = strings.TrimPrefix(trimmed, "./")
+		if trimmed == path {
+			return path
+		}
+		path = trimmed
+	}
+}
+
+// ReadSourceLineRangeToolLegacy for backward compatibility (standalone function).
+// It only echoes the parsed arguments together with an uninitialized warning.
+func ReadSourceLineRangeToolLegacy(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+	var parsed ReadSourceLineRangeArgs
+	if err := json.Unmarshal(args, &parsed); err != nil {
+		return "", "", err
+	}
+
+	msg := fmt.Sprintf(`[WARNING] read_source_line_range tool not properly initialized. Requested: file '%s' lines %d-%d`,
+		parsed.FilePath, parsed.StartLine, parsed.EndLine)
+	return msg, "", nil
+}
diff --git a/internal/services/toolkit/tools/xtramcp/loader_v2.go b/internal/services/toolkit/tools/xtramcp/loader_v2.go
new file mode 100644
index 00000000..b7662c4c
--- /dev/null
+++ b/internal/services/toolkit/tools/xtramcp/loader_v2.go
@@ -0,0 +1,211 @@
+package xtramcp
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "paperdebugger/internal/libs/db"
+ "paperdebugger/internal/services"
+ "paperdebugger/internal/services/toolkit/registry"
+)
+
+// MCPListToolsResponse represents the JSON-RPC response from tools/list method
+type MCPListToolsResponseV2 struct {
+ JSONRPC string `json:"jsonrpc"`
+ ID int `json:"id"`
+ Result struct {
+ Tools []ToolSchemaV2 `json:"tools"`
+ } `json:"result"`
+}
+
+// loads tools dynamically from backend
+type XtraMCPLoaderV2 struct {
+ db *db.DB
+ projectService *services.ProjectService
+ baseURL string
+ client *http.Client
+ sessionID string // Store the MCP session ID after initialization for re-use
+}
+
+// NewXtraMCPLoader creates a new dynamic XtraMCP loader
+func NewXtraMCPLoaderV2(db *db.DB, projectService *services.ProjectService, baseURL string) *XtraMCPLoaderV2 {
+ return &XtraMCPLoaderV2{
+ db: db,
+ projectService: projectService,
+ baseURL: baseURL,
+ client: &http.Client{},
+ }
+}
+
+// LoadToolsFromBackend fetches tool schemas from backend and registers them
+func (loader *XtraMCPLoaderV2) LoadToolsFromBackend(toolRegistry *registry.ToolRegistryV2) error {
+ if loader.sessionID == "" {
+ return fmt.Errorf("MCP session not initialized - call InitializeMCP first")
+ }
+
+ // Fetch tools from backend using the established session
+ toolSchemas, err := loader.fetchAvailableTools()
+ if err != nil {
+ return fmt.Errorf("failed to fetch tools from backend: %w", err)
+ }
+
+ // Register each tool dynamically, passing the session ID
+ for _, toolSchema := range toolSchemas {
+ dynamicTool := NewDynamicToolV2(loader.db, loader.projectService, toolSchema, loader.baseURL, loader.sessionID)
+
+ // Register the tool with the registry
+ toolRegistry.Register(toolSchema.Name, dynamicTool.Description, dynamicTool.Call)
+
+ fmt.Printf("Registered dynamic tool: %s\n", toolSchema.Name)
+ }
+
+ return nil
+}
+
+// InitializeMCP performs the full MCP initialization handshake, stores session ID, and returns it
+func (loader *XtraMCPLoaderV2) InitializeMCP() (string, error) {
+ // Step 1: Initialize
+ sessionID, err := loader.performInitialize()
+ if err != nil {
+ return "", fmt.Errorf("step 1 - initialize failed: %w", err)
+ }
+
+ // Step 2: Send notifications/initialized
+ err = loader.sendInitializedNotification(sessionID)
+ if err != nil {
+ return "", fmt.Errorf("step 2 - notifications/initialized failed: %w", err)
+ }
+
+ // Store session ID for future use and return it
+ loader.sessionID = sessionID
+
+ return sessionID, nil
+}
+
+// performInitialize performs MCP initialization (1. establish connection).
+// It returns the session ID the server issues via the "mcp-session-id"
+// response header.
+func (loader *XtraMCPLoaderV2) performInitialize() (string, error) {
+	initReq := map[string]interface{}{
+		"jsonrpc": "2.0",
+		"method":  "initialize",
+		"id":      1,
+		"params": map[string]interface{}{
+			"protocolVersion": "2024-11-05",
+			"capabilities":    map[string]interface{}{},
+			"clientInfo": map[string]interface{}{
+				"name":    "paperdebugger-client",
+				"version": "1.0.0",
+			},
+		},
+	}
+
+	jsonData, err := json.Marshal(initReq)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal initialize request: %w", err)
+	}
+
+	req, err := http.NewRequest("POST", loader.baseURL, bytes.NewBuffer(jsonData))
+	if err != nil {
+		return "", fmt.Errorf("failed to create initialize request: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Accept", "application/json, text/event-stream")
+
+	resp, err := loader.client.Do(req)
+	if err != nil {
+		return "", fmt.Errorf("failed to make initialize request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	// Drain the body so the underlying connection can be reused by the
+	// transport (the original left it unread).
+	_, _ = io.Copy(io.Discard, resp.Body)
+
+	// A non-2xx status means initialization failed even if a session header
+	// happens to be present.
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return "", fmt.Errorf("initialize returned HTTP %d", resp.StatusCode)
+	}
+
+	// Extract session ID from response headers
+	sessionID := resp.Header.Get("mcp-session-id")
+	if sessionID == "" {
+		return "", fmt.Errorf("no session ID returned from initialize")
+	}
+
+	return sessionID, nil
+}
+
+// sendInitializedNotification completes MCP initialization by acknowledging
+// the handshake with a "notifications/initialized" message on the session.
+func (loader *XtraMCPLoaderV2) sendInitializedNotification(sessionID string) error {
+	notifyReq := map[string]interface{}{
+		"jsonrpc": "2.0",
+		"method":  "notifications/initialized",
+		"params":  map[string]interface{}{},
+	}
+
+	jsonData, err := json.Marshal(notifyReq)
+	if err != nil {
+		return fmt.Errorf("failed to marshal notification: %w", err)
+	}
+
+	req, err := http.NewRequest("POST", loader.baseURL, bytes.NewBuffer(jsonData))
+	if err != nil {
+		return fmt.Errorf("failed to create notification request: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Accept", "application/json, text/event-stream")
+	req.Header.Set("mcp-session-id", sessionID)
+
+	resp, err := loader.client.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to send notification: %w", err)
+	}
+	defer resp.Body.Close()
+
+	// Drain the body so the connection can be reused, and surface HTTP-level
+	// failures that the original silently ignored.
+	_, _ = io.Copy(io.Discard, resp.Body)
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return fmt.Errorf("notifications/initialized returned HTTP %d", resp.StatusCode)
+	}
+
+	return nil
+}
+
+// fetchAvailableTools calls tools/list on the backend using the established
+// session and returns the advertised tool schemas.
+func (loader *XtraMCPLoaderV2) fetchAvailableTools() ([]ToolSchemaV2, error) {
+	// List all tools using the established session
+	requestBody := map[string]interface{}{
+		"jsonrpc": "2.0",
+		"method":  "tools/list",
+		"params":  map[string]interface{}{},
+		"id":      2,
+	}
+
+	jsonData, err := json.Marshal(requestBody)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal request: %w", err)
+	}
+
+	req, err := http.NewRequest("POST", loader.baseURL, bytes.NewBuffer(jsonData))
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Accept", "application/json, text/event-stream")
+	req.Header.Set("mcp-session-id", loader.sessionID)
+
+	resp, err := loader.client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	// Read the raw response body (SSE format)
+	bodyBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read response body: %w", err)
+	}
+
+	// Fail fast on HTTP-level errors instead of trying to parse an error
+	// page as SSE (the original skipped this check).
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return nil, fmt.Errorf("tools/list returned HTTP %d: %s", resp.StatusCode, string(bodyBytes))
+	}
+
+	extractedJSON, err := parseSSEResponse(bodyBytes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse SSE response: %w", err)
+	}
+
+	// Parse the extracted JSON
+	var mcpResponse MCPListToolsResponseV2
+	if err := json.Unmarshal([]byte(extractedJSON), &mcpResponse); err != nil {
+		return nil, fmt.Errorf("failed to parse JSON from SSE data: %w. JSON data: %s", err, extractedJSON)
+	}
+
+	return mcpResponse.Result.Tools, nil
+}
diff --git a/internal/services/toolkit/tools/xtramcp/tool_v2.go b/internal/services/toolkit/tools/xtramcp/tool_v2.go
new file mode 100644
index 00000000..a63a5a34
--- /dev/null
+++ b/internal/services/toolkit/tools/xtramcp/tool_v2.go
@@ -0,0 +1,164 @@
+package xtramcp
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "paperdebugger/internal/libs/db"
+ "paperdebugger/internal/services"
+ toolCallRecordDB "paperdebugger/internal/services/toolkit/db"
+ "time"
+
+ "github.com/openai/openai-go/v3"
+ "github.com/openai/openai-go/v3/packages/param"
+)
+
+// ToolSchema represents the schema from your backend
+type ToolSchemaV2 struct {
+ Name string `json:"name"`
+ Description string `json:"description"`
+ InputSchema map[string]interface{} `json:"inputSchema"`
+ OutputSchema map[string]interface{} `json:"outputSchema"`
+}
+
+// MCPRequest represents the JSON-RPC request structure
+type MCPRequestV2 struct {
+ JSONRPC string `json:"jsonrpc"`
+ Method string `json:"method"`
+ ID int `json:"id"`
+ Params MCPParamsV2 `json:"params"`
+}
+
+// MCPParams represents the parameters for the MCP request
+type MCPParamsV2 struct {
+ Name string `json:"name"`
+ Arguments map[string]interface{} `json:"arguments"`
+}
+
+// DynamicTool represents a generic tool that can handle any schema
+type DynamicToolV2 struct {
+ Name string
+ Description openai.ChatCompletionToolUnionParam
+ toolCallRecordDB *toolCallRecordDB.ToolCallRecordDB
+ projectService *services.ProjectService
+ coolDownTime time.Duration
+ baseURL string
+ client *http.Client
+ schema map[string]interface{}
+ sessionID string // Reuse the session ID from initialization
+}
+
+// NewDynamicTool creates a new dynamic tool from a schema
+func NewDynamicToolV2(db *db.DB, projectService *services.ProjectService, toolSchema ToolSchemaV2, baseURL string, sessionID string) *DynamicToolV2 {
+ // Create tool description with the schema
+ description := openai.ChatCompletionToolUnionParam{
+ OfFunction: &openai.ChatCompletionFunctionToolParam{
+ Function: openai.FunctionDefinitionParam{
+ Name: toolSchema.Name,
+ Description: param.NewOpt(toolSchema.Description),
+ Parameters: openai.FunctionParameters(toolSchema.InputSchema),
+ },
+ },
+ }
+
+ toolCallRecordDB := toolCallRecordDB.NewToolCallRecordDB(db)
+ return &DynamicToolV2{
+ Name: toolSchema.Name,
+ Description: description,
+ toolCallRecordDB: toolCallRecordDB,
+ projectService: projectService,
+ coolDownTime: 5 * time.Minute,
+ baseURL: baseURL,
+ client: &http.Client{},
+ schema: toolSchema.InputSchema,
+ sessionID: sessionID, // Store the session ID for reuse
+ }
+}
+
+// Call handles the tool execution (generic for any tool): it parses the raw
+// JSON arguments into a map, records the call, forwards it to the MCP
+// backend via executeTool, and records success or failure before returning
+// the backend's response string. The second return value is always empty
+// here (presumably a detail/metadata slot — confirm against the toolkit
+// registry contract).
+func (t *DynamicToolV2) Call(ctx context.Context, toolCallId string, args json.RawMessage) (string, string, error) {
+	// Parse arguments as generic map since we don't know the structure
+	var argsMap map[string]interface{}
+	err := json.Unmarshal(args, &argsMap)
+	if err != nil {
+		return "", "", err
+	}
+
+	// Create function call record
+	record, err := t.toolCallRecordDB.Create(ctx, toolCallId, t.Name, argsMap)
+	if err != nil {
+		return "", "", err
+	}
+
+	// Execute the tool via MCP
+	respStr, err := t.executeTool(argsMap)
+	if err != nil {
+		err = fmt.Errorf("failed to execute tool %s: %v", t.Name, err)
+		// Best-effort bookkeeping; the execution error is what gets returned.
+		t.toolCallRecordDB.OnError(ctx, record, err)
+		return "", "", err
+	}
+
+	// Re-marshal the response string so the stored record holds valid JSON.
+	rawJson, err := json.Marshal(respStr)
+	if err != nil {
+		err = fmt.Errorf("failed to marshal tool result: %v", err)
+		t.toolCallRecordDB.OnError(ctx, record, err)
+		return "", "", err
+	}
+	t.toolCallRecordDB.OnSuccess(ctx, record, string(rawJson))
+
+	return respStr, "", nil
+}
+
+// executeTool makes the tools/call MCP request for this tool with the given
+// arguments and returns the JSON payload extracted from the SSE response.
+func (t *DynamicToolV2) executeTool(args map[string]interface{}) (string, error) {
+	// Use the V2 request/param types declared in this file; the original
+	// constructed the non-V2 MCPRequest/MCPParams, leaving MCPRequestV2 and
+	// MCPParamsV2 defined above unused.
+	request := MCPRequestV2{
+		JSONRPC: "2.0",
+		Method:  "tools/call",
+		ID:      int(time.Now().Unix()), // to ensure unique ID; TODO: consider better ID generation
+		Params: MCPParamsV2{
+			Name:      t.Name,
+			Arguments: args,
+		},
+	}
+
+	// Marshal request to JSON
+	jsonData, err := json.Marshal(request)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal MCP request: %w", err)
+	}
+
+	// Create HTTP request
+	req, err := http.NewRequest("POST", t.baseURL, bytes.NewBuffer(jsonData))
+	if err != nil {
+		return "", fmt.Errorf("failed to create HTTP request: %w", err)
+	}
+
+	// Set headers
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Accept", "application/json, text/event-stream")
+	req.Header.Set("mcp-session-id", t.sessionID) // Use the stored session ID
+
+	// Make the request
+	resp, err := t.client.Do(req)
+	if err != nil {
+		return "", fmt.Errorf("failed to make request: %w", err)
+	}
+	defer resp.Body.Close()
+
+	// Read response
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return "", fmt.Errorf("failed to read response: %w", err)
+	}
+
+	// Surface HTTP-level failures instead of parsing an error page as SSE.
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+		return "", fmt.Errorf("tools/call returned HTTP %d: %s", resp.StatusCode, string(body))
+	}
+
+	extractedJSON, err := parseSSEResponse(body)
+	if err != nil {
+		return "", fmt.Errorf("failed to parse SSE response: %w", err)
+	}
+
+	return extractedJSON, nil
+}
diff --git a/internal/services/user_prompt_debug.tmpl b/internal/services/user_prompt_debug.tmpl
index bcca78cf..e9e0d14c 100644
--- a/internal/services/user_prompt_debug.tmpl
+++ b/internal/services/user_prompt_debug.tmpl
@@ -7,4 +7,11 @@
```
{{ .SelectedText }}
```
+
+{{ if .Surrounding -}}
+# surrounding context
+```
+{{ .Surrounding }}
+```
+{{- end }}
{{- end }}
diff --git a/internal/services/user_prompt_default.tmpl b/internal/services/user_prompt_default.tmpl
index 58f0e615..922bbcdc 100644
--- a/internal/services/user_prompt_default.tmpl
+++ b/internal/services/user_prompt_default.tmpl
@@ -4,6 +4,13 @@ Here is the selected text:
{{ .SelectedText }}
```
+{{ if .Surrounding -}}
+Context around the selection:
+```
+{{ .Surrounding }}
+```
+{{- end }}
+
If user requests to revise the selected text, wrap the revised text in triple backticks.
Otherwise, just answer the question.
---
diff --git a/internal/wire.go b/internal/wire.go
index afad8b9c..f823bc2e 100644
--- a/internal/wire.go
+++ b/internal/wire.go
@@ -28,13 +28,16 @@ var Set = wire.NewSet(
auth.NewOAuthHandler,
auth.NewAuthServer,
chat.NewChatServer,
+ chat.NewChatServerV2,
user.NewUserServer,
project.NewProjectServer,
comment.NewCommentServer,
aiclient.NewAIClient,
+ aiclient.NewAIClientV2,
services.NewReverseCommentService,
services.NewChatService,
+ services.NewChatServiceV2,
services.NewTokenService,
services.NewUserService,
services.NewProjectService,
diff --git a/internal/wire_gen.go b/internal/wire_gen.go
index 726f8f87..75c4e91a 100644
--- a/internal/wire_gen.go
+++ b/internal/wire_gen.go
@@ -38,11 +38,14 @@ func InitializeApp() (*api.Server, error) {
aiClient := client.NewAIClient(dbDB, reverseCommentService, projectService, cfgCfg, loggerLogger)
chatService := services.NewChatService(dbDB, cfgCfg, loggerLogger)
chatServiceServer := chat.NewChatServer(aiClient, chatService, projectService, userService, loggerLogger, cfgCfg)
+ aiClientV2 := client.NewAIClientV2(dbDB, reverseCommentService, projectService, cfgCfg, loggerLogger)
+ chatServiceV2 := services.NewChatServiceV2(dbDB, cfgCfg, loggerLogger)
+ chatv2ChatServiceServer := chat.NewChatServerV2(aiClientV2, chatServiceV2, projectService, userService, loggerLogger, cfgCfg)
promptService := services.NewPromptService(dbDB, cfgCfg, loggerLogger)
userServiceServer := user.NewUserServer(userService, promptService, cfgCfg, loggerLogger)
projectServiceServer := project.NewProjectServer(projectService, loggerLogger, cfgCfg)
commentServiceServer := comment.NewCommentServer(projectService, chatService, reverseCommentService, loggerLogger, cfgCfg)
- grpcServer := api.NewGrpcServer(userService, cfgCfg, authServiceServer, chatServiceServer, userServiceServer, projectServiceServer, commentServiceServer)
+ grpcServer := api.NewGrpcServer(userService, cfgCfg, authServiceServer, chatServiceServer, chatv2ChatServiceServer, userServiceServer, projectServiceServer, commentServiceServer)
oAuthService := services.NewOAuthService(dbDB, cfgCfg, loggerLogger)
oAuthHandler := auth.NewOAuthHandler(oAuthService)
ginServer := api.NewGinServer(cfgCfg, oAuthHandler)
@@ -52,4 +55,4 @@ func InitializeApp() (*api.Server, error) {
// wire.go:
-var Set = wire.NewSet(api.NewServer, api.NewGrpcServer, api.NewGinServer, auth.NewOAuthHandler, auth.NewAuthServer, chat.NewChatServer, user.NewUserServer, project.NewProjectServer, comment.NewCommentServer, client.NewAIClient, services.NewReverseCommentService, services.NewChatService, services.NewTokenService, services.NewUserService, services.NewProjectService, services.NewPromptService, services.NewOAuthService, cfg.GetCfg, logger.GetLogger, db.NewDB)
+var Set = wire.NewSet(api.NewServer, api.NewGrpcServer, api.NewGinServer, auth.NewOAuthHandler, auth.NewAuthServer, chat.NewChatServer, chat.NewChatServerV2, user.NewUserServer, project.NewProjectServer, comment.NewCommentServer, client.NewAIClient, client.NewAIClientV2, services.NewReverseCommentService, services.NewChatService, services.NewChatServiceV2, services.NewTokenService, services.NewUserService, services.NewProjectService, services.NewPromptService, services.NewOAuthService, cfg.GetCfg, logger.GetLogger, db.NewDB)
diff --git a/pkg/gen/api/chat/v2/chat.pb.go b/pkg/gen/api/chat/v2/chat.pb.go
new file mode 100644
index 00000000..0db41ebd
--- /dev/null
+++ b/pkg/gen/api/chat/v2/chat.pb.go
@@ -0,0 +1,2073 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.11
+// protoc (unknown)
+// source: chat/v2/chat.proto
+
+package chatv2
+
+import (
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ConversationType int32
+
+const (
+ ConversationType_CONVERSATION_TYPE_UNSPECIFIED ConversationType = 0
+ ConversationType_CONVERSATION_TYPE_DEBUG ConversationType = 1 // does not contain any customized messages, the
+)
+
+// Enum value maps for ConversationType.
+var (
+ ConversationType_name = map[int32]string{
+ 0: "CONVERSATION_TYPE_UNSPECIFIED",
+ 1: "CONVERSATION_TYPE_DEBUG",
+ }
+ ConversationType_value = map[string]int32{
+ "CONVERSATION_TYPE_UNSPECIFIED": 0,
+ "CONVERSATION_TYPE_DEBUG": 1,
+ }
+)
+
+func (x ConversationType) Enum() *ConversationType {
+ p := new(ConversationType)
+ *p = x
+ return p
+}
+
+func (x ConversationType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ConversationType) Descriptor() protoreflect.EnumDescriptor {
+ return file_chat_v2_chat_proto_enumTypes[0].Descriptor()
+}
+
+func (ConversationType) Type() protoreflect.EnumType {
+ return &file_chat_v2_chat_proto_enumTypes[0]
+}
+
+func (x ConversationType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ConversationType.Descriptor instead.
+func (ConversationType) EnumDescriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{0}
+}
+
+type MessageTypeToolCall struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Args string `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` // Json string
+ Result string `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` // Json string
+ Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` // Json string
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessageTypeToolCall) Reset() {
+ *x = MessageTypeToolCall{}
+ mi := &file_chat_v2_chat_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageTypeToolCall) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageTypeToolCall) ProtoMessage() {}
+
+func (x *MessageTypeToolCall) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageTypeToolCall.ProtoReflect.Descriptor instead.
+func (*MessageTypeToolCall) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MessageTypeToolCall) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MessageTypeToolCall) GetArgs() string {
+ if x != nil {
+ return x.Args
+ }
+ return ""
+}
+
+func (x *MessageTypeToolCall) GetResult() string {
+ if x != nil {
+ return x.Result
+ }
+ return ""
+}
+
+func (x *MessageTypeToolCall) GetError() string {
+ if x != nil {
+ return x.Error
+ }
+ return ""
+}
+
+type MessageTypeToolCallPrepareArguments struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Args string `protobuf:"bytes,2,opt,name=args,proto3" json:"args,omitempty"` // Json string
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessageTypeToolCallPrepareArguments) Reset() {
+ *x = MessageTypeToolCallPrepareArguments{}
+ mi := &file_chat_v2_chat_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageTypeToolCallPrepareArguments) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageTypeToolCallPrepareArguments) ProtoMessage() {}
+
+func (x *MessageTypeToolCallPrepareArguments) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageTypeToolCallPrepareArguments.ProtoReflect.Descriptor instead.
+func (*MessageTypeToolCallPrepareArguments) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *MessageTypeToolCallPrepareArguments) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *MessageTypeToolCallPrepareArguments) GetArgs() string {
+ if x != nil {
+ return x.Args
+ }
+ return ""
+}
+
+type MessageTypeSystem struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessageTypeSystem) Reset() {
+ *x = MessageTypeSystem{}
+ mi := &file_chat_v2_chat_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageTypeSystem) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageTypeSystem) ProtoMessage() {}
+
+func (x *MessageTypeSystem) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageTypeSystem.ProtoReflect.Descriptor instead.
+func (*MessageTypeSystem) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *MessageTypeSystem) GetContent() string {
+ if x != nil {
+ return x.Content
+ }
+ return ""
+}
+
+type MessageTypeAssistant struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ ModelSlug string `protobuf:"bytes,2,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessageTypeAssistant) Reset() {
+ *x = MessageTypeAssistant{}
+ mi := &file_chat_v2_chat_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageTypeAssistant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageTypeAssistant) ProtoMessage() {}
+
+func (x *MessageTypeAssistant) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageTypeAssistant.ProtoReflect.Descriptor instead.
+func (*MessageTypeAssistant) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MessageTypeAssistant) GetContent() string {
+ if x != nil {
+ return x.Content
+ }
+ return ""
+}
+
+func (x *MessageTypeAssistant) GetModelSlug() string {
+ if x != nil {
+ return x.ModelSlug
+ }
+ return ""
+}
+
+type MessageTypeUser struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ SelectedText *string `protobuf:"bytes,2,opt,name=selected_text,json=selectedText,proto3,oneof" json:"selected_text,omitempty"`
+ Surrounding *string `protobuf:"bytes,7,opt,name=surrounding,proto3,oneof" json:"surrounding,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessageTypeUser) Reset() {
+ *x = MessageTypeUser{}
+ mi := &file_chat_v2_chat_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageTypeUser) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageTypeUser) ProtoMessage() {}
+
+func (x *MessageTypeUser) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageTypeUser.ProtoReflect.Descriptor instead.
+func (*MessageTypeUser) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *MessageTypeUser) GetContent() string {
+ if x != nil {
+ return x.Content
+ }
+ return ""
+}
+
+func (x *MessageTypeUser) GetSelectedText() string {
+ if x != nil && x.SelectedText != nil {
+ return *x.SelectedText
+ }
+ return ""
+}
+
+func (x *MessageTypeUser) GetSurrounding() string {
+ if x != nil && x.Surrounding != nil {
+ return *x.Surrounding
+ }
+ return ""
+}
+
+type MessageTypeUnknown struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessageTypeUnknown) Reset() {
+ *x = MessageTypeUnknown{}
+ mi := &file_chat_v2_chat_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageTypeUnknown) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageTypeUnknown) ProtoMessage() {}
+
+func (x *MessageTypeUnknown) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageTypeUnknown.ProtoReflect.Descriptor instead.
+func (*MessageTypeUnknown) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *MessageTypeUnknown) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+type MessagePayload struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to MessageType:
+ //
+ // *MessagePayload_System
+ // *MessagePayload_User
+ // *MessagePayload_Assistant
+ // *MessagePayload_ToolCallPrepareArguments
+ // *MessagePayload_ToolCall
+ // *MessagePayload_Unknown
+ MessageType isMessagePayload_MessageType `protobuf_oneof:"message_type"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessagePayload) Reset() {
+ *x = MessagePayload{}
+ mi := &file_chat_v2_chat_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessagePayload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessagePayload) ProtoMessage() {}
+
+func (x *MessagePayload) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessagePayload.ProtoReflect.Descriptor instead.
+func (*MessagePayload) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *MessagePayload) GetMessageType() isMessagePayload_MessageType {
+ if x != nil {
+ return x.MessageType
+ }
+ return nil
+}
+
+func (x *MessagePayload) GetSystem() *MessageTypeSystem {
+ if x != nil {
+ if x, ok := x.MessageType.(*MessagePayload_System); ok {
+ return x.System
+ }
+ }
+ return nil
+}
+
+func (x *MessagePayload) GetUser() *MessageTypeUser {
+ if x != nil {
+ if x, ok := x.MessageType.(*MessagePayload_User); ok {
+ return x.User
+ }
+ }
+ return nil
+}
+
+func (x *MessagePayload) GetAssistant() *MessageTypeAssistant {
+ if x != nil {
+ if x, ok := x.MessageType.(*MessagePayload_Assistant); ok {
+ return x.Assistant
+ }
+ }
+ return nil
+}
+
+func (x *MessagePayload) GetToolCallPrepareArguments() *MessageTypeToolCallPrepareArguments {
+ if x != nil {
+ if x, ok := x.MessageType.(*MessagePayload_ToolCallPrepareArguments); ok {
+ return x.ToolCallPrepareArguments
+ }
+ }
+ return nil
+}
+
+func (x *MessagePayload) GetToolCall() *MessageTypeToolCall {
+ if x != nil {
+ if x, ok := x.MessageType.(*MessagePayload_ToolCall); ok {
+ return x.ToolCall
+ }
+ }
+ return nil
+}
+
+func (x *MessagePayload) GetUnknown() *MessageTypeUnknown {
+ if x != nil {
+ if x, ok := x.MessageType.(*MessagePayload_Unknown); ok {
+ return x.Unknown
+ }
+ }
+ return nil
+}
+
+type isMessagePayload_MessageType interface {
+ isMessagePayload_MessageType()
+}
+
+type MessagePayload_System struct {
+ System *MessageTypeSystem `protobuf:"bytes,1,opt,name=system,proto3,oneof"`
+}
+
+type MessagePayload_User struct {
+ User *MessageTypeUser `protobuf:"bytes,2,opt,name=user,proto3,oneof"`
+}
+
+type MessagePayload_Assistant struct {
+ Assistant *MessageTypeAssistant `protobuf:"bytes,3,opt,name=assistant,proto3,oneof"`
+}
+
+type MessagePayload_ToolCallPrepareArguments struct {
+ ToolCallPrepareArguments *MessageTypeToolCallPrepareArguments `protobuf:"bytes,4,opt,name=tool_call_prepare_arguments,json=toolCallPrepareArguments,proto3,oneof"`
+}
+
+type MessagePayload_ToolCall struct {
+ ToolCall *MessageTypeToolCall `protobuf:"bytes,5,opt,name=tool_call,json=toolCall,proto3,oneof"`
+}
+
+type MessagePayload_Unknown struct {
+ Unknown *MessageTypeUnknown `protobuf:"bytes,6,opt,name=unknown,proto3,oneof"`
+}
+
+func (*MessagePayload_System) isMessagePayload_MessageType() {}
+
+func (*MessagePayload_User) isMessagePayload_MessageType() {}
+
+func (*MessagePayload_Assistant) isMessagePayload_MessageType() {}
+
+func (*MessagePayload_ToolCallPrepareArguments) isMessagePayload_MessageType() {}
+
+func (*MessagePayload_ToolCall) isMessagePayload_MessageType() {}
+
+func (*MessagePayload_Unknown) isMessagePayload_MessageType() {}
+
+type Message struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
+ Payload *MessagePayload `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"`
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Message) Reset() {
+ *x = Message{}
+ mi := &file_chat_v2_chat_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
+func (*Message) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Message) GetMessageId() string {
+ if x != nil {
+ return x.MessageId
+ }
+ return ""
+}
+
+func (x *Message) GetPayload() *MessagePayload {
+ if x != nil {
+ return x.Payload
+ }
+ return nil
+}
+
+func (x *Message) GetTimestamp() int64 {
+ if x != nil {
+ return x.Timestamp
+ }
+ return 0
+}
+
+type Conversation struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+ ModelSlug string `protobuf:"bytes,3,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"`
+ // If list conversations, then messages length is 0.
+ Messages []*Message `protobuf:"bytes,4,rep,name=messages,proto3" json:"messages,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Conversation) Reset() {
+ *x = Conversation{}
+ mi := &file_chat_v2_chat_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Conversation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Conversation) ProtoMessage() {}
+
+func (x *Conversation) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Conversation.ProtoReflect.Descriptor instead.
+func (*Conversation) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Conversation) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *Conversation) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *Conversation) GetModelSlug() string {
+ if x != nil {
+ return x.ModelSlug
+ }
+ return ""
+}
+
+func (x *Conversation) GetMessages() []*Message {
+ if x != nil {
+ return x.Messages
+ }
+ return nil
+}
+
+type ListConversationsRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ProjectId *string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3,oneof" json:"project_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ListConversationsRequest) Reset() {
+ *x = ListConversationsRequest{}
+ mi := &file_chat_v2_chat_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListConversationsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListConversationsRequest) ProtoMessage() {}
+
+func (x *ListConversationsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListConversationsRequest.ProtoReflect.Descriptor instead.
+func (*ListConversationsRequest) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ListConversationsRequest) GetProjectId() string {
+ if x != nil && x.ProjectId != nil {
+ return *x.ProjectId
+ }
+ return ""
+}
+
+type ListConversationsResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // In this response, the length of conversations[i].messages should be 0.
+ Conversations []*Conversation `protobuf:"bytes,1,rep,name=conversations,proto3" json:"conversations,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ListConversationsResponse) Reset() {
+ *x = ListConversationsResponse{}
+ mi := &file_chat_v2_chat_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListConversationsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListConversationsResponse) ProtoMessage() {}
+
+func (x *ListConversationsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListConversationsResponse.ProtoReflect.Descriptor instead.
+func (*ListConversationsResponse) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *ListConversationsResponse) GetConversations() []*Conversation {
+ if x != nil {
+ return x.Conversations
+ }
+ return nil
+}
+
+type GetConversationRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetConversationRequest) Reset() {
+ *x = GetConversationRequest{}
+ mi := &file_chat_v2_chat_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetConversationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetConversationRequest) ProtoMessage() {}
+
+func (x *GetConversationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetConversationRequest.ProtoReflect.Descriptor instead.
+func (*GetConversationRequest) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *GetConversationRequest) GetConversationId() string {
+ if x != nil {
+ return x.ConversationId
+ }
+ return ""
+}
+
+type GetConversationResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *GetConversationResponse) Reset() {
+ *x = GetConversationResponse{}
+ mi := &file_chat_v2_chat_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetConversationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetConversationResponse) ProtoMessage() {}
+
+func (x *GetConversationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetConversationResponse.ProtoReflect.Descriptor instead.
+func (*GetConversationResponse) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *GetConversationResponse) GetConversation() *Conversation {
+ if x != nil {
+ return x.Conversation
+ }
+ return nil
+}
+
+type UpdateConversationRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"`
+ Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UpdateConversationRequest) Reset() {
+ *x = UpdateConversationRequest{}
+ mi := &file_chat_v2_chat_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateConversationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateConversationRequest) ProtoMessage() {}
+
+func (x *UpdateConversationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateConversationRequest.ProtoReflect.Descriptor instead.
+func (*UpdateConversationRequest) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *UpdateConversationRequest) GetConversationId() string {
+ if x != nil {
+ return x.ConversationId
+ }
+ return ""
+}
+
+func (x *UpdateConversationRequest) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+type UpdateConversationResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Conversation *Conversation `protobuf:"bytes,1,opt,name=conversation,proto3" json:"conversation,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *UpdateConversationResponse) Reset() {
+ *x = UpdateConversationResponse{}
+ mi := &file_chat_v2_chat_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateConversationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateConversationResponse) ProtoMessage() {}
+
+func (x *UpdateConversationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateConversationResponse.ProtoReflect.Descriptor instead.
+func (*UpdateConversationResponse) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *UpdateConversationResponse) GetConversation() *Conversation {
+ if x != nil {
+ return x.Conversation
+ }
+ return nil
+}
+
+type DeleteConversationRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DeleteConversationRequest) Reset() {
+ *x = DeleteConversationRequest{}
+ mi := &file_chat_v2_chat_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteConversationRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteConversationRequest) ProtoMessage() {}
+
+func (x *DeleteConversationRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteConversationRequest.ProtoReflect.Descriptor instead.
+func (*DeleteConversationRequest) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *DeleteConversationRequest) GetConversationId() string {
+ if x != nil {
+ return x.ConversationId
+ }
+ return ""
+}
+
+type DeleteConversationResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *DeleteConversationResponse) Reset() {
+ *x = DeleteConversationResponse{}
+ mi := &file_chat_v2_chat_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteConversationResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteConversationResponse) ProtoMessage() {}
+
+func (x *DeleteConversationResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteConversationResponse.ProtoReflect.Descriptor instead.
+func (*DeleteConversationResponse) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{16}
+}
+
+type SupportedModel struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Slug string `protobuf:"bytes,2,opt,name=slug,proto3" json:"slug,omitempty"`
+ TotalContext int64 `protobuf:"varint,3,opt,name=total_context,json=totalContext,proto3" json:"total_context,omitempty"`
+ MaxOutput int64 `protobuf:"varint,4,opt,name=max_output,json=maxOutput,proto3" json:"max_output,omitempty"`
+ InputPrice int64 `protobuf:"varint,5,opt,name=input_price,json=inputPrice,proto3" json:"input_price,omitempty"` // in cents per 1M tokens
+ OutputPrice int64 `protobuf:"varint,6,opt,name=output_price,json=outputPrice,proto3" json:"output_price,omitempty"` // in cents per 1M tokens
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SupportedModel) Reset() {
+ *x = SupportedModel{}
+ mi := &file_chat_v2_chat_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SupportedModel) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SupportedModel) ProtoMessage() {}
+
+func (x *SupportedModel) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SupportedModel.ProtoReflect.Descriptor instead.
+func (*SupportedModel) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *SupportedModel) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SupportedModel) GetSlug() string {
+ if x != nil {
+ return x.Slug
+ }
+ return ""
+}
+
+func (x *SupportedModel) GetTotalContext() int64 {
+ if x != nil {
+ return x.TotalContext
+ }
+ return 0
+}
+
+func (x *SupportedModel) GetMaxOutput() int64 {
+ if x != nil {
+ return x.MaxOutput
+ }
+ return 0
+}
+
+func (x *SupportedModel) GetInputPrice() int64 {
+ if x != nil {
+ return x.InputPrice
+ }
+ return 0
+}
+
+func (x *SupportedModel) GetOutputPrice() int64 {
+ if x != nil {
+ return x.OutputPrice
+ }
+ return 0
+}
+
+type ListSupportedModelsRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ListSupportedModelsRequest) Reset() {
+ *x = ListSupportedModelsRequest{}
+ mi := &file_chat_v2_chat_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSupportedModelsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSupportedModelsRequest) ProtoMessage() {}
+
+func (x *ListSupportedModelsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[18]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSupportedModelsRequest.ProtoReflect.Descriptor instead.
+func (*ListSupportedModelsRequest) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{18}
+}
+
+type ListSupportedModelsResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Models []*SupportedModel `protobuf:"bytes,1,rep,name=models,proto3" json:"models,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ListSupportedModelsResponse) Reset() {
+ *x = ListSupportedModelsResponse{}
+ mi := &file_chat_v2_chat_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSupportedModelsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSupportedModelsResponse) ProtoMessage() {}
+
+func (x *ListSupportedModelsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[19]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSupportedModelsResponse.ProtoReflect.Descriptor instead.
+func (*ListSupportedModelsResponse) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *ListSupportedModelsResponse) GetModels() []*SupportedModel {
+ if x != nil {
+ return x.Models
+ }
+ return nil
+}
+
+// Information sent once at the beginning of a new conversation stream
+type StreamInitialization struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"`
+ ModelSlug string `protobuf:"bytes,2,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *StreamInitialization) Reset() {
+ *x = StreamInitialization{}
+ mi := &file_chat_v2_chat_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamInitialization) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamInitialization) ProtoMessage() {}
+
+func (x *StreamInitialization) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[20]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamInitialization.ProtoReflect.Descriptor instead.
+func (*StreamInitialization) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *StreamInitialization) GetConversationId() string {
+ if x != nil {
+ return x.ConversationId
+ }
+ return ""
+}
+
+func (x *StreamInitialization) GetModelSlug() string {
+ if x != nil {
+ return x.ModelSlug
+ }
+ return ""
+}
+
+// Designed as StreamPartBegin and StreamPartEnd to
+// handle the case where assistant and tool are called at the same time.
+//
+// User: Please answer me "Ok I will do that", then call "get_weather"
+// Assistant: Ok I will do that + Tool: get_weather
+type StreamPartBegin struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
+ Payload *MessagePayload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *StreamPartBegin) Reset() {
+ *x = StreamPartBegin{}
+ mi := &file_chat_v2_chat_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamPartBegin) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamPartBegin) ProtoMessage() {}
+
+func (x *StreamPartBegin) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[21]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamPartBegin.ProtoReflect.Descriptor instead.
+func (*StreamPartBegin) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{21}
+}
+
+func (x *StreamPartBegin) GetMessageId() string {
+ if x != nil {
+ return x.MessageId
+ }
+ return ""
+}
+
+func (x *StreamPartBegin) GetPayload() *MessagePayload {
+ if x != nil {
+ return x.Payload
+ }
+ return nil
+}
+
+// Note: After the StreamPartBegin of tool_call, there can be no MessageChunk,
+//
+// and the StreamPartEnd can be directly called when the result is ready.
+type MessageChunk struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` // The id of the message that this chunk belongs to
+ Delta string `protobuf:"bytes,2,opt,name=delta,proto3" json:"delta,omitempty"` // The small piece of text
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *MessageChunk) Reset() {
+ *x = MessageChunk{}
+ mi := &file_chat_v2_chat_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MessageChunk) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageChunk) ProtoMessage() {}
+
+func (x *MessageChunk) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[22]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageChunk.ProtoReflect.Descriptor instead.
+func (*MessageChunk) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{22}
+}
+
+func (x *MessageChunk) GetMessageId() string {
+ if x != nil {
+ return x.MessageId
+ }
+ return ""
+}
+
+func (x *MessageChunk) GetDelta() string {
+ if x != nil {
+ return x.Delta
+ }
+ return ""
+}
+
+type IncompleteIndicator struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"`
+ ResponseId string `protobuf:"bytes,2,opt,name=response_id,json=responseId,proto3" json:"response_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *IncompleteIndicator) Reset() {
+ *x = IncompleteIndicator{}
+ mi := &file_chat_v2_chat_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *IncompleteIndicator) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IncompleteIndicator) ProtoMessage() {}
+
+func (x *IncompleteIndicator) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[23]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IncompleteIndicator.ProtoReflect.Descriptor instead.
+func (*IncompleteIndicator) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{23}
+}
+
+func (x *IncompleteIndicator) GetReason() string {
+ if x != nil {
+ return x.Reason
+ }
+ return ""
+}
+
+func (x *IncompleteIndicator) GetResponseId() string {
+ if x != nil {
+ return x.ResponseId
+ }
+ return ""
+}
+
+type StreamPartEnd struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ MessageId string `protobuf:"bytes,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
+ Payload *MessagePayload `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *StreamPartEnd) Reset() {
+ *x = StreamPartEnd{}
+ mi := &file_chat_v2_chat_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamPartEnd) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamPartEnd) ProtoMessage() {}
+
+func (x *StreamPartEnd) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[24]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamPartEnd.ProtoReflect.Descriptor instead.
+func (*StreamPartEnd) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{24}
+}
+
+func (x *StreamPartEnd) GetMessageId() string {
+ if x != nil {
+ return x.MessageId
+ }
+ return ""
+}
+
+func (x *StreamPartEnd) GetPayload() *MessagePayload {
+ if x != nil {
+ return x.Payload
+ }
+ return nil
+}
+
+// Sent when the current AI response is fully streamed
+type StreamFinalization struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ConversationId string `protobuf:"bytes,1,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *StreamFinalization) Reset() {
+ *x = StreamFinalization{}
+ mi := &file_chat_v2_chat_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamFinalization) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamFinalization) ProtoMessage() {}
+
+func (x *StreamFinalization) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[25]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamFinalization.ProtoReflect.Descriptor instead.
+func (*StreamFinalization) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{25}
+}
+
+func (x *StreamFinalization) GetConversationId() string {
+ if x != nil {
+ return x.ConversationId
+ }
+ return ""
+}
+
+type StreamError struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ErrorMessage string `protobuf:"bytes,1,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *StreamError) Reset() {
+ *x = StreamError{}
+ mi := &file_chat_v2_chat_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *StreamError) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StreamError) ProtoMessage() {}
+
+func (x *StreamError) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[26]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StreamError.ProtoReflect.Descriptor instead.
+func (*StreamError) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *StreamError) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+// This message should be the same as CreateConversationMessageRequest
+// Note: If conversation_id is provided,
+//
+// the conversation will be created and returned.
+type CreateConversationMessageStreamRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ ConversationId *string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3,oneof" json:"conversation_id,omitempty"`
+ ModelSlug string `protobuf:"bytes,3,opt,name=model_slug,json=modelSlug,proto3" json:"model_slug,omitempty"`
+ UserMessage string `protobuf:"bytes,4,opt,name=user_message,json=userMessage,proto3" json:"user_message,omitempty"`
+ UserSelectedText *string `protobuf:"bytes,5,opt,name=user_selected_text,json=userSelectedText,proto3,oneof" json:"user_selected_text,omitempty"`
+ ConversationType *ConversationType `protobuf:"varint,6,opt,name=conversation_type,json=conversationType,proto3,enum=chat.v2.ConversationType,oneof" json:"conversation_type,omitempty"`
+ Surrounding *string `protobuf:"bytes,8,opt,name=surrounding,proto3,oneof" json:"surrounding,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *CreateConversationMessageStreamRequest) Reset() {
+ *x = CreateConversationMessageStreamRequest{}
+ mi := &file_chat_v2_chat_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateConversationMessageStreamRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateConversationMessageStreamRequest) ProtoMessage() {}
+
+func (x *CreateConversationMessageStreamRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateConversationMessageStreamRequest.ProtoReflect.Descriptor instead.
+func (*CreateConversationMessageStreamRequest) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{27}
+}
+
+func (x *CreateConversationMessageStreamRequest) GetProjectId() string {
+ if x != nil {
+ return x.ProjectId
+ }
+ return ""
+}
+
+func (x *CreateConversationMessageStreamRequest) GetConversationId() string {
+ if x != nil && x.ConversationId != nil {
+ return *x.ConversationId
+ }
+ return ""
+}
+
+func (x *CreateConversationMessageStreamRequest) GetModelSlug() string {
+ if x != nil {
+ return x.ModelSlug
+ }
+ return ""
+}
+
+func (x *CreateConversationMessageStreamRequest) GetUserMessage() string {
+ if x != nil {
+ return x.UserMessage
+ }
+ return ""
+}
+
+func (x *CreateConversationMessageStreamRequest) GetUserSelectedText() string {
+ if x != nil && x.UserSelectedText != nil {
+ return *x.UserSelectedText
+ }
+ return ""
+}
+
+func (x *CreateConversationMessageStreamRequest) GetConversationType() ConversationType {
+ if x != nil && x.ConversationType != nil {
+ return *x.ConversationType
+ }
+ return ConversationType_CONVERSATION_TYPE_UNSPECIFIED
+}
+
+func (x *CreateConversationMessageStreamRequest) GetSurrounding() string {
+ if x != nil && x.Surrounding != nil {
+ return *x.Surrounding
+ }
+ return ""
+}
+
+// Response for streaming a message within an existing conversation
+type CreateConversationMessageStreamResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Types that are valid to be assigned to ResponsePayload:
+ //
+ // *CreateConversationMessageStreamResponse_StreamInitialization
+ // *CreateConversationMessageStreamResponse_StreamPartBegin
+ // *CreateConversationMessageStreamResponse_MessageChunk
+ // *CreateConversationMessageStreamResponse_IncompleteIndicator
+ // *CreateConversationMessageStreamResponse_StreamPartEnd
+ // *CreateConversationMessageStreamResponse_StreamFinalization
+ // *CreateConversationMessageStreamResponse_StreamError
+ ResponsePayload isCreateConversationMessageStreamResponse_ResponsePayload `protobuf_oneof:"response_payload"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *CreateConversationMessageStreamResponse) Reset() {
+ *x = CreateConversationMessageStreamResponse{}
+ mi := &file_chat_v2_chat_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateConversationMessageStreamResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateConversationMessageStreamResponse) ProtoMessage() {}
+
+func (x *CreateConversationMessageStreamResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_chat_v2_chat_proto_msgTypes[28]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateConversationMessageStreamResponse.ProtoReflect.Descriptor instead.
+func (*CreateConversationMessageStreamResponse) Descriptor() ([]byte, []int) {
+ return file_chat_v2_chat_proto_rawDescGZIP(), []int{28}
+}
+
+func (x *CreateConversationMessageStreamResponse) GetResponsePayload() isCreateConversationMessageStreamResponse_ResponsePayload {
+ if x != nil {
+ return x.ResponsePayload
+ }
+ return nil
+}
+
+func (x *CreateConversationMessageStreamResponse) GetStreamInitialization() *StreamInitialization {
+ if x != nil {
+ if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamInitialization); ok {
+ return x.StreamInitialization
+ }
+ }
+ return nil
+}
+
+func (x *CreateConversationMessageStreamResponse) GetStreamPartBegin() *StreamPartBegin {
+ if x != nil {
+ if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamPartBegin); ok {
+ return x.StreamPartBegin
+ }
+ }
+ return nil
+}
+
+func (x *CreateConversationMessageStreamResponse) GetMessageChunk() *MessageChunk {
+ if x != nil {
+ if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_MessageChunk); ok {
+ return x.MessageChunk
+ }
+ }
+ return nil
+}
+
+func (x *CreateConversationMessageStreamResponse) GetIncompleteIndicator() *IncompleteIndicator {
+ if x != nil {
+ if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_IncompleteIndicator); ok {
+ return x.IncompleteIndicator
+ }
+ }
+ return nil
+}
+
+func (x *CreateConversationMessageStreamResponse) GetStreamPartEnd() *StreamPartEnd {
+ if x != nil {
+ if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamPartEnd); ok {
+ return x.StreamPartEnd
+ }
+ }
+ return nil
+}
+
+func (x *CreateConversationMessageStreamResponse) GetStreamFinalization() *StreamFinalization {
+ if x != nil {
+ if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamFinalization); ok {
+ return x.StreamFinalization
+ }
+ }
+ return nil
+}
+
+func (x *CreateConversationMessageStreamResponse) GetStreamError() *StreamError {
+ if x != nil {
+ if x, ok := x.ResponsePayload.(*CreateConversationMessageStreamResponse_StreamError); ok {
+ return x.StreamError
+ }
+ }
+ return nil
+}
+
+type isCreateConversationMessageStreamResponse_ResponsePayload interface {
+ isCreateConversationMessageStreamResponse_ResponsePayload()
+}
+
+type CreateConversationMessageStreamResponse_StreamInitialization struct {
+ StreamInitialization *StreamInitialization `protobuf:"bytes,1,opt,name=stream_initialization,json=streamInitialization,proto3,oneof"`
+}
+
+type CreateConversationMessageStreamResponse_StreamPartBegin struct {
+ StreamPartBegin *StreamPartBegin `protobuf:"bytes,2,opt,name=stream_part_begin,json=streamPartBegin,proto3,oneof"`
+}
+
+type CreateConversationMessageStreamResponse_MessageChunk struct {
+ MessageChunk *MessageChunk `protobuf:"bytes,3,opt,name=message_chunk,json=messageChunk,proto3,oneof"`
+}
+
+type CreateConversationMessageStreamResponse_IncompleteIndicator struct {
+ IncompleteIndicator *IncompleteIndicator `protobuf:"bytes,4,opt,name=incomplete_indicator,json=incompleteIndicator,proto3,oneof"`
+}
+
+type CreateConversationMessageStreamResponse_StreamPartEnd struct {
+ StreamPartEnd *StreamPartEnd `protobuf:"bytes,5,opt,name=stream_part_end,json=streamPartEnd,proto3,oneof"`
+}
+
+type CreateConversationMessageStreamResponse_StreamFinalization struct {
+ StreamFinalization *StreamFinalization `protobuf:"bytes,6,opt,name=stream_finalization,json=streamFinalization,proto3,oneof"`
+}
+
+type CreateConversationMessageStreamResponse_StreamError struct {
+ StreamError *StreamError `protobuf:"bytes,7,opt,name=stream_error,json=streamError,proto3,oneof"`
+}
+
+func (*CreateConversationMessageStreamResponse_StreamInitialization) isCreateConversationMessageStreamResponse_ResponsePayload() {
+}
+
+func (*CreateConversationMessageStreamResponse_StreamPartBegin) isCreateConversationMessageStreamResponse_ResponsePayload() {
+}
+
+func (*CreateConversationMessageStreamResponse_MessageChunk) isCreateConversationMessageStreamResponse_ResponsePayload() {
+}
+
+func (*CreateConversationMessageStreamResponse_IncompleteIndicator) isCreateConversationMessageStreamResponse_ResponsePayload() {
+}
+
+func (*CreateConversationMessageStreamResponse_StreamPartEnd) isCreateConversationMessageStreamResponse_ResponsePayload() {
+}
+
+func (*CreateConversationMessageStreamResponse_StreamFinalization) isCreateConversationMessageStreamResponse_ResponsePayload() {
+}
+
+func (*CreateConversationMessageStreamResponse_StreamError) isCreateConversationMessageStreamResponse_ResponsePayload() {
+}
+
+var File_chat_v2_chat_proto protoreflect.FileDescriptor
+
+const file_chat_v2_chat_proto_rawDesc = "" +
+ "\n" +
+ "\x12chat/v2/chat.proto\x12\achat.v2\x1a\x1cgoogle/api/annotations.proto\"k\n" +
+ "\x13MessageTypeToolCall\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" +
+ "\x04args\x18\x02 \x01(\tR\x04args\x12\x16\n" +
+ "\x06result\x18\x03 \x01(\tR\x06result\x12\x14\n" +
+ "\x05error\x18\x04 \x01(\tR\x05error\"M\n" +
+ "#MessageTypeToolCallPrepareArguments\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" +
+ "\x04args\x18\x02 \x01(\tR\x04args\"-\n" +
+ "\x11MessageTypeSystem\x12\x18\n" +
+ "\acontent\x18\x01 \x01(\tR\acontent\"O\n" +
+ "\x14MessageTypeAssistant\x12\x18\n" +
+ "\acontent\x18\x01 \x01(\tR\acontent\x12\x1d\n" +
+ "\n" +
+ "model_slug\x18\x02 \x01(\tR\tmodelSlug\"\x9e\x01\n" +
+ "\x0fMessageTypeUser\x12\x18\n" +
+ "\acontent\x18\x01 \x01(\tR\acontent\x12(\n" +
+ "\rselected_text\x18\x02 \x01(\tH\x00R\fselectedText\x88\x01\x01\x12%\n" +
+ "\vsurrounding\x18\a \x01(\tH\x01R\vsurrounding\x88\x01\x01B\x10\n" +
+ "\x0e_selected_textB\x0e\n" +
+ "\f_surrounding\"6\n" +
+ "\x12MessageTypeUnknown\x12 \n" +
+ "\vdescription\x18\x01 \x01(\tR\vdescription\"\xaa\x03\n" +
+ "\x0eMessagePayload\x124\n" +
+ "\x06system\x18\x01 \x01(\v2\x1a.chat.v2.MessageTypeSystemH\x00R\x06system\x12.\n" +
+ "\x04user\x18\x02 \x01(\v2\x18.chat.v2.MessageTypeUserH\x00R\x04user\x12=\n" +
+ "\tassistant\x18\x03 \x01(\v2\x1d.chat.v2.MessageTypeAssistantH\x00R\tassistant\x12m\n" +
+ "\x1btool_call_prepare_arguments\x18\x04 \x01(\v2,.chat.v2.MessageTypeToolCallPrepareArgumentsH\x00R\x18toolCallPrepareArguments\x12;\n" +
+ "\ttool_call\x18\x05 \x01(\v2\x1c.chat.v2.MessageTypeToolCallH\x00R\btoolCall\x127\n" +
+ "\aunknown\x18\x06 \x01(\v2\x1b.chat.v2.MessageTypeUnknownH\x00R\aunknownB\x0e\n" +
+ "\fmessage_type\"y\n" +
+ "\aMessage\x12\x1d\n" +
+ "\n" +
+ "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" +
+ "\apayload\x18\x02 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\x12\x1c\n" +
+ "\ttimestamp\x18\x03 \x01(\x03R\ttimestamp\"\x81\x01\n" +
+ "\fConversation\x12\x0e\n" +
+ "\x02id\x18\x01 \x01(\tR\x02id\x12\x14\n" +
+ "\x05title\x18\x02 \x01(\tR\x05title\x12\x1d\n" +
+ "\n" +
+ "model_slug\x18\x03 \x01(\tR\tmodelSlug\x12,\n" +
+ "\bmessages\x18\x04 \x03(\v2\x10.chat.v2.MessageR\bmessages\"M\n" +
+ "\x18ListConversationsRequest\x12\"\n" +
+ "\n" +
+ "project_id\x18\x01 \x01(\tH\x00R\tprojectId\x88\x01\x01B\r\n" +
+ "\v_project_id\"X\n" +
+ "\x19ListConversationsResponse\x12;\n" +
+ "\rconversations\x18\x01 \x03(\v2\x15.chat.v2.ConversationR\rconversations\"A\n" +
+ "\x16GetConversationRequest\x12'\n" +
+ "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"T\n" +
+ "\x17GetConversationResponse\x129\n" +
+ "\fconversation\x18\x01 \x01(\v2\x15.chat.v2.ConversationR\fconversation\"Z\n" +
+ "\x19UpdateConversationRequest\x12'\n" +
+ "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12\x14\n" +
+ "\x05title\x18\x02 \x01(\tR\x05title\"W\n" +
+ "\x1aUpdateConversationResponse\x129\n" +
+ "\fconversation\x18\x01 \x01(\v2\x15.chat.v2.ConversationR\fconversation\"D\n" +
+ "\x19DeleteConversationRequest\x12'\n" +
+ "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"\x1c\n" +
+ "\x1aDeleteConversationResponse\"\xc0\x01\n" +
+ "\x0eSupportedModel\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x12\n" +
+ "\x04slug\x18\x02 \x01(\tR\x04slug\x12#\n" +
+ "\rtotal_context\x18\x03 \x01(\x03R\ftotalContext\x12\x1d\n" +
+ "\n" +
+ "max_output\x18\x04 \x01(\x03R\tmaxOutput\x12\x1f\n" +
+ "\vinput_price\x18\x05 \x01(\x03R\n" +
+ "inputPrice\x12!\n" +
+ "\foutput_price\x18\x06 \x01(\x03R\voutputPrice\"\x1c\n" +
+ "\x1aListSupportedModelsRequest\"N\n" +
+ "\x1bListSupportedModelsResponse\x12/\n" +
+ "\x06models\x18\x01 \x03(\v2\x17.chat.v2.SupportedModelR\x06models\"^\n" +
+ "\x14StreamInitialization\x12'\n" +
+ "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\x12\x1d\n" +
+ "\n" +
+ "model_slug\x18\x02 \x01(\tR\tmodelSlug\"c\n" +
+ "\x0fStreamPartBegin\x12\x1d\n" +
+ "\n" +
+ "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" +
+ "\apayload\x18\x03 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\"C\n" +
+ "\fMessageChunk\x12\x1d\n" +
+ "\n" +
+ "message_id\x18\x01 \x01(\tR\tmessageId\x12\x14\n" +
+ "\x05delta\x18\x02 \x01(\tR\x05delta\"N\n" +
+ "\x13IncompleteIndicator\x12\x16\n" +
+ "\x06reason\x18\x01 \x01(\tR\x06reason\x12\x1f\n" +
+ "\vresponse_id\x18\x02 \x01(\tR\n" +
+ "responseId\"a\n" +
+ "\rStreamPartEnd\x12\x1d\n" +
+ "\n" +
+ "message_id\x18\x01 \x01(\tR\tmessageId\x121\n" +
+ "\apayload\x18\x03 \x01(\v2\x17.chat.v2.MessagePayloadR\apayload\"=\n" +
+ "\x12StreamFinalization\x12'\n" +
+ "\x0fconversation_id\x18\x01 \x01(\tR\x0econversationId\"2\n" +
+ "\vStreamError\x12#\n" +
+ "\rerror_message\x18\x01 \x01(\tR\ferrorMessage\"\xaf\x03\n" +
+ "&CreateConversationMessageStreamRequest\x12\x1d\n" +
+ "\n" +
+ "project_id\x18\x01 \x01(\tR\tprojectId\x12,\n" +
+ "\x0fconversation_id\x18\x02 \x01(\tH\x00R\x0econversationId\x88\x01\x01\x12\x1d\n" +
+ "\n" +
+ "model_slug\x18\x03 \x01(\tR\tmodelSlug\x12!\n" +
+ "\fuser_message\x18\x04 \x01(\tR\vuserMessage\x121\n" +
+ "\x12user_selected_text\x18\x05 \x01(\tH\x01R\x10userSelectedText\x88\x01\x01\x12K\n" +
+ "\x11conversation_type\x18\x06 \x01(\x0e2\x19.chat.v2.ConversationTypeH\x02R\x10conversationType\x88\x01\x01\x12%\n" +
+ "\vsurrounding\x18\b \x01(\tH\x03R\vsurrounding\x88\x01\x01B\x12\n" +
+ "\x10_conversation_idB\x15\n" +
+ "\x13_user_selected_textB\x14\n" +
+ "\x12_conversation_typeB\x0e\n" +
+ "\f_surrounding\"\xb9\x04\n" +
+ "'CreateConversationMessageStreamResponse\x12T\n" +
+ "\x15stream_initialization\x18\x01 \x01(\v2\x1d.chat.v2.StreamInitializationH\x00R\x14streamInitialization\x12F\n" +
+ "\x11stream_part_begin\x18\x02 \x01(\v2\x18.chat.v2.StreamPartBeginH\x00R\x0fstreamPartBegin\x12<\n" +
+ "\rmessage_chunk\x18\x03 \x01(\v2\x15.chat.v2.MessageChunkH\x00R\fmessageChunk\x12Q\n" +
+ "\x14incomplete_indicator\x18\x04 \x01(\v2\x1c.chat.v2.IncompleteIndicatorH\x00R\x13incompleteIndicator\x12@\n" +
+ "\x0fstream_part_end\x18\x05 \x01(\v2\x16.chat.v2.StreamPartEndH\x00R\rstreamPartEnd\x12N\n" +
+ "\x13stream_finalization\x18\x06 \x01(\v2\x1b.chat.v2.StreamFinalizationH\x00R\x12streamFinalization\x129\n" +
+ "\fstream_error\x18\a \x01(\v2\x14.chat.v2.StreamErrorH\x00R\vstreamErrorB\x12\n" +
+ "\x10response_payload*R\n" +
+ "\x10ConversationType\x12!\n" +
+ "\x1dCONVERSATION_TYPE_UNSPECIFIED\x10\x00\x12\x1b\n" +
+ "\x17CONVERSATION_TYPE_DEBUG\x10\x012\xa8\a\n" +
+ "\vChatService\x12\x83\x01\n" +
+ "\x11ListConversations\x12!.chat.v2.ListConversationsRequest\x1a\".chat.v2.ListConversationsResponse\"'\x82\xd3\xe4\x93\x02!\x12\x1f/_pd/api/v2/chats/conversations\x12\x8f\x01\n" +
+ "\x0fGetConversation\x12\x1f.chat.v2.GetConversationRequest\x1a .chat.v2.GetConversationResponse\"9\x82\xd3\xe4\x93\x023\x121/_pd/api/v2/chats/conversations/{conversation_id}\x12\xc2\x01\n" +
+ "\x1fCreateConversationMessageStream\x12/.chat.v2.CreateConversationMessageStreamRequest\x1a0.chat.v2.CreateConversationMessageStreamResponse\":\x82\xd3\xe4\x93\x024:\x01*\"//_pd/api/v2/chats/conversations/messages/stream0\x01\x12\x9b\x01\n" +
+ "\x12UpdateConversation\x12\".chat.v2.UpdateConversationRequest\x1a#.chat.v2.UpdateConversationResponse\"<\x82\xd3\xe4\x93\x026:\x01*21/_pd/api/v2/chats/conversations/{conversation_id}\x12\x98\x01\n" +
+ "\x12DeleteConversation\x12\".chat.v2.DeleteConversationRequest\x1a#.chat.v2.DeleteConversationResponse\"9\x82\xd3\xe4\x93\x023*1/_pd/api/v2/chats/conversations/{conversation_id}\x12\x82\x01\n" +
+ "\x13ListSupportedModels\x12#.chat.v2.ListSupportedModelsRequest\x1a$.chat.v2.ListSupportedModelsResponse\" \x82\xd3\xe4\x93\x02\x1a\x12\x18/_pd/api/v2/chats/modelsB\x7f\n" +
+ "\vcom.chat.v2B\tChatProtoP\x01Z(paperdebugger/pkg/gen/api/chat/v2;chatv2\xa2\x02\x03CXX\xaa\x02\aChat.V2\xca\x02\aChat\\V2\xe2\x02\x13Chat\\V2\\GPBMetadata\xea\x02\bChat::V2b\x06proto3"
+
+var (
+ file_chat_v2_chat_proto_rawDescOnce sync.Once
+ file_chat_v2_chat_proto_rawDescData []byte
+)
+
+func file_chat_v2_chat_proto_rawDescGZIP() []byte {
+ file_chat_v2_chat_proto_rawDescOnce.Do(func() {
+ file_chat_v2_chat_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_chat_v2_chat_proto_rawDesc), len(file_chat_v2_chat_proto_rawDesc)))
+ })
+ return file_chat_v2_chat_proto_rawDescData
+}
+
+var file_chat_v2_chat_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_chat_v2_chat_proto_msgTypes = make([]protoimpl.MessageInfo, 29)
+var file_chat_v2_chat_proto_goTypes = []any{
+ (ConversationType)(0), // 0: chat.v2.ConversationType
+ (*MessageTypeToolCall)(nil), // 1: chat.v2.MessageTypeToolCall
+ (*MessageTypeToolCallPrepareArguments)(nil), // 2: chat.v2.MessageTypeToolCallPrepareArguments
+ (*MessageTypeSystem)(nil), // 3: chat.v2.MessageTypeSystem
+ (*MessageTypeAssistant)(nil), // 4: chat.v2.MessageTypeAssistant
+ (*MessageTypeUser)(nil), // 5: chat.v2.MessageTypeUser
+ (*MessageTypeUnknown)(nil), // 6: chat.v2.MessageTypeUnknown
+ (*MessagePayload)(nil), // 7: chat.v2.MessagePayload
+ (*Message)(nil), // 8: chat.v2.Message
+ (*Conversation)(nil), // 9: chat.v2.Conversation
+ (*ListConversationsRequest)(nil), // 10: chat.v2.ListConversationsRequest
+ (*ListConversationsResponse)(nil), // 11: chat.v2.ListConversationsResponse
+ (*GetConversationRequest)(nil), // 12: chat.v2.GetConversationRequest
+ (*GetConversationResponse)(nil), // 13: chat.v2.GetConversationResponse
+ (*UpdateConversationRequest)(nil), // 14: chat.v2.UpdateConversationRequest
+ (*UpdateConversationResponse)(nil), // 15: chat.v2.UpdateConversationResponse
+ (*DeleteConversationRequest)(nil), // 16: chat.v2.DeleteConversationRequest
+ (*DeleteConversationResponse)(nil), // 17: chat.v2.DeleteConversationResponse
+ (*SupportedModel)(nil), // 18: chat.v2.SupportedModel
+ (*ListSupportedModelsRequest)(nil), // 19: chat.v2.ListSupportedModelsRequest
+ (*ListSupportedModelsResponse)(nil), // 20: chat.v2.ListSupportedModelsResponse
+ (*StreamInitialization)(nil), // 21: chat.v2.StreamInitialization
+ (*StreamPartBegin)(nil), // 22: chat.v2.StreamPartBegin
+ (*MessageChunk)(nil), // 23: chat.v2.MessageChunk
+ (*IncompleteIndicator)(nil), // 24: chat.v2.IncompleteIndicator
+ (*StreamPartEnd)(nil), // 25: chat.v2.StreamPartEnd
+ (*StreamFinalization)(nil), // 26: chat.v2.StreamFinalization
+ (*StreamError)(nil), // 27: chat.v2.StreamError
+ (*CreateConversationMessageStreamRequest)(nil), // 28: chat.v2.CreateConversationMessageStreamRequest
+ (*CreateConversationMessageStreamResponse)(nil), // 29: chat.v2.CreateConversationMessageStreamResponse
+}
+var file_chat_v2_chat_proto_depIdxs = []int32{
+ 3, // 0: chat.v2.MessagePayload.system:type_name -> chat.v2.MessageTypeSystem
+ 5, // 1: chat.v2.MessagePayload.user:type_name -> chat.v2.MessageTypeUser
+ 4, // 2: chat.v2.MessagePayload.assistant:type_name -> chat.v2.MessageTypeAssistant
+ 2, // 3: chat.v2.MessagePayload.tool_call_prepare_arguments:type_name -> chat.v2.MessageTypeToolCallPrepareArguments
+ 1, // 4: chat.v2.MessagePayload.tool_call:type_name -> chat.v2.MessageTypeToolCall
+ 6, // 5: chat.v2.MessagePayload.unknown:type_name -> chat.v2.MessageTypeUnknown
+ 7, // 6: chat.v2.Message.payload:type_name -> chat.v2.MessagePayload
+ 8, // 7: chat.v2.Conversation.messages:type_name -> chat.v2.Message
+ 9, // 8: chat.v2.ListConversationsResponse.conversations:type_name -> chat.v2.Conversation
+ 9, // 9: chat.v2.GetConversationResponse.conversation:type_name -> chat.v2.Conversation
+ 9, // 10: chat.v2.UpdateConversationResponse.conversation:type_name -> chat.v2.Conversation
+ 18, // 11: chat.v2.ListSupportedModelsResponse.models:type_name -> chat.v2.SupportedModel
+ 7, // 12: chat.v2.StreamPartBegin.payload:type_name -> chat.v2.MessagePayload
+ 7, // 13: chat.v2.StreamPartEnd.payload:type_name -> chat.v2.MessagePayload
+ 0, // 14: chat.v2.CreateConversationMessageStreamRequest.conversation_type:type_name -> chat.v2.ConversationType
+ 21, // 15: chat.v2.CreateConversationMessageStreamResponse.stream_initialization:type_name -> chat.v2.StreamInitialization
+ 22, // 16: chat.v2.CreateConversationMessageStreamResponse.stream_part_begin:type_name -> chat.v2.StreamPartBegin
+ 23, // 17: chat.v2.CreateConversationMessageStreamResponse.message_chunk:type_name -> chat.v2.MessageChunk
+ 24, // 18: chat.v2.CreateConversationMessageStreamResponse.incomplete_indicator:type_name -> chat.v2.IncompleteIndicator
+ 25, // 19: chat.v2.CreateConversationMessageStreamResponse.stream_part_end:type_name -> chat.v2.StreamPartEnd
+ 26, // 20: chat.v2.CreateConversationMessageStreamResponse.stream_finalization:type_name -> chat.v2.StreamFinalization
+ 27, // 21: chat.v2.CreateConversationMessageStreamResponse.stream_error:type_name -> chat.v2.StreamError
+ 10, // 22: chat.v2.ChatService.ListConversations:input_type -> chat.v2.ListConversationsRequest
+ 12, // 23: chat.v2.ChatService.GetConversation:input_type -> chat.v2.GetConversationRequest
+ 28, // 24: chat.v2.ChatService.CreateConversationMessageStream:input_type -> chat.v2.CreateConversationMessageStreamRequest
+ 14, // 25: chat.v2.ChatService.UpdateConversation:input_type -> chat.v2.UpdateConversationRequest
+ 16, // 26: chat.v2.ChatService.DeleteConversation:input_type -> chat.v2.DeleteConversationRequest
+ 19, // 27: chat.v2.ChatService.ListSupportedModels:input_type -> chat.v2.ListSupportedModelsRequest
+ 11, // 28: chat.v2.ChatService.ListConversations:output_type -> chat.v2.ListConversationsResponse
+ 13, // 29: chat.v2.ChatService.GetConversation:output_type -> chat.v2.GetConversationResponse
+ 29, // 30: chat.v2.ChatService.CreateConversationMessageStream:output_type -> chat.v2.CreateConversationMessageStreamResponse
+ 15, // 31: chat.v2.ChatService.UpdateConversation:output_type -> chat.v2.UpdateConversationResponse
+ 17, // 32: chat.v2.ChatService.DeleteConversation:output_type -> chat.v2.DeleteConversationResponse
+ 20, // 33: chat.v2.ChatService.ListSupportedModels:output_type -> chat.v2.ListSupportedModelsResponse
+ 28, // [28:34] is the sub-list for method output_type
+ 22, // [22:28] is the sub-list for method input_type
+ 22, // [22:22] is the sub-list for extension type_name
+ 22, // [22:22] is the sub-list for extension extendee
+ 0, // [0:22] is the sub-list for field type_name
+}
+
+func init() { file_chat_v2_chat_proto_init() }
+func file_chat_v2_chat_proto_init() {
+ if File_chat_v2_chat_proto != nil {
+ return
+ }
+ file_chat_v2_chat_proto_msgTypes[4].OneofWrappers = []any{}
+ file_chat_v2_chat_proto_msgTypes[6].OneofWrappers = []any{
+ (*MessagePayload_System)(nil),
+ (*MessagePayload_User)(nil),
+ (*MessagePayload_Assistant)(nil),
+ (*MessagePayload_ToolCallPrepareArguments)(nil),
+ (*MessagePayload_ToolCall)(nil),
+ (*MessagePayload_Unknown)(nil),
+ }
+ file_chat_v2_chat_proto_msgTypes[9].OneofWrappers = []any{}
+ file_chat_v2_chat_proto_msgTypes[27].OneofWrappers = []any{}
+ file_chat_v2_chat_proto_msgTypes[28].OneofWrappers = []any{
+ (*CreateConversationMessageStreamResponse_StreamInitialization)(nil),
+ (*CreateConversationMessageStreamResponse_StreamPartBegin)(nil),
+ (*CreateConversationMessageStreamResponse_MessageChunk)(nil),
+ (*CreateConversationMessageStreamResponse_IncompleteIndicator)(nil),
+ (*CreateConversationMessageStreamResponse_StreamPartEnd)(nil),
+ (*CreateConversationMessageStreamResponse_StreamFinalization)(nil),
+ (*CreateConversationMessageStreamResponse_StreamError)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_chat_v2_chat_proto_rawDesc), len(file_chat_v2_chat_proto_rawDesc)),
+ NumEnums: 1,
+ NumMessages: 29,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_chat_v2_chat_proto_goTypes,
+ DependencyIndexes: file_chat_v2_chat_proto_depIdxs,
+ EnumInfos: file_chat_v2_chat_proto_enumTypes,
+ MessageInfos: file_chat_v2_chat_proto_msgTypes,
+ }.Build()
+ File_chat_v2_chat_proto = out.File
+ file_chat_v2_chat_proto_goTypes = nil
+ file_chat_v2_chat_proto_depIdxs = nil
+}
diff --git a/pkg/gen/api/chat/v2/chat.pb.gw.go b/pkg/gen/api/chat/v2/chat.pb.gw.go
new file mode 100644
index 00000000..81f7e4e6
--- /dev/null
+++ b/pkg/gen/api/chat/v2/chat.pb.gw.go
@@ -0,0 +1,514 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: chat/v2/chat.proto
+
+/*
+Package chatv2 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package chatv2
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
+
+var filter_ChatService_ListConversations_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
+
+func request_ChatService_ListConversations_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq ListConversationsRequest
+ metadata runtime.ServerMetadata
+ )
+ if req.Body != nil {
+ _, _ = io.Copy(io.Discard, req.Body)
+ }
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ChatService_ListConversations_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.ListConversations(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_ChatService_ListConversations_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq ListConversationsRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := req.ParseForm(); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_ChatService_ListConversations_0); err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.ListConversations(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+func request_ChatService_GetConversation_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq GetConversationRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ if req.Body != nil {
+ _, _ = io.Copy(io.Discard, req.Body)
+ }
+ val, ok := pathParams["conversation_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id")
+ }
+ protoReq.ConversationId, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err)
+ }
+ msg, err := client.GetConversation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_ChatService_GetConversation_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq GetConversationRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ val, ok := pathParams["conversation_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id")
+ }
+ protoReq.ConversationId, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err)
+ }
+ msg, err := server.GetConversation(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+func request_ChatService_CreateConversationMessageStream_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (ChatService_CreateConversationMessageStreamClient, runtime.ServerMetadata, error) {
+ var (
+ protoReq CreateConversationMessageStreamRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if req.Body != nil {
+ _, _ = io.Copy(io.Discard, req.Body)
+ }
+ stream, err := client.CreateConversationMessageStream(ctx, &protoReq)
+ if err != nil {
+ return nil, metadata, err
+ }
+ header, err := stream.Header()
+ if err != nil {
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+func request_ChatService_UpdateConversation_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq UpdateConversationRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ if req.Body != nil {
+ _, _ = io.Copy(io.Discard, req.Body)
+ }
+ val, ok := pathParams["conversation_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id")
+ }
+ protoReq.ConversationId, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err)
+ }
+ msg, err := client.UpdateConversation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_ChatService_UpdateConversation_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq UpdateConversationRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ val, ok := pathParams["conversation_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id")
+ }
+ protoReq.ConversationId, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err)
+ }
+ msg, err := server.UpdateConversation(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+func request_ChatService_DeleteConversation_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq DeleteConversationRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ if req.Body != nil {
+ _, _ = io.Copy(io.Discard, req.Body)
+ }
+ val, ok := pathParams["conversation_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id")
+ }
+ protoReq.ConversationId, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err)
+ }
+ msg, err := client.DeleteConversation(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_ChatService_DeleteConversation_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq DeleteConversationRequest
+ metadata runtime.ServerMetadata
+ err error
+ )
+ val, ok := pathParams["conversation_id"]
+ if !ok {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "conversation_id")
+ }
+ protoReq.ConversationId, err = runtime.String(val)
+ if err != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "conversation_id", err)
+ }
+ msg, err := server.DeleteConversation(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+func request_ChatService_ListSupportedModels_0(ctx context.Context, marshaler runtime.Marshaler, client ChatServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq ListSupportedModelsRequest
+ metadata runtime.ServerMetadata
+ )
+ if req.Body != nil {
+ _, _ = io.Copy(io.Discard, req.Body)
+ }
+ msg, err := client.ListSupportedModels(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+}
+
+func local_request_ChatService_ListSupportedModels_0(ctx context.Context, marshaler runtime.Marshaler, server ChatServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq ListSupportedModelsRequest
+ metadata runtime.ServerMetadata
+ )
+ msg, err := server.ListSupportedModels(ctx, &protoReq)
+ return msg, metadata, err
+}
+
+// RegisterChatServiceHandlerServer registers the http handlers for service ChatService to "mux".
+// UnaryRPC :call ChatServiceServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterChatServiceHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterChatServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ChatServiceServer) error {
+ mux.Handle(http.MethodGet, pattern_ChatService_ListConversations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/ListConversations", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_ChatService_ListConversations_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_ListConversations_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_ChatService_GetConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/GetConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_ChatService_GetConversation_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_GetConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessageStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+ _, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ })
+ mux.Handle(http.MethodPatch, pattern_ChatService_UpdateConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/UpdateConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_ChatService_UpdateConversation_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_UpdateConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodDelete, pattern_ChatService_DeleteConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/DeleteConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_ChatService_DeleteConversation_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_DeleteConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_ChatService_ListSupportedModels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/chat.v2.ChatService/ListSupportedModels", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/models"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_ChatService_ListSupportedModels_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_ListSupportedModels_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
+
+// RegisterChatServiceHandlerFromEndpoint is same as RegisterChatServiceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterChatServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterChatServiceHandler(ctx, mux, conn)
+}
+
+// RegisterChatServiceHandler registers the http handlers for service ChatService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterChatServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterChatServiceHandlerClient(ctx, mux, NewChatServiceClient(conn))
+}
+
+// RegisterChatServiceHandlerClient registers the http handlers for service ChatService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ChatServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ChatServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "ChatServiceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
+func RegisterChatServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ChatServiceClient) error {
+ mux.Handle(http.MethodGet, pattern_ChatService_ListConversations_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/ListConversations", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_ChatService_ListConversations_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_ListConversations_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_ChatService_GetConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/GetConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_ChatService_GetConversation_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_GetConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_ChatService_CreateConversationMessageStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/CreateConversationMessageStream", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/messages/stream"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_ChatService_CreateConversationMessageStream_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_CreateConversationMessageStream_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPatch, pattern_ChatService_UpdateConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/UpdateConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_ChatService_UpdateConversation_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_UpdateConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodDelete, pattern_ChatService_DeleteConversation_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/DeleteConversation", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/conversations/{conversation_id}"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_ChatService_DeleteConversation_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_DeleteConversation_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodGet, pattern_ChatService_ListSupportedModels_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/chat.v2.ChatService/ListSupportedModels", runtime.WithHTTPPathPattern("/_pd/api/v2/chats/models"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_ChatService_ListSupportedModels_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_ChatService_ListSupportedModels_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_ChatService_ListConversations_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"_pd", "api", "v2", "chats", "conversations"}, ""))
+ pattern_ChatService_GetConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, ""))
+ pattern_ChatService_CreateConversationMessageStream_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5, 2, 6}, []string{"_pd", "api", "v2", "chats", "conversations", "messages", "stream"}, ""))
+ pattern_ChatService_UpdateConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, ""))
+ pattern_ChatService_DeleteConversation_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 1, 0, 4, 1, 5, 5}, []string{"_pd", "api", "v2", "chats", "conversations", "conversation_id"}, ""))
+ pattern_ChatService_ListSupportedModels_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"_pd", "api", "v2", "chats", "models"}, ""))
+)
+
+var (
+ forward_ChatService_ListConversations_0 = runtime.ForwardResponseMessage
+ forward_ChatService_GetConversation_0 = runtime.ForwardResponseMessage
+ forward_ChatService_CreateConversationMessageStream_0 = runtime.ForwardResponseStream
+ forward_ChatService_UpdateConversation_0 = runtime.ForwardResponseMessage
+ forward_ChatService_DeleteConversation_0 = runtime.ForwardResponseMessage
+ forward_ChatService_ListSupportedModels_0 = runtime.ForwardResponseMessage
+)
diff --git a/pkg/gen/api/chat/v2/chat_grpc.pb.go b/pkg/gen/api/chat/v2/chat_grpc.pb.go
new file mode 100644
index 00000000..8303a8a8
--- /dev/null
+++ b/pkg/gen/api/chat/v2/chat_grpc.pb.go
@@ -0,0 +1,315 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.6.0
+// - protoc (unknown)
+// source: chat/v2/chat.proto
+
+package chatv2
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ ChatService_ListConversations_FullMethodName = "/chat.v2.ChatService/ListConversations"
+ ChatService_GetConversation_FullMethodName = "/chat.v2.ChatService/GetConversation"
+ ChatService_CreateConversationMessageStream_FullMethodName = "/chat.v2.ChatService/CreateConversationMessageStream"
+ ChatService_UpdateConversation_FullMethodName = "/chat.v2.ChatService/UpdateConversation"
+ ChatService_DeleteConversation_FullMethodName = "/chat.v2.ChatService/DeleteConversation"
+ ChatService_ListSupportedModels_FullMethodName = "/chat.v2.ChatService/ListSupportedModels"
+)
+
+// ChatServiceClient is the client API for ChatService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ChatServiceClient interface {
+ ListConversations(ctx context.Context, in *ListConversationsRequest, opts ...grpc.CallOption) (*ListConversationsResponse, error)
+ GetConversation(ctx context.Context, in *GetConversationRequest, opts ...grpc.CallOption) (*GetConversationResponse, error)
+ CreateConversationMessageStream(ctx context.Context, in *CreateConversationMessageStreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateConversationMessageStreamResponse], error)
+ UpdateConversation(ctx context.Context, in *UpdateConversationRequest, opts ...grpc.CallOption) (*UpdateConversationResponse, error)
+ DeleteConversation(ctx context.Context, in *DeleteConversationRequest, opts ...grpc.CallOption) (*DeleteConversationResponse, error)
+ ListSupportedModels(ctx context.Context, in *ListSupportedModelsRequest, opts ...grpc.CallOption) (*ListSupportedModelsResponse, error)
+}
+
+type chatServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewChatServiceClient(cc grpc.ClientConnInterface) ChatServiceClient {
+ return &chatServiceClient{cc}
+}
+
+func (c *chatServiceClient) ListConversations(ctx context.Context, in *ListConversationsRequest, opts ...grpc.CallOption) (*ListConversationsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(ListConversationsResponse)
+ err := c.cc.Invoke(ctx, ChatService_ListConversations_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *chatServiceClient) GetConversation(ctx context.Context, in *GetConversationRequest, opts ...grpc.CallOption) (*GetConversationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(GetConversationResponse)
+ err := c.cc.Invoke(ctx, ChatService_GetConversation_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *chatServiceClient) CreateConversationMessageStream(ctx context.Context, in *CreateConversationMessageStreamRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[CreateConversationMessageStreamResponse], error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &ChatService_ServiceDesc.Streams[0], ChatService_CreateConversationMessageStream_FullMethodName, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &grpc.GenericClientStream[CreateConversationMessageStreamRequest, CreateConversationMessageStreamResponse]{ClientStream: stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ChatService_CreateConversationMessageStreamClient = grpc.ServerStreamingClient[CreateConversationMessageStreamResponse]
+
+func (c *chatServiceClient) UpdateConversation(ctx context.Context, in *UpdateConversationRequest, opts ...grpc.CallOption) (*UpdateConversationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(UpdateConversationResponse)
+ err := c.cc.Invoke(ctx, ChatService_UpdateConversation_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *chatServiceClient) DeleteConversation(ctx context.Context, in *DeleteConversationRequest, opts ...grpc.CallOption) (*DeleteConversationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(DeleteConversationResponse)
+ err := c.cc.Invoke(ctx, ChatService_DeleteConversation_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *chatServiceClient) ListSupportedModels(ctx context.Context, in *ListSupportedModelsRequest, opts ...grpc.CallOption) (*ListSupportedModelsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(ListSupportedModelsResponse)
+ err := c.cc.Invoke(ctx, ChatService_ListSupportedModels_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ChatServiceServer is the server API for ChatService service.
+// All implementations must embed UnimplementedChatServiceServer
+// for forward compatibility.
+type ChatServiceServer interface {
+ ListConversations(context.Context, *ListConversationsRequest) (*ListConversationsResponse, error)
+ GetConversation(context.Context, *GetConversationRequest) (*GetConversationResponse, error)
+ CreateConversationMessageStream(*CreateConversationMessageStreamRequest, grpc.ServerStreamingServer[CreateConversationMessageStreamResponse]) error
+ UpdateConversation(context.Context, *UpdateConversationRequest) (*UpdateConversationResponse, error)
+ DeleteConversation(context.Context, *DeleteConversationRequest) (*DeleteConversationResponse, error)
+ ListSupportedModels(context.Context, *ListSupportedModelsRequest) (*ListSupportedModelsResponse, error)
+ mustEmbedUnimplementedChatServiceServer()
+}
+
+// UnimplementedChatServiceServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedChatServiceServer struct{}
+
+func (UnimplementedChatServiceServer) ListConversations(context.Context, *ListConversationsRequest) (*ListConversationsResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "method ListConversations not implemented")
+}
+func (UnimplementedChatServiceServer) GetConversation(context.Context, *GetConversationRequest) (*GetConversationResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "method GetConversation not implemented")
+}
+func (UnimplementedChatServiceServer) CreateConversationMessageStream(*CreateConversationMessageStreamRequest, grpc.ServerStreamingServer[CreateConversationMessageStreamResponse]) error {
+ return status.Error(codes.Unimplemented, "method CreateConversationMessageStream not implemented")
+}
+func (UnimplementedChatServiceServer) UpdateConversation(context.Context, *UpdateConversationRequest) (*UpdateConversationResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "method UpdateConversation not implemented")
+}
+func (UnimplementedChatServiceServer) DeleteConversation(context.Context, *DeleteConversationRequest) (*DeleteConversationResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "method DeleteConversation not implemented")
+}
+func (UnimplementedChatServiceServer) ListSupportedModels(context.Context, *ListSupportedModelsRequest) (*ListSupportedModelsResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "method ListSupportedModels not implemented")
+}
+func (UnimplementedChatServiceServer) mustEmbedUnimplementedChatServiceServer() {}
+func (UnimplementedChatServiceServer) testEmbeddedByValue() {}
+
+// UnsafeChatServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ChatServiceServer will
+// result in compilation errors.
+type UnsafeChatServiceServer interface {
+ mustEmbedUnimplementedChatServiceServer()
+}
+
+func RegisterChatServiceServer(s grpc.ServiceRegistrar, srv ChatServiceServer) {
+ // If the following call panics, it indicates UnimplementedChatServiceServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&ChatService_ServiceDesc, srv)
+}
+
+func _ChatService_ListConversations_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListConversationsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ChatServiceServer).ListConversations(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ChatService_ListConversations_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ChatServiceServer).ListConversations(ctx, req.(*ListConversationsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ChatService_GetConversation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetConversationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ChatServiceServer).GetConversation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ChatService_GetConversation_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ChatServiceServer).GetConversation(ctx, req.(*GetConversationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ChatService_CreateConversationMessageStream_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(CreateConversationMessageStreamRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(ChatServiceServer).CreateConversationMessageStream(m, &grpc.GenericServerStream[CreateConversationMessageStreamRequest, CreateConversationMessageStreamResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ChatService_CreateConversationMessageStreamServer = grpc.ServerStreamingServer[CreateConversationMessageStreamResponse]
+
+func _ChatService_UpdateConversation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateConversationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ChatServiceServer).UpdateConversation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ChatService_UpdateConversation_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ChatServiceServer).UpdateConversation(ctx, req.(*UpdateConversationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ChatService_DeleteConversation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteConversationRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ChatServiceServer).DeleteConversation(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ChatService_DeleteConversation_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ChatServiceServer).DeleteConversation(ctx, req.(*DeleteConversationRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ChatService_ListSupportedModels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListSupportedModelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ChatServiceServer).ListSupportedModels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: ChatService_ListSupportedModels_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ChatServiceServer).ListSupportedModels(ctx, req.(*ListSupportedModelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// ChatService_ServiceDesc is the grpc.ServiceDesc for ChatService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ChatService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "chat.v2.ChatService",
+ HandlerType: (*ChatServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListConversations",
+ Handler: _ChatService_ListConversations_Handler,
+ },
+ {
+ MethodName: "GetConversation",
+ Handler: _ChatService_GetConversation_Handler,
+ },
+ {
+ MethodName: "UpdateConversation",
+ Handler: _ChatService_UpdateConversation_Handler,
+ },
+ {
+ MethodName: "DeleteConversation",
+ Handler: _ChatService_DeleteConversation_Handler,
+ },
+ {
+ MethodName: "ListSupportedModels",
+ Handler: _ChatService_ListSupportedModels_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "CreateConversationMessageStream",
+ Handler: _ChatService_CreateConversationMessageStream_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "chat/v2/chat.proto",
+}
diff --git a/pkg/gen/api/comment/v1/comment.pb.go b/pkg/gen/api/comment/v1/comment.pb.go
index b19607bd..acac3faf 100644
--- a/pkg/gen/api/comment/v1/comment.pb.go
+++ b/pkg/gen/api/comment/v1/comment.pb.go
@@ -27,7 +27,7 @@ type CommentsAcceptedRequest struct {
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
ConversationId string `protobuf:"bytes,2,opt,name=conversation_id,json=conversationId,proto3" json:"conversation_id,omitempty"`
MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"`
- CommentIds []string `protobuf:"bytes,4,rep,name=comment_ids,json=commentIds,proto3" json:"comment_ids,omitempty"` // 被 accept 的 comment id 列表
+ CommentIds []string `protobuf:"bytes,4,rep,name=comment_ids,json=commentIds,proto3" json:"comment_ids,omitempty"` // List of accepted comment IDs
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
diff --git a/proto/chat/v2/chat.proto b/proto/chat/v2/chat.proto
new file mode 100644
index 00000000..68130275
--- /dev/null
+++ b/proto/chat/v2/chat.proto
@@ -0,0 +1,227 @@
+syntax = "proto3";
+
+package chat.v2;
+
+import "google/api/annotations.proto";
+
+option go_package = "paperdebugger/pkg/gen/api/chat/v2;chatv2";
+
+service ChatService {
+ rpc ListConversations(ListConversationsRequest) returns (ListConversationsResponse) {
+ option (google.api.http) = {get: "/_pd/api/v2/chats/conversations"};
+ }
+ rpc GetConversation(GetConversationRequest) returns (GetConversationResponse) {
+ option (google.api.http) = {get: "/_pd/api/v2/chats/conversations/{conversation_id}"};
+ }
+ rpc CreateConversationMessageStream(CreateConversationMessageStreamRequest) returns (stream CreateConversationMessageStreamResponse) {
+ option (google.api.http) = {
+ post: "/_pd/api/v2/chats/conversations/messages/stream"
+ body: "*"
+ };
+ }
+ rpc UpdateConversation(UpdateConversationRequest) returns (UpdateConversationResponse) {
+ option (google.api.http) = {
+ patch: "/_pd/api/v2/chats/conversations/{conversation_id}"
+ body: "*"
+ };
+ }
+ rpc DeleteConversation(DeleteConversationRequest) returns (DeleteConversationResponse) {
+ option (google.api.http) = {delete: "/_pd/api/v2/chats/conversations/{conversation_id}"};
+ }
+ rpc ListSupportedModels(ListSupportedModelsRequest) returns (ListSupportedModelsResponse) {
+ option (google.api.http) = {get: "/_pd/api/v2/chats/models"};
+ }
+}
+
+message MessageTypeToolCall {
+ string name = 1;
+ string args = 2; // Json string
+ string result = 3; // Json string
+ string error = 4; // Json string
+}
+
+message MessageTypeToolCallPrepareArguments {
+ string name = 1;
+ string args = 2; // Json string
+}
+
+message MessageTypeSystem {
+ string content = 1;
+}
+
+message MessageTypeAssistant {
+ string content = 1;
+ string model_slug = 2;
+}
+
+message MessageTypeUser {
+ string content = 1;
+ optional string selected_text = 2;
+ optional string surrounding = 7;
+}
+
+message MessageTypeUnknown {
+ string description = 1;
+}
+
+message MessagePayload {
+ oneof message_type {
+ MessageTypeSystem system = 1;
+ MessageTypeUser user = 2;
+ MessageTypeAssistant assistant = 3;
+ MessageTypeToolCallPrepareArguments tool_call_prepare_arguments = 4;
+ MessageTypeToolCall tool_call = 5;
+ MessageTypeUnknown unknown = 6;
+ }
+}
+
+message Message {
+ string message_id = 1;
+ MessagePayload payload = 2;
+ int64 timestamp = 3;
+}
+
+message Conversation {
+ string id = 1;
+ string title = 2;
+ string model_slug = 3;
+ // If list conversations, then messages length is 0.
+ repeated Message messages = 4;
+}
+
+message ListConversationsRequest {
+ optional string project_id = 1;
+}
+
+message ListConversationsResponse {
+ // In this response, the length of conversations[i].messages should be 0.
+ repeated Conversation conversations = 1;
+}
+
+message GetConversationRequest {
+ string conversation_id = 1;
+}
+
+message GetConversationResponse {
+ Conversation conversation = 1;
+}
+
+message UpdateConversationRequest {
+ string conversation_id = 1;
+ string title = 2;
+}
+
+message UpdateConversationResponse {
+ Conversation conversation = 1;
+}
+
+message DeleteConversationRequest {
+ string conversation_id = 1;
+}
+
+message DeleteConversationResponse {
+ // explicitly empty
+}
+
+message SupportedModel {
+ string name = 1;
+ string slug = 2;
+ int64 total_context = 3;
+ int64 max_output = 4;
+ int64 input_price = 5; // in cents per 1M tokens
+ int64 output_price = 6; // in cents per 1M tokens
+}
+
+message ListSupportedModelsRequest {
+ // explicitly empty
+}
+
+message ListSupportedModelsResponse {
+ repeated SupportedModel models = 1;
+}
+
+// ============================== Streaming Messages
+
+// Information sent once at the beginning of a new conversation stream
+message StreamInitialization {
+ string conversation_id = 1;
+ string model_slug = 2;
+}
+
+// Designed as StreamPartBegin and StreamPartEnd to
+// handle the case where assistant and tool are called at the same time.
+//
+// User: Please answer me "Ok I will do that", then call "get_weather"
+// Assistant: Ok I will do that + Tool: get_weather
+message StreamPartBegin {
+ string message_id = 1;
+ MessagePayload payload = 3;
+}
+
+// Note: After the StreamPartBegin of tool_call, there can be no MessageChunk,
+// and the StreamPartEnd can be directly called when the result is ready.
+message MessageChunk {
+ string message_id = 1; // The id of the message that this chunk belongs to
+ string delta = 2; // The small piece of text
+}
+
+message IncompleteIndicator {
+ string reason = 1;
+ string response_id = 2;
+}
+
+message StreamPartEnd {
+ string message_id = 1;
+ MessagePayload payload = 3;
+}
+
+// Sent when the current AI response is fully streamed
+message StreamFinalization {
+ string conversation_id = 1;
+ // Do not return the full Conversation here.
+ // If the user wants, they can call the GetConversation API themselves.
+ // Note: Do not call GetConversation when receiving streamFinalization,
+ // it should be called after the entire API call is finished.
+}
+
+message StreamError {
+ string error_message = 1;
+}
+
+// Currently, we inject two types of messages:
+// 1. System message
+// 2. User message
+
+enum ConversationType {
+ CONVERSATION_TYPE_UNSPECIFIED = 0;
+ CONVERSATION_TYPE_DEBUG = 1; // does not contain any customized messages, the
+ // inapp_history and openai_history are synced.
+ // CONVERSATION_TYPE_NO_SYSTEM_MESSAGE_INJECTION = 2;
+ // CONVERSATION_TYPE_NO_USER_MESSAGE_INJECTION = 3;
+}
+
+// This message should be the same as CreateConversationMessageRequest
+// Note: If conversation_id is provided,
+// the conversation will be created and returned.
+message CreateConversationMessageStreamRequest {
+ string project_id = 1;
+ optional string conversation_id = 2;
+ string model_slug = 3;
+ string user_message = 4;
+ optional string user_selected_text = 5;
+ optional ConversationType conversation_type = 6;
+ optional string surrounding = 8;
+}
+
+// Response for streaming a message within an existing conversation
+message CreateConversationMessageStreamResponse {
+ oneof response_payload {
+ StreamInitialization stream_initialization = 1;
+ StreamPartBegin stream_part_begin = 2;
+ MessageChunk message_chunk = 3;
+ IncompleteIndicator incomplete_indicator = 4;
+ StreamPartEnd stream_part_end = 5;
+ StreamFinalization stream_finalization = 6;
+ StreamError stream_error = 7;
+ }
+}
diff --git a/proto/comment/v1/comment.proto b/proto/comment/v1/comment.proto
index bed1077e..65640697 100644
--- a/proto/comment/v1/comment.proto
+++ b/proto/comment/v1/comment.proto
@@ -19,7 +19,7 @@ message CommentsAcceptedRequest {
string project_id = 1;
string conversation_id = 2;
string message_id = 3;
- repeated string comment_ids = 4; // 被 accept 的 comment id 列表
+ repeated string comment_ids = 4; // List of accepted comment IDs
}
message CommentsAcceptedResponse {
diff --git a/webapp/_webapp/src/background.ts b/webapp/_webapp/src/background.ts
index 74847df2..959a456f 100644
--- a/webapp/_webapp/src/background.ts
+++ b/webapp/_webapp/src/background.ts
@@ -83,11 +83,13 @@ const registerContentScriptsIfPermitted = async () => {
try {
const { origins = [] } = await chrome.permissions.getAll();
if (!origins.length) {
+ // eslint-disable-next-line no-console
console.log("[PaperDebugger] No origins found, skipping content script registration");
return;
}
await registerContentScripts(origins);
} catch (error) {
+ // eslint-disable-next-line no-console
console.error("[PaperDebugger] Unable to register content scripts", error);
}
};
diff --git a/webapp/_webapp/src/components/loading-indicator.tsx b/webapp/_webapp/src/components/loading-indicator.tsx
index 5198ad9a..48d22ac0 100644
--- a/webapp/_webapp/src/components/loading-indicator.tsx
+++ b/webapp/_webapp/src/components/loading-indicator.tsx
@@ -125,7 +125,7 @@ export const LoadingIndicator = ({ text = "Thinking", estimatedSeconds = 0, erro
{/* Status Text */}
{getStatusMessage()}
diff --git a/webapp/_webapp/src/components/message-card.tsx b/webapp/_webapp/src/components/message-card.tsx
index f5582775..2a5f406c 100644
--- a/webapp/_webapp/src/components/message-card.tsx
+++ b/webapp/_webapp/src/components/message-card.tsx
@@ -40,22 +40,22 @@ interface MessageCardProps {
}
export const MessageCard = memo(({ messageEntry, prevAttachment, animated }: MessageCardProps) => {
- if (messageEntry.toolCall !== undefined) {
- return (
-
-
-
- );
- }
-
const returnComponent = () => {
+ if (messageEntry.toolCall !== undefined) {
+ return (
+
+
+
+ );
+ }
+
if (messageEntry.assistant !== undefined) {
return (
@@ -84,6 +85,7 @@ export const MessageCard = memo(({ messageEntry, prevAttachment, animated }: Mes
content={messageEntry.user?.content ?? ""}
attachment={messageEntry.user?.selectedText ?? ""}
stale={messageEntry.status === MessageEntryStatus.STALE}
+ messageId={messageEntry.messageId}
/>
);
}
diff --git a/webapp/_webapp/src/components/message-entry-container/assistant.tsx b/webapp/_webapp/src/components/message-entry-container/assistant.tsx
index 1a01f6da..80986082 100644
--- a/webapp/_webapp/src/components/message-entry-container/assistant.tsx
+++ b/webapp/_webapp/src/components/message-entry-container/assistant.tsx
@@ -48,40 +48,47 @@ export const AssistantMessageContainer = ({
}, 2000);
}
}, [user?.id, projectId, processedMessage, messageId]);
+
+ const showMessage = (processedMessage?.length || 0) > 0;
const staleComponent = stale &&
This message is stale.
;
- const writingIndicator = stale ? null : (
-
- );
+ const writingIndicator =
+ stale || !showMessage ? null : (
+
+ );
return (
-
-
- {/* Message content */}
-
-
- {processedMessage || ""}
-
-
+ showMessage && (
+
+
+ {/* Message content */}
+
+
+ {processedMessage || ""}
+
+
- {writingIndicator}
+ {writingIndicator}
- {/* Stale message */}
- {staleComponent}
+ {/* Stale message */}
+ {staleComponent}
-
-
+ )
);
};
diff --git a/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx b/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx
index fdaa749f..1794228d 100644
--- a/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx
+++ b/webapp/_webapp/src/components/message-entry-container/toolcall-prepare.tsx
@@ -1,12 +1,47 @@
import { cn } from "@heroui/react";
import { LoadingIndicator } from "../loading-indicator";
-export const ToolCallPrepareMessageContainer = ({ stale, preparing }: { stale: boolean; preparing: boolean }) => {
+export const ToolCallPrepareMessageContainer = ({
+ functionName,
+ stale,
+ preparing,
+}: {
+ functionName: string;
+ stale: boolean;
+ preparing: boolean;
+}) => {
+ // When preparing, show minimal UI with just the text
+ if (preparing && !stale) {
+ return (
+
+
+ Preparing function {functionName}...
+
+
+ );
+ }
+
+ // When prepared or stale, show the full indicator
return (
-
+
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/general.tsx b/webapp/_webapp/src/components/message-entry-container/tools/general.tsx
new file mode 100644
index 00000000..c9a4d0f8
--- /dev/null
+++ b/webapp/_webapp/src/components/message-entry-container/tools/general.tsx
@@ -0,0 +1,76 @@
+import { cn } from "@heroui/react";
+import { useState } from "react";
+
+type GeneralToolCardProps = {
+ functionName: string;
+ message: string;
+ animated: boolean;
+};
+
+const shimmerStyle = {
+ WebkitTextFillColor: "transparent",
+ animationDelay: "0.5s",
+ animationDuration: "3s",
+ animationIterationCount: "infinite",
+ animationName: "shimmer",
+ background: "#cdcdcd -webkit-gradient(linear, 100% 0, 0 0, from(#cdcdcd), color-stop(.5, #1a1a1a), to(#cdcdcd))",
+ WebkitBackgroundClip: "text",
+ backgroundRepeat: "no-repeat",
+ backgroundSize: "50% 200%",
+ backgroundPositionX: "-100%",
+} as const;
+
+export const GeneralToolCard = ({ functionName, message, animated }: GeneralToolCardProps) => {
+ const [isCollapsed, setIsCollapsed] = useState(true);
+
+ // When no message, show minimal "Calling tool..." style like Preparing function
+ if (!message) {
+ return (
+
+
+ Calling tool {functionName}...
+
+
+ );
+ }
+
+ const toggleCollapse = () => {
+ setIsCollapsed(!isCollapsed);
+ };
+ const pascalCase = (str: string) => {
+ const words = str.split("_");
+ return words.map((word) => word.charAt(0).toUpperCase() + word.slice(1)).join(" ");
+ };
+ // When there is a message, show the compact card with collapsible content
+ return (
+
+
+
+
{pascalCase(functionName)}
+
+
+
+ {message}
+
+
+ );
+};
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/README.md b/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/README.md
deleted file mode 100644
index 9f3aa063..00000000
--- a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# Paper Score Comment Component
-
-这个目录包含了Paper Score Comment功能的组件拆分。
-
-## 文件结构
-
-- `index.tsx` - 主组件,负责整体逻辑和状态管理
-- `types.ts` - 类型定义
-- `utils.ts` - 工具函数(重要性颜色、图标等)
-- `comment-item.tsx` - 单个评论项组件
-- `stats-summary.tsx` - 统计摘要组件(显示Critical/High/Medium数量)
-- `filter-controls.tsx` - 过滤器控制组件(搜索、重要性过滤)
-- `comments-list.tsx` - 评论列表组件(过滤和排序逻辑)
-- `add-comments-button.tsx` - 添加评论到Overleaf的按钮组件
-
-## 组件职责
-
-### index.tsx
-
-- 解析消息数据
-- 管理全局状态(cookies、展开状态等)
-- 协调各个子组件
-
-### comment-item.tsx
-
-- 渲染单个评论项
-- 处理文本展开/折叠
-- 显示重要性标签和图标
-
-### stats-summary.tsx
-
-- 显示评论统计信息
-- 按重要性分类显示数量
-
-### filter-controls.tsx
-
-- 提供搜索功能
-- 提供重要性过滤
-- 显示过滤结果统计
-
-### comments-list.tsx
-
-- 过滤和排序评论
-- 处理空状态显示
-- 渲染评论列表
-
-### add-comments-button.tsx
-
-- 处理添加评论到Overleaf的逻辑
-- 管理加载状态和错误处理
-- 显示操作结果
-
-### utils.ts
-
-- `getImportanceColor()` - 根据重要性返回颜色类名
-- `getImportanceIcon()` - 根据重要性返回图标名称
-- `cleanCommentText()` - 清理评论文本中的表情符号
-
-### types.ts
-
-- `PaperScoreCommentCardProps` - 主组件的props类型定义
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx b/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx
index 6c97e37d..473ad0fb 100644
--- a/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx
+++ b/webapp/_webapp/src/components/message-entry-container/tools/paper-score-comment/index.tsx
@@ -49,8 +49,7 @@ export const PaperScoreCommentCard = ({ messageId, message, preparing, animated
);
setSelectedComments(new Set(allCommentIds));
}
- } catch (error) {
- // eslint-disable-line @typescript-eslint/no-unused-vars
+ } catch {
// Ignore parsing errors here, they'll be handled in the render
}
}
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx b/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx
index 7b01a7c7..3f4b4c8d 100644
--- a/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx
+++ b/webapp/_webapp/src/components/message-entry-container/tools/tools.tsx
@@ -1,12 +1,12 @@
import { PaperScoreCard } from "./paper-score";
import { PaperScoreCommentCard } from "./paper-score-comment/index";
-import { UnknownToolCard } from "./unknown";
import { GreetingCard } from "./greeting";
import { ErrorToolCard } from "./error";
import { AlwaysExceptionCard } from "./always-exception";
import { JsonRpc } from "./jsonrpc";
import { ReviewPaperCard } from "./review-paper";
import { parseJsonRpcResult, UNKNOWN_JSONRPC_RESULT } from "./utils/common";
+import { GeneralToolCard } from "./general";
type ToolsProps = {
messageId: string;
@@ -64,6 +64,6 @@ export default function Tools({ messageId, functionName, message, error, prepari
if (jsonRpcResult) {
return
;
} else {
- return
;
+ return
;
}
}
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/unknown.tsx b/webapp/_webapp/src/components/message-entry-container/tools/unknown.tsx
deleted file mode 100644
index 51b4cb89..00000000
--- a/webapp/_webapp/src/components/message-entry-container/tools/unknown.tsx
+++ /dev/null
@@ -1,18 +0,0 @@
-import { cn } from "@heroui/react";
-
-type UnknownToolCardProps = {
- functionName: string;
- message: string;
- animated: boolean;
-};
-
-export const UnknownToolCard = ({ functionName, message, animated }: UnknownToolCardProps) => {
- return (
-
-
- Unknown Tool "{functionName}"
-
- {message}
-
- );
-};
diff --git a/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx b/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx
index 92e09f2c..58cdc8ea 100644
--- a/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx
+++ b/webapp/_webapp/src/components/message-entry-container/tools/utils/common.tsx
@@ -22,6 +22,7 @@ export const UNKNOWN_JSONRPC_RESULT: JsonRpcResult = {
},
};
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
const isValidJsonRpcResult = (obj: any): obj is JsonRpcResult => {
// Check if obj is an object and not null
if (typeof obj !== "object" || obj === null) {
@@ -85,7 +86,8 @@ export const parseJsonRpcResult = (message: string): JsonRpcResult | undefined =
}
return undefined;
- } catch (error) {
+ } catch {
+ // Error parsing JSONRPC result
return undefined;
}
};
diff --git a/webapp/_webapp/src/components/message-entry-container/user.tsx b/webapp/_webapp/src/components/message-entry-container/user.tsx
index 249432bf..99231a04 100644
--- a/webapp/_webapp/src/components/message-entry-container/user.tsx
+++ b/webapp/_webapp/src/components/message-entry-container/user.tsx
@@ -1,16 +1,41 @@
-import { cn } from "@heroui/react";
+import { cn, Tooltip } from "@heroui/react";
+import { useCallback, useState } from "react";
import { AttachmentPopover } from "./attachment-popover";
+import { Icon } from "@iconify/react/dist/iconify.js";
+import googleAnalytics from "../../libs/google-analytics";
+import { getProjectId } from "../../libs/helpers";
+import { useAuthStore } from "../../stores/auth-store";
// import MarkdownComponent from "../markdown";
export const UserMessageContainer = ({
content,
attachment,
stale,
+ messageId,
}: {
content: string;
attachment: string;
stale: boolean;
+ messageId: string;
}) => {
+ const { user } = useAuthStore();
+ const projectId = getProjectId();
+ const [copySuccess, setCopySuccess] = useState(false);
+
+ const handleCopy = useCallback(() => {
+ if (content) {
+ googleAnalytics.fireEvent(user?.id, "messagecard_copy_user_message", {
+ projectId,
+ messageId: messageId,
+ });
+ navigator.clipboard.writeText(content);
+ setCopySuccess(true);
+ setTimeout(() => {
+ setCopySuccess(false);
+ }, 2000);
+ }
+ }, [user?.id, projectId, content, messageId]);
+
const staleComponent = stale && (
Connection error.
Please reload this conversation.
@@ -19,12 +44,21 @@ export const UserMessageContainer = ({
return (
// Align right
-
- {/*
*/}
- {content || "Error: No content"}
- {/* */}
- {attachment &&
}
- {staleComponent}
+
+
+
+
+
+
+
+
+
+ {/*
*/}
+ {content || "Error: No content"}
+ {/* */}
+ {attachment &&
}
+ {staleComponent}
+
);
diff --git a/webapp/_webapp/src/components/pd-app-small-control-button.tsx b/webapp/_webapp/src/components/pd-app-small-control-button.tsx
index 3e1895b2..41258e08 100644
--- a/webapp/_webapp/src/components/pd-app-small-control-button.tsx
+++ b/webapp/_webapp/src/components/pd-app-small-control-button.tsx
@@ -1,13 +1,15 @@
import { cn } from "@heroui/react";
import { ReactNode } from "react";
+import { useSettingStore } from "../stores/setting-store";
export function PdAppSmallControlButton({
className,
children,
...props
}: React.HTMLAttributes
& { children: ReactNode }) {
+ const { minimalistMode } = useSettingStore();
return (
-
+
{children}
);
diff --git a/webapp/_webapp/src/components/switch-cell.tsx b/webapp/_webapp/src/components/switch-cell.tsx
index f10d78bc..981cd96c 100644
--- a/webapp/_webapp/src/components/switch-cell.tsx
+++ b/webapp/_webapp/src/components/switch-cell.tsx
@@ -33,6 +33,7 @@ export type SwitchCellProps = Omit
& {
const SwitchCell = React.forwardRef(
({ label, description, classNames, isLoading, ...props }, ref) => (
{
+export const TabHeader = ({ title, actions }: TabHeaderProps) => {
const { heightCollapseRequired } = useConversationUiStore();
const { minimalistMode } = useSettingStore();
@@ -22,12 +21,9 @@ export const TabHeader = ({ title, subTitle, actions }: TabHeaderProps) => {
const expandedHeader = (
-
{title}
-
- {subTitle}
-
+
{title}
-
{actions}
+
{actions}
);
diff --git a/webapp/_webapp/src/components/tabs.tsx b/webapp/_webapp/src/components/tabs.tsx
index 74c36090..d54dcbf2 100644
--- a/webapp/_webapp/src/components/tabs.tsx
+++ b/webapp/_webapp/src/components/tabs.tsx
@@ -26,6 +26,7 @@ export const Tabs = forwardRef(({ items }, ref) => {
const { user } = useAuthStore();
const { activeTab, setActiveTab, sidebarCollapsed } = useConversationUiStore();
const { hideAvatar } = useSettingStore();
+ const { minimalistMode } = useSettingStore();
useImperativeHandle(ref, () => ({
setSelectedTab: setActiveTab,
@@ -52,11 +53,11 @@ export const Tabs = forwardRef(({ items }, ref) => {
},
[sidebarCollapsed],
);
+
+ const width = sidebarCollapsed ? "w-16" : minimalistMode ? "w-[118px]" : "w-[140px]";
return (
<>
-
+
{!hideAvatar &&
}
(({ items }, ref) => {
variant="light"
classNames={{
tabList: "bg-gray-100",
- tab: "justify-start",
+ tab: cn("justify-start", minimalistMode ? "text-xs" : ""),
}}
selectedKey={activeTab}
onSelectionChange={(e) => {
diff --git a/webapp/_webapp/src/components/top-menu-button.tsx b/webapp/_webapp/src/components/top-menu-button.tsx
index c6b04e15..faa00f09 100644
--- a/webapp/_webapp/src/components/top-menu-button.tsx
+++ b/webapp/_webapp/src/components/top-menu-button.tsx
@@ -63,7 +63,7 @@ export const TopMenuButton = () => {
- {/* 位置重置菜单 */}
+ {/* Position reset menu */}
{
const [tooltipPosition, setTooltipPosition] = useState<{ left: number; top: number } | null>(null);
const tooltipRef = useRef
(null);
- const { selectedText, setSelectedText, setSelectionRange } = useSelectionStore();
+ const { selectedText, setSelectedText, setSurroundingText, setSelectionRange, overleafCm } = useSelectionStore();
useEffect(() => {
const handleSelectionChange = () => {
@@ -19,6 +19,30 @@ export const TooltipArea = ({ children }: { children: React.ReactNode }) => {
const text = selection.toString();
if (text.trim().length > 0) {
setSelectedText(text);
+
+ let surrounding = "";
+ if (overleafCm) {
+ try {
+ const cmContentElement = document.querySelector(".cm-content");
+ if (cmContentElement) {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const view = (cmContentElement as any).cmView.view;
+ if (view) {
+ const state = view.state;
+ // Let's try to get CM selection
+ const cmSelection = state.selection.main;
+ const doc = state.doc;
+ const before = doc.sliceString(Math.max(0, cmSelection.from - 100), cmSelection.from);
+ const after = doc.sliceString(cmSelection.to, Math.min(doc.length, cmSelection.to + 100));
+ surrounding = `${before}[SELECTED_TEXT_START]${text}[SELECTED_TEXT_END]${after}`;
+ }
+ }
+ } catch (e) {
+ // fallback
+ }
+ }
+ setSurroundingText(surrounding);
+
setSelectionRange(range);
setTooltipPosition({
left: rect.left + rect.width / 2 + window.scrollX,
diff --git a/webapp/_webapp/src/hooks/useLanguageModels.ts b/webapp/_webapp/src/hooks/useLanguageModels.ts
index 918d9010..04b2e0c5 100644
--- a/webapp/_webapp/src/hooks/useLanguageModels.ts
+++ b/webapp/_webapp/src/hooks/useLanguageModels.ts
@@ -1,28 +1,51 @@
import { useCallback, useMemo } from "react";
-import { SupportedModel } from "../pkg/gen/apiclient/chat/v1/chat_pb";
+import { SupportedModel } from "../pkg/gen/apiclient/chat/v2/chat_pb";
import { useConversationStore } from "../stores/conversation/conversation-store";
import { useListSupportedModelsQuery } from "../query";
+import { useConversationUiStore } from "../stores/conversation/conversation-ui-store";
export type Model = {
name: string;
slug: string;
+ provider: string;
+ totalContext: number;
+ maxOutput: number;
+ inputPrice: number;
+ outputPrice: number;
+};
+
+// Extract provider from model slug (e.g., "openai/gpt-4.1" -> "openai")
+const extractProvider = (slug: string): string => {
+ const parts = slug.split("/");
+ return parts.length > 1 ? parts[0] : "openai";
};
// Fallback models in case the API fails
const fallbackModels: Model[] = [
{
name: "GPT-4.1",
- slug: "gpt-4.1",
+ slug: "openai/gpt-4.1",
+ provider: "openai",
+ totalContext: 1050000,
+ maxOutput: 32800,
+ inputPrice: 200,
+ outputPrice: 800,
},
];
const mapSupportedModelToModel = (supportedModel: SupportedModel): Model => ({
name: supportedModel.name,
slug: supportedModel.slug,
+ provider: extractProvider(supportedModel.slug),
+ totalContext: Number(supportedModel.totalContext),
+ maxOutput: Number(supportedModel.maxOutput),
+ inputPrice: Number(supportedModel.inputPrice),
+ outputPrice: Number(supportedModel.outputPrice),
});
export const useLanguageModels = () => {
const { currentConversation, setCurrentConversation } = useConversationStore();
+ const { setLastUsedModelSlug } = useConversationUiStore();
const { data: supportedModelsResponse } = useListSupportedModelsQuery();
const models: Model[] = useMemo(() => {
@@ -39,12 +62,13 @@ export const useLanguageModels = () => {
const setModel = useCallback(
(model: Model) => {
+ setLastUsedModelSlug(model.slug);
setCurrentConversation({
...currentConversation,
modelSlug: model.slug,
});
},
- [setCurrentConversation, currentConversation],
+ [setCurrentConversation, currentConversation, setLastUsedModelSlug],
);
return { models, currentModel, setModel };
diff --git a/webapp/_webapp/src/hooks/useSendMessageStream.ts b/webapp/_webapp/src/hooks/useSendMessageStream.ts
index a1329535..0cbfcc96 100644
--- a/webapp/_webapp/src/hooks/useSendMessageStream.ts
+++ b/webapp/_webapp/src/hooks/useSendMessageStream.ts
@@ -3,9 +3,8 @@ import {
ConversationType,
CreateConversationMessageStreamRequest,
IncompleteIndicator,
- LanguageModel,
StreamFinalization,
-} from "../pkg/gen/apiclient/chat/v1/chat_pb";
+} from "../pkg/gen/apiclient/chat/v2/chat_pb";
import { PlainMessage } from "../query/types";
import { useStreamingMessageStore } from "../stores/streaming-message-store";
import { getProjectId } from "../libs/helpers";
@@ -24,7 +23,7 @@ import {
StreamInitialization,
StreamPartBegin,
StreamPartEnd,
-} from "../pkg/gen/apiclient/chat/v1/chat_pb";
+} from "../pkg/gen/apiclient/chat/v2/chat_pb";
import { MessageEntry, MessageEntryStatus } from "../stores/conversation/types";
import { fromJson } from "@bufbuild/protobuf";
import { useConversationStore } from "../stores/conversation/conversation-store";
@@ -36,6 +35,7 @@ import { handleIncompleteIndicator } from "../stores/conversation/handlers/handl
import { useAuthStore } from "../stores/auth-store";
import { useDevtoolStore } from "../stores/devtool-store";
import { getCookies } from "../intermediate";
+import { useSelectionStore } from "../stores/selection-store";
import { useSettingStore } from "../stores/setting-store";
/**
@@ -60,6 +60,7 @@ export function useSendMessageStream() {
const { currentConversation } = useConversationStore();
const { refetch: refetchConversationList } = useListConversationsQuery(getProjectId());
const { resetStreamingMessage, updateStreamingMessage, resetIncompleteIndicator } = useStreamingMessageStore();
+ const { surroundingText: storeSurroundingText } = useSelectionStore();
const { alwaysSyncProject } = useDevtoolStore();
const { conversationMode } = useSettingStore();
@@ -74,10 +75,10 @@ export function useSendMessageStream() {
const request: PlainMessage = {
projectId: getProjectId(),
conversationId: currentConversation.id,
- languageModel: LanguageModel.UNSPECIFIED, // backward compatibility
modelSlug: currentConversation.modelSlug,
userMessage: message,
userSelectedText: selectedText,
+ surrounding: storeSurroundingText ?? undefined,
conversationType: conversationMode === "debug" ? ConversationType.DEBUG : ConversationType.UNSPECIFIED,
};
@@ -90,6 +91,7 @@ export function useSendMessageStream() {
user: fromJson(MessageTypeUserSchema, {
content: message,
selectedText: selectedText,
+ surrounding: storeSurroundingText ?? null,
}),
};
updateStreamingMessage((prev) => ({
diff --git a/webapp/_webapp/src/index.css b/webapp/_webapp/src/index.css
index 9ea01888..177c4c7b 100644
--- a/webapp/_webapp/src/index.css
+++ b/webapp/_webapp/src/index.css
@@ -102,13 +102,21 @@ body {
}
.tool-card-title {
- @apply text-xs font-semibold font-sans text-primary-700 uppercase tracking-wider noselect;
+ @apply text-xs font-semibold font-sans text-primary-700 tracking-wider noselect;
}
.tool-card-title.tool-card-jsonrpc {
@apply font-medium text-gray-500;
}
+.tool-card.compact {
+ @apply px-[3px] py-[1px] my-0.5 bg-transparent text-xs border-0;
+}
+
+.tool-card.compact .tool-card-title {
+ @apply text-[10px];
+}
+
/* 相邻 tool-card 的样式处理 */
.tool-card + .tool-card {
/* 相邻的第二个卡片:移除上边框,调整上圆角,减少上边距,减少上 padding */
@@ -250,11 +258,11 @@ body {
}
.pd-app-tab-content .pd-app-tab-content-header {
- @apply bg-gray-100 px-3 py-2 gap-2 items-center w-full transition-all;
+ @apply bg-gray-100 pl-3 pr-1 py-2 gap-2 items-center w-full h-4 text-xs transition-all;
flex: 0;
flex-direction: row;
display: flex;
- height: 60px;
+ height: 24px;
border-bottom: 1px solid var(--pd-border-color);
}
@@ -274,6 +282,7 @@ body {
width: 100%;
min-width: 0;
flex: 1;
+ overflow-anchor: none;
@apply bg-gray-50 p-2;
}
@@ -324,19 +333,27 @@ body {
@apply text-sm text-default-400 dark:text-default-50;
}
+.chat-message-entry .message-box-user-wrapper {
+ display: flex;
+ flex-direction: row;
+ align-items: center;
+ justify-content: flex-end;
+ gap: 4px;
+ width: 100%;
+}
+
.chat-message-entry .message-box-user {
max-width: 70%;
- align-self: flex-end;
@apply text-sm text-default-800 px-3 py-2 border border-transparent rounded-xl;
@apply transition-all duration-500 ease-in-out;
- @apply bg-gray-200 self-end my-2;
+ @apply bg-gray-200 my-2;
}
.chat-message-entry .message-box-assistant {
align-self: flex-start;
@apply text-sm text-default-800 px-2 py-2 border border-transparent rounded-xl;
@apply transition-all duration-500 ease-in-out;
- @apply my-2;
+ @apply mb-2;
}
.chat-message-entry .message-box-assistant:hover {
@@ -350,6 +367,10 @@ body {
@apply gap-2 text-gray-400 mt-2 -ml-2 opacity-0 transition-all duration-100;
}
+.chat-message-entry .actions.actions-left {
+ @apply mt-0 ml-0 mr-0;
+}
+
.chat-message-entry:hover .actions {
/* height: 24px; */
@apply opacity-100;
diff --git a/webapp/_webapp/src/intermediate.ts b/webapp/_webapp/src/intermediate.ts
index 70d2b617..c9becc73 100644
--- a/webapp/_webapp/src/intermediate.ts
+++ b/webapp/_webapp/src/intermediate.ts
@@ -90,7 +90,8 @@ function makeFunction(handlerName: string, opts?: MakeFunctionOpts): (args
let getCookies: (domain: string) => Promise<{ session: string; gclb: string }>;
if (import.meta.env.DEV) {
- getCookies = async (_: string) => {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ getCookies = async (_domain: string) => {
return {
session: localStorage.getItem("pd.auth.overleafSession") ?? "",
gclb: localStorage.getItem("pd.auth.gclb") ?? "",
diff --git a/webapp/_webapp/src/libs/apiclient.ts b/webapp/_webapp/src/libs/apiclient.ts
index f02e932e..0552f4ca 100644
--- a/webapp/_webapp/src/libs/apiclient.ts
+++ b/webapp/_webapp/src/libs/apiclient.ts
@@ -6,18 +6,31 @@ import { EventEmitter } from "events";
import { ErrorCode, ErrorSchema } from "../pkg/gen/apiclient/shared/v1/shared_pb";
import { errorToast } from "./toasts";
+// Exhaustive type check helper - will cause compile error if a case is not handled
+const assertNever = (x: never): never => {
+ throw new Error("Unexpected api version: " + x);
+};
+
export type RequestOptions = {
ignoreErrorToast?: boolean;
};
+export type ApiVersion = "v1" | "v2";
+
+// Storage key mapping for each API version - add new versions here
+const API_VERSION_STORAGE_KEYS: Record = {
+ v1: "pd.devtool.endpoint",
+ v2: "pd.devtool.endpoint.v2",
+} as const;
+
class ApiClient {
private axiosInstance: AxiosInstance;
private refreshToken: string | null;
private onTokenRefreshedEventEmitter: EventEmitter;
- constructor(baseURL: string) {
+ constructor(baseURL: string, apiVersion: ApiVersion) {
this.axiosInstance = axios.create({
- baseURL,
+ baseURL: `${baseURL}/_pd/api/${apiVersion}`,
headers: {
"Content-Type": "application/json",
},
@@ -26,10 +39,18 @@ class ApiClient {
this.onTokenRefreshedEventEmitter = new EventEmitter();
}
- updateBaseURL(baseURL: string): void {
- this.axiosInstance.defaults.baseURL = baseURL;
- localStorage.setItem(LOCAL_STORAGE_KEY, baseURL);
- console.log("apiclient baseURL updated to", baseURL);
+ updateBaseURL(baseURL: string, apiVersion: ApiVersion): void {
+ this.axiosInstance.defaults.baseURL = `${baseURL}/_pd/api/${apiVersion}`;
+ switch (apiVersion) {
+ case "v1":
+ localStorage.setItem(API_VERSION_STORAGE_KEYS.v1, this.axiosInstance.defaults.baseURL);
+ break;
+ case "v2":
+ localStorage.setItem(API_VERSION_STORAGE_KEYS.v2, this.axiosInstance.defaults.baseURL);
+ break;
+ default:
+ assertNever(apiVersion); // Compile error if a new version is added but not handled
+ }
}
addListener(event: "tokenRefreshed", listener: (args: { token: string; refreshToken: string }) => void): void {
@@ -100,7 +121,7 @@ class ApiClient {
const errorPayload = fromJson(ErrorSchema, errorData);
if (!options?.ignoreErrorToast) {
const message = errorPayload.message.replace(/^rpc error: code = Code\(\d+\) desc = /, "");
- errorToast(message, `Request Failed: ${ErrorCode[errorPayload.code]}`);
+ errorToast(message + ` (${config.url})`, `Request Failed: ${ErrorCode[errorPayload.code]}`);
}
throw errorPayload;
}
@@ -188,22 +209,30 @@ class ApiClient {
}
}
-const DEFAULT_ENDPOINT = `${process.env.PD_API_ENDPOINT || "http://localhost:3000"}/_pd/api/v1`;
-const LOCAL_STORAGE_KEY = "pd.devtool.endpoint";
+const DEFAULT_ENDPOINT = `${process.env.PD_API_ENDPOINT || "http://localhost:3000"}`;
+const LOCAL_STORAGE_KEY_V1 = "pd.devtool.endpoint";
+const LOCAL_STORAGE_KEY_V2 = "pd.devtool.endpoint.v2";
+
// Create apiclient instance with endpoint from localStorage or default
export const getEndpointFromLocalStorage = () => {
+ let endpoint = "";
try {
- return localStorage.getItem(LOCAL_STORAGE_KEY) || DEFAULT_ENDPOINT;
- } catch (error) {
+ endpoint = localStorage.getItem(LOCAL_STORAGE_KEY_V1) || DEFAULT_ENDPOINT;
+ } catch {
// Fallback if localStorage is not available (e.g., in SSR)
- return DEFAULT_ENDPOINT;
+ endpoint = DEFAULT_ENDPOINT;
}
+
+ return endpoint.replace("/_pd/api/v1", "").replace("/_pd/api/v2", ""); // compatible with old endpoint
};
export const resetApiClientEndpoint = () => {
- localStorage.removeItem(LOCAL_STORAGE_KEY);
- apiclient.updateBaseURL(getEndpointFromLocalStorage());
+ localStorage.removeItem(LOCAL_STORAGE_KEY_V1);
+ localStorage.removeItem(LOCAL_STORAGE_KEY_V2);
+ apiclient.updateBaseURL(getEndpointFromLocalStorage(), "v1");
+ apiclientV2.updateBaseURL(getEndpointFromLocalStorage(), "v2");
};
-const apiclient = new ApiClient(getEndpointFromLocalStorage());
+const apiclient = new ApiClient(getEndpointFromLocalStorage(), "v1");
+export const apiclientV2 = new ApiClient(getEndpointFromLocalStorage(), "v2");
export default apiclient;
diff --git a/webapp/_webapp/src/libs/google-analytics.ts b/webapp/_webapp/src/libs/google-analytics.ts
index 9abbdb47..2085f8c2 100644
--- a/webapp/_webapp/src/libs/google-analytics.ts
+++ b/webapp/_webapp/src/libs/google-analytics.ts
@@ -53,17 +53,15 @@ class Analytics {
],
}),
},
- ).catch((_) => {
- // eslint-disable-line @typescript-eslint/no-unused-vars
- // logInfo("Google Analytics request failed with an exception", e);
+ ).catch(() => {
+ // logInfo("Google Analytics request failed with an exception");
});
if (!this.debug) {
return;
}
- } catch (e) {
- // eslint-disable-line @typescript-eslint/no-unused-vars
- // logInfo("Google Analytics request failed with an exception", e);
+ } catch {
+ // logInfo("Google Analytics request failed with an exception");
}
}
diff --git a/webapp/_webapp/src/libs/helpers.ts b/webapp/_webapp/src/libs/helpers.ts
index 9de1db10..1b7016a9 100644
--- a/webapp/_webapp/src/libs/helpers.ts
+++ b/webapp/_webapp/src/libs/helpers.ts
@@ -193,10 +193,10 @@ export function addClickedOverleafComment(projectId: string, messageId: string)
if (!projectId || !messageId) return;
const key = OVERLEAF_COMMENTS_CLICKED_PREFIX + projectId;
let arr = getClickedOverleafComments(projectId);
- // 去重
+ // Deduplicate
arr = arr.filter((id) => id !== messageId);
arr.push(messageId);
- // 最多 200 条
+ // Maximum 200 items
if (arr.length > MAX_CLICKED_COMMENTS) {
arr = arr.slice(arr.length - MAX_CLICKED_COMMENTS);
}
@@ -209,7 +209,7 @@ export function hasClickedOverleafComment(projectId: string, messageId: string):
return arr.includes(messageId);
}
-// 经典 debounce,适合事件回调
+// Classic debounce, suitable for event callbacks
export function debounce(fn: (...args: unknown[]) => void, wait: number) {
let timer: ReturnType | null = null;
return function (this: unknown, ...args: unknown[]) {
@@ -231,6 +231,6 @@ export function blobToBase64(blob: Blob): Promise {
}
};
reader.onerror = reject;
- reader.readAsDataURL(blob); // 读取为 DataURL 格式(包含 base64)
+ reader.readAsDataURL(blob); // Read as DataURL format (contains base64)
});
}
diff --git a/webapp/_webapp/src/libs/oauth.ts b/webapp/_webapp/src/libs/oauth.ts
index 0374caba..f034cd24 100644
--- a/webapp/_webapp/src/libs/oauth.ts
+++ b/webapp/_webapp/src/libs/oauth.ts
@@ -33,9 +33,9 @@ export function appleAuthUrl(state: string) {
const url = new URL("https://appleid.apple.com/auth/authorize");
url.searchParams.set("redirect_uri", REDIRECT_URI);
url.searchParams.set("state", state);
- url.searchParams.set("nonce", Math.random().toString(36).substring(2, 15)); // 推荐加 nonce
+ url.searchParams.set("nonce", Math.random().toString(36).substring(2, 15)); // Recommended to add nonce
url.searchParams.set("scope", "name email");
- url.searchParams.set("response_mode", "form_post"); // 或 "form_post"
+ url.searchParams.set("response_mode", "form_post"); // Or "form_post"
url.searchParams.set("client_id", "dev.junyi.PaperDebugger.si");
url.searchParams.set("response_type", "code id_token");
return url.toString();
diff --git a/webapp/_webapp/src/libs/permissions.ts b/webapp/_webapp/src/libs/permissions.ts
index fc2d5eeb..5eeea3a4 100644
--- a/webapp/_webapp/src/libs/permissions.ts
+++ b/webapp/_webapp/src/libs/permissions.ts
@@ -3,12 +3,14 @@ export async function registerContentScripts(origins?: string[]) {
try {
const resolvedOrigins = origins ?? (await chrome.permissions.getAll()).origins ?? [];
if (resolvedOrigins.length === 0) {
+ // eslint-disable-next-line no-console
console.log("[PaperDebugger] No origins found, skipping content script registration");
return;
}
const scriptIds = (await chrome.scripting.getRegisteredContentScripts()).map((script) => script.id);
if (scriptIds.length > 0) {
+ // eslint-disable-next-line no-console
console.log("[PaperDebugger] Unregistering dynamic content scripts", scriptIds);
await chrome.scripting.unregisterContentScripts({ ids: scriptIds });
}
@@ -30,8 +32,10 @@ export async function registerContentScripts(origins?: string[]) {
},
]);
+ // eslint-disable-next-line no-console
console.log("[PaperDebugger] Registration complete", resolvedOrigins);
} catch (error) {
+ // eslint-disable-next-line no-console
console.error("[PaperDebugger] Failed to register content scripts", error);
}
}
diff --git a/webapp/_webapp/src/main.tsx b/webapp/_webapp/src/main.tsx
index bdec8369..51cf4cc0 100644
--- a/webapp/_webapp/src/main.tsx
+++ b/webapp/_webapp/src/main.tsx
@@ -9,7 +9,7 @@ import googleAnalytics from "./libs/google-analytics";
import { generateSHA1Hash, onElementAdded, onElementAppeared } from "./libs/helpers";
import { OverleafCodeMirror, completion, createSuggestionExtension } from "./libs/inline-suggestion";
import { logInfo } from "./libs/logger";
-import apiclient, { getEndpointFromLocalStorage } from "./libs/apiclient";
+import apiclient, { apiclientV2, getEndpointFromLocalStorage } from "./libs/apiclient";
import { Providers } from "./providers";
import { useAuthStore } from "./stores/auth-store";
import { useConversationUiStore } from "./stores/conversation/conversation-ui-store";
@@ -49,10 +49,13 @@ export const Main = () => {
const { inputRef, setActiveTab } = useConversationUiStore();
const {
lastSelectedText,
+ lastSurroundingText,
lastSelectionRange,
setLastSelectedText,
+ setLastSurroundingText,
setLastSelectionRange,
setSelectedText,
+ setSurroundingText,
setSelectionRange,
clearOverleafSelection,
} = useSelectionStore();
@@ -64,7 +67,8 @@ export const Main = () => {
const { loadPrompts } = usePromptLibraryStore();
useEffect(() => {
- apiclient.updateBaseURL(getEndpointFromLocalStorage());
+ apiclient.updateBaseURL(getEndpointFromLocalStorage(), "v1");
+ apiclientV2.updateBaseURL(getEndpointFromLocalStorage(), "v2");
login();
loadSettings();
loadPrompts();
@@ -74,12 +78,10 @@ export const Main = () => {
if (disableLineWrap) {
onElementAppeared(".cm-lineWrapping", (editor) => {
editor.classList.remove("cm-lineWrapping");
- console.log("disable line wrap");
});
} else {
onElementAppeared(".cm-content", (editor) => {
editor.classList.add("cm-lineWrapping");
- console.log("enable line wrap");
});
}
}, [disableLineWrap]);
@@ -90,7 +92,28 @@ export const Main = () => {
// check if the selection is in the editor
const editor = document.querySelector(".cm-editor");
if (editor && editor.contains(selection?.anchorNode ?? null)) {
- setLastSelectedText(selection?.toString() ?? null);
+ const text = selection?.toString() ?? null;
+ setLastSelectedText(text);
+
+ let surrounding = "";
+ try {
+ const cmContentElement = document.querySelector(".cm-content");
+ if (cmContentElement) {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const state = (cmContentElement as any).cmView.view.state;
+ if (state) {
+ const cmSelection = state.selection.main;
+ const doc = state.doc;
+ const before = doc.sliceString(Math.max(0, cmSelection.from - 100), cmSelection.from);
+ const after = doc.sliceString(cmSelection.to, Math.min(doc.length, cmSelection.to + 100));
+ surrounding = `${before}[SELECTED_TEXT_START]${text}[SELECTED_TEXT_END]${after}`;
+ }
+ }
+ } catch (e) {
+ // fallback
+ }
+ setLastSurroundingText(surrounding);
+
setLastSelectionRange(selection?.getRangeAt(0) ?? null);
return;
} else {
@@ -108,10 +131,21 @@ export const Main = () => {
const selectAndOpenPaperDebugger = useCallback(() => {
setActiveTab("chat");
setSelectedText(lastSelectedText);
+ setSurroundingText(lastSurroundingText);
setSelectionRange(lastSelectionRange);
setIsOpen(true);
clearOverleafSelection();
- }, [setSelectedText, setSelectionRange, setIsOpen, lastSelectedText, lastSelectionRange, clearOverleafSelection]);
+ }, [
+ setActiveTab,
+ setSelectedText,
+ setSurroundingText,
+ setSelectionRange,
+ setIsOpen,
+ lastSelectedText,
+ lastSurroundingText,
+ lastSelectionRange,
+ clearOverleafSelection,
+ ]);
useEffect(() => {
const handleKeyDown = (event: KeyboardEvent) => {
@@ -179,8 +213,6 @@ export const Main = () => {
);
};
-console.log("[PaperDebugger] PaperDebugger injected, find toolbar-left or ide-redesign-toolbar-menu-bar to add button");
-
if (!import.meta.env.DEV) {
onElementAppeared(".toolbar-left .toolbar-item, .ide-redesign-toolbar-menu-bar", () => {
logInfo("initializing");
diff --git a/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts
new file mode 100644
index 00000000..c41584a5
--- /dev/null
+++ b/webapp/_webapp/src/pkg/gen/apiclient/chat/v2/chat_pb.ts
@@ -0,0 +1,849 @@
+// @generated by protoc-gen-es v2.10.2 with parameter "target=ts"
+// @generated from file chat/v2/chat.proto (package chat.v2, syntax proto3)
+/* eslint-disable */
+
+import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv2";
+import { enumDesc, fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv2";
+import { file_google_api_annotations } from "@buf/googleapis_googleapis.bufbuild_es/google/api/annotations_pb";
+import type { Message as Message$1 } from "@bufbuild/protobuf";
+
+/**
+ * Describes the file chat/v2/chat.proto.
+ */
+export const file_chat_v2_chat: GenFile = /*@__PURE__*/
+ fileDesc("ChJjaGF0L3YyL2NoYXQucHJvdG8SB2NoYXQudjIiUAoTTWVzc2FnZVR5cGVUb29sQ2FsbBIMCgRuYW1lGAEgASgJEgwKBGFyZ3MYAiABKAkSDgoGcmVzdWx0GAMgASgJEg0KBWVycm9yGAQgASgJIkEKI01lc3NhZ2VUeXBlVG9vbENhbGxQcmVwYXJlQXJndW1lbnRzEgwKBG5hbWUYASABKAkSDAoEYXJncxgCIAEoCSIkChFNZXNzYWdlVHlwZVN5c3RlbRIPCgdjb250ZW50GAEgASgJIjsKFE1lc3NhZ2VUeXBlQXNzaXN0YW50Eg8KB2NvbnRlbnQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJ6Cg9NZXNzYWdlVHlwZVVzZXISDwoHY29udGVudBgBIAEoCRIaCg1zZWxlY3RlZF90ZXh0GAIgASgJSACIAQESGAoLc3Vycm91bmRpbmcYByABKAlIAYgBAUIQCg5fc2VsZWN0ZWRfdGV4dEIOCgxfc3Vycm91bmRpbmciKQoSTWVzc2FnZVR5cGVVbmtub3duEhMKC2Rlc2NyaXB0aW9uGAEgASgJIuQCCg5NZXNzYWdlUGF5bG9hZBIsCgZzeXN0ZW0YASABKAsyGi5jaGF0LnYyLk1lc3NhZ2VUeXBlU3lzdGVtSAASKAoEdXNlchgCIAEoCzIYLmNoYXQudjIuTWVzc2FnZVR5cGVVc2VySAASMgoJYXNzaXN0YW50GAMgASgLMh0uY2hhdC52Mi5NZXNzYWdlVHlwZUFzc2lzdGFudEgAElMKG3Rvb2xfY2FsbF9wcmVwYXJlX2FyZ3VtZW50cxgEIAEoCzIsLmNoYXQudjIuTWVzc2FnZVR5cGVUb29sQ2FsbFByZXBhcmVBcmd1bWVudHNIABIxCgl0b29sX2NhbGwYBSABKAsyHC5jaGF0LnYyLk1lc3NhZ2VUeXBlVG9vbENhbGxIABIuCgd1bmtub3duGAYgASgLMhsuY2hhdC52Mi5NZXNzYWdlVHlwZVVua25vd25IAEIOCgxtZXNzYWdlX3R5cGUiWgoHTWVzc2FnZRISCgptZXNzYWdlX2lkGAEgASgJEigKB3BheWxvYWQYAiABKAsyFy5jaGF0LnYyLk1lc3NhZ2VQYXlsb2FkEhEKCXRpbWVzdGFtcBgDIAEoAyJhCgxDb252ZXJzYXRpb24SCgoCaWQYASABKAkSDQoFdGl0bGUYAiABKAkSEgoKbW9kZWxfc2x1ZxgDIAEoCRIiCghtZXNzYWdlcxgEIAMoCzIQLmNoYXQudjIuTWVzc2FnZSJCChhMaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QSFwoKcHJvamVjdF9pZBgBIAEoCUgAiAEBQg0KC19wcm9qZWN0X2lkIkkKGUxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2USLAoNY29udmVyc2F0aW9ucxgBIAMoCzIVLmNoYXQudjIuQ29udmVyc2F0aW9uIjEKFkdldENvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIkYKF0dldENvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjIuQ29udmVyc2F0aW9uIkMKGVVwZGF0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJEg0KBXRpdGxlGAIgASgJIkkKGlVwZGF0ZUNvbnZlcnNhdGlvblJlc3BvbnNlEisKDGNvbnZlcnNhdGlvbhgBIAEoCzIVLmNoYXQudjIuQ29udmVyc2F0aW9uIjQKGURlbGV0ZUNvbnZlcnNhdGlvblJlcXVlc3QSFwoPY29udmVyc2F0aW9uX2lkGAEgASgJIhwKGkRlbGV0ZUNvbnZlcnNhdGlvblJlc3BvbnNlIoIB
Cg5TdXBwb3J0ZWRNb2RlbBIMCgRuYW1lGAEgASgJEgwKBHNsdWcYAiABKAkSFQoNdG90YWxfY29udGV4dBgDIAEoAxISCgptYXhfb3V0cHV0GAQgASgDEhMKC2lucHV0X3ByaWNlGAUgASgDEhQKDG91dHB1dF9wcmljZRgGIAEoAyIcChpMaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdCJGChtMaXN0U3VwcG9ydGVkTW9kZWxzUmVzcG9uc2USJwoGbW9kZWxzGAEgAygLMhcuY2hhdC52Mi5TdXBwb3J0ZWRNb2RlbCJDChRTdHJlYW1Jbml0aWFsaXphdGlvbhIXCg9jb252ZXJzYXRpb25faWQYASABKAkSEgoKbW9kZWxfc2x1ZxgCIAEoCSJPCg9TdHJlYW1QYXJ0QmVnaW4SEgoKbWVzc2FnZV9pZBgBIAEoCRIoCgdwYXlsb2FkGAMgASgLMhcuY2hhdC52Mi5NZXNzYWdlUGF5bG9hZCIxCgxNZXNzYWdlQ2h1bmsSEgoKbWVzc2FnZV9pZBgBIAEoCRINCgVkZWx0YRgCIAEoCSI6ChNJbmNvbXBsZXRlSW5kaWNhdG9yEg4KBnJlYXNvbhgBIAEoCRITCgtyZXNwb25zZV9pZBgCIAEoCSJNCg1TdHJlYW1QYXJ0RW5kEhIKCm1lc3NhZ2VfaWQYASABKAkSKAoHcGF5bG9hZBgDIAEoCzIXLmNoYXQudjIuTWVzc2FnZVBheWxvYWQiLQoSU3RyZWFtRmluYWxpemF0aW9uEhcKD2NvbnZlcnNhdGlvbl9pZBgBIAEoCSIkCgtTdHJlYW1FcnJvchIVCg1lcnJvcl9tZXNzYWdlGAEgASgJIssCCiZDcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBISCgpwcm9qZWN0X2lkGAEgASgJEhwKD2NvbnZlcnNhdGlvbl9pZBgCIAEoCUgAiAEBEhIKCm1vZGVsX3NsdWcYAyABKAkSFAoMdXNlcl9tZXNzYWdlGAQgASgJEh8KEnVzZXJfc2VsZWN0ZWRfdGV4dBgFIAEoCUgBiAEBEjkKEWNvbnZlcnNhdGlvbl90eXBlGAYgASgOMhkuY2hhdC52Mi5Db252ZXJzYXRpb25UeXBlSAKIAQESGAoLc3Vycm91bmRpbmcYCCABKAlIA4gBAUISChBfY29udmVyc2F0aW9uX2lkQhUKE191c2VyX3NlbGVjdGVkX3RleHRCFAoSX2NvbnZlcnNhdGlvbl90eXBlQg4KDF9zdXJyb3VuZGluZyK/AwonQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlEj4KFXN0cmVhbV9pbml0aWFsaXphdGlvbhgBIAEoCzIdLmNoYXQudjIuU3RyZWFtSW5pdGlhbGl6YXRpb25IABI1ChFzdHJlYW1fcGFydF9iZWdpbhgCIAEoCzIYLmNoYXQudjIuU3RyZWFtUGFydEJlZ2luSAASLgoNbWVzc2FnZV9jaHVuaxgDIAEoCzIVLmNoYXQudjIuTWVzc2FnZUNodW5rSAASPAoUaW5jb21wbGV0ZV9pbmRpY2F0b3IYBCABKAsyHC5jaGF0LnYyLkluY29tcGxldGVJbmRpY2F0b3JIABIxCg9zdHJlYW1fcGFydF9lbmQYBSABKAsyFi5jaGF0LnYyLlN0cmVhbVBhcnRFbmRIABI6ChNzdHJlYW1fZmluYWxpemF0aW9uGAYgASgLMhsuY2hhdC52Mi5TdHJlYW1GaW5hbGl6YXRpb25IABIsCgxzdHJlYW1fZXJyb3IYByABKAsyFC5jaGF0LnYyLlN0cmVhbUVycm9ySABCEgoQcmVzcG9uc2VfcGF5bG9hZCpSChBDb252ZXJzYXRpb25UeXBlEiEKHUNPTlZFUlNBVElPTl9UWVBFX1VOU1BFQ0lGSUVEEAASGwoX
Q09OVkVSU0FUSU9OX1RZUEVfREVCVUcQATKoBwoLQ2hhdFNlcnZpY2USgwEKEUxpc3RDb252ZXJzYXRpb25zEiEuY2hhdC52Mi5MaXN0Q29udmVyc2F0aW9uc1JlcXVlc3QaIi5jaGF0LnYyLkxpc3RDb252ZXJzYXRpb25zUmVzcG9uc2UiJ4LT5JMCIRIfL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucxKPAQoPR2V0Q29udmVyc2F0aW9uEh8uY2hhdC52Mi5HZXRDb252ZXJzYXRpb25SZXF1ZXN0GiAuY2hhdC52Mi5HZXRDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzEjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EsIBCh9DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtEi8uY2hhdC52Mi5DcmVhdGVDb252ZXJzYXRpb25NZXNzYWdlU3RyZWFtUmVxdWVzdBowLmNoYXQudjIuQ3JlYXRlQ29udmVyc2F0aW9uTWVzc2FnZVN0cmVhbVJlc3BvbnNlIjqC0+STAjQ6ASoiLy9fcGQvYXBpL3YyL2NoYXRzL2NvbnZlcnNhdGlvbnMvbWVzc2FnZXMvc3RyZWFtMAESmwEKElVwZGF0ZUNvbnZlcnNhdGlvbhIiLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVxdWVzdBojLmNoYXQudjIuVXBkYXRlQ29udmVyc2F0aW9uUmVzcG9uc2UiPILT5JMCNjoBKjIxL19wZC9hcGkvdjIvY2hhdHMvY29udmVyc2F0aW9ucy97Y29udmVyc2F0aW9uX2lkfRKYAQoSRGVsZXRlQ29udmVyc2F0aW9uEiIuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXF1ZXN0GiMuY2hhdC52Mi5EZWxldGVDb252ZXJzYXRpb25SZXNwb25zZSI5gtPkkwIzKjEvX3BkL2FwaS92Mi9jaGF0cy9jb252ZXJzYXRpb25zL3tjb252ZXJzYXRpb25faWR9EoIBChNMaXN0U3VwcG9ydGVkTW9kZWxzEiMuY2hhdC52Mi5MaXN0U3VwcG9ydGVkTW9kZWxzUmVxdWVzdBokLmNoYXQudjIuTGlzdFN1cHBvcnRlZE1vZGVsc1Jlc3BvbnNlIiCC0+STAhoSGC9fcGQvYXBpL3YyL2NoYXRzL21vZGVsc0J/Cgtjb20uY2hhdC52MkIJQ2hhdFByb3RvUAFaKHBhcGVyZGVidWdnZXIvcGtnL2dlbi9hcGkvY2hhdC92MjtjaGF0djKiAgNDWFiqAgdDaGF0LlYyygIHQ2hhdFxWMuICE0NoYXRcVjJcR1BCTWV0YWRhdGHqAghDaGF0OjpWMmIGcHJvdG8z", [file_google_api_annotations]);
+
+/**
+ * @generated from message chat.v2.MessageTypeToolCall
+ */
+export type MessageTypeToolCall = Message$1<"chat.v2.MessageTypeToolCall"> & {
+ /**
+ * @generated from field: string name = 1;
+ */
+ name: string;
+
+ /**
+ * Json string
+ *
+ * @generated from field: string args = 2;
+ */
+ args: string;
+
+ /**
+ * Json string
+ *
+ * @generated from field: string result = 3;
+ */
+ result: string;
+
+ /**
+ * Json string
+ *
+ * @generated from field: string error = 4;
+ */
+ error: string;
+};
+
+/**
+ * Describes the message chat.v2.MessageTypeToolCall.
+ * Use `create(MessageTypeToolCallSchema)` to create a new message.
+ */
+export const MessageTypeToolCallSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 0);
+
+/**
+ * @generated from message chat.v2.MessageTypeToolCallPrepareArguments
+ */
+export type MessageTypeToolCallPrepareArguments = Message$1<"chat.v2.MessageTypeToolCallPrepareArguments"> & {
+ /**
+ * @generated from field: string name = 1;
+ */
+ name: string;
+
+ /**
+ * Json string
+ *
+ * @generated from field: string args = 2;
+ */
+ args: string;
+};
+
+/**
+ * Describes the message chat.v2.MessageTypeToolCallPrepareArguments.
+ * Use `create(MessageTypeToolCallPrepareArgumentsSchema)` to create a new message.
+ */
+export const MessageTypeToolCallPrepareArgumentsSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 1);
+
+/**
+ * @generated from message chat.v2.MessageTypeSystem
+ */
+export type MessageTypeSystem = Message$1<"chat.v2.MessageTypeSystem"> & {
+ /**
+ * @generated from field: string content = 1;
+ */
+ content: string;
+};
+
+/**
+ * Describes the message chat.v2.MessageTypeSystem.
+ * Use `create(MessageTypeSystemSchema)` to create a new message.
+ */
+export const MessageTypeSystemSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 2);
+
+/**
+ * @generated from message chat.v2.MessageTypeAssistant
+ */
+export type MessageTypeAssistant = Message$1<"chat.v2.MessageTypeAssistant"> & {
+ /**
+ * @generated from field: string content = 1;
+ */
+ content: string;
+
+ /**
+ * @generated from field: string model_slug = 2;
+ */
+ modelSlug: string;
+};
+
+/**
+ * Describes the message chat.v2.MessageTypeAssistant.
+ * Use `create(MessageTypeAssistantSchema)` to create a new message.
+ */
+export const MessageTypeAssistantSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 3);
+
+/**
+ * @generated from message chat.v2.MessageTypeUser
+ */
+export type MessageTypeUser = Message$1<"chat.v2.MessageTypeUser"> & {
+ /**
+ * @generated from field: string content = 1;
+ */
+ content: string;
+
+ /**
+ * @generated from field: optional string selected_text = 2;
+ */
+ selectedText?: string;
+
+ /**
+ * @generated from field: optional string surrounding = 7;
+ */
+ surrounding?: string;
+};
+
+/**
+ * Describes the message chat.v2.MessageTypeUser.
+ * Use `create(MessageTypeUserSchema)` to create a new message.
+ */
+export const MessageTypeUserSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 4);
+
+/**
+ * @generated from message chat.v2.MessageTypeUnknown
+ */
+export type MessageTypeUnknown = Message$1<"chat.v2.MessageTypeUnknown"> & {
+ /**
+ * @generated from field: string description = 1;
+ */
+ description: string;
+};
+
+/**
+ * Describes the message chat.v2.MessageTypeUnknown.
+ * Use `create(MessageTypeUnknownSchema)` to create a new message.
+ */
+export const MessageTypeUnknownSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 5);
+
+/**
+ * @generated from message chat.v2.MessagePayload
+ */
+export type MessagePayload = Message$1<"chat.v2.MessagePayload"> & {
+ /**
+ * @generated from oneof chat.v2.MessagePayload.message_type
+ */
+ messageType: {
+ /**
+ * @generated from field: chat.v2.MessageTypeSystem system = 1;
+ */
+ value: MessageTypeSystem;
+ case: "system";
+ } | {
+ /**
+ * @generated from field: chat.v2.MessageTypeUser user = 2;
+ */
+ value: MessageTypeUser;
+ case: "user";
+ } | {
+ /**
+ * @generated from field: chat.v2.MessageTypeAssistant assistant = 3;
+ */
+ value: MessageTypeAssistant;
+ case: "assistant";
+ } | {
+ /**
+ * @generated from field: chat.v2.MessageTypeToolCallPrepareArguments tool_call_prepare_arguments = 4;
+ */
+ value: MessageTypeToolCallPrepareArguments;
+ case: "toolCallPrepareArguments";
+ } | {
+ /**
+ * @generated from field: chat.v2.MessageTypeToolCall tool_call = 5;
+ */
+ value: MessageTypeToolCall;
+ case: "toolCall";
+ } | {
+ /**
+ * @generated from field: chat.v2.MessageTypeUnknown unknown = 6;
+ */
+ value: MessageTypeUnknown;
+ case: "unknown";
+ } | { case: undefined; value?: undefined };
+};
+
+/**
+ * Describes the message chat.v2.MessagePayload.
+ * Use `create(MessagePayloadSchema)` to create a new message.
+ */
+export const MessagePayloadSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 6);
+
+/**
+ * @generated from message chat.v2.Message
+ */
+export type Message = Message$1<"chat.v2.Message"> & {
+ /**
+ * @generated from field: string message_id = 1;
+ */
+ messageId: string;
+
+ /**
+ * @generated from field: chat.v2.MessagePayload payload = 2;
+ */
+ payload?: MessagePayload;
+
+ /**
+ * @generated from field: int64 timestamp = 3;
+ */
+ timestamp: bigint;
+};
+
+/**
+ * Describes the message chat.v2.Message.
+ * Use `create(MessageSchema)` to create a new message.
+ */
+export const MessageSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 7);
+
+/**
+ * @generated from message chat.v2.Conversation
+ */
+export type Conversation = Message$1<"chat.v2.Conversation"> & {
+ /**
+ * @generated from field: string id = 1;
+ */
+ id: string;
+
+ /**
+ * @generated from field: string title = 2;
+ */
+ title: string;
+
+ /**
+ * @generated from field: string model_slug = 3;
+ */
+ modelSlug: string;
+
+ /**
+ * If list conversations, then messages length is 0.
+ *
+ * @generated from field: repeated chat.v2.Message messages = 4;
+ */
+ messages: Message[];
+};
+
+/**
+ * Describes the message chat.v2.Conversation.
+ * Use `create(ConversationSchema)` to create a new message.
+ */
+export const ConversationSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 8);
+
+/**
+ * @generated from message chat.v2.ListConversationsRequest
+ */
+export type ListConversationsRequest = Message$1<"chat.v2.ListConversationsRequest"> & {
+ /**
+ * @generated from field: optional string project_id = 1;
+ */
+ projectId?: string;
+};
+
+/**
+ * Describes the message chat.v2.ListConversationsRequest.
+ * Use `create(ListConversationsRequestSchema)` to create a new message.
+ */
+export const ListConversationsRequestSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 9);
+
+/**
+ * @generated from message chat.v2.ListConversationsResponse
+ */
+export type ListConversationsResponse = Message$1<"chat.v2.ListConversationsResponse"> & {
+ /**
+ * In this response, the length of conversations[i].messages should be 0.
+ *
+ * @generated from field: repeated chat.v2.Conversation conversations = 1;
+ */
+ conversations: Conversation[];
+};
+
+/**
+ * Describes the message chat.v2.ListConversationsResponse.
+ * Use `create(ListConversationsResponseSchema)` to create a new message.
+ */
+export const ListConversationsResponseSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 10);
+
+/**
+ * @generated from message chat.v2.GetConversationRequest
+ */
+export type GetConversationRequest = Message$1<"chat.v2.GetConversationRequest"> & {
+ /**
+ * @generated from field: string conversation_id = 1;
+ */
+ conversationId: string;
+};
+
+/**
+ * Describes the message chat.v2.GetConversationRequest.
+ * Use `create(GetConversationRequestSchema)` to create a new message.
+ */
+export const GetConversationRequestSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 11);
+
+/**
+ * @generated from message chat.v2.GetConversationResponse
+ */
+export type GetConversationResponse = Message$1<"chat.v2.GetConversationResponse"> & {
+ /**
+ * @generated from field: chat.v2.Conversation conversation = 1;
+ */
+ conversation?: Conversation;
+};
+
+/**
+ * Describes the message chat.v2.GetConversationResponse.
+ * Use `create(GetConversationResponseSchema)` to create a new message.
+ */
+export const GetConversationResponseSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 12);
+
+/**
+ * @generated from message chat.v2.UpdateConversationRequest
+ */
+export type UpdateConversationRequest = Message$1<"chat.v2.UpdateConversationRequest"> & {
+ /**
+ * @generated from field: string conversation_id = 1;
+ */
+ conversationId: string;
+
+ /**
+ * @generated from field: string title = 2;
+ */
+ title: string;
+};
+
+/**
+ * Describes the message chat.v2.UpdateConversationRequest.
+ * Use `create(UpdateConversationRequestSchema)` to create a new message.
+ */
+export const UpdateConversationRequestSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 13);
+
+/**
+ * @generated from message chat.v2.UpdateConversationResponse
+ */
+export type UpdateConversationResponse = Message$1<"chat.v2.UpdateConversationResponse"> & {
+ /**
+ * @generated from field: chat.v2.Conversation conversation = 1;
+ */
+ conversation?: Conversation;
+};
+
+/**
+ * Describes the message chat.v2.UpdateConversationResponse.
+ * Use `create(UpdateConversationResponseSchema)` to create a new message.
+ */
+export const UpdateConversationResponseSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 14);
+
+/**
+ * @generated from message chat.v2.DeleteConversationRequest
+ */
+export type DeleteConversationRequest = Message$1<"chat.v2.DeleteConversationRequest"> & {
+ /**
+ * @generated from field: string conversation_id = 1;
+ */
+ conversationId: string;
+};
+
+/**
+ * Describes the message chat.v2.DeleteConversationRequest.
+ * Use `create(DeleteConversationRequestSchema)` to create a new message.
+ */
+export const DeleteConversationRequestSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 15);
+
+/**
+ * explicitly empty
+ *
+ * @generated from message chat.v2.DeleteConversationResponse
+ */
+export type DeleteConversationResponse = Message$1<"chat.v2.DeleteConversationResponse"> & {
+};
+
+/**
+ * Describes the message chat.v2.DeleteConversationResponse.
+ * Use `create(DeleteConversationResponseSchema)` to create a new message.
+ */
+export const DeleteConversationResponseSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 16);
+
+/**
+ * @generated from message chat.v2.SupportedModel
+ */
+export type SupportedModel = Message$1<"chat.v2.SupportedModel"> & {
+ /**
+ * @generated from field: string name = 1;
+ */
+ name: string;
+
+ /**
+ * @generated from field: string slug = 2;
+ */
+ slug: string;
+
+ /**
+ * @generated from field: int64 total_context = 3;
+ */
+ totalContext: bigint;
+
+ /**
+ * @generated from field: int64 max_output = 4;
+ */
+ maxOutput: bigint;
+
+ /**
+ * in cents per 1M tokens
+ *
+ * @generated from field: int64 input_price = 5;
+ */
+ inputPrice: bigint;
+
+ /**
+ * in cents per 1M tokens
+ *
+ * @generated from field: int64 output_price = 6;
+ */
+ outputPrice: bigint;
+};
+
+/**
+ * Describes the message chat.v2.SupportedModel.
+ * Use `create(SupportedModelSchema)` to create a new message.
+ */
+export const SupportedModelSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 17);
+
+/**
+ * explicitly empty
+ *
+ * @generated from message chat.v2.ListSupportedModelsRequest
+ */
+export type ListSupportedModelsRequest = Message$1<"chat.v2.ListSupportedModelsRequest"> & {
+};
+
+/**
+ * Describes the message chat.v2.ListSupportedModelsRequest.
+ * Use `create(ListSupportedModelsRequestSchema)` to create a new message.
+ */
+export const ListSupportedModelsRequestSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 18);
+
+/**
+ * @generated from message chat.v2.ListSupportedModelsResponse
+ */
+export type ListSupportedModelsResponse = Message$1<"chat.v2.ListSupportedModelsResponse"> & {
+ /**
+ * @generated from field: repeated chat.v2.SupportedModel models = 1;
+ */
+ models: SupportedModel[];
+};
+
+/**
+ * Describes the message chat.v2.ListSupportedModelsResponse.
+ * Use `create(ListSupportedModelsResponseSchema)` to create a new message.
+ */
+export const ListSupportedModelsResponseSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 19);
+
+/**
+ * Information sent once at the beginning of a new conversation stream
+ *
+ * @generated from message chat.v2.StreamInitialization
+ */
+export type StreamInitialization = Message$1<"chat.v2.StreamInitialization"> & {
+ /**
+ * @generated from field: string conversation_id = 1;
+ */
+ conversationId: string;
+
+ /**
+ * @generated from field: string model_slug = 2;
+ */
+ modelSlug: string;
+};
+
+/**
+ * Describes the message chat.v2.StreamInitialization.
+ * Use `create(StreamInitializationSchema)` to create a new message.
+ */
+export const StreamInitializationSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 20);
+
+/**
+ * Designed as StreamPartBegin and StreamPartEnd to
+ * handle the case where assistant and tool are called at the same time.
+ *
+ * User: Please answer me "Ok I will do that", then call "get_weather"
+ * Assistant: Ok I will do that + Tool: get_weather
+ *
+ * @generated from message chat.v2.StreamPartBegin
+ */
+export type StreamPartBegin = Message$1<"chat.v2.StreamPartBegin"> & {
+ /**
+ * @generated from field: string message_id = 1;
+ */
+ messageId: string;
+
+ /**
+ * @generated from field: chat.v2.MessagePayload payload = 3;
+ */
+ payload?: MessagePayload;
+};
+
+/**
+ * Describes the message chat.v2.StreamPartBegin.
+ * Use `create(StreamPartBeginSchema)` to create a new message.
+ */
+export const StreamPartBeginSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 21);
+
+/**
+ * Note: After the StreamPartBegin of tool_call, there can be no MessageChunk,
+ * and the StreamPartEnd can be directly called when the result is ready.
+ *
+ * @generated from message chat.v2.MessageChunk
+ */
+export type MessageChunk = Message$1<"chat.v2.MessageChunk"> & {
+ /**
+ * The id of the message that this chunk belongs to
+ *
+ * @generated from field: string message_id = 1;
+ */
+ messageId: string;
+
+ /**
+ * The small piece of text
+ *
+ * @generated from field: string delta = 2;
+ */
+ delta: string;
+};
+
+/**
+ * Describes the message chat.v2.MessageChunk.
+ * Use `create(MessageChunkSchema)` to create a new message.
+ */
+export const MessageChunkSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 22);
+
+/**
+ * @generated from message chat.v2.IncompleteIndicator
+ */
+export type IncompleteIndicator = Message$1<"chat.v2.IncompleteIndicator"> & {
+ /**
+ * @generated from field: string reason = 1;
+ */
+ reason: string;
+
+ /**
+ * @generated from field: string response_id = 2;
+ */
+ responseId: string;
+};
+
+/**
+ * Describes the message chat.v2.IncompleteIndicator.
+ * Use `create(IncompleteIndicatorSchema)` to create a new message.
+ */
+export const IncompleteIndicatorSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 23);
+
+/**
+ * @generated from message chat.v2.StreamPartEnd
+ */
+export type StreamPartEnd = Message$1<"chat.v2.StreamPartEnd"> & {
+ /**
+ * @generated from field: string message_id = 1;
+ */
+ messageId: string;
+
+ /**
+ * @generated from field: chat.v2.MessagePayload payload = 3;
+ */
+ payload?: MessagePayload;
+};
+
+/**
+ * Describes the message chat.v2.StreamPartEnd.
+ * Use `create(StreamPartEndSchema)` to create a new message.
+ */
+export const StreamPartEndSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 24);
+
+/**
+ * Sent when the current AI response is fully streamed
+ *
+ * @generated from message chat.v2.StreamFinalization
+ */
+export type StreamFinalization = Message$1<"chat.v2.StreamFinalization"> & {
+ /**
+ * Do not return the full Conversation here.
+ * If the user wants, they can call the GetConversation API themselves.
+ * Note: Do not call GetConversation when receiving streamFinalization,
+ * it should be called after the entire API call is finished.
+ *
+ * @generated from field: string conversation_id = 1;
+ */
+ conversationId: string;
+};
+
+/**
+ * Describes the message chat.v2.StreamFinalization.
+ * Use `create(StreamFinalizationSchema)` to create a new message.
+ */
+export const StreamFinalizationSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 25);
+
+/**
+ * @generated from message chat.v2.StreamError
+ */
+export type StreamError = Message$1<"chat.v2.StreamError"> & {
+ /**
+ * @generated from field: string error_message = 1;
+ */
+ errorMessage: string;
+};
+
+/**
+ * Describes the message chat.v2.StreamError.
+ * Use `create(StreamErrorSchema)` to create a new message.
+ */
+export const StreamErrorSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 26);
+
+/**
+ * This message should be the same as CreateConversationMessageRequest
+ * Note: If conversation_id is provided,
+ * the conversation will be created and returned.
+ *
+ * @generated from message chat.v2.CreateConversationMessageStreamRequest
+ */
+export type CreateConversationMessageStreamRequest = Message$1<"chat.v2.CreateConversationMessageStreamRequest"> & {
+ /**
+ * @generated from field: string project_id = 1;
+ */
+ projectId: string;
+
+ /**
+ * @generated from field: optional string conversation_id = 2;
+ */
+ conversationId?: string;
+
+ /**
+ * @generated from field: string model_slug = 3;
+ */
+ modelSlug: string;
+
+ /**
+ * @generated from field: string user_message = 4;
+ */
+ userMessage: string;
+
+ /**
+ * @generated from field: optional string user_selected_text = 5;
+ */
+ userSelectedText?: string;
+
+ /**
+ * @generated from field: optional chat.v2.ConversationType conversation_type = 6;
+ */
+ conversationType?: ConversationType;
+
+ /**
+ * @generated from field: optional string surrounding = 8;
+ */
+ surrounding?: string;
+};
+
+/**
+ * Describes the message chat.v2.CreateConversationMessageStreamRequest.
+ * Use `create(CreateConversationMessageStreamRequestSchema)` to create a new message.
+ */
+export const CreateConversationMessageStreamRequestSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 27);
+
+/**
+ * Response for streaming a message within an existing conversation
+ *
+ * @generated from message chat.v2.CreateConversationMessageStreamResponse
+ */
+export type CreateConversationMessageStreamResponse = Message$1<"chat.v2.CreateConversationMessageStreamResponse"> & {
+ /**
+ * @generated from oneof chat.v2.CreateConversationMessageStreamResponse.response_payload
+ */
+ responsePayload: {
+ /**
+ * @generated from field: chat.v2.StreamInitialization stream_initialization = 1;
+ */
+ value: StreamInitialization;
+ case: "streamInitialization";
+ } | {
+ /**
+ * @generated from field: chat.v2.StreamPartBegin stream_part_begin = 2;
+ */
+ value: StreamPartBegin;
+ case: "streamPartBegin";
+ } | {
+ /**
+ * @generated from field: chat.v2.MessageChunk message_chunk = 3;
+ */
+ value: MessageChunk;
+ case: "messageChunk";
+ } | {
+ /**
+ * @generated from field: chat.v2.IncompleteIndicator incomplete_indicator = 4;
+ */
+ value: IncompleteIndicator;
+ case: "incompleteIndicator";
+ } | {
+ /**
+ * @generated from field: chat.v2.StreamPartEnd stream_part_end = 5;
+ */
+ value: StreamPartEnd;
+ case: "streamPartEnd";
+ } | {
+ /**
+ * @generated from field: chat.v2.StreamFinalization stream_finalization = 6;
+ */
+ value: StreamFinalization;
+ case: "streamFinalization";
+ } | {
+ /**
+ * @generated from field: chat.v2.StreamError stream_error = 7;
+ */
+ value: StreamError;
+ case: "streamError";
+ } | { case: undefined; value?: undefined };
+};
+
+/**
+ * Describes the message chat.v2.CreateConversationMessageStreamResponse.
+ * Use `create(CreateConversationMessageStreamResponseSchema)` to create a new message.
+ */
+export const CreateConversationMessageStreamResponseSchema: GenMessage = /*@__PURE__*/
+ messageDesc(file_chat_v2_chat, 28);
+
+/**
+ * @generated from enum chat.v2.ConversationType
+ */
+export enum ConversationType {
+ /**
+ * @generated from enum value: CONVERSATION_TYPE_UNSPECIFIED = 0;
+ */
+ UNSPECIFIED = 0,
+
+ /**
+   * Does not contain any customized messages.
+ *
+ * @generated from enum value: CONVERSATION_TYPE_DEBUG = 1;
+ */
+ DEBUG = 1,
+}
+
+/**
+ * Describes the enum chat.v2.ConversationType.
+ */
+export const ConversationTypeSchema: GenEnum = /*@__PURE__*/
+ enumDesc(file_chat_v2_chat, 0);
+
+/**
+ * @generated from service chat.v2.ChatService
+ */
+export const ChatService: GenService<{
+ /**
+ * @generated from rpc chat.v2.ChatService.ListConversations
+ */
+ listConversations: {
+ methodKind: "unary";
+ input: typeof ListConversationsRequestSchema;
+ output: typeof ListConversationsResponseSchema;
+ },
+ /**
+ * @generated from rpc chat.v2.ChatService.GetConversation
+ */
+ getConversation: {
+ methodKind: "unary";
+ input: typeof GetConversationRequestSchema;
+ output: typeof GetConversationResponseSchema;
+ },
+ /**
+ * @generated from rpc chat.v2.ChatService.CreateConversationMessageStream
+ */
+ createConversationMessageStream: {
+ methodKind: "server_streaming";
+ input: typeof CreateConversationMessageStreamRequestSchema;
+ output: typeof CreateConversationMessageStreamResponseSchema;
+ },
+ /**
+ * @generated from rpc chat.v2.ChatService.UpdateConversation
+ */
+ updateConversation: {
+ methodKind: "unary";
+ input: typeof UpdateConversationRequestSchema;
+ output: typeof UpdateConversationResponseSchema;
+ },
+ /**
+ * @generated from rpc chat.v2.ChatService.DeleteConversation
+ */
+ deleteConversation: {
+ methodKind: "unary";
+ input: typeof DeleteConversationRequestSchema;
+ output: typeof DeleteConversationResponseSchema;
+ },
+ /**
+ * @generated from rpc chat.v2.ChatService.ListSupportedModels
+ */
+ listSupportedModels: {
+ methodKind: "unary";
+ input: typeof ListSupportedModelsRequestSchema;
+ output: typeof ListSupportedModelsResponseSchema;
+ },
+}> = /*@__PURE__*/
+ serviceDesc(file_chat_v2_chat, 0);
+
diff --git a/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts b/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts
index ddc57ed1..199da2da 100644
--- a/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts
+++ b/webapp/_webapp/src/pkg/gen/apiclient/comment/v1/comment_pb.ts
@@ -33,7 +33,7 @@ export type CommentsAcceptedRequest = Message<"comment.v1.CommentsAcceptedReques
messageId: string;
/**
- * 被 accept 的 comment id 列表
+ * List of accepted comment IDs
*
* @generated from field: repeated string comment_ids = 4;
*/
diff --git a/webapp/_webapp/src/query/api.ts b/webapp/_webapp/src/query/api.ts
index 55ed5aa2..82760a65 100644
--- a/webapp/_webapp/src/query/api.ts
+++ b/webapp/_webapp/src/query/api.ts
@@ -1,4 +1,4 @@
-import apiclient, { RequestOptions } from "../libs/apiclient";
+import apiclient, { apiclientV2 } from "../libs/apiclient";
import {
LoginByGoogleRequest,
LoginByGoogleResponseSchema,
@@ -10,8 +10,7 @@ import {
RefreshTokenResponseSchema,
} from "../pkg/gen/apiclient/auth/v1/auth_pb";
import {
- CreateConversationMessageRequest,
- CreateConversationMessageResponseSchema,
+ CreateConversationMessageStreamRequest,
CreateConversationMessageStreamResponse,
CreateConversationMessageStreamResponseSchema,
DeleteConversationRequest,
@@ -24,7 +23,7 @@ import {
ListSupportedModelsResponseSchema,
UpdateConversationRequest,
UpdateConversationResponseSchema,
-} from "../pkg/gen/apiclient/chat/v1/chat_pb";
+} from "../pkg/gen/apiclient/chat/v2/chat_pb";
import {
GetProjectRequest,
GetProjectResponseSchema,
@@ -116,43 +115,35 @@ export const resetSettings = async () => {
};
export const listConversations = async (data: PlainMessage) => {
- const response = await apiclient.get("/chats/conversations", data);
+ const response = await apiclientV2.get("/chats/conversations", data);
return fromJson(ListConversationsResponseSchema, response);
};
export const listSupportedModels = async (data: PlainMessage) => {
- const response = await apiclient.get("/chats/models", data);
+ const response = await apiclientV2.get("/chats/models", data);
return fromJson(ListSupportedModelsResponseSchema, response);
};
export const getConversation = async (data: PlainMessage) => {
- const response = await apiclient.get(`/chats/conversations/${data.conversationId}`);
+ const response = await apiclientV2.get(`/chats/conversations/${data.conversationId}`);
return fromJson(GetConversationResponseSchema, response);
};
-export const createConversationMessage = async (
- data: PlainMessage,
- options?: RequestOptions,
-) => {
- const response = await apiclient.post(`/chats/conversations/messages`, data, options);
- return fromJson(CreateConversationMessageResponseSchema, response);
-};
-
export const createConversationMessageStream = async (
- data: PlainMessage,
+ data: PlainMessage,
onMessage: (chunk: CreateConversationMessageStreamResponse) => void,
) => {
- const stream = await apiclient.postStream(`/chats/conversations/messages/stream`, data);
+ const stream = await apiclientV2.postStream(`/chats/conversations/messages/stream`, data);
await processStream(stream, CreateConversationMessageStreamResponseSchema, onMessage);
};
export const deleteConversation = async (data: PlainMessage) => {
- const response = await apiclient.delete(`/chats/conversations/${data.conversationId}`);
+ const response = await apiclientV2.delete(`/chats/conversations/${data.conversationId}`);
return fromJson(DeleteConversationResponseSchema, response);
};
export const updateConversation = async (data: PlainMessage) => {
- const response = await apiclient.patch(`/chats/conversations/${data.conversationId}`, data);
+ const response = await apiclientV2.patch(`/chats/conversations/${data.conversationId}`, data);
return fromJson(UpdateConversationResponseSchema, response);
};
diff --git a/webapp/_webapp/src/query/index.ts b/webapp/_webapp/src/query/index.ts
index ca60f155..7a78ccae 100644
--- a/webapp/_webapp/src/query/index.ts
+++ b/webapp/_webapp/src/query/index.ts
@@ -1,15 +1,13 @@
import { useMutation, useQuery } from "@tanstack/react-query";
import {
- CreateConversationMessageResponse,
DeleteConversationResponse,
GetConversationResponse,
ListConversationsResponse,
ListSupportedModelsResponse,
UpdateConversationResponse,
-} from "../pkg/gen/apiclient/chat/v1/chat_pb";
+} from "../pkg/gen/apiclient/chat/v2/chat_pb";
import { UseMutationOptionsOverride, UseQueryOptionsOverride } from "./types";
import {
- createConversationMessage,
createPrompt,
deleteConversation,
deletePrompt,
@@ -103,7 +101,7 @@ export const useListConversationsQuery = (
projectId: string,
opts?: UseQueryOptionsOverride,
) => {
- // 如果登录,才获取
+ // Only fetch if logged in
const { user } = useAuthStore();
return useQuery({
queryKey: queryKeys.conversations.listConversations(projectId).queryKey,
@@ -138,14 +136,7 @@ export const useGetConversationQuery = (
});
};
-export const useCreateConversationMessageMutation = (
- opts?: UseMutationOptionsOverride,
-) => {
- return useMutation({
- mutationFn: createConversationMessage,
- ...opts,
- });
-};
+// Removed: useCreateConversationMessageMutation - use streaming API instead
export const useUpdateConversationMutation = (opts?: UseMutationOptionsOverride) => {
return useMutation({
diff --git a/webapp/_webapp/src/stores/auth-store.ts b/webapp/_webapp/src/stores/auth-store.ts
index c13d4b14..8bd82949 100644
--- a/webapp/_webapp/src/stores/auth-store.ts
+++ b/webapp/_webapp/src/stores/auth-store.ts
@@ -1,7 +1,7 @@
import { create } from "zustand";
import { PlainMessage } from "../query/types";
import { User } from "../pkg/gen/apiclient/user/v1/user_pb";
-import apiclient from "../libs/apiclient";
+import apiclient, { apiclientV2 } from "../libs/apiclient";
import { logout as apiLogout, getUser } from "../query/api";
import { logInfo } from "../libs/logger";
@@ -37,6 +37,7 @@ export const useAuthStore = create((set, get) => ({
login: async () => {
const { token, refreshToken } = get();
apiclient.setTokens(token, refreshToken);
+ apiclientV2.setTokens(token, refreshToken);
getUser()
.then((resp) => {
@@ -59,6 +60,7 @@ export const useAuthStore = create((set, get) => ({
// ignored
}
apiclient.clearTokens();
+ apiclientV2.clearTokens();
set({ user: null, token: "", refreshToken: "" });
},
diff --git a/webapp/_webapp/src/stores/conversation/conversation-store.ts b/webapp/_webapp/src/stores/conversation/conversation-store.ts
index 5e213448..99d6df79 100644
--- a/webapp/_webapp/src/stores/conversation/conversation-store.ts
+++ b/webapp/_webapp/src/stores/conversation/conversation-store.ts
@@ -1,6 +1,8 @@
import { create } from "zustand";
-import { Conversation, ConversationSchema } from "../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { persist, createJSONStorage } from "zustand/middleware";
+import { Conversation, ConversationSchema } from "../../pkg/gen/apiclient/chat/v2/chat_pb";
import { fromJson } from "@bufbuild/protobuf";
+import { useConversationUiStore } from "./conversation-ui-store";
interface ConversationStore {
isStreaming: boolean;
@@ -11,20 +13,42 @@ interface ConversationStore {
setIsStreaming: (isStreaming: boolean) => void;
}
-export const useConversationStore = create((set, get) => ({
- currentConversation: newConversation(),
- setCurrentConversation: (conversation: Conversation) => set({ currentConversation: conversation }),
- updateCurrentConversation: (updater: (conversation: Conversation) => Conversation) =>
- set({ currentConversation: updater(get().currentConversation) }),
- startFromScratch: () => set({ currentConversation: newConversation() }),
- isStreaming: false,
- setIsStreaming: (isStreaming: boolean) => set({ isStreaming }),
-}));
+export const useConversationStore = create()(
+ persist(
+ (set, get) => ({
+ currentConversation: newConversation(),
+ setCurrentConversation: (conversation: Conversation) => set({ currentConversation: conversation }),
+ updateCurrentConversation: (updater: (conversation: Conversation) => Conversation) =>
+ set({ currentConversation: updater(get().currentConversation) }),
+ startFromScratch: () => set({ currentConversation: newConversation() }),
+ isStreaming: false,
+ setIsStreaming: (isStreaming: boolean) => set({ isStreaming }),
+ }),
+ {
+ name: "pd.conversation-storage",
+ storage: createJSONStorage(() => localStorage, {
+ replacer: (_key, value) => {
+ if (typeof value === "bigint") {
+ return value.toString() + "n";
+ }
+ return value;
+ },
+ reviver: (_key, value) => {
+ if (typeof value === "string" && /^-?\d+n$/.test(value)) {
+ return BigInt(value.slice(0, -1));
+ }
+ return value;
+ },
+ }),
+ },
+ ),
+);
export function newConversation(): Conversation {
+ const modelSlug = useConversationUiStore.getState().lastUsedModelSlug;
return fromJson(ConversationSchema, {
id: "",
- modelSlug: "gpt-4.1",
+ modelSlug: modelSlug,
title: "New Conversation",
messages: [],
});
diff --git a/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts b/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts
index 87cb5fa5..8cabdd03 100644
--- a/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts
+++ b/webapp/_webapp/src/stores/conversation/conversation-ui-store.ts
@@ -1,34 +1,16 @@
import { create } from "zustand";
+import { persist, createJSONStorage } from "zustand/middleware";
import { createRef } from "react";
export const COLLAPSED_HEIGHT = 460;
export const DISPLAY_MODES = [
- { key: "floating", label: "窗口化" },
- { key: "right-fixed", label: "右侧固定" },
- { key: "bottom-fixed", label: "底部固定" },
+ { key: "floating", label: "Floating" },
+ { key: "right-fixed", label: "Right Fixed" },
+ { key: "bottom-fixed", label: "Bottom Fixed" },
] as const;
export type DisplayMode = (typeof DISPLAY_MODES)[number]["key"];
-const localStorageKey = {
- displayMode: "pd.layout.displayMode",
- floatingX: "pd.layout.floating.X",
- floatingY: "pd.layout.floating.Y",
- floatingWidth: "pd.layout.floating.W",
- floatingHeight: "pd.layout.floating.H",
- rightFixedWidth: "pd.layout.rightFixed.W",
- bottomFixedHeight: "pd.layout.bottomFixed.H",
- isOpen: "pd.layout.isOpen",
- activeTab: "pd.layout.activeTab",
- sidebarCollapsed: "pd.layout.sidebar.collapsed",
- heightCollapseRequired: "pd.layout.heightCollapseRequired",
-} as const;
-
-export const getLocalStorage = (key: keyof typeof localStorageKey): T | undefined => {
- const value = localStorage.getItem(localStorageKey[key]);
- return value ? (JSON.parse(value) as T) : undefined;
-};
-
interface ConversationUiStore {
inputRef: React.RefObject;
@@ -71,96 +53,81 @@ interface ConversationUiStore {
heightCollapseRequired: boolean;
setHeightCollapseRequired: (heightCollapseRequired: boolean) => void;
+ lastUsedModelSlug: string;
+ setLastUsedModelSlug: (lastUsedModelSlug: string) => void;
+
resetPosition: () => void;
}
-// TODO: track the behavior
-export const useConversationUiStore = create((set) => ({
- inputRef: createRef(),
-
- prompt: "",
- setPrompt: (prompt: string) => set({ prompt }),
-
- showChatHistory: false,
- setShowChatHistory: (showChatHistory: boolean) => set({ showChatHistory: showChatHistory }),
-
- displayMode: getLocalStorage("displayMode") || "right-fixed",
- setDisplayMode: (displayMode: DisplayMode) => {
- localStorage.setItem(localStorageKey.displayMode, JSON.stringify(displayMode));
- set({ displayMode });
- },
-
- floatingX: getLocalStorage("floatingX") || 100,
- setFloatingX: (floatingX: number) => {
- localStorage.setItem(localStorageKey.floatingX, JSON.stringify(floatingX));
- set({ floatingX });
- },
-
- floatingY: getLocalStorage("floatingY") || 100,
- setFloatingY: (floatingY: number) => {
- localStorage.setItem(localStorageKey.floatingY, JSON.stringify(floatingY));
- set({ floatingY });
- },
-
- floatingWidth: getLocalStorage("floatingWidth") || 660,
- setFloatingWidth: (floatingWidth: number) => {
- localStorage.setItem(localStorageKey.floatingWidth, JSON.stringify(floatingWidth));
- set({ floatingWidth });
- },
-
- floatingHeight: getLocalStorage("floatingHeight") || 500,
- setFloatingHeight: (floatingHeight: number) => {
- localStorage.setItem(localStorageKey.floatingHeight, JSON.stringify(floatingHeight));
- set({ floatingHeight });
- },
-
- bottomFixedHeight: getLocalStorage("bottomFixedHeight") || 470,
- setBottomFixedHeight: (bottomFixedHeight: number) => {
- localStorage.setItem(localStorageKey.bottomFixedHeight, JSON.stringify(bottomFixedHeight));
- set({ bottomFixedHeight });
- },
-
- rightFixedWidth: getLocalStorage("rightFixedWidth") || 580,
- setRightFixedWidth: (rightFixedWidth: number) => {
- localStorage.setItem(localStorageKey.rightFixedWidth, JSON.stringify(rightFixedWidth));
- set({ rightFixedWidth });
- },
-
- isOpen: getLocalStorage("isOpen") || false,
- setIsOpen: (isOpen: boolean) => {
- localStorage.setItem(localStorageKey.isOpen, JSON.stringify(isOpen));
- set({ isOpen });
- },
-
- activeTab: getLocalStorage("activeTab") || "chat",
- setActiveTab: (activeTab: string) => {
- localStorage.setItem(localStorageKey.activeTab, JSON.stringify(activeTab));
- set({ activeTab });
- },
-
- sidebarCollapsed: getLocalStorage("sidebarCollapsed") || false,
- setSidebarCollapsed: (sidebarCollapsed: boolean) => {
- localStorage.setItem(localStorageKey.sidebarCollapsed, JSON.stringify(sidebarCollapsed));
- set({ sidebarCollapsed });
- },
-
- heightCollapseRequired: getLocalStorage("heightCollapseRequired") || false,
- setHeightCollapseRequired: (heightCollapseRequired: boolean) => {
- localStorage.setItem(localStorageKey.heightCollapseRequired, JSON.stringify(heightCollapseRequired));
- set({ heightCollapseRequired });
- },
-
- resetPosition: () => {
- set((state) => {
- state.setFloatingX(100);
- state.setFloatingY(100);
- state.setFloatingWidth(620);
- state.setFloatingHeight(200);
- state.setDisplayMode("floating");
-
- return {};
- });
- },
-}));
+export const useConversationUiStore = create()(
+ persist(
+ (set) => ({
+ inputRef: createRef(),
+
+ prompt: "",
+ setPrompt: (prompt: string) => set({ prompt }),
+
+ showChatHistory: false,
+ setShowChatHistory: (showChatHistory: boolean) => set({ showChatHistory }),
+
+ displayMode: "right-fixed",
+ setDisplayMode: (displayMode: DisplayMode) => set({ displayMode }),
+
+ floatingX: 100,
+ setFloatingX: (floatingX: number) => set({ floatingX }),
+
+ floatingY: 100,
+ setFloatingY: (floatingY: number) => set({ floatingY }),
+
+ floatingWidth: 660,
+ setFloatingWidth: (floatingWidth: number) => set({ floatingWidth }),
+
+ floatingHeight: 500,
+ setFloatingHeight: (floatingHeight: number) => set({ floatingHeight }),
+
+ bottomFixedHeight: 470,
+ setBottomFixedHeight: (bottomFixedHeight: number) => set({ bottomFixedHeight }),
+
+ rightFixedWidth: 580,
+ setRightFixedWidth: (rightFixedWidth: number) => set({ rightFixedWidth }),
+
+ isOpen: false,
+ setIsOpen: (isOpen: boolean) => set({ isOpen }),
+
+ activeTab: "chat",
+ setActiveTab: (activeTab: string) => set({ activeTab }),
+
+ sidebarCollapsed: false,
+ setSidebarCollapsed: (sidebarCollapsed: boolean) => set({ sidebarCollapsed }),
+
+ heightCollapseRequired: false,
+ setHeightCollapseRequired: (heightCollapseRequired: boolean) => set({ heightCollapseRequired }),
+
+ lastUsedModelSlug: "gpt-4.1",
+ setLastUsedModelSlug: (lastUsedModelSlug: string) => set({ lastUsedModelSlug }),
+
+ resetPosition: () => {
+ set({
+ floatingX: 100,
+ floatingY: 100,
+ floatingWidth: 620,
+ floatingHeight: 200,
+ displayMode: "floating",
+ });
+ },
+ }),
+ {
+ name: "pd.layout-storage",
+ storage: createJSONStorage(() => localStorage),
+ partialize: (state) => {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ const { inputRef, prompt, ...rest } = state;
+ return rest;
+ },
+      // TODO: add a `migrate` function here if the old per-key localStorage values (pd.layout.*) need to be carried over into this store.
+ version: 1,
+ },
+ ),
+);
// selectedText is controlled by the "selection-store.ts"
diff --git a/webapp/_webapp/src/stores/conversation/handlers/converter.ts b/webapp/_webapp/src/stores/conversation/handlers/converter.ts
index 6d289445..1994c573 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/converter.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/converter.ts
@@ -1,5 +1,5 @@
import { fromJson } from "@bufbuild/protobuf";
-import { Conversation, Message, MessageSchema } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { Conversation, Message, MessageSchema } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { MessageEntry, MessageEntryStatus } from "../types";
import { useStreamingMessageStore } from "../../streaming-message-store";
import { flushSync } from "react-dom";
diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts b/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts
index 83998dde..57513d9f 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/handleIncompleteIndicator.ts
@@ -1,4 +1,4 @@
-import { IncompleteIndicator } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { IncompleteIndicator } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { useStreamingMessageStore } from "../../streaming-message-store";
export function handleIncompleteIndicator(incompleteIndicator: IncompleteIndicator) {
diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts b/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts
index 470f38ec..020cfb13 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/handleMessageChunk.ts
@@ -1,5 +1,5 @@
import { logError } from "../../../libs/logger";
-import { MessageChunk, MessageTypeAssistant } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { MessageChunk, MessageTypeAssistant } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { StreamingMessage } from "../../streaming-message-store";
import { MessageEntry, MessageEntryStatus } from "../types";
diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts
index bd02109a..6bc4bc32 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamError.ts
@@ -1,4 +1,4 @@
-import { MessageTypeAssistantSchema, StreamError } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { MessageTypeAssistantSchema, StreamError } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { errorToast } from "../../../libs/toasts";
import { OverleafAuthentication, OverleafVersionedDoc } from "../../../libs/overleaf-socket";
import { getProjectId } from "../../../libs/helpers";
diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts
index 630ab4ee..be08d272 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamFinalization.ts
@@ -1,4 +1,4 @@
-import { StreamFinalization } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { StreamFinalization } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { flushStreamingMessageToConversation } from "./converter";
export function handleStreamFinalization(_finalization: StreamFinalization) {
diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts
index 1970eac7..c6b84eff 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamInitialization.ts
@@ -1,4 +1,4 @@
-import { StreamInitialization } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { StreamInitialization } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { useStreamingMessageStore } from "../../streaming-message-store";
import { MessageEntryStatus } from "../types";
import { logWarn } from "../../../libs/logger";
diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts
index e7d457a9..caa65b19 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartBegin.ts
@@ -1,4 +1,4 @@
-import { StreamPartBegin } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { StreamPartBegin } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { StreamingMessage } from "../../streaming-message-store";
import { MessageEntry, MessageEntryStatus } from "../types";
import { logError } from "../../../libs/logger";
@@ -14,30 +14,48 @@ export function handleStreamPartBegin(
status: MessageEntryStatus.PREPARING,
assistant: partBegin.payload?.messageType.value,
};
- updateStreamingMessage((prev) => ({
- parts: [...prev.parts, newMessageEntry],
- sequence: prev.sequence + 1,
- }));
+ updateStreamingMessage((prev) => {
+ // Skip if entry with same messageId already exists (prevents duplicate keys)
+ if (prev.parts.some((p) => p.messageId === partBegin.messageId)) {
+ return prev;
+ }
+ return {
+ parts: [...prev.parts, newMessageEntry],
+ sequence: prev.sequence + 1,
+ };
+ });
} else if (role === "toolCallPrepareArguments") {
const newMessageEntry: MessageEntry = {
messageId: partBegin.messageId,
status: MessageEntryStatus.PREPARING,
toolCallPrepareArguments: partBegin.payload?.messageType.value,
};
- updateStreamingMessage((prev) => ({
- parts: [...prev.parts, newMessageEntry],
- sequence: prev.sequence + 1,
- }));
+ updateStreamingMessage((prev) => {
+ // Skip if entry with same messageId already exists (prevents duplicate keys)
+ if (prev.parts.some((p) => p.messageId === partBegin.messageId)) {
+ return prev;
+ }
+ return {
+ parts: [...prev.parts, newMessageEntry],
+ sequence: prev.sequence + 1,
+ };
+ });
} else if (role === "toolCall") {
const newMessageEntry: MessageEntry = {
messageId: partBegin.messageId,
status: MessageEntryStatus.PREPARING,
toolCall: partBegin.payload?.messageType.value,
};
- updateStreamingMessage((prev) => ({
- parts: [...prev.parts, newMessageEntry],
- sequence: prev.sequence + 1,
- }));
+ updateStreamingMessage((prev) => {
+ // Skip if entry with same messageId already exists (prevents duplicate keys)
+ if (prev.parts.some((p) => p.messageId === partBegin.messageId)) {
+ return prev;
+ }
+ return {
+ parts: [...prev.parts, newMessageEntry],
+ sequence: prev.sequence + 1,
+ };
+ });
} else if (role === "system") {
// not possible
} else if (role === "user") {
diff --git a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts
index 7e845f21..46e6bb10 100644
--- a/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts
+++ b/webapp/_webapp/src/stores/conversation/handlers/handleStreamPartEnd.ts
@@ -3,7 +3,7 @@ import {
MessageTypeToolCall,
MessageTypeToolCallPrepareArguments,
StreamPartEnd,
-} from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+} from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { StreamingMessage } from "../../streaming-message-store";
import { logError } from "../../../libs/logger";
import { MessageEntryStatus } from "../types";
diff --git a/webapp/_webapp/src/stores/conversation/types.ts b/webapp/_webapp/src/stores/conversation/types.ts
index 2a619611..273f291f 100644
--- a/webapp/_webapp/src/stores/conversation/types.ts
+++ b/webapp/_webapp/src/stores/conversation/types.ts
@@ -4,7 +4,7 @@ import {
MessageTypeToolCallPrepareArguments,
MessageTypeUnknown,
MessageTypeUser,
-} from "../../pkg/gen/apiclient/chat/v1/chat_pb";
+} from "../../pkg/gen/apiclient/chat/v2/chat_pb";
export enum MessageEntryStatus {
PREPARING = "PREPARING",
diff --git a/webapp/_webapp/src/stores/selection-store.ts b/webapp/_webapp/src/stores/selection-store.ts
index ec5a859a..4d60ae2c 100644
--- a/webapp/_webapp/src/stores/selection-store.ts
+++ b/webapp/_webapp/src/stores/selection-store.ts
@@ -5,8 +5,10 @@ import { EditorView } from "@codemirror/view";
type CoreState = {
selectedText: string | null;
+ surroundingText: string | null;
selectionRange: Range | null;
lastSelectedText: string | null;
+ lastSurroundingText: string | null;
lastSelectionRange: Range | null;
overleafCm: OverleafCodeMirror | null;
};
@@ -21,10 +23,18 @@ export const useSelectionStore = create((set) => ({
setSelectedText: (selectedText) => {
set({ selectedText });
},
- lastSelectedText: null, // 有一种情况:用户选择了文本,移动了一下 paperdebugger,然后点击 Add to chat。这个时候需要 lastSelectedText 来恢复刚刚选中的文本。
+ surroundingText: null,
+ setSurroundingText: (surroundingText) => {
+ set({ surroundingText });
+ },
+  lastSelectedText: null, // There's a case where the user selects text, moves PaperDebugger, and then clicks "Add to chat"; lastSelectedText is needed to restore the text that was just selected.
setLastSelectedText: (lastSelectedText) => {
set({ lastSelectedText });
},
+ lastSurroundingText: null,
+ setLastSurroundingText: (lastSurroundingText) => {
+ set({ lastSurroundingText });
+ },
selectionRange: null,
setSelectionRange: (selectionRange) => {
set({ selectionRange });
@@ -34,15 +44,16 @@ export const useSelectionStore = create((set) => ({
set({ lastSelectionRange });
},
clear: () => {
- set({ selectedText: null, selectionRange: null });
+ set({ selectedText: null, surroundingText: null, selectionRange: null });
},
clearOverleafSelection: () => {
- let cmContentElement = document.querySelector(".cm-content");
+ const cmContentElement = document.querySelector(".cm-content");
if (!cmContentElement) {
return;
}
- let editorViewInstance = (cmContentElement as any).cmView.view as EditorView;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const editorViewInstance = (cmContentElement as any).cmView.view as EditorView;
if (!editorViewInstance) {
return;
}
diff --git a/webapp/_webapp/src/stores/setting-store.ts b/webapp/_webapp/src/stores/setting-store.ts
index 1a327225..92f02ae6 100644
--- a/webapp/_webapp/src/stores/setting-store.ts
+++ b/webapp/_webapp/src/stores/setting-store.ts
@@ -13,7 +13,7 @@ export interface SettingStore {
updateSettings: (newSettings: Partial>) => Promise;
resetSettings: () => Promise;
- enableUserDeveloperTools: boolean; // 不是真的 developer tool
+ enableUserDeveloperTools: boolean; // Not actual developer tools
setEnableUserDeveloperTools: (enable: boolean) => void;
conversationMode: "debug" | "normal";
@@ -27,6 +27,9 @@ export interface SettingStore {
hideAvatar: boolean;
setHideAvatar: (enable: boolean) => void;
+
+ allowOutOfBounds: boolean;
+ setAllowOutOfBounds: (enable: boolean) => void;
}
const defaultSettings: PlainMessage = {
@@ -151,4 +154,10 @@ export const useSettingStore = create()((set, get) => ({
localStorage.setItem("pd.ui.hideAvatar", enable.toString());
set({ hideAvatar: enable });
},
+
+ allowOutOfBounds: localStorage.getItem("pd.ui.allowOutOfBounds") === "true" || false,
+ setAllowOutOfBounds: (enable: boolean) => {
+ localStorage.setItem("pd.ui.allowOutOfBounds", enable.toString());
+ set({ allowOutOfBounds: enable });
+ },
}));
diff --git a/webapp/_webapp/src/stores/streaming-message-store.ts b/webapp/_webapp/src/stores/streaming-message-store.ts
index 3518f55d..a7c12f08 100644
--- a/webapp/_webapp/src/stores/streaming-message-store.ts
+++ b/webapp/_webapp/src/stores/streaming-message-store.ts
@@ -3,7 +3,7 @@
import { create } from "zustand";
import { MessageEntry } from "./conversation/types";
import { flushSync } from "react-dom";
-import { IncompleteIndicator } from "../pkg/gen/apiclient/chat/v1/chat_pb";
+import { IncompleteIndicator } from "../pkg/gen/apiclient/chat/v2/chat_pb";
import { SetterResetterStore } from "./types";
export type StreamingMessage = {
diff --git a/webapp/_webapp/src/views/chat/body/empty-view.tsx b/webapp/_webapp/src/views/chat/body/empty-view.tsx
index c6f2eaf5..ac227c5b 100644
--- a/webapp/_webapp/src/views/chat/body/empty-view.tsx
+++ b/webapp/_webapp/src/views/chat/body/empty-view.tsx
@@ -6,8 +6,9 @@ export const EmptyView = () => (
Ask or Edit
+ Start your conversation with PaperDebugger.
- Start your conversation with PaperDebugger. You can ask for scoring paper or make revisions.
+ Be careful of the generated content, PaperDebugger will never modify your content without your permission.
diff --git a/webapp/_webapp/src/views/chat/body/index.tsx b/webapp/_webapp/src/views/chat/body/index.tsx
index 85dcbb69..7010d737 100644
--- a/webapp/_webapp/src/views/chat/body/index.tsx
+++ b/webapp/_webapp/src/views/chat/body/index.tsx
@@ -1,6 +1,6 @@
-import { useEffect, useRef, useState } from "react";
+import { useEffect, useMemo, useRef, useState } from "react";
import { MessageCard } from "../../../components/message-card";
-import { Conversation } from "../../../pkg/gen/apiclient/chat/v1/chat_pb";
+import { Conversation, Message } from "../../../pkg/gen/apiclient/chat/v2/chat_pb";
import { filterVisibleMessages, getPrevUserMessage, isEmptyConversation, messageToMessageEntry } from "../helper";
import { StatusIndicator } from "./status-indicator";
import { EmptyView } from "./empty-view";
@@ -20,18 +20,18 @@ enum ReloadStatus {
}
export const ChatBody = ({ conversation }: ChatBodyProps) => {
- const { setCurrentConversation } = useConversationStore();
+ const setCurrentConversation = useConversationStore((s) => s.setCurrentConversation);
const chatContainerRef = useRef
(null);
const lastUserMsgRef = useRef(null);
const expanderRef = useRef(null);
const streamingMessage = useStreamingMessageStore((s) => s.streamingMessage);
- const visibleMessages = filterVisibleMessages(conversation);
+ const visibleMessages = useMemo(() => filterVisibleMessages(conversation), [conversation]);
const [reloadSuccess, setReloadSuccess] = useState(ReloadStatus.Default);
- const { conversationMode } = useSettingStore();
+ const conversationMode = useSettingStore((s) => s.conversationMode);
const isDebugMode = conversationMode === "debug";
- // 滚动到最后一条 user 消息顶部
+ // Scroll to the top of the last user message
useEffect(() => {
if (expanderRef.current) {
expanderRef.current.style.height = "1000px";
@@ -44,7 +44,7 @@ export const ChatBody = ({ conversation }: ChatBodyProps) => {
let expanderHeight: number;
if (expanderViewOffset < 0) {
- expanderHeight = 0; // expander 的 positoin 是 absolute,和 stream markdown 独立渲染。当 stream markdown 渲染的时候,expander 可能会因为用户滚动滑到 chatContainer 上面,导致 expander.y < 0。这个时候我们就不需要 expander 了
+ expanderHeight = 0; // The expander's position is absolute and renders independently from stream markdown. When stream markdown renders, the expander may scroll above the chatContainer due to user scrolling, causing expander.y < 0. In this case, we don't need the expander.
} else {
expanderHeight = chatContainerHeight - expanderViewOffset;
}
@@ -68,31 +68,39 @@ export const ChatBody = ({ conversation }: ChatBodyProps) => {
}
}, [visibleMessages.length]);
+ const finalizedMessageCards = useMemo(
+ () =>
+ visibleMessages.map((message: Message, index: number) => (
+
+
+
+ )),
+ [visibleMessages],
+ );
+
+ const streamingMessageCards = useMemo(
+ () =>
+ streamingMessage.parts.map((entry) => (
+
+ )),
+ [streamingMessage.parts],
+ );
+
if (isEmptyConversation()) {
return ;
}
- const finalizedMessageCards = visibleMessages.map((message, index) => (
-
-
-
- ));
-
- const streamingMessageCards = streamingMessage.parts.map((entry) => (
-
- ));
-
const expander = (
s.prompt);
+ const heightCollapseRequired = useConversationUiStore((s) => s.heightCollapseRequired);
+ const inputRef = useConversationUiStore((s) => s.inputRef);
+ const setPrompt = useConversationUiStore((s) => s.setPrompt);
+
+ const searchPrompts = usePromptLibraryStore((s) => s.searchPrompts);
const [showModelSelection, setShowModelSelection] = useState(false);
const prompts = useMemo(
() => (!prompt.startsWith("/") ? [] : searchPrompts(prompt.slice(1))),
@@ -50,11 +54,14 @@ export function PromptInput() {
filter: prompt.startsWith(":") ? prompt.slice(1) : undefined,
});
- const { user } = useAuthStore();
- const { isStreaming, setIsStreaming } = useConversationStore();
+ const user = useAuthStore((s) => s.user);
+ const isStreaming = useConversationStore((s) => s.isStreaming);
+ const setIsStreaming = useConversationStore((s) => s.setIsStreaming);
+
const selectedText = useSelectionStore((s) => s.selectedText);
+
const { sendMessageStream } = useSendMessageStream();
- const { minimalistMode } = useSettingStore();
+ const minimalistMode = useSettingStore((s) => s.minimalistMode);
const handleModelSelect = useCallback(() => {
setShowModelSelection(false);
@@ -90,21 +97,23 @@ export function PromptInput() {
);
return (
-
+
+ {/* Only show one popup at a time - priority: prompts > actions > model selection */}
{prompts.length > 0 &&
}
- {actions.length > 0 &&
}
- {showModelSelection &&
}
+ {prompts.length === 0 && actions.length > 0 &&
}
+ {prompts.length === 0 && actions.length === 0 && showModelSelection && (
+
+ )}
- {prompts.length == 0 && actions.length == 0 && !showModelSelection && (
- setShowModelSelection(true)} />
- )}
+ setShowModelSelection(true)} />