Compare commits

...

24 Commits

Author SHA1 Message Date
公明 e5d52cdf85 Update config.yaml 2026-05-11 20:36:58 +08:00
公明 65e48826ff Update config.yaml 2026-05-11 19:59:41 +08:00
公明 0cff507272 Add files via upload 2026-05-11 19:57:46 +08:00
公明 30afd71c05 Add files via upload 2026-05-11 19:56:38 +08:00
公明 d2b6a154de Add files via upload 2026-05-11 19:54:40 +08:00
公明 278d5aa25c Add files via upload 2026-05-11 19:52:39 +08:00
公明 215f5a4a93 Update config.yaml 2026-05-10 23:33:39 +08:00
公明 44185d748d Add files via upload 2026-05-10 23:28:18 +08:00
公明 fe47f1f058 Add files via upload 2026-05-10 23:27:07 +08:00
公明 99ce183f41 Add files via upload 2026-05-10 23:25:11 +08:00
公明 2ed1947f36 Add files via upload 2026-05-10 23:22:35 +08:00
公明 97f3e8c179 Add files via upload 2026-05-10 22:52:34 +08:00
公明 38b0c31b87 Add files via upload 2026-05-10 22:47:04 +08:00
公明 cb839da4d1 Add files via upload 2026-05-10 22:44:51 +08:00
公明 5ed730f17c Add files via upload 2026-05-10 22:43:21 +08:00
公明 30b1e5f820 Add files via upload 2026-05-10 22:16:12 +08:00
公明 8e5c70703e Add files via upload 2026-05-10 22:14:51 +08:00
公明 3cc3b25a7b Add files via upload 2026-05-10 22:12:23 +08:00
公明 44cf63fa52 Add files via upload 2026-05-10 22:10:33 +08:00
公明 12057c065b Add files via upload 2026-05-10 21:39:50 +08:00
公明 c4e0b9735c Add files via upload 2026-05-10 21:38:28 +08:00
公明 218e9b9880 Add files via upload 2026-05-10 21:36:28 +08:00
公明 82d840966e Add files via upload 2026-05-10 21:34:34 +08:00
公明 c62ff3bde9 Add files via upload 2026-05-10 20:29:34 +08:00
37 changed files with 2394 additions and 481 deletions
+8 -1
View File
@@ -10,7 +10,7 @@
# ============================================
# 前端显示的版本号(可选,不填则显示默认版本)
version: "v1.6.6"
version: "v1.6.8"
# 服务器配置
server:
host: 0.0.0.0 # 监听地址,0.0.0.0 表示监听所有网络接口
@@ -41,6 +41,13 @@ openai:
api_key: sk-xxxxxxx # API 密钥(必填)
model: qwen3-max # 模型名称(必填)
max_total_tokens: 120000 # LLM 相关上下文的最大 Token 数限制(内存压缩和攻击链构建会共用此配置)
# Eino 路径模型推理:DeepSeek/OpenAI 为 thinking / reasoning_effort 等;provider 为 claude 时合并为 Anthropic 顶层 thinking(extended thinking);mode: off 关闭
reasoning:
mode: off # auto | on | off;off 时不附加任何推理扩展字段
effort: max # low | medium | high | max;空表示不指定(openai_compat 下 auto 且无强度时不发请求扩展)
allow_client_reasoning: true # false 时忽略对话请求体 reasoning,仅以下方为准
profile: openai_compat # auto | deepseek_compat | openai_compat | output_config_effort
# extra_request_fields: {} # 可选:管理员自定义根级 JSON 片段(高级)
# ============================================
# 信息收集(FOFA)配置(可选)
# ============================================
+13 -2
View File
@@ -193,6 +193,10 @@ type ChatMessage struct {
Content string `json:"content,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
// ToolName 仅 tool 角色:从 Eino/轨迹 JSON 的 name 或 tool_name 恢复,供续跑构造 ToolMessage。
ToolName string `json:"tool_name,omitempty"`
// ReasoningContent 对应 OpenAI/DeepSeek 的 reasoning_content;思考模式 + 工具调用后续跑须回传(见 DeepSeek 文档)。
ReasoningContent string `json:"reasoning_content,omitempty"`
}
// MarshalJSON 自定义JSON序列化,将tool_calls中的arguments转换为JSON字符串
@@ -206,11 +210,17 @@ func (cm ChatMessage) MarshalJSON() ([]byte, error) {
if cm.Content != "" {
aux["content"] = cm.Content
}
if cm.ReasoningContent != "" {
aux["reasoning_content"] = cm.ReasoningContent
}
// 添加tool_call_id(如果存在)
if cm.ToolCallID != "" {
aux["tool_call_id"] = cm.ToolCallID
}
if cm.ToolName != "" {
aux["tool_name"] = cm.ToolName
}
// 转换tool_calls,将arguments转换为JSON字符串
if len(cm.ToolCalls) > 0 {
@@ -438,6 +448,7 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
Content: msg.Content,
ToolCalls: msg.ToolCalls,
ToolCallID: msg.ToolCallID,
ToolName: msg.ToolName,
})
addedCount++
contentPreview := msg.Content
@@ -657,8 +668,8 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
// 检查是否有工具调用
if len(choice.Message.ToolCalls) > 0 {
// 思考内容:如果本轮启用了思考流式增量(thinking_stream_*前端会去重
// 同时也需要在该“思考阶段结束”时补一条可落库的 thinking用于刷新后持久化展示)。
// ReAct 助手正文流式增量(thinking_stream_*在 UI 上归为「思考」;若与 streamId 重复则前端会去重
// 该条 thinking 用于刷新后持久化展示(与流式聚合一致)。
if choice.Message.Content != "" {
sendProgress("thinking", choice.Message.Content, map[string]interface{}{
"iteration": i + 1,
+1 -1
View File
@@ -301,7 +301,7 @@ func (b *Builder) formatProcessDetailsForAttackChain(details []database.ProcessD
// 目标:以主 agent(编排器)视角输出整轮迭代
// - 保留:编排器工具调用/结果、对子代理的 task 调度、子代理最终回复(不含推理)
// - 丢弃:thinking/planning/progress 等噪声、子代理的工具细节与推理过程
if d.EventType == "progress" || d.EventType == "thinking" || d.EventType == "planning" {
if d.EventType == "progress" || d.EventType == "thinking" || d.EventType == "reasoning_chain" || d.EventType == "planning" {
continue
}
+44 -1
View File
@@ -90,7 +90,8 @@ type MultiAgentEinoMiddlewareConfig struct {
SummarizationTriggerRatio float64 `yaml:"summarization_trigger_ratio,omitempty" json:"summarization_trigger_ratio,omitempty"`
// SummarizationEmitInternalEvents controls middleware internal event emission (default true).
SummarizationEmitInternalEvents *bool `yaml:"summarization_emit_internal_events,omitempty" json:"summarization_emit_internal_events,omitempty"`
// HistoryInputBudgetRatio caps pre-agent history tokens as max_total_tokens * ratio (default 0.35).
// HistoryInputBudgetRatio 已不影响 Eino:从 last_react 轨迹转 ADK 消息时**不再**按 token 比例裁剪(完整注入)。
// 字段仍保留,便于旧版 config 不报错;新部署可省略。
HistoryInputBudgetRatio float64 `yaml:"history_input_budget_ratio,omitempty" json:"history_input_budget_ratio,omitempty"`
// PlanExecuteUserInputBudgetRatio caps planner/replanner/executor userInput prompt budget ratio (default 0.35).
PlanExecuteUserInputBudgetRatio float64 `yaml:"plan_execute_user_input_budget_ratio,omitempty" json:"plan_execute_user_input_budget_ratio,omitempty"`
@@ -345,6 +346,48 @@ type OpenAIConfig struct {
BaseURL string `yaml:"base_url" json:"base_url"`
Model string `yaml:"model" json:"model"`
MaxTotalTokens int `yaml:"max_total_tokens,omitempty" json:"max_total_tokens,omitempty"`
// Reasoning 控制 Eino ChatModel 的 thinking / reasoning_effort / output_config 等(仅 Eino 路径生效;原生 ReAct 忽略)。
Reasoning OpenAIReasoningConfig `yaml:"reasoning,omitempty" json:"reasoning,omitempty"`
}
// OpenAIReasoningConfig holds the global reasoning defaults and the gateway
// profile. The chat page may override these via ChatRequest.reasoning, subject
// to AllowClientReasoning.
type OpenAIReasoningConfig struct {
	// Mode: auto (default) | on | off | default (same as auto). When off, no
	// reasoning extension fields are attached to the model request.
	Mode string `yaml:"mode,omitempty" json:"mode,omitempty"`
	// Effort: low | medium | high | max; empty means no explicit strength
	// (per-profile behavior lives in internal/reasoning).
	Effort string `yaml:"effort,omitempty" json:"effort,omitempty"`
	// AllowClientReasoning: when false, the request-body reasoning is ignored;
	// nil/unset is treated as true (see AllowClientReasoningEffective).
	AllowClientReasoning *bool `yaml:"allow_client_reasoning,omitempty" json:"allow_client_reasoning,omitempty"`
	// Profile: auto | deepseek_compat | openai_compat | output_config_effort
	Profile string `yaml:"profile,omitempty" json:"profile,omitempty"`
	// ExtraRequestFields is merged into the root of the Chat Completions JSON
	// (admin use; automatically generated fields with the same name win).
	ExtraRequestFields map[string]interface{} `yaml:"extra_request_fields,omitempty" json:"extra_request_fields,omitempty"`
}
// ModeEffective normalizes Mode for consumers: both the empty string and
// "default" resolve to "auto"; anything else is returned lowercased/trimmed.
func (c OpenAIReasoningConfig) ModeEffective() string {
	switch mode := strings.ToLower(strings.TrimSpace(c.Mode)); mode {
	case "", "default":
		return "auto"
	default:
		return mode
	}
}
// ProfileEffective normalizes Profile for consumers: the empty string resolves
// to "auto"; anything else is returned lowercased/trimmed.
func (c OpenAIReasoningConfig) ProfileEffective() string {
	if profile := strings.ToLower(strings.TrimSpace(c.Profile)); profile != "" {
		return profile
	}
	return "auto"
}
// AllowClientReasoningEffective reports whether the client may send
// ChatRequest.reasoning; an unset (nil) flag defaults to true.
func (c OpenAIReasoningConfig) AllowClientReasoningEffective() bool {
	return c.AllowClientReasoning == nil || *c.AllowClientReasoning
}
type FofaConfig struct {
+38 -13
View File
@@ -25,14 +25,15 @@ type Conversation struct {
// Message 消息
type Message struct {
ID string `json:"id"`
ConversationID string `json:"conversationId"`
Role string `json:"role"`
Content string `json:"content"`
MCPExecutionIDs []string `json:"mcpExecutionIds,omitempty"`
ProcessDetails []map[string]interface{} `json:"processDetails,omitempty"`
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
ID string `json:"id"`
ConversationID string `json:"conversationId"`
Role string `json:"role"`
Content string `json:"content"`
ReasoningContent string `json:"reasoningContent,omitempty"`
MCPExecutionIDs []string `json:"mcpExecutionIds,omitempty"`
ProcessDetails []map[string]interface{} `json:"processDetails,omitempty"`
CreatedAt time.Time `json:"createdAt"`
UpdatedAt time.Time `json:"updatedAt"`
}
// CreateConversation 创建新对话
@@ -498,8 +499,8 @@ func (db *DB) AddMessage(conversationID, role, content string, mcpExecutionIDs [
}
_, err := db.Exec(
"INSERT INTO messages (id, conversation_id, role, content, mcp_execution_ids, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?)",
id, conversationID, role, content, mcpIDsJSON, now, now,
"INSERT INTO messages (id, conversation_id, role, content, reasoning_content, mcp_execution_ids, created_at, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
id, conversationID, role, content, "", mcpIDsJSON, now, now,
)
if err != nil {
return nil, fmt.Errorf("添加消息失败: %w", err)
@@ -523,10 +524,30 @@ func (db *DB) AddMessage(conversationID, role, content string, mcpExecutionIDs [
return message, nil
}
// UpdateAssistantMessageFinalize writes the final state of an assistant
// message: body content, MCP execution IDs (persisted as a JSON array), and
// the aggregated reasoning-chain text used for replay when no trace-based
// fallback is available.
//
// reasoningContent is trimmed before persisting. An empty mcpExecutionIDs
// slice stores an empty string (not "[]") in mcp_execution_ids.
func (db *DB) UpdateAssistantMessageFinalize(messageID, content string, mcpExecutionIDs []string, reasoningContent string) error {
	var mcpIDsJSON string
	if len(mcpExecutionIDs) > 0 {
		jsonData, err := json.Marshal(mcpExecutionIDs)
		if err != nil {
			return fmt.Errorf("序列化MCP执行ID失败: %w", err)
		}
		mcpIDsJSON = string(jsonData)
	}
	// updated_at is refreshed so the frontend sorts/refreshes correctly.
	_, err := db.Exec(
		"UPDATE messages SET content = ?, mcp_execution_ids = ?, reasoning_content = ?, updated_at = ? WHERE id = ?",
		content, mcpIDsJSON, strings.TrimSpace(reasoningContent), time.Now(), messageID,
	)
	if err != nil {
		return fmt.Errorf("更新助手消息失败: %w", err)
	}
	return nil
}
// GetMessages 获取对话的所有消息
func (db *DB) GetMessages(conversationID string) ([]Message, error) {
rows, err := db.Query(
"SELECT id, conversation_id, role, content, mcp_execution_ids, created_at, updated_at FROM messages WHERE conversation_id = ? ORDER BY created_at ASC",
"SELECT id, conversation_id, role, content, reasoning_content, mcp_execution_ids, created_at, updated_at FROM messages WHERE conversation_id = ? ORDER BY created_at ASC",
conversationID,
)
if err != nil {
@@ -537,13 +558,17 @@ func (db *DB) GetMessages(conversationID string) ([]Message, error) {
var messages []Message
for rows.Next() {
var msg Message
var reasoning sql.NullString
var mcpIDsJSON sql.NullString
var createdAt string
var updatedAt sql.NullString
if err := rows.Scan(&msg.ID, &msg.ConversationID, &msg.Role, &msg.Content, &mcpIDsJSON, &createdAt, &updatedAt); err != nil {
if err := rows.Scan(&msg.ID, &msg.ConversationID, &msg.Role, &msg.Content, &reasoning, &mcpIDsJSON, &createdAt, &updatedAt); err != nil {
return nil, fmt.Errorf("扫描消息失败: %w", err)
}
if reasoning.Valid {
msg.ReasoningContent = reasoning.String
}
// 尝试多种时间格式解析
var err error
@@ -683,7 +708,7 @@ type ProcessDetail struct {
ID string `json:"id"`
MessageID string `json:"messageId"`
ConversationID string `json:"conversationId"`
EventType string `json:"eventType"` // iteration, thinking, tool_calls_detected, tool_call, tool_result, progress, error
EventType string `json:"eventType"` // iteration, thinking, reasoning_chain, tool_calls_detected, tool_call, tool_result, progress, error
Message string `json:"message"`
Data string `json:"data"` // JSON格式的数据
CreatedAt time.Time `json:"createdAt"`
+19
View File
@@ -594,6 +594,25 @@ func (db *DB) migrateMessagesTable() error {
// 回填已有数据:让 updated_at 至少等于 created_at,避免前端出现空/当前时间回退。
_, _ = db.Exec("UPDATE messages SET updated_at = created_at WHERE updated_at IS NULL OR updated_at = ''")
// reasoning_content:DeepSeek 思考模式 + 工具调用续跑;与 last_react_input 互补,供消息表回退路径回放
var rcColCount int
errRC := db.QueryRow("SELECT COUNT(*) FROM pragma_table_info('messages') WHERE name='reasoning_content'").Scan(&rcColCount)
if errRC != nil {
if _, addErr := db.Exec("ALTER TABLE messages ADD COLUMN reasoning_content TEXT"); addErr != nil {
errMsg := strings.ToLower(addErr.Error())
if !strings.Contains(errMsg, "duplicate column") && !strings.Contains(errMsg, "already exists") {
return fmt.Errorf("添加 messages.reasoning_content 字段失败: %w", addErr)
}
}
} else if rcColCount == 0 {
if _, err := db.Exec("ALTER TABLE messages ADD COLUMN reasoning_content TEXT"); err != nil {
errMsg := strings.ToLower(err.Error())
if !strings.Contains(errMsg, "duplicate column") && !strings.Contains(errMsg, "already exists") {
return fmt.Errorf("添加 messages.reasoning_content 字段失败: %w", err)
}
}
}
return nil
}
+124 -95
View File
@@ -19,6 +19,7 @@ import (
"cyberstrike-ai/internal/agent"
"cyberstrike-ai/internal/config"
"cyberstrike-ai/internal/database"
"cyberstrike-ai/internal/reasoning"
"cyberstrike-ai/internal/mcp"
"cyberstrike-ai/internal/mcp/builtin"
"cyberstrike-ai/internal/multiagent"
@@ -201,6 +202,14 @@ type ChatAttachment struct {
ServerPath string `json:"serverPath,omitempty"` // 已保存在 chat_uploads 下的绝对路径(由 POST /api/chat-uploads 返回)
}
// ChatReasoningRequest carries the chat page's "model reasoning" intent
// (consumed only by the Eino path; the native agent-loop ignores it).
type ChatReasoningRequest struct {
	// Mode: default (follow system config) | off | on | auto
	Mode string `json:"mode,omitempty"`
	// Effort: low | medium | high | max; empty means unspecified (system
	// defaults and per-profile rules decide).
	Effort string `json:"effort,omitempty"`
}
// ChatRequest 聊天请求
type ChatRequest struct {
Message string `json:"message" binding:"required"`
@@ -209,10 +218,18 @@ type ChatRequest struct {
Attachments []ChatAttachment `json:"attachments,omitempty"`
WebShellConnectionID string `json:"webshellConnectionId,omitempty"` // WebShell 管理 - AI 助手:当前选中的连接 ID,仅使用 webshell_* 工具
Hitl *HITLRequest `json:"hitl,omitempty"`
Reasoning *ChatReasoningRequest `json:"reasoning,omitempty"`
// Orchestration 仅对 /api/multi-agent、/api/multi-agent/streamdeep | plan_execute | supervisor;空则等同 deep。机器人/批量等无请求体时由服务端默认 deep。/api/eino-agent* 不使用此字段。
Orchestration string `json:"orchestration,omitempty"`
}
// chatReasoningToClientIntent adapts an optional request-body reasoning block
// into the internal reasoning.ClientIntent; a nil request yields nil.
func chatReasoningToClientIntent(r *ChatReasoningRequest) *reasoning.ClientIntent {
	if r == nil {
		return nil
	}
	intent := reasoning.ClientIntent{
		Mode:   r.Mode,
		Effort: r.Effort,
	}
	return &intent
}
type HITLRequest struct {
Enabled bool `json:"enabled"`
Mode string `json:"mode,omitempty"`
@@ -567,14 +584,7 @@ func (h *AgentHandler) AgentLoop(c *gin.Context) {
h.logger.Warn("获取历史消息失败", zap.Error(err))
agentHistoryMessages = []agent.ChatMessage{}
} else {
// 将数据库消息转换为Agent消息格式
agentHistoryMessages = make([]agent.ChatMessage, 0, len(historyMessages))
for _, msg := range historyMessages {
agentHistoryMessages = append(agentHistoryMessages, agent.ChatMessage{
Role: msg.Role,
Content: msg.Content,
})
}
agentHistoryMessages = dbMessagesToAgentChatMessages(historyMessages)
h.logger.Info("从消息表加载历史消息", zap.Int("count", len(agentHistoryMessages)))
}
} else {
@@ -775,6 +785,7 @@ func (h *AgentHandler) ProcessMessageForRobot(ctx context.Context, conversationI
progressCallback,
h.agentsMarkdownDir,
"deep",
nil,
)
if errMA != nil {
if shouldPersistEinoAgentTraceAfterRunError(ctx) {
@@ -788,17 +799,8 @@ func (h *AgentHandler) ProcessMessageForRobot(ctx context.Context, conversationI
return "", conversationID, errMA
}
if assistantMessageID != "" {
mcpIDsJSON := ""
if len(resultMA.MCPExecutionIDs) > 0 {
jsonData, _ := json.Marshal(resultMA.MCPExecutionIDs)
mcpIDsJSON = string(jsonData)
}
_, err = h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
resultMA.Response, mcpIDsJSON, time.Now(), assistantMessageID,
)
if err != nil {
h.logger.Warn("机器人:更新助手消息失败", zap.Error(err))
if errU := h.db.UpdateAssistantMessageFinalize(assistantMessageID, resultMA.Response, resultMA.MCPExecutionIDs, multiagent.AggregatedReasoningFromTraceJSON(resultMA.LastAgentTraceInput)); errU != nil {
h.logger.Warn("机器人:更新助手消息失败", zap.Error(errU))
}
} else {
if _, err = h.db.AddMessage(conversationID, "assistant", resultMA.Response, resultMA.MCPExecutionIDs); err != nil {
@@ -823,17 +825,8 @@ func (h *AgentHandler) ProcessMessageForRobot(ctx context.Context, conversationI
// 更新助手消息内容与 MCP 执行 ID(与 stream 一致)
if assistantMessageID != "" {
mcpIDsJSON := ""
if len(result.MCPExecutionIDs) > 0 {
jsonData, _ := json.Marshal(result.MCPExecutionIDs)
mcpIDsJSON = string(jsonData)
}
_, err = h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
result.Response, mcpIDsJSON, time.Now(), assistantMessageID,
)
if err != nil {
h.logger.Warn("机器人:更新助手消息失败", zap.Error(err))
if errU := h.db.UpdateAssistantMessageFinalize(assistantMessageID, result.Response, result.MCPExecutionIDs, multiagent.AggregatedReasoningFromTraceJSON(result.LastAgentTraceInput)); errU != nil {
h.logger.Warn("机器人:更新助手消息失败", zap.Error(errU))
}
} else {
if _, err = h.db.AddMessage(conversationID, "assistant", result.Response, result.MCPExecutionIDs); err != nil {
@@ -891,10 +884,12 @@ func (h *AgentHandler) createProgressCallback(runCtx context.Context, cancelRun
return ""
}
// thinking_stream_*:不逐条落库,按 streamId 聚合,在后续关键事件前补一条可持久化的 thinking
// thinking_stream_*(ReAct 等助手正文流)与 reasoning_chain_stream_*Eino ReasoningContent):
// 不逐条落库,按 streamId 聚合,flush 时分别落 thinking / reasoning_chain。
type thinkingBuf struct {
b strings.Builder
meta map[string]interface{}
b strings.Builder
meta map[string]interface{}
persistAs string // "thinking" | "reasoning_chain"
}
thinkingStreams := make(map[string]*thinkingBuf) // streamId -> buf
flushedThinking := make(map[string]bool) // streamId -> flushed
@@ -948,8 +943,12 @@ func (h *AgentHandler) createProgressCallback(runCtx context.Context, cancelRun
}
data[k] = v
}
if err := h.db.AddProcessDetail(assistantMessageID, conversationID, "thinking", content, data); err != nil {
h.logger.Warn("保存过程详情失败", zap.Error(err), zap.String("eventType", "thinking"))
persist := tb.persistAs
if persist != "reasoning_chain" {
persist = "thinking"
}
if err := h.db.AddProcessDetail(assistantMessageID, conversationID, persist, content, data); err != nil {
h.logger.Warn("保存过程详情失败", zap.Error(err), zap.String("eventType", persist))
}
flushedThinking[sid] = true
}
@@ -1177,14 +1176,20 @@ func (h *AgentHandler) createProgressCallback(runCtx context.Context, cancelRun
return
}
// 聚合 thinking_stream_*ReasoningContent,不逐条落库
if eventType == "thinking_stream_start" {
// 聚合 thinking_stream_* / reasoning_chain_stream_*,不逐条落库
if eventType == "thinking_stream_start" || eventType == "reasoning_chain_stream_start" {
persistAs := "thinking"
if eventType == "reasoning_chain_stream_start" {
persistAs = "reasoning_chain"
}
if dataMap, ok := data.(map[string]interface{}); ok {
if sid, ok2 := dataMap["streamId"].(string); ok2 && sid != "" {
tb := thinkingStreams[sid]
if tb == nil {
tb = &thinkingBuf{meta: map[string]interface{}{}}
tb = &thinkingBuf{meta: map[string]interface{}{}, persistAs: persistAs}
thinkingStreams[sid] = tb
} else {
tb.persistAs = persistAs
}
// 记录元信息(source/einoAgent/einoRole/iteration 等)
for k, v := range dataMap {
@@ -1194,15 +1199,21 @@ func (h *AgentHandler) createProgressCallback(runCtx context.Context, cancelRun
}
return
}
if eventType == "thinking_stream_delta" {
if eventType == "thinking_stream_delta" || eventType == "reasoning_chain_stream_delta" {
persistAs := "thinking"
if eventType == "reasoning_chain_stream_delta" {
persistAs = "reasoning_chain"
}
if dataMap, ok := data.(map[string]interface{}); ok {
if sid, ok2 := dataMap["streamId"].(string); ok2 && sid != "" {
tb := thinkingStreams[sid]
if tb == nil {
tb = &thinkingBuf{meta: map[string]interface{}{}}
tb = &thinkingBuf{meta: map[string]interface{}{}, persistAs: persistAs}
thinkingStreams[sid] = tb
} else if tb.persistAs == "" {
tb.persistAs = persistAs
}
// delta 片段直接拼接message 本身就是 reasoning content
// delta 片段直接拼接
tb.b.WriteString(message)
// 有时 delta 先到 start 未到,补充元信息
for k, v := range dataMap {
@@ -1213,10 +1224,9 @@ func (h *AgentHandler) createProgressCallback(runCtx context.Context, cancelRun
return
}
// 当 Agent 同时发送 thinking_stream_* 和 thinking(带同一 streamId时,
// thinking_stream_* 已经会在 flushThinkingStreams() 聚合落库;
// 这里跳过同 streamId 的 thinking,避免 processDetails 双份展示。
if eventType == "thinking" {
// 当 Agent 同时发送 *_stream_* 与同名 streamId 的 thinking/reasoning_chain 时,
// 流式聚合已会在 flushThinkingStreams() 落库;此处跳过逐条重复。
if eventType == "thinking" || eventType == "reasoning_chain" {
if dataMap, ok := data.(map[string]interface{}); ok {
if sid, ok2 := dataMap["streamId"].(string); ok2 && sid != "" {
if tb, exists := thinkingStreams[sid]; exists && tb != nil {
@@ -1245,7 +1255,7 @@ func (h *AgentHandler) createProgressCallback(runCtx context.Context, cancelRun
if eventType == "tool_result" {
discardPlanningIfEchoesToolResult(&respPlan, data)
}
// 在关键过程事件落库前,先把「规划中」与 thinking_stream 落库
// 在关键过程事件落库前,先把「规划中」与聚合中的 thinking / reasoning_chain 流落库
flushResponsePlan()
flushThinkingStreams()
if err := h.db.AddProcessDetail(assistantMessageID, conversationID, eventType, message, data); err != nil {
@@ -1427,14 +1437,7 @@ func (h *AgentHandler) AgentLoopStream(c *gin.Context) {
h.logger.Warn("获取历史消息失败", zap.Error(err))
agentHistoryMessages = []agent.ChatMessage{}
} else {
// 将数据库消息转换为Agent消息格式
agentHistoryMessages = make([]agent.ChatMessage, 0, len(historyMessages))
for _, msg := range historyMessages {
agentHistoryMessages = append(agentHistoryMessages, agent.ChatMessage{
Role: msg.Role,
Content: msg.Content,
})
}
agentHistoryMessages = dbMessagesToAgentChatMessages(historyMessages)
h.logger.Info("从消息表加载历史消息", zap.Int("count", len(agentHistoryMessages)))
}
} else {
@@ -1727,20 +1730,8 @@ func (h *AgentHandler) AgentLoopStream(c *gin.Context) {
// 更新助手消息内容
if assistantMsg != nil {
_, err = h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
result.Response,
func() string {
if len(result.MCPExecutionIDs) > 0 {
jsonData, _ := json.Marshal(result.MCPExecutionIDs)
return string(jsonData)
}
return ""
}(),
time.Now(), assistantMessageID,
)
if err != nil {
h.logger.Error("更新助手消息失败", zap.Error(err))
if errU := h.db.UpdateAssistantMessageFinalize(assistantMessageID, result.Response, result.MCPExecutionIDs, multiagent.AggregatedReasoningFromTraceJSON(result.LastAgentTraceInput)); errU != nil {
h.logger.Error("更新助手消息失败", zap.Error(errU))
}
} else {
// 如果之前创建失败,现在创建
@@ -1789,27 +1780,51 @@ func (h *AgentHandler) CancelAgentLoop(c *gin.Context) {
return
}
execID := h.tasks.ActiveMCPExecutionID(req.ConversationID)
if execID == "" {
c.JSON(http.StatusBadRequest, gin.H{"error": "当前没有正在执行的 MCP 工具(例如模型尚在推理、尚未发起工具调用)。请等待工具开始执行后再试,或使用「彻底停止」结束整轮任务。"})
return
}
note := strings.TrimSpace(req.Reason)
if !h.agent.CancelMCPToolExecutionWithNote(execID, note) {
c.JSON(http.StatusNotFound, gin.H{"error": "未找到进行中的工具执行或该调用已结束"})
if execID != "" {
if !h.agent.CancelMCPToolExecutionWithNote(execID, note) {
c.JSON(http.StatusNotFound, gin.H{"error": "未找到进行中的工具执行或该调用已结束"})
return
}
h.logger.Info("对话页仅终止当前 MCP 工具",
zap.String("conversationId", req.ConversationID),
zap.String("executionId", execID),
zap.Bool("hasNote", note != ""),
)
c.JSON(http.StatusOK, gin.H{
"status": "tool_abort_requested",
"conversationId": req.ConversationID,
"executionId": execID,
"message": "已请求终止当前工具调用;工具返回后本轮推理将继续(与 MCP 监控页终止一致)。",
"continueAfter": true,
"interruptWithNote": note != "",
"continueWithoutTool": false,
})
return
}
h.logger.Info("对话页仅终止当前 MCP 工具",
// 无进行中的 MCP 工具(模型纯推理/流式输出阶段):取消当前上下文并由 Eino 流式处理器合并用户补充后自动续跑。
h.tasks.SetInterruptContinueNote(req.ConversationID, note)
ok, err := h.tasks.CancelTask(req.ConversationID, multiagent.ErrInterruptContinue)
if err != nil {
h.logger.Error("中断并继续(无工具)失败", zap.Error(err))
c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
return
}
if !ok {
c.JSON(http.StatusNotFound, gin.H{"error": "未找到正在执行的任务"})
return
}
h.logger.Info("对话页中断并继续(无 MCP 工具,将自动续跑)",
zap.String("conversationId", req.ConversationID),
zap.String("executionId", execID),
zap.Bool("hasNote", note != ""),
)
c.JSON(http.StatusOK, gin.H{
"status": "tool_abort_requested",
"conversationId": req.ConversationID,
"executionId": execID,
"message": "已请求终止当前工具调用;工具返回后本轮推理将继续(与 MCP 监控页终止一致)。",
"continueAfter": true,
"interruptWithNote": note != "",
"status": "interrupt_continue_scheduled",
"conversationId": req.ConversationID,
"message": "已请求暂停当前推理;用户补充将合并到上下文并自动继续执行(无需整轮停止)。",
"continueAfter": true,
"interruptWithNote": note != "",
"continueWithoutTool": true,
})
return
}
@@ -2640,12 +2655,12 @@ func (h *AgentHandler) executeBatchQueue(queueID string) {
var runErr error
switch {
case useBatchMulti:
resultMA, runErr = multiagent.RunDeepAgent(taskCtx, h.config, &h.config.MultiAgent, h.agent, h.logger, conversationID, finalMessage, []agent.ChatMessage{}, roleTools, progressCallback, h.agentsMarkdownDir, batchOrch)
resultMA, runErr = multiagent.RunDeepAgent(taskCtx, h.config, &h.config.MultiAgent, h.agent, h.logger, conversationID, finalMessage, []agent.ChatMessage{}, roleTools, progressCallback, h.agentsMarkdownDir, batchOrch, nil)
case useEinoSingle:
if h.config == nil {
runErr = fmt.Errorf("服务器配置未加载")
} else {
resultMA, runErr = multiagent.RunEinoSingleChatModelAgent(taskCtx, h.config, &h.config.MultiAgent, h.agent, h.logger, conversationID, finalMessage, []agent.ChatMessage{}, roleTools, progressCallback)
resultMA, runErr = multiagent.RunEinoSingleChatModelAgent(taskCtx, h.config, &h.config.MultiAgent, h.agent, h.logger, conversationID, finalMessage, []agent.ChatMessage{}, roleTools, progressCallback, nil)
}
default:
result, runErr = h.agent.AgentLoopWithProgress(taskCtx, finalMessage, []agent.ChatMessage{}, conversationID, progressCallback, roleTools)
@@ -2744,17 +2759,7 @@ func (h *AgentHandler) executeBatchQueue(queueID string) {
// 更新助手消息内容
if assistantMessageID != "" {
mcpIDsJSON := ""
if len(mcpIDs) > 0 {
jsonData, _ := json.Marshal(mcpIDs)
mcpIDsJSON = string(jsonData)
}
if _, updateErr := h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
resText,
mcpIDsJSON,
time.Now(), assistantMessageID,
); updateErr != nil {
if updateErr := h.db.UpdateAssistantMessageFinalize(assistantMessageID, resText, mcpIDs, multiagent.AggregatedReasoningFromTraceJSON(lastIn)); updateErr != nil {
h.logger.Warn("更新助手消息失败", zap.String("queueId", queueID), zap.String("taskId", task.ID), zap.Error(updateErr))
// 如果更新失败,尝试创建新消息
_, err = h.db.AddMessage(conversationID, "assistant", resText, mcpIDs)
@@ -2846,6 +2851,10 @@ func (h *AgentHandler) loadHistoryFromAgentTrace(conversationID string) ([]agent
if content, ok := msgMap["content"].(string); ok {
msg.Content = content
}
// DeepSeek 思考模式:含工具调用的 assistant 须在后续请求中回传 reasoning_content
if rc, ok := msgMap["reasoning_content"].(string); ok && strings.TrimSpace(rc) != "" {
msg.ReasoningContent = rc
}
// 解析tool_calls(如果存在)
if toolCallsRaw, ok := msgMap["tool_calls"]; ok && toolCallsRaw != nil {
@@ -2901,6 +2910,11 @@ func (h *AgentHandler) loadHistoryFromAgentTrace(conversationID string) ([]agent
if toolCallID, ok := msgMap["tool_call_id"].(string); ok {
msg.ToolCallID = toolCallID
}
if tn, ok := msgMap["tool_name"].(string); ok && strings.TrimSpace(tn) != "" {
msg.ToolName = strings.TrimSpace(tn)
} else if tn, ok := msgMap["name"].(string); ok && strings.TrimSpace(tn) != "" && strings.EqualFold(msg.Role, "tool") {
msg.ToolName = strings.TrimSpace(tn)
}
agentMessages = append(agentMessages, msg)
}
@@ -2946,3 +2960,18 @@ func (h *AgentHandler) loadHistoryFromAgentTrace(conversationID string) ([]agent
)
return agentMessages, nil
}
// dbMessagesToAgentChatMessages converts stored DB messages into agent chat
// messages for the history-fallback path, carrying reasoning_content along so
// DeepSeek thinking + tool-call replay keeps working.
func dbMessagesToAgentChatMessages(msgs []database.Message) []agent.ChatMessage {
	converted := make([]agent.ChatMessage, 0, len(msgs))
	for _, m := range msgs {
		converted = append(converted, agent.ChatMessage{
			Role:             m.Role,
			Content:          m.Content,
			ReasoningContent: m.ReasoningContent,
		})
	}
	return converted
}
+13
View File
@@ -1312,6 +1312,19 @@ func updateOpenAIConfig(doc *yaml.Node, cfg config.OpenAIConfig) {
if cfg.MaxTotalTokens > 0 {
setIntInMap(openaiNode, "max_total_tokens", cfg.MaxTotalTokens)
}
rn := ensureMap(openaiNode, "reasoning")
if strings.TrimSpace(cfg.Reasoning.Mode) != "" {
setStringInMap(rn, "mode", cfg.Reasoning.Mode)
}
if strings.TrimSpace(cfg.Reasoning.Effort) != "" {
setStringInMap(rn, "effort", cfg.Reasoning.Effort)
}
if cfg.Reasoning.AllowClientReasoning != nil {
setBoolInMap(rn, "allow_client_reasoning", *cfg.Reasoning.AllowClientReasoning)
}
if strings.TrimSpace(cfg.Reasoning.Profile) != "" {
setStringInMap(rn, "profile", cfg.Reasoning.Profile)
}
}
func updateFOFAConfig(doc *yaml.Node, cfg config.FofaConfig) {
+65 -46
View File
@@ -46,7 +46,7 @@ func (h *AgentHandler) EinoSingleAgentLoopStream(c *gin.Context) {
sendEvent := func(eventType, message string, data interface{}) {
if eventType == "error" && baseCtx != nil {
cause := context.Cause(baseCtx)
if errors.Is(cause, ErrTaskCancelled) {
if errors.Is(cause, ErrTaskCancelled) || errors.Is(cause, multiagent.ErrInterruptContinue) {
return
}
}
@@ -175,29 +175,69 @@ func (h *AgentHandler) EinoSingleAgentLoopStream(c *gin.Context) {
}
taskOwned = true
progressCallback := h.createProgressCallback(taskCtx, cancelWithCause, conversationID, assistantMessageID, sendEvent)
taskCtx = mcp.WithMCPConversationID(taskCtx, conversationID)
taskCtx = mcp.WithToolRunRegistry(taskCtx, h.tasks)
taskCtx = multiagent.WithHITLToolInterceptor(taskCtx, func(ctx context.Context, toolName, arguments string) (string, error) {
return h.interceptHITLForEinoTool(ctx, cancelWithCause, conversationID, assistantMessageID, sendEvent, toolName, arguments)
})
var cumulativeMCPExecutionIDs []string
result, runErr = multiagent.RunEinoSingleChatModelAgent(
taskCtx,
h.config,
&h.config.MultiAgent,
h.agent,
h.logger,
conversationID,
curFinalMessage,
curHistory,
roleTools,
progressCallback,
)
timeoutCancel()
for {
progressCallback := h.createProgressCallback(taskCtx, cancelWithCause, conversationID, assistantMessageID, sendEvent)
taskCtxLoop := mcp.WithMCPConversationID(taskCtx, conversationID)
taskCtxLoop = mcp.WithToolRunRegistry(taskCtxLoop, h.tasks)
taskCtxLoop = multiagent.WithHITLToolInterceptor(taskCtxLoop, func(ctx context.Context, toolName, arguments string) (string, error) {
return h.interceptHITLForEinoTool(ctx, cancelWithCause, conversationID, assistantMessageID, sendEvent, toolName, arguments)
})
result, runErr = multiagent.RunEinoSingleChatModelAgent(
taskCtxLoop,
h.config,
&h.config.MultiAgent,
h.agent,
h.logger,
conversationID,
curFinalMessage,
curHistory,
roleTools,
progressCallback,
chatReasoningToClientIntent(req.Reasoning),
)
timeoutCancel()
if result != nil && len(result.MCPExecutionIDs) > 0 {
cumulativeMCPExecutionIDs = mergeMCPExecutionIDLists(cumulativeMCPExecutionIDs, result.MCPExecutionIDs)
}
if runErr == nil {
break
}
if runErr != nil {
cause := context.Cause(baseCtx)
if errors.Is(cause, multiagent.ErrInterruptContinue) {
if shouldPersistEinoAgentTraceAfterRunError(baseCtx) {
h.persistEinoAgentTraceForResume(conversationID, result)
}
note := h.tasks.TakeInterruptContinueNote(conversationID)
icSummary := interruptContinueTimelineSummary(note)
progressCallback("user_interrupt_continue", icSummary, map[string]interface{}{
"conversationId": conversationID,
"rawReason": strings.TrimSpace(note),
"emptyReason": strings.TrimSpace(note) == "",
"kind": "no_active_mcp_tool",
})
inject := formatInterruptContinueUserMessage(note)
// 不写入 messages 表为 user 气泡:避免主对话流出现大段模板;说明已由 user_interrupt_continue 记入助手 process_details(迭代详情)。
if hist, err := h.loadHistoryFromAgentTrace(conversationID); err == nil && len(hist) > 0 {
curHistory = hist
}
curFinalMessage = inject
sendEvent("progress", "已合并用户补充与最新轨迹,正在继续推理…", map[string]interface{}{
"conversationId": conversationID,
"source": "interrupt_continue",
})
h.tasks.UpdateTaskStatus(conversationID, "running")
baseCtx, cancelWithCause = context.WithCancelCause(context.Background())
h.tasks.BindTaskCancel(conversationID, cancelWithCause)
taskCtx, timeoutCancel = context.WithTimeout(baseCtx, 600*time.Minute)
continue
}
if shouldPersistEinoAgentTraceAfterRunError(baseCtx) {
h.persistEinoAgentTraceForResume(conversationID, result)
}
@@ -258,18 +298,7 @@ func (h *AgentHandler) EinoSingleAgentLoopStream(c *gin.Context) {
}
if assistantMessageID != "" {
mcpIDsJSON := ""
if len(result.MCPExecutionIDs) > 0 {
jsonData, _ := json.Marshal(result.MCPExecutionIDs)
mcpIDsJSON = string(jsonData)
}
_, _ = h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
result.Response,
mcpIDsJSON,
time.Now(),
assistantMessageID,
)
_ = h.db.UpdateAssistantMessageFinalize(assistantMessageID, result.Response, cumulativeMCPExecutionIDs, multiagent.AggregatedReasoningFromTraceJSON(result.LastAgentTraceInput))
}
if result.LastAgentTraceInput != "" || result.LastAgentTraceOutput != "" {
@@ -279,7 +308,7 @@ func (h *AgentHandler) EinoSingleAgentLoopStream(c *gin.Context) {
}
sendEvent("response", result.Response, map[string]interface{}{
"mcpExecutionIds": result.MCPExecutionIDs,
"mcpExecutionIds": cumulativeMCPExecutionIDs,
"conversationId": conversationID,
"messageId": assistantMessageID,
"agentMode": "eino_single",
@@ -337,6 +366,7 @@ func (h *AgentHandler) EinoSingleAgentLoop(c *gin.Context) {
prep.History,
prep.RoleTools,
progressCallback,
chatReasoningToClientIntent(req.Reasoning),
)
if runErr != nil {
if shouldPersistEinoAgentTraceAfterRunError(baseCtx) {
@@ -347,18 +377,7 @@ func (h *AgentHandler) EinoSingleAgentLoop(c *gin.Context) {
}
if prep.AssistantMessageID != "" {
mcpIDsJSON := ""
if len(result.MCPExecutionIDs) > 0 {
jsonData, _ := json.Marshal(result.MCPExecutionIDs)
mcpIDsJSON = string(jsonData)
}
_, _ = h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
result.Response,
mcpIDsJSON,
time.Now(),
prep.AssistantMessageID,
)
_ = h.db.UpdateAssistantMessageFinalize(prep.AssistantMessageID, result.Response, result.MCPExecutionIDs, multiagent.AggregatedReasoningFromTraceJSON(result.LastAgentTraceInput))
}
if result.LastAgentTraceInput != "" || result.LastAgentTraceOutput != "" {
_ = h.db.SaveAgentTrace(prep.ConversationID, result.LastAgentTraceInput, result.LastAgentTraceOutput)
+114 -48
View File
@@ -63,7 +63,7 @@ func (h *AgentHandler) MultiAgentLoopStream(c *gin.Context) {
// 为避免 UI 看到“取消错误 + cancelled 文案”两条回复,这里直接丢弃取消对应的 error。
if eventType == "error" && baseCtx != nil {
cause := context.Cause(baseCtx)
if errors.Is(cause, ErrTaskCancelled) {
if errors.Is(cause, ErrTaskCancelled) || errors.Is(cause, multiagent.ErrInterruptContinue) {
return
}
}
@@ -184,31 +184,72 @@ func (h *AgentHandler) MultiAgentLoopStream(c *gin.Context) {
}
taskOwned = true
progressCallback := h.createProgressCallback(taskCtx, cancelWithCause, conversationID, assistantMessageID, sendEvent)
taskCtx = mcp.WithMCPConversationID(taskCtx, conversationID)
taskCtx = mcp.WithToolRunRegistry(taskCtx, h.tasks)
taskCtx = multiagent.WithHITLToolInterceptor(taskCtx, func(ctx context.Context, toolName, arguments string) (string, error) {
return h.interceptHITLForEinoTool(ctx, cancelWithCause, conversationID, assistantMessageID, sendEvent, toolName, arguments)
})
// 同一 HTTP 流内多段 Run(如中断并继续)合并 MCP execution id,供最终 response / 库表与工具芯片展示完整列表
var cumulativeMCPExecutionIDs []string
result, runErr = multiagent.RunDeepAgent(
taskCtx,
h.config,
&h.config.MultiAgent,
h.agent,
h.logger,
conversationID,
curFinalMessage,
curHistory,
roleTools,
progressCallback,
h.agentsMarkdownDir,
orch,
)
timeoutCancel()
for {
progressCallback := h.createProgressCallback(taskCtx, cancelWithCause, conversationID, assistantMessageID, sendEvent)
taskCtxLoop := mcp.WithMCPConversationID(taskCtx, conversationID)
taskCtxLoop = mcp.WithToolRunRegistry(taskCtxLoop, h.tasks)
taskCtxLoop = multiagent.WithHITLToolInterceptor(taskCtxLoop, func(ctx context.Context, toolName, arguments string) (string, error) {
return h.interceptHITLForEinoTool(ctx, cancelWithCause, conversationID, assistantMessageID, sendEvent, toolName, arguments)
})
result, runErr = multiagent.RunDeepAgent(
taskCtxLoop,
h.config,
&h.config.MultiAgent,
h.agent,
h.logger,
conversationID,
curFinalMessage,
curHistory,
roleTools,
progressCallback,
h.agentsMarkdownDir,
orch,
chatReasoningToClientIntent(req.Reasoning),
)
timeoutCancel()
if result != nil && len(result.MCPExecutionIDs) > 0 {
cumulativeMCPExecutionIDs = mergeMCPExecutionIDLists(cumulativeMCPExecutionIDs, result.MCPExecutionIDs)
}
if runErr == nil {
break
}
if runErr != nil {
cause := context.Cause(baseCtx)
if errors.Is(cause, multiagent.ErrInterruptContinue) {
if shouldPersistEinoAgentTraceAfterRunError(baseCtx) {
h.persistEinoAgentTraceForResume(conversationID, result)
}
note := h.tasks.TakeInterruptContinueNote(conversationID)
icSummary := interruptContinueTimelineSummary(note)
progressCallback("user_interrupt_continue", icSummary, map[string]interface{}{
"conversationId": conversationID,
"rawReason": strings.TrimSpace(note),
"emptyReason": strings.TrimSpace(note) == "",
"kind": "no_active_mcp_tool",
})
inject := formatInterruptContinueUserMessage(note)
// 不写入 messages 表为 user 气泡:避免主对话流出现大段模板;说明已由 user_interrupt_continue 记入助手 process_details(迭代详情)。
if hist, err := h.loadHistoryFromAgentTrace(conversationID); err == nil && len(hist) > 0 {
curHistory = hist
}
curFinalMessage = inject
sendEvent("progress", "已合并用户补充与最新轨迹,正在继续推理…", map[string]interface{}{
"conversationId": conversationID,
"source": "interrupt_continue",
})
h.tasks.UpdateTaskStatus(conversationID, "running")
baseCtx, cancelWithCause = context.WithCancelCause(context.Background())
h.tasks.BindTaskCancel(conversationID, cancelWithCause)
taskCtx, timeoutCancel = context.WithTimeout(baseCtx, 600*time.Minute)
continue
}
if shouldPersistEinoAgentTraceAfterRunError(baseCtx) {
h.persistEinoAgentTraceForResume(conversationID, result)
}
@@ -269,18 +310,7 @@ func (h *AgentHandler) MultiAgentLoopStream(c *gin.Context) {
}
if assistantMessageID != "" {
mcpIDsJSON := ""
if len(result.MCPExecutionIDs) > 0 {
jsonData, _ := json.Marshal(result.MCPExecutionIDs)
mcpIDsJSON = string(jsonData)
}
_, _ = h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
result.Response,
mcpIDsJSON,
time.Now(),
assistantMessageID,
)
_ = h.db.UpdateAssistantMessageFinalize(assistantMessageID, result.Response, cumulativeMCPExecutionIDs, multiagent.AggregatedReasoningFromTraceJSON(result.LastAgentTraceInput))
}
if result.LastAgentTraceInput != "" || result.LastAgentTraceOutput != "" {
@@ -294,7 +324,7 @@ func (h *AgentHandler) MultiAgentLoopStream(c *gin.Context) {
effectiveOrch = config.NormalizeMultiAgentOrchestration(o)
}
sendEvent("response", result.Response, map[string]interface{}{
"mcpExecutionIds": result.MCPExecutionIDs,
"mcpExecutionIds": cumulativeMCPExecutionIDs,
"conversationId": conversationID,
"messageId": assistantMessageID,
"agentMode": "eino_" + effectiveOrch,
@@ -350,6 +380,7 @@ func (h *AgentHandler) MultiAgentLoop(c *gin.Context) {
progressCallback,
h.agentsMarkdownDir,
strings.TrimSpace(req.Orchestration),
chatReasoningToClientIntent(req.Reasoning),
)
if runErr != nil {
if shouldPersistEinoAgentTraceAfterRunError(baseCtx) {
@@ -365,18 +396,7 @@ func (h *AgentHandler) MultiAgentLoop(c *gin.Context) {
}
if prep.AssistantMessageID != "" {
mcpIDsJSON := ""
if len(result.MCPExecutionIDs) > 0 {
jsonData, _ := json.Marshal(result.MCPExecutionIDs)
mcpIDsJSON = string(jsonData)
}
_, _ = h.db.Exec(
"UPDATE messages SET content = ?, mcp_execution_ids = ?, updated_at = ? WHERE id = ?",
result.Response,
mcpIDsJSON,
time.Now(),
prep.AssistantMessageID,
)
_ = h.db.UpdateAssistantMessageFinalize(prep.AssistantMessageID, result.Response, result.MCPExecutionIDs, multiagent.AggregatedReasoningFromTraceJSON(result.LastAgentTraceInput))
}
if result.LastAgentTraceInput != "" || result.LastAgentTraceOutput != "" {
@@ -406,6 +426,52 @@ func (h *AgentHandler) persistEinoAgentTraceForResume(conversationID string, res
}
}
// mergeMCPExecutionIDLists merges two lists of MCP execution ids from
// multiple Run segments into one de-duplicated list, preserving first-seen
// order (all of dst before more). Blank / whitespace-only ids are dropped.
func mergeMCPExecutionIDLists(dst []string, more []string) []string {
	capacity := len(dst) + len(more)
	seen := make(map[string]struct{}, capacity)
	merged := make([]string, 0, capacity)
	for _, list := range [][]string{dst, more} {
		for _, raw := range list {
			id := strings.TrimSpace(raw)
			if id == "" {
				continue
			}
			if _, dup := seen[id]; dup {
				continue
			}
			seen[id] = struct{}{}
			merged = append(merged, id)
		}
	}
	return merged
}
// interruptContinueTimelineSummary builds the short body shown in the
// timeline / process_details for an "interrupt and continue" event (the full
// template is written into a separate user message).
func interruptContinueTimelineSummary(note string) string {
	trimmed := strings.TrimSpace(note)
	if trimmed != "" {
		return "用户中断说明(原文):\n\n" + trimmed
	}
	return "用户选择「中断并继续」,未填写说明;已按默认渗透补充模板合并上下文并续跑。"
}
// formatInterruptContinueUserMessage formats the note entered in the
// "interrupt and continue" dialog as the next-round user message (for the
// pentest scenario it stresses path supplements and port re-scanning).
func formatInterruptContinueUserMessage(note string) string {
	parts := []string{"【用户补充 / 中断后继续】\n"}
	if trimmed := strings.TrimSpace(note); trimmed != "" {
		parts = append(parts, trimmed, "\n\n")
	}
	parts = append(parts,
		"【请在本轮落实】\n",
		"- 将用户提供的接口路径、参数、业务变化纳入后续测试与推理。\n",
		"- 若资产或目标信息有更新,请对目标重新执行端口/服务探测,再基于新结果规划下一步。\n",
		"- 在已有轨迹基础上推进,避免无意义重复已完成的步骤。\n",
	)
	return strings.TrimSpace(strings.Join(parts, ""))
}
func multiAgentHTTPErrorStatus(err error) (int, string) {
msg := err.Error()
switch {
+1 -7
View File
@@ -55,13 +55,7 @@ func (h *AgentHandler) prepareMultiAgentSession(req *ChatRequest) (*multiAgentPr
if getErr != nil {
agentHistoryMessages = []agent.ChatMessage{}
} else {
agentHistoryMessages = make([]agent.ChatMessage, 0, len(historyMessages))
for _, msg := range historyMessages {
agentHistoryMessages = append(agentHistoryMessages, agent.ChatMessage{
Role: msg.Role,
Content: msg.Content,
})
}
agentHistoryMessages = dbMessagesToAgentChatMessages(historyMessages)
}
}
+59 -2
View File
@@ -6,6 +6,8 @@ import (
"strings"
"sync"
"time"
"cyberstrike-ai/internal/multiagent"
)
// ErrTaskCancelled 用户取消任务的错误
@@ -32,6 +34,9 @@ type AgentTask struct {
// ActiveMCPExecutionID 当前正在执行的 MCP 工具 executionId(仅内存,供「中断并继续」= 仅掐当前工具)
ActiveMCPExecutionID string `json:"-"`
// InterruptContinueNote 无 MCP 时「中断并继续」由用户在弹窗中填写的补充说明(Cancel 前写入,续跑轮次读取后清空)
InterruptContinueNote string `json:"-"`
cancel func(error)
}
@@ -65,6 +70,50 @@ func (m *AgentTaskManager) UnregisterRunningTool(conversationID, executionID str
}
}
// SetInterruptContinueNote stores the user-supplied note (in memory only)
// just before an ErrInterruptContinue cancellation is issued. A blank
// conversation id or an unknown task is silently ignored.
func (m *AgentTaskManager) SetInterruptContinueNote(conversationID, note string) {
	id := strings.TrimSpace(conversationID)
	if id == "" {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	task, ok := m.tasks[id]
	if !ok || task == nil {
		return
	}
	task.InterruptContinueNote = note
}
// TakeInterruptContinueNote reads and clears the stored note; the resumed
// run calls this exactly once when it starts. Returns "" for a blank
// conversation id or an unknown task.
func (m *AgentTaskManager) TakeInterruptContinueNote(conversationID string) string {
	id := strings.TrimSpace(conversationID)
	if id == "" {
		return ""
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	task, ok := m.tasks[id]
	if !ok || task == nil {
		return ""
	}
	note := task.InterruptContinueNote
	task.InterruptContinueNote = ""
	return note
}
// BindTaskCancel swaps in a new context-bound cancel function for an already
// running task (used after "interrupt and continue" replaces the base ctx).
// No-op for a blank conversation id, a nil cancel, or an unknown task.
func (m *AgentTaskManager) BindTaskCancel(conversationID string, cancel context.CancelCauseFunc) {
	id := strings.TrimSpace(conversationID)
	if id == "" || cancel == nil {
		return
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	task, ok := m.tasks[id]
	if !ok || task == nil {
		return
	}
	// context.CancelCauseFunc has underlying type func(error), so it is
	// directly assignable to the task's cancel field — no wrapper needed.
	task.cancel = cancel
}
// ActiveMCPExecutionID 返回当前会话进行中的工具 executionId,无则空串。
func (m *AgentTaskManager) ActiveMCPExecutionID(conversationID string) string {
conversationID = strings.TrimSpace(conversationID)
@@ -210,8 +259,16 @@ func (m *AgentTaskManager) CancelTask(conversationID string, cause error) (bool,
return true, nil
}
task.Status = "cancelling"
task.CancellingAt = time.Now()
// ErrInterruptContinue:仅掐断当前推理步骤,随后由处理器续跑,不进入长时间「取消中」态。
if cause != nil && errors.Is(cause, multiagent.ErrInterruptContinue) {
task.Status = "running"
} else {
task.Status = "cancelling"
task.CancellingAt = time.Now()
}
if cause != nil && errors.Is(cause, ErrTaskCancelled) {
task.InterruptContinueNote = ""
}
cancel := task.cancel
m.mu.Unlock()
+195 -98
View File
@@ -13,7 +13,9 @@ import (
"sync/atomic"
"unicode/utf8"
"cyberstrike-ai/internal/agent"
"cyberstrike-ai/internal/einomcp"
"cyberstrike-ai/internal/openai"
"github.com/cloudwego/eino/adk"
"github.com/cloudwego/eino/schema"
@@ -40,6 +42,13 @@ func normalizeStreamingDelta(current, incoming string) (next, delta string) {
return current + incoming, incoming
}
// isInterruptContinue reports whether ctx was cancelled with the
// ErrInterruptContinue cause (the user chose "interrupt and continue").
func isInterruptContinue(ctx context.Context) bool {
	if ctx != nil {
		return errors.Is(context.Cause(ctx), ErrInterruptContinue)
	}
	return false
}
func isEinoIterationLimitError(err error) bool {
if err == nil {
return false
@@ -70,6 +79,11 @@ type einoADKRunLoopArgs struct {
McpIDsMu *sync.Mutex
McpIDs *[]string
// FilesystemMonitorAgent / FilesystemMonitorRecord 非 nil 时,将 Eino ADK filesystem 中间件工具(ls/read_file/write_file/edit_file/glob/grep
// 在完成时写入 MCP 监控;execute 仍由 eino_execute_monitor 记录,此处跳过。
FilesystemMonitorAgent *agent.Agent
FilesystemMonitorRecord einomcp.ExecutionRecorder
// ToolInvokeNotify 与 einomcp.ToolsFromDefinitions 共享:run loop 在迭代前 SetMCP 桥 Fire 以补全 tool_result。
ToolInvokeNotify *einomcp.ToolInvokeNotifyHolder
@@ -77,6 +91,10 @@ type einoADKRunLoopArgs struct {
// EmptyResponseMessage 当未捕获到助手正文时的占位(多代理与单代理文案不同)。
EmptyResponseMessage string
// ModelFacingTrace 可选:由各 ChatModelAgent Handlers 链末尾中间件写入「即将送入模型」的消息快照;
// 非空时优先用于 LastAgentTraceInput 序列化,使续跑与 summarization/reduction 后的上下文一致。
ModelFacingTrace *modelFacingTraceHolder
}
func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs []adk.Message) (*RunResult, error) {
@@ -399,7 +417,8 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
}
ids := snapshotMCPIDs()
return buildEinoRunResultFromAccumulated(
orchMode, runAccumulatedMsgs, lastAssistant, lastPlanExecuteExecutor, emptyHint, ids, true,
orchMode, runAccumulatedMsgs, persistTraceSource(args, runAccumulatedMsgs),
lastAssistant, lastPlanExecuteExecutor, emptyHint, ids, true,
), runErr
}
@@ -409,10 +428,18 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
case <-ctx.Done():
flushAllPendingAsFailed(ctx.Err())
if progress != nil {
progress("error", "Request cancelled / 请求已取消", map[string]interface{}{
"conversationId": conversationID,
"source": "eino",
})
if isInterruptContinue(ctx) {
progress("progress", "已暂停当前输出,正在合并用户补充并继续…", map[string]interface{}{
"conversationId": conversationID,
"source": "eino",
"kind": "interrupt_continue",
})
} else {
progress("error", "Request cancelled / 请求已取消", map[string]interface{}{
"conversationId": conversationID,
"source": "eino",
})
}
}
return takePartial(ctx.Err())
default:
@@ -426,10 +453,18 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
if ctxErr := ctx.Err(); ctxErr != nil {
flushAllPendingAsFailed(ctxErr)
if progress != nil {
progress("error", ctxErr.Error(), map[string]interface{}{
"conversationId": conversationID,
"source": "eino",
})
if isInterruptContinue(ctx) {
progress("progress", "已暂停当前输出,正在合并用户补充并继续…", map[string]interface{}{
"conversationId": conversationID,
"source": "eino",
"kind": "interrupt_continue",
})
} else {
progress("error", ctxErr.Error(), map[string]interface{}{
"conversationId": conversationID,
"source": "eino",
})
}
}
return takePartial(ctxErr)
}
@@ -516,104 +551,140 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
var mainAssistantBuf string
var mainAssistDupTarget string // 非空表示本段主助手流需缓冲至 EOF,与 execute 输出比对去重
var reasoningBuf string
var prevReasoningDisplay string // UI 用:剥离 Claude 内部 signature 尾缀后的累计展示
var streamRecvErr error
type streamMsg struct {
chunk *schema.Message
err error
}
recvCh := make(chan streamMsg, 8)
go func() {
defer close(recvCh)
for {
ch, rerr := mv.MessageStream.Recv()
recvCh <- streamMsg{chunk: ch, err: rerr}
if rerr != nil {
return
}
}
}()
streamRecvLoop:
for {
chunk, rerr := mv.MessageStream.Recv()
if rerr != nil {
if errors.Is(rerr, io.EOF) {
break
select {
case <-ctx.Done():
streamRecvErr = ctx.Err()
break streamRecvLoop
case sm, ok := <-recvCh:
if !ok {
break streamRecvLoop
}
if logger != nil {
logger.Warn("eino stream recv error, flushing incomplete stream",
zap.Error(rerr),
zap.String("agent", ev.AgentName),
zap.Int("toolFragments", len(toolStreamFragments)))
}
streamRecvErr = rerr
break
}
if chunk == nil {
continue
}
if progress != nil && strings.TrimSpace(chunk.ReasoningContent) != "" {
var reasoningDelta string
reasoningBuf, reasoningDelta = normalizeStreamingDelta(reasoningBuf, chunk.ReasoningContent)
if reasoningDelta != "" {
if reasoningStreamID == "" {
reasoningStreamID = fmt.Sprintf("eino-reasoning-%s-%d", conversationID, atomic.AddInt64(&reasoningStreamSeq, 1))
progress("thinking_stream_start", " ", map[string]interface{}{
"streamId": reasoningStreamID,
"source": "eino",
"einoAgent": ev.AgentName,
"einoRole": einoRoleTag(ev.AgentName),
"orchestration": orchMode,
})
chunk, rerr := sm.chunk, sm.err
if rerr != nil {
if errors.Is(rerr, io.EOF) {
break streamRecvLoop
}
progress("thinking_stream_delta", reasoningDelta, map[string]interface{}{
"streamId": reasoningStreamID,
})
if logger != nil {
logger.Warn("eino stream recv error, flushing incomplete stream",
zap.Error(rerr),
zap.String("agent", ev.AgentName),
zap.Int("toolFragments", len(toolStreamFragments)))
}
streamRecvErr = rerr
break streamRecvLoop
}
}
if chunk.Content != "" {
if progress != nil && streamsMainAssistant(ev.AgentName) {
var contentDelta string
mainAssistantBuf, contentDelta = normalizeStreamingDelta(mainAssistantBuf, chunk.Content)
if contentDelta != "" {
if mainAssistDupTarget == "" {
executeStdoutDupMu.Lock()
if pendingExecuteStdoutDup != "" {
mainAssistDupTarget = pendingExecuteStdoutDup
}
executeStdoutDupMu.Unlock()
}
if mainAssistDupTarget != "" {
// 已展示过 tool_result,缓冲全文;EOF 后与 execute 输出相同则不再发助手流
if chunk == nil {
continue
}
if progress != nil && strings.TrimSpace(chunk.ReasoningContent) != "" {
var reasoningDelta string
reasoningBuf, reasoningDelta = normalizeStreamingDelta(reasoningBuf, chunk.ReasoningContent)
if reasoningDelta != "" {
fullDisplay := openai.DisplayReasoningContent(reasoningBuf)
var displayDelta string
if strings.HasPrefix(fullDisplay, prevReasoningDisplay) {
displayDelta = fullDisplay[len(prevReasoningDisplay):]
} else {
if !streamHeaderSent {
progress("response_start", "", map[string]interface{}{
"conversationId": conversationID,
"mcpExecutionIds": snapshotMCPIDs(),
"messageGeneratedBy": "eino:" + ev.AgentName,
"einoRole": "orchestrator",
"einoAgent": ev.AgentName,
"orchestration": orchMode,
})
streamHeaderSent = true
}
progress("response_delta", contentDelta, map[string]interface{}{
"conversationId": conversationID,
"mcpExecutionIds": snapshotMCPIDs(),
"einoRole": "orchestrator",
"einoAgent": ev.AgentName,
"orchestration": orchMode,
})
displayDelta = fullDisplay
}
}
} else if !streamsMainAssistant(ev.AgentName) {
var subDelta string
subAssistantBuf, subDelta = normalizeStreamingDelta(subAssistantBuf, chunk.Content)
if subDelta != "" {
if progress != nil {
if subReplyStreamID == "" {
subReplyStreamID = fmt.Sprintf("eino-sub-reply-%s-%d", conversationID, atomic.AddInt64(&einoSubReplyStreamSeq, 1))
progress("eino_agent_reply_stream_start", "", map[string]interface{}{
"streamId": subReplyStreamID,
"einoAgent": ev.AgentName,
"einoRole": "sub",
"conversationId": conversationID,
"source": "eino",
prevReasoningDisplay = fullDisplay
if displayDelta != "" {
if reasoningStreamID == "" {
reasoningStreamID = fmt.Sprintf("eino-reasoning-%s-%d", conversationID, atomic.AddInt64(&reasoningStreamSeq, 1))
progress("reasoning_chain_stream_start", " ", map[string]interface{}{
"streamId": reasoningStreamID,
"source": "eino",
"einoAgent": ev.AgentName,
"einoRole": einoRoleTag(ev.AgentName),
"orchestration": orchMode,
})
}
progress("eino_agent_reply_stream_delta", subDelta, map[string]interface{}{
"streamId": subReplyStreamID,
"conversationId": conversationID,
progress("reasoning_chain_stream_delta", displayDelta, map[string]interface{}{
"streamId": reasoningStreamID,
})
}
}
}
}
if len(chunk.ToolCalls) > 0 {
toolStreamFragments = append(toolStreamFragments, chunk.ToolCalls...)
if chunk.Content != "" {
if progress != nil && streamsMainAssistant(ev.AgentName) {
var contentDelta string
mainAssistantBuf, contentDelta = normalizeStreamingDelta(mainAssistantBuf, chunk.Content)
if contentDelta != "" {
if mainAssistDupTarget == "" {
executeStdoutDupMu.Lock()
if pendingExecuteStdoutDup != "" {
mainAssistDupTarget = pendingExecuteStdoutDup
}
executeStdoutDupMu.Unlock()
}
if mainAssistDupTarget != "" {
// 已展示过 tool_result,缓冲全文;EOF 后与 execute 输出相同则不再发助手流
} else {
if !streamHeaderSent {
progress("response_start", "", map[string]interface{}{
"conversationId": conversationID,
"mcpExecutionIds": snapshotMCPIDs(),
"messageGeneratedBy": "eino:" + ev.AgentName,
"einoRole": "orchestrator",
"einoAgent": ev.AgentName,
"orchestration": orchMode,
})
streamHeaderSent = true
}
progress("response_delta", contentDelta, map[string]interface{}{
"conversationId": conversationID,
"mcpExecutionIds": snapshotMCPIDs(),
"einoRole": "orchestrator",
"einoAgent": ev.AgentName,
"orchestration": orchMode,
})
}
}
} else if !streamsMainAssistant(ev.AgentName) {
var subDelta string
subAssistantBuf, subDelta = normalizeStreamingDelta(subAssistantBuf, chunk.Content)
if subDelta != "" {
if progress != nil {
if subReplyStreamID == "" {
subReplyStreamID = fmt.Sprintf("eino-sub-reply-%s-%d", conversationID, atomic.AddInt64(&einoSubReplyStreamSeq, 1))
progress("eino_agent_reply_stream_start", "", map[string]interface{}{
"streamId": subReplyStreamID,
"einoAgent": ev.AgentName,
"einoRole": "sub",
"conversationId": conversationID,
"source": "eino",
})
}
progress("eino_agent_reply_stream_delta", subDelta, map[string]interface{}{
"streamId": subReplyStreamID,
"conversationId": conversationID,
})
}
}
}
}
if len(chunk.ToolCalls) > 0 {
toolStreamFragments = append(toolStreamFragments, chunk.ToolCalls...)
}
}
}
if streamsMainAssistant(ev.AgentName) {
@@ -683,10 +754,17 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
}
var lastToolChunk *schema.Message
if merged := mergeStreamingToolCallFragments(toolStreamFragments); len(merged) > 0 {
lastToolChunk = &schema.Message{ToolCalls: merged}
lastToolChunk = mergeMessageToolCalls(&schema.Message{ToolCalls: merged})
}
tryEmitToolCallsOnce(lastToolChunk, ev.AgentName, orchestratorName, conversationID, progress, toolEmitSeen, subAgentToolStep, markPending)
// 流式路径此前只把 tool_calls 推给进度 UI,未写入 runAccumulatedMsgs;落库后 loadHistory→RepairOrphan 会删掉全部 tool 结果,表现为「续跑/下轮失忆」。
if lastToolChunk != nil && len(lastToolChunk.ToolCalls) > 0 {
runAccumulatedMsgs = append(runAccumulatedMsgs, schema.AssistantMessage("", lastToolChunk.ToolCalls))
}
if streamRecvErr != nil {
if isInterruptContinue(ctx) {
return takePartial(streamRecvErr)
}
if progress != nil {
progress("eino_stream_error", streamRecvErr.Error(), map[string]interface{}{
"conversationId": conversationID,
@@ -711,7 +789,7 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
if mv.Role == schema.Assistant {
if progress != nil && strings.TrimSpace(msg.ReasoningContent) != "" {
progress("thinking", strings.TrimSpace(msg.ReasoningContent), map[string]interface{}{
progress("reasoning_chain", openai.DisplayReasoningContent(strings.TrimSpace(msg.ReasoningContent)), map[string]interface{}{
"conversationId": conversationID,
"source": "eino",
"einoAgent": ev.AgentName,
@@ -817,11 +895,15 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
if toolCallID != "" {
removePendingByID(toolCallID)
if _, loaded := toolResultSent.LoadOrStore(toolCallID, struct{}{}); loaded {
// ToolInvokeNotify 可能已推过 tool_result(如 execute 流式包装里 Fire 仅携带截断后的 stdout),
// 此处仍应用 ADK Tool 消息中的完整内容刷新去重基准,避免模型复述全文时与截断串比对失败而重复展示「助手输出」。
recordPendingExecuteStdoutDup(toolName, content, isErr)
continue
}
data["toolCallId"] = toolCallID
}
recordPendingExecuteStdoutDup(toolName, content, isErr)
recordEinoADKFilesystemToolMonitor(args.FilesystemMonitorAgent, args.FilesystemMonitorRecord, toolName, toolCallID, runAccumulatedMsgs, content, isErr)
progress("tool_result", fmt.Sprintf("工具结果 (%s)", toolName), data)
}
}
@@ -831,11 +913,21 @@ func runEinoADKAgentLoop(ctx context.Context, args *einoADKRunLoopArgs, baseMsgs
mcpIDsMu.Unlock()
out := buildEinoRunResultFromAccumulated(
orchMode, runAccumulatedMsgs, lastAssistant, lastPlanExecuteExecutor, emptyHint, ids, false,
orchMode, runAccumulatedMsgs, persistTraceSource(args, runAccumulatedMsgs),
lastAssistant, lastPlanExecuteExecutor, emptyHint, ids, false,
)
return out, nil
}
// persistTraceSource chooses the message list to persist as the trace input:
// the model-facing snapshot when one was captured, otherwise fallback (the
// event-stream accumulated messages).
func persistTraceSource(args *einoADKRunLoopArgs, fallback []adk.Message) []adk.Message {
	if args == nil || args.ModelFacingTrace == nil {
		return fallback
	}
	snap := args.ModelFacingTrace.Snapshot()
	if len(snap) == 0 {
		return fallback
	}
	return snap
}
func einoPartialRunLastOutputHint() string {
return "[执行未正常结束(用户停止、超时或异常)。续跑时请基于上文已产生的工具与结果继续,勿重复已完成步骤。]\n" +
"[Run ended abnormally; continue from the trace above without repeating completed steps.]"
@@ -844,13 +936,18 @@ func einoPartialRunLastOutputHint() string {
func buildEinoRunResultFromAccumulated(
orchMode string,
runAccumulatedMsgs []adk.Message,
persistMsgs []adk.Message,
lastAssistant string,
lastPlanExecuteExecutor string,
emptyHint string,
mcpIDs []string,
partial bool,
) *RunResult {
histJSON, _ := json.Marshal(runAccumulatedMsgs)
traceForJSON := persistMsgs
if len(traceForJSON) == 0 {
traceForJSON = runAccumulatedMsgs
}
histJSON, _ := json.Marshal(traceForJSON)
cleaned := strings.TrimSpace(lastAssistant)
if orchMode == "plan_execute" {
if e := strings.TrimSpace(lastPlanExecuteExecutor); e != "" {
@@ -0,0 +1,101 @@
package multiagent
import (
"encoding/json"
"errors"
"strings"
"cyberstrike-ai/internal/agent"
"cyberstrike-ai/internal/einomcp"
"github.com/cloudwego/eino/adk"
"github.com/cloudwego/eino/schema"
)
// einoADKFilesystemToolNames matches the default ToolName* constants of
// cloudwego/eino/adk/middlewares/filesystem. "execute" is intentionally
// absent: it is already persisted by eino_execute_monitor.
var einoADKFilesystemToolNames = map[string]struct{}{
	"ls":         {},
	"read_file":  {},
	"write_file": {},
	"edit_file":  {},
	"glob":       {},
	"grep":       {},
}

// isBuiltinEinoADKFilesystemToolName reports whether name — compared
// case-insensitively with surrounding whitespace ignored — is one of the
// built-in Eino ADK filesystem middleware tools.
func isBuiltinEinoADKFilesystemToolName(name string) bool {
	key := strings.ToLower(strings.TrimSpace(name))
	_, found := einoADKFilesystemToolNames[key]
	return found
}
// toolCallArgsFromAccumulated walks the accumulated messages backwards and
// returns the parsed JSON arguments of the most recent assistant tool call
// matching toolCallID (when non-empty) and expectToolName (case-insensitive,
// when non-empty). Unparseable arguments are wrapped under "arguments_raw";
// no match, blank arguments, or a JSON null all yield an empty map.
func toolCallArgsFromAccumulated(msgs []adk.Message, toolCallID, expectToolName string) map[string]interface{} {
	wantID := strings.TrimSpace(toolCallID)
	wantName := strings.TrimSpace(expectToolName)
	for i := len(msgs) - 1; i >= 0; i-- {
		msg := msgs[i]
		if msg == nil || msg.Role != schema.Assistant || len(msg.ToolCalls) == 0 {
			continue
		}
		for j := len(msg.ToolCalls) - 1; j >= 0; j-- {
			call := msg.ToolCalls[j]
			if wantID != "" && strings.TrimSpace(call.ID) != wantID {
				continue
			}
			name := strings.TrimSpace(call.Function.Name)
			if wantName != "" && !strings.EqualFold(name, wantName) {
				continue
			}
			raw := strings.TrimSpace(call.Function.Arguments)
			if raw == "" {
				return map[string]interface{}{}
			}
			var parsed map[string]interface{}
			if err := json.Unmarshal([]byte(raw), &parsed); err != nil {
				return map[string]interface{}{"arguments_raw": raw}
			}
			if parsed == nil {
				return map[string]interface{}{}
			}
			return parsed
		}
	}
	return map[string]interface{}{}
}
// recordEinoADKFilesystemToolMonitor persists the result of an Eino ADK
// filesystem middleware tool into MCP monitoring (same chips as execute /
// the MCP bridge). No-op when agent or recorder is nil, for "execute", and
// for any name outside the built-in filesystem tool set.
func recordEinoADKFilesystemToolMonitor(
	ag *agent.Agent,
	rec einomcp.ExecutionRecorder,
	toolName string,
	toolCallID string,
	msgs []adk.Message,
	resultText string,
	isErr bool,
) {
	if ag == nil || rec == nil {
		return
	}
	name := strings.TrimSpace(toolName)
	switch {
	case name == "":
		return
	case strings.EqualFold(name, "execute"):
		return
	case !isBuiltinEinoADKFilesystemToolName(name):
		return
	}
	args := toolCallArgsFromAccumulated(msgs, toolCallID, name)
	var invErr error
	if isErr {
		if msg := strings.TrimSpace(resultText); msg != "" {
			invErr = errors.New(msg)
		} else {
			invErr = errors.New("tool error")
		}
	}
	storedName := "eino_fs::" + strings.ToLower(name)
	if id := ag.RecordLocalToolExecution(storedName, args, resultText, invErr); id != "" {
		rec(id)
	}
}
@@ -0,0 +1,84 @@
package multiagent
import (
"context"
"encoding/json"
"sync"
"github.com/cloudwego/eino/adk"
)
// modelFacingTraceHolder keeps a snapshot of the messages "about to be sent
// to the ChatModel" (i.e. after summarization / reduction / orphan pruning),
// used when persisting last_react_input so a resumed run sees the same
// context the model saw, instead of relying only on the event-stream
// appended runAccumulatedMsgs.
type modelFacingTraceHolder struct {
	mu sync.Mutex
	// msgs holds a deep-copied slice, so later in-place mutation by the
	// framework cannot corrupt the snapshot.
	msgs []adk.Message
}

// newModelFacingTraceHolder returns an empty holder ready for use.
func newModelFacingTraceHolder() *modelFacingTraceHolder {
	return &modelFacingTraceHolder{}
}
// Snapshot returns a fresh deep copy of the current snapshot (for
// serialization / persistence, so callers never share the holder's backing
// slice or hold its lock for long). A nil receiver yields nil.
func (h *modelFacingTraceHolder) Snapshot() []adk.Message {
	if h == nil {
		return nil
	}
	h.mu.Lock()
	snap := cloneADKMessagesForTrace(h.msgs)
	h.mu.Unlock()
	return snap
}
// storeFromState captures state.Messages (deep-copied) as the latest
// model-facing snapshot; nil holders/states and empty message lists are
// ignored, as are states whose clone comes back empty.
func (h *modelFacingTraceHolder) storeFromState(state *adk.ChatModelAgentState) {
	if h == nil || state == nil {
		return
	}
	if len(state.Messages) == 0 {
		return
	}
	snapshot := cloneADKMessagesForTrace(state.Messages)
	if len(snapshot) == 0 {
		return
	}
	h.mu.Lock()
	h.msgs = snapshot
	h.mu.Unlock()
}
// cloneADKMessagesForTrace deep-copies msgs via a JSON round trip; empty
// input or any marshal/unmarshal failure yields nil rather than a partial
// copy.
func cloneADKMessagesForTrace(msgs []adk.Message) []adk.Message {
	if len(msgs) == 0 {
		return nil
	}
	encoded, err := json.Marshal(msgs)
	if err != nil {
		return nil
	}
	var cloned []adk.Message
	if json.Unmarshal(encoded, &cloned) != nil {
		return nil
	}
	return cloned
}
// modelFacingTraceMiddleware must sit at the **very end** of the
// BeforeModel Handlers chain (after telemetry): at that point
// state.Messages is the final input of this LLM call.
type modelFacingTraceMiddleware struct {
	adk.BaseChatModelAgentMiddleware
	holder *modelFacingTraceHolder
}

// newModelFacingTraceMiddleware wraps holder in a middleware; a nil holder
// yields nil so callers can skip appending it.
func newModelFacingTraceMiddleware(holder *modelFacingTraceHolder) adk.ChatModelAgentMiddleware {
	if holder == nil {
		return nil
	}
	return &modelFacingTraceMiddleware{holder: holder}
}
// BeforeModelRewriteState records the final model-facing state into the
// holder and passes ctx/state through unchanged (never errors).
func (m *modelFacingTraceMiddleware) BeforeModelRewriteState(
	ctx context.Context,
	state *adk.ChatModelAgentState,
	mc *adk.ModelContext,
) (context.Context, *adk.ChatModelAgentState, error) {
	if state != nil && m.holder != nil {
		m.holder.storeFromState(state)
	}
	return ctx, state, nil
}
@@ -41,6 +41,8 @@ type PlanExecuteRootArgs struct {
FilesystemMiddleware adk.ChatModelAgentMiddleware
// PlannerReplannerRewriteHandlers applies BeforeModelRewriteState pipeline for planner/replanner input.
PlannerReplannerRewriteHandlers []adk.ChatModelAgentMiddleware
// ModelFacingTrace 可选:由 Executor Handlers 链末尾写入,供 last_react 与 summarization 后上下文对齐。
ModelFacingTrace *modelFacingTraceHolder
}
// NewPlanExecuteRoot 返回 plan → execute → replan 预置编排根节点(与 Deep / Supervisor 并列)。
@@ -101,6 +103,11 @@ func NewPlanExecuteRoot(ctx context.Context, a *PlanExecuteRootArgs) (adk.Resuma
if teleMw := newEinoModelInputTelemetryMiddleware(a.Logger, a.ModelName, a.ConversationID, "plan_execute_executor"); teleMw != nil {
execHandlers = append(execHandlers, teleMw)
}
if a.ModelFacingTrace != nil {
if capMw := newModelFacingTraceMiddleware(a.ModelFacingTrace); capMw != nil {
execHandlers = append(execHandlers, capMw)
}
}
executor, err := newPlanExecuteExecutor(ctx, &planexecute.ExecutorConfig{
Model: a.ExecModel,
ToolsConfig: a.ToolsCfg,
+26 -15
View File
@@ -13,6 +13,7 @@ import (
"cyberstrike-ai/internal/config"
"cyberstrike-ai/internal/einomcp"
"cyberstrike-ai/internal/openai"
"cyberstrike-ai/internal/reasoning"
einoopenai "github.com/cloudwego/eino-ext/components/model/openai"
"github.com/cloudwego/eino/adk"
@@ -37,6 +38,7 @@ func RunEinoSingleChatModelAgent(
history []agent.ChatMessage,
roleTools []string,
progress func(eventType, message string, data interface{}),
reasoningClient *reasoning.ClientIntent,
) (*RunResult, error) {
if appCfg == nil || ag == nil {
return nil, fmt.Errorf("eino single: 配置或 Agent 为空")
@@ -121,6 +123,7 @@ func RunEinoSingleChatModelAgent(
Model: appCfg.OpenAI.Model,
HTTPClient: httpClient,
}
reasoning.ApplyToEinoChatModelConfig(baseModelCfg, &appCfg.OpenAI, reasoningClient)
mainModel, err := einoopenai.NewChatModel(ctx, baseModelCfg)
if err != nil {
@@ -132,7 +135,9 @@ func RunEinoSingleChatModelAgent(
return nil, fmt.Errorf("eino single summarization: %w", err)
}
handlers := make([]adk.ChatModelAgentMiddleware, 0, 4)
modelFacingTrace := newModelFacingTraceHolder()
handlers := make([]adk.ChatModelAgentMiddleware, 0, 8)
if len(mainOrchestratorPre) > 0 {
handlers = append(handlers, mainOrchestratorPre...)
}
@@ -150,6 +155,9 @@ func RunEinoSingleChatModelAgent(
if teleMw := newEinoModelInputTelemetryMiddleware(logger, appCfg.OpenAI.Model, conversationID, "eino_single"); teleMw != nil {
handlers = append(handlers, teleMw)
}
if capMw := newModelFacingTraceMiddleware(modelFacingTrace); capMw != nil {
handlers = append(handlers, capMw)
}
maxIter := ma.MaxIteration
if maxIter <= 0 {
@@ -164,7 +172,7 @@ func RunEinoSingleChatModelAgent(
Tools: mainToolsForCfg,
UnknownToolsHandler: einomcp.UnknownToolReminderHandler(),
ToolCallMiddlewares: []compose.ToolMiddleware{
{Invokable: hitlToolCallMiddleware()},
hitlToolCallMiddleware(),
{Invokable: softRecoveryToolCallMiddleware()},
},
},
@@ -223,19 +231,22 @@ func RunEinoSingleChatModelAgent(
}
return runEinoADKAgentLoop(ctx, &einoADKRunLoopArgs{
OrchMode: "eino_single",
OrchestratorName: einoSingleAgentName,
ConversationID: conversationID,
Progress: progress,
Logger: logger,
SnapshotMCPIDs: snapshotMCPIDs,
StreamsMainAssistant: streamsMainAssistant,
EinoRoleTag: einoRoleTag,
CheckpointDir: ma.EinoMiddleware.CheckpointDir,
McpIDsMu: &mcpIDsMu,
McpIDs: &mcpIDs,
ToolInvokeNotify: toolInvokeNotify,
DA: chatAgent,
OrchMode: "eino_single",
OrchestratorName: einoSingleAgentName,
ConversationID: conversationID,
Progress: progress,
Logger: logger,
SnapshotMCPIDs: snapshotMCPIDs,
StreamsMainAssistant: streamsMainAssistant,
EinoRoleTag: einoRoleTag,
CheckpointDir: ma.EinoMiddleware.CheckpointDir,
McpIDsMu: &mcpIDsMu,
McpIDs: &mcpIDs,
FilesystemMonitorAgent: ag,
FilesystemMonitorRecord: recorder,
ToolInvokeNotify: toolInvokeNotify,
DA: chatAgent,
ModelFacingTrace: modelFacingTrace,
EmptyResponseMessage: "(Eino ADK single-agent session completed but no assistant text was captured. Check process details or logs.) " +
"Eino ADK 单代理会话已完成,但未捕获到助手文本输出。请查看过程详情或日志。)",
}, baseMsgs)
+1 -1
View File
@@ -214,7 +214,7 @@ func summarizeFinalizeWithRecentAssistantToolTrail(
selectedCount++
}
// 还原时间顺序
// 还原时间顺序。round 内为原始 *schema.Message 指针,保留 ReasoningContentDeepSeek 工具续跑所必需)。
selectedMsgs := make([]adk.Message, 0, 8)
for i := len(selectedRoundsReverse) - 1; i >= 0; i-- {
selectedMsgs = append(selectedMsgs, selectedRoundsReverse[i].messages...)
+54 -12
View File
@@ -8,6 +8,7 @@ import (
"github.com/cloudwego/eino/adk"
"github.com/cloudwego/eino/compose"
"github.com/cloudwego/eino/schema"
)
type hitlInterceptorKey struct{}
@@ -41,7 +42,31 @@ func WithHITLToolInterceptor(ctx context.Context, fn HITLToolInterceptor) contex
return context.WithValue(ctx, hitlInterceptorKey{}, fn)
}
func hitlToolCallMiddleware() compose.InvokableToolMiddleware {
// hitlToolCallMiddleware registers both the Invokable and the Streamable
// endpoints. Eino filesystem's execute is a streaming tool (StreamableTool);
// with only the Invokable hook installed, human-in-the-loop review would be
// bypassed and the tool would run directly.
func hitlToolCallMiddleware() compose.ToolMiddleware {
	mw := compose.ToolMiddleware{}
	mw.Invokable = hitlInvokableToolCallMiddleware()
	mw.Streamable = hitlStreamableToolCallMiddleware()
	return mw
}
// hitlClearReturnDirectlyIfTransfer clears the return-directly flags in the
// ADK state when the tool is transfer_to_agent, so that after a HITL rejection
// the ReAct loop keeps iterating instead of ending without a transfer action.
// For any other tool name it is a no-op.
func hitlClearReturnDirectlyIfTransfer(ctx context.Context, toolName string) {
	name := strings.TrimSpace(toolName)
	if !strings.EqualFold(name, adk.TransferToAgentToolName) {
		return
	}
	// Best-effort state mutation; a missing state is silently ignored.
	_ = compose.ProcessState[*adk.State](ctx, func(_ context.Context, st *adk.State) error {
		if st != nil {
			st.ReturnDirectlyToolCallID = ""
			st.HasReturnDirectly = false
			st.ReturnDirectlyEvent = nil
		}
		return nil
	})
}
func hitlInvokableToolCallMiddleware() compose.InvokableToolMiddleware {
return func(next compose.InvokableToolEndpoint) compose.InvokableToolEndpoint {
return func(ctx context.Context, input *compose.ToolInput) (*compose.ToolOutput, error) {
if input != nil {
@@ -55,17 +80,7 @@ func hitlToolCallMiddleware() compose.InvokableToolMiddleware {
// transfer_to_agent 在 Eino 中标记为 returnDirectly:工具成功后 ReAct 子图会直接 END,
// 并依赖真实工具内的 SendToolGenAction 触发移交。HITL 拒绝时不会执行真实工具,
// 若仍走 returnDirectly 分支,监督者会在无 Transfer 动作的情况下结束,模型不再迭代。
if strings.EqualFold(strings.TrimSpace(input.Name), adk.TransferToAgentToolName) {
_ = compose.ProcessState[*adk.State](ctx, func(_ context.Context, st *adk.State) error {
if st == nil {
return nil
}
st.ReturnDirectlyToolCallID = ""
st.HasReturnDirectly = false
st.ReturnDirectlyEvent = nil
return nil
})
}
hitlClearReturnDirectlyIfTransfer(ctx, input.Name)
return &compose.ToolOutput{Result: msg}, nil
}
return nil, err
@@ -79,3 +94,30 @@ func hitlToolCallMiddleware() compose.InvokableToolMiddleware {
}
}
}
// hitlStreamableToolCallMiddleware mirrors the invokable HITL hook for
// streaming tools: it looks up the interceptor in the context, substitutes a
// single-frame rejection stream when the human reviewer rejects the call,
// applies edited arguments when provided, and otherwise forwards to next.
func hitlStreamableToolCallMiddleware() compose.StreamableToolMiddleware {
	return func(next compose.StreamableToolEndpoint) compose.StreamableToolEndpoint {
		return func(ctx context.Context, input *compose.ToolInput) (*compose.StreamToolOutput, error) {
			fn, ok := ctx.Value(hitlInterceptorKey{}).(HITLToolInterceptor)
			if input == nil || !ok || fn == nil {
				return next(ctx, input)
			}
			edited, err := fn(ctx, input.Name, input.Arguments)
			if err != nil {
				if !IsHumanRejectError(err) {
					return nil, err
				}
				msg := fmt.Sprintf("[HITL Reject] Tool '%s' was rejected by human reviewer. Reason: %s\nPlease adjust parameters/plan and continue without this call.",
					input.Name, strings.TrimSpace(err.Error()))
				// transfer_to_agent is marked returnDirectly; clear it so the
				// model keeps iterating after a rejected transfer.
				hitlClearReturnDirectlyIfTransfer(ctx, input.Name)
				return &compose.StreamToolOutput{
					Result: schema.StreamReaderFromArray([]string{msg}),
				}, nil
			}
			if edited != "" {
				input.Arguments = edited
			}
			return next(ctx, input)
		}
	}
}
+7
View File
@@ -0,0 +1,7 @@
package multiagent
import "errors"
// ErrInterruptContinue is used as a context.CancelCause: when the user chooses
// "interrupt and continue" and no MCP tool call is currently in flight, the
// current reasoning/stream is cancelled and, within the same session task, the
// next round is automatically resumed carrying the user's supplementary note
// (similar to a Hermes-style human turn).
var ErrInterruptContinue = errors.New("agent interrupt: continue with user-supplied context")
+52
View File
@@ -0,0 +1,52 @@
package multiagent
import (
"encoding/json"
"fmt"
"strings"
)
// AggregatedReasoningFromTraceJSON concatenates non-empty assistant `reasoning_content`
// fields from last_react-style JSON (slice of message objects) in document order.
// Used to persist on the single assistant bubble row for audit and for GetMessages fallback
// when the full trace JSON is unavailable. For strict per-message replay, prefer last_react_input.
// Malformed or empty input yields "".
func AggregatedReasoningFromTraceJSON(traceJSON string) string {
	trimmed := strings.TrimSpace(traceJSON)
	if trimmed == "" {
		return ""
	}
	var msgs []map[string]interface{}
	if json.Unmarshal([]byte(trimmed), &msgs) != nil {
		return ""
	}
	parts := make([]string, 0, len(msgs))
	for _, msg := range msgs {
		role, _ := msg["role"].(string)
		if !strings.EqualFold(strings.TrimSpace(role), "assistant") {
			continue
		}
		if rc := reasoningContentFromMessageMap(msg); rc != "" {
			parts = append(parts, rc)
		}
	}
	return strings.Join(parts, "\n")
}

// reasoningContentFromMessageMap extracts a trimmed reasoning_content value
// from a decoded message object; non-string values are stringified via fmt.
func reasoningContentFromMessageMap(m map[string]interface{}) string {
	if m == nil {
		return ""
	}
	v, present := m["reasoning_content"]
	if !present || v == nil {
		return ""
	}
	if s, isString := v.(string); isString {
		return strings.TrimSpace(s)
	}
	return strings.TrimSpace(fmt.Sprint(v))
}
@@ -0,0 +1,20 @@
package multiagent
import "testing"
// TestAggregatedReasoningFromTraceJSON verifies that assistant
// reasoning_content values are concatenated in document order (user/tool rows
// ignored) and that empty / empty-array inputs yield "".
func TestAggregatedReasoningFromTraceJSON(t *testing.T) {
	const j = `[
{"role":"user","content":"hi"},
{"role":"assistant","content":"c1","reasoning_content":"r1","tool_calls":[{"id":"1","type":"function","function":{"name":"f","arguments":"{}"}}]},
{"role":"tool","tool_call_id":"1","content":"out"},
{"role":"assistant","content":"c2","reasoning_content":"r2"}
]`
	got := AggregatedReasoningFromTraceJSON(j)
	want := "r1\nr2"
	if got != want {
		t.Fatalf("got %q want %q", got, want)
	}
	if AggregatedReasoningFromTraceJSON("") != "" || AggregatedReasoningFromTraceJSON("[]") != "" {
		t.Fatal("empty expected")
	}
}
+87 -71
View File
@@ -17,6 +17,7 @@ import (
"cyberstrike-ai/internal/config"
"cyberstrike-ai/internal/einomcp"
"cyberstrike-ai/internal/openai"
"cyberstrike-ai/internal/reasoning"
einoopenai "github.com/cloudwego/eino-ext/components/model/openai"
"github.com/cloudwego/eino/adk"
@@ -48,6 +49,7 @@ type toolCallPendingInfo struct {
// RunDeepAgent 使用 Eino 多代理预置编排执行一轮对话(deep / plan_execute / supervisor;流式事件通过 progress 回调输出)。
// orchestrationOverride 非空时优先(如聊天/WebShell 请求体);否则用 multi_agent.orchestration(遗留 yaml);皆空则按 deep。
// reasoningClient 来自 ChatRequest.reasoning;可为 nil(机器人/批量等走全局 openai.reasoning)。
func RunDeepAgent(
ctx context.Context,
appCfg *config.Config,
@@ -61,6 +63,7 @@ func RunDeepAgent(
progress func(eventType, message string, data interface{}),
agentsMarkdownDir string,
orchestrationOverride string,
reasoningClient *reasoning.ClientIntent,
) (*RunResult, error) {
if appCfg == nil || ma == nil || ag == nil {
return nil, fmt.Errorf("multiagent: 配置或 Agent 为空")
@@ -163,6 +166,7 @@ func RunDeepAgent(
Model: appCfg.OpenAI.Model,
HTTPClient: httpClient,
}
reasoning.ApplyToEinoChatModelConfig(baseModelCfg, &appCfg.OpenAI, reasoningClient)
deepMaxIter := ma.MaxIteration
if deepMaxIter <= 0 {
@@ -285,7 +289,7 @@ func RunDeepAgent(
Tools: subToolsForCfg,
UnknownToolsHandler: einomcp.UnknownToolReminderHandler(),
ToolCallMiddlewares: []compose.ToolMiddleware{
{Invokable: hitlToolCallMiddleware()},
hitlToolCallMiddleware(),
{Invokable: softRecoveryToolCallMiddleware()},
},
},
@@ -311,6 +315,8 @@ func RunDeepAgent(
return nil, fmt.Errorf("多代理主 summarization 中间件: %w", err)
}
modelFacingTrace := newModelFacingTraceHolder()
// 与 deep.Config.Name / supervisor 主代理 Name 一致。
orchestratorName := "cyberstrike-deep"
orchDescription := "Coordinates specialist agents and MCP tools for authorized security testing."
@@ -407,6 +413,9 @@ func RunDeepAgent(
if teleMw := newEinoModelInputTelemetryMiddleware(logger, appCfg.OpenAI.Model, conversationID, "deep_orchestrator"); teleMw != nil {
deepHandlers = append(deepHandlers, teleMw)
}
if capMw := newModelFacingTraceMiddleware(modelFacingTrace); capMw != nil {
deepHandlers = append(deepHandlers, capMw)
}
supHandlers := []adk.ChatModelAgentMiddleware{}
if len(mainOrchestratorPre) > 0 {
@@ -420,13 +429,16 @@ func RunDeepAgent(
if teleMw := newEinoModelInputTelemetryMiddleware(logger, appCfg.OpenAI.Model, conversationID, "supervisor_orchestrator"); teleMw != nil {
supHandlers = append(supHandlers, teleMw)
}
if capMw := newModelFacingTraceMiddleware(modelFacingTrace); capMw != nil {
supHandlers = append(supHandlers, capMw)
}
mainToolsCfg := adk.ToolsConfig{
ToolsNodeConfig: compose.ToolsNodeConfig{
Tools: mainToolsForCfg,
UnknownToolsHandler: einomcp.UnknownToolReminderHandler(),
ToolCallMiddlewares: []compose.ToolMiddleware{
{Invokable: hitlToolCallMiddleware()},
hitlToolCallMiddleware(),
{Invokable: softRecoveryToolCallMiddleware()},
},
},
@@ -465,6 +477,7 @@ func RunDeepAgent(
ExecPreMiddlewares: mainOrchestratorPre,
SkillMiddleware: einoSkillMW,
FilesystemMiddleware: peFsMw,
ModelFacingTrace: modelFacingTrace,
PlannerReplannerRewriteHandlers: []adk.ChatModelAgentMiddleware{
mainSumMw,
// 孤儿 tool 消息兜底:必须挂在 summarization 之后、telemetry 之前。
@@ -556,96 +569,99 @@ func RunDeepAgent(
}
return runEinoADKAgentLoop(ctx, &einoADKRunLoopArgs{
OrchMode: orchMode,
OrchestratorName: orchestratorName,
ConversationID: conversationID,
Progress: progress,
Logger: logger,
SnapshotMCPIDs: snapshotMCPIDs,
StreamsMainAssistant: streamsMainAssistant,
EinoRoleTag: einoRoleTag,
CheckpointDir: ma.EinoMiddleware.CheckpointDir,
McpIDsMu: &mcpIDsMu,
McpIDs: &mcpIDs,
ToolInvokeNotify: toolInvokeNotify,
DA: da,
OrchMode: orchMode,
OrchestratorName: orchestratorName,
ConversationID: conversationID,
Progress: progress,
Logger: logger,
SnapshotMCPIDs: snapshotMCPIDs,
StreamsMainAssistant: streamsMainAssistant,
EinoRoleTag: einoRoleTag,
CheckpointDir: ma.EinoMiddleware.CheckpointDir,
McpIDsMu: &mcpIDsMu,
McpIDs: &mcpIDs,
FilesystemMonitorAgent: ag,
FilesystemMonitorRecord: recorder,
ToolInvokeNotify: toolInvokeNotify,
DA: da,
ModelFacingTrace: modelFacingTrace,
EmptyResponseMessage: "(Eino multi-agent orchestration completed but no assistant text was captured. Check process details or logs.) " +
"(Eino 多代理编排已完成,但未捕获到助手文本输出。请查看过程详情或日志。)",
}, baseMsgs)
}
// chatToolCallsToSchema converts persisted agent.ToolCall entries into Eino
// schema.ToolCall values. Entries with a blank ID are dropped, a missing type
// defaults to "function", and structured arguments are re-serialized to a
// JSON string (empty on marshal failure). Returns nil for empty input.
func chatToolCallsToSchema(tcs []agent.ToolCall) []schema.ToolCall {
	if len(tcs) == 0 {
		return nil
	}
	converted := make([]schema.ToolCall, 0, len(tcs))
	for _, call := range tcs {
		if strings.TrimSpace(call.ID) == "" {
			continue
		}
		args := ""
		if call.Function.Arguments != nil {
			if raw, err := json.Marshal(call.Function.Arguments); err == nil {
				args = string(raw)
			}
		}
		callType := call.Type
		if callType == "" {
			callType = "function"
		}
		converted = append(converted, schema.ToolCall{
			ID:   call.ID,
			Type: callType,
			Function: schema.FunctionCall{
				Name:      call.Function.Name,
				Arguments: args,
			},
		})
	}
	return converted
}
// historyToMessages 将轨迹恢复的 ChatMessage 转为 Eino ADK 消息:**不裁剪条数、不按 token 预算截断**,
// 并保留 user / assistant(含仅 tool_calls/ tool,与库中 last_react 轨迹一致。
func historyToMessages(history []agent.ChatMessage, appCfg *config.Config, mwCfg *config.MultiAgentEinoMiddlewareConfig) []adk.Message {
_ = appCfg
_ = mwCfg
if len(history) == 0 {
return nil
}
// Keep a bounded tail first; then enforce a token budget.
const maxHistoryMessages = 200
start := 0
if len(history) > maxHistoryMessages {
start = len(history) - maxHistoryMessages
}
raw := make([]adk.Message, 0, len(history[start:]))
for _, h := range history[start:] {
switch h.Role {
raw := make([]adk.Message, 0, len(history))
for _, h := range history {
role := strings.ToLower(strings.TrimSpace(h.Role))
switch role {
case "user":
if strings.TrimSpace(h.Content) != "" {
raw = append(raw, schema.UserMessage(h.Content))
}
case "assistant":
if strings.TrimSpace(h.Content) == "" && len(h.ToolCalls) > 0 {
toolSchema := chatToolCallsToSchema(h.ToolCalls)
hasRC := strings.TrimSpace(h.ReasoningContent) != ""
if len(toolSchema) > 0 || strings.TrimSpace(h.Content) != "" || hasRC {
am := schema.AssistantMessage(h.Content, toolSchema)
if hasRC {
am.ReasoningContent = strings.TrimSpace(h.ReasoningContent)
}
raw = append(raw, am)
}
case "tool":
if strings.TrimSpace(h.ToolCallID) == "" && strings.TrimSpace(h.Content) == "" {
continue
}
if strings.TrimSpace(h.Content) != "" {
raw = append(raw, schema.AssistantMessage(h.Content, nil))
var opts []schema.ToolMessageOption
if tn := strings.TrimSpace(h.ToolName); tn != "" {
opts = append(opts, schema.WithToolName(tn))
}
raw = append(raw, schema.ToolMessage(h.Content, h.ToolCallID, opts...))
default:
continue
}
}
if len(raw) == 0 {
return raw
}
maxTotal := 120000
modelName := "gpt-4o"
if appCfg != nil {
if appCfg.OpenAI.MaxTotalTokens > 0 {
maxTotal = appCfg.OpenAI.MaxTotalTokens
}
if m := strings.TrimSpace(appCfg.OpenAI.Model); m != "" {
modelName = m
}
}
ratio := 0.35
if mwCfg != nil {
ratio = mwCfg.HistoryInputBudgetRatioEffective()
}
budget := int(float64(maxTotal) * ratio)
if budget < 4096 {
budget = 4096
}
tc := agent.NewTikTokenCounter()
outRev := make([]adk.Message, 0, len(raw))
used := 0
for i := len(raw) - 1; i >= 0; i-- {
msg := raw[i]
n, err := tc.Count(modelName, string(msg.Role)+"\n"+msg.Content)
if err != nil {
n = (len(msg.Content) + 3) / 4
}
if n <= 0 {
n = 1
}
if used+n > budget {
break
}
used += n
outRev = append(outRev, msg)
}
out := make([]adk.Message, 0, len(outRev))
for i := len(outRev) - 1; i >= 0; i-- {
out = append(out, outRev[i])
}
return out
return raw
}
// mergeStreamingToolCallFragments 将流式多帧的 ToolCall 按 index 合并 arguments(与 schema.concatToolCalls 行为一致)。
@@ -0,0 +1,22 @@
package multiagent
import (
"testing"
"cyberstrike-ai/internal/agent"
)
// TestHistoryToMessagesPreservesReasoningContent ensures historyToMessages
// keeps an assistant message's ReasoningContent and content intact when
// rebuilding ADK messages from persisted chat history.
func TestHistoryToMessagesPreservesReasoningContent(t *testing.T) {
	h := []agent.ChatMessage{
		{Role: "user", Content: "u"},
		{Role: "assistant", Content: "c", ReasoningContent: "r1", ToolCalls: []agent.ToolCall{{ID: "t1", Type: "function", Function: agent.FunctionCall{Name: "f", Arguments: map[string]interface{}{}}}}},
	}
	msgs := historyToMessages(h, nil, nil)
	if len(msgs) != 2 {
		t.Fatalf("len=%d", len(msgs))
	}
	am := msgs[1]
	if am.ReasoningContent != "r1" || am.Content != "c" {
		t.Fatalf("got reasoning=%q content=%q", am.ReasoningContent, am.Content)
	}
}
+112 -1
View File
@@ -9,6 +9,9 @@ package openai
// Stream: Claude SSE (event: content_block_delta / message_delta) → OpenAI SSE 格式
// Auth: Bearer → x-api-key
// Tools: OpenAI tools[] → Claude tools[] (input_schema)
//
// Extended thinking: 顶层 `thinking` 从 OpenAI 请求体透传;响应中 `thinking` block 映射为
// `reasoning_content`(可读前缀 + 内部 JSON 尾缀以保留 signature,供多轮工具续跑;UI 用 openai.DisplayReasoningContent 剥离)。
import (
"bufio"
@@ -38,6 +41,7 @@ type claudeRequest struct {
Messages []claudeMessage `json:"messages"`
Tools []claudeTool `json:"tools,omitempty"`
Stream bool `json:"stream,omitempty"`
Thinking json.RawMessage `json:"thinking,omitempty"`
}
type claudeMessage struct {
@@ -76,6 +80,10 @@ type claudeContentBlock struct {
// text block
Text string `json:"text,omitempty"`
// thinking block (extended thinking)
Thinking string `json:"thinking,omitempty"`
Signature string `json:"signature,omitempty"`
// tool_use block (assistant 返回)
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
@@ -176,7 +184,13 @@ func convertOpenAIToClaude(payload interface{}) (*claudeRequest, error) {
// tool_calls (assistant 消息中包含工具调用)
if role == "assistant" {
rc, _ := mm["reasoning_content"].(string)
_, thinkingReplay := parseClaudeReasoningAssistantBlocks(rc)
var blocks []claudeContentBlock
for _, tb := range thinkingReplay {
blocks = append(blocks, tb)
}
if content != "" {
blocks = append(blocks, claudeContentBlock{Type: "text", Text: content})
}
@@ -290,6 +304,13 @@ func convertOpenAIToClaude(payload interface{}) (*claudeRequest, error) {
}
}
// Extended thinking (Anthropic top-level); merged from Eino ExtraFields / admin extras.
if th, ok := oai["thinking"]; ok && th != nil {
if raw, err := json.Marshal(th); err == nil && len(raw) > 0 && string(raw) != "null" {
req.Thinking = json.RawMessage(raw)
}
}
return req, nil
}
@@ -318,9 +339,12 @@ func claudeToOpenAIResponseJSON(claudeBody []byte) ([]byte, error) {
var textContent string
var toolCalls []interface{}
var thinkingBlocks []claudeContentBlock
for _, block := range cr.Content {
switch block.Type {
case "thinking":
thinkingBlocks = append(thinkingBlocks, block)
case "text":
textContent += block.Text
case "tool_use":
@@ -344,6 +368,18 @@ func claudeToOpenAIResponseJSON(claudeBody []byte) ([]byte, error) {
if len(toolCalls) > 0 {
message["tool_calls"] = toolCalls
}
if len(thinkingBlocks) > 0 {
var parts []string
for _, tb := range thinkingBlocks {
if strings.TrimSpace(tb.Thinking) != "" {
parts = append(parts, tb.Thinking)
}
}
rc := appendClaudeReasoningRoundTrip(strings.Join(parts, "\n\n"), thinkingBlocks)
if rc != "" {
message["reasoning_content"] = rc
}
}
choice := map[string]interface{}{
"index": 0,
@@ -901,8 +937,16 @@ func (rt *claudeRoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
reader := bufio.NewReader(resp.Body)
blockToToolIndex := make(map[int]int)
blockIndexToType := make(map[int]string)
nextToolIndex := 0
type thinkingAcc struct {
text strings.Builder
sig strings.Builder
}
thinkingByIndex := make(map[int]*thinkingAcc)
var finishedThinking []claudeContentBlock
for {
line, readErr := reader.ReadString('\n')
if readErr != nil {
@@ -947,6 +991,11 @@ func (rt *claudeRoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
blockIdx := int(blockIdxFlt)
cb, _ := event["content_block"].(map[string]interface{})
bt, _ := cb["type"].(string)
blockIndexToType[blockIdx] = bt
if bt == "thinking" {
thinkingByIndex[blockIdx] = &thinkingAcc{}
}
if bt == "tool_use" {
id, _ := cb["id"].(string)
@@ -986,7 +1035,35 @@ func (rt *claudeRoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
delta, _ := event["delta"].(map[string]interface{})
dt, _ := delta["type"].(string)
if dt == "text_delta" {
if dt == "thinking_delta" {
tPart, _ := delta["thinking"].(string)
if tPart != "" {
if acc := thinkingByIndex[blockIdx]; acc != nil {
acc.text.WriteString(tPart)
}
oaiChunk := map[string]interface{}{
"choices": []map[string]interface{}{
{
"delta": map[string]interface{}{
"reasoning_content": tPart,
},
},
},
}
b, _ := json.Marshal(oaiChunk)
if !writeLine("data: " + string(b) + "\n\n") {
pw.Close()
return
}
}
} else if dt == "signature_delta" {
sigPart, _ := delta["signature"].(string)
if sigPart != "" {
if acc := thinkingByIndex[blockIdx]; acc != nil {
acc.sig.WriteString(sigPart)
}
}
} else if dt == "text_delta" {
text, _ := delta["text"].(string)
oaiChunk := map[string]interface{}{
"choices": []map[string]interface{}{
@@ -1031,6 +1108,21 @@ func (rt *claudeRoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
}
}
case "content_block_stop":
blockIdxFlt, _ := event["index"].(float64)
blockIdx := int(blockIdxFlt)
bt := blockIndexToType[blockIdx]
if bt == "thinking" {
if acc := thinkingByIndex[blockIdx]; acc != nil {
finishedThinking = append(finishedThinking, claudeContentBlock{
Type: "thinking",
Thinking: acc.text.String(),
Signature: acc.sig.String(),
})
delete(thinkingByIndex, blockIdx)
}
}
case "message_delta":
d, _ := event["delta"].(map[string]interface{})
if sr, ok := d["stop_reason"].(string); ok {
@@ -1051,6 +1143,25 @@ func (rt *claudeRoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
}
case "message_stop":
if len(finishedThinking) > 0 {
suffix := appendClaudeReasoningRoundTrip("", finishedThinking)
if strings.TrimSpace(suffix) != "" {
oaiChunk := map[string]interface{}{
"choices": []map[string]interface{}{
{
"delta": map[string]interface{}{
"reasoning_content": suffix,
},
},
},
}
b, _ := json.Marshal(oaiChunk)
if !writeLine("data: " + string(b) + "\n\n") {
pw.Close()
return
}
}
}
writeLine("data: [DONE]\n\n")
pw.Close()
return
@@ -0,0 +1,81 @@
package openai
import (
"encoding/json"
"strings"
)
// claudeReasoningRoundTripSep separates human-readable reasoning from a JSON payload of
// Anthropic thinking blocks (with signatures) for multi-turn extended thinking + tools.
// Not shown in UI (see DisplayReasoningContent).
const claudeReasoningRoundTripSep = "\n---CSAI_CLAUDE_THINKING_BLOCKS---\n"

// DisplayReasoningContent returns reasoning text suitable for the UI (strips internal
// Claude round-trip JSON suffix). Safe for DeepSeek/plain reasoning strings (no-op).
func DisplayReasoningContent(s string) string {
	trimmed := strings.TrimSpace(s)
	if trimmed == "" {
		return ""
	}
	if idx := strings.LastIndex(trimmed, claudeReasoningRoundTripSep); idx >= 0 {
		return strings.TrimSpace(trimmed[:idx])
	}
	return trimmed
}
// appendClaudeReasoningRoundTrip appends a machine-readable JSON suffix of
// thinking blocks (type/thinking/signature) to the human-readable display
// text, separated by claudeReasoningRoundTripSep. Non-thinking blocks are
// skipped; when nothing survives, or marshaling fails, the trimmed display
// text is returned unchanged.
func appendClaudeReasoningRoundTrip(display string, blocks []claudeContentBlock) string {
	var payload []map[string]string
	for _, blk := range blocks {
		if blk.Type != "thinking" {
			continue
		}
		payload = append(payload, map[string]string{
			"type":      blk.Type,
			"thinking":  blk.Thinking,
			"signature": blk.Signature,
		})
	}
	trimmed := strings.TrimSpace(display)
	if len(payload) == 0 {
		return trimmed
	}
	js, err := json.Marshal(payload)
	if err != nil {
		return trimmed
	}
	if trimmed == "" {
		return claudeReasoningRoundTripSep + string(js)
	}
	return trimmed + claudeReasoningRoundTripSep + string(js)
}
// parseClaudeReasoningAssistantBlocks extracts Anthropic thinking blocks from an OpenAI-style
// reasoning_content string. When no suffix is present, blocks is nil (caller must not invent signatures).
// A malformed JSON suffix is treated as plain display text.
func parseClaudeReasoningAssistantBlocks(reasoningContent string) (display string, blocks []claudeContentBlock) {
	rc := strings.TrimSpace(reasoningContent)
	if rc == "" {
		return "", nil
	}
	sep := strings.LastIndex(rc, claudeReasoningRoundTripSep)
	if sep < 0 {
		return rc, nil
	}
	jsonPart := strings.TrimSpace(rc[sep+len(claudeReasoningRoundTripSep):])
	var decoded []struct {
		Type      string `json:"type"`
		Thinking  string `json:"thinking"`
		Signature string `json:"signature"`
	}
	if err := json.Unmarshal([]byte(jsonPart), &decoded); err != nil {
		// Unparseable suffix: hand back the whole string as display text.
		return rc, nil
	}
	display = strings.TrimSpace(rc[:sep])
	for _, d := range decoded {
		if d.Type != "thinking" {
			continue
		}
		blocks = append(blocks, claudeContentBlock{Type: "thinking", Thinking: d.Thinking, Signature: d.Signature})
	}
	return display, blocks
}
@@ -0,0 +1,102 @@
package openai
import (
"encoding/json"
"strings"
"testing"
)
// TestDisplayReasoningContent verifies that the internal Claude round-trip
// JSON suffix is stripped for UI display and plain strings pass through.
func TestDisplayReasoningContent(t *testing.T) {
	raw := "hello" + claudeReasoningRoundTripSep + `[{"type":"thinking","thinking":"x","signature":"sig"}]`
	if d := DisplayReasoningContent(raw); d != "hello" {
		t.Fatalf("got %q", d)
	}
	if DisplayReasoningContent("plain") != "plain" {
		t.Fatal()
	}
}
// TestAppendParseClaudeReasoningRoundTrip checks that thinking blocks survive
// an append → parse round trip with display text and signatures intact.
func TestAppendParseClaudeReasoningRoundTrip(t *testing.T) {
	blocks := []claudeContentBlock{
		{Type: "thinking", Thinking: "a", Signature: "s1"},
		{Type: "thinking", Thinking: "b", Signature: "s2"},
	}
	s := appendClaudeReasoningRoundTrip("sum", blocks)
	if !strings.Contains(s, claudeReasoningRoundTripSep) {
		t.Fatal("missing sep")
	}
	display, back := parseClaudeReasoningAssistantBlocks(s)
	if display != "sum" || len(back) != 2 {
		t.Fatalf("display=%q len=%d", display, len(back))
	}
	if back[0].Signature != "s1" || back[1].Thinking != "b" {
		t.Fatalf("%+v", back)
	}
}
// TestConvertOpenAIToClaude_AssistantReasoningReplay verifies that an
// assistant message carrying a round-trip reasoning_content suffix is replayed
// to Claude as a leading thinking block (with its signature) followed by the
// text content.
func TestConvertOpenAIToClaude_AssistantReasoningReplay(t *testing.T) {
	rc := appendClaudeReasoningRoundTrip("vis", []claudeContentBlock{
		{Type: "thinking", Thinking: "t1", Signature: "sig1"},
	})
	payload := map[string]interface{}{
		"model": "claude-3-5-sonnet-latest",
		"messages": []interface{}{
			map[string]interface{}{
				"role":              "assistant",
				"content":           "out",
				"reasoning_content": rc,
			},
		},
	}
	req, err := convertOpenAIToClaude(payload)
	if err != nil {
		t.Fatal(err)
	}
	if len(req.Messages) != 1 {
		t.Fatalf("messages=%d", len(req.Messages))
	}
	blocks := req.Messages[0].Content.Blocks
	if len(blocks) < 2 {
		t.Fatalf("blocks=%d", len(blocks))
	}
	// Thinking block must come first so Anthropic accepts the replay.
	if blocks[0].Type != "thinking" || blocks[0].Signature != "sig1" {
		t.Fatalf("first block %+v", blocks[0])
	}
	foundText := false
	for _, b := range blocks {
		if b.Type == "text" && b.Text == "out" {
			foundText = true
		}
	}
	if !foundText {
		t.Fatalf("blocks=%+v", blocks)
	}
}
// TestClaudeToOpenAIResponseJSON_Thinking verifies that a Claude response with
// a thinking block maps to an OpenAI-style message whose reasoning_content
// contains both the readable text and the round-trip suffix, while the text
// block becomes the plain content.
func TestClaudeToOpenAIResponseJSON_Thinking(t *testing.T) {
	claudeBody := []byte(`{
"id":"msg_1","type":"message","role":"assistant","model":"x","stop_reason":"end_turn",
"content":[
{"type":"thinking","thinking":"step","signature":"sigx"},
{"type":"text","text":"hi"}
]
}`)
	oai, err := claudeToOpenAIResponseJSON(claudeBody)
	if err != nil {
		t.Fatal(err)
	}
	var wrap map[string]interface{}
	if err := json.Unmarshal(oai, &wrap); err != nil {
		t.Fatal(err)
	}
	choices := wrap["choices"].([]interface{})
	ch0 := choices[0].(map[string]interface{})
	msg := ch0["message"].(map[string]interface{})
	rc, _ := msg["reasoning_content"].(string)
	if !strings.Contains(rc, "step") || !strings.Contains(rc, claudeReasoningRoundTripSep) {
		t.Fatalf("reasoning_content=%q", rc)
	}
	if msg["content"] != "hi" {
		t.Fatal()
	}
}
+250
View File
@@ -0,0 +1,250 @@
// Package reasoning maps user/config intent to CloudWeGo Eino OpenAI ChatModel fields
// (ReasoningEffort, ExtraFields such as thinking / reasoning_effort / output_config).
package reasoning
import (
"strings"
"cyberstrike-ai/internal/config"
einoopenai "github.com/cloudwego/eino-ext/components/model/openai"
)
// ClientIntent is optional per-request override from ChatRequest.reasoning.
type ClientIntent struct {
	// Mode: "" or "default" defer to server config; otherwise passed through
	// (effectiveMode). Downstream code switches on "off"/"auto"/"on".
	Mode string
	// Effort: one of low/medium/high/max; anything else is ignored
	// (normalizeEffort collapses unknown values to "").
	Effort string
}
// wireProfile identifies which wire-format family reasoning options are
// serialized as for the upstream API (see ApplyToEinoChatModelConfig).
type wireProfile int

const (
	wireNone         wireProfile = iota // send no reasoning fields
	wireClaude                          // Anthropic: handled separately via top-level `thinking`
	wireDeepseek                        // DeepSeek: ExtraFields thinking / reasoning_effort
	wireOpenAI                          // OpenAI-compatible: typed ReasoningEffort (+ "max" via ExtraFields)
	wireOutputConfig                    // output_config-style effort mapping
)
// ApplyToEinoChatModelConfig merges reasoning-related options into cfg.
// Precondition: cfg already has APIKey, BaseURL, Model, HTTPClient set.
//
// client may be nil (e.g. bots/batch callers) — the server-side config then
// applies alone; client overrides only take effect when the server config
// allows them (AllowClientReasoningEffective).
func ApplyToEinoChatModelConfig(cfg *einoopenai.ChatModelConfig, oa *config.OpenAIConfig, client *ClientIntent) {
	if cfg == nil || oa == nil {
		return
	}
	sr := &oa.Reasoning
	allowClient := sr.AllowClientReasoningEffective()
	mode := effectiveMode(sr, client, allowClient)
	// Claude (Anthropic): merge admin extras first; optional extended thinking maps to top-level `thinking`
	// (see internal/openai convertOpenAIToClaude). DeepSeek/OpenAI-style fields are not sent.
	// NOTE(review): for Claude the admin ExtraRequestFields are merged even when
	// mode == "off", whereas the non-Claude path below returns on "off" before
	// merging — confirm this asymmetry is intentional.
	if strings.EqualFold(strings.TrimSpace(oa.Provider), "claude") ||
		strings.EqualFold(strings.TrimSpace(oa.Provider), "anthropic") {
		if len(sr.ExtraRequestFields) > 0 {
			if cfg.ExtraFields == nil {
				cfg.ExtraFields = make(map[string]any)
			}
			for k, v := range sr.ExtraRequestFields {
				cfg.ExtraFields[k] = v
			}
		}
		if mode == "off" {
			return
		}
		applyClaudeExtendedThinking(cfg, mode, effectiveEffort(sr, client, allowClient), oa.Model)
		return
	}
	if mode == "off" {
		return
	}
	effort := effectiveEffort(sr, client, allowClient)
	prof := resolveWireProfile(oa, sr)
	// Admin-defined extra root fields (merged first; automatic keys may follow).
	if len(sr.ExtraRequestFields) > 0 {
		if cfg.ExtraFields == nil {
			cfg.ExtraFields = make(map[string]any)
		}
		for k, v := range sr.ExtraRequestFields {
			cfg.ExtraFields[k] = v
		}
	}
	switch prof {
	case wireClaude, wireNone:
		// Claude already handled above; wireNone deliberately sends nothing.
		return
	case wireDeepseek:
		applyDeepseek(cfg, mode, effort)
	case wireOutputConfig:
		applyOutputConfigEffort(cfg, mode, effort)
	default: // wireOpenAI
		applyOpenAICompat(cfg, mode, effort)
	}
}
// applyClaudeExtendedThinking sets Anthropic Messages API `thinking` when absent from ExtraRequestFields.
// Uses adaptive + summarized display by default (per Anthropic guidance for Claude 4.x); Sonnet 3.7 uses enabled+budget.
//
// An existing "thinking" key in cfg.ExtraFields (admin-supplied) always wins —
// this function never overwrites it. Model detection is by case-insensitive
// substring match on the model name.
func applyClaudeExtendedThinking(cfg *einoopenai.ChatModelConfig, mode, effort, model string) {
	if cfg == nil || mode == "off" {
		return
	}
	if cfg.ExtraFields == nil {
		cfg.ExtraFields = make(map[string]any)
	}
	// Admin override takes precedence; do not clobber.
	if _, exists := cfg.ExtraFields["thinking"]; exists {
		return
	}
	m := strings.ToLower(strings.TrimSpace(model))
	// Default for current Claude models: adaptive thinking, summarized display.
	thinking := map[string]any{
		"type":    "adaptive",
		"display": "summarized",
	}
	// Sonnet 3.7: manual extended thinking is the documented path.
	if strings.Contains(m, "claude-3-7-sonnet") || strings.Contains(m, "3-7-sonnet") || strings.Contains(m, "sonnet-3.7") {
		thinking = map[string]any{
			"type":          "enabled",
			"budget_tokens": 10000,
			"display":       "summarized",
		}
	}
	// Opus 4.7+: manual enabled+budget rejected — keep adaptive only.
	if strings.Contains(m, "opus-4-7") || strings.Contains(m, "opus-4.7") {
		thinking = map[string]any{
			"type":    "adaptive",
			"display": "summarized",
		}
	}
	_ = effort // reserved: map to Anthropic effort / output_config when API stabilizes in one place
	cfg.ExtraFields["thinking"] = thinking
}
// effectiveMode resolves the reasoning mode. The server-configured mode is the
// baseline ("" / "default" normalize to "auto"); a client-supplied mode only
// wins when client overrides are allowed and it is not "" / "default".
func effectiveMode(sr *config.OpenAIReasoningConfig, client *ClientIntent, allowClient bool) string {
	mode := strings.ToLower(strings.TrimSpace(sr.ModeEffective()))
	switch mode {
	case "", "default":
		mode = "auto"
	}
	if allowClient && client != nil {
		switch cm := strings.ToLower(strings.TrimSpace(client.Mode)); cm {
		case "", "default":
			// keep server mode
		default:
			mode = cm
		}
	}
	return mode
}
// effectiveEffort resolves the effort level, preferring a valid client value
// (when client overrides are allowed) over the server default. Values are
// normalized via normalizeEffort; unknown inputs collapse to "".
func effectiveEffort(sr *config.OpenAIReasoningConfig, client *ClientIntent, allowClient bool) string {
	serverEffort := normalizeEffort(sr.Effort)
	if allowClient && client != nil {
		if clientEffort := normalizeEffort(client.Effort); clientEffort != "" {
			return clientEffort
		}
	}
	return serverEffort
}
// normalizeEffort lower-cases and trims s, returning it only when it is one
// of the recognized effort levels; anything else collapses to "".
func normalizeEffort(s string) string {
	switch e := strings.ToLower(strings.TrimSpace(s)); e {
	case "low", "medium", "high", "max":
		return e
	}
	return ""
}
// resolveWireProfile picks the wire-format profile for reasoning options.
// Provider "claude" always maps to the Anthropic profile. Otherwise the
// configured profile wins; "auto" (or empty) sniffs DeepSeek from the base
// URL or model name, and anything unrecognized falls back to the
// OpenAI-compatible wire.
func resolveWireProfile(oa *config.OpenAIConfig, sr *config.OpenAIReasoningConfig) wireProfile {
	if strings.EqualFold(strings.TrimSpace(oa.Provider), "claude") {
		return wireClaude
	}
	switch strings.ToLower(strings.TrimSpace(sr.ProfileEffective())) {
	case "output_config", "output_config_effort":
		return wireOutputConfig
	case "deepseek", "deepseek_compat":
		return wireDeepseek
	case "auto", "":
		if strings.Contains(strings.ToLower(oa.BaseURL), "deepseek") ||
			strings.Contains(strings.ToLower(oa.Model), "deepseek") {
			return wireDeepseek
		}
		return wireOpenAI
	default:
		// Covers "openai", "openai_compat" and any unknown profile string.
		return wireOpenAI
	}
}
// applyDeepseek emits DeepSeek-flavored reasoning fields: "auto" and "on"
// both enable the thinking block (even without an effort level), "off"
// suppresses everything, and a non-empty effort is forwarded as
// reasoning_effort for every mode except "off".
func applyDeepseek(cfg *einoopenai.ChatModelConfig, mode, effort string) {
	if mode == "off" {
		return
	}
	// Lazily create the map only when we actually have a field to write.
	ensureExtra := func() {
		if cfg.ExtraFields == nil {
			cfg.ExtraFields = make(map[string]any)
		}
	}
	switch mode {
	case "auto", "on":
		ensureExtra()
		cfg.ExtraFields["thinking"] = map[string]any{"type": "enabled"}
	}
	if effort != "" {
		ensureExtra()
		cfg.ExtraFields["reasoning_effort"] = effortStringForAPI(effort)
	}
}
// applyOpenAICompat maps reasoning options onto the OpenAI-compatible wire.
// "off" sends nothing (consistent with applyDeepseek and the Claude path,
// which both honor "off" — previously an effort could leak through even when
// reasoning was explicitly disabled). "auto" without an explicit effort also
// sends nothing; "on" defaults the effort to "medium". Known levels use the
// typed ReasoningEffort field; "max" is outside the enum and goes through
// ExtraFields instead.
func applyOpenAICompat(cfg *einoopenai.ChatModelConfig, mode, effort string) {
	if mode == "off" {
		return
	}
	if mode == "auto" && effort == "" {
		return
	}
	e := effort
	if mode == "on" && e == "" {
		e = "medium"
	}
	if e == "" {
		return
	}
	if e == "max" {
		if cfg.ExtraFields == nil {
			cfg.ExtraFields = make(map[string]any)
		}
		cfg.ExtraFields["reasoning_effort"] = "max"
		return
	}
	switch e {
	case "low":
		cfg.ReasoningEffort = einoopenai.ReasoningEffortLevelLow
	case "medium":
		cfg.ReasoningEffort = einoopenai.ReasoningEffortLevelMedium
	case "high":
		cfg.ReasoningEffort = einoopenai.ReasoningEffortLevelHigh
	}
}
// applyOutputConfigEffort maps reasoning options onto the output_config wire.
// "off" sends nothing (consistent with applyDeepseek and the Claude path,
// which both honor "off" — previously an effort could leak through even when
// reasoning was explicitly disabled). "auto" without an explicit effort also
// sends nothing; "on" defaults the effort to "high".
func applyOutputConfigEffort(cfg *einoopenai.ChatModelConfig, mode, effort string) {
	if mode == "off" {
		return
	}
	if mode == "auto" && effort == "" {
		return
	}
	e := effort
	if mode == "on" && e == "" {
		e = "high"
	}
	if e == "" {
		return
	}
	if cfg.ExtraFields == nil {
		cfg.ExtraFields = make(map[string]any)
	}
	cfg.ExtraFields["output_config"] = map[string]any{"effort": effortStringForAPI(e)}
}
// effortStringForAPI normalizes an effort level to the lowercase string form
// gateways expect on the wire; "max" passes through unchanged.
func effortStringForAPI(e string) string {
	trimmed := strings.TrimSpace(e)
	return strings.ToLower(trimmed)
}
+137 -2
View File
@@ -2391,7 +2391,118 @@ header {
box-sizing: border-box;
}
.chat-input-container > .chat-input-with-files {
.chat-input-primary-row {
display: flex;
flex-direction: row;
align-items: flex-end;
gap: 8px;
flex: 1;
min-width: 0;
width: 100%;
}
.chat-input-leading {
display: flex;
flex-direction: row;
align-items: flex-end;
gap: 8px;
flex-shrink: 0;
}
/* Eino:模型推理收进浮层,保持主输入行简洁 */
.chat-reasoning-wrapper {
flex-shrink: 0;
}
.chat-reasoning-inner {
position: relative;
}
.chat-reasoning-btn {
max-width: 10.5rem;
padding-left: 0.5rem;
padding-right: 0.45rem;
}
.chat-reasoning-btn .chat-reasoning-btn-icon {
flex-shrink: 0;
font-size: 0.95rem;
line-height: 1;
opacity: 0.95;
}
.chat-reasoning-btn.active .chat-reasoning-btn-icon {
opacity: 1;
}
.chat-reasoning-btn .chat-reasoning-btn-summary {
max-width: 7.6rem;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.chat-reasoning-btn.active {
border-color: rgba(49, 130, 206, 0.45);
background: rgba(49, 130, 206, 0.06);
}
.chat-reasoning-panel {
position: absolute;
bottom: calc(100% + 8px);
left: 0;
width: 288px;
max-width: calc(100vw - 32px);
background: #ffffff;
border: 1px solid rgba(0, 0, 0, 0.08);
border-radius: 16px;
padding: 12px;
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.12), 0 4px 16px rgba(0, 0, 0, 0.08), 0 0 0 1px rgba(0, 0, 0, 0.04);
z-index: 1000;
display: flex;
flex-direction: column;
gap: 10px;
text-align: left;
}
.chat-reasoning-panel-header {
margin-bottom: 0;
}
.chat-reasoning-panel-hint {
font-size: 0.75rem;
color: var(--text-muted, #718096);
margin: 0;
line-height: 1.45;
}
.chat-reasoning-fields {
display: flex;
flex-direction: column;
gap: 12px;
}
.chat-reasoning-field-label {
display: block;
font-size: 0.75rem;
font-weight: 600;
color: var(--text-muted, #718096);
margin-bottom: 6px;
}
.chat-reasoning-select {
width: 100%;
box-sizing: border-box;
padding: 0.45rem 0.6rem;
font-size: 0.8125rem;
border: 1px solid var(--border-color, #e2e8f0);
border-radius: 8px;
background: var(--card-bg, #fff);
color: var(--text-color, #2d3748);
}
.chat-input-container .chat-input-with-files,
.chat-input-primary-row .chat-input-with-files {
flex: 1;
display: flex;
flex-direction: column;
@@ -2399,7 +2510,8 @@ header {
gap: 6px;
}
.chat-input-container > .chat-input-field {
.chat-input-container > .chat-input-field,
.chat-input-primary-row .chat-input-field {
flex: 1;
display: flex;
min-width: 0;
@@ -3568,6 +3680,11 @@ header {
background: rgba(156, 39, 176, 0.05);
}
.timeline-item-reasoning_chain {
border-left-color: #5c6bc0;
background: rgba(92, 107, 192, 0.06);
}
.timeline-item-tool_call {
border-left-color: #ff9800;
background: rgba(255, 152, 0, 0.05);
@@ -3593,6 +3710,11 @@ header {
background: rgba(255, 112, 67, 0.12);
}
.timeline-item-user_interrupt_continue {
border-left-color: #d97706;
background: rgba(217, 119, 6, 0.08);
}
.timeline-item-header {
display: flex;
align-items: center;
@@ -3623,6 +3745,12 @@ header {
line-height: 1.6;
}
/* 流式增量阶段纯文本展示(避免半段 Markdown 反复解析) */
.timeline-item-content.timeline-stream-plain {
white-space: pre-wrap;
word-break: break-word;
}
.tool-details {
display: flex;
flex-direction: column;
@@ -12283,6 +12411,9 @@ header {
.webshell-ai-process-block .webshell-ai-timeline-thinking {
border-left-color: #9c27b0;
}
.webshell-ai-process-block .webshell-ai-timeline-reasoning_chain {
border-left-color: #5c6bc0;
}
.webshell-ai-process-block .webshell-ai-timeline-tool_call,
.webshell-ai-process-block .webshell-ai-timeline-tool_calls_detected {
border-left-color: #ff9800;
@@ -18300,6 +18431,10 @@ button.chat-files-dropdown-item:hover:not(:disabled) {
transform: translateX(-50%) translateY(0);
}
.chat-files-toast.chat-toast--error {
background: #b91c1c;
}
/* 对话附件读取 / 文件管理上传 进度条 */
/* [hidden] 默认会被本类的 display:flex 覆盖,须显式隐藏否则空闲时仍露出灰条 */
.chat-upload-progress-row[hidden] {
+20 -1
View File
@@ -277,6 +277,7 @@
"planExecuteStreamPhase": "Phase output",
"einoSubAgentStep": "Sub-agent {{agent}} · step {{n}}",
"aiThinking": "AI thinking",
"reasoningChain": "Reasoning process",
"planning": "Planning",
"assistantStreamPhase": "Assistant output",
"toolCallsDetected": "Detected {{count}} tool call(s)",
@@ -288,6 +289,7 @@
"error": "Error",
"streamNetworkErrorHint": "Connection lost ({{detail}}). A long task may still be running on the server; check running tasks at the top or refresh this conversation later.",
"taskCancelled": "Task cancelled",
"userInterruptContinueTitle": "⏸️ User interrupt & continue",
"unknownTool": "Unknown tool",
"einoAgentReplyTitle": "Sub-agent reply",
"einoStreamErrorTitle": "⚠️ Eino stream interrupted ({{agent}})",
@@ -328,6 +330,19 @@
"agentModeMulti": "Multi-agent",
"agentModeSingleHint": "Single-model ReAct loop for chat and tool use",
"agentModeMultiHint": "Eino prebuilt orchestration (deep / plan_execute / supervisor) for complex tasks",
"reasoningModeLabel": "Model reasoning",
"reasoningEffortLabel": "Reasoning effort",
"reasoningModeDefault": "Use system default",
"reasoningModeOff": "Off",
"reasoningModeOn": "On",
"reasoningModeAuto": "Auto",
"reasoningEffortUnset": "Unspecified",
"reasoningCompactLabel": "Reasoning",
"reasoningCompactAria": "Open model reasoning options",
"reasoningPanelTitle": "Model reasoning",
"reasoningPanelHint": "Only Eino single- and multi-agent requests use these; merged with defaults in Settings.",
"reasoningSummaryFollow": "System",
"reasoningSummaryDash": "—",
"agentModeOrchPlanExecute": "Plan-Exec",
"agentModeOrchSupervisor": "Supervisor",
"hitlTitle": "Human-in-the-loop",
@@ -396,7 +411,7 @@
"stopTask": "Stop task",
"interruptModalTitle": "Interrupt current step",
"interruptReasonLabel": "Interrupt note",
"interruptModalHint": "Same as MCP monitor \"Stop tool\": ends only the in-flight tool call; the conversation and this run continue. Optional note is merged into the tool result (bilingual USER INTERRUPT NOTE, not raw CLI). Leave empty for a plain stop. If no tool is running yet (model still thinking), wait for a tool call or use \"Stop completely\".",
"interruptModalHint": "When a tool is running: same as MCP monitor \"Stop tool\" only that call is stopped and the run continues; your note can be merged into the tool result (USER INTERRUPT NOTE). When no tool is running (model thinking/streaming only): \"Interrupt & continue\" still works — current output pauses, your note is merged into context and the run resumes automatically; the progress timeline shows a \"User interrupt & continue\" entry. Use this instead of a full stop when you only want to steer; use \"Stop completely\" to end the whole task.",
"interruptReasonPlaceholder": "e.g. Tool is too slow—skip and summarize…",
"interruptReasonRequired": "Please enter a short note so the model can continue accordingly.",
"interruptSubmitting": "Submitting...",
@@ -1591,6 +1606,10 @@
"maxTotalTokens": "Max Context Tokens",
"maxTotalTokensPlaceholder": "120000",
"maxTotalTokensHint": "Shared by memory compression and attack chain building. Default: 120000",
"openaiReasoningTitle": "Model reasoning (Eino)",
"openaiReasoningHint": "Applies to Eino single-agent and multi-agent only; works with chat-page reasoning controls.",
"openaiReasoningProfile": "Wire profile",
"openaiReasoningAllowClient": "Allow chat page to override reasoning options",
"fofaBaseUrlPlaceholder": "https://fofa.info/api/v1/search/all (optional)",
"fofaBaseUrlHint": "Leave empty for default.",
"email": "Email",
+20 -1
View File
@@ -266,6 +266,7 @@
"planExecuteStreamPhase": "阶段输出",
"einoSubAgentStep": "子代理 {{agent}} · 第 {{n}} 步",
"aiThinking": "AI思考",
"reasoningChain": "推理过程",
"planning": "规划中",
"assistantStreamPhase": "助手输出",
"toolCallsDetected": "检测到 {{count}} 个工具调用",
@@ -277,6 +278,7 @@
"error": "错误",
"streamNetworkErrorHint": "连接已中断({{detail}})。长时间任务可能仍在后端执行,请查看顶部「运行中」任务或稍后刷新本对话。",
"taskCancelled": "任务已取消",
"userInterruptContinueTitle": "⏸️ 用户中断并继续",
"unknownTool": "未知工具",
"einoAgentReplyTitle": "子代理回复",
"einoStreamErrorTitle": "⚠️ Eino 流式中断({{agent}}",
@@ -317,6 +319,19 @@
"agentModeMulti": "多代理",
"agentModeSingleHint": "单模型 ReAct 循环,适合常规对话与工具调用",
"agentModeMultiHint": "Eino 预置编排(deep / plan_execute / supervisor),适合复杂任务",
"reasoningModeLabel": "模型推理",
"reasoningEffortLabel": "推理强度",
"reasoningModeDefault": "跟随系统",
"reasoningModeOff": "关闭",
"reasoningModeOn": "开启",
"reasoningModeAuto": "自动",
"reasoningEffortUnset": "不指定",
"reasoningCompactLabel": "推理",
"reasoningCompactAria": "打开模型推理选项",
"reasoningPanelTitle": "模型推理",
"reasoningPanelHint": "仅 Eino 单代理与多代理请求会带上这些参数;与系统设置中的默认值合并。",
"reasoningSummaryFollow": "系统",
"reasoningSummaryDash": "—",
"agentModeOrchPlanExecute": "Plan-Exec",
"agentModeOrchSupervisor": "Supervisor",
"hitlTitle": "人机协同",
@@ -385,7 +400,7 @@
"stopTask": "停止任务",
"interruptModalTitle": "中断当前步骤",
"interruptReasonLabel": "中断说明",
"interruptModalHint": "与 MCP 监控页「终止工具」一致仅结束当前这一次工具调用,整条对话与本轮推理会继续;工具返回中可附带说明(中英 USER INTERRUPT NOTE 块,与命令行原文区分)。留空则等同仅终止工具。若当前没有工具在执行(模型尚在思考),请等待工具开始或改用「彻底停止」。",
"interruptModalHint": "有工具在执行时:与 MCP 监控页「终止工具」一致仅结束当前这一次工具调用,本轮推理会继续;说明可写入工具返回(USER INTERRUPT NOTE)。无工具在执行(模型纯思考/流式输出):仍可「中断并继续」——会暂停当前输出,把你的说明合并进上下文并自动续跑;进度详情时间线会出现「用户中断并继续」条目。不需要整轮停止时请优先用本按钮;要结束整条任务请用「彻底停止」。",
"interruptReasonPlaceholder": "例如:工具耗时过长,请先跳过并总结当前结果…",
"interruptReasonRequired": "请填写中断说明,以便模型根据你的意图继续。",
"interruptSubmitting": "提交中...",
@@ -1580,6 +1595,10 @@
"maxTotalTokens": "最大上下文 Token 数",
"maxTotalTokensPlaceholder": "120000",
"maxTotalTokensHint": "内存压缩和攻击链构建共用此配置,默认 120000",
"openaiReasoningTitle": "模型推理(Eino",
"openaiReasoningHint": "仅 Eino 单代理与多代理请求生效;与对话页「模型推理」下拉配合使用。",
"openaiReasoningProfile": "线路 profile",
"openaiReasoningAllowClient": "允许对话页覆盖推理选项",
"fofaBaseUrlPlaceholder": "https://fofa.info/api/v1/search/all(可选)",
"fofaBaseUrlHint": "留空则使用默认地址。",
"email": "Email",
+190 -3
View File
@@ -26,6 +26,11 @@ const DRAFT_SAVE_DELAY = 500; // 500ms防抖延迟
// 对话文件上传相关(后端会拼接路径与内容发给大模型,前端不再重复发文件列表)
const MAX_CHAT_FILES = 10;
const CHAT_FILE_DEFAULT_PROMPT = '请根据上传的文件内容进行分析。';
/** 与 handler.formatInterruptContinueUserMessage 首段一致;主对话不展示,仅迭代详情(user_interrupt_continue */
const CHAT_INTERRUPT_CONTINUE_USER_PREFIX = '【用户补充 / 中断后继续】';
function isInterruptContinueInjectChatMessage(content) {
    // Only a string whose leading-whitespace-stripped text starts with the
    // interrupt-continue prefix counts as an injected continuation message.
    if (typeof content !== 'string') return false;
    return content.trimStart().indexOf(CHAT_INTERRUPT_CONTINUE_USER_PREFIX) === 0;
}
/**
* 对话附件选文件后异步 POST /api/chat-uploads发送时只传 serverPath绝对路径请求体不再内联大文件内容
* @type {{ id: number, fileName: string, mimeType: string, serverPath: string|null, uploading: boolean, uploadPercent: number, uploadPromise: Promise<void>|null, uploadError: string|null }[]}
@@ -35,6 +40,8 @@ let chatAttachmentSeq = 0;
// 对话模式:react = 原生 ReAct/agent-loop);eino_single = Eino ADK 单代理(/api/eino-agent/stream);deep / plan_execute / supervisor = Eino 多代理(/api/multi-agent/stream,请求体 orchestration
const AGENT_MODE_STORAGE_KEY = 'cyberstrike-chat-agent-mode';
const REASONING_MODE_LS = 'cyberstrike-chat-reasoning-mode';
const REASONING_EFFORT_LS = 'cyberstrike-chat-reasoning-effort';
const CHAT_AGENT_MODE_REACT = 'react';
const CHAT_AGENT_MODE_EINO_SINGLE = 'eino_single';
const CHAT_AGENT_EINO_MODES = ['deep', 'plan_execute', 'supervisor'];
@@ -51,6 +58,28 @@ const HITL_MODE_REVIEW_EDIT = 'review_edit';
const HITL_MODE_OPTIONS = [HITL_MODE_OFF, HITL_MODE_APPROVAL, HITL_MODE_REVIEW_EDIT];
let hitlApplyFeedbackTimer = null;
/** 非阻塞提示(与 chat-files-toast 样式共用) */
function showChatToast(message, type) {
    // Non-blocking toast sharing chat-files-toast styling; no-op on empty text.
    const text = message == null ? '' : String(message);
    if (!text) return;
    const isError = type === 'error';
    const toast = document.createElement('div');
    toast.className = isError ? 'chat-files-toast chat-toast--error' : 'chat-files-toast';
    toast.setAttribute('role', 'status');
    toast.textContent = text;
    document.body.appendChild(toast);
    // Add the visible class on the next frame so the CSS transition can run.
    requestAnimationFrame(function () {
        toast.classList.add('chat-files-toast-visible');
    });
    // Errors linger longer; removal waits out the 300ms fade transition.
    const visibleMs = isError ? 4500 : 2600;
    setTimeout(function () {
        toast.classList.remove('chat-files-toast-visible');
        setTimeout(function () { toast.remove(); }, 300);
    }, visibleMs);
}
if (typeof window !== 'undefined') {
window.showChatToast = showChatToast;
}
function normalizeOrchestrationClient(s) {
const v = String(s || '').trim().toLowerCase().replace(/-/g, '_');
if (v === 'plan_execute' || v === 'planexecute' || v === 'pe') return 'plan_execute';
@@ -293,7 +322,7 @@ function showHitlApplyFeedback(text, isError, partial) {
}
if (!el) {
if (text && isError) {
alert(text);
showChatToast(text, 'error');
}
return;
}
@@ -465,6 +494,132 @@ function syncAgentModeFromValue(value) {
const v = el.getAttribute('data-value');
el.classList.toggle('selected', v === value);
});
syncReasoningRowVisibility(value);
}
function syncReasoningRowVisibility(modeVal) {
const wrap = document.getElementById('chat-reasoning-wrapper');
if (!wrap) return;
const show = modeVal === CHAT_AGENT_MODE_EINO_SINGLE || (multiAgentAPIEnabled && chatAgentModeIsEino(modeVal));
wrap.style.display = show ? '' : 'none';
if (!show) {
closeChatReasoningPanel();
} else {
updateChatReasoningSummary();
}
}
function reasoningSummaryModeLabel(mode) {
const m = (mode || 'default').trim();
const t = (typeof window.t === 'function') ? window.t : function (k) { return k; };
switch (m) {
case 'off': return t('chat.reasoningModeOff');
case 'on': return t('chat.reasoningModeOn');
case 'auto': return t('chat.reasoningModeAuto');
default: return t('chat.reasoningSummaryFollow');
}
}
function updateChatReasoningSummary() {
const el = document.getElementById('chat-reasoning-summary');
const modeEl = document.getElementById('chat-reasoning-mode');
const effEl = document.getElementById('chat-reasoning-effort');
if (!el || !modeEl) return;
const mode = (modeEl.value || 'default').trim();
const effort = effEl && effEl.value ? String(effEl.value).trim() : '';
const t = (typeof window.t === 'function') ? window.t : function (k) { return k; };
const modePart = reasoningSummaryModeLabel(mode);
const effPart = effort || t('chat.reasoningSummaryDash');
el.textContent = modePart + ' / ' + effPart;
}
function closeChatReasoningPanel() {
    // Hide the floating reasoning panel and reset the trigger button state.
    const btn = document.getElementById('chat-reasoning-btn');
    const panel = document.getElementById('chat-reasoning-panel');
    if (panel) panel.style.display = 'none';
    if (!btn) return;
    btn.classList.remove('active');
    btn.setAttribute('aria-expanded', 'false');
}
function toggleChatReasoningPanel() {
const panel = document.getElementById('chat-reasoning-panel');
const btn = document.getElementById('chat-reasoning-btn');
if (!panel || !btn) return;
const isOpen = panel.style.display === 'flex';
if (isOpen) {
closeChatReasoningPanel();
return;
}
if (typeof closeAgentModePanel === 'function') {
closeAgentModePanel();
}
if (typeof closeRoleSelectionPanel === 'function') {
closeRoleSelectionPanel();
}
updateChatReasoningSummary();
panel.style.display = 'flex';
btn.classList.add('active');
btn.setAttribute('aria-expanded', 'true');
}
function restoreChatReasoningControlsFromStorage() {
try {
const m = document.getElementById('chat-reasoning-mode');
const e = document.getElementById('chat-reasoning-effort');
if (m) {
const v = localStorage.getItem(REASONING_MODE_LS);
if (v && ['default', 'off', 'on', 'auto'].indexOf(v) !== -1) {
m.value = v;
}
}
if (e) {
const v = localStorage.getItem(REASONING_EFFORT_LS);
if (v !== null && ['', 'low', 'medium', 'high', 'max'].indexOf(v) !== -1) {
e.value = v;
}
}
updateChatReasoningSummary();
} catch (err) { /* ignore */ }
}
function persistChatReasoningPrefs() {
try {
const m = document.getElementById('chat-reasoning-mode');
const elEff = document.getElementById('chat-reasoning-effort');
if (m) localStorage.setItem(REASONING_MODE_LS, m.value || 'default');
if (elEff) localStorage.setItem(REASONING_EFFORT_LS, elEff.value || '');
updateChatReasoningSummary();
} catch (err) { /* ignore */ }
}
/** Reused by WebShell etc.: returns the reasoning request fragment for Eino
 * paths, or undefined when nothing should be sent. */
function buildReasoningRequestPayload() {
    const wrapper = document.getElementById('chat-reasoning-wrapper');
    // A hidden wrapper means the current agent mode does not use reasoning options.
    if (!wrapper || wrapper.style.display === 'none') return undefined;
    const modeSelect = document.getElementById('chat-reasoning-mode');
    if (!modeSelect) return undefined;
    const effortSelect = document.getElementById('chat-reasoning-effort');
    const mode = (modeSelect.value || 'default').trim();
    const effort = effortSelect && effortSelect.value ? String(effortSelect.value).trim() : '';
    // "default" mode with no effort means "follow server config": send nothing.
    if (mode === 'default' && !effort) return undefined;
    const payload = {};
    if (mode !== 'default') payload.mode = mode;
    if (effort) payload.effort = effort;
    return Object.keys(payload).length ? payload : undefined;
}
if (typeof window !== 'undefined') {
window.persistChatReasoningPrefs = persistChatReasoningPrefs;
window.buildReasoningRequestPayload = buildReasoningRequestPayload;
window.closeChatReasoningPanel = closeChatReasoningPanel;
window.toggleChatReasoningPanel = toggleChatReasoningPanel;
window.updateChatReasoningSummary = updateChatReasoningSummary;
}
function closeAgentModePanel() {
@@ -486,6 +641,9 @@ function toggleAgentModePanel() {
closeAgentModePanel();
return;
}
if (typeof closeChatReasoningPanel === 'function') {
closeChatReasoningPanel();
}
if (typeof closeRoleSelectionPanel === 'function') {
closeRoleSelectionPanel();
}
@@ -536,6 +694,8 @@ async function initChatAgentModeFromConfig() {
} catch (e) { /* ignore */ }
sel.value = stored;
syncAgentModeFromValue(stored);
restoreChatReasoningControlsFromStorage();
syncReasoningRowVisibility(stored);
} catch (e) {
console.warn('initChatAgentModeFromConfig', e);
}
@@ -548,6 +708,9 @@ document.addEventListener('languagechange', function () {
if (v === CHAT_AGENT_MODE_REACT || chatAgentModeIsEinoSingle(v) || chatAgentModeIsEino(v)) {
syncAgentModeFromValue(v);
}
if (typeof updateChatReasoningSummary === 'function') {
updateChatReasoningSummary();
}
});
// 保存输入框草稿到localStorage(防抖版本)
@@ -733,6 +896,10 @@ async function sendMessage() {
serverPath: a.serverPath
}));
}
const reasoningPayload = buildReasoningRequestPayload();
if (reasoningPayload) {
body.reasoning = reasoningPayload;
}
// 发送后清空附件列表
chatAttachments = [];
renderChatFileChips();
@@ -2201,6 +2368,8 @@ function renderProcessDetails(messageId, processDetails) {
}
} else if (eventType === 'thinking') {
itemTitle = agPx + '🤔 ' + (typeof window.t === 'function' ? window.t('chat.aiThinking') : 'AI思考');
} else if (eventType === 'reasoning_chain') {
itemTitle = agPx + '🔗 ' + (typeof window.t === 'function' ? window.t('chat.reasoningChain') : '推理过程');
} else if (eventType === 'planning') {
if (typeof window.einoMainStreamPlanningTitle === 'function') {
itemTitle = window.einoMainStreamPlanningTitle(data);
@@ -2237,6 +2406,10 @@ function renderProcessDetails(messageId, processDetails) {
itemTitle = agPx + '🧑‍⚖️ HITL · ' + hitlMsg;
} else if (eventType === 'progress') {
itemTitle = typeof window.translateProgressMessage === 'function' ? window.translateProgressMessage(detail.message || '') : (detail.message || '');
} else if (eventType === 'user_interrupt_continue') {
itemTitle = typeof window.t === 'function'
? window.t('chat.userInterruptContinueTitle')
: '⏸️ 用户中断并继续';
}
addTimelineItem(timeline, eventType, {
@@ -2853,7 +3026,7 @@ async function loadConversation(conversationId) {
const conversation = await response.json();
if (!response.ok) {
alert('加载对话失败: ' + (conversation.error || '未知错误'));
showChatToast('加载对话失败: ' + (conversation.error || '未知错误'), 'error');
return;
}
if (seq !== loadConversationRequestSeq) {
@@ -2953,6 +3126,9 @@ async function loadConversation(conversationId) {
// 渲染单条消息的辅助函数
const renderOneMessage = (msg) => {
if (msg.role === 'user' && isInterruptContinueInjectChatMessage(msg.content)) {
return;
}
let displayContent = msg.content;
if (msg.role === 'assistant' && msg.content === '处理中...' && msg.processDetails && msg.processDetails.length > 0) {
for (let i = msg.processDetails.length - 1; i >= 0; i--) {
@@ -3061,7 +3237,7 @@ async function loadConversation(conversationId) {
}
} catch (error) {
console.error('加载对话失败:', error);
alert('加载对话失败: ' + error.message);
showChatToast('加载对话失败: ' + (error && error.message ? error.message : String(error)), 'error');
}
}
@@ -6617,6 +6793,9 @@ function formatConversationAsMarkdown(conversation, options = {}) {
}
messages.forEach((msg, index) => {
if (msg && msg.role === 'user' && isInterruptContinueInjectChatMessage(msg.content)) {
return;
}
const role = getConversationRoleLabel(msg && msg.role);
const timestamp = formatConversationDateForMarkdown(msg && msg.createdAt);
const content = msg && typeof msg.content === 'string' ? msg.content : '';
@@ -7196,6 +7375,14 @@ document.addEventListener('click', function(event) {
closeAgentModePanel();
}
}
const reasoningWrap = document.getElementById('chat-reasoning-wrapper');
const reasoningPanel = document.getElementById('chat-reasoning-panel');
if (reasoningWrap && reasoningPanel && reasoningPanel.style.display === 'flex') {
if (!reasoningWrap.contains(event.target)) {
closeChatReasoningPanel();
}
}
});
// 创建分组
+168 -45
View File
@@ -273,6 +273,47 @@ function escapeHtmlLocal(text) {
return div.innerHTML;
}
/**
* internal/openai.normalizeStreamingDelta 一致兼容网关/模型返回累计全文或整包重发
* 避免前端 buffer += chunk 与后端已归一化的增量叠加导致逐段重复响应中显示了响应中显示了
* @returns {[string, string]} [nextBuffer, effectiveDelta]
*/
function normalizeStreamingDeltaJs(current, incoming) {
    // Mirrors internal/openai.normalizeStreamingDelta: tolerates gateways that
    // resend the cumulative full text (or the whole packet) instead of a pure
    // delta, so buffer += chunk never produces doubled fragments.
    // Returns [nextBuffer, effectiveDelta].
    const buffer = current == null ? '' : String(current);
    const chunk = incoming == null ? '' : String(incoming);
    if (!chunk) return [buffer, ''];
    if (!buffer) return [chunk, chunk];
    // Cumulative resend: the chunk extends the buffer → keep only the tail.
    if (chunk.length > buffer.length && chunk.startsWith(buffer)) {
        return [chunk, chunk.slice(buffer.length)];
    }
    // Exact duplicate of a multi-rune buffer: drop it. A single repeated rune
    // may be a legitimate delta, so that case still appends below.
    if (chunk === buffer && Array.from(buffer).length > 1) {
        return [buffer, ''];
    }
    return [buffer + chunk, chunk];
}
if (typeof window !== 'undefined') {
window.normalizeStreamingDeltaJs = normalizeStreamingDeltaJs;
}
/** Streaming delta phase: plain text only, skipping the full marked+DOMPurify
 * pass for every chunk. */
function setTimelineItemContentStreamPlain(contentEl, text) {
    if (!contentEl) return;
    const value = text == null ? '' : String(text);
    contentEl.classList.add('timeline-stream-plain');
    contentEl.textContent = value;
}
/** Stream finished (or non-streaming): render rich text from an
 * already-sanitized HTML string. */
function setTimelineItemContentStreamRich(contentEl, html) {
    if (!contentEl) return;
    contentEl.classList.remove('timeline-stream-plain');
    contentEl.innerHTML = html;
}
function formatAssistantMarkdownContent(text) {
const raw = text == null ? '' : String(text);
if (typeof marked !== 'undefined') {
@@ -743,19 +784,33 @@ function integrateProgressToMCPSection(progressId, assistantMessageId, mcpExecut
mcpSection.appendChild(buttonsContainer);
}
const hasExecBtns = buttonsContainer.querySelector('.mcp-detail-btn:not(.process-detail-btn)');
if (mcpIds.length > 0 && !hasExecBtns) {
mcpIds.forEach((execId, index) => {
let maxExecIndex = 0;
const existingExecBtns = buttonsContainer.querySelectorAll('.mcp-detail-btn:not(.process-detail-btn)');
existingExecBtns.forEach(function (btn) {
const n = parseInt(btn.dataset.execIndex, 10);
if (!isNaN(n) && n > maxExecIndex) maxExecIndex = n;
});
const seenExec = new Set();
existingExecBtns.forEach(function (btn) {
if (btn.dataset.execId) seenExec.add(String(btn.dataset.execId).trim());
});
let appendedAny = false;
if (mcpIds.length > 0) {
mcpIds.forEach(function (execId) {
const id = execId != null ? String(execId).trim() : '';
if (!id || seenExec.has(id)) return;
seenExec.add(id);
maxExecIndex += 1;
appendedAny = true;
const detailBtn = document.createElement('button');
detailBtn.className = 'mcp-detail-btn';
detailBtn.dataset.execId = execId;
detailBtn.dataset.execIndex = String(index + 1);
detailBtn.innerHTML = '<span>' + (typeof window.t === 'function' ? window.t('chat.callNumber', { n: index + 1 }) : '调用 #' + (index + 1)) + '</span>';
detailBtn.onclick = () => showMCPDetail(execId);
detailBtn.dataset.execId = id;
detailBtn.dataset.execIndex = String(maxExecIndex);
detailBtn.innerHTML = '<span>' + (typeof window.t === 'function' ? window.t('chat.callNumber', { n: maxExecIndex }) : '调用 #' + maxExecIndex) + '</span>';
detailBtn.onclick = function () { showMCPDetail(id); };
buttonsContainer.appendChild(detailBtn);
});
// 使用批量 API 一次性获取所有工具名称(消除 N 次单独请求)
if (typeof batchUpdateButtonToolNames === 'function') {
if (appendedAny && typeof batchUpdateButtonToolNames === 'function') {
batchUpdateButtonToolNames(buttonsContainer, mcpIds);
}
}
@@ -1038,6 +1093,24 @@ function resolveStreamTimeline(progressId) {
return timeline;
}
/** Merge MCP execution id lists with de-dup (prev order first, then next);
 * used when one task spans multiple Runs / SSE segments. */
function mergeMcpExecutionIDLists(prev, next) {
    const merged = [];
    const known = new Set();
    [prev, next].forEach(function (list) {
        if (!Array.isArray(list)) return;
        list.forEach(function (raw) {
            const id = raw != null ? String(raw).trim() : '';
            if (!id || known.has(id)) return;
            known.add(id);
            merged.push(id);
        });
    });
    return merged;
}
// 处理流式事件
function handleStreamEvent(event, progressElement, progressId,
getAssistantId, setAssistantId, getMcpIds, setMcpIds) {
@@ -1150,20 +1223,38 @@ function handleStreamEvent(event, progressElement, progressId,
break;
}
case 'thinking_stream_start': {
case 'thinking_stream_start':
case 'reasoning_chain_stream_start': {
const d = event.data || {};
const streamId = d.streamId || null;
if (!streamId) break;
const timelineType = event.type === 'reasoning_chain_stream_start' ? 'reasoning_chain' : 'thinking';
let state = thinkingStreamStateByProgressId.get(progressId);
if (!state) {
state = new Map();
thinkingStreamStateByProgressId.set(progressId, state);
}
// 若已存在,重置 buffer
const thinkBase = typeof window.t === 'function' ? window.t('chat.aiThinking') : 'AI思考';
const title = timelineAgentBracketPrefix(d) + '🤔 ' + thinkBase;
const itemId = addTimelineItem(timeline, 'thinking', {
// 同一 streamId 重复 start:复用已有条目,避免孤儿卡片 + 新条目重复收 delta
if (state.has(streamId)) {
const ex = state.get(streamId);
ex.buffer = '';
const existingItem = document.getElementById(ex.itemId);
if (existingItem) {
const contentEl = existingItem.querySelector('.timeline-item-content');
if (contentEl) {
setTimelineItemContentStreamPlain(contentEl, '');
}
}
break;
}
const labelBase = typeof window.t === 'function'
? window.t(timelineType === 'reasoning_chain' ? 'chat.reasoningChain' : 'chat.aiThinking')
: (timelineType === 'reasoning_chain' ? '推理过程' : 'AI思考');
const emoji = timelineType === 'reasoning_chain' ? '🔗' : '🤔';
const title = timelineAgentBracketPrefix(d) + emoji + ' ' + labelBase;
const itemId = addTimelineItem(timeline, timelineType, {
title: title,
message: ' ',
data: d
@@ -1172,7 +1263,8 @@ function handleStreamEvent(event, progressElement, progressId,
break;
}
case 'thinking_stream_delta': {
case 'thinking_stream_delta':
case 'reasoning_chain_stream_delta': {
const d = event.data || {};
const streamId = d.streamId || null;
if (!streamId) break;
@@ -1182,24 +1274,23 @@ function handleStreamEvent(event, progressElement, progressId,
const s = state.get(streamId);
const delta = event.message || '';
s.buffer += delta;
const merged = normalizeStreamingDeltaJs(s.buffer, delta);
s.buffer = merged[0];
const item = document.getElementById(s.itemId);
if (item) {
const contentEl = item.querySelector('.timeline-item-content');
if (contentEl) {
if (typeof formatMarkdown === 'function') {
contentEl.innerHTML = formatMarkdown(s.buffer);
} else {
contentEl.textContent = s.buffer;
}
setTimelineItemContentStreamPlain(contentEl, s.buffer);
}
}
break;
}
case 'thinking':
// 如果本 thinking 是由 thinking_stream_* 聚合出来的(带 streamId),避免重复创建 timeline item
case 'reasoning_chain': {
const timelineType = event.type === 'reasoning_chain' ? 'reasoning_chain' : 'thinking';
// 若已由 *_stream_* 聚合(带 streamId),避免重复创建 timeline item
if (event.data && event.data.streamId) {
const streamId = event.data.streamId;
const state = thinkingStreamStateByProgressId.get(progressId);
@@ -1210,11 +1301,10 @@ function handleStreamEvent(event, progressElement, progressId,
if (item) {
const contentEl = item.querySelector('.timeline-item-content');
if (contentEl) {
// contentEl.innerHTML 用于兼容 Markdown 展示
if (typeof formatMarkdown === 'function') {
contentEl.innerHTML = formatMarkdown(s.buffer);
setTimelineItemContentStreamRich(contentEl, formatMarkdown(s.buffer));
} else {
contentEl.textContent = s.buffer;
setTimelineItemContentStreamPlain(contentEl, s.buffer);
}
}
}
@@ -1222,12 +1312,17 @@ function handleStreamEvent(event, progressElement, progressId,
}
}
addTimelineItem(timeline, 'thinking', {
title: timelineAgentBracketPrefix(event.data) + '🤔 ' + (typeof window.t === 'function' ? window.t('chat.aiThinking') : 'AI思考'),
const labelBase = typeof window.t === 'function'
? window.t(timelineType === 'reasoning_chain' ? 'chat.reasoningChain' : 'chat.aiThinking')
: (timelineType === 'reasoning_chain' ? '推理过程' : 'AI思考');
const emoji = timelineType === 'reasoning_chain' ? '🔗' : '🤔';
addTimelineItem(timeline, timelineType, {
title: timelineAgentBracketPrefix(event.data) + emoji + ' ' + labelBase,
message: event.message,
data: event.data
});
break;
}
case 'tool_calls_detected':
addTimelineItem(timeline, 'tool_calls_detected', {
@@ -1271,6 +1366,19 @@ function handleStreamEvent(event, progressElement, progressId,
});
break;
case 'user_interrupt_continue': {
const d = event.data || {};
const titleBase = typeof window.t === 'function'
? window.t('chat.userInterruptContinueTitle')
: '⏸️ 用户中断并继续';
addTimelineItem(timeline, 'user_interrupt_continue', {
title: titleBase,
message: event.message || '',
data: d
});
break;
}
case 'eino_stream_error': {
const d = event.data || {};
const agent = d.einoAgent ? String(d.einoAgent) : '';
@@ -1456,6 +1564,18 @@ function handleStreamEvent(event, progressElement, progressId,
stateMap = new Map();
einoAgentReplyStreamStateByProgressId.set(progressId, stateMap);
}
if (stateMap.has(streamId)) {
const ex = stateMap.get(streamId);
ex.buffer = '';
const existingItem = document.getElementById(ex.itemId);
if (existingItem) {
let contentEl = existingItem.querySelector('.timeline-item-content');
if (contentEl) {
setTimelineItemContentStreamPlain(contentEl, '');
}
}
break;
}
const streamingLabel = typeof window.t === 'function' ? window.t('timeline.running') : '执行中...';
const replyTitleBase = typeof window.t === 'function' ? window.t('chat.einoAgentReplyTitle') : '子代理回复';
const itemId = addTimelineItem(timeline, 'eino_agent_reply', {
@@ -1477,7 +1597,8 @@ function handleStreamEvent(event, progressElement, progressId,
const stateMap = einoAgentReplyStreamStateByProgressId.get(progressId);
if (!stateMap || !stateMap.has(streamId)) break;
const s = stateMap.get(streamId);
s.buffer += delta;
const merged = normalizeStreamingDeltaJs(s.buffer, delta);
s.buffer = merged[0];
const item = document.getElementById(s.itemId);
if (item) {
let contentEl = item.querySelector('.timeline-item-content');
@@ -1490,11 +1611,7 @@ function handleStreamEvent(event, progressElement, progressId,
}
}
if (contentEl) {
if (typeof formatMarkdown === 'function') {
contentEl.innerHTML = formatMarkdown(s.buffer);
} else {
contentEl.textContent = s.buffer;
}
setTimelineItemContentStreamPlain(contentEl, s.buffer);
}
}
break;
@@ -1522,9 +1639,9 @@ function handleStreamEvent(event, progressElement, progressId,
item.appendChild(contentEl);
}
if (typeof formatMarkdown === 'function') {
contentEl.innerHTML = formatMarkdown(full);
setTimelineItemContentStreamRich(contentEl, formatMarkdown(full));
} else {
contentEl.textContent = full;
setTimelineItemContentStreamPlain(contentEl, full);
}
if (d.einoAgent != null && String(d.einoAgent).trim() !== '') {
item.dataset.einoAgent = String(d.einoAgent).trim();
@@ -1614,7 +1731,7 @@ function handleStreamEvent(event, progressElement, progressId,
const responseData = event.data || {};
const mcpIds = responseData.mcpExecutionIds || [];
setMcpIds(mcpIds);
setMcpIds(mergeMcpExecutionIDLists(typeof getMcpIds === 'function' ? (getMcpIds() || []) : [], mcpIds));
if (responseData.conversationId) {
// 如果用户已经开始了新对话(currentConversationId 为 null),且这个事件来自旧对话,则忽略
@@ -1665,7 +1782,8 @@ function handleStreamEvent(event, progressElement, progressId,
}
const deltaContent = event.message || '';
state.buffer += deltaContent;
const mergedResp = normalizeStreamingDeltaJs(state.buffer, deltaContent);
state.buffer = mergedResp[0];
// 更新时间线条目内容
if (state.itemId) {
@@ -1675,11 +1793,7 @@ function handleStreamEvent(event, progressElement, progressId,
if (contentEl) {
const meta = state.streamMeta || responseData;
const body = formatTimelineStreamBody(state.buffer, meta);
if (typeof formatMarkdown === 'function') {
contentEl.innerHTML = formatMarkdown(body);
} else {
contentEl.textContent = body;
}
setTimelineItemContentStreamPlain(contentEl, body);
}
}
}
@@ -1693,7 +1807,7 @@ function handleStreamEvent(event, progressElement, progressId,
// 先更新 mcp ids
const responseData = event.data || {};
const mcpIds = responseData.mcpExecutionIds || [];
const mcpIds = mergeMcpExecutionIDLists(typeof getMcpIds === 'function' ? (getMcpIds() || []) : [], responseData.mcpExecutionIds || []);
setMcpIds(mcpIds);
// 更新对话ID
@@ -2217,7 +2331,7 @@ async function attachRunningTaskEventStream(conversationId) {
if (line.indexOf('data: ') === 0) {
try {
const eventData = JSON.parse(line.slice(6));
handleStreamEvent(eventData, null, progressId, getAssistantIdFn, setAssistantIdFn, function () { return mcpIds; }, function (ids) { mcpIds = ids; });
handleStreamEvent(eventData, null, progressId, getAssistantIdFn, setAssistantIdFn, function () { return mcpIds; }, function (ids) { mcpIds = mergeMcpExecutionIDLists(mcpIds, ids || []); });
} catch (e) {
console.error('task-events parse', e);
}
@@ -2375,7 +2489,7 @@ function addTimelineItem(timeline, type, options) {
`;
// 根据类型添加详细内容
if ((type === 'thinking' || type === 'planning') && options.message) {
if ((type === 'thinking' || type === 'reasoning_chain' || type === 'planning') && options.message) {
const streamBody = typeof formatTimelineStreamBody === 'function'
? formatTimelineStreamBody(options.message, options.data)
: options.message;
@@ -2430,6 +2544,11 @@ function addTimelineItem(timeline, type, options) {
${escapeHtml(options.message || taskCancelledLabel)}
</div>
`;
} else if (type === 'user_interrupt_continue' && options.message) {
const streamBody = typeof formatTimelineStreamBody === 'function'
? formatTimelineStreamBody(options.message, options.data)
: options.message;
content += `<div class="timeline-item-content">${formatMarkdown(streamBody)}</div>`;
}
item.innerHTML = content;
@@ -3305,6 +3424,8 @@ function refreshProgressAndTimelineI18n() {
} else {
titleSpan.textContent = ap + '\uD83E\uDD14 ' + _t('chat.aiThinking');
}
} else if (type === 'reasoning_chain') {
titleSpan.textContent = ap + '\uD83D\uDD17 ' + _t('chat.reasoningChain');
} else if (type === 'planning') {
if (item.dataset.orchestration && typeof einoMainStreamPlanningTitle === 'function') {
titleSpan.textContent = einoMainStreamPlanningTitle({
@@ -3331,6 +3452,8 @@ function refreshProgressAndTimelineI18n() {
titleSpan.textContent = ap + '\uD83D\uDCAC ' + _t('chat.einoAgentReplyTitle');
} else if (type === 'cancelled') {
titleSpan.textContent = '\u26D4 ' + _t('chat.taskCancelled');
} else if (type === 'user_interrupt_continue') {
titleSpan.textContent = _t('chat.userInterruptContinueTitle');
} else if (type === 'progress' && item.dataset.progressMessage !== undefined) {
titleSpan.textContent = typeof window.translateProgressMessage === 'function' ? window.translateProgressMessage(item.dataset.progressMessage) : item.dataset.progressMessage;
}
+3
View File
@@ -256,6 +256,9 @@ function toggleRoleSelectionPanel() {
if (typeof closeAgentModePanel === 'function') {
closeAgentModePanel();
}
if (typeof closeChatReasoningPanel === 'function') {
closeChatReasoningPanel();
}
panel.style.display = 'flex'; // 使用flex布局
// 添加打开状态的视觉反馈
if (roleSelectorBtn) {
+31 -1
View File
@@ -159,6 +159,27 @@ async function loadConfig(loadTools = true) {
if (maxTokensEl) {
maxTokensEl.value = currentConfig.openai.max_total_tokens || 120000;
}
const orm = currentConfig.openai && currentConfig.openai.reasoning ? currentConfig.openai.reasoning : {};
const orModeEl = document.getElementById('openai-reasoning-mode');
if (orModeEl) {
const mv = (orm.mode || 'auto').toString().trim().toLowerCase();
orModeEl.value = ['auto', 'on', 'off'].includes(mv) ? mv : 'auto';
}
const orEffEl = document.getElementById('openai-reasoning-effort');
if (orEffEl) {
const ev = (orm.effort || '').toString().trim().toLowerCase();
orEffEl.value = ['', 'low', 'medium', 'high', 'max'].includes(ev) ? ev : '';
}
const orProfEl = document.getElementById('openai-reasoning-profile');
if (orProfEl) {
const pv = (orm.profile || 'auto').toString().trim().toLowerCase();
const ok = ['auto', 'deepseek_compat', 'openai_compat', 'output_config_effort'];
orProfEl.value = ok.includes(pv) ? pv : 'auto';
}
const orAllowEl = document.getElementById('openai-reasoning-allow-client');
if (orAllowEl) {
orAllowEl.checked = orm.allow_client_reasoning !== false;
}
// 填充FOFA配置
const fofa = currentConfig.fofa || {};
@@ -1065,13 +1086,22 @@ async function applySettings() {
};
const wecomAgentIdVal = document.getElementById('robot-wecom-agent-id')?.value.trim();
const prevOpenai = (currentConfig && currentConfig.openai) ? currentConfig.openai : {};
const config = {
openai: {
...prevOpenai,
provider: provider,
api_key: apiKey,
base_url: baseUrl,
model: model,
max_total_tokens: parseInt(document.getElementById('openai-max-total-tokens')?.value) || 120000
max_total_tokens: parseInt(document.getElementById('openai-max-total-tokens')?.value) || 120000,
reasoning: {
...(prevOpenai.reasoning || {}),
mode: document.getElementById('openai-reasoning-mode')?.value || 'auto',
effort: (document.getElementById('openai-reasoning-effort')?.value || '').trim(),
profile: document.getElementById('openai-reasoning-profile')?.value || 'auto',
allow_client_reasoning: document.getElementById('openai-reasoning-allow-client')?.checked !== false
}
},
fofa: {
email: document.getElementById('fofa-email')?.value.trim() || '',
+46 -14
View File
@@ -1658,6 +1658,8 @@ function buildWebshellTimelineItemFromDetail(detail) {
title = ap + ((typeof window.t === 'function') ? window.t('chat.iterationRound', { n: data.iteration || 1 }) : ('第 ' + (data.iteration || 1) + ' 轮迭代'));
} else if (eventType === 'thinking') {
title = ap + '🤔 ' + ((typeof window.t === 'function') ? window.t('chat.aiThinking') : 'AI 思考');
} else if (eventType === 'reasoning_chain') {
title = ap + '🔗 ' + ((typeof window.t === 'function') ? window.t('chat.reasoningChain') : '推理过程');
} else if (eventType === 'tool_calls_detected') {
title = ap + '🔧 ' + ((typeof window.t === 'function') ? window.t('chat.toolCallsDetected', { count: data.count || 0 }) : ('检测到 ' + (data.count || 0) + ' 个工具调用'));
} else if (eventType === 'tool_call') {
@@ -2847,6 +2849,12 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
if (info && info.orchestration) {
body.orchestration = info.orchestration;
}
if (typeof window.buildReasoningRequestPayload === 'function') {
var rp = window.buildReasoningRequestPayload();
if (rp) {
body.reasoning = rp;
}
}
return apiFetch(info.path, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
@@ -2898,7 +2906,10 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
} else if (_et === 'response_delta') {
var deltaText = (_em != null && _em !== '') ? String(_em) : '';
if (deltaText) {
streamingTarget += deltaText;
var normR = (typeof window.normalizeStreamingDeltaJs === 'function')
? window.normalizeStreamingDeltaJs(streamingTarget, deltaText)
: [streamingTarget + deltaText, deltaText];
streamingTarget = normR[0];
webshellStreamingTypingId += 1;
streamingTypingId = webshellStreamingTypingId;
runWebshellAiStreamingTyping(assistantDiv, streamingTarget, streamingTypingId, messagesContainer);
@@ -2950,23 +2961,33 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
appendTimelineItem('iteration', '🔍 ' + iterTitle, iterMessage, _ed);
if (!streamingTarget) assistantDiv.textContent = '…';
// ─── Thinking (non-stream + stream) ───
} else if (_et === 'thinking_stream_start' && _ed.streamId) {
var thinkSLabel = wsTOr('chat.aiThinking', 'AI 思考');
// ─── Thinking / reasoning_chain(推理过程,reasoning_content ───
} else if ((_et === 'thinking_stream_start' || _et === 'reasoning_chain_stream_start') && _ed.streamId) {
var isRcStart = _et === 'reasoning_chain_stream_start';
if (wsThinkingStreams.has(_ed.streamId)) {
var tsExist = wsThinkingStreams.get(_ed.streamId);
tsExist.buf = '';
if (tsExist.body) tsExist.body.textContent = '';
} else {
var thinkSLabel = wsTOr(isRcStart ? 'chat.reasoningChain' : 'chat.aiThinking', isRcStart ? '推理过程' : 'AI 思考');
var thinkEmoji = isRcStart ? '🔗' : '🤔';
var thinkSItem = document.createElement('div');
thinkSItem.className = 'webshell-ai-timeline-item webshell-ai-timeline-thinking';
thinkSItem.innerHTML = '<span class="webshell-ai-timeline-title">' + escapeHtml(webshellAgentPx(_ed) + '🤔 ' + thinkSLabel) + '</span>';
thinkSItem.className = 'webshell-ai-timeline-item webshell-ai-timeline-' + (isRcStart ? 'reasoning_chain' : 'thinking');
thinkSItem.innerHTML = '<span class="webshell-ai-timeline-title">' + escapeHtml(webshellAgentPx(_ed) + thinkEmoji + ' ' + thinkSLabel) + '</span>';
var thinkSPre = document.createElement('div');
thinkSPre.className = 'webshell-ai-timeline-msg webshell-thinking-stream-body';
thinkSItem.appendChild(thinkSPre);
timelineContainer.appendChild(thinkSItem);
timelineContainer.classList.add('has-items');
wsThinkingStreams.set(_ed.streamId, { el: thinkSItem, body: thinkSPre, buf: '' });
}
if (!streamingTarget) assistantDiv.textContent = '…';
} else if (_et === 'thinking_stream_delta' && _ed.streamId) {
} else if ((_et === 'thinking_stream_delta' || _et === 'reasoning_chain_stream_delta') && _ed.streamId) {
var tsD = wsThinkingStreams.get(_ed.streamId);
if (tsD) {
tsD.buf += (_em || '');
var normT = (typeof window.normalizeStreamingDeltaJs === 'function')
? window.normalizeStreamingDeltaJs(tsD.buf, _em || '') : [tsD.buf + (_em || ''), _em || ''];
tsD.buf = normT[0];
if (typeof formatMarkdown === 'function') {
tsD.body.innerHTML = formatMarkdown(tsD.buf);
} else {
@@ -2974,7 +2995,7 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
}
}
if (!streamingTarget) assistantDiv.textContent = '…';
} else if (_et === 'thinking_stream_end' && _ed.streamId) {
} else if ((_et === 'thinking_stream_end' || _et === 'reasoning_chain_stream_end') && _ed.streamId) {
var tsE = wsThinkingStreams.get(_ed.streamId);
if (tsE) {
var fullThink = (_em != null && _em !== '') ? String(_em) : tsE.buf;
@@ -2985,13 +3006,15 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
}
wsThinkingStreams.delete(_ed.streamId);
}
} else if (_et === 'thinking' && _em) {
} else if ((_et === 'thinking' || _et === 'reasoning_chain') && _em) {
// 如果有 streamId 且已存在流式条目,跳过避免重复
if (_ed.streamId && wsThinkingStreams.has(_ed.streamId)) {
// 已由 thinking_stream_* 处理
// 已由 *_stream_* 处理
} else {
var thinkLabel = wsTOr('chat.aiThinking', 'AI 思考');
appendTimelineItem('thinking', webshellAgentPx(_ed) + '🤔 ' + thinkLabel, _em, _ed);
var isRc = _et === 'reasoning_chain';
var thinkLabel = wsTOr(isRc ? 'chat.reasoningChain' : 'chat.aiThinking', isRc ? '推理过程' : 'AI 思考');
var thinkEm = isRc ? '🔗' : '🤔';
appendTimelineItem(isRc ? 'reasoning_chain' : 'thinking', webshellAgentPx(_ed) + thinkEm + ' ' + thinkLabel, _em, _ed);
}
if (!streamingTarget) assistantDiv.textContent = '…';
@@ -3076,6 +3099,12 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
// ─── Eino sub-agent reply streaming ───
} else if (_et === 'eino_agent_reply_stream_start' && _ed.streamId) {
if (einoSubReplyStreams.has(_ed.streamId)) {
var stExist = einoSubReplyStreams.get(_ed.streamId);
stExist.buf = '';
var preExist = stExist.el && stExist.el.querySelector('.webshell-eino-reply-stream-body');
if (preExist) preExist.textContent = '';
} else {
var repTS = wsTOr('chat.einoAgentReplyTitle', '子代理回复');
var runTS = wsTOr('timeline.running', '执行中...');
var itemS = document.createElement('div');
@@ -3084,11 +3113,14 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
timelineContainer.appendChild(itemS);
timelineContainer.classList.add('has-items');
einoSubReplyStreams.set(_ed.streamId, { el: itemS, buf: '' });
}
if (!streamingTarget) assistantDiv.textContent = '…';
} else if (_et === 'eino_agent_reply_stream_delta' && _ed.streamId) {
var stD = einoSubReplyStreams.get(_ed.streamId);
if (stD) {
stD.buf += (_em || '');
var normS = (typeof window.normalizeStreamingDeltaJs === 'function')
? window.normalizeStreamingDeltaJs(stD.buf, _em || '') : [stD.buf + (_em || ''), _em || ''];
stD.buf = normS[0];
var preD = stD.el.querySelector('.webshell-eino-reply-stream-body');
if (!preD) {
preD = document.createElement('pre');
+79
View File
@@ -894,6 +894,8 @@
<div id="active-tasks-bar" class="active-tasks-bar"></div>
<div id="chat-messages" class="chat-messages"></div>
<div id="chat-input-container" class="chat-input-container">
<div class="chat-input-primary-row">
<div class="chat-input-leading">
<div class="role-selector-wrapper">
<button id="role-selector-btn" class="role-selector-btn" onclick="toggleRoleSelectionPanel()" data-i18n="chat.selectRole" data-i18n-attr="title" title="选择角色">
<span id="role-selector-icon" class="role-selector-icon">🔵</span>
@@ -979,6 +981,50 @@
</div>
<input type="hidden" id="agent-mode-select" value="react" autocomplete="off">
</div>
<div id="chat-reasoning-wrapper" class="chat-reasoning-wrapper" style="display: none;">
<div class="chat-reasoning-inner">
<button type="button" id="chat-reasoning-btn" class="role-selector-btn chat-reasoning-btn" onclick="toggleChatReasoningPanel()" aria-expanded="false" aria-haspopup="dialog" aria-controls="chat-reasoning-panel" data-i18n="chat.reasoningCompactAria" data-i18n-attr="aria-label,title" data-i18n-skip-text="true" aria-label="模型推理选项" title="模型推理选项">
<span class="chat-reasoning-btn-icon" aria-hidden="true">🔎</span>
<span id="chat-reasoning-summary" class="role-selector-text chat-reasoning-btn-summary"></span>
<svg class="role-selector-arrow" width="10" height="10" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg" aria-hidden="true">
<path d="M6 9l6 6 6-6" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
</button>
<div id="chat-reasoning-panel" class="chat-reasoning-panel" style="display: none;" role="dialog" aria-labelledby="chat-reasoning-panel-title">
<div class="role-selection-panel-header chat-reasoning-panel-header">
<h3 id="chat-reasoning-panel-title" class="role-selection-panel-title" data-i18n="chat.reasoningPanelTitle">模型推理</h3>
<button type="button" class="role-selection-panel-close" onclick="closeChatReasoningPanel()" data-i18n="common.close" data-i18n-attr="title" title="关闭">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M18 6L6 18M6 6l12 12" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
</button>
</div>
<p class="chat-reasoning-panel-hint" data-i18n="chat.reasoningPanelHint">仅 Eino 请求生效,与系统设置中的默认值合并。</p>
<div class="chat-reasoning-fields">
<div class="chat-reasoning-field">
<label class="chat-reasoning-field-label" for="chat-reasoning-mode"><span data-i18n="chat.reasoningModeLabel">模式</span></label>
<select id="chat-reasoning-mode" class="chat-reasoning-select" onchange="persistChatReasoningPrefs()">
<option value="default" data-i18n="chat.reasoningModeDefault">跟随系统</option>
<option value="off" data-i18n="chat.reasoningModeOff">关闭</option>
<option value="on" data-i18n="chat.reasoningModeOn">开启</option>
<option value="auto" data-i18n="chat.reasoningModeAuto">自动</option>
</select>
</div>
<div class="chat-reasoning-field">
<label class="chat-reasoning-field-label" for="chat-reasoning-effort"><span data-i18n="chat.reasoningEffortLabel">推理强度</span></label>
<select id="chat-reasoning-effort" class="chat-reasoning-select" onchange="persistChatReasoningPrefs()">
<option value="" data-i18n="chat.reasoningEffortUnset">不指定</option>
<option value="low">low</option>
<option value="medium">medium</option>
<option value="high">high</option>
<option value="max">max</option>
</select>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="chat-input-with-files">
<div id="chat-file-list" class="chat-file-list" aria-label="已选文件列表"></div>
<div id="chat-attachment-progress" class="chat-upload-progress-row" hidden role="status" aria-live="polite">
@@ -1002,6 +1048,7 @@
<path d="M5 12h14M12 5l7 7-7 7" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
</svg>
</button>
</div>
</div>
</div>
</div>
@@ -1989,6 +2036,38 @@
<input type="number" id="openai-max-total-tokens" data-i18n="settingsBasic.maxTotalTokensPlaceholder" data-i18n-attr="placeholder" placeholder="120000" min="1000" step="1000" />
<small style="color: var(--text-muted, #718096); font-size: 0.75rem;" data-i18n="settingsBasic.maxTotalTokensHint">内存压缩和攻击链构建共用此配置,默认 120000</small>
</div>
<div class="form-group">
<label data-i18n="settingsBasic.openaiReasoningTitle">模型推理(Eino</label>
<small class="form-hint" data-i18n="settingsBasic.openaiReasoningHint">仅影响 Eino 单代理与多代理;对话页可覆盖(见下方「允许对话覆盖」)。</small>
<div style="display: flex; flex-wrap: wrap; gap: 10px; margin-top: 8px; align-items: center;">
<label for="openai-reasoning-mode" style="font-size: 0.8125rem;" data-i18n="chat.reasoningModeLabel">模式</label>
<select id="openai-reasoning-mode" style="min-width: 120px; padding: 0.35rem 0.5rem; border-radius: 6px; border: 1px solid var(--border-color, #e2e8f0);">
<option value="auto" data-i18n="chat.reasoningModeAuto">自动</option>
<option value="on" data-i18n="chat.reasoningModeOn">开启</option>
<option value="off" data-i18n="chat.reasoningModeOff">关闭</option>
</select>
<label for="openai-reasoning-effort" style="font-size: 0.8125rem;" data-i18n="chat.reasoningEffortLabel">强度</label>
<select id="openai-reasoning-effort" style="min-width: 100px; padding: 0.35rem 0.5rem; border-radius: 6px; border: 1px solid var(--border-color, #e2e8f0);">
<option value="" data-i18n="chat.reasoningEffortUnset">不指定</option>
<option value="low">low</option>
<option value="medium">medium</option>
<option value="high">high</option>
<option value="max">max</option>
</select>
<label for="openai-reasoning-profile" style="font-size: 0.8125rem;" data-i18n="settingsBasic.openaiReasoningProfile">线路</label>
<select id="openai-reasoning-profile" style="min-width: 140px; padding: 0.35rem 0.5rem; border-radius: 6px; border: 1px solid var(--border-color, #e2e8f0);">
<option value="auto">auto</option>
<option value="deepseek_compat">deepseek_compat</option>
<option value="openai_compat">openai_compat</option>
<option value="output_config_effort">output_config_effort</option>
</select>
</div>
<label class="checkbox-label" style="margin-top: 8px;">
<input type="checkbox" id="openai-reasoning-allow-client" class="modern-checkbox" checked />
<span class="checkbox-custom"></span>
<span class="checkbox-text" data-i18n="settingsBasic.openaiReasoningAllowClient">允许对话页覆盖推理选项</span>
</label>
</div>
<div style="display: flex; align-items: center; gap: 8px; margin-top: 2px;">
<a href="javascript:void(0)" id="test-openai-btn" onclick="testOpenAIConnection()" style="font-size: 0.8125rem; color: var(--accent-color, #3182ce); text-decoration: none; cursor: pointer; user-select: none;" data-i18n="settingsBasic.testConnection">测试连接</a>
<span id="test-openai-result" style="font-size: 0.8125rem;"></span>