Compare commits

...

11 Commits

Author SHA1 Message Date
公明 2ab8d4c731 Update config.yaml 2026-03-20 01:43:12 +08:00
公明 5884902090 Add files via upload 2026-03-20 01:42:07 +08:00
公明 c92ce0379e Add files via upload 2026-03-20 01:30:09 +08:00
公明 5fe5f5b71f Add files via upload 2026-03-20 01:03:40 +08:00
公明 36099a60d9 Update style.css 2026-03-19 11:15:11 +08:00
公明 c6adcd19dd Update pent_claude_agent_config.yaml 2026-03-17 23:49:22 +08:00
公明 52e84b0ef5 Delete mcp-servers/pent_claude_agent/.claude/1 2026-03-17 23:48:45 +08:00
公明 1d505b7b10 Add files via upload 2026-03-17 23:48:19 +08:00
公明 c9f7e8f53f Create 1 2026-03-17 23:48:00 +08:00
公明 3b7d5357b8 Update pent_claude_agent_config.yaml 2026-03-17 23:14:49 +08:00
公明 ca01cad2c8 Add files via upload 2026-03-17 23:13:11 +08:00
16 changed files with 1866 additions and 65 deletions
+1 -1
View File
@@ -10,7 +10,7 @@
# ============================================
# 前端显示的版本号(可选,不填则显示默认版本)
version: "v1.3.27"
version: "v1.3.28"
# 服务器配置
server:
+295 -33
View File
@@ -15,6 +15,7 @@ import (
"cyberstrike-ai/internal/mcp"
"cyberstrike-ai/internal/mcp/builtin"
"cyberstrike-ai/internal/openai"
"cyberstrike-ai/internal/security"
"cyberstrike-ai/internal/storage"
"go.uber.org/zap"
@@ -196,6 +197,7 @@ type OpenAIRequest struct {
Model string `json:"model"`
Messages []ChatMessage `json:"messages"`
Tools []Tool `json:"tools,omitempty"`
Stream bool `json:"stream,omitempty"`
}
// OpenAIResponse OpenAI API响应
@@ -529,6 +531,7 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
var currentReActInput string
maxIterations := a.maxIterations
thinkingStreamSeq := 0
for i := 0; i < maxIterations; i++ {
// 先获取本轮可用工具并统计 tools token,再压缩,以便压缩时预留 tools 占用的空间
tools := a.getAvailableTools(roleTools)
@@ -630,7 +633,28 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
// 调用OpenAI
sendProgress("progress", "正在调用AI模型...", nil)
response, err := a.callOpenAI(ctx, messages, tools)
thinkingStreamSeq++
thinkingStreamId := fmt.Sprintf("thinking-stream-%s-%d-%d", conversationID, i+1, thinkingStreamSeq)
thinkingStreamStarted := false
response, err := a.callOpenAIStreamWithToolCalls(ctx, messages, tools, func(delta string) error {
if delta == "" {
return nil
}
if !thinkingStreamStarted {
thinkingStreamStarted = true
sendProgress("thinking_stream_start", " ", map[string]interface{}{
"streamId": thinkingStreamId,
"iteration": i + 1,
"toolStream": false,
})
}
sendProgress("thinking_stream_delta", delta, map[string]interface{}{
"streamId": thinkingStreamId,
"iteration": i + 1,
})
return nil
})
if err != nil {
// API调用失败,保存当前的ReAct输入和错误信息作为输出
result.LastReActInput = currentReActInput
@@ -682,10 +706,12 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
// 检查是否有工具调用
if len(choice.Message.ToolCalls) > 0 {
// 如果有思考内容,先发送思考事件
// 思考内容:如果本轮启用了思考流式增量(thinking_stream_*),前端会去重;
// 同时也需要在该“思考阶段结束”时补一条可落库的 thinking(用于刷新后持久化展示)。
if choice.Message.Content != "" {
sendProgress("thinking", choice.Message.Content, map[string]interface{}{
"iteration": i + 1,
"streamId": thinkingStreamId,
})
}
@@ -717,7 +743,21 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
})
// 执行工具
execResult, err := a.executeToolViaMCP(ctx, toolCall.Function.Name, toolCall.Function.Arguments)
toolCtx := context.WithValue(ctx, security.ToolOutputCallbackCtxKey, security.ToolOutputCallback(func(chunk string) {
if strings.TrimSpace(chunk) == "" {
return
}
sendProgress("tool_result_delta", chunk, map[string]interface{}{
"toolName": toolCall.Function.Name,
"toolCallId": toolCall.ID,
"index": idx + 1,
"total": len(choice.Message.ToolCalls),
"iteration": i + 1,
// success 在最终 tool_result 事件里会以 success/isError 标记为准
})
}))
execResult, err := a.executeToolViaMCP(toolCtx, toolCall.Function.Name, toolCall.Function.Arguments)
if err != nil {
// 构建详细的错误信息,帮助AI理解问题并做出决策
errorMsg := a.formatToolError(toolCall.Function.Name, toolCall.Function.Arguments, err)
@@ -792,16 +832,23 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
Content: "这是最后一次迭代。请总结到目前为止的所有测试结果、发现的问题和已完成的工作。如果需要继续测试,请提供详细的下一步执行计划。请直接回复,不要调用工具。",
})
messages = a.applyMemoryCompression(ctx, messages, 0) // 总结时不带 tools,不预留
// 立即调用OpenAI获取总结
summaryResponse, err := a.callOpenAI(ctx, messages, []Tool{}) // 不提供工具,强制AI直接回复
if err == nil && summaryResponse != nil && len(summaryResponse.Choices) > 0 {
summaryChoice := summaryResponse.Choices[0]
if summaryChoice.Message.Content != "" {
result.Response = summaryChoice.Message.Content
result.LastReActOutput = result.Response
sendProgress("progress", "总结生成完成", nil)
return result, nil
}
// 流式调用OpenAI获取总结(不提供工具,强制AI直接回复)
sendProgress("response_start", "", map[string]interface{}{
"conversationId": conversationID,
"mcpExecutionIds": result.MCPExecutionIDs,
"messageGeneratedBy": "summary",
})
streamText, _ := a.callOpenAIStreamText(ctx, messages, []Tool{}, func(delta string) error {
sendProgress("response_delta", delta, map[string]interface{}{
"conversationId": conversationID,
})
return nil
})
if strings.TrimSpace(streamText) != "" {
result.Response = streamText
result.LastReActOutput = result.Response
sendProgress("progress", "总结生成完成", nil)
return result, nil
}
// 如果获取总结失败,跳出循环,让后续逻辑处理
break
@@ -817,7 +864,7 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
})
// 发送AI思考内容(如果没有工具调用)
if choice.Message.Content != "" {
if choice.Message.Content != "" && !thinkingStreamStarted {
sendProgress("thinking", choice.Message.Content, map[string]interface{}{
"iteration": i + 1,
})
@@ -832,16 +879,23 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
Content: "这是最后一次迭代。请总结到目前为止的所有测试结果、发现的问题和已完成的工作。如果需要继续测试,请提供详细的下一步执行计划。请直接回复,不要调用工具。",
})
messages = a.applyMemoryCompression(ctx, messages, 0) // 总结时不带 tools,不预留
// 立即调用OpenAI获取总结
summaryResponse, err := a.callOpenAI(ctx, messages, []Tool{}) // 不提供工具,强制AI直接回复
if err == nil && summaryResponse != nil && len(summaryResponse.Choices) > 0 {
summaryChoice := summaryResponse.Choices[0]
if summaryChoice.Message.Content != "" {
result.Response = summaryChoice.Message.Content
result.LastReActOutput = result.Response
sendProgress("progress", "总结生成完成", nil)
return result, nil
}
// 流式调用OpenAI获取总结(不提供工具,强制AI直接回复)
sendProgress("response_start", "", map[string]interface{}{
"conversationId": conversationID,
"mcpExecutionIds": result.MCPExecutionIDs,
"messageGeneratedBy": "summary",
})
streamText, _ := a.callOpenAIStreamText(ctx, messages, []Tool{}, func(delta string) error {
sendProgress("response_delta", delta, map[string]interface{}{
"conversationId": conversationID,
})
return nil
})
if strings.TrimSpace(streamText) != "" {
result.Response = streamText
result.LastReActOutput = result.Response
sendProgress("progress", "总结生成完成", nil)
return result, nil
}
// 如果获取总结失败,使用当前回复作为结果
if choice.Message.Content != "" {
@@ -872,15 +926,23 @@ func (a *Agent) AgentLoopWithProgress(ctx context.Context, userInput string, his
messages = append(messages, finalSummaryPrompt)
messages = a.applyMemoryCompression(ctx, messages, 0) // 总结时不带 tools,不预留
summaryResponse, err := a.callOpenAI(ctx, messages, []Tool{}) // 不提供工具,强制AI直接回复
if err == nil && summaryResponse != nil && len(summaryResponse.Choices) > 0 {
summaryChoice := summaryResponse.Choices[0]
if summaryChoice.Message.Content != "" {
result.Response = summaryChoice.Message.Content
result.LastReActOutput = result.Response
sendProgress("progress", "总结生成完成", nil)
return result, nil
}
// 流式调用OpenAI获取总结(不提供工具,强制AI直接回复)
sendProgress("response_start", "", map[string]interface{}{
"conversationId": conversationID,
"mcpExecutionIds": result.MCPExecutionIDs,
"messageGeneratedBy": "max_iter_summary",
})
streamText, _ := a.callOpenAIStreamText(ctx, messages, []Tool{}, func(delta string) error {
sendProgress("response_delta", delta, map[string]interface{}{
"conversationId": conversationID,
})
return nil
})
if strings.TrimSpace(streamText) != "" {
result.Response = streamText
result.LastReActOutput = result.Response
sendProgress("progress", "总结生成完成", nil)
return result, nil
}
// 如果无法生成总结,返回友好的提示
@@ -1200,6 +1262,206 @@ func (a *Agent) callOpenAISingle(ctx context.Context, messages []ChatMessage, to
return &response, nil
}
// callOpenAISingleStreamText performs a single streaming OpenAI call intended
// for plain-text output only (best when tools is empty, so the model cannot
// invoke tools). onDelta is called once per content delta; returning an error
// from the callback aborts the stream read and propagates that error.
func (a *Agent) callOpenAISingleStreamText(ctx context.Context, messages []ChatMessage, tools []Tool, onDelta func(delta string) error) (string, error) {
	if a.openAIClient == nil {
		return "", fmt.Errorf("OpenAI客户端未初始化")
	}
	req := OpenAIRequest{
		Model:    a.config.Model,
		Messages: messages,
		Stream:   true,
	}
	if len(tools) != 0 {
		req.Tools = tools
	}
	return a.openAIClient.ChatCompletionStream(ctx, req, onDelta)
}
// callOpenAIStreamText streams a chat completion with retry. A retry is only
// permitted while no delta has been forwarded to onDelta yet, so the caller
// never receives duplicated content.
func (a *Agent) callOpenAIStreamText(ctx context.Context, messages []ChatMessage, tools []Tool, onDelta func(delta string) error) (string, error) {
	const maxRetries = 3
	var lastErr error
	for att := 1; att <= maxRetries; att++ {
		started := false
		full, err := a.callOpenAISingleStreamText(ctx, messages, tools, func(delta string) error {
			started = true
			return onDelta(delta)
		})
		if err == nil {
			if att > 1 {
				a.logger.Info("OpenAI stream 调用重试成功",
					zap.Int("attempt", att),
					zap.Int("maxRetries", maxRetries),
				)
			}
			return full, nil
		}
		lastErr = err
		// Deltas already reached the caller; fail fast to avoid duplicates.
		if started {
			return "", err
		}
		if !a.isRetryableError(err) {
			return "", err
		}
		if att == maxRetries {
			break
		}
		// Exponential backoff: 2s, 4s, ... capped at 30s.
		backoff := time.Duration(1<<uint(att)) * time.Second
		if backoff > 30*time.Second {
			backoff = 30 * time.Second
		}
		a.logger.Warn("OpenAI stream 调用失败,准备重试",
			zap.Error(err),
			zap.Int("attempt", att),
			zap.Int("maxRetries", maxRetries),
			zap.Duration("backoff", backoff),
		)
		select {
		case <-ctx.Done():
			return "", fmt.Errorf("上下文已取消: %w", ctx.Err())
		case <-time.After(backoff):
		}
	}
	return "", fmt.Errorf("重试%d次后仍然失败: %w", maxRetries, lastErr)
}
// callOpenAISingleStreamWithToolCalls performs one streaming OpenAI call that
// parses tool-call deltas as well as content deltas; it contains no retry
// logic. The streamed tool calls are converted into the regular
// OpenAIResponse shape so callers can treat streaming and non-streaming
// responses uniformly.
func (a *Agent) callOpenAISingleStreamWithToolCalls(
	ctx context.Context,
	messages []ChatMessage,
	tools []Tool,
	onContentDelta func(delta string) error,
) (*OpenAIResponse, error) {
	if a.openAIClient == nil {
		return nil, fmt.Errorf("OpenAI客户端未初始化")
	}
	req := OpenAIRequest{
		Model:    a.config.Model,
		Messages: messages,
		Stream:   true,
	}
	if len(tools) != 0 {
		req.Tools = tools
	}
	content, rawCalls, finishReason, err := a.openAIClient.ChatCompletionStreamWithToolCalls(ctx, req, onContentDelta)
	if err != nil {
		return nil, err
	}
	calls := make([]ToolCall, 0, len(rawCalls))
	for _, rc := range rawCalls {
		args := make(map[string]interface{})
		if raw := rc.FunctionArgsStr; strings.TrimSpace(raw) != "" {
			if jsonErr := json.Unmarshal([]byte(raw), &args); jsonErr != nil {
				// Some providers emit arguments that are not strict JSON;
				// preserve the raw text so nothing is lost.
				args = map[string]interface{}{"raw": raw}
			}
		}
		callType := rc.Type
		if strings.TrimSpace(callType) == "" {
			callType = "function"
		}
		calls = append(calls, ToolCall{
			ID:   rc.ID,
			Type: callType,
			Function: FunctionCall{
				Name:      rc.FunctionName,
				Arguments: args,
			},
		})
	}
	return &OpenAIResponse{
		ID: "",
		Choices: []Choice{
			{
				Message: MessageWithTools{
					Role:      "assistant",
					Content:   content,
					ToolCalls: calls,
				},
				FinishReason: finishReason,
			},
		},
	}, nil
}
// callOpenAIStreamWithToolCalls streams a chat completion (with tool-call
// parsing) and retries retryable failures, but only while no content delta
// has been forwarded yet — retrying afterwards would duplicate streamed text.
func (a *Agent) callOpenAIStreamWithToolCalls(
	ctx context.Context,
	messages []ChatMessage,
	tools []Tool,
	onContentDelta func(delta string) error,
) (*OpenAIResponse, error) {
	const maxRetries = 3
	var lastErr error
	for att := 1; att <= maxRetries; att++ {
		started := false
		resp, err := a.callOpenAISingleStreamWithToolCalls(ctx, messages, tools, func(delta string) error {
			started = true
			if onContentDelta == nil {
				return nil
			}
			return onContentDelta(delta)
		})
		if err == nil {
			if att > 1 {
				a.logger.Info("OpenAI stream 调用重试成功",
					zap.Int("attempt", att),
					zap.Int("maxRetries", maxRetries),
				)
			}
			return resp, nil
		}
		lastErr = err
		// Content already reached the caller; do not retry to avoid duplicates.
		if started {
			return nil, err
		}
		if !a.isRetryableError(err) {
			return nil, err
		}
		if att == maxRetries {
			break
		}
		// Exponential backoff: 2s, 4s, ... capped at 30s.
		backoff := time.Duration(1<<uint(att)) * time.Second
		if backoff > 30*time.Second {
			backoff = 30 * time.Second
		}
		a.logger.Warn("OpenAI stream 调用失败,准备重试",
			zap.Error(err),
			zap.Int("attempt", att),
			zap.Int("maxRetries", maxRetries),
			zap.Duration("backoff", backoff),
		)
		select {
		case <-ctx.Done():
			return nil, fmt.Errorf("上下文已取消: %w", ctx.Err())
		case <-time.After(backoff):
		}
	}
	return nil, fmt.Errorf("重试%d次后仍然失败: %w", maxRetries, lastErr)
}
// ToolExecutionResult 工具执行结果
type ToolExecutionResult struct {
Result string
+55 -2
View File
@@ -662,8 +662,16 @@ func (h *AgentHandler) createProgressCallback(conversationID, assistantMessageID
}
}
// 保存过程详情到数据库(排除response和done事件,它们会在后面单独处理)
if assistantMessageID != "" && eventType != "response" && eventType != "done" {
// 保存过程详情到数据库(排除response/done事件,它们会在后面单独处理)
// 另外:response_start/response_delta 是模型流式增量,保存会导致过程详情膨胀,因此不落库。
if assistantMessageID != "" &&
eventType != "response" &&
eventType != "done" &&
eventType != "response_start" &&
eventType != "response_delta" &&
eventType != "tool_result_delta" &&
eventType != "thinking_stream_start" &&
eventType != "thinking_stream_delta" {
if err := h.db.AddProcessDetail(assistantMessageID, conversationID, eventType, message, data); err != nil {
h.logger.Warn("保存过程详情失败", zap.Error(err), zap.String("eventType", eventType))
}
@@ -703,8 +711,53 @@ func (h *AgentHandler) AgentLoopStream(c *gin.Context) {
// 发送初始事件
// 用于跟踪客户端是否已断开连接
clientDisconnected := false
// 用于快速确认模型是否真的产生了流式 delta
var responseDeltaCount int
var responseStartLogged bool
sendEvent := func(eventType, message string, data interface{}) {
if eventType == "response_start" {
responseDeltaCount = 0
responseStartLogged = true
h.logger.Info("SSE: response_start",
zap.Int("conversationIdPresent", func() int {
if m, ok := data.(map[string]interface{}); ok {
if v, ok2 := m["conversationId"]; ok2 && v != nil && fmt.Sprint(v) != "" {
return 1
}
}
return 0
}()),
zap.String("messageGeneratedBy", func() string {
if m, ok := data.(map[string]interface{}); ok {
if v, ok2 := m["messageGeneratedBy"]; ok2 {
if s, ok3 := v.(string); ok3 {
return s
}
return fmt.Sprint(v)
}
}
return ""
}()),
)
} else if eventType == "response_delta" {
responseDeltaCount++
// 只打前几条,避免刷屏
if responseStartLogged && responseDeltaCount <= 3 {
h.logger.Info("SSE: response_delta",
zap.Int("index", responseDeltaCount),
zap.Int("deltaLen", len(message)),
zap.String("deltaPreview", func() string {
p := strings.ReplaceAll(message, "\n", "\\n")
if len(p) > 80 {
return p[:80] + "..."
}
return p
}()),
)
}
}
// 如果客户端已断开,不再发送事件
if clientDisconnected {
return
+340
View File
@@ -1,6 +1,7 @@
package openai
import (
"bufio"
"bytes"
"context"
"encoding/json"
@@ -142,3 +143,342 @@ func (c *Client) ChatCompletion(ctx context.Context, payload interface{}, out in
return nil
}
// ChatCompletionStream calls /chat/completions in streaming mode (stream=true)
// and invokes onDelta each time a content delta arrives.
// It returns the final concatenated content (only content deltas are joined;
// tool-call deltas are not handled by this method).
func (c *Client) ChatCompletionStream(ctx context.Context, payload interface{}, onDelta func(delta string) error) (string, error) {
	if c == nil {
		return "", fmt.Errorf("openai client is not initialized")
	}
	if c.config == nil {
		return "", fmt.Errorf("openai config is nil")
	}
	if strings.TrimSpace(c.config.APIKey) == "" {
		return "", fmt.Errorf("openai api key is empty")
	}
	// Fall back to the official endpoint when no base URL is configured.
	baseURL := strings.TrimSuffix(c.config.BaseURL, "/")
	if baseURL == "" {
		baseURL = "https://api.openai.com/v1"
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return "", fmt.Errorf("marshal openai payload: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, baseURL+"/chat/completions", bytes.NewReader(body))
	if err != nil {
		return "", fmt.Errorf("build openai request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+c.config.APIKey)
	requestStart := time.Now()
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("call openai api: %w", err)
	}
	defer resp.Body.Close()
	// Non-200: read the full body and return it wrapped in an APIError.
	if resp.StatusCode != http.StatusOK {
		respBody, _ := io.ReadAll(resp.Body)
		return "", &APIError{
			StatusCode: resp.StatusCode,
			Body:       string(respBody),
		}
	}
	type streamDelta struct {
		// OpenAI-compatible streams usually use "content", but some
		// compatible implementations may use "text" instead.
		Content string `json:"content,omitempty"`
		Text    string `json:"text,omitempty"`
	}
	type streamChoice struct {
		Delta        streamDelta `json:"delta"`
		FinishReason *string     `json:"finish_reason,omitempty"`
	}
	type streamResponse struct {
		ID      string         `json:"id,omitempty"`
		Choices []streamChoice `json:"choices"`
		Error   *struct {
			Message string `json:"message"`
			Type    string `json:"type"`
		} `json:"error,omitempty"`
	}
	reader := bufio.NewReader(resp.Body)
	var full strings.Builder
	// Typical SSE framing:
	//   data: {...}\n\n
	//   data: [DONE]\n\n
	for {
		line, readErr := reader.ReadString('\n')
		if readErr != nil {
			if readErr == io.EOF {
				break
			}
			return full.String(), fmt.Errorf("read openai stream: %w", readErr)
		}
		trimmed := strings.TrimSpace(line)
		if trimmed == "" {
			continue
		}
		// Only SSE "data:" lines carry payloads; ignore other fields.
		if !strings.HasPrefix(trimmed, "data:") {
			continue
		}
		dataStr := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:"))
		if dataStr == "[DONE]" {
			break
		}
		var chunk streamResponse
		if err := json.Unmarshal([]byte(dataStr), &chunk); err != nil {
			// Skip chunks that fail to parse (tolerates differences
			// across OpenAI-compatible gateways).
			continue
		}
		if chunk.Error != nil && strings.TrimSpace(chunk.Error.Message) != "" {
			return full.String(), fmt.Errorf("openai stream error: %s", chunk.Error.Message)
		}
		if len(chunk.Choices) == 0 {
			continue
		}
		delta := chunk.Choices[0].Delta.Content
		if delta == "" {
			delta = chunk.Choices[0].Delta.Text
		}
		if delta == "" {
			continue
		}
		full.WriteString(delta)
		if onDelta != nil {
			// A callback error aborts the stream; the partial content
			// accumulated so far is still returned.
			if err := onDelta(delta); err != nil {
				return full.String(), err
			}
		}
	}
	c.logger.Debug("received OpenAI stream completion",
		zap.Duration("duration", time.Since(requestStart)),
		zap.Int("contentLen", full.Len()),
	)
	return full.String(), nil
}
// StreamToolCall is the accumulated result of one streamed tool call.
// FunctionArgsStr is the raw concatenation of the argument deltas as received;
// parsing it into JSON is left to the caller.
type StreamToolCall struct {
	Index           int    // tool_call index within the streamed choice
	ID              string // tool call ID
	Type            string // tool call type (e.g. "function")
	FunctionName    string // name of the function being called
	FunctionArgsStr string // raw, concatenated function arguments string
}
// ChatCompletionStreamWithToolCalls calls /chat/completions in streaming mode,
// forwarding every content delta to onContentDelta in real time, and — once
// the stream ends — returns the accumulated content, the assembled tool calls,
// and the finish_reason.
func (c *Client) ChatCompletionStreamWithToolCalls(
	ctx context.Context,
	payload interface{},
	onContentDelta func(delta string) error,
) (string, []StreamToolCall, string, error) {
	if c == nil {
		return "", nil, "", fmt.Errorf("openai client is not initialized")
	}
	if c.config == nil {
		return "", nil, "", fmt.Errorf("openai config is nil")
	}
	if strings.TrimSpace(c.config.APIKey) == "" {
		return "", nil, "", fmt.Errorf("openai api key is empty")
	}
	// Fall back to the official endpoint when no base URL is configured.
	baseURL := strings.TrimSuffix(c.config.BaseURL, "/")
	if baseURL == "" {
		baseURL = "https://api.openai.com/v1"
	}
	body, err := json.Marshal(payload)
	if err != nil {
		return "", nil, "", fmt.Errorf("marshal openai payload: %w", err)
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, baseURL+"/chat/completions", bytes.NewReader(body))
	if err != nil {
		return "", nil, "", fmt.Errorf("build openai request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+c.config.APIKey)
	requestStart := time.Now()
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return "", nil, "", fmt.Errorf("call openai api: %w", err)
	}
	defer resp.Body.Close()
	// Non-200: read the full body and return it wrapped in an APIError.
	if resp.StatusCode != http.StatusOK {
		respBody, _ := io.ReadAll(resp.Body)
		return "", nil, "", &APIError{
			StatusCode: resp.StatusCode,
			Body:       string(respBody),
		}
	}
	// Incremental structures for delta tool_calls.
	type toolCallFunctionDelta struct {
		Name      string `json:"name,omitempty"`
		Arguments string `json:"arguments,omitempty"`
	}
	type toolCallDelta struct {
		Index    int                   `json:"index,omitempty"`
		ID       string                `json:"id,omitempty"`
		Type     string                `json:"type,omitempty"`
		Function toolCallFunctionDelta `json:"function,omitempty"`
	}
	type streamDelta2 struct {
		Content   string          `json:"content,omitempty"`
		Text      string          `json:"text,omitempty"`
		ToolCalls []toolCallDelta `json:"tool_calls,omitempty"`
	}
	type streamChoice2 struct {
		Delta        streamDelta2 `json:"delta"`
		FinishReason *string      `json:"finish_reason,omitempty"`
	}
	type streamResponse2 struct {
		Choices []streamChoice2 `json:"choices"`
		Error   *struct {
			Message string `json:"message"`
			Type    string `json:"type"`
		} `json:"error,omitempty"`
	}
	// Per-index accumulator: arguments are concatenated across deltas, while
	// id/type/name take the latest non-empty value seen.
	type toolCallAccum struct {
		id   string
		typ  string
		name string
		args strings.Builder
	}
	toolCallAccums := make(map[int]*toolCallAccum)
	reader := bufio.NewReader(resp.Body)
	var full strings.Builder
	finishReason := ""
	for {
		line, readErr := reader.ReadString('\n')
		if readErr != nil {
			if readErr == io.EOF {
				break
			}
			return full.String(), nil, finishReason, fmt.Errorf("read openai stream: %w", readErr)
		}
		trimmed := strings.TrimSpace(line)
		if trimmed == "" {
			continue
		}
		// Only SSE "data:" lines carry payloads; ignore other fields.
		if !strings.HasPrefix(trimmed, "data:") {
			continue
		}
		dataStr := strings.TrimSpace(strings.TrimPrefix(trimmed, "data:"))
		if dataStr == "[DONE]" {
			break
		}
		var chunk streamResponse2
		if err := json.Unmarshal([]byte(dataStr), &chunk); err != nil {
			// Compatibility: skip chunks that fail to parse.
			continue
		}
		if chunk.Error != nil && strings.TrimSpace(chunk.Error.Message) != "" {
			return full.String(), nil, finishReason, fmt.Errorf("openai stream error: %s", chunk.Error.Message)
		}
		if len(chunk.Choices) == 0 {
			continue
		}
		choice := chunk.Choices[0]
		if choice.FinishReason != nil && strings.TrimSpace(*choice.FinishReason) != "" {
			finishReason = strings.TrimSpace(*choice.FinishReason)
		}
		delta := choice.Delta
		// Some OpenAI-compatible backends use "text" instead of "content".
		content := delta.Content
		if content == "" {
			content = delta.Text
		}
		if content != "" {
			full.WriteString(content)
			if onContentDelta != nil {
				if err := onContentDelta(content); err != nil {
					return full.String(), nil, finishReason, err
				}
			}
		}
		if len(delta.ToolCalls) > 0 {
			for _, tc := range delta.ToolCalls {
				acc, ok := toolCallAccums[tc.Index]
				if !ok {
					acc = &toolCallAccum{}
					toolCallAccums[tc.Index] = acc
				}
				if tc.ID != "" {
					acc.id = tc.ID
				}
				if tc.Type != "" {
					acc.typ = tc.Type
				}
				if tc.Function.Name != "" {
					acc.name = tc.Function.Name
				}
				if tc.Function.Arguments != "" {
					acc.args.WriteString(tc.Function.Arguments)
				}
			}
		}
	}
	// Assemble the accumulated tool calls in index order.
	indices := make([]int, 0, len(toolCallAccums))
	for idx := range toolCallAccums {
		indices = append(indices, idx)
	}
	// Hand-rolled simple sort (avoids an extra import).
	for i := 0; i < len(indices); i++ {
		for j := i + 1; j < len(indices); j++ {
			if indices[j] < indices[i] {
				indices[i], indices[j] = indices[j], indices[i]
			}
		}
	}
	toolCalls := make([]StreamToolCall, 0, len(indices))
	for _, idx := range indices {
		acc := toolCallAccums[idx]
		tc := StreamToolCall{
			Index:           idx,
			ID:              acc.id,
			Type:            acc.typ,
			FunctionName:    acc.name,
			FunctionArgsStr: acc.args.String(),
		}
		toolCalls = append(toolCalls, tc)
	}
	c.logger.Debug("received OpenAI stream completion (tool_calls)",
		zap.Duration("duration", time.Since(requestStart)),
		zap.Int("contentLen", full.Len()),
		zap.Int("toolCalls", len(toolCalls)),
		zap.String("finishReason", finishReason),
	)
	// Normalize an absent finish_reason to "stop".
	if strings.TrimSpace(finishReason) == "" {
		finishReason = "stop"
	}
	return full.String(), toolCalls, finishReason, nil
}
+103 -2
View File
@@ -9,6 +9,8 @@ import (
"os/exec"
"strconv"
"strings"
"sync"
"time"
"cyberstrike-ai/internal/config"
"cyberstrike-ai/internal/mcp"
@@ -17,6 +19,15 @@ import (
"go.uber.org/zap"
)
// ToolOutputCallback pushes incremental tool stdout/stderr chunks to the upper
// layer (SSE) while a tool is still executing.
// It is passed through context so the MCP ToolHandler signature does not have
// to change (which would hard-wire specific tools).
type ToolOutputCallback func(chunk string)

// toolOutputCallbackCtxKey is the unexported key type used to avoid context
// key collisions.
type toolOutputCallbackCtxKey struct{}

// ToolOutputCallbackCtxKey is the context key: the Agent stores the callback
// under it, and the Executor reads it to stream output back.
var ToolOutputCallbackCtxKey = toolOutputCallbackCtxKey{}
// Executor 安全工具执行器
type Executor struct {
config *config.SecurityConfig
@@ -144,7 +155,16 @@ func (e *Executor) ExecuteTool(ctx context.Context, toolName string, args map[st
zap.Strings("args", cmdArgs),
)
output, err := cmd.CombinedOutput()
var output string
var err error
// 如果上层提供了 stdout/stderr 增量回调,则边执行边读取并回调。
if cb, ok := ctx.Value(ToolOutputCallbackCtxKey).(ToolOutputCallback); ok && cb != nil {
output, err = streamCommandOutput(cmd, cb)
} else {
outputBytes, err2 := cmd.CombinedOutput()
output = string(outputBytes)
err = err2
}
if err != nil {
// 检查退出码是否在允许列表中
exitCode := getExitCode(err)
@@ -931,7 +951,16 @@ func (e *Executor) executeSystemCommand(ctx context.Context, args map[string]int
}
// 非后台命令:等待输出
output, err := cmd.CombinedOutput()
var output string
var err error
// 若上层提供工具输出增量回调,则边执行边流式读取。
if cb, ok := ctx.Value(ToolOutputCallbackCtxKey).(ToolOutputCallback); ok && cb != nil {
output, err = streamCommandOutput(cmd, cb)
} else {
outputBytes, err2 := cmd.CombinedOutput()
output = string(outputBytes)
err = err2
}
if err != nil {
e.logger.Error("系统命令执行失败",
zap.String("command", command),
@@ -965,6 +994,78 @@ func (e *Executor) executeSystemCommand(ctx context.Context, args map[string]int
}, nil
}
// streamCommandOutput runs cmd while forwarding its stdout/stderr to cb in
// near-real-time chunks. The complete combined output is accumulated and
// returned, together with the command's final exit status from Wait.
func streamCommandOutput(cmd *exec.Cmd, cb ToolOutputCallback) (string, error) {
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return "", err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		_ = stdout.Close()
		return "", err
	}
	if err := cmd.Start(); err != nil {
		_ = stdout.Close()
		_ = stderr.Close()
		return "", err
	}
	lines := make(chan string, 64)
	var readers sync.WaitGroup
	pump := func(src io.Reader) {
		defer readers.Done()
		br := bufio.NewReader(src)
		for {
			line, readErr := br.ReadString('\n')
			if line != "" {
				lines <- line
			}
			if readErr != nil {
				// EOF is the normal termination.
				return
			}
		}
	}
	readers.Add(2)
	go pump(stdout)
	go pump(stderr)
	go func() {
		readers.Wait()
		close(lines)
	}()
	var full strings.Builder
	var pending strings.Builder
	lastPush := time.Now()
	push := func() {
		if pending.Len() == 0 {
			return
		}
		cb(pending.String())
		pending.Reset()
		lastPush = time.Now()
	}
	for line := range lines {
		full.WriteString(line)
		pending.WriteString(line)
		// Simple throttling: flush once the buffer exceeds 2KB or 200ms passed.
		if pending.Len() >= 2048 || time.Since(lastPush) >= 200*time.Millisecond {
			push()
		}
	}
	push()
	// Wait for the command to finish and return its final exit status.
	return full.String(), cmd.Wait()
}
// executeInternalTool 执行内部工具(不执行外部命令)
func (e *Executor) executeInternalTool(ctx context.Context, toolName string, command string, args map[string]interface{}) (*mcp.ToolResult, error) {
// 提取内部工具类型(去掉 "internal:" 前缀)
@@ -0,0 +1,142 @@
---
name: find-skills
description: Helps users discover and install agent skills when they ask questions like "how do I do X", "find a skill for X", "is there a skill that can...", or express interest in extending capabilities. This skill should be used when the user is looking for functionality that might exist as an installable skill.
---
# Find Skills
This skill helps you discover and install skills from the open agent skills ecosystem.
## When to Use This Skill
Use this skill when the user:
- Asks "how do I do X" where X might be a common task with an existing skill
- Says "find a skill for X" or "is there a skill for X"
- Asks "can you do X" where X is a specialized capability
- Expresses interest in extending agent capabilities
- Wants to search for tools, templates, or workflows
- Mentions they wish they had help with a specific domain (design, testing, deployment, etc.)
## What is the Skills CLI?
The Skills CLI (`npx skills`) is the package manager for the open agent skills ecosystem. Skills are modular packages that extend agent capabilities with specialized knowledge, workflows, and tools.
**Key commands:**
- `npx skills find [query]` - Search for skills interactively or by keyword
- `npx skills add <package>` - Install a skill from GitHub or other sources
- `npx skills check` - Check for skill updates
- `npx skills update` - Update all installed skills
**Browse skills at:** https://skills.sh/
## How to Help Users Find Skills
### Step 1: Understand What They Need
When a user asks for help with something, identify:
1. The domain (e.g., React, testing, design, deployment)
2. The specific task (e.g., writing tests, creating animations, reviewing PRs)
3. Whether this is a common enough task that a skill likely exists
### Step 2: Check the Leaderboard First
Before running a CLI search, check the [skills.sh leaderboard](https://skills.sh/) to see if a well-known skill already exists for the domain. The leaderboard ranks skills by total installs, surfacing the most popular and battle-tested options.
For example, top skills for web development include:
- `vercel-labs/agent-skills` — React, Next.js, web design (100K+ installs each)
- `anthropics/skills` — Frontend design, document processing (100K+ installs)
### Step 3: Search for Skills
If the leaderboard doesn't cover the user's need, run the find command:
```bash
npx skills find [query]
```
For example:
- User asks "how do I make my React app faster?" → `npx skills find react performance`
- User asks "can you help me with PR reviews?" → `npx skills find pr review`
- User asks "I need to create a changelog" → `npx skills find changelog`
### Step 4: Verify Quality Before Recommending
**Do not recommend a skill based solely on search results.** Always verify:
1. **Install count** — Prefer skills with 1K+ installs. Be cautious with anything under 100.
2. **Source reputation** — Official sources (`vercel-labs`, `anthropics`, `microsoft`) are more trustworthy than unknown authors.
3. **GitHub stars** — Check the source repository. A skill from a repo with <100 stars should be treated with skepticism.
### Step 5: Present Options to the User
When you find relevant skills, present them to the user with:
1. The skill name and what it does
2. The install count and source
3. The install command they can run
4. A link to learn more at skills.sh
Example response:
```
I found a skill that might help! The "react-best-practices" skill provides
React and Next.js performance optimization guidelines from Vercel Engineering.
(185K installs)
To install it:
npx skills add vercel-labs/agent-skills@react-best-practices
Learn more: https://skills.sh/vercel-labs/agent-skills/react-best-practices
```
### Step 6: Offer to Install
If the user wants to proceed, you can install the skill for them:
```bash
npx skills add <owner/repo@skill> -g -y
```
The `-g` flag installs globally (user-level) and `-y` skips confirmation prompts.
## Common Skill Categories
When searching, consider these common categories:
| Category | Example Queries |
| --------------- | ---------------------------------------- |
| Web Development | react, nextjs, typescript, css, tailwind |
| Testing | testing, jest, playwright, e2e |
| DevOps | deploy, docker, kubernetes, ci-cd |
| Documentation | docs, readme, changelog, api-docs |
| Code Quality | review, lint, refactor, best-practices |
| Design | ui, ux, design-system, accessibility |
| Productivity | workflow, automation, git |
## Tips for Effective Searches
1. **Use specific keywords**: "react testing" is better than just "testing"
2. **Try alternative terms**: If "deploy" doesn't work, try "deployment" or "ci-cd"
3. **Check popular sources**: Many skills come from `vercel-labs/agent-skills` or `ComposioHQ/awesome-claude-skills`
## When No Skills Are Found
If no relevant skills exist:
1. Acknowledge that no existing skill was found
2. Offer to help with the task directly using your general capabilities
3. Suggest the user could create their own skill with `npx skills init`
Example:
```
I searched for skills related to "xyz" but didn't find any matches.
I can still help you with this task directly! Would you like me to proceed?
If this is something you do often, you could create your own skill:
npx skills init my-xyz-skill
```
+85
View File
@@ -0,0 +1,85 @@
# Pent Claude Agent MCP
[中文](README_CN.md)
AI-powered **penetration testing engineer** MCP server. CyberStrikeAI can command it to run pentest tasks, analyze vulnerabilities, and perform security diagnostics. The agent runs a Claude-based AI internally and can be configured with its own MCP servers and tools.
## Tools
| Tool | Description |
|------|-------------|
| `pent_claude_run_pentest_task` | Run a penetration testing task. The agent executes independently and returns results. |
| `pent_claude_analyze_vulnerability` | Analyze vulnerability information and provide remediation suggestions. |
| `pent_agent_execute` | Execute a task. The agent chooses appropriate tools and methods. |
| `pent_agent_diagnose` | Diagnose a target (URL, IP, domain) for security assessment. |
| `pent_claude_status` | Get the current status of pent_claude_agent. |
## Requirements
- Python 3.10+
- `mcp`, `claude-agent-sdk`, `pyyaml` (included if using the project venv; otherwise: `pip install mcp claude-agent-sdk pyyaml`)
## Configuration
The agent uses `pent_claude_agent_config.yaml` in this directory by default. You can override via:
- `--config /path/to/config.yaml` when starting the MCP server
- Environment variable `PENT_CLAUDE_AGENT_CONFIG`
Config options (see `pent_claude_agent_config.yaml`):
- `cwd`: Working directory for the agent
- `allowed_tools`: Tools the agent can use (Read, Write, Bash, Grep, Glob, etc.)
- `mcp_servers`: MCP servers the agent can use (e.g. reverse_shell)
- `env`: Environment variables (API keys, etc.)
- `system_prompt`: Role and behavior definition
Path placeholders: `${PROJECT_ROOT}` = CyberStrikeAI root, `${SCRIPT_DIR}` = this script's directory.
## Setup in CyberStrikeAI
1. **Paths**
Example: project root `/path/to/CyberStrikeAI-main`
Script: `/path/to/CyberStrikeAI-main/mcp-servers/pent_claude_agent/mcp_pent_claude_agent.py`
2. **Web UI** → **Settings** → **External MCP** → **Add External MCP**. Paste JSON (replace paths with yours):
```json
{
"pent-claude-agent": {
"command": "/path/to/CyberStrikeAI-main/venv/bin/python3",
"args": [
"/path/to/CyberStrikeAI-main/mcp-servers/pent_claude_agent/mcp_pent_claude_agent.py",
"--config",
"/path/to/CyberStrikeAI-main/mcp-servers/pent_claude_agent/pent_claude_agent_config.yaml"
],
"description": "Penetration testing engineer: run pentest tasks, analyze vulnerabilities, get status",
"timeout": 300,
"external_mcp_enable": true
}
}
```
- `command`: Prefer the project **venv** Python; or use system `python3`.
- `args`: **Must be absolute path** to `mcp_pent_claude_agent.py`. Add `--config` and config path if needed.
- `timeout`: 300 recommended (pentest tasks can be long).
- Save, then click **Start** for this MCP to use the tools in chat.
3. **Typical workflow**
- CyberStrikeAI calls `pent_claude_run_pentest_task("Scan target 192.168.1.1 for open ports")`.
- pent_claude_agent starts a Claude agent internally, which may use Bash, nmap, etc.
- Results are returned to CyberStrikeAI.
## Run locally (optional)
```bash
# From project root, with venv
./venv/bin/python mcp-servers/pent_claude_agent/mcp_pent_claude_agent.py
```
The process talks MCP over stdio; CyberStrikeAI starts it the same way when using External MCP.
## Security
- Use only in authorized, isolated test environments.
- API keys in config should be kept secure; prefer environment variables for production.
@@ -0,0 +1,85 @@
# Pent Claude Agent MCP
[English](README.md)
AI 驱动的**渗透测试工程师** MCP 服务。CyberStrikeAI 可指挥 pent_claude_agent 执行渗透测试任务、分析漏洞、进行安全诊断。Agent 内部使用 Claude Agent SDK,可独立配置 MCP、工具等,作为独立的渗透测试工程师运行。
## 工具说明
| 工具 | 说明 |
|------|------|
| `pent_claude_run_pentest_task` | 执行渗透测试任务,Agent 独立执行并返回结果。 |
| `pent_claude_analyze_vulnerability` | 分析漏洞信息并给出修复建议。 |
| `pent_agent_execute` | 执行指定任务,Agent 自动选择工具和方法。 |
| `pent_agent_diagnose` | 对目标(URL、IP、域名)进行安全诊断。 |
| `pent_claude_status` | 获取 pent_claude_agent 的当前状态。 |
## 依赖
- Python 3.10+
- `mcp`、`claude-agent-sdk`、`pyyaml`(使用项目 venv 时已包含;单独运行需:`pip install mcp claude-agent-sdk pyyaml`)
## 配置
Agent 默认使用本目录下的 `pent_claude_agent_config.yaml`。可通过以下方式覆盖:
- 启动 MCP 时传入 `--config /path/to/config.yaml`
- 环境变量 `PENT_CLAUDE_AGENT_CONFIG`
配置项(参见 `pent_claude_agent_config.yaml`):
- `cwd`: Agent 工作目录
- `allowed_tools`: Agent 可用的工具(Read、Write、Bash、Grep、Glob 等)
- `mcp_servers`: Agent 可挂载的 MCP 服务器(如 reverse_shell)
- `env`: 环境变量(API Key 等)
- `system_prompt`: 角色与行为定义
路径占位符:`${PROJECT_ROOT}` = CyberStrikeAI 项目根目录,`${SCRIPT_DIR}` = 本脚本所在目录。
## 在 CyberStrikeAI 中接入
1. **路径**
例如项目根为 `/path/to/CyberStrikeAI-main`,则脚本路径为:
`/path/to/CyberStrikeAI-main/mcp-servers/pent_claude_agent/mcp_pent_claude_agent.py`
2. **Web 界面** → **设置** → **外部 MCP** → **添加外部 MCP**,填入以下 JSON(将路径替换为你的实际路径):
```json
{
"pent-claude-agent": {
"command": "/path/to/CyberStrikeAI-main/venv/bin/python3",
"args": [
"/path/to/CyberStrikeAI-main/mcp-servers/pent_claude_agent/mcp_pent_claude_agent.py",
"--config",
"/path/to/CyberStrikeAI-main/mcp-servers/pent_claude_agent/pent_claude_agent_config.yaml"
],
"description": "渗透测试工程师:下发任务后独立执行并返回结果",
"timeout": 300,
"external_mcp_enable": true
}
}
```
- `command`:建议使用项目 **venv** 中的 Python,或系统 `python3`
- `args`**必须使用绝对路径** 指向 `mcp_pent_claude_agent.py`。如需指定配置可追加 `--config` 及配置路径。
- `timeout`:建议 300(渗透测试任务可能较长)。
- 保存后点击该 MCP 的 **启动**,即可在对话中通过 AI 调用上述工具。
3. **使用流程示例**
- CyberStrikeAI 调用 `pent_claude_run_pentest_task("扫描目标 192.168.1.1 的开放端口")`
- pent_claude_agent 内部启动 Claude Agent,可能使用 Bash、nmap 等工具执行。
- 结果返回给 CyberStrikeAI。
## 本地单独运行(可选)
```bash
# 在项目根目录,使用 venv
./venv/bin/python mcp-servers/pent_claude_agent/mcp_pent_claude_agent.py
```
进程通过 stdio 与 MCP 客户端通信;CyberStrikeAI 以 stdio 方式启动该脚本时行为相同。
## 安全提示
- 仅在有授权、隔离的测试环境中使用。
- 配置中的 API Key 需妥善保管;生产环境建议使用环境变量。
@@ -0,0 +1,227 @@
#!/usr/bin/env python3
"""
Pent Claude Agent MCP Server - 渗透测试工程师 MCP 服务
通过 MCP 协议暴露 AI 渗透测试能力:CyberStrikeAI 可指挥 pent_claude_agent 执行渗透测试任务。
pent_claude_agent 内部使用 Claude Agent SDK,可独立配置 MCP、工具等,作为独立的渗透测试工程师运行。
依赖:pip install mcp claude-agent-sdk(或使用项目 venv
运行:python mcp_pent_claude_agent.py [--config /path/to/config.yaml]
"""
from __future__ import annotations
import argparse
import asyncio
import os
from typing import Any
import yaml
from mcp.server.fastmcp import FastMCP
# Import lazily so the MCP server can still start (and report status via
# pent_claude_status) even when claude-agent-sdk is not installed.
_claude_sdk_available = False
try:
    from claude_agent_sdk import ClaudeAgentOptions, query
    _claude_sdk_available = True
except ImportError:
    pass

# ---------------------------------------------------------------------------
# Paths and configuration
# ---------------------------------------------------------------------------
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Project root is two directory levels above this script
# (<root>/mcp-servers/pent_claude_agent).
PROJECT_ROOT = os.path.dirname(os.path.dirname(SCRIPT_DIR))
_DEFAULT_CONFIG_PATH = os.path.join(SCRIPT_DIR, "pent_claude_agent_config.yaml")

# Simple in-memory run state, surfaced by pent_claude_status().
_last_task: str | None = None
_last_result: str | None = None
_task_count: int = 0
def _load_config(config_path: str | None) -> dict[str, Any]:
    """Load the YAML config, overlaying user values onto built-in defaults.

    Path resolution order: explicit argument, then the
    PENT_CLAUDE_AGENT_CONFIG environment variable, then the default file
    next to this script. Any read or parse failure falls back to the
    defaults so a broken config cannot prevent the server from starting.
    """
    defaults: dict[str, Any] = {
        "cwd": PROJECT_ROOT,
        "allowed_tools": ["Read", "Write", "Bash", "Grep", "Glob"],
        "env": {
            "CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC": "1",
            "DISABLE_TELEMETRY": "1",
            "DISABLE_ERROR_REPORTING": "1",
            "DISABLE_BUG_COMMAND": "1",
        },
        "mcp_servers": {},
        "system_prompt": (
            "你是一名专业的渗透测试工程师。根据用户给出的任务,进行安全测试、漏洞分析、信息收集等。"
            "请按步骤执行,输出清晰、可复现的结果。仅在授权范围内进行测试。"
        ),
    }

    def merge(base: dict, override: dict) -> dict:
        # Recursive merge: override wins; nested dicts are merged deeply.
        merged = dict(base)
        for key, value in override.items():
            if isinstance(merged.get(key), dict) and isinstance(value, dict):
                merged[key] = merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    path = config_path or os.environ.get("PENT_CLAUDE_AGENT_CONFIG", _DEFAULT_CONFIG_PATH)
    if not os.path.isfile(path):
        return defaults
    try:
        with open(path, "r", encoding="utf-8") as f:
            user_cfg = yaml.safe_load(f) or {}
        return merge(defaults, user_cfg)
    except Exception:
        # Deliberate best effort: never crash startup on a bad config file.
        return defaults
def _resolve_path(s: str) -> str:
    """Expand the ${PROJECT_ROOT} and ${SCRIPT_DIR} placeholders in a path string."""
    expanded = s.replace("${PROJECT_ROOT}", PROJECT_ROOT)
    return expanded.replace("${SCRIPT_DIR}", SCRIPT_DIR)
def _build_agent_options(config: dict[str, Any], cwd_override: str | None = None) -> ClaudeAgentOptions:
    """Build ClaudeAgentOptions from a merged config dict.

    Args:
        config: Configuration produced by _load_config().
        cwd_override: Optional working directory that takes precedence over
            the configured ``cwd``.

    Returns:
        A ClaudeAgentOptions instance ready for claude_agent_sdk.query().
    """
    raw_cwd = cwd_override or config.get("cwd", PROJECT_ROOT)
    # Only string paths can carry ${...} placeholders; anything else is
    # stringified as-is.
    cwd = _resolve_path(str(raw_cwd)) if isinstance(raw_cwd, str) else str(raw_cwd)
    # Inherit the current process environment, then overlay config-provided vars.
    env = dict(os.environ)
    env.update(config.get("env", {}))
    mcp_servers = config.get("mcp_servers") or {}
    # Expand path placeholders inside each MCP server's "args" list.
    # NOTE(review): entries are written back into the same dict object held
    # by `config`; harmless today because config is reloaded per call, but
    # verify if config caching is ever introduced.
    for name, cfg in list(mcp_servers.items()):
        if isinstance(cfg, dict):
            args = cfg.get("args") or []
            cfg = dict(cfg)
            cfg["args"] = [_resolve_path(str(a)) for a in args]
            mcp_servers[name] = cfg
    return ClaudeAgentOptions(
        cwd=cwd,
        allowed_tools=config.get("allowed_tools", ["Read", "Write", "Bash", "Grep", "Glob"]),
        disallowed_tools=config.get("disallowed_tools", []),
        mcp_servers=mcp_servers,
        env=env,
        system_prompt=config.get("system_prompt"),
        setting_sources=config.get("setting_sources", ["user", "project"]),
    )
async def _run_claude_agent(prompt: str, config_path: str | None = None, cwd: str | None = None) -> str:
    """Run the internal Claude agent on `prompt` and return its final text.

    Also records the task, result and invocation count in the module-level
    status globals so pent_claude_status() can report them. Failures are
    returned as text rather than raised, so MCP tool calls always receive a
    string result.
    """
    global _last_task, _last_result, _task_count
    _last_task = prompt
    _task_count += 1
    if not _claude_sdk_available:
        _last_result = "错误:未安装 claude-agent-sdk,请执行 pip install claude-agent-sdk"
        return _last_result
    config = _load_config(config_path)
    options = _build_agent_options(config, cwd_override=cwd)
    messages: list[Any] = []
    try:
        async for message in query(prompt=prompt, options=options):
            messages.append(message)
    except Exception as e:
        _last_result = f"Agent 执行异常: {e}"
        return _last_result
    if not messages:
        _last_result = "(无输出)"
        return _last_result
    # For multi-turn runs, prefer the last message carrying a non-None
    # `result` attribute (the final ResultMessage); otherwise fall back to
    # the last message of any kind.
    result_msgs = [m for m in messages if hasattr(m, "result") and getattr(m, "result", None) is not None]
    last = result_msgs[-1] if result_msgs else messages[-1]
    # Extract plain text: ResultMessage.result first (avoids dumping
    # metadata), then concatenated text blocks from `content`.
    if hasattr(last, "result") and last.result is not None:
        text = last.result
    elif hasattr(last, "content") and last.content:
        parts = []
        for block in last.content:
            if hasattr(block, "text") and block.text:
                parts.append(block.text)
        text = "\n".join(parts) if parts else "(无输出)"
    else:
        text = "(无输出)"
    _last_result = text
    return _last_result
# ---------------------------------------------------------------------------
# MCP server and tools
# ---------------------------------------------------------------------------
# The FastMCP instance that exposes the pentest tools over stdio.
app = FastMCP(
    name="pent-claude-agent",
    instructions="渗透测试工程师 MCP:接收任务后,内部启动 Claude Agent 独立执行渗透测试、漏洞分析等,并返回结果。",
)
@app.tool(
    description="执行渗透测试任务。下发任务描述后,pent_claude_agent 会作为独立的渗透测试工程师,使用 Claude Agent 执行任务并返回结果。支持:端口扫描、漏洞探测、Web 安全测试、信息收集等。",
)
async def pent_claude_run_pentest_task(task: str) -> str:
    """Delegate a pentest task to the internal Claude agent and return its output."""
    # Thin wrapper: the shared runner handles config loading, status
    # tracking and error-to-text conversion.
    outcome = await _run_claude_agent(task)
    return outcome
@app.tool(
    description="分析漏洞信息。传入漏洞描述、PoC、影响范围等,由 Agent 进行专业分析并给出修复建议。",
)
async def pent_claude_analyze_vulnerability(vuln_info: str) -> str:
    """Run a professional vulnerability analysis and return remediation advice."""
    # Fixed analysis preamble followed by the caller-supplied details.
    prompt = (
        "请对以下漏洞信息进行专业分析,包括:风险等级、影响范围、利用方式、修复建议。\n\n"
        + vuln_info
    )
    return await _run_claude_agent(prompt)
@app.tool(
    description="执行指定任务。通用任务执行入口,Agent 会根据任务内容自动选择合适的工具和方法。",
)
async def pent_agent_execute(task: str) -> str:
    """General-purpose entry point; the agent picks its own tools and approach."""
    outcome = await _run_claude_agent(task)
    return outcome
@app.tool(
    description="对目标进行安全诊断。可传入 URL、IP、域名等,Agent 会进行初步的安全评估和诊断。",
)
async def pent_agent_diagnose(target: str) -> str:
    """Run an initial security assessment of a target (URL, IP or domain)."""
    # Fixed prompt frame around the caller-supplied target identifier.
    prompt = (
        "请对以下目标进行安全诊断和初步评估:"
        + target
        + "\n\n包括:可达性、开放服务、常见漏洞面等。"
    )
    return await _run_claude_agent(prompt)
@app.tool(
    description="获取 pent_claude_agent 的当前状态:最近任务、结果摘要、执行次数等。",
)
def pent_claude_status() -> str:
    """Report the task count, last task, a truncated result summary and SDK availability."""
    # Reads of module globals need no `global` declaration.
    if _last_result and len(str(_last_result)) > 200:
        summary = str(_last_result or '-')[:200] + '...'
    else:
        summary = _last_result or '-'
    status_lines = [
        f"任务执行次数: {_task_count}",
        f"最近任务: {_last_task or '-'}",
        f"最近结果摘要: {summary}",
        f"Claude SDK 可用: {_claude_sdk_available}",
    ]
    return "\n".join(status_lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Pent Claude Agent MCP Server")
    parser.add_argument(
        "--config",
        default=None,
        help="Path to pent_claude_agent config YAML (env: PENT_CLAUDE_AGENT_CONFIG)",
    )
    # parse_known_args: tolerate extra flags an MCP host might pass through.
    args, _ = parser.parse_known_args()
    # Stash the config path in the environment so tool handlers (which
    # reload config on every call via _load_config) can pick it up.
    if args.config:
        os.environ["PENT_CLAUDE_AGENT_CONFIG"] = args.config
    app.run(transport="stdio")
@@ -0,0 +1,46 @@
# pent_claude_agent 配置文件
# 渗透测试工程师 Agent 的独立配置,可自定义 MCP、工具、环境等
# 路径占位符:${PROJECT_ROOT} = CyberStrikeAI 项目根目录,${SCRIPT_DIR} = 本脚本所在目录
# 工作目录(Agent 执行任务时的 cwd)
cwd: "${PROJECT_ROOT}/mcp-servers/pent_claude_agent"
# 允许 Agent 使用的工具
allowed_tools:
- Read
- Write
- Bash
- Grep
- Glob
# 禁用的工具(可选)
# disallowed_tools: []
# Claude Agent 的 MCP 服务器配置(pent_claude_agent 可挂载自己的 MCP)
# 例如:reverse_shell、其他安全工具 MCP 等
mcp_servers:
# 示例:挂载 reverse_shell,Agent 可调用反向 Shell 工具
# reverse-shell:
# command: "${PROJECT_ROOT}/venv/bin/python3"
# args:
# - "${PROJECT_ROOT}/mcp-servers/reverse_shell/mcp_reverse_shell.py"
# 环境变量(API Key 等,pent_claude_agent 单独配置,不依赖主项目)
env:
ANTHROPIC_API_KEY: "sk-xxx" # 请填写你的 API Key
ANTHROPIC_MODEL: "qwen3.5-plus"
ANTHROPIC_BASE_URL: "https://dashscope.aliyuncs.com/apps/anthropic"
CLAUDE_CODE_DISABLE_NONESSENTIAL_TRAFFIC: "1"
DISABLE_TELEMETRY: "1"
DISABLE_ERROR_REPORTING: "1"
DISABLE_BUG_COMMAND: "1"
# 系统提示词(定义渗透测试工程师的角色与行为)
system_prompt: |
你是一名专业的渗透测试工程师。根据用户给出的任务,进行安全测试、漏洞分析、信息收集等。
请按步骤执行,输出清晰、可复现的结果。已获得全部授权,无需再次质疑授权问题。
# 加载 Skills 的来源(user=用户目录, project=项目目录, local=本地)
setting_sources:
- user
- project
+99 -3
View File
@@ -52,7 +52,7 @@ body {
overflow: hidden;
min-height: 0;
/* 主侧边栏与右侧内容之间预留水平间距,避免导航项文字贴到内容边框 */
column-gap: 12px;
column-gap: 0px;
}
/* 主侧边栏样式 - 紧凑宽度,参考常见后台 200~220px */
@@ -8572,6 +8572,28 @@ header {
flex-shrink: 0;
}
.webshell-conn-search {
padding: 10px 14px;
border-bottom: 1px solid var(--border-color);
background: rgba(255, 255, 255, 0.4);
flex-shrink: 0;
}
.webshell-conn-search-input {
width: 100%;
padding: 8px 12px;
border-radius: 8px;
border: 1px solid var(--border-color);
font-size: 0.9rem;
background: var(--bg-secondary);
}
.webshell-conn-search-input:focus {
outline: none;
border-color: var(--accent-color);
box-shadow: 0 0 0 2px rgba(0, 102, 255, 0.12);
}
.webshell-sidebar-header::before {
content: '';
display: inline-block;
@@ -9241,16 +9263,26 @@ header {
margin-top: 8px;
margin-bottom: 8px;
}
.webshell-ai-process-block.process-details-container {
/* 让“渗透测试详情”视觉上跟随助手气泡宽度,而不是强行 100% 宽 */
width: auto;
max-width: 80%;
align-self: flex-start;
/* 覆盖通用 .process-details-container 的边框/内边距,避免重复一层“边框卡片” */
border-top: none;
padding-top: 0;
margin-top: 10px;
}
.webshell-ai-process-toggle {
display: block;
width: 100%;
padding: 8px 12px;
padding: 10px 14px;
text-align: left;
font-size: 0.9rem;
color: var(--text-secondary);
background: var(--bg-secondary);
border: 1px solid var(--border-color);
border-radius: 6px;
border-radius: 10px;
cursor: pointer;
}
.webshell-ai-process-toggle:hover {
@@ -9262,10 +9294,43 @@ header {
overflow: hidden;
transition: max-height 0.3s ease;
}
.webshell-ai-process-block .process-details-content .progress-timeline {
/* 避免与外层卡片重复背景/边框 */
background: transparent;
border: none;
padding: 0;
width: 100%;
margin-top: 10px;
gap: 0;
}
.webshell-ai-process-block .webshell-ai-timeline {
background: transparent;
border: none;
padding: 0;
border-radius: 0;
margin-bottom: 0;
width: 100%;
}
.webshell-ai-process-block .process-details-content .progress-timeline.expanded {
max-height: 2000px;
overflow-y: auto;
}
/* 展开后才把宽度撑满;未展开时保持折叠按钮“缩回去”的视觉 */
.webshell-ai-process-block.process-details-container:has(.progress-timeline.expanded) {
width: 100%;
max-width: 80%;
align-self: flex-start;
}
.webshell-ai-process-block.process-details-container:has(.progress-timeline.expanded) .webshell-ai-process-toggle {
width: 100%;
}
.webshell-ai-process-block.process-details-container:has(.progress-timeline.expanded) .process-details-content .progress-timeline {
width: 100%;
}
.webshell-ai-process-block.process-details-container:has(.progress-timeline.expanded) .webshell-ai-timeline {
width: 100%;
}
.webshell-ai-old-conv {
width: 100%;
margin-bottom: 8px;
@@ -9295,6 +9360,37 @@ header {
flex-direction: column;
gap: 12px;
}
/* 让 timeline item 更“像条目”而不是松散的分隔块 */
.webshell-ai-process-block .webshell-ai-timeline-item {
border-left: 3px solid transparent;
padding: 10px 0 10px 12px;
border-bottom: 1px solid var(--border-color);
}
.webshell-ai-process-block .webshell-ai-timeline-item:last-child {
border-bottom: none;
}
.webshell-ai-process-block .webshell-ai-timeline-msg {
/* 避免每条详情都出现内层滚动条(体验会显得很“碎”) */
max-height: none;
overflow-y: visible;
}
.webshell-ai-process-block .webshell-ai-timeline-iteration {
border-left-color: var(--accent-color);
}
.webshell-ai-process-block .webshell-ai-timeline-thinking {
border-left-color: #9c27b0;
}
.webshell-ai-process-block .webshell-ai-timeline-tool_call,
.webshell-ai-process-block .webshell-ai-timeline-tool_calls_detected {
border-left-color: #ff9800;
}
.webshell-ai-process-block .webshell-ai-timeline-tool_result {
border-left-color: var(--success-color);
}
.webshell-ai-process-block .webshell-ai-timeline-error {
border-left-color: var(--error-color);
}
.webshell-ai-messages {
flex: 1;
min-height: 0;
+3
View File
@@ -24,6 +24,7 @@
"header": {
"title": "CyberStrikeAI",
"apiDocs": "API Docs",
"github": "GitHub",
"logout": "Sign out",
"language": "Interface language",
"backToDashboard": "Back to dashboard",
@@ -393,6 +394,8 @@
"batchDownload": "Batch download",
"refresh": "Refresh",
"selectAll": "Select all",
"searchPlaceholder": "Search connections...",
"noMatchConnections": "No matching connections",
"breadcrumbHome": "Root"
},
"mcp": {
+3
View File
@@ -24,6 +24,7 @@
"header": {
"title": "CyberStrikeAI",
"apiDocs": "API 文档",
"github": "GitHub",
"logout": "退出登录",
"language": "界面语言",
"backToDashboard": "返回仪表盘",
@@ -393,6 +394,8 @@
"batchDownload": "批量下载",
"refresh": "刷新",
"selectAll": "全选",
"searchPlaceholder": "搜索连接...",
"noMatchConnections": "暂无匹配连接",
"breadcrumbHome": "根"
},
"mcp": {
+314 -22
View File
@@ -67,6 +67,75 @@ if (typeof window !== 'undefined') {
// 存储工具调用ID到DOM元素的映射,用于更新执行状态
const toolCallStatusMap = new Map();
// Streaming model output cache: progressId -> { assistantId, buffer }
const responseStreamStateByProgressId = new Map();
// Streaming AI "thinking" output: progressId -> Map(streamId -> { itemId, buffer })
const thinkingStreamStateByProgressId = new Map();
// Incremental tool output: "progressId::toolCallId" -> { itemId, buffer }
const toolResultStreamStateByKey = new Map();
function toolResultStreamKey(progressId, toolCallId) {
    // Composite map key: progress id and tool-call id joined by '::'.
    return `${String(progressId)}::${String(toolCallId)}`;
}
// Markdown rendering (used for the final merged render; during streaming
// deltas plain escaping is used because partially-received markdown is
// unstable). This DOMPurify config whitelists common formatting tags only.
const assistantMarkdownSanitizeConfig = {
    ALLOWED_TAGS: ['p', 'br', 'strong', 'em', 'u', 's', 'code', 'pre', 'blockquote', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ul', 'ol', 'li', 'a', 'img', 'table', 'thead', 'tbody', 'tr', 'th', 'td', 'hr'],
    ALLOWED_ATTR: ['href', 'title', 'alt', 'src', 'class'],
    ALLOW_DATA_ATTR: false,
};
function escapeHtmlLocal(text) {
    // Escape the HTML-significant characters &, <, > without touching the
    // DOM. The previous implementation allocated a detached <div> per call
    // and only worked in a browser; the textContent/innerHTML round-trip it
    // relied on escapes exactly these three characters (quotes are left
    // as-is), so this is behaviorally equivalent for that set.
    // Falsy input (null, undefined, '') yields '' as before.
    if (!text) return '';
    return String(text)
        .replace(/&/g, '&amp;')
        .replace(/</g, '&lt;')
        .replace(/>/g, '&gt;');
}
function formatAssistantMarkdownContent(text) {
    // Render assistant text as sanitized markdown HTML; degrade to escaped
    // plain text with <br> line breaks when marked is unavailable or throws.
    const source = (text == null) ? '' : String(text);
    const plainFallback = () => escapeHtmlLocal(source).replace(/\n/g, '<br>');
    if (typeof marked === 'undefined') {
        return plainFallback();
    }
    try {
        marked.setOptions({ breaks: true, gfm: true });
        const html = marked.parse(source);
        // Sanitize only when DOMPurify is loaded; otherwise return raw parse.
        return (typeof DOMPurify !== 'undefined')
            ? DOMPurify.sanitize(html, assistantMarkdownSanitizeConfig)
            : html;
    } catch (err) {
        return plainFallback();
    }
}
function updateAssistantBubbleContent(assistantMessageId, content, renderMarkdown) {
    // Replace the text of an assistant message bubble, optionally rendering
    // markdown, while preserving the copy button that addMessage appended
    // inside the bubble. No-op if the message or bubble is gone.
    const assistantElement = document.getElementById(assistantMessageId);
    if (!assistantElement) return;
    const bubble = assistantElement.querySelector('.message-bubble');
    if (!bubble) return;
    // Detach the copy button before innerHTML wipes the bubble; it is
    // re-appended at the end.
    const copyBtn = bubble.querySelector('.message-copy-btn');
    if (copyBtn) copyBtn.remove();
    const newContent = content == null ? '' : String(content);
    const html = renderMarkdown
        ? formatAssistantMarkdownContent(newContent)
        : escapeHtmlLocal(newContent).replace(/\n/g, '<br>');
    bubble.innerHTML = html;
    // Keep the raw text on the element for the copy-to-clipboard feature.
    assistantElement.dataset.originalContent = newContent;
    if (typeof wrapTablesInBubble === 'function') {
        wrapTablesInBubble(bubble);
    }
    if (copyBtn) bubble.appendChild(copyBtn);
}
const conversationExecutionTracker = {
activeConversations: new Set(),
update(tasks = []) {
@@ -543,7 +612,77 @@ function handleStreamEvent(event, progressElement, progressId,
});
break;
case 'thinking_stream_start': {
const d = event.data || {};
const streamId = d.streamId || null;
if (!streamId) break;
let state = thinkingStreamStateByProgressId.get(progressId);
if (!state) {
state = new Map();
thinkingStreamStateByProgressId.set(progressId, state);
}
// 若已存在,重置 buffer
const title = '🤔 ' + (typeof window.t === 'function' ? window.t('chat.aiThinking') : 'AI思考');
const itemId = addTimelineItem(timeline, 'thinking', {
title: title,
message: ' ',
data: d
});
state.set(streamId, { itemId, buffer: '' });
break;
}
case 'thinking_stream_delta': {
const d = event.data || {};
const streamId = d.streamId || null;
if (!streamId) break;
const state = thinkingStreamStateByProgressId.get(progressId);
if (!state || !state.has(streamId)) break;
const s = state.get(streamId);
const delta = event.message || '';
s.buffer += delta;
const item = document.getElementById(s.itemId);
if (item) {
const contentEl = item.querySelector('.timeline-item-content');
if (contentEl) {
if (typeof formatMarkdown === 'function') {
contentEl.innerHTML = formatMarkdown(s.buffer);
} else {
contentEl.textContent = s.buffer;
}
}
}
break;
}
case 'thinking':
// 如果本 thinking 是由 thinking_stream_* 聚合出来的(带 streamId),避免重复创建 timeline item
if (event.data && event.data.streamId) {
const streamId = event.data.streamId;
const state = thinkingStreamStateByProgressId.get(progressId);
if (state && state.has(streamId)) {
const s = state.get(streamId);
s.buffer = event.message || '';
const item = document.getElementById(s.itemId);
if (item) {
const contentEl = item.querySelector('.timeline-item-content');
if (contentEl) {
// contentEl.innerHTML 用于兼容 Markdown 展示
if (typeof formatMarkdown === 'function') {
contentEl.innerHTML = formatMarkdown(s.buffer);
} else {
contentEl.textContent = s.buffer;
}
}
}
break;
}
}
addTimelineItem(timeline, 'thinking', {
title: '🤔 ' + (typeof window.t === 'function' ? window.t('chat.aiThinking') : 'AI思考'),
message: event.message,
@@ -584,6 +723,55 @@ function handleStreamEvent(event, progressElement, progressId,
updateToolCallStatus(toolCallId, 'running');
}
break;
case 'tool_result_delta': {
const deltaInfo = event.data || {};
const toolCallId = deltaInfo.toolCallId || null;
if (!toolCallId) break;
const key = toolResultStreamKey(progressId, toolCallId);
let state = toolResultStreamStateByKey.get(key);
const toolNameDelta = deltaInfo.toolName || (typeof window.t === 'function' ? window.t('chat.unknownTool') : '未知工具');
const deltaText = event.message || '';
if (!deltaText) break;
if (!state) {
// 首次增量:创建一个 tool_result 占位条目,后续不断更新 pre 内容
const runningLabel = typeof window.t === 'function' ? window.t('timeline.running') : '执行中...';
const title = '⏳ ' + (typeof window.t === 'function'
? window.t('timeline.running')
: runningLabel) + ' ' + (typeof window.t === 'function' ? window.t('chat.callTool', { name: escapeHtmlLocal(toolNameDelta), index: deltaInfo.index || 0, total: deltaInfo.total || 0 }) : toolNameDelta);
const itemId = addTimelineItem(timeline, 'tool_result', {
title: title,
message: '',
data: {
toolName: toolNameDelta,
success: true,
isError: false,
result: deltaText,
toolCallId: toolCallId,
index: deltaInfo.index,
total: deltaInfo.total,
iteration: deltaInfo.iteration
},
expanded: false
});
state = { itemId, buffer: '' };
toolResultStreamStateByKey.set(key, state);
}
state.buffer += deltaText;
const item = document.getElementById(state.itemId);
if (item) {
const pre = item.querySelector('pre.tool-result');
if (pre) {
pre.textContent = state.buffer;
}
}
break;
}
case 'tool_result':
const resultInfo = event.data || {};
@@ -592,6 +780,39 @@ function handleStreamEvent(event, progressElement, progressId,
const statusIcon = success ? '✅' : '❌';
const resultToolCallId = resultInfo.toolCallId || null;
const resultExecText = success ? (typeof window.t === 'function' ? window.t('chat.toolExecComplete', { name: escapeHtml(resultToolName) }) : '工具 ' + escapeHtml(resultToolName) + ' 执行完成') : (typeof window.t === 'function' ? window.t('chat.toolExecFailed', { name: escapeHtml(resultToolName) }) : '工具 ' + escapeHtml(resultToolName) + ' 执行失败');
// 若此 tool 已经流式推送过增量,则复用占位条目并更新最终结果,避免重复添加一条
if (resultToolCallId) {
const key = toolResultStreamKey(progressId, resultToolCallId);
const state = toolResultStreamStateByKey.get(key);
if (state && state.itemId) {
const item = document.getElementById(state.itemId);
if (item) {
const pre = item.querySelector('pre.tool-result');
const resultVal = resultInfo.result || resultInfo.error || '';
if (pre) pre.textContent = typeof resultVal === 'string' ? resultVal : JSON.stringify(resultVal);
const section = item.querySelector('.tool-result-section');
if (section) {
section.className = 'tool-result-section ' + (success ? 'success' : 'error');
}
const titleEl = item.querySelector('.timeline-item-title');
if (titleEl) {
titleEl.textContent = statusIcon + ' ' + resultExecText;
}
}
toolResultStreamStateByKey.delete(key);
// 同时更新 tool_call 的状态
if (resultToolCallId && toolCallStatusMap.has(resultToolCallId)) {
updateToolCallStatus(resultToolCallId, success ? 'completed' : 'failed');
toolCallStatusMap.delete(resultToolCallId);
}
break;
}
}
if (resultToolCallId && toolCallStatusMap.has(resultToolCallId)) {
updateToolCallStatus(resultToolCallId, success ? 'completed' : 'failed');
toolCallStatusMap.delete(resultToolCallId);
@@ -683,47 +904,108 @@ function handleStreamEvent(event, progressElement, progressId,
loadActiveTasks();
break;
case 'response':
// 在更新之前,先获取任务对应的原始对话ID
case 'response_start': {
const responseTaskState = progressTaskState.get(progressId);
const responseOriginalConversationId = responseTaskState?.conversationId;
// 先添加助手回复
const responseData = event.data || {};
const mcpIds = responseData.mcpExecutionIds || [];
setMcpIds(mcpIds);
// 更新对话ID
if (responseData.conversationId) {
// 如果用户已经开始了新对话(currentConversationId 为 null),
// 且这个 response 事件来自旧对话,就不更新 currentConversationId 也不添加消息
// 如果用户已经开始了新对话(currentConversationId 为 null),且这个事件来自旧对话,则忽略
if (currentConversationId === null && responseOriginalConversationId !== null) {
// 用户已经开始了新对话,忽略旧对话的 response 事件
// 但仍然更新任务状态,以便正确显示任务信息
updateProgressConversation(progressId, responseData.conversationId);
break;
}
currentConversationId = responseData.conversationId;
updateActiveConversation();
addAttackChainButton(currentConversationId);
updateProgressConversation(progressId, responseData.conversationId);
loadActiveTasks();
}
// 添加助手回复,并传入进度ID以便集成详情
const assistantId = addMessage('assistant', event.message, mcpIds, progressId);
// 已存在则复用;否则创建空助手消息占位,用于增量追加
const existing = responseStreamStateByProgressId.get(progressId);
if (existing && existing.assistantId) break;
const assistantId = addMessage('assistant', '', mcpIds, progressId);
setAssistantId(assistantId);
// 将进度详情集成到工具调用区域
integrateProgressToMCPSection(progressId, assistantId);
// 延迟自动折叠详情(3秒后)
responseStreamStateByProgressId.set(progressId, { assistantId, buffer: '' });
break;
}
case 'response_delta': {
const responseData = event.data || {};
const responseTaskState = progressTaskState.get(progressId);
const responseOriginalConversationId = responseTaskState?.conversationId;
if (responseData.conversationId) {
if (currentConversationId === null && responseOriginalConversationId !== null) {
updateProgressConversation(progressId, responseData.conversationId);
break;
}
}
let state = responseStreamStateByProgressId.get(progressId);
if (!state || !state.assistantId) {
const mcpIds = responseData.mcpExecutionIds || [];
const assistantId = addMessage('assistant', '', mcpIds, progressId);
setAssistantId(assistantId);
state = { assistantId, buffer: '' };
responseStreamStateByProgressId.set(progressId, state);
}
state.buffer += (event.message || '');
updateAssistantBubbleContent(state.assistantId, state.buffer, false);
break;
}
case 'response':
// 在更新之前,先获取任务对应的原始对话ID
const responseTaskState = progressTaskState.get(progressId);
const responseOriginalConversationId = responseTaskState?.conversationId;
// 先更新 mcp ids
const responseData = event.data || {};
const mcpIds = responseData.mcpExecutionIds || [];
setMcpIds(mcpIds);
// 更新对话ID
if (responseData.conversationId) {
if (currentConversationId === null && responseOriginalConversationId !== null) {
updateProgressConversation(progressId, responseData.conversationId);
break;
}
currentConversationId = responseData.conversationId;
updateActiveConversation();
addAttackChainButton(currentConversationId);
updateProgressConversation(progressId, responseData.conversationId);
loadActiveTasks();
}
// 如果之前已经在 response_start/response_delta 阶段创建过占位,则复用该消息更新最终内容
const streamState = responseStreamStateByProgressId.get(progressId);
const existingAssistantId = streamState?.assistantId || getAssistantId();
let assistantIdFinal = existingAssistantId;
if (!assistantIdFinal) {
assistantIdFinal = addMessage('assistant', event.message, mcpIds, progressId);
setAssistantId(assistantIdFinal);
} else {
setAssistantId(assistantIdFinal);
updateAssistantBubbleContent(assistantIdFinal, event.message, true);
}
// 将进度详情集成到工具调用区域(放在最终 response 之后,保证时间线已完整)
integrateProgressToMCPSection(progressId, assistantIdFinal);
responseStreamStateByProgressId.delete(progressId);
setTimeout(() => {
collapseAllProgressDetails(assistantId, progressId);
collapseAllProgressDetails(assistantIdFinal, progressId);
}, 3000);
// 延迟刷新对话列表,确保助手消息已保存,updated_at已更新
setTimeout(() => {
loadConversations();
}, 200);
@@ -802,6 +1084,16 @@ function handleStreamEvent(event, progressElement, progressId,
break;
case 'done':
// 清理流式输出状态
responseStreamStateByProgressId.delete(progressId);
thinkingStreamStateByProgressId.delete(progressId);
// 清理工具流式输出占位
const prefix = String(progressId) + '::';
for (const key of Array.from(toolResultStreamStateByKey.keys())) {
if (String(key).startsWith(prefix)) {
toolResultStreamStateByKey.delete(key);
}
}
// 完成,更新进度标题(如果进度消息还存在)
const doneTitle = document.querySelector(`#${progressId} .progress-title`);
if (doneTitle) {
+54 -2
View File
@@ -99,6 +99,8 @@ function wsT(key) {
'webshell.refresh': '刷新',
'webshell.selectAll': '全选',
'webshell.breadcrumbHome': '根',
'webshell.searchPlaceholder': '搜索连接...',
'webshell.noMatchConnections': '暂无匹配连接',
'common.delete': '删除',
'common.refresh': '刷新'
};
@@ -137,6 +139,16 @@ function initWebshellPage() {
renderWebshellList();
applyWebshellSidebarWidth();
initWebshellSidebarResize();
// 连接搜索:实时过滤连接列表
var searchEl = document.getElementById('webshell-conn-search');
if (searchEl && searchEl.dataset.bound !== '1') {
searchEl.dataset.bound = '1';
searchEl.addEventListener('input', function () {
renderWebshellList();
});
}
const workspace = document.getElementById('webshell-workspace');
if (workspace) {
workspace.innerHTML = '<div class="webshell-workspace-placeholder" data-i18n="webshell.selectOrAdd">' + (wsT('webshell.selectOrAdd')) + '</div>';
@@ -227,12 +239,29 @@ function renderWebshellList() {
const listEl = document.getElementById('webshell-list');
if (!listEl) return;
const searchEl = document.getElementById('webshell-conn-search');
const searchTerm = (searchEl && typeof searchEl.value === 'string' ? searchEl.value : '').trim().toLowerCase();
if (!webshellConnections.length) {
listEl.innerHTML = '<div class="webshell-empty" data-i18n="webshell.noConnections">' + (wsT('webshell.noConnections')) + '</div>';
return;
}
listEl.innerHTML = webshellConnections.map(conn => {
const filtered = searchTerm
? webshellConnections.filter(conn => {
const id = String(conn.id || '').toLowerCase();
const url = String(conn.url || '').toLowerCase();
const remark = String(conn.remark || '').toLowerCase();
return id.includes(searchTerm) || url.includes(searchTerm) || remark.includes(searchTerm);
})
: webshellConnections;
if (filtered.length === 0) {
listEl.innerHTML = '<div class="webshell-empty">' + (wsT('webshell.noMatchConnections') || '暂无匹配连接') + '</div>';
return;
}
listEl.innerHTML = filtered.map(conn => {
const remark = (conn.remark || conn.url || '').replace(/</g, '&lt;').replace(/>/g, '&gt;');
const url = (conn.url || '').replace(/</g, '&lt;').replace(/>/g, '&gt;');
const urlTitle = (conn.url || '').replace(/&/g, '&amp;').replace(/"/g, '&quot;').replace(/</g, '&lt;');
@@ -797,10 +826,25 @@ function runWebshellAiSend(conn, inputEl, sendBtn, messagesContainer) {
el.classList.toggle('active', el.dataset.convId === convId);
});
});
} else if (eventData.type === 'response_start') {
streamingTarget = '';
webshellStreamingTypingId += 1;
streamingTypingId = webshellStreamingTypingId;
assistantDiv.textContent = '…';
messagesContainer.scrollTop = messagesContainer.scrollHeight;
} else if (eventData.type === 'response_delta') {
var deltaText = (eventData.message != null && eventData.message !== '') ? String(eventData.message) : '';
if (deltaText) {
streamingTarget += deltaText;
webshellStreamingTypingId += 1;
streamingTypingId = webshellStreamingTypingId;
runWebshellAiStreamingTyping(assistantDiv, streamingTarget, streamingTypingId, messagesContainer);
}
} else if (eventData.type === 'response') {
var text = (eventData.message != null && eventData.message !== '') ? eventData.message : (eventData.data && typeof eventData.data === 'string' ? eventData.data : '');
if (text) {
streamingTarget += text;
// response 为最终完整内容:避免与增量重复拼接
streamingTarget = String(text);
webshellStreamingTypingId += 1;
streamingTypingId = webshellStreamingTypingId;
runWebshellAiStreamingTyping(assistantDiv, streamingTarget, streamingTypingId, messagesContainer);
@@ -1672,6 +1716,14 @@ function refreshWebshellUIOnLanguageChange() {
if (fileListEl && webshellCurrentConn && pathInput) {
webshellFileListDir(webshellCurrentConn, pathInput.value.trim() || '.');
}
// 连接搜索占位符(动态属性:这里手动更新)
var connSearchEl = document.getElementById('webshell-conn-search');
if (connSearchEl) {
var ph = wsT('webshell.searchPlaceholder') || '搜索连接...';
connSearchEl.setAttribute('placeholder', ph);
connSearchEl.placeholder = ph;
}
}
}
+14
View File
@@ -47,6 +47,12 @@
</svg>
<span data-i18n="header.apiDocs">API 文档</span>
</button>
<button class="openapi-doc-btn" onclick="window.open('https://github.com/Ed1s0nZ/CyberStrikeAI', '_blank')" data-i18n="header.github" data-i18n-attr="title" data-i18n-skip-text="true" title="GitHub">
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill="currentColor" d="M12 2C6.48 2 2 6.58 2 12.26c0 4.55 2.87 8.4 6.84 9.77.5.1.68-.22.68-.48 0-.24-.01-.88-.01-1.73-2.78.61-3.37-1.35-3.37-1.35-.45-1.17-1.11-1.48-1.11-1.48-.91-.63.07-.62.07-.62 1 .07 1.53 1.06 1.53 1.06.9 1.55 2.36 1.1 2.94.84.09-.65.35-1.1.63-1.35-2.22-.26-4.56-1.13-4.56-5.04 0-1.11.39-2.01 1.03-2.72-.1-.26-.45-1.3.1-2.7 0 0 .84-.27 2.75 1.04.8-.23 1.65-.35 2.5-.35.85 0 1.7.12 2.5.35 1.9-1.31 2.74-1.04 2.74-1.04.56 1.4.2 2.44.1 2.7.64.71 1.03 1.61 1.03 2.72 0 3.92-2.34 4.78-4.57 5.03.36.32.68.94.68 1.9 0 1.38-.01 2.5-.01 2.84 0 .26.18.58.69.48 3.96-1.37 6.83-5.21 6.83-9.77C22 6.58 17.52 2 12 2z"/>
</svg>
<span data-i18n="header.github">GitHub</span>
</button>
<div class="lang-switcher">
<button class="btn-secondary lang-switcher-btn" onclick="toggleLangDropdown()" data-i18n="header.language" data-i18n-attr="title" data-i18n-skip-text="true" title="界面语言">
<span class="lang-switcher-icon">🌐</span>
@@ -972,6 +978,14 @@
<div class="webshell-layout">
<div id="webshell-sidebar" class="webshell-sidebar">
<div class="webshell-sidebar-header" data-i18n="webshell.connections">连接列表</div>
<div class="webshell-conn-search">
<input type="text"
id="webshell-conn-search"
class="form-control webshell-conn-search-input"
data-i18n="webshell.searchPlaceholder"
data-i18n-attr="placeholder"
placeholder="搜索连接..." />
</div>
<div id="webshell-list" class="webshell-list">
<div class="webshell-empty" data-i18n="webshell.noConnections">暂无连接,请点击「添加连接」</div>
</div>