diff --git a/frontend/app/web-gold/package.json b/frontend/app/web-gold/package.json
index 8472b31164..2081f3d782 100644
--- a/frontend/app/web-gold/package.json
+++ b/frontend/app/web-gold/package.json
@@ -16,9 +16,12 @@
"format": "prettier --write src/"
},
"dependencies": {
+ "@ai-sdk/anthropic": "^3.0.15",
+ "@ai-sdk/openai": "^3.0.12",
"@ant-design/icons-vue": "^7.0.1",
"@microsoft/fetch-event-source": "^2.0.1",
"@tailwindcss/vite": "^4.1.14",
+ "ai": "^6.0.39",
"ant-design-vue": "^4.2.6",
"dayjs": "^1.11.18",
"markdown-it": "^14.1.0",
diff --git a/frontend/app/web-gold/src/components/ChatMessageRendererV2.vue b/frontend/app/web-gold/src/components/ChatMessageRendererV2.vue
new file mode 100644
index 0000000000..614448ff93
--- /dev/null
+++ b/frontend/app/web-gold/src/components/ChatMessageRendererV2.vue
@@ -0,0 +1,142 @@
+
+
+
+
+
+
+
diff --git a/frontend/app/web-gold/src/services/ai-bridge/index.ts b/frontend/app/web-gold/src/services/ai-bridge/index.ts
new file mode 100644
index 0000000000..6da49594a6
--- /dev/null
+++ b/frontend/app/web-gold/src/services/ai-bridge/index.ts
@@ -0,0 +1,345 @@
+/**
+ * AI 桥接服务 - 主入口
+ * 提供统一的 AI 服务接口,基于现有系统架构
+ */
+
+// 导入核心类和类型
+export { StreamAdapter, createStreamAdapter, DEFAULT_STREAM_CONFIG } from './stream-adapter'
+export * from './type-definitions'
+
+// 导入现有聊天 API
+import { ChatMessageApi } from '@/api/chat'
+
+// 导入适配器类
+import { StreamAdapter } from './stream-adapter'
+
+// ========================================
+// 配置和常量
+// ========================================
+
+/**
+ * AI 服务配置
+ */
+const AI_SERVICE_CONFIG = {
+ // 默认配置
+ defaults: {
+ model: 'default',
+ temperature: 0.7,
+ maxTokens: 2000,
+ apiUrl: '/admin-api'
+ },
+
+ // 功能开关
+ features: {
+ useCustomAI: import.meta.env.VITE_USE_CUSTOM_AI === 'true',
+ useBridgeService: !!import.meta.env.VITE_AI_BRIDGE_URL,
+ enableStreaming: true
+ }
+}
+
+// ========================================
+// 核心服务类
+// ========================================
+
+/**
+ * AI 聊天服务类
+ * 基于现有系统,提供流式渲染优化
+ */
+export class AIChatService {
+ private streamAdapter: StreamAdapter
+ private performanceMonitor: PerformanceMonitor
+
+ constructor() {
+ this.streamAdapter = new StreamAdapter()
+ this.performanceMonitor = new PerformanceMonitor()
+ }
+
+ /**
+ * 发送流式聊天消息
+ */
+ async sendStreamMessage(options: {
+ conversationId?: string
+ content: string
+ roleId?: number
+ useContext?: boolean
+ useSearch?: boolean
+ attachmentUrls?: string[]
+ onUpdate?: (content: string) => void
+ onComplete?: (fullContent: string) => void
+ onError?: (error: Error) => void
+ }) {
+ const {
+ conversationId,
+ content,
+ roleId = 20,
+ useContext = true,
+ useSearch = false,
+ attachmentUrls = [],
+ onUpdate,
+ onComplete,
+ onError
+ } = options
+
+ try {
+ this.performanceMonitor.track('chat-start')
+
+ // 使用现有的流式 API
+ return await this.sendLegacyStreamMessage({
+ conversationId,
+ content,
+ roleId,
+ useContext,
+ useSearch,
+ attachmentUrls,
+ onUpdate,
+ onComplete,
+ onError
+ })
+ } catch (error) {
+ this.performanceMonitor.track('chat-error')
+ if (onError) {
+ onError(error as Error)
+ }
+ throw error
+ }
+ }
+
+ /**
+ * 使用现有系统发送流式消息
+ */
+ private async sendLegacyStreamMessage(options: {
+ conversationId?: string
+ content: string
+ roleId?: number
+ useContext?: boolean
+ useSearch?: boolean
+ attachmentUrls?: string[]
+ onUpdate?: (content: string) => void
+ onComplete?: (fullContent: string) => void
+ onError?: (error: Error) => void
+ }) {
+ const {
+ conversationId,
+ content,
+ roleId,
+ useContext,
+ useSearch,
+ attachmentUrls,
+ onUpdate,
+ onComplete,
+ onError
+ } = options
+
+ // 如果没有 conversationId,先创建一个
+ let currentConversationId = conversationId
+ if (!currentConversationId) {
+ const createResp = await ChatMessageApi.createChatConversationMy({
+ roleId: roleId || 20
+ })
+ currentConversationId = typeof createResp.data === 'object'
+ ? createResp.data.id
+ : createResp.data
+ }
+
+ if (!currentConversationId) {
+ throw new Error('创建对话失败')
+ }
+
+ // 使用现有的流式 API
+ return await ChatMessageApi.sendChatMessageStream({
+ conversationId: currentConversationId,
+ content,
+ useContext,
+ useSearch,
+ attachmentUrls,
+ onMessage: (event: any) => {
+ try {
+ const dataStr = event?.data || ''
+ if (!dataStr) return
+
+ const { code, data: responseData, msg } = JSON.parse(dataStr)
+ if (code !== 0) {
+ console.warn('[AI Service] 对话异常:', msg)
+ return
+ }
+
+ // 提取 AI 回复内容
+ const piece = responseData?.receive?.content || ''
+ if (piece && onUpdate) {
+ onUpdate(piece)
+ }
+ } catch (e) {
+ console.warn('[AI Service] 解析流数据异常:', e)
+ }
+ },
+ onError: (err: any) => {
+ console.error('[AI Service] 流式请求错误:', err)
+ if (onError) {
+ onError(err)
+ }
+ },
+ onClose: () => {
+ console.log('[AI Service] 流式请求完成')
+ if (onComplete) {
+ onComplete('完成')
+ }
+ }
+ })
+ }
+
+ /**
+ * 获取性能指标
+ */
+ getPerformanceMetrics() {
+ return this.performanceMonitor.getMetrics()
+ }
+
+ /**
+ * 重置服务状态
+ */
+ reset() {
+ this.streamAdapter.reset()
+ this.performanceMonitor.reset()
+ }
+}
+
+/**
+ * 性能监控类
+ */
+class PerformanceMonitor {
+ private metrics = {
+ chatCount: 0,
+ errorCount: 0,
+ totalLatency: 0,
+ averageLatency: 0,
+ startTime: Date.now()
+ }
+
+ track(event: string) {
+ if (event === 'chat-start') {
+ this.metrics.chatCount++
+ } else if (event === 'chat-error') {
+ this.metrics.errorCount++
+ }
+ }
+
+ trackLatency(latency: number) {
+ this.metrics.totalLatency += latency
+ this.metrics.averageLatency = this.metrics.chatCount > 0 ? this.metrics.totalLatency / this.metrics.chatCount : 0
+ }
+
+ getMetrics() {
+ return {
+ ...this.metrics,
+ uptime: Date.now() - this.metrics.startTime,
+ errorRate: this.metrics.chatCount > 0
+ ? this.metrics.errorCount / this.metrics.chatCount
+ : 0
+ }
+ }
+
+ reset() {
+ this.metrics = {
+ chatCount: 0,
+ errorCount: 0,
+ totalLatency: 0,
+ averageLatency: 0,
+ startTime: Date.now()
+ }
+ }
+}
+
+// ========================================
+// 工具函数
+// ========================================
+
+/**
+ * 创建 AI 聊天服务实例
+ */
+export function createAIChatService(): AIChatService {
+ return new AIChatService()
+}
+
+/**
+ * 检查 AI 服务是否可用
+ */
+export function isAIServiceEnabled(): boolean {
+ return AI_SERVICE_CONFIG.features.enableStreaming
+}
+
+/**
+ * 获取 AI 服务状态
+ */
+export function getAIServiceStatus() {
+ return {
+ customAIEnabled: AI_SERVICE_CONFIG.features.useCustomAI,
+ bridgeServiceAvailable: AI_SERVICE_CONFIG.features.useBridgeService,
+ streamingEnabled: AI_SERVICE_CONFIG.features.enableStreaming,
+ defaultModel: AI_SERVICE_CONFIG.defaults.model,
+ uptime: Date.now() - ((globalThis as any).__AI_SERVICE_START_TIME || Date.now())
+ }
+}
+
+// ========================================
+// Vue 组合式函数
+// ========================================
+
+/**
+ * Vue 组合式函数:使用 AI 聊天服务
+ */
+export function useAIChat() {
+ const service = createAIChatService()
+
+ return {
+ service,
+ isEnabled: isAIServiceEnabled(),
+ status: getAIServiceStatus()
+ }
+}
+
+/**
+ * Vue 组合式函数:流式消息处理
+ */
+export function useStreamMessage() {
+ const { service, isEnabled } = useAIChat()
+
+ const sendMessage = async (options: {
+ conversationId?: string
+ content: string
+ roleId?: number
+ onUpdate?: (content: string) => void
+ onComplete?: (fullContent: string) => void
+ onError?: (error: Error) => void
+ }) => {
+ return await service.sendStreamMessage(options)
+ }
+
+ return {
+ sendMessage,
+ isEnabled,
+ service
+ }
+}
+
+// ========================================
+// 全局实例
+// ========================================
+
+// 创建全局 AI 服务实例
+export const aiChatService = createAIChatService()
+
+// 设置启动时间
+;(globalThis as any).__AI_SERVICE_START_TIME = Date.now()
+
+// ========================================
+// 导出默认实例和工具函数
+// ========================================
+
+export default {
+ service: aiChatService,
+ createService: createAIChatService,
+ isEnabled: isAIServiceEnabled,
+ status: getAIServiceStatus,
+ useChat: useAIChat,
+ useStreamMessage
+}
diff --git a/frontend/app/web-gold/src/services/ai-bridge/stream-adapter.ts b/frontend/app/web-gold/src/services/ai-bridge/stream-adapter.ts
new file mode 100644
index 0000000000..1903f7e370
--- /dev/null
+++ b/frontend/app/web-gold/src/services/ai-bridge/stream-adapter.ts
@@ -0,0 +1,285 @@
+/**
+ * AI 桥接服务 - 流式适配器
+ * 负责 SSE 到 AI SDK 协议转换,保持与现有系统的兼容性
+ */
+
+// Browser code: use the global DOM ReadableStream type (do not import Node's 'stream/web')
+
+// SSE 事件类型
+interface SSEEvent {
+ data: string
+ event?: string
+ id?: string
+ retry?: number
+}
+
+// 流式数据处理配置
+interface StreamAdapterConfig {
+ enableVisibilityOptimization?: boolean
+ enablePerformanceTracking?: boolean
+ bufferSize?: number
+}
+
+/**
+ * 流式适配器类
+ * 将 SSE 响应转换为 AI SDK 兼容的格式
+ */
+export class StreamAdapter {
+ private buffer: string = ''
+ private isVisible: boolean = true
+ private config: StreamAdapterConfig
+ private performanceMetrics: {
+ chunkCount: number
+ totalBytes: number
+ startTime: number
+ }
+
+ constructor(config: StreamAdapterConfig = {}) {
+ this.config = {
+ enableVisibilityOptimization: true,
+ enablePerformanceTracking: true,
+ bufferSize: 1024,
+ ...config
+ }
+ this.performanceMetrics = {
+ chunkCount: 0,
+ totalBytes: 0,
+ startTime: Date.now()
+ }
+
+ // 监听页面可见性变化
+ if (this.config.enableVisibilityOptimization) {
+ this.setupVisibilityListener()
+ }
+ }
+
+ /**
+ * 设置页面可见性监听器
+ */
+ private setupVisibilityListener(): void {
+ const handleVisibilityChange = () => {
+ const wasVisible = this.isVisible
+ this.isVisible = !document.hidden
+
+ if (!wasVisible && this.isVisible) {
+ console.log('[AI Bridge] 页面重新可见,重新连接流')
+ } else if (wasVisible && !this.isVisible) {
+ console.log('[AI Bridge] 页面进入后台,优化性能')
+ }
+ }
+
+ document.addEventListener('visibilitychange', handleVisibilityChange)
+ }
+
+ /**
+ * 将 SSE ReadableStream 转换为 AI SDK 可消费的异步迭代器
+ */
+ async *convertSSEResponse(sseStream: ReadableStream<Uint8Array>): AsyncIterable<string> {
+ console.log('[AI Bridge] 开始转换 SSE 响应')
+
+ const reader = sseStream.getReader()
+ const decoder = new TextDecoder()
+ let isFirstChunk = true
+
+ try {
+ while (true) {
+ const { done, value } = await reader.read()
+ if (done) break
+
+ const chunk = decoder.decode(value, { stream: true })
+ this.buffer += chunk
+ this.performanceMetrics.totalBytes += chunk.length
+
+ // 提取完整的 SSE 事件(以 \n\n 分隔)
+ const events = this.buffer.split('\n\n')
+ this.buffer = events.pop() || '' // 保存不完整的事件
+
+ for (const event of events) {
+ if (event.trim()) {
+ const parsedEvent = this.parseSSEEvent(event)
+ if (parsedEvent && this.shouldProcessEvent(parsedEvent)) {
+ const content = this.extractContent(parsedEvent)
+ if (content) {
+ yield content
+ this.performanceMetrics.chunkCount++
+ }
+ }
+ }
+ }
+
+ // 第一块数据特殊处理
+ if (isFirstChunk) {
+ console.log('[AI Bridge] 接收到第一块数据')
+ isFirstChunk = false
+ }
+ }
+
+ // 处理缓冲区中剩余的数据
+ if (this.buffer.trim()) {
+ const lastEvent = this.parseSSEEvent(this.buffer)
+ if (lastEvent) {
+ const content = this.extractContent(lastEvent)
+ if (content) {
+ yield content
+ }
+ }
+ }
+ } catch (error) {
+ console.error('[AI Bridge] SSE 转换错误:', error)
+ throw error
+ } finally {
+ reader.releaseLock()
+ this.logPerformanceMetrics()
+ }
+ }
+
+ /**
+ * 解析 SSE 事件
+ */
+ private parseSSEEvent(event: string): SSEEvent | null {
+ const lines = event.split('\n')
+ const parsedEvent: SSEEvent = {
+ data: ''
+ }
+
+ for (const line of lines) {
+ if (line.startsWith('data: ')) {
+ parsedEvent.data = line.substring(6)
+ } else if (line.startsWith('event: ')) {
+ parsedEvent.event = line.substring(7)
+ } else if (line.startsWith('id: ')) {
+ parsedEvent.id = line.substring(4)
+ } else if (line.startsWith('retry: ')) {
+ parsedEvent.retry = parseInt(line.substring(7), 10)
+ }
+ }
+
+ return parsedEvent.data ? parsedEvent : null
+ }
+
+ /**
+ * 判断是否应该处理此事件
+ */
+ private shouldProcessEvent(event: SSEEvent): boolean {
+ // 页面不可见时跳过非关键事件
+ if (!this.isVisible && this.config.enableVisibilityOptimization) {
+ return event.event !== 'ping'
+ }
+ return true
+ }
+
+ /**
+ * 从 SSE 事件中提取内容
+ */
+ private extractContent(event: SSEEvent): string | null {
+ try {
+ // 尝试解析 JSON 格式
+ const data = JSON.parse(event.data)
+
+ // 处理不同的响应格式
+ if (data.content) {
+ return data.content
+ } else if (data.text) {
+ return data.text
+ } else if (data.message?.content) {
+ return data.message.content
+ } else if (data.receive?.content) {
+ return data.receive.content
+ } else if (data.delta) {
+ return data.delta
+ }
+
+ // 如果是纯文本数据
+ if (typeof data === 'string') {
+ return data
+ }
+
+ return null
+ } catch {
+ // 非 JSON 数据直接返回
+ return event.data
+ }
+ }
+
+ /**
+ * 处理流式增量数据
+ */
+ processStreamingDelta(newFullContent: string, previousContent: string = ''): string {
+ if (newFullContent.startsWith(previousContent)) {
+ // 正常情况:增量更新
+ return newFullContent.slice(previousContent.length)
+ } else {
+ // 异常情况:内容乱序,返回完整内容
+ console.warn('[AI Bridge] 流式内容乱序,使用完整内容')
+ return newFullContent
+ }
+ }
+
+ /**
+ * 获取性能指标
+ */
+ getPerformanceMetrics() {
+ const duration = Date.now() - this.performanceMetrics.startTime
+ return {
+ ...this.performanceMetrics,
+ duration,
+ averageChunkSize: this.performanceMetrics.chunkCount > 0
+ ? this.performanceMetrics.totalBytes / this.performanceMetrics.chunkCount
+ : 0,
+ chunksPerSecond: this.performanceMetrics.chunkCount / (duration / 1000)
+ }
+ }
+
+ /**
+ * 记录性能指标
+ */
+ private logPerformanceMetrics(): void {
+ if (this.config.enablePerformanceTracking) {
+ const metrics = this.getPerformanceMetrics()
+ console.log('[AI Bridge] 性能指标:', {
+ 总字节数: metrics.totalBytes,
+ 数据块数: metrics.chunkCount,
+ 持续时间: `${metrics.duration}ms`,
+ 平均块大小: `${Math.round(metrics.averageChunkSize)} bytes`,
+ 处理速度: `${metrics.chunksPerSecond.toFixed(2)} chunks/s`
+ })
+ }
+ }
+
+ /**
+ * 重置适配器状态
+ */
+ reset(): void {
+ this.buffer = ''
+ this.performanceMetrics = {
+ chunkCount: 0,
+ totalBytes: 0,
+ startTime: Date.now()
+ }
+ }
+
+ /**
+ * 清理资源
+ */
+ dispose(): void {
+ this.buffer = ''
+ this.reset()
+ // 移除事件监听器(在实际实现中需要保存引用以便移除)
+ }
+}
+
+/**
+ * 创建流式适配器实例的工厂函数
+ */
+export function createStreamAdapter(config?: StreamAdapterConfig): StreamAdapter {
+ return new StreamAdapter(config)
+}
+
+/**
+ * 默认配置
+ */
+export const DEFAULT_STREAM_CONFIG: StreamAdapterConfig = {
+ enableVisibilityOptimization: true,
+ enablePerformanceTracking: true,
+ bufferSize: 1024
+}
diff --git a/frontend/app/web-gold/src/services/ai-bridge/type-definitions.ts b/frontend/app/web-gold/src/services/ai-bridge/type-definitions.ts
new file mode 100644
index 0000000000..e5bbb0d548
--- /dev/null
+++ b/frontend/app/web-gold/src/services/ai-bridge/type-definitions.ts
@@ -0,0 +1,497 @@
+/**
+ * AI 桥接服务 - 类型定义系统
+ * 提供完整的 TypeScript 类型定义,确保端到端类型安全
+ */
+
+// ========================================
+// 基础类型定义
+// ========================================
+
+/**
+ * 聊天消息基础类型
+ */
+export interface ChatMessage {
+ id: string
+ content: string
+ role: 'user' | 'assistant' | 'system'
+ timestamp: Date
+ metadata?: {
+ isStreaming?: boolean
+ tokens?: number
+ model?: string
+ provider?: string
+ }
+}
+
+/**
+ * 流式数据块
+ */
+export interface StreamChunk {
+ id: string
+ content: string
+ delta?: string
+ isComplete: boolean
+ timestamp: Date
+}
+
+/**
+ * SSE 事件格式
+ */
+export interface SSEEvent {
+ data: string
+ event?: string
+ id?: string
+ retry?: number
+}
+
+// ========================================
+// AI SDK 集成类型
+// ========================================
+
+/**
+ * AI 提供商枚举
+ */
+export type AIProvider = 'openai' | 'anthropic' | 'custom'
+
+/**
+ * AI 模型配置
+ */
+export interface AIModelConfig {
+ provider: AIProvider
+ model: string
+ apiKey?: string
+ baseURL?: string
+ parameters?: {
+ temperature?: number
+ maxTokens?: number
+ topP?: number
+ frequencyPenalty?: number
+ presencePenalty?: number
+ }
+}
+
+/**
+ * AI SDK 流式配置
+ */
+export interface AIStreamConfig {
+ model: AIModelConfig
+ streamMode: 'text' | 'full' | 'object'
+ enableStreaming?: boolean
+ timeout?: number
+}
+
+/**
+ * AI SDK 响应格式
+ */
+export interface AIResponse {
+ id: string
+ type: 'message' | 'tool-call' | 'error' | 'done'
+ content?: string
+ toolCalls?: ToolCall[]
+ error?: {
+ code: string
+ message: string
+ }
+ usage?: {
+ promptTokens: number
+ completionTokens: number
+ totalTokens: number
+ }
+}
+
+/**
+ * 工具调用类型
+ */
+export interface ToolCall {
+ id: string
+ type: 'function'
+ function: {
+ name: string
+ arguments: string
+ description?: string
+ }
+}
+
+// ========================================
+// 流式处理类型
+// ========================================
+
+/**
+ * 流式处理器配置
+ */
+export interface StreamProcessorConfig {
+ enableVisibilityOptimization: boolean
+ enablePerformanceTracking: boolean
+ bufferSize: number
+ chunkSize: number
+ enableRetry: boolean
+ retryAttempts: number
+ retryDelay: number
+}
+
+/**
+ * 流式处理器状态
+ */
+export interface StreamProcessorState {
+ status: 'idle' | 'connecting' | 'streaming' | 'paused' | 'completed' | 'error'
+ bytesReceived: number
+ chunksReceived: number
+ startTime: Date
+ lastChunkTime: Date
+ error?: string
+}
+
+/**
+ * 流式事件类型
+ */
+export type StreamEventType =
+ | 'stream-start'
+ | 'stream-chunk'
+ | 'stream-end'
+ | 'stream-error'
+ | 'stream-pause'
+ | 'stream-resume'
+
+/**
+ * 流式事件
+ */
+export interface StreamEvent<T = unknown> {
+ type: StreamEventType
+ data: T
+ timestamp: Date
+ requestId?: string
+}
+
+// ========================================
+// 性能监控类型
+// ========================================
+
+/**
+ * 性能指标
+ */
+export interface PerformanceMetrics {
+ // 渲染性能
+ renderTime: number
+ firstRenderTime: number
+ incrementalRenderTime: number
+
+ // 流式性能
+ streamLatency: number
+ chunkProcessingTime: number
+ bytesPerSecond: number
+
+ // 内存使用
+ memoryUsage: number
+ memoryPeak: number
+
+ // 错误统计
+ errorCount: number
+ retryCount: number
+
+ // 成功率
+ successRate: number
+}
+
+/**
+ * 性能报告
+ */
+export interface PerformanceReport {
+ metrics: PerformanceMetrics
+ timestamp: Date
+ sessionId: string
+ duration: number
+ summary: {
+ performanceScore: number
+ issues: string[]
+ recommendations: string[]
+ }
+}
+
+// ========================================
+// 适配器类型
+// ========================================
+
+/**
+ * 适配器接口
+ */
+export interface StreamAdapterInterface {
+ convertSSEResponse(sseStream: ReadableStream<Uint8Array>): AsyncIterable<string>
+ processStreamingDelta(newContent: string, previousContent?: string): string
+ getPerformanceMetrics(): any
+ reset(): void
+ dispose(): void
+}
+
+/**
+ * 适配器配置
+ */
+export interface AdapterConfig {
+ enableVisibilityOptimization: boolean
+ enablePerformanceTracking: boolean
+ bufferSize: number
+ chunkSize: number
+}
+
+// ========================================
+// 组件 Props 类型
+// ========================================
+
+/**
+ * ChatMessageRenderer 组件 Props
+ */
+export interface ChatMessageRendererProps {
+ content: string
+ isStreaming: boolean
+ config?: {
+ enableTypewriter?: boolean
+ typewriterSpeed?: number
+ enableMarkdown?: boolean
+ enableHighlighting?: boolean
+ }
+ onStreamStart?: () => void
+ onStreamChunk?: (chunk: string) => void
+ onStreamEnd?: (fullContent: string) => void
+ onError?: (error: Error) => void
+}
+
+/**
+ * ChatMessageRendererV2 组件 Props
+ */
+export interface ChatMessageRendererV2Props extends ChatMessageRendererProps {
+ aiConfig?: AIStreamConfig
+ adapterConfig?: AdapterConfig
+ enableAIIntegration?: boolean
+}
+
+// ========================================
+// API 类型
+// ========================================
+
+/**
+ * 聊天请求类型
+ */
+export interface ChatRequest {
+ conversationId?: string
+ content: string
+ roleId?: number
+ useContext?: boolean
+ useSearch?: boolean
+ attachmentUrls?: string[]
+ config?: {
+ enableStreaming?: boolean
+ enableAI?: boolean
+ model?: AIModelConfig
+ }
+}
+
+/**
+ * 聊天响应类型
+ */
+export interface ChatResponse {
+ id: string
+ conversationId: string
+ send?: {
+ id: string
+ content: string
+ timestamp: Date
+ }
+ receive?: {
+ id: string
+ content: string
+ reasoningContent?: string
+ timestamp: Date
+ }
+ metadata?: {
+ model?: string
+ provider?: string
+ tokens?: number
+ processingTime?: number
+ }
+}
+
+// ========================================
+// 配置类型
+// ========================================
+
+/**
+ * 全局 AI 配置
+ */
+export interface AIGlobalConfig {
+ enabled: boolean
+ defaultProvider: AIProvider
+ defaultModel: string
+ streamConfig: StreamProcessorConfig
+ performanceConfig: {
+ enabled: boolean
+ samplingRate: number
+ reportInterval: number
+ }
+ featureFlags: {
+ enableVercelSDK: boolean
+ enableStreamingV2: boolean
+ enablePerformanceMonitoring: boolean
+ enableAdvancedFeatures: boolean
+ }
+}
+
+/**
+ * 环境变量类型
+ */
+export interface EnvironmentConfig {
+ VITE_AI_SDK_ENABLED: string
+ VITE_AI_BRIDGE_URL: string
+ VITE_AI_DEFAULT_PROVIDER: AIProvider
+ VITE_AI_DEFAULT_MODEL: string
+}
+
+// ========================================
+// 工具类型
+// ========================================
+
+/**
+ * 工具调用结果
+ */
+export interface ToolResult {
+ id: string
+ name: string
+ result: any
+ error?: string
+ duration: number
+}
+
+/**
+ * 工具定义
+ */
+export interface ToolDefinition {
+ name: string
+ description: string
+ parameters: {
+ type: 'object'
+ properties: Record<string, unknown>
+ required?: string[]
+ }
+}
+
+// ========================================
+// 错误类型
+// ========================================
+
+/**
+ * AI 桥接服务错误
+ */
+export class AIBridgeError extends Error {
+ constructor(
+ message: string,
+ public code: string,
+ public cause?: Error,
+ public context?: Record<string, unknown>
+ ) {
+ super(message)
+ this.name = 'AIBridgeError'
+ }
+}
+
+/**
+ * 流处理错误
+ */
+export class StreamProcessingError extends AIBridgeError {
+ constructor(message: string, public streamId: string, cause?: Error) {
+ super(message, 'STREAM_PROCESSING_ERROR', cause, { streamId })
+ this.name = 'StreamProcessingError'
+ }
+}
+
+/**
+ * 配置错误
+ */
+export class ConfigError extends AIBridgeError {
+ constructor(message: string, public configKey: string, public configValue: any) {
+ super(message, 'CONFIG_ERROR', undefined, { configKey, configValue })
+ this.name = 'ConfigError'
+ }
+}
+
+// ========================================
+// 工具函数类型
+// ========================================
+
+/**
+ * 类型守卫函数
+ */
+export type TypeGuard<T> = (value: unknown) => value is T
+
+/**
+ * 异步处理器
+ */
+export type AsyncHandler<T = unknown> = (data?: T) => Promise<void>
+
+/**
+ * 同步处理器
+ */
+export type Handler<T = unknown> = (data?: T) => void
+
+/**
+ * 事件监听器
+ */
+export type EventListener<T = unknown> = (event: StreamEvent<T>) => void
+
+// ========================================
+// 导出工具类型
+// ========================================
+
+/**
+ * 从联合类型中排除指定类型
+ */
+export type ExcludeType<T, U> = T extends U ? never : T
+
+/**
+ * 必需属性类型
+ */
+export type RequiredKeys<T> = {
+ [K in keyof T]-?: {} extends Pick<T, K> ? never : K
+}[keyof T]
+
+/**
+ * 可选属性类型
+ */
+export type OptionalKeys<T> = {
+ [K in keyof T]-?: {} extends Pick<T, K> ? K : never
+}[keyof T]
+
+// ========================================
+// 默认值导出
+// ========================================
+
+/**
+ * 默认 AI 配置
+ */
+export const DEFAULT_AI_CONFIG: AIModelConfig = {
+ provider: 'openai',
+ model: 'gpt-3.5-turbo',
+ parameters: {
+ temperature: 0.7,
+ maxTokens: 2000
+ }
+}
+
+/**
+ * 默认流处理配置
+ */
+export const DEFAULT_STREAM_CONFIG: StreamProcessorConfig = {
+ enableVisibilityOptimization: true,
+ enablePerformanceTracking: true,
+ bufferSize: 1024,
+ chunkSize: 64,
+ enableRetry: true,
+ retryAttempts: 3,
+ retryDelay: 1000
+}
+
+/**
+ * 默认性能配置
+ */
+export const DEFAULT_PERFORMANCE_CONFIG = {
+ enabled: true,
+ samplingRate: 0.1, // 10% 采样率
+ reportInterval: 5000 // 5秒报告一次
+}
diff --git a/frontend/app/web-gold/src/views/content-style/Benchmark.vue b/frontend/app/web-gold/src/views/content-style/Benchmark.vue
index 3154be4241..5c7f92e9ef 100644
--- a/frontend/app/web-gold/src/views/content-style/Benchmark.vue
+++ b/frontend/app/web-gold/src/views/content-style/Benchmark.vue
@@ -15,21 +15,19 @@ import BenchmarkTable from './components/BenchmarkTable.vue'
import BatchAnalyzeModal from './components/BatchAnalyzeModal.vue'
import SavePromptModal from './components/SavePromptModal.vue'
-// ==================== 初始化 ====================
const router = useRouter()
const promptStore = usePromptStore()
-// ==================== 数据管理 ====================
const {
data,
selectedRowKeys,
+ expandedRowKeys,
saveTableDataToSession,
loadTableDataFromSession,
processApiResponse,
clearData,
} = useBenchmarkData()
-// ==================== 分析功能 ====================
const {
loading,
batchAnalyzeLoading,
@@ -37,9 +35,8 @@ const {
globalLoadingText,
batchAnalyze,
getVoiceText,
-} = useBenchmarkAnalysis(data, saveTableDataToSession)
+} = useBenchmarkAnalysis(data, expandedRowKeys, saveTableDataToSession)
-// ==================== 表单状态 ====================
const form = ref({
platform: '抖音',
url: '',
@@ -47,7 +44,6 @@ const form = ref({
sort_type: 0,
})
-// ==================== 弹窗状态 ====================
const modalVisible = ref(false)
const batchPromptMergedText = ref('')
const batchPromptTextCount = ref(0)
@@ -55,10 +51,6 @@ const batchPromptTextCount = ref(0)
const savePromptModalVisible = ref(false)
const savePromptContent = ref('')
-// ==================== API 调用函数 ====================
-/**
- * 分析用户主页,获取视频列表
- */
async function handleAnalyzeUser() {
const sec_user_id = resolveId(form.value.url, {
queryKeys: ['user'],
@@ -94,16 +86,13 @@ async function handleAnalyzeUser() {
}
}
-/**
- * 导出数据到 Excel
- */
async function handleExportToExcel() {
- if (!data.value || data.value.length === 0) {
+ if (!data.value?.length) {
message.warning('暂无数据可导出')
return
}
- if (selectedRowKeys.value.length === 0) {
+ if (!selectedRowKeys.value.length) {
message.warning('请先选择要导出的行')
return
}
@@ -116,15 +105,13 @@ async function handleExportToExcel() {
const selectedRows = data.value.filter(item => selectedRowKeys.value.includes(item.id))
const rowsNeedTranscription = selectedRows.filter(row => !row.transcriptions)
- // 导出时只获取语音转写,不进行 AI 对话分析
- if (rowsNeedTranscription.length > 0) {
+ if (rowsNeedTranscription.length) {
globalLoading.value = true
- globalLoadingText.value = `正在分析中...`
+ globalLoadingText.value = '正在分析中...'
try {
const transcriptions = await getVoiceText(rowsNeedTranscription)
- // 更新转写数据
for (const row of rowsNeedTranscription) {
const transcription = transcriptions.find(item => item.audio_url === row.audio_url)
if (transcription) {
@@ -163,9 +150,6 @@ async function handleExportToExcel() {
}
}
-/**
- * 批量分析处理
- */
async function handleBatchAnalyze() {
try {
await batchAnalyze(selectedRowKeys, async (mergedText, textCount) => {
@@ -174,22 +158,17 @@ async function handleBatchAnalyze() {
modalVisible.value = true
})
} finally {
- // 批量分析完成后清空选中项(无论成功还是失败)
selectedRowKeys.value = []
}
}
-/**
- * 重置表单
- */
async function handleResetForm() {
form.value = { platform: '抖音', url: '', count: 20, sort_type: 0 }
await clearData()
}
-// ==================== 批量提示词操作函数 ====================
function handleCopyBatchPrompt(prompt) {
- if (!prompt || !prompt.trim()) {
+ if (!prompt?.trim()) {
message.warning('没有提示词可复制')
return
}
@@ -202,7 +181,7 @@ function handleCopyBatchPrompt(prompt) {
}
function handleUseBatchPrompt(prompt) {
- if (!prompt || !prompt.trim()) {
+ if (!prompt?.trim()) {
message.warning('暂无批量生成的提示词')
return
}
@@ -211,11 +190,9 @@ function handleUseBatchPrompt(prompt) {
router.push('/content-style/copywriting')
}
-// ==================== 保存提示词到服务器 ====================
function handleOpenSavePromptModal(batchPrompt = null) {
- // 批量提示词:使用传入的 batchPrompt(AI 生成的内容),而不是原始的 mergedText
const promptToSave = batchPrompt || batchPromptMergedText.value
- if (!promptToSave || !promptToSave.trim()) {
+ if (!promptToSave?.trim()) {
message.warning('没有提示词可保存')
return
}
@@ -223,7 +200,6 @@ function handleOpenSavePromptModal(batchPrompt = null) {
savePromptModalVisible.value = true
}
-// ==================== 生命周期 ====================
onMounted(async () => {
await loadTableDataFromSession()
})
@@ -252,8 +228,7 @@ defineOptions({ name: 'ContentStyleBenchmark' })
@batch-analyze="handleBatchAnalyze"
/>
-
-
+