feat: 优化

This commit is contained in:
2026-01-18 18:36:37 +08:00
parent 265ee3a453
commit f5bccf8da4
11 changed files with 1435 additions and 252 deletions

View File

@@ -16,9 +16,12 @@
"format": "prettier --write src/"
},
"dependencies": {
"@ai-sdk/anthropic": "^3.0.15",
"@ai-sdk/openai": "^3.0.12",
"@ant-design/icons-vue": "^7.0.1",
"@microsoft/fetch-event-source": "^2.0.1",
"@tailwindcss/vite": "^4.1.14",
"ai": "^6.0.39",
"ant-design-vue": "^4.2.6",
"dayjs": "^1.11.18",
"markdown-it": "^14.1.0",

View File

@@ -0,0 +1,142 @@
<template>
  <div class="w-full h-full">
    <!-- Main render area -->
    <div
      :class="{ 'streaming': isStreaming }"
    >
      <!-- Message renderer - GitHub style -->
      <div
        class="markdown-body"
        :class="{ 'streaming': isStreaming }"
        v-html="renderedContent"
      />
    </div>
  </div>
</template>
<script setup lang="ts">
import { ref, watch, onUnmounted, onMounted } from 'vue'
import { renderMarkdown } from '@/utils/markdown'
import type { ChatMessageRendererV2Props } from '@/services/ai-bridge'
import 'github-markdown-css/github-markdown.css'

const props = withDefaults(defineProps<ChatMessageRendererV2Props>(), {
  config: () => ({
    enableTypewriter: true,
    typewriterSpeed: 10,
    enableMarkdown: true,
    enableHighlighting: true
  })
})

const emit = defineEmits<{
  'stream-start': []
  'stream-chunk': [chunk: string]
  'stream-end': [fullContent: string]
  'error': [error: Error]
}>()

// Accumulated raw message text (cumulative during streaming).
const internalContent = ref('')
// HTML injected into the v-html target below.
const renderedContent = ref('')
const isPageVisible = ref(true)

// Re-render whenever the content prop changes; immediate so the initial
// value is rendered too.
watch(() => props.content, (newContent) => {
  if (newContent !== undefined && newContent !== null) {
    updateContent(newContent)
  }
}, { immediate: true })

// Turn edges of the isStreaming flag into stream-start / stream-end events.
watch(() => props.isStreaming, (newValue, oldValue) => {
  if (newValue && !oldValue) {
    handleStreamStart()
  } else if (!newValue && oldValue) {
    handleStreamEnd()
  }
})

/**
 * Escape HTML special characters so plain text can be injected safely
 * through v-html. The original code injected raw text here, which is an
 * XSS vector for attacker-controlled message content.
 */
function escapeHtml(text: string): string {
  return text
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#39;')
}

/** Route an updated content prop to the streaming or static path. */
async function updateContent(newContent = '') {
  if (props.isStreaming && internalContent.value) {
    await updateStreamingContent(newContent)
  } else {
    await updateStaticContent(newContent)
  }
}

/**
 * Streaming path: the prop carries the cumulative text, so compute the
 * delta against what we have already seen. Falls back to treating the
 * prop as a fresh full message when it is not a superset of the previous
 * content (out-of-order delivery).
 */
async function updateStreamingContent(newFullContent: string) {
  const prev = internalContent.value
  let delta: string
  if (newFullContent.startsWith(prev)) {
    delta = newFullContent.slice(prev.length)
  } else {
    console.warn('[ChatMessageRendererV2] 流式内容乱序,使用完整内容')
    delta = newFullContent
    internalContent.value = ''
  }
  internalContent.value += delta
  emit('stream-chunk', delta)
  await renderContent(internalContent.value)
}

/** Static (non-streaming) path: replace and re-render the whole message. */
async function updateStaticContent(newContent: string) {
  internalContent.value = newContent
  await renderContent(newContent)
}

/**
 * Render the message to HTML. Markdown rendering is on by default; when
 * disabled, the text is HTML-escaped before the newline substitution so
 * the v-html sink cannot execute injected markup. On renderer failure we
 * emit 'error' and fall back to escaped plain text.
 */
async function renderContent(content: string) {
  if (!content) {
    renderedContent.value = ''
    return
  }
  try {
    if (props.config?.enableMarkdown !== false) {
      renderedContent.value = await renderMarkdown(content)
    } else {
      renderedContent.value = escapeHtml(content).replace(/\n/g, '<br>')
    }
  } catch (error) {
    console.error('[ChatMessageRendererV2] 渲染错误:', error)
    emit('error', error instanceof Error ? error : new Error(String(error)))
    renderedContent.value = escapeHtml(content)
  }
}

/** Reset state and announce the start of a stream. */
function handleStreamStart() {
  internalContent.value = ''
  renderedContent.value = ''
  emit('stream-start')
}

/** Announce stream completion with the full accumulated content. */
function handleStreamEnd() {
  emit('stream-end', internalContent.value)
}

/**
 * When the page becomes visible again, re-render with the latest prop so
 * content that arrived while hidden is not lost.
 */
function handleVisibilityChange() {
  const wasVisible = isPageVisible.value
  isPageVisible.value = !document.hidden
  if (!wasVisible && isPageVisible.value && props.content) {
    updateContent(props.content)
  }
}

onMounted(() => {
  document.addEventListener('visibilitychange', handleVisibilityChange)
  handleVisibilityChange()
})

onUnmounted(() => {
  document.removeEventListener('visibilitychange', handleVisibilityChange)
})

defineExpose({
  // Clear all internal state (exposed for parent components).
  reset: () => {
    internalContent.value = ''
    renderedContent.value = ''
  }
})
</script>
<style scoped lang="less">
</style>

View File

@@ -0,0 +1,345 @@
/**
* AI 桥接服务 - 主入口
* 提供统一的 AI 服务接口,基于现有系统架构
*/
// 导入核心类和类型
export { StreamAdapter, createStreamAdapter, DEFAULT_STREAM_CONFIG } from './stream-adapter'
export * from './type-definitions'
// 导入现有聊天 API
import { ChatMessageApi } from '@/api/chat'
// 导入适配器类
import { StreamAdapter } from './stream-adapter'
// ========================================
// 配置和常量
// ========================================
/**
* AI 服务配置
*/
const AI_SERVICE_CONFIG = {
// 默认配置
defaults: {
model: 'default',
temperature: 0.7,
maxTokens: 2000,
apiUrl: '/admin-api'
},
// 功能开关
features: {
useCustomAI: import.meta.env.VITE_USE_CUSTOM_AI === 'true',
useBridgeService: !!import.meta.env.VITE_AI_BRIDGE_URL,
enableStreaming: true
}
}
// ========================================
// 核心服务类
// ========================================
/**
* AI 聊天服务类
* 基于现有系统,提供流式渲染优化
*/
/**
 * AI chat service.
 * Thin wrapper over the existing ChatMessageApi streaming endpoint that
 * adds lightweight performance tracking and conversation bootstrapping.
 */
export class AIChatService {
  private streamAdapter: StreamAdapter
  private performanceMonitor: PerformanceMonitor

  constructor() {
    this.streamAdapter = new StreamAdapter()
    this.performanceMonitor = new PerformanceMonitor()
  }

  /**
   * Send a streaming chat message.
   * Creates a conversation when none is supplied, then streams the reply,
   * invoking onUpdate per server event, onComplete at stream close, and
   * onError (then rethrowing) on failure.
   */
  async sendStreamMessage(options: {
    conversationId?: string
    content: string
    roleId?: number
    useContext?: boolean
    useSearch?: boolean
    attachmentUrls?: string[]
    onUpdate?: (content: string) => void
    onComplete?: (fullContent: string) => void
    onError?: (error: Error) => void
  }) {
    const {
      conversationId,
      content,
      roleId = 20,
      useContext = true,
      useSearch = false,
      attachmentUrls = [],
      onUpdate,
      onComplete,
      onError
    } = options
    try {
      this.performanceMonitor.track('chat-start')
      // Delegate to the existing (legacy) streaming API.
      return await this.sendLegacyStreamMessage({
        conversationId,
        content,
        roleId,
        useContext,
        useSearch,
        attachmentUrls,
        onUpdate,
        onComplete,
        onError
      })
    } catch (error) {
      this.performanceMonitor.track('chat-error')
      if (onError) {
        onError(error as Error)
      }
      throw error
    }
  }

  /**
   * Stream a message through the legacy SSE-based chat API.
   * Tracks the most recent content seen so onComplete receives the actual
   * reply text (the original implementation passed the literal '完成').
   */
  private async sendLegacyStreamMessage(options: {
    conversationId?: string
    content: string
    roleId?: number
    useContext?: boolean
    useSearch?: boolean
    attachmentUrls?: string[]
    onUpdate?: (content: string) => void
    onComplete?: (fullContent: string) => void
    onError?: (error: Error) => void
  }) {
    const {
      conversationId,
      content,
      roleId,
      useContext,
      useSearch,
      attachmentUrls,
      onUpdate,
      onComplete,
      onError
    } = options
    // Bootstrap a conversation when the caller did not provide one.
    let currentConversationId = conversationId
    if (!currentConversationId) {
      const createResp = await ChatMessageApi.createChatConversationMy({
        roleId: roleId || 20
      })
      currentConversationId = typeof createResp.data === 'object'
        ? createResp.data.id
        : createResp.data
    }
    if (!currentConversationId) {
      throw new Error('创建对话失败')
    }
    // Latest reply text seen on the stream; assumes receive.content is
    // cumulative (matches how ChatMessageRendererV2 diffs it) — confirm
    // against the backend contract.
    let latestContent = ''
    return await ChatMessageApi.sendChatMessageStream({
      conversationId: currentConversationId,
      content,
      useContext,
      useSearch,
      attachmentUrls,
      onMessage: (event: any) => {
        try {
          const dataStr = event?.data || ''
          if (!dataStr) return
          const { code, data: responseData, msg } = JSON.parse(dataStr)
          if (code !== 0) {
            console.warn('[AI Service] 对话异常:', msg)
            return
          }
          // Extract the AI reply content from the event payload.
          const piece = responseData?.receive?.content || ''
          if (piece) {
            latestContent = piece
            if (onUpdate) {
              onUpdate(piece)
            }
          }
        } catch (e) {
          console.warn('[AI Service] 解析流数据异常:', e)
        }
      },
      onError: (err: any) => {
        console.error('[AI Service] 流式请求错误:', err)
        if (onError) {
          onError(err)
        }
      },
      onClose: () => {
        console.log('[AI Service] 流式请求完成')
        if (onComplete) {
          // Deliver the real accumulated content, not a placeholder string.
          onComplete(latestContent)
        }
      }
    })
  }

  /** Return a snapshot of the performance counters. */
  getPerformanceMetrics() {
    return this.performanceMonitor.getMetrics()
  }

  /** Reset the adapter and the performance counters. */
  reset() {
    this.streamAdapter.reset()
    this.performanceMonitor.reset()
  }
}
/**
 * In-memory performance counters for chat requests: request/error counts,
 * latency accumulation, and uptime/error-rate derivation.
 */
class PerformanceMonitor {
  private metrics = {
    chatCount: 0,
    errorCount: 0,
    totalLatency: 0,
    averageLatency: 0,
    startTime: Date.now()
  }

  /** Record a lifecycle event ('chat-start' or 'chat-error'). */
  track(event: string) {
    if (event === 'chat-start') {
      this.metrics.chatCount++
    } else if (event === 'chat-error') {
      this.metrics.errorCount++
    }
  }

  /**
   * Accumulate a latency sample (ms).
   * Guards the average against division by zero when no chat has been
   * tracked yet (the original produced NaN in that case).
   */
  trackLatency(latency: number) {
    this.metrics.totalLatency += latency
    this.metrics.averageLatency = this.metrics.chatCount > 0
      ? this.metrics.totalLatency / this.metrics.chatCount
      : 0
  }

  /** Snapshot of all counters plus derived uptime and error rate. */
  getMetrics() {
    return {
      ...this.metrics,
      uptime: Date.now() - this.metrics.startTime,
      errorRate: this.metrics.chatCount > 0
        ? this.metrics.errorCount / this.metrics.chatCount
        : 0
    }
  }

  /** Zero all counters and restart the uptime clock. */
  reset() {
    this.metrics = {
      chatCount: 0,
      errorCount: 0,
      totalLatency: 0,
      averageLatency: 0,
      startTime: Date.now()
    }
  }
}
// ========================================
// 工具函数
// ========================================
/**
 * Build a fresh AIChatService instance.
 */
export function createAIChatService(): AIChatService {
  const service = new AIChatService()
  return service
}
/**
 * Whether streaming AI chat is enabled by configuration.
 */
export function isAIServiceEnabled(): boolean {
  const { enableStreaming } = AI_SERVICE_CONFIG.features
  return enableStreaming
}
/**
 * Snapshot of the AI service's configuration flags and uptime.
 */
export function getAIServiceStatus() {
  const startTime = (globalThis as any).__AI_SERVICE_START_TIME as number | undefined
  return {
    customAIEnabled: AI_SERVICE_CONFIG.features.useCustomAI,
    bridgeServiceAvailable: AI_SERVICE_CONFIG.features.useBridgeService,
    streamingEnabled: AI_SERVICE_CONFIG.features.enableStreaming,
    defaultModel: AI_SERVICE_CONFIG.defaults.model,
    // The original expression `Date.now() - start || Date.now()` parsed as
    // `(Date.now() - start) || Date.now()`, so a missing start time made
    // "uptime" equal to the current epoch timestamp. Report 0 instead.
    uptime: typeof startTime === 'number' ? Date.now() - startTime : 0
  }
}
// ========================================
// Vue 组合式函数
// ========================================
/**
 * Vue composable: provide a dedicated AI chat service instance together
 * with its enablement flag and current status snapshot.
 */
export function useAIChat() {
  const chatService = createAIChatService()
  return {
    service: chatService,
    isEnabled: isAIServiceEnabled(),
    status: getAIServiceStatus()
  }
}
/**
 * Vue composable: streaming-message helper built on useAIChat().
 */
export function useStreamMessage() {
  const { service, isEnabled } = useAIChat()

  /** Forward options straight to the shared service's streaming entry point. */
  const sendMessage = async (options: {
    conversationId?: string
    content: string
    roleId?: number
    onUpdate?: (content: string) => void
    onComplete?: (fullContent: string) => void
    onError?: (error: Error) => void
  }) => service.sendStreamMessage(options)

  return { sendMessage, isEnabled, service }
}
// ========================================
// Global singleton
// ========================================
// Shared AI chat service instance used across the app.
export const aiChatService = createAIChatService()
// Stamp module-load time; getAIServiceStatus() reads it for uptime.
;(globalThis as any).__AI_SERVICE_START_TIME = Date.now()
// ========================================
// Default export: the shared instance plus factories and composables
// ========================================
export default {
  service: aiChatService,
  createService: createAIChatService,
  isEnabled: isAIServiceEnabled,
  status: getAIServiceStatus,
  useChat: useAIChat,
  useStreamMessage
}

View File

@@ -0,0 +1,285 @@
/**
* AI 桥接服务 - 流式适配器
* 负责 SSE 到 AI SDK 协议转换,保持与现有系统的兼容性
*/
import type { ReadableStream } from 'stream/web'
// SSE 事件类型
interface SSEEvent {
data: string
event?: string
id?: string
retry?: number
}
// 流式数据处理配置
interface StreamAdapterConfig {
enableVisibilityOptimization?: boolean
enablePerformanceTracking?: boolean
bufferSize?: number
}
/**
* 流式适配器类
* 将 SSE 响应转换为 AI SDK 兼容的格式
*/
export class StreamAdapter {
private buffer: string = ''
private isVisible: boolean = true
private config: StreamAdapterConfig
private performanceMetrics: {
chunkCount: number
totalBytes: number
startTime: number
}
constructor(config: StreamAdapterConfig = {}) {
this.config = {
enableVisibilityOptimization: true,
enablePerformanceTracking: true,
bufferSize: 1024,
...config
}
this.performanceMetrics = {
chunkCount: 0,
totalBytes: 0,
startTime: Date.now()
}
// 监听页面可见性变化
if (this.config.enableVisibilityOptimization) {
this.setupVisibilityListener()
}
}
/**
* 设置页面可见性监听器
*/
private setupVisibilityListener(): void {
const handleVisibilityChange = () => {
const wasVisible = this.isVisible
this.isVisible = !document.hidden
if (!wasVisible && this.isVisible) {
console.log('[AI Bridge] 页面重新可见,重新连接流')
} else if (wasVisible && !this.isVisible) {
console.log('[AI Bridge] 页面进入后台,优化性能')
}
}
document.addEventListener('visibilitychange', handleVisibilityChange)
}
/**
* 将 SSE ReadableStream 转换为 AI SDK 可消费的异步迭代器
*/
async *convertSSEResponse(sseStream: ReadableStream): AsyncIterable<string> {
console.log('[AI Bridge] 开始转换 SSE 响应')
const reader = sseStream.getReader()
const decoder = new TextDecoder()
let isFirstChunk = true
try {
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value, { stream: true })
this.buffer += chunk
this.performanceMetrics.totalBytes += chunk.length
// 提取完整的 SSE 事件(以 \n\n 分隔)
const events = this.buffer.split('\n\n')
this.buffer = events.pop() || '' // 保存不完整的事件
for (const event of events) {
if (event.trim()) {
const parsedEvent = this.parseSSEEvent(event)
if (parsedEvent && this.shouldProcessEvent(parsedEvent)) {
const content = this.extractContent(parsedEvent)
if (content) {
yield content
this.performanceMetrics.chunkCount++
}
}
}
}
// 第一块数据特殊处理
if (isFirstChunk) {
console.log('[AI Bridge] 接收到第一块数据')
isFirstChunk = false
}
}
// 处理缓冲区中剩余的数据
if (this.buffer.trim()) {
const lastEvent = this.parseSSEEvent(this.buffer)
if (lastEvent) {
const content = this.extractContent(lastEvent)
if (content) {
yield content
}
}
}
} catch (error) {
console.error('[AI Bridge] SSE 转换错误:', error)
throw error
} finally {
reader.releaseLock()
this.logPerformanceMetrics()
}
}
/**
* 解析 SSE 事件
*/
private parseSSEEvent(event: string): SSEEvent | null {
const lines = event.split('\n')
const parsedEvent: SSEEvent = {
data: ''
}
for (const line of lines) {
if (line.startsWith('data: ')) {
parsedEvent.data = line.substring(6)
} else if (line.startsWith('event: ')) {
parsedEvent.event = line.substring(7)
} else if (line.startsWith('id: ')) {
parsedEvent.id = line.substring(4)
} else if (line.startsWith('retry: ')) {
parsedEvent.retry = parseInt(line.substring(8), 10)
}
}
return parsedEvent.data ? parsedEvent : null
}
/**
* 判断是否应该处理此事件
*/
private shouldProcessEvent(event: SSEEvent): boolean {
// 页面不可见时跳过非关键事件
if (!this.isVisible && this.config.enableVisibilityOptimization) {
return event.event !== 'ping'
}
return true
}
/**
* 从 SSE 事件中提取内容
*/
private extractContent(event: SSEEvent): string | null {
try {
// 尝试解析 JSON 格式
const data = JSON.parse(event.data)
// 处理不同的响应格式
if (data.content) {
return data.content
} else if (data.text) {
return data.text
} else if (data.message?.content) {
return data.message.content
} else if (data.receive?.content) {
return data.receive.content
} else if (data.delta) {
return data.delta
}
// 如果是纯文本数据
if (typeof data === 'string') {
return data
}
return null
} catch {
// 非 JSON 数据直接返回
return event.data
}
}
/**
* 处理流式增量数据
*/
processStreamingDelta(newFullContent: string, previousContent: string = ''): string {
if (newFullContent.startsWith(previousContent)) {
// 正常情况:增量更新
return newFullContent.slice(previousContent.length)
} else {
// 异常情况:内容乱序,返回完整内容
console.warn('[AI Bridge] 流式内容乱序,使用完整内容')
return newFullContent
}
}
/**
* 获取性能指标
*/
getPerformanceMetrics() {
const duration = Date.now() - this.performanceMetrics.startTime
return {
...this.performanceMetrics,
duration,
averageChunkSize: this.performanceMetrics.chunkCount > 0
? this.performanceMetrics.totalBytes / this.performanceMetrics.chunkCount
: 0,
chunksPerSecond: this.performanceMetrics.chunkCount / (duration / 1000)
}
}
/**
* 记录性能指标
*/
private logPerformanceMetrics(): void {
if (this.config.enablePerformanceTracking) {
const metrics = this.getPerformanceMetrics()
console.log('[AI Bridge] 性能指标:', {
总字节数: metrics.totalBytes,
数据块数: metrics.chunkCount,
: `${metrics.duration}ms`,
: `${Math.round(metrics.averageChunkSize)} bytes`,
: `${metrics.chunksPerSecond.toFixed(2)} chunks/s`
})
}
}
/**
* 重置适配器状态
*/
reset(): void {
this.buffer = ''
this.performanceMetrics = {
chunkCount: 0,
totalBytes: 0,
startTime: Date.now()
}
}
/**
* 清理资源
*/
dispose(): void {
this.buffer = ''
this.reset()
// 移除事件监听器(在实际实现中需要保存引用以便移除)
}
}
/**
 * Factory helper: build a StreamAdapter with optional overrides.
 */
export function createStreamAdapter(config?: StreamAdapterConfig): StreamAdapter {
  const adapter = new StreamAdapter(config)
  return adapter
}
/**
 * Baseline adapter configuration: both optimizations on, 1 KiB buffer.
 */
export const DEFAULT_STREAM_CONFIG: StreamAdapterConfig = {
  enableVisibilityOptimization: true,
  enablePerformanceTracking: true,
  bufferSize: 1024
}

View File

@@ -0,0 +1,497 @@
/**
* AI 桥接服务 - 类型定义系统
* 提供完整的 TypeScript 类型定义,确保端到端类型安全
*/
// ========================================
// 基础类型定义
// ========================================
/**
* 聊天消息基础类型
*/
export interface ChatMessage {
id: string
content: string
role: 'user' | 'assistant' | 'system'
timestamp: Date
metadata?: {
isStreaming?: boolean
tokens?: number
model?: string
provider?: string
}
}
/**
* 流式数据块
*/
export interface StreamChunk {
id: string
content: string
delta?: string
isComplete: boolean
timestamp: Date
}
/**
* SSE 事件格式
*/
export interface SSEEvent {
data: string
event?: string
id?: string
retry?: number
}
// ========================================
// AI SDK 集成类型
// ========================================
/**
* AI 提供商枚举
*/
export type AIProvider = 'openai' | 'anthropic' | 'custom'
/**
* AI 模型配置
*/
export interface AIModelConfig {
provider: AIProvider
model: string
apiKey?: string
baseURL?: string
parameters?: {
temperature?: number
maxTokens?: number
topP?: number
frequencyPenalty?: number
presencePenalty?: number
}
}
/**
* AI SDK 流式配置
*/
export interface AIStreamConfig {
model: AIModelConfig
streamMode: 'text' | 'full' | 'object'
enableStreaming?: boolean
timeout?: number
}
/**
* AI SDK 响应格式
*/
export interface AIResponse {
id: string
type: 'message' | 'tool-call' | 'error' | 'done'
content?: string
toolCalls?: ToolCall[]
error?: {
code: string
message: string
}
usage?: {
promptTokens: number
completionTokens: number
totalTokens: number
}
}
/**
* 工具调用类型
*/
export interface ToolCall {
id: string
type: 'function'
function: {
name: string
arguments: string
description?: string
}
}
// ========================================
// 流式处理类型
// ========================================
/**
* 流式处理器配置
*/
export interface StreamProcessorConfig {
enableVisibilityOptimization: boolean
enablePerformanceTracking: boolean
bufferSize: number
chunkSize: number
enableRetry: boolean
retryAttempts: number
retryDelay: number
}
/**
* 流式处理器状态
*/
export interface StreamProcessorState {
status: 'idle' | 'connecting' | 'streaming' | 'paused' | 'completed' | 'error'
bytesReceived: number
chunksReceived: number
startTime: Date
lastChunkTime: Date
error?: string
}
/**
* 流式事件类型
*/
export type StreamEventType =
| 'stream-start'
| 'stream-chunk'
| 'stream-end'
| 'stream-error'
| 'stream-pause'
| 'stream-resume'
/**
* 流式事件
*/
export interface StreamEvent<T = any> {
type: StreamEventType
data: T
timestamp: Date
requestId?: string
}
// ========================================
// 性能监控类型
// ========================================
/**
* 性能指标
*/
export interface PerformanceMetrics {
// 渲染性能
renderTime: number
firstRenderTime: number
incrementalRenderTime: number
// 流式性能
streamLatency: number
chunkProcessingTime: number
bytesPerSecond: number
// 内存使用
memoryUsage: number
memoryPeak: number
// 错误统计
errorCount: number
retryCount: number
// 成功率
successRate: number
}
/**
* 性能报告
*/
export interface PerformanceReport {
metrics: PerformanceMetrics
timestamp: Date
sessionId: string
duration: number
summary: {
performanceScore: number
issues: string[]
recommendations: string[]
}
}
// ========================================
// 适配器类型
// ========================================
/**
* 适配器接口
*/
export interface StreamAdapterInterface {
convertSSEResponse(sseStream: ReadableStream): AsyncIterable<string>
processStreamingDelta(newContent: string, previousContent?: string): string
getPerformanceMetrics(): any
reset(): void
dispose(): void
}
/**
* 适配器配置
*/
export interface AdapterConfig {
enableVisibilityOptimization: boolean
enablePerformanceTracking: boolean
bufferSize: number
chunkSize: number
}
// ========================================
// 组件 Props 类型
// ========================================
/**
 * Props for the ChatMessageRenderer component.
 */
export interface ChatMessageRendererProps {
  // Message text to render.
  content: string
  // True while the message is still being streamed.
  isStreaming: boolean
  // Optional rendering feature toggles.
  config?: {
    enableTypewriter?: boolean
    typewriterSpeed?: number
    enableMarkdown?: boolean
    enableHighlighting?: boolean
  }
  // Lifecycle callbacks mirroring the component's emitted events.
  onStreamStart?: () => void
  onStreamChunk?: (chunk: string) => void
  onStreamEnd?: (fullContent: string) => void
  onError?: (error: Error) => void
}
/**
 * Props for ChatMessageRendererV2: extends the V1 props with AI SDK
 * integration and adapter options.
 */
export interface ChatMessageRendererV2Props extends ChatMessageRendererProps {
  aiConfig?: AIStreamConfig
  adapterConfig?: AdapterConfig
  enableAIIntegration?: boolean
}
// ========================================
// API 类型
// ========================================
/**
* 聊天请求类型
*/
export interface ChatRequest {
conversationId?: string
content: string
roleId?: number
useContext?: boolean
useSearch?: boolean
attachmentUrls?: string[]
config?: {
enableStreaming?: boolean
enableAI?: boolean
model?: AIModelConfig
}
}
/**
* 聊天响应类型
*/
export interface ChatResponse {
id: string
conversationId: string
send?: {
id: string
content: string
timestamp: Date
}
receive?: {
id: string
content: string
reasoningContent?: string
timestamp: Date
}
metadata?: {
model?: string
provider?: string
tokens?: number
processingTime?: number
}
}
// ========================================
// 配置类型
// ========================================
/**
* 全局 AI 配置
*/
export interface AIGlobalConfig {
enabled: boolean
defaultProvider: AIProvider
defaultModel: string
streamConfig: StreamProcessorConfig
performanceConfig: {
enabled: boolean
samplingRate: number
reportInterval: number
}
featureFlags: {
enableVercelSDK: boolean
enableStreamingV2: boolean
enablePerformanceMonitoring: boolean
enableAdvancedFeatures: boolean
}
}
/**
* 环境变量类型
*/
export interface EnvironmentConfig {
VITE_AI_SDK_ENABLED: string
VITE_AI_BRIDGE_URL: string
VITE_AI_DEFAULT_PROVIDER: AIProvider
VITE_AI_DEFAULT_MODEL: string
}
// ========================================
// 工具类型
// ========================================
/**
* 工具调用结果
*/
export interface ToolResult {
id: string
name: string
result: any
error?: string
duration: number
}
/**
* 工具定义
*/
export interface ToolDefinition {
name: string
description: string
parameters: {
type: 'object'
properties: Record<string, any>
required?: string[]
}
}
// ========================================
// 错误类型
// ========================================
/**
 * Base error for the AI bridge service.
 * Carries a machine-readable code plus an optional underlying cause and
 * structured context for diagnostics.
 */
export class AIBridgeError extends Error {
  public code: string
  public cause?: Error
  public context?: Record<string, any>

  constructor(message: string, code: string, cause?: Error, context?: Record<string, any>) {
    super(message)
    this.name = 'AIBridgeError'
    this.code = code
    this.cause = cause
    this.context = context
  }
}
/**
 * Error raised while processing a stream; records the offending stream id
 * both as a field and in the context map.
 */
export class StreamProcessingError extends AIBridgeError {
  public streamId: string

  constructor(message: string, streamId: string, cause?: Error) {
    super(message, 'STREAM_PROCESSING_ERROR', cause, { streamId })
    this.name = 'StreamProcessingError'
    this.streamId = streamId
  }
}
/**
 * Error raised for invalid configuration; records the offending key and
 * value both as fields and in the context map.
 */
export class ConfigError extends AIBridgeError {
  public configKey: string
  public configValue: any

  constructor(message: string, configKey: string, configValue: any) {
    super(message, 'CONFIG_ERROR', undefined, { configKey, configValue })
    this.name = 'ConfigError'
    this.configKey = configKey
    this.configValue = configValue
  }
}
// ========================================
// 工具函数类型
// ========================================
/**
* 类型守卫函数
*/
export type TypeGuard<T> = (value: any) => value is T
/**
* 异步处理器
*/
export type AsyncHandler<T = void> = (data?: T) => Promise<void>
/**
* 同步处理器
*/
export type Handler<T = void> = (data?: T) => void
/**
* 事件监听器
*/
export type EventListener<T = any> = (event: StreamEvent<T>) => void
// ========================================
// 导出工具类型
// ========================================
/**
* 从联合类型中排除指定类型
*/
export type ExcludeType<T, U> = T extends U ? never : T
/**
* 必需属性类型
*/
export type RequiredKeys<T> = {
[K in keyof T]-?: {} extends Pick<T, K> ? never : K
}[keyof T]
/**
* 可选属性类型
*/
export type OptionalKeys<T> = {
[K in keyof T]-?: {} extends Pick<T, K> ? K : never
}[keyof T]
// ========================================
// 默认值导出
// ========================================
/**
 * Default AI model configuration.
 */
export const DEFAULT_AI_CONFIG: AIModelConfig = {
  provider: 'openai',
  model: 'gpt-3.5-turbo',
  parameters: {
    temperature: 0.7,
    maxTokens: 2000
  }
}
/**
 * Default stream-processor configuration.
 */
export const DEFAULT_STREAM_CONFIG: StreamProcessorConfig = {
  enableVisibilityOptimization: true,
  enablePerformanceTracking: true,
  bufferSize: 1024,
  chunkSize: 64,
  enableRetry: true,
  retryAttempts: 3,
  retryDelay: 1000
}
/**
 * Default performance-monitoring configuration.
 */
export const DEFAULT_PERFORMANCE_CONFIG = {
  enabled: true,
  samplingRate: 0.1, // 10% sampling rate
  reportInterval: 5000 // report every 5 seconds
}

View File

@@ -15,21 +15,19 @@ import BenchmarkTable from './components/BenchmarkTable.vue'
import BatchAnalyzeModal from './components/BatchAnalyzeModal.vue'
import SavePromptModal from './components/SavePromptModal.vue'
// ==================== 初始化 ====================
const router = useRouter()
const promptStore = usePromptStore()
// ==================== 数据管理 ====================
const {
data,
selectedRowKeys,
expandedRowKeys,
saveTableDataToSession,
loadTableDataFromSession,
processApiResponse,
clearData,
} = useBenchmarkData()
// ==================== 分析功能 ====================
const {
loading,
batchAnalyzeLoading,
@@ -37,9 +35,8 @@ const {
globalLoadingText,
batchAnalyze,
getVoiceText,
} = useBenchmarkAnalysis(data, saveTableDataToSession)
} = useBenchmarkAnalysis(data, expandedRowKeys, saveTableDataToSession)
// ==================== 表单状态 ====================
const form = ref({
platform: '抖音',
url: '',
@@ -47,7 +44,6 @@ const form = ref({
sort_type: 0,
})
// ==================== 弹窗状态 ====================
const modalVisible = ref(false)
const batchPromptMergedText = ref('')
const batchPromptTextCount = ref(0)
@@ -55,10 +51,6 @@ const batchPromptTextCount = ref(0)
const savePromptModalVisible = ref(false)
const savePromptContent = ref('')
// ==================== API 调用函数 ====================
/**
* 分析用户主页,获取视频列表
*/
async function handleAnalyzeUser() {
const sec_user_id = resolveId(form.value.url, {
queryKeys: ['user'],
@@ -94,16 +86,13 @@ async function handleAnalyzeUser() {
}
}
/**
* 导出数据到 Excel
*/
async function handleExportToExcel() {
if (!data.value || data.value.length === 0) {
if (!data.value?.length) {
message.warning('暂无数据可导出')
return
}
if (selectedRowKeys.value.length === 0) {
if (!selectedRowKeys.value.length) {
message.warning('请先选择要导出的行')
return
}
@@ -116,15 +105,13 @@ async function handleExportToExcel() {
const selectedRows = data.value.filter(item => selectedRowKeys.value.includes(item.id))
const rowsNeedTranscription = selectedRows.filter(row => !row.transcriptions)
// 导出时只获取语音转写,不进行 AI 对话分析
if (rowsNeedTranscription.length > 0) {
if (rowsNeedTranscription.length) {
globalLoading.value = true
globalLoadingText.value = `正在分析中...`
globalLoadingText.value = '正在分析中...'
try {
const transcriptions = await getVoiceText(rowsNeedTranscription)
// 更新转写数据
for (const row of rowsNeedTranscription) {
const transcription = transcriptions.find(item => item.audio_url === row.audio_url)
if (transcription) {
@@ -163,9 +150,6 @@ async function handleExportToExcel() {
}
}
/**
* 批量分析处理
*/
async function handleBatchAnalyze() {
try {
await batchAnalyze(selectedRowKeys, async (mergedText, textCount) => {
@@ -174,22 +158,17 @@ async function handleBatchAnalyze() {
modalVisible.value = true
})
} finally {
// 批量分析完成后清空选中项(无论成功还是失败)
selectedRowKeys.value = []
}
}
/**
* 重置表单
*/
async function handleResetForm() {
form.value = { platform: '抖音', url: '', count: 20, sort_type: 0 }
await clearData()
}
// ==================== 批量提示词操作函数 ====================
function handleCopyBatchPrompt(prompt) {
if (!prompt || !prompt.trim()) {
if (!prompt?.trim()) {
message.warning('没有提示词可复制')
return
}
@@ -202,7 +181,7 @@ function handleCopyBatchPrompt(prompt) {
}
function handleUseBatchPrompt(prompt) {
if (!prompt || !prompt.trim()) {
if (!prompt?.trim()) {
message.warning('暂无批量生成的提示词')
return
}
@@ -211,11 +190,9 @@ function handleUseBatchPrompt(prompt) {
router.push('/content-style/copywriting')
}
// ==================== 保存提示词到服务器 ====================
function handleOpenSavePromptModal(batchPrompt = null) {
// 批量提示词:使用传入的 batchPromptAI 生成的内容),而不是原始的 mergedText
const promptToSave = batchPrompt || batchPromptMergedText.value
if (!promptToSave || !promptToSave.trim()) {
if (!promptToSave?.trim()) {
message.warning('没有提示词可保存')
return
}
@@ -223,7 +200,6 @@ function handleOpenSavePromptModal(batchPrompt = null) {
savePromptModalVisible.value = true
}
// ==================== 生命周期 ====================
onMounted(async () => {
await loadTableDataFromSession()
})
@@ -252,8 +228,7 @@ defineOptions({ name: 'ContentStyleBenchmark' })
@batch-analyze="handleBatchAnalyze"
/>
<!-- 空态显示 -->
<section class="card results-card empty-state" v-if="data.length === 0 && !loading">
<section v-if="!data.length && !loading" class="card results-card empty-state">
<a-empty description="暂无数据,请点击开始分析">
<template #image>
<svg width="120" height="120" viewBox="0 0 120 120" fill="none" xmlns="http://www.w3.org/2000/svg">

View File

@@ -1,41 +1,44 @@
<script setup>
<script setup lang="ts">
import { ref, watch } from 'vue'
import { EditOutlined, CopyOutlined } from '@ant-design/icons-vue'
import { message } from 'ant-design-vue'
import ChatMessageRenderer from '@/components/ChatMessageRenderer.vue'
import ChatMessageRendererV2 from '@/components/ChatMessageRendererV2.vue'
import { ChatMessageApi } from '@/api/chat'
import { streamChat } from '@/utils/streamChat'
const props = defineProps({
visible: {
type: Boolean,
default: false,
},
mergedText: {
type: String,
default: '',
},
textCount: {
type: Number,
default: 0,
},
const props = withDefaults(defineProps<{
visible: boolean
mergedText: string
textCount: number
}>(), {
visible: false,
mergedText: '',
textCount: 0,
})
const emit = defineEmits(['update:visible', 'copy', 'save', 'use'])
const emit = defineEmits<{
'update:visible': [value: boolean]
'copy': [text: string]
'save': [text: string]
'use': [text: string]
}>()
const batchPrompt = ref('')
const batchPromptEditMode = ref(false)
const batchPromptGenerating = ref(false)
const hasGenerated = ref(false)
function resetModal() {
batchPrompt.value = ''
batchPromptEditMode.value = false
batchPromptGenerating.value = false
hasGenerated.value = false
}
watch(() => props.visible, (newVal) => {
if (newVal && props.mergedText && !hasGenerated.value) {
generateBatchPrompt()
} else if (!newVal) {
batchPrompt.value = ''
batchPromptEditMode.value = false
batchPromptGenerating.value = false
hasGenerated.value = false
resetModal()
}
})
@@ -47,47 +50,61 @@ watch(() => props.mergedText, (newVal) => {
async function generateBatchPrompt() {
if (!props.mergedText || hasGenerated.value) return
hasGenerated.value = true
try {
batchPromptGenerating.value = true
const createPayload = { roleId: 20 }
console.debug('createChatConversationMy payload(batch):', createPayload)
const conversationResp = await ChatMessageApi.createChatConversationMy(createPayload)
let conversationId = null
if (conversationResp?.data) {
conversationId = typeof conversationResp.data === 'object' ? conversationResp.data.id : conversationResp.data
}
if (!conversationId) {
throw new Error('创建对话失败:未获取到 conversationId')
}
const conversationId = await createConversation()
const aiContent = await streamChat({
conversationId,
content: props.mergedText,
onUpdate: (fullText) => {
onUpdate: (fullText: string) => {
batchPrompt.value = fullText
},
enableTypewriter: true,
typewriterSpeed: 10,
typewriterBatchSize: 2
typewriterBatchSize: 2,
onComplete: () => {},
onError: (error: Error) => {
console.error('流式聊天错误:', error)
},
enableContext: false,
enableWebSearch: false,
timeout: 180000,
attachmentUrls: []
})
if (aiContent && aiContent !== batchPrompt.value) {
batchPrompt.value = aiContent
}
message.success(`批量分析完成:已基于 ${props.textCount} 个视频的文案生成综合提示词`)
} catch (aiError) {
console.error('AI生成失败:', aiError)
} catch (error) {
console.error('AI生成失败:', error)
message.error('AI生成失败请稍后重试')
} finally {
batchPromptGenerating.value = false
hasGenerated.value = false
}
}
async function createConversation() {
const createPayload = { roleId: 20 }
const conversationResp = await ChatMessageApi.createChatConversationMy(createPayload)
const conversationId = conversationResp?.data
? (typeof conversationResp.data === 'object' ? conversationResp.data.id : conversationResp.data)
: null
if (!conversationId) {
throw new Error('创建对话失败:未获取到 conversationId')
}
return conversationId
}
function handleClose() {
emit('update:visible', false)
}
@@ -112,131 +129,86 @@ function handleUse() {
:width="800"
:maskClosable="false"
:keyboard="false"
@cancel="handleClose">
@cancel="handleClose"
>
<div class="batch-prompt-modal">
<!-- 内容显示模式 -->
<div v-if="!batchPromptEditMode" class="batch-prompt-display">
<ChatMessageRenderer
<ChatMessageRendererV2
:content="batchPrompt"
:is-streaming="batchPromptGenerating"
/>
</div>
<a-textarea
<!-- 编辑模式 -->
<a-textarea
v-else
v-model:value="batchPrompt"
:rows="15"
placeholder="内容将在这里显示..." />
v-model:value="batchPrompt"
:rows="15"
placeholder="内容将在这里显示..."
/>
</div>
<template #footer>
<a-space>
<a-button size="small" :title="batchPromptEditMode ? '取消编辑' : '编辑'" @click="batchPromptEditMode = !batchPromptEditMode">
<template #icon>
<EditOutlined />
</template>
</a-button>
<a-button size="small" title="复制" @click="handleCopy">
<template #icon>
<CopyOutlined />
</template>
</a-button>
<a-button size="small" title="保存提示词" @click="handleSave" :disabled="!batchPrompt.trim()">
保存提示词
</a-button>
<a-button @click="handleClose">取消</a-button>
<a-button
type="primary"
:disabled="batchPromptGenerating || !batchPrompt.trim()"
@click="handleUse">去创作</a-button>
</a-space>
<div class="footer-actions">
<div class="left-actions">
<a-button type="text" @click="batchPromptEditMode = !batchPromptEditMode">
{{ batchPromptEditMode ? '取消编辑' : '编辑' }}
</a-button>
<a-button type="text" @click="handleCopy">复制</a-button>
<a-button
type="text"
@click="handleSave"
:disabled="!batchPrompt.trim()"
>
保存提示词
</a-button>
</div>
<div class="right-actions">
<a-button @click="handleClose">取消</a-button>
<a-button
type="primary"
:disabled="batchPromptGenerating || !batchPrompt.trim()"
@click="handleUse"
>
去创作
</a-button>
</div>
</div>
</template>
</a-modal>
</template>
<style scoped>
<style scoped lang="less">
.batch-prompt-modal {
min-height: 200px;
.batch-prompt-display {
min-height: 300px;
max-height: 500px;
overflow-y: auto;
padding: 24px;
border: 1px solid var(--color-border);
border-radius: 8px;
background: var(--color-surface);
}
}
.batch-prompt-display {
min-height: 300px;
max-height: 500px;
overflow-y: auto;
padding: 12px;
background: #0d0d0d;
border: 1px solid var(--color-border);
border-radius: 6px;
line-height: 1.6;
}
.footer-actions {
display: flex;
justify-content: space-between;
align-items: center;
width: 100%;
.batch-prompt-display :deep(h1) {
font-size: 18px;
font-weight: 600;
margin: 12px 0;
color: var(--color-text);
}
.left-actions {
display: flex;
gap: 8px;
}
.batch-prompt-display :deep(h2) {
font-size: 16px;
font-weight: 600;
margin: 16px 0 8px 0;
color: var(--color-text);
}
.batch-prompt-display :deep(h3) {
font-size: 14px;
font-weight: 600;
margin: 12px 0 6px 0;
color: var(--color-text-secondary);
}
.batch-prompt-display :deep(p) {
margin: 8px 0;
color: var(--color-text-secondary);
}
.batch-prompt-display :deep(ul),
.batch-prompt-display :deep(ol) {
margin: 8px 0;
padding-left: 20px;
}
.batch-prompt-display :deep(li) {
margin: 4px 0;
color: var(--color-text-secondary);
}
.batch-prompt-display :deep(strong) {
font-weight: 600;
color: var(--color-text);
}
.batch-prompt-display :deep(code) {
background: #1a1a1a;
padding: 2px 6px;
border-radius: 4px;
font-family: 'Courier New', monospace;
font-size: 12px;
color: #e11d48;
}
.batch-prompt-display :deep(pre) {
background: #1a1a1a;
padding: 12px;
border-radius: 6px;
overflow-x: auto;
margin: 8px 0;
}
.batch-prompt-display :deep(pre code) {
background: transparent;
padding: 0;
}
.batch-prompt-display :deep(blockquote) {
border-left: 3px solid var(--color-primary);
padding-left: 12px;
margin: 8px 0;
color: var(--color-text-secondary);
.right-actions {
display: flex;
gap: 8px;
}
}
</style>

View File

@@ -1,6 +1,6 @@
<script setup>
import { CopyOutlined, SaveOutlined } from '@ant-design/icons-vue'
import ChatMessageRenderer from '@/components/ChatMessageRenderer.vue'
import ChatMessageRendererV2 from '@/components/ChatMessageRendererV2.vue'
const props = defineProps({
record: {
@@ -26,7 +26,6 @@ function handleCreateContent() {
<template>
<div class="expanded-content">
<!-- 未分析的行显示提示 -->
<div v-if="!record.transcriptions && !record.prompt" class="no-analysis-tip">
<a-empty description="该视频尚未分析">
<template #image>
@@ -42,10 +41,8 @@ function handleCreateContent() {
</a-button>
</a-empty>
</div>
<!-- 已分析的行显示内容 -->
<div v-else class="two-col">
<!-- 左侧原配音内容 -->
<section class="col left-col">
<div class="sub-title">原配音</div>
<div class="transcript-box" v-if="record.transcriptions">
@@ -54,44 +51,43 @@ function handleCreateContent() {
<div v-else class="no-transcript">暂无转写文本请先点击"分析"获取</div>
</section>
<!-- 右侧提示词 -->
<section class="col right-col">
<div class="sub-title">提示词</div>
<div class="prompt-display-wrapper">
<ChatMessageRenderer
<ChatMessageRendererV2
:content="record.prompt || ''"
:is-streaming="record._analyzing || false"
/>
<div v-if="!record.prompt" class="no-prompt">暂无提示词</div>
</div>
<div class="right-actions">
<a-space>
<a-button
size="small"
type="text"
<a-button
size="small"
type="text"
class="copy-btn"
:title="'复制'"
title="复制"
@click="handleCopy">
<template #icon>
<CopyOutlined />
</template>
</a-button>
<a-button
<a-button
v-if="record.prompt"
size="small"
type="text"
size="small"
type="text"
class="save-server-btn"
:title="'保存'"
title="保存"
@click="handleSaveToServer">
<template #icon>
<SaveOutlined />
</template>
保存
</a-button>
<a-button
type="dashed"
<a-button
type="dashed"
:disabled="!record.prompt || record._analyzing"
@click="handleCreateContent">基于提示词去创作</a-button>
</a-space>
@@ -177,7 +173,6 @@ function handleCreateContent() {
opacity: 0.8;
}
.no-analysis-tip {
padding: var(--space-8) var(--space-5);
text-align: center;

View File

@@ -12,43 +12,35 @@ export function useBenchmarkAnalysis(data, expandedRowKeys, saveTableDataToSessi
const globalLoadingText = ref('')
const { getVoiceText } = useVoiceText()
/**
* 分析单个视频,获取提示词
*/
async function analyzeVideo(row) {
try {
if (row._analyzing) return
row._analyzing = true
// 1) 获取音频转写
message.info('正在获取音频转写...')
const transcriptions = await getVoiceText([row])
row.transcriptions = transcriptions.find(item => item.audio_url === row.audio_url)?.value
// 2) 检查是否有语音文案
if (!row.transcriptions || !row.transcriptions.trim()) {
if (!row.transcriptions?.trim()) {
message.warning('未提取到语音内容,请检查音频文件或稍后重试')
row._analyzing = false
return false
}
// 3) 创建对话
message.info('正在创建对话...')
const createPayload = { roleId: 20, role_id: 20 }
console.debug('createChatConversationMy payload:', createPayload)
const conversationResp = await ChatMessageApi.createChatConversationMy(createPayload)
let conversationId = null
if (conversationResp?.data) {
conversationId = typeof conversationResp.data === 'object' ? conversationResp.data.id : conversationResp.data
}
const conversationId = conversationResp?.data
? (typeof conversationResp.data === 'object' ? conversationResp.data.id : conversationResp.data)
: null
if (!conversationId) {
throw new Error('创建对话失败:未获取到 conversationId')
}
// 4) 基于转写构建提示,流式生成并实时写入 UI
message.info('正在生成提示词...')
const content = buildPromptFromTranscription(row.transcriptions)
const index = data.value.findIndex(item => item.id === row.id)
@@ -63,19 +55,16 @@ export function useBenchmarkAnalysis(data, expandedRowKeys, saveTableDataToSessi
typewriterBatchSize: 2
})
// 5) 兜底处理
const finalPrompt = aiContent || row.transcriptions || ''
if (index !== -1) data.value[index].prompt = finalPrompt
// 6) 分析完成后自动展开该行
const rowId = String(row.id) // 确保类型一致
const rowId = String(row.id)
if (!expandedRowKeys.value.includes(rowId)) {
expandedRowKeys.value.push(rowId)
}
// 7) 保存数据到 session
await saveTableDataToSession()
message.success('分析完成')
return true
} catch (error) {
@@ -87,11 +76,8 @@ export function useBenchmarkAnalysis(data, expandedRowKeys, saveTableDataToSessi
}
}
/**
* 批量分析选中的视频
*/
async function batchAnalyze(selectedRowKeys, onBatchComplete) {
if (selectedRowKeys.value.length === 0) {
if (!selectedRowKeys.value.length) {
message.warning('请先选择要分析的视频')
return
}
@@ -101,26 +87,23 @@ export function useBenchmarkAnalysis(data, expandedRowKeys, saveTableDataToSessi
globalLoadingText.value = `正在批量分析 ${selectedRowKeys.value.length} 个视频...`
try {
// 1. 获取所有选中视频的语音转写
globalLoadingText.value = '正在获取中...'
const selectedRows = data.value.filter(item => selectedRowKeys.value.includes(item.id))
const transcriptions = await getVoiceText(selectedRows)
// 2. 收集所有转写内容
const allTexts = []
for (const id of selectedRowKeys.value) {
const row = data.value.find(item => item.id === id)
if (row && row.audio_url) {
if (row?.audio_url) {
const transcription = transcriptions.find(item => item.audio_url === row.audio_url)
if (transcription && transcription.value && transcription.value.trim()) {
if (transcription?.value?.trim()) {
allTexts.push({ id: row.id, url: row.audio_url, text: transcription.value })
row.transcriptions = transcription.value
}
}
}
// 3. 检查是否有可用的语音内容
if (allTexts.length === 0) {
if (!allTexts.length) {
message.warning('未提取到任何语音内容,请检查音频文件或稍后重试')
batchAnalyzeLoading.value = false
globalLoading.value = false
@@ -130,8 +113,7 @@ export function useBenchmarkAnalysis(data, expandedRowKeys, saveTableDataToSessi
await saveTableDataToSession()
const mergedText = allTexts.map(item => item.text).join('\n\n---\n\n')
// 4. 通知父组件打开弹窗并开始生成
if (onBatchComplete) {
await onBatchComplete(mergedText, allTexts.length)
}

View File

@@ -9,12 +9,8 @@ export function useBenchmarkData() {
const selectedRowKeys = ref([])
const expandedRowKeys = ref([])
/**
* 保存表格数据到 session
*/
async function saveTableDataToSession() {
try {
// 过滤掉不需要持久化的临时字段(如 _analyzing
const persistData = (data.value || []).map((item) => {
const rest = { ...item }
delete rest._analyzing
@@ -26,14 +22,10 @@ export function useBenchmarkData() {
}
}
/**
* 从 session 加载表格数据
*/
async function loadTableDataFromSession() {
try {
const savedData = await storage.getJSON(TABLE_DATA_STORAGE_KEY)
if (savedData && Array.isArray(savedData) && savedData.length > 0) {
// 强制恢复临时字段的初始状态
if (savedData?.length) {
data.value = savedData.map((item) => ({ ...item, _analyzing: false }))
console.log('从session加载了表格数据:', savedData.length, '条')
}
@@ -42,9 +34,6 @@ export function useBenchmarkData() {
}
}
/**
* 处理 API 响应数据
*/
function processApiResponse(resp, platform) {
if (platform === '抖音') {
const awemeList = resp?.data?.aweme_list || []
@@ -57,9 +46,6 @@ export function useBenchmarkData() {
}
}
/**
* 清空数据
*/
async function clearData() {
data.value = []
selectedRowKeys.value = []

View File

@@ -13,6 +13,7 @@
"dependencies": {
"@types/node": "^25.0.6",
"axios": "^1.12.2",
"github-markdown-css": "^5.8.1",
"localforage": "^1.10.0",
"unocss": "^66.5.4",
"web-storage-cache": "^1.1.1"