feat: 优化

This commit is contained in:
2026-01-18 18:36:37 +08:00
parent 265ee3a453
commit f5bccf8da4
11 changed files with 1435 additions and 252 deletions

View File

@@ -0,0 +1,345 @@
/**
* AI 桥接服务 - 主入口
* 提供统一的 AI 服务接口,基于现有系统架构
*/
// 导入核心类和类型
export { StreamAdapter, createStreamAdapter, DEFAULT_STREAM_CONFIG } from './stream-adapter'
export * from './type-definitions'
// 导入现有聊天 API
import { ChatMessageApi } from '@/api/chat'
// 导入适配器类
import { StreamAdapter } from './stream-adapter'
// ========================================
// 配置和常量
// ========================================
/**
 * AI service configuration.
 * `defaults` are fallback generation parameters; `features` are flags
 * resolved from Vite env vars once at module load.
 */
const AI_SERVICE_CONFIG = {
  // Fallback parameters (not all of them are consumed in this file)
  defaults: {
    model: 'default',
    temperature: 0.7,
    maxTokens: 2000,
    apiUrl: '/admin-api'
  },
  // Feature switches
  features: {
    // enabled only by the exact string 'true'
    useCustomAI: import.meta.env.VITE_USE_CUSTOM_AI === 'true',
    // enabled whenever any bridge URL is configured
    useBridgeService: !!import.meta.env.VITE_AI_BRIDGE_URL,
    enableStreaming: true
  }
}
// ========================================
// 核心服务类
// ========================================
/**
 * AI chat service.
 * Thin wrapper over the existing ChatMessageApi streaming endpoint that
 * adds conversation auto-creation and lightweight performance tracking.
 */
export class AIChatService {
  private streamAdapter: StreamAdapter
  private performanceMonitor: PerformanceMonitor

  constructor() {
    this.streamAdapter = new StreamAdapter()
    this.performanceMonitor = new PerformanceMonitor()
  }

  /**
   * Send a streaming chat message.
   *
   * @param options.conversationId existing conversation id; created on demand when omitted
   * @param options.content        user message text
   * @param options.roleId         AI role id (defaults to 20)
   * @param options.useContext     include conversation history (default true)
   * @param options.useSearch      enable search augmentation (default false)
   * @param options.onUpdate       called with each content update from the stream
   * @param options.onComplete     called once with the full reply when the stream closes
   * @param options.onError        called when the request or stream fails (error is rethrown)
   */
  async sendStreamMessage(options: {
    conversationId?: string
    content: string
    roleId?: number
    useContext?: boolean
    useSearch?: boolean
    attachmentUrls?: string[]
    onUpdate?: (content: string) => void
    onComplete?: (fullContent: string) => void
    onError?: (error: Error) => void
  }) {
    const {
      conversationId,
      content,
      roleId = 20,
      useContext = true,
      useSearch = false,
      attachmentUrls = [],
      onUpdate,
      onComplete,
      onError
    } = options
    try {
      this.performanceMonitor.track('chat-start')
      // Delegate to the existing streaming API
      return await this.sendLegacyStreamMessage({
        conversationId,
        content,
        roleId,
        useContext,
        useSearch,
        attachmentUrls,
        onUpdate,
        onComplete,
        onError
      })
    } catch (error) {
      this.performanceMonitor.track('chat-error')
      if (onError) {
        onError(error as Error)
      }
      throw error
    }
  }

  /**
   * Drive the legacy SSE streaming endpoint, creating a conversation
   * first when none was supplied.
   */
  private async sendLegacyStreamMessage(options: {
    conversationId?: string
    content: string
    roleId?: number
    useContext?: boolean
    useSearch?: boolean
    attachmentUrls?: string[]
    onUpdate?: (content: string) => void
    onComplete?: (fullContent: string) => void
    onError?: (error: Error) => void
  }) {
    const {
      conversationId,
      content,
      roleId,
      useContext,
      useSearch,
      attachmentUrls,
      onUpdate,
      onComplete,
      onError
    } = options
    // Create a conversation when the caller did not supply one
    let currentConversationId = conversationId
    if (!currentConversationId) {
      const createResp = await ChatMessageApi.createChatConversationMy({
        roleId: roleId || 20
      })
      currentConversationId = typeof createResp.data === 'object'
        ? createResp.data.id
        : createResp.data
    }
    if (!currentConversationId) {
      throw new Error('创建对话失败')
    }
    // Accumulated reply so onComplete can receive the real full content.
    // NOTE(review): receive.content appears to carry the accumulated reply
    // (consistent with StreamAdapter.processStreamingDelta semantics); the
    // startsWith check below also copes with purely incremental pieces —
    // confirm against the backend contract.
    let fullContent = ''
    return await ChatMessageApi.sendChatMessageStream({
      conversationId: currentConversationId,
      content,
      useContext,
      useSearch,
      attachmentUrls,
      onMessage: (event: any) => {
        try {
          const dataStr = event?.data || ''
          if (!dataStr) return
          const { code, data: responseData, msg } = JSON.parse(dataStr)
          if (code !== 0) {
            console.warn('[AI Service] 对话异常:', msg)
            return
          }
          // Extract the AI reply content from this event
          const piece = responseData?.receive?.content || ''
          if (piece) {
            fullContent = piece.startsWith(fullContent) ? piece : fullContent + piece
            if (onUpdate) {
              onUpdate(piece)
            }
          }
        } catch (e) {
          console.warn('[AI Service] 解析流数据异常:', e)
        }
      },
      onError: (err: any) => {
        console.error('[AI Service] 流式请求错误:', err)
        if (onError) {
          onError(err)
        }
      },
      onClose: () => {
        console.log('[AI Service] 流式请求完成')
        // Bug fix: pass the accumulated reply instead of the literal '完成',
        // honouring the declared onComplete(fullContent) contract
        if (onComplete) {
          onComplete(fullContent)
        }
      }
    })
  }

  /** Current performance metrics snapshot. */
  getPerformanceMetrics() {
    return this.performanceMonitor.getMetrics()
  }

  /** Reset adapter and monitor state. */
  reset() {
    this.streamAdapter.reset()
    this.performanceMonitor.reset()
  }
}
/**
 * In-memory performance monitor for chat usage counters.
 * Tracks chat/error counts, a running latency average, and uptime.
 */
class PerformanceMonitor {
  private metrics = {
    chatCount: 0,
    errorCount: 0,
    totalLatency: 0,
    averageLatency: 0,
    startTime: Date.now()
  }

  /** Record a lifecycle event ('chat-start' | 'chat-error'); others are ignored. */
  track(event: string) {
    if (event === 'chat-start') {
      this.metrics.chatCount++
    } else if (event === 'chat-error') {
      this.metrics.errorCount++
    }
  }

  /**
   * Add a latency sample (ms) and refresh the running average.
   * Bug fix: guard the division — when no chat has started yet the
   * original produced NaN/Infinity for averageLatency.
   */
  trackLatency(latency: number) {
    this.metrics.totalLatency += latency
    if (this.metrics.chatCount > 0) {
      this.metrics.averageLatency = this.metrics.totalLatency / this.metrics.chatCount
    }
  }

  /** Metrics snapshot with derived uptime and error rate. */
  getMetrics() {
    return {
      ...this.metrics,
      uptime: Date.now() - this.metrics.startTime,
      errorRate: this.metrics.chatCount > 0
        ? this.metrics.errorCount / this.metrics.chatCount
        : 0
    }
  }

  /** Reset all counters and restart the uptime clock. */
  reset() {
    this.metrics = {
      chatCount: 0,
      errorCount: 0,
      totalLatency: 0,
      averageLatency: 0,
      startTime: Date.now()
    }
  }
}
// ========================================
// 工具函数
// ========================================
/**
 * Factory: build a new AIChatService instance.
 */
export function createAIChatService(): AIChatService {
  const instance = new AIChatService()
  return instance
}
/**
 * Whether streaming AI chat is switched on for this build.
 */
export function isAIServiceEnabled(): boolean {
  const { enableStreaming } = AI_SERVICE_CONFIG.features
  return enableStreaming
}
/**
 * Snapshot of the AI service feature flags, default model, and uptime.
 * Bug fix: the original computed `Date.now() - start || Date.now()`, which
 * groups as `(Date.now() - start) || Date.now()` — when the start time was
 * unset it reported the current epoch millis as "uptime".
 */
export function getAIServiceStatus() {
  const startTime = (globalThis as any).__AI_SERVICE_START_TIME as number | undefined
  return {
    customAIEnabled: AI_SERVICE_CONFIG.features.useCustomAI,
    bridgeServiceAvailable: AI_SERVICE_CONFIG.features.useBridgeService,
    streamingEnabled: AI_SERVICE_CONFIG.features.enableStreaming,
    defaultModel: AI_SERVICE_CONFIG.defaults.model,
    // 0 when the start time has not been recorded yet
    uptime: Date.now() - (startTime ?? Date.now())
  }
}
// ========================================
// Vue 组合式函数
// ========================================
/**
 * Vue composable: exposes a fresh AIChatService plus availability/status.
 */
export function useAIChat() {
  const chatService = createAIChatService()
  return {
    service: chatService,
    isEnabled: isAIServiceEnabled(),
    status: getAIServiceStatus()
  }
}
/**
 * Vue composable: streaming-message helper built on top of useAIChat().
 */
export function useStreamMessage() {
  const { service, isEnabled } = useAIChat()
  // Forwards to the service; options mirror AIChatService.sendStreamMessage
  async function sendMessage(options: {
    conversationId?: string
    content: string
    roleId?: number
    onUpdate?: (content: string) => void
    onComplete?: (fullContent: string) => void
    onError?: (error: Error) => void
  }) {
    return service.sendStreamMessage(options)
  }
  return {
    sendMessage,
    isEnabled,
    service
  }
}
// ========================================
// Global instance
// ========================================
// Shared, module-level AI service instance
export const aiChatService = createAIChatService()
// Record module-load time so getAIServiceStatus() can report uptime
;(globalThis as any).__AI_SERVICE_START_TIME = Date.now()
// ========================================
// Default export: bundles the shared instance and the helpers above
// ========================================
export default {
  service: aiChatService,
  createService: createAIChatService,
  isEnabled: isAIServiceEnabled,
  status: getAIServiceStatus,
  useChat: useAIChat,
  useStreamMessage
}

View File

@@ -0,0 +1,285 @@
/**
* AI 桥接服务 - 流式适配器
* 负责 SSE 到 AI SDK 协议转换,保持与现有系统的兼容性
*/
import type { ReadableStream } from 'stream/web'
// SSE 事件类型
interface SSEEvent {
data: string
event?: string
id?: string
retry?: number
}
// 流式数据处理配置
interface StreamAdapterConfig {
enableVisibilityOptimization?: boolean
enablePerformanceTracking?: boolean
bufferSize?: number
}
/**
* 流式适配器类
* 将 SSE 响应转换为 AI SDK 兼容的格式
*/
export class StreamAdapter {
private buffer: string = ''
private isVisible: boolean = true
private config: StreamAdapterConfig
private performanceMetrics: {
chunkCount: number
totalBytes: number
startTime: number
}
constructor(config: StreamAdapterConfig = {}) {
this.config = {
enableVisibilityOptimization: true,
enablePerformanceTracking: true,
bufferSize: 1024,
...config
}
this.performanceMetrics = {
chunkCount: 0,
totalBytes: 0,
startTime: Date.now()
}
// 监听页面可见性变化
if (this.config.enableVisibilityOptimization) {
this.setupVisibilityListener()
}
}
/**
* 设置页面可见性监听器
*/
private setupVisibilityListener(): void {
const handleVisibilityChange = () => {
const wasVisible = this.isVisible
this.isVisible = !document.hidden
if (!wasVisible && this.isVisible) {
console.log('[AI Bridge] 页面重新可见,重新连接流')
} else if (wasVisible && !this.isVisible) {
console.log('[AI Bridge] 页面进入后台,优化性能')
}
}
document.addEventListener('visibilitychange', handleVisibilityChange)
}
/**
* 将 SSE ReadableStream 转换为 AI SDK 可消费的异步迭代器
*/
async *convertSSEResponse(sseStream: ReadableStream): AsyncIterable<string> {
console.log('[AI Bridge] 开始转换 SSE 响应')
const reader = sseStream.getReader()
const decoder = new TextDecoder()
let isFirstChunk = true
try {
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value, { stream: true })
this.buffer += chunk
this.performanceMetrics.totalBytes += chunk.length
// 提取完整的 SSE 事件(以 \n\n 分隔)
const events = this.buffer.split('\n\n')
this.buffer = events.pop() || '' // 保存不完整的事件
for (const event of events) {
if (event.trim()) {
const parsedEvent = this.parseSSEEvent(event)
if (parsedEvent && this.shouldProcessEvent(parsedEvent)) {
const content = this.extractContent(parsedEvent)
if (content) {
yield content
this.performanceMetrics.chunkCount++
}
}
}
}
// 第一块数据特殊处理
if (isFirstChunk) {
console.log('[AI Bridge] 接收到第一块数据')
isFirstChunk = false
}
}
// 处理缓冲区中剩余的数据
if (this.buffer.trim()) {
const lastEvent = this.parseSSEEvent(this.buffer)
if (lastEvent) {
const content = this.extractContent(lastEvent)
if (content) {
yield content
}
}
}
} catch (error) {
console.error('[AI Bridge] SSE 转换错误:', error)
throw error
} finally {
reader.releaseLock()
this.logPerformanceMetrics()
}
}
/**
* 解析 SSE 事件
*/
private parseSSEEvent(event: string): SSEEvent | null {
const lines = event.split('\n')
const parsedEvent: SSEEvent = {
data: ''
}
for (const line of lines) {
if (line.startsWith('data: ')) {
parsedEvent.data = line.substring(6)
} else if (line.startsWith('event: ')) {
parsedEvent.event = line.substring(7)
} else if (line.startsWith('id: ')) {
parsedEvent.id = line.substring(4)
} else if (line.startsWith('retry: ')) {
parsedEvent.retry = parseInt(line.substring(8), 10)
}
}
return parsedEvent.data ? parsedEvent : null
}
/**
* 判断是否应该处理此事件
*/
private shouldProcessEvent(event: SSEEvent): boolean {
// 页面不可见时跳过非关键事件
if (!this.isVisible && this.config.enableVisibilityOptimization) {
return event.event !== 'ping'
}
return true
}
/**
* 从 SSE 事件中提取内容
*/
private extractContent(event: SSEEvent): string | null {
try {
// 尝试解析 JSON 格式
const data = JSON.parse(event.data)
// 处理不同的响应格式
if (data.content) {
return data.content
} else if (data.text) {
return data.text
} else if (data.message?.content) {
return data.message.content
} else if (data.receive?.content) {
return data.receive.content
} else if (data.delta) {
return data.delta
}
// 如果是纯文本数据
if (typeof data === 'string') {
return data
}
return null
} catch {
// 非 JSON 数据直接返回
return event.data
}
}
/**
* 处理流式增量数据
*/
processStreamingDelta(newFullContent: string, previousContent: string = ''): string {
if (newFullContent.startsWith(previousContent)) {
// 正常情况:增量更新
return newFullContent.slice(previousContent.length)
} else {
// 异常情况:内容乱序,返回完整内容
console.warn('[AI Bridge] 流式内容乱序,使用完整内容')
return newFullContent
}
}
/**
* 获取性能指标
*/
getPerformanceMetrics() {
const duration = Date.now() - this.performanceMetrics.startTime
return {
...this.performanceMetrics,
duration,
averageChunkSize: this.performanceMetrics.chunkCount > 0
? this.performanceMetrics.totalBytes / this.performanceMetrics.chunkCount
: 0,
chunksPerSecond: this.performanceMetrics.chunkCount / (duration / 1000)
}
}
/**
* 记录性能指标
*/
private logPerformanceMetrics(): void {
if (this.config.enablePerformanceTracking) {
const metrics = this.getPerformanceMetrics()
console.log('[AI Bridge] 性能指标:', {
总字节数: metrics.totalBytes,
数据块数: metrics.chunkCount,
: `${metrics.duration}ms`,
: `${Math.round(metrics.averageChunkSize)} bytes`,
: `${metrics.chunksPerSecond.toFixed(2)} chunks/s`
})
}
}
/**
* 重置适配器状态
*/
reset(): void {
this.buffer = ''
this.performanceMetrics = {
chunkCount: 0,
totalBytes: 0,
startTime: Date.now()
}
}
/**
* 清理资源
*/
dispose(): void {
this.buffer = ''
this.reset()
// 移除事件监听器(在实际实现中需要保存引用以便移除)
}
}
/**
 * Factory: build a StreamAdapter with the given options.
 */
export function createStreamAdapter(config?: StreamAdapterConfig): StreamAdapter {
  const adapter = new StreamAdapter(config)
  return adapter
}
/**
 * Baseline adapter configuration.
 */
export const DEFAULT_STREAM_CONFIG: StreamAdapterConfig = {
  enableVisibilityOptimization: true,
  enablePerformanceTracking: true,
  bufferSize: 1024
}

View File

@@ -0,0 +1,497 @@
/**
* AI 桥接服务 - 类型定义系统
* 提供完整的 TypeScript 类型定义,确保端到端类型安全
*/
// ========================================
// Base types
// ========================================
/**
 * A single chat message in a conversation.
 */
export interface ChatMessage {
  id: string
  content: string
  role: 'user' | 'assistant' | 'system'
  timestamp: Date
  metadata?: {
    isStreaming?: boolean // true while the reply is still streaming
    tokens?: number
    model?: string
    provider?: string
  }
}
/**
 * One chunk of a streaming response.
 */
export interface StreamChunk {
  id: string
  content: string
  delta?: string // incremental part, when the producer supplies one
  isComplete: boolean // true on the final chunk
  timestamp: Date
}
/**
 * Server-Sent Events wire-format record.
 */
export interface SSEEvent {
  data: string
  event?: string
  id?: string
  retry?: number
}
// ========================================
// AI SDK integration types
// ========================================
/**
 * Supported AI providers.
 */
export type AIProvider = 'openai' | 'anthropic' | 'custom'
/**
 * Configuration for a single AI model.
 */
export interface AIModelConfig {
  provider: AIProvider
  model: string
  apiKey?: string
  baseURL?: string
  parameters?: {
    temperature?: number
    maxTokens?: number
    topP?: number
    frequencyPenalty?: number
    presencePenalty?: number
  }
}
/**
 * Streaming configuration handed to the AI SDK layer.
 */
export interface AIStreamConfig {
  model: AIModelConfig
  streamMode: 'text' | 'full' | 'object'
  enableStreaming?: boolean
  timeout?: number
}
/**
 * Response envelope in AI SDK format.
 */
export interface AIResponse {
  id: string
  type: 'message' | 'tool-call' | 'error' | 'done'
  content?: string
  toolCalls?: ToolCall[]
  error?: {
    code: string
    message: string
  }
  usage?: {
    promptTokens: number
    completionTokens: number
    totalTokens: number
  }
}
/**
 * A single function-tool invocation requested by the model.
 */
export interface ToolCall {
  id: string
  type: 'function'
  function: {
    name: string
    arguments: string // presumably JSON-encoded argument object — confirm with producer
    description?: string
  }
}
// ========================================
// Stream-processing types
// ========================================
/**
 * Configuration for a stream processor.
 */
export interface StreamProcessorConfig {
  enableVisibilityOptimization: boolean
  enablePerformanceTracking: boolean
  bufferSize: number
  chunkSize: number
  enableRetry: boolean
  retryAttempts: number
  retryDelay: number // presumably milliseconds between retries — confirm
}
/**
 * Live state of a stream processor.
 */
export interface StreamProcessorState {
  status: 'idle' | 'connecting' | 'streaming' | 'paused' | 'completed' | 'error'
  bytesReceived: number
  chunksReceived: number
  startTime: Date
  lastChunkTime: Date
  error?: string
}
/**
 * Lifecycle event names emitted while streaming.
 */
export type StreamEventType =
  | 'stream-start'
  | 'stream-chunk'
  | 'stream-end'
  | 'stream-error'
  | 'stream-pause'
  | 'stream-resume'
/**
 * A typed stream lifecycle event.
 */
export interface StreamEvent<T = any> {
  type: StreamEventType
  data: T
  timestamp: Date
  requestId?: string
}
// ========================================
// Performance-monitoring types
// ========================================
/**
 * Aggregated performance metrics.
 */
export interface PerformanceMetrics {
  // Render performance
  renderTime: number
  firstRenderTime: number
  incrementalRenderTime: number
  // Streaming performance
  streamLatency: number
  chunkProcessingTime: number
  bytesPerSecond: number
  // Memory usage
  memoryUsage: number
  memoryPeak: number
  // Error counters
  errorCount: number
  retryCount: number
  // Success rate (presumably a 0..1 fraction — confirm with producer)
  successRate: number
}
/**
 * A point-in-time performance report.
 */
export interface PerformanceReport {
  metrics: PerformanceMetrics
  timestamp: Date
  sessionId: string
  duration: number
  summary: {
    performanceScore: number
    issues: string[]
    recommendations: string[]
  }
}
// ========================================
// Adapter types
// ========================================
/**
 * Contract implemented by the stream adapter (see stream-adapter.ts).
 */
export interface StreamAdapterInterface {
  convertSSEResponse(sseStream: ReadableStream): AsyncIterable<string>
  processStreamingDelta(newContent: string, previousContent?: string): string
  getPerformanceMetrics(): any
  reset(): void
  dispose(): void
}
/**
 * Adapter construction options.
 */
export interface AdapterConfig {
  enableVisibilityOptimization: boolean
  enablePerformanceTracking: boolean
  bufferSize: number
  chunkSize: number
}
// ========================================
// Component prop types
// ========================================
/**
 * Props for the ChatMessageRenderer component.
 */
export interface ChatMessageRendererProps {
  content: string
  isStreaming: boolean
  config?: {
    enableTypewriter?: boolean
    typewriterSpeed?: number
    enableMarkdown?: boolean
    enableHighlighting?: boolean
  }
  onStreamStart?: () => void
  onStreamChunk?: (chunk: string) => void
  onStreamEnd?: (fullContent: string) => void
  onError?: (error: Error) => void
}
/**
 * Props for ChatMessageRendererV2 — extends V1 with AI-SDK integration.
 */
export interface ChatMessageRendererV2Props extends ChatMessageRendererProps {
  aiConfig?: AIStreamConfig
  adapterConfig?: AdapterConfig
  enableAIIntegration?: boolean
}
// ========================================
// API types
// ========================================
/**
 * Outgoing chat request.
 */
export interface ChatRequest {
  conversationId?: string // omitted on the first message of a new conversation
  content: string
  roleId?: number
  useContext?: boolean
  useSearch?: boolean
  attachmentUrls?: string[]
  config?: {
    enableStreaming?: boolean
    enableAI?: boolean
    model?: AIModelConfig
  }
}
/**
 * Chat response; `send` echoes the user message, `receive` is the AI reply.
 */
export interface ChatResponse {
  id: string
  conversationId: string
  send?: {
    id: string
    content: string
    timestamp: Date
  }
  receive?: {
    id: string
    content: string
    reasoningContent?: string // reasoning text, when the model exposes it
    timestamp: Date
  }
  metadata?: {
    model?: string
    provider?: string
    tokens?: number
    processingTime?: number
  }
}
// ========================================
// Configuration types
// ========================================
/**
 * Global AI configuration.
 */
export interface AIGlobalConfig {
  enabled: boolean
  defaultProvider: AIProvider
  defaultModel: string
  streamConfig: StreamProcessorConfig
  performanceConfig: {
    enabled: boolean
    samplingRate: number // fraction sampled, e.g. 0.1 = 10%
    reportInterval: number // ms between reports
  }
  featureFlags: {
    enableVercelSDK: boolean
    enableStreamingV2: boolean
    enablePerformanceMonitoring: boolean
    enableAdvancedFeatures: boolean
  }
}
/**
 * Shape of the Vite environment variables this service reads.
 * All env vars arrive as strings.
 */
export interface EnvironmentConfig {
  VITE_AI_SDK_ENABLED: string
  VITE_AI_BRIDGE_URL: string
  VITE_AI_DEFAULT_PROVIDER: AIProvider
  VITE_AI_DEFAULT_MODEL: string
}
// ========================================
// Tool types
// ========================================
/**
 * Result of executing a tool call.
 */
export interface ToolResult {
  id: string
  name: string
  result: any
  error?: string
  duration: number // presumably execution time in ms — confirm with producer
}
/**
 * JSON-schema-style definition of a callable tool.
 */
export interface ToolDefinition {
  name: string
  description: string
  parameters: {
    type: 'object'
    properties: Record<string, any>
    required?: string[]
  }
}
// ========================================
// Error types
// ========================================
/**
 * Base error for the AI bridge service.
 * Carries a machine-readable code plus optional cause and context payload.
 */
export class AIBridgeError extends Error {
  public code: string
  public cause?: Error
  public context?: Record<string, any>

  constructor(
    message: string,
    code: string,
    cause?: Error,
    context?: Record<string, any>
  ) {
    super(message)
    this.name = 'AIBridgeError'
    this.code = code
    this.cause = cause
    this.context = context
  }
}
/**
 * Error raised while processing a stream; tags the failing stream id.
 */
export class StreamProcessingError extends AIBridgeError {
  constructor(message: string, public streamId: string, cause?: Error) {
    super(message, 'STREAM_PROCESSING_ERROR', cause, { streamId })
    this.name = 'StreamProcessingError'
  }
}
/**
 * Error raised for an invalid or missing configuration entry.
 */
export class ConfigError extends AIBridgeError {
  constructor(message: string, public configKey: string, public configValue: any) {
    super(message, 'CONFIG_ERROR', undefined, { configKey, configValue })
    this.name = 'ConfigError'
  }
}
// ========================================
// Function types
// ========================================
/**
 * Type-guard function.
 */
export type TypeGuard<T> = (value: any) => value is T
/**
 * Async event/data handler.
 */
export type AsyncHandler<T = void> = (data?: T) => Promise<void>
/**
 * Sync event/data handler.
 */
export type Handler<T = void> = (data?: T) => void
/**
 * Listener for stream lifecycle events.
 */
export type EventListener<T = any> = (event: StreamEvent<T>) => void
// ========================================
// Utility types
// ========================================
/**
 * Exclude members assignable to U from union T.
 */
export type ExcludeType<T, U> = T extends U ? never : T
/**
 * Keys of T that are required.
 */
export type RequiredKeys<T> = {
  [K in keyof T]-?: {} extends Pick<T, K> ? never : K
}[keyof T]
/**
 * Keys of T that are optional.
 */
export type OptionalKeys<T> = {
  [K in keyof T]-?: {} extends Pick<T, K> ? K : never
}[keyof T]
// ========================================
// Default values
// ========================================
/**
 * Default model configuration.
 */
export const DEFAULT_AI_CONFIG: AIModelConfig = {
  provider: 'openai',
  model: 'gpt-3.5-turbo',
  parameters: {
    temperature: 0.7,
    maxTokens: 2000
  }
}
/**
 * Default stream-processing configuration.
 */
export const DEFAULT_STREAM_CONFIG: StreamProcessorConfig = {
  enableVisibilityOptimization: true,
  enablePerformanceTracking: true,
  bufferSize: 1024,
  chunkSize: 64,
  enableRetry: true,
  retryAttempts: 3,
  retryDelay: 1000
}
/**
 * Default performance-monitoring configuration.
 */
export const DEFAULT_PERFORMANCE_CONFIG = {
  enabled: true,
  samplingRate: 0.1, // 10% sampling rate
  reportInterval: 5000 // report every 5 seconds
}