feat: 功能

This commit is contained in:
2026-02-04 01:18:16 +08:00
parent f8e40c039d
commit 0e1b6fe643
19 changed files with 1472 additions and 1008 deletions

View File

@@ -105,15 +105,15 @@
</template>
<script setup>
import { ref, reactive, computed, onMounted, nextTick } from 'vue'
import { ref, reactive, computed, onMounted } from 'vue'
import { message, Modal } from 'ant-design-vue'
import { PlusOutlined, SearchOutlined, UploadOutlined, PlayCircleOutlined } from '@ant-design/icons-vue'
import { VoiceService } from '@/api/voice'
import { MaterialService } from '@/api/material'
import { useUpload } from '@/composables/useUpload'
import useVoiceText from '@gold/hooks/web/useVoiceText'
import dayjs from 'dayjs'
import BasicLayout from '@/layouts/components/BasicLayout.vue'
import { MaterialService } from '@/api/material'
import { VoiceService } from '@/api/voice'
import { useUpload } from '@/composables/useUpload'
import useVoiceText from '@gold/hooks/web/useVoiceText'
// ========== 常量 ==========
@@ -125,10 +125,14 @@ const DEFAULT_FORM_DATA = {
language: 'zh-CN',
gender: 'female',
note: '',
text: '', // 音频文本
fileUrl: '' // 文件URL用于获取音频文本
text: '',
fileUrl: ''
}
const MAX_FILE_SIZE = 50 * 1024 * 1024
const VALID_AUDIO_TYPES = ['audio/mpeg', 'audio/wav', 'audio/wave', 'audio/x-wav', 'audio/aac', 'audio/mp4', 'audio/flac', 'audio/ogg']
const VALID_AUDIO_EXTENSIONS = ['.mp3', '.wav', '.aac', '.m4a', '.flac', '.ogg']
// ========== 响应式数据 ==========
const loading = ref(false)
const submitting = ref(false)
@@ -216,31 +220,31 @@ const loadVoiceList = async () => {
}
// ========== 搜索和分页 ==========
const handleSearch = () => {
function handleSearch() {
pagination.current = 1
loadVoiceList()
}
const handleReset = () => {
function handleReset() {
searchParams.name = ''
pagination.current = 1
loadVoiceList()
}
const handleTableChange = (pag) => {
function handleTableChange(pag) {
pagination.current = pag.current
pagination.pageSize = pag.pageSize
loadVoiceList()
}
// ========== CRUD 操作 ==========
const handleCreate = () => {
function handleCreate() {
formMode.value = 'create'
resetForm()
modalVisible.value = true
}
const handleEdit = async (record) => {
async function handleEdit(record) {
formMode.value = 'edit'
try {
const res = await VoiceService.get(record.id)
@@ -252,13 +256,13 @@ const handleEdit = async (record) => {
modalVisible.value = true
}
const handleDelete = (record) => {
function handleDelete(record) {
Modal.confirm({
title: '确认删除',
content: `确定要删除配音「${record.name}」吗?此操作不可恢复。`,
okButtonProps: { danger: true },
centered: true,
onOk: async () => {
onOk: async function() {
try {
const res = await VoiceService.delete(record.id)
if (res.code !== 0) return message.error(res.msg || '删除失败')
@@ -274,7 +278,7 @@ const handleDelete = (record) => {
}
// ========== 音频播放 ==========
const handlePlayAudio = (record) => {
function handlePlayAudio(record) {
if (record.fileUrl && audioPlayer.value) {
audioPlayer.value.src = record.fileUrl
audioPlayer.value.play()
@@ -284,20 +288,20 @@ const handlePlayAudio = (record) => {
}
// ========== 文件上传 ==========
const handleBeforeUpload = (file) => {
const MAX_FILE_SIZE = 50 * 1024 * 1024
function handleBeforeUpload(file) {
if (file.size > MAX_FILE_SIZE) {
message.error('文件大小不能超过 50MB')
return false
}
const validTypes = ['audio/mpeg', 'audio/wav', 'audio/wave', 'audio/x-wav', 'audio/aac', 'audio/mp4', 'audio/flac', 'audio/ogg']
const validExtensions = ['.mp3', '.wav', '.aac', '.m4a', '.flac', '.ogg']
const fileName = file.name.toLowerCase()
const fileType = file.type.toLowerCase()
const isValidType = validTypes.some(type => fileType.includes(type)) ||
validExtensions.some(ext => fileName.endsWith(ext))
const isValidType = VALID_AUDIO_TYPES.some(function(type) {
return fileType.includes(type)
}) || VALID_AUDIO_EXTENSIONS.some(function(ext) {
return fileName.endsWith(ext)
})
if (!isValidType) {
message.error('请上传音频文件MP3、WAV、AAC、M4A、FLAC、OGG')
@@ -307,7 +311,8 @@ const handleBeforeUpload = (file) => {
return true
}
const handleCustomUpload = async (options) => {
// ========== 文件上传相关 ==========
async function handleCustomUpload(options) {
const { file, onSuccess, onError } = options
try {
@@ -315,17 +320,14 @@ const handleCustomUpload = async (options) => {
fileCategory: 'voice',
groupId: null,
coverBase64: null,
onStart: () => {},
onProgress: () => {},
onSuccess: async (id, fileUrl) => {
onSuccess: async function(id, fileUrl) {
formData.fileId = id
formData.fileUrl = fileUrl // 保存文件URL
formData.fileUrl = fileUrl
message.success('文件上传成功')
// 通过fileId获取播放URL用于语音识别
await fetchAudioTextById(id)
onSuccess?.({ code: 0, data: id }, file)
},
onError: (error) => {
onError: function(error) {
const errorMsg = error.message || '上传失败,请稍后重试'
message.error(errorMsg)
onError?.(error)
@@ -339,13 +341,10 @@ const handleCustomUpload = async (options) => {
}
}
// 通过fileId获取音频文本
const fetchAudioTextById = async (fileId) => {
async function fetchAudioTextById(fileId) {
if (!fileId) return
try {
// 获取音频播放URL
const res = await MaterialService.getAudioPlayUrl(fileId)
if (res.code === 0 && res.data) {
const rawFileUrl = res.data
@@ -363,39 +362,22 @@ const fetchAudioTextById = async (fileId) => {
}
}
// 获取音频文本
const fetchAudioText = async (fileUrl) => {
if (!fileUrl) return
try {
// 阿里云语音识别服务无法访问预签名URL使用原始URL
const rawFileUrl = extractRawUrl(fileUrl)
const results = await getVoiceText([{ audio_url: rawFileUrl }])
if (results && results.length > 0) {
const text = results[0].value
formData.text = text
if (text) {
message.success('音频文本获取成功')
}
}
} catch (error) {
console.error('获取音频文本失败:', error)
}
}
const handleFileListChange = (info) => {
function handleFileListChange(info) {
const { fileList: newFileList } = info
if (newFileList) {
fileList.value = newFileList.filter(item => item.status !== 'removed')
fileList.value = newFileList.filter(function(item) {
return item.status !== 'removed'
})
}
}
const handleRemoveFile = () => {
function handleRemoveFile() {
formData.fileId = null
fileList.value = []
}
// ========== 表单操作 ==========
const handleSubmit = async () => {
async function handleSubmit() {
try {
await formRef.value.validate()
} catch {
@@ -412,7 +394,7 @@ const handleSubmit = async () => {
language: formData.language,
gender: formData.gender,
note: formData.note,
text: formData.text // 传入音频文本
text: formData.text
}
: {
id: formData.id,
@@ -443,19 +425,19 @@ const handleSubmit = async () => {
}
}
const handleCancel = () => {
function handleCancel() {
modalVisible.value = false
resetForm()
}
const resetForm = () => {
function resetForm() {
Object.assign(formData, { ...DEFAULT_FORM_DATA })
fileList.value = []
formRef.value?.resetFields()
}
// ========== 生命周期 ==========
onMounted(() => {
onMounted(function() {
loadVoiceList()
})
</script>

View File

@@ -15,10 +15,6 @@
:show-count="true"
class="tts-textarea"
/>
<div v-if="identifyState.identified && faceDuration > 0" class="text-hint">
<span class="hint-icon">💡</span>
<span>视频中人脸出现时长约 {{ (faceDuration / 1000).toFixed(1) }} 建议文案不超过 {{ suggestedMaxChars }} </span>
</div>
</div>
<!-- 音色选择 -->
@@ -141,58 +137,6 @@
</div>
</div>
<!-- 素材校验结果 -->
<div v-if="materialValidation.videoDuration > 0 && materialValidation.audioDuration > 0" class="section">
<h3>素材校验</h3>
<div class="validation-result" :class="{ 'validation-passed': materialValidation.isValid, 'validation-failed': !materialValidation.isValid }">
<div class="validation-status">
<span class="status-icon">{{ materialValidation.isValid ? '✅' : '❌' }}</span>
<span class="status-text">{{ materialValidation.isValid ? '校验通过' : '校验失败' }}</span>
</div>
<!-- 时长对比进度条 -->
<div class="duration-comparison">
<div class="duration-bar">
<div class="duration-label">
<span>音频时长</span>
<span class="duration-value">{{ (materialValidation.audioDuration / 1000).toFixed(1) }}s</span>
</div>
<div class="progress-bar audio-bar">
<div
class="progress-fill"
:style="{ width: `${(materialValidation.audioDuration / Math.max(materialValidation.videoDuration, materialValidation.audioDuration)) * 100}%` }"
></div>
</div>
</div>
<div class="duration-bar">
<div class="duration-label">
<span>视频时长</span>
<span class="duration-value">{{ (materialValidation.videoDuration / 1000).toFixed(1) }}s</span>
</div>
<div class="progress-bar video-bar">
<div
class="progress-fill"
:class="{ 'success': materialValidation.isValid, 'error': !materialValidation.isValid }"
:style="{ width: `${(materialValidation.videoDuration / Math.max(materialValidation.videoDuration, materialValidation.audioDuration)) * 100}%` }"
></div>
</div>
</div>
</div>
<!-- 失败提示和建议 -->
<div v-if="!materialValidation.isValid" class="validation-error">
<p class="error-message">
视频时长必须大于音频时长才能生成数字人视频
</p>
<div class="quick-actions">
<a-button size="small" @click="replaceVideo">更换视频</a-button>
<a-button size="small" @click="handleSimplifyScript">精简文案</a-button>
</div>
</div>
</div>
</div>
<!-- 配音生成与校验仅在识别后显示 -->
<div v-if="identifyState.identified" class="section audio-generation-section">
<h3>配音生成与校验</h3>
@@ -211,73 +155,67 @@
</a-button>
</div>
<!-- 音频预览生成后显示 -->
<!-- 音频预览 -->
<div v-if="audioState.generated" class="audio-preview">
<div class="audio-info">
<h4>生成的配音</h4>
<div class="duration-info">
<span class="label">音频时长</span>
<span class="value">{{ (audioState.durationMs / 1000).toFixed(1) }} </span>
<span class="value">{{ audioDurationSec }} </span>
</div>
<div class="duration-info">
<span class="label">人脸区间</span>
<span class="value">{{ (faceDuration / 1000).toFixed(1) }} </span>
<span class="value">{{ faceDurationSec }} </span>
</div>
<div class="duration-info" :class="{ 'validation-passed': audioState.validationPassed, 'validation-failed': !audioState.validationPassed }">
<div class="duration-info" :class="{ 'validation-passed': validationPassed, 'validation-failed': !validationPassed }">
<span class="label">校验结果</span>
<span class="value">
{{ audioState.validationPassed ? '✅ 通过' : '❌ 不通过(需至少2秒重合' }}
{{ validationPassed ? '✅ 通过' : '❌ 不通过(音频时长不能超过人脸时长' }}
</span>
</div>
</div>
<!-- 音频播放器 -->
<div class="audio-player">
<audio
v-if="audioState.generated.audioBase64"
:src="`data:audio/mp3;base64,${audioState.generated.audioBase64}`"
controls
class="audio-element"
/>
<audio
v-else-if="audioState.generated.audioUrl"
:src="audioState.generated.audioUrl"
controls
class="audio-element"
/>
<div v-if="audioUrl" class="audio-player">
<audio :src="audioUrl" controls class="audio-element" />
</div>
<!-- 重新生成按钮 -->
<div class="regenerate-row">
<a-button
type="link"
size="small"
@click="generateAudio"
:loading="audioState.generating"
>
<a-button type="link" size="small" @click="generateAudio" :loading="audioState.generating">
重新生成
</a-button>
</div>
</div>
</div>
<!-- Pipeline 进度条 -->
<PipelineProgress
v-if="isPipelineBusy || isPipelineReady || isPipelineFailed || isPipelineCompleted"
:state="pipelineState"
:progress="pipelineProgress"
:is-busy="isPipelineBusy"
:is-ready="isPipelineReady"
:is-failed="isPipelineFailed"
:is-completed="isPipelineCompleted"
:error="pipelineError"
@retry="retryPipeline"
@reset="resetPipeline"
/>
<!-- 按钮组 -->
<div class="action-buttons">
<a-button
type="primary"
size="large"
:disabled="!canGenerate"
:loading="isPipelineBusy"
block
@click="generateDigitalHuman"
>
生成数字人视频
{{ isPipelineBusy ? '处理中...' : '生成数字人视频' }}
</a-button>
<!-- 添加提示信息 -->
<div v-if="canGenerate && !audioState.validationPassed" class="generate-hint">
<span class="hint-icon"></span>
<span>请先生成配音并通过时长校验</span>
</div>
</div>
</div>
@@ -299,6 +237,7 @@ import VideoSelector from '@/components/VideoSelector.vue'
import VoiceSelector from '@/components/VoiceSelector.vue'
import ResultPanel from '@/components/ResultPanel.vue'
import FullWidthLayout from '@/layouts/components/FullWidthLayout.vue'
import PipelineProgress from '@/components/PipelineProgress.vue'
// Controller Hook
import { useIdentifyFaceController } from './hooks/useIdentifyFaceController'
@@ -311,6 +250,7 @@ const dragOver = ref(false)
// Controller 内部直接创建和管理两个子 Hook
const controller = useIdentifyFaceController()
// 解构 controller 以简化模板调用
const {
// 语音生成相关
@@ -318,14 +258,11 @@ const {
speechRate,
audioState,
canGenerateAudio,
suggestedMaxChars,
generateAudio,
// 数字人生成相关
videoState,
identifyState,
materialValidation,
faceDuration,
getVideoPreviewUrl,
// 计算属性
@@ -334,6 +271,21 @@ const {
textareaPlaceholder,
speechRateMarks,
speechRateDisplay,
faceDurationSec,
audioDurationSec,
audioUrl,
validationPassed,
// Pipeline 状态
pipelineState,
isPipelineBusy,
isPipelineReady,
isPipelineFailed,
isPipelineCompleted,
pipelineProgress,
pipelineError,
retryPipeline,
resetPipeline,
// 事件处理方法
handleVoiceSelect,
@@ -343,7 +295,6 @@ const {
handleSelectUpload,
handleSelectFromLibrary,
handleVideoSelect,
handleSimplifyScript,
handleVideoLoaded,
replaceVideo,
generateDigitalHuman,

View File

@@ -0,0 +1,124 @@
/**
* @fileoverview 状态机配置 - 状态定义和配置
*/
import type { PipelineState, StateConfig } from './types'
/**
 * Per-state UI configuration: label, progress percentage and description
 * shown by the pipeline progress component.
 */
export const STATE_CONFIG: Record<PipelineState, StateConfig> = {
  idle: {
    label: '等待开始',
    progress: 0,
    description: '请先选择视频并输入文案',
  },
  uploading: {
    label: '上传视频中',
    progress: 15,
    description: '正在上传视频文件...',
  },
  recognizing: {
    label: '识别人脸中',
    progress: 35,
    description: '正在分析视频中的人脸信息...',
  },
  generating: {
    label: '生成配音中',
    progress: 55,
    description: '正在合成语音...',
  },
  validating: {
    label: '校验时长中',
    progress: 70,
    description: '正在校验音频与视频时长...',
  },
  ready: {
    label: '准备就绪',
    progress: 80,
    description: '校验通过,可以创建数字人视频',
  },
  creating: {
    label: '创建任务中',
    progress: 95,
    description: '正在提交数字人视频生成任务...',
  },
  completed: {
    label: '已完成',
    progress: 100,
    description: '任务已提交成功',
  },
  // 'failed' deliberately reports progress 0 so the bar resets on error.
  failed: {
    label: '失败',
    progress: 0,
    description: '操作失败,请重试',
  },
}
/**
 * Linear state order used for the step indicator.
 * ('failed' is not part of the linear order.)
 */
export const STATE_ORDER: PipelineState[] = [
  'idle',
  'uploading',
  'recognizing',
  'generating',
  'validating',
  'ready',
  'creating',
  'completed',
]
/**
 * States in which an asynchronous step is actively in flight.
 */
export const BUSY_STATES: PipelineState[] = [
  'uploading',
  'recognizing',
  'generating',
  'validating',
  'creating',
]
/**
 * Terminal states — a run ends here; no further automatic transitions.
 */
export const TERMINAL_STATES: PipelineState[] = [
  'completed',
  'failed',
]
/**
 * Position of a state within the step order; -1 when the state is not
 * part of the linear order (e.g. 'failed').
 */
export function getStateIndex(state: PipelineState): number {
  return STATE_ORDER.findIndex((candidate) => candidate === state)
}
/**
 * Progress percentage configured for the given state.
 */
export function getStateProgress(state: PipelineState): number {
  const { progress } = STATE_CONFIG[state]
  return progress
}
/**
 * Whether the given state represents work currently in flight.
 */
export function isBusyState(state: PipelineState): boolean {
  return BUSY_STATES.some((busy) => busy === state)
}
/**
 * Whether the given state is terminal (end of a run).
 */
export function isTerminalState(state: PipelineState): boolean {
  for (const terminal of TERMINAL_STATES) {
    if (terminal === state) return true
  }
  return false
}
/**
 * Whether the pipeline may be retried from the given state.
 * Only 'failed' is retryable.
 */
export function canRetryFrom(state: PipelineState): boolean {
  switch (state) {
    case 'failed':
      return true
    default:
      return false
  }
}

View File

@@ -0,0 +1,126 @@
/**
* @fileoverview 数字人生成流程状态机 - 类型定义
*/
/**
 * Every state the pipeline state machine can be in.
 */
export type PipelineState =
  | 'idle' // nothing started yet
  | 'uploading' // uploading the video file
  | 'recognizing' // running face recognition
  | 'generating' // generating the voice-over audio
  | 'validating' // validating audio vs. video duration
  | 'ready' // validation passed, ready to create the task
  | 'creating' // submitting the generation task
  | 'completed' // task submitted
  | 'failed' // a step failed
/**
 * Per-state UI configuration.
 */
export interface StateConfig {
  /** Human-readable state label */
  label: string
  /** Progress percentage (0–100) */
  progress: number
  /** Longer description of the state */
  description: string
  /** Optional icon name */
  icon?: string
}
/**
 * Result of a single pipeline step.
 *
 * NOTE(review): the type parameter defaults to `any`; `unknown` would force
 * callers to narrow `data` before use — left unchanged to avoid breaking
 * existing call sites.
 */
export interface StepResult<T = any> {
  /** Whether the step succeeded */
  success: boolean
  /** Payload produced by the step */
  data?: T
  /** Error raised by the step */
  error?: Error
}
/**
 * Accumulated context data for one pipeline run.
 */
export interface PipelineContext {
  /** Video file chosen via upload */
  videoFile: File | null
  /** Video picked from the library (shape defined by the caller) */
  selectedVideo: any
  /** Script text for the voice-over */
  text: string
  /** Selected voice (shape defined by the caller) */
  voice: any
  /** Speech rate multiplier */
  speechRate: number
  /** Server-side file id of the video */
  videoFileId: string | number | null
  /** Recognition session id */
  sessionId: string
  /** Recognized face id */
  faceId: string
  /** Face segment start time — presumably milliseconds, confirm against API */
  faceStartTime: number
  /** Face segment end time — presumably milliseconds, confirm against API */
  faceEndTime: number
  /** Generated audio as Base64 */
  audioBase64: string
  /** Audio format (e.g. 'mp3') */
  audioFormat: string
  /** Audio duration in milliseconds */
  audioDurationMs: number
  /** Video duration in milliseconds */
  videoDurationMs: number
  /** Whether duration validation passed */
  validationPassed: boolean
}
/**
 * Input parameters for one pipeline run.
 */
export interface PipelineParams {
  /** Uploaded video file (upload mode) */
  videoFile: File | null
  /** Library video (select mode) */
  selectedVideo: any
  /** Script text */
  text: string
  /** Selected voice */
  voice: any
  /** Speech rate multiplier */
  speechRate: number
}
/**
 * Callbacks the pipeline delegates each step to.
 */
export interface PipelineOptions {
  /** Upload the video file; resolves to the server file id */
  uploadVideo: (file: File) => Promise<string | number>
  /** Run face recognition on a library video */
  recognizeFromLibrary: (video: any) => Promise<any>
  /** Run face recognition on an already-uploaded video */
  recognizeUploaded: (fileId: string | number) => Promise<any>
  /** Generate the voice-over audio */
  generateAudio: (text: string, voice: any, speechRate: number) => Promise<{
    audioBase64: string
    format?: string
    durationMs?: number
  }>
  /** Create the generation task */
  createTask: (data: any) => Promise<void>
}
/**
 * Snapshot of the state machine, for debugging.
 */
export interface ExecutionState {
  /** Current state */
  current: PipelineState
  /** States visited so far, in order */
  history: PipelineState[]
  /** Current context data */
  context: Partial<PipelineContext>
  /** Whether the next step (task creation) may run */
  canNext: boolean
  /** Whether a retry is possible */
  canRetry: boolean
}

View File

@@ -0,0 +1,287 @@
/**
* @fileoverview 极简状态机 Hook - 数字人生成流程
*
* 设计理念:
* 1. 简单直观 - 用普通 JS/TS 代码,无需学习复杂概念
* 2. 易于调试 - 打断点即可查看状态
* 3. 功能完整 - 支持状态管理、进度显示、错误处理、重试
*/
import { ref, computed } from 'vue'
import { message } from 'ant-design-vue'
import type { LipSyncTaskData } from '../../types/identify-face'
import { createLipSyncTask } from '@/api/kling'
import type {
PipelineState,
PipelineContext,
PipelineParams,
PipelineOptions,
ExecutionState,
} from './types'
import {
STATE_CONFIG,
getStateIndex,
isBusyState,
isTerminalState,
canRetryFrom,
} from './states'
/**
 * Empty context a run starts from (applied by reset()).
 */
const INITIAL_CONTEXT: Partial<PipelineContext> = {
  videoFile: null,
  selectedVideo: null,
  text: '',
  voice: null,
  speechRate: 1,
  videoFileId: null,
  sessionId: '',
  faceId: '',
  faceStartTime: 0,
  faceEndTime: 0,
  audioBase64: '',
  audioFormat: 'mp3',
  audioDurationMs: 0,
  videoDurationMs: 0,
  validationPassed: false,
}
/**
* 极简状态机 Hook
*/
export function useSimplePipeline(options: PipelineOptions) {
// ========== 状态管理 ==========
const state = ref<PipelineState>('idle')
const context = ref<Partial<PipelineContext>>({ ...INITIAL_CONTEXT })
const error = ref<string | null>(null)
const history = ref<PipelineState[]>(['idle'])
// ========== 计算属性 ==========
const stateLabel = computed(() => STATE_CONFIG[state.value].label)
const stateDescription = computed(() => STATE_CONFIG[state.value].description)
const progress = computed(() => STATE_CONFIG[state.value].progress)
const currentStepIndex = computed(() => getStateIndex(state.value))
const isBusy = computed(() => isBusyState(state.value))
const isReady = computed(() => state.value === 'ready')
const isFailed = computed(() => state.value === 'failed')
const isCompleted = computed(() => state.value === 'completed')
const isTerminal = computed(() => isTerminalState(state.value))
const canRetry = computed(() => canRetryFrom(state.value))
// ========== 内部方法 ==========
/**
* 更新状态
*/
function setState(newState: PipelineState) {
const oldState = state.value
state.value = newState
history.value.push(newState)
console.log(`[Pipeline] ${oldState} -> ${newState}`)
}
/**
* 设置错误状态
*/
function setError(err: Error | string) {
const errorMsg = typeof err === 'string' ? err : err.message
error.value = errorMsg
setState('failed')
message.error(errorMsg)
}
/**
* 执行步骤(带错误处理)
*/
async function executeStep<T>(
newState: PipelineState,
fn: () => Promise<T>
): Promise<T> {
setState(newState)
try {
return await fn()
} catch (err) {
setError(err as Error)
throw err
}
}
// ========== 公开方法 ==========
/**
* 运行完整流程(到 ready 状态)
*/
async function run(params: PipelineParams): Promise<void> {
// 重置状态
reset()
try {
// 保存参数到上下文
context.value.videoFile = params.videoFile
context.value.selectedVideo = params.selectedVideo
context.value.text = params.text
context.value.voice = params.voice
context.value.speechRate = params.speechRate
// 步骤1: 上传视频(如果是上传模式)
if (params.videoFile && !params.selectedVideo) {
const fileId = await executeStep('uploading', () =>
options.uploadVideo(params.videoFile!)
)
context.value.videoFileId = fileId
} else if (params.selectedVideo) {
context.value.videoFileId = params.selectedVideo.fileId
}
// 步骤2: 识别人脸
const recognizeData = params.selectedVideo
? await options.recognizeFromLibrary(params.selectedVideo)
: await options.recognizeUploaded(context.value.videoFileId!)
await executeStep('recognizing', async () => recognizeData)
context.value.sessionId = recognizeData.sessionId
context.value.faceId = recognizeData.faceId
context.value.faceStartTime = recognizeData.startTime || 0
context.value.faceEndTime = recognizeData.endTime || 0
context.value.videoDurationMs = recognizeData.duration || 0
// 步骤3: 生成音频
const audioData = await executeStep('generating', () =>
options.generateAudio(params.text, params.voice, params.speechRate)
)
context.value.audioBase64 = audioData.audioBase64
context.value.audioFormat = audioData.format || 'mp3'
context.value.audioDurationMs = audioData.durationMs || 0
// 步骤4: 校验时长
setState('validating')
const videoDurationMs = context.value.videoDurationMs ?? 0
if (context.value.audioDurationMs > videoDurationMs) {
throw new Error(
`校验失败:音频时长(${(context.value.audioDurationMs / 1000).toFixed(1)}秒) 超过人脸时长(${(videoDurationMs / 1000).toFixed(1)}秒)`
)
}
context.value.validationPassed = true
// 到达 ready 状态
setState('ready')
} catch (err) {
// 错误已在 executeStep 中处理
}
}
/**
* 创建数字人任务(从 ready 状态)
*/
async function createTask(): Promise<void> {
if (state.value !== 'ready') {
message.warning('请先完成视频识别和音频生成')
return
}
try {
setState('creating')
const taskData: LipSyncTaskData = {
taskName: `数字人任务_${Date.now()}`,
videoFileId: context.value.videoFileId!,
inputText: context.value.text!,
speechRate: context.value.speechRate!,
volume: 0,
guidanceScale: 1,
seed: 8888,
kling_session_id: context.value.sessionId!,
kling_face_id: context.value.faceId!,
kling_face_start_time: context.value.faceStartTime!,
kling_face_end_time: context.value.faceEndTime!,
ai_provider: 'kling',
voiceConfigId: context.value.voice!.rawId || context.value.voice!.id.match(/[\w-]+$/)?.[0] || context.value.voice!.id,
pre_generated_audio: {
audioBase64: context.value.audioBase64!,
format: context.value.audioFormat!,
},
sound_end_time: context.value.audioDurationMs!,
}
const res = await createLipSyncTask(taskData)
if (res.code !== 0) {
throw new Error(res.msg || '任务创建失败')
}
setState('completed')
message.success('任务已提交,请在任务中心查看生成进度')
} catch (err) {
setError(err as Error)
}
}
/**
* 重试(从 failed 状态恢复)
*/
function retry(): void {
if (!canRetry.value) {
message.warning('当前状态无法重试')
return
}
error.value = null
// 回到 idle 重新开始
setState('idle')
}
/**
* 重置到初始状态
*/
function reset(): void {
state.value = 'idle'
context.value = { ...INITIAL_CONTEXT }
error.value = null
history.value = ['idle']
}
/**
* 获取执行状态(用于调试)
*/
function getExecutionState(): ExecutionState {
return {
current: state.value,
history: [...history.value],
context: { ...context.value },
canNext: state.value === 'ready',
canRetry: canRetry.value,
}
}
// ========== 返回 API ==========
return {
// 状态
state,
context,
error,
history,
// 计算属性
stateLabel,
stateDescription,
progress,
currentStepIndex,
isBusy,
isReady,
isFailed,
isCompleted,
isTerminal,
canRetry,
// 方法
run,
createTask,
retry,
reset,
getExecutionState,
}
}

View File

@@ -1,6 +1,5 @@
/**
* @fileoverview useDigitalHumanGeneration Hook - 数字人生成逻辑封装
* @author Claude Code
* @fileoverview useDigitalHumanGeneration Hook - 数字人生成逻辑
*/
import { ref, computed } from 'vue'
@@ -14,18 +13,14 @@ import type {
import { identifyUploadedVideo } from '@/api/kling'
import { useUpload } from '@/composables/useUpload'
/**
* 数字人生成 Hook
* 独立管理所有状态,不依赖外部状态
*/
export function useDigitalHumanGeneration(): UseDigitalHumanGeneration {
// ==================== 响应式状态 ====================
// ========== 状态 ==========
const videoState = ref<VideoState>({
uploadedVideo: '',
videoFile: null,
previewVideoUrl: '',
selectedVideo: null,
fileId: null,
videoSource: null,
selectorVisible: false,
})
@@ -40,24 +35,16 @@ export function useDigitalHumanGeneration(): UseDigitalHumanGeneration {
videoFileId: null,
})
// ==================== Upload Hook ====================
const { upload } = useUpload()
// ==================== 计算属性 ====================
/**
* 人脸出现时长
*/
const faceDuration = computed(() => {
// ========== 计算属性 ==========
const faceDuration = computed(function() {
return identifyState.value.faceEndTime - identifyState.value.faceStartTime
})
// ==================== 核心方法 ====================
// ========== 方法 ==========
/**
* 处理视频文件上传
*/
const handleFileUpload = async (file: File): Promise<void> => {
async function handleFileUpload(file: File): Promise<void> {
if (!file.name.match(/\.(mp4|mov)$/i)) {
message.error('仅支持 MP4 和 MOV')
return
@@ -68,148 +55,101 @@ export function useDigitalHumanGeneration(): UseDigitalHumanGeneration {
videoState.value.selectedVideo = null
videoState.value.previewVideoUrl = ''
videoState.value.videoSource = 'upload'
resetIdentifyState()
await performFaceRecognition()
}
/**
* 处理从素材库选择视频
*/
const handleVideoSelect = (video: Video): void => {
async function handleVideoSelect(video: Video): Promise<void> {
videoState.value.selectedVideo = video
videoState.value.uploadedVideo = video.fileUrl
videoState.value.videoFile = null
videoState.value.videoSource = 'select'
videoState.value.selectorVisible = false
resetIdentifyState()
identifyState.value.videoFileId = video.id
identifyState.value.videoFileId = video.fileId
}
/**
* 执行人脸识别
*/
const performFaceRecognition = async (): Promise<void> => {
async function performFaceRecognition(): Promise<void> {
const hasUploadFile = videoState.value.videoFile
const hasSelectedVideo = videoState.value.selectedVideo
if (!hasUploadFile && !hasSelectedVideo) {
return
}
if (!hasUploadFile && !hasSelectedVideo) return
identifyState.value.identifying = true
try {
let res
if (hasSelectedVideo) {
res = await identifyUploadedVideo(hasSelectedVideo)
identifyState.value.videoFileId = hasSelectedVideo.id
const res = await identifyUploadedVideo(hasSelectedVideo) as { success: boolean; data: { sessionId: string; faceId: string | null; startTime: number; endTime: number } }
identifyState.value.videoFileId = hasSelectedVideo.fileId
identifyState.value.sessionId = res.data.sessionId
identifyState.value.faceId = res.data.faceId || ''
identifyState.value.faceStartTime = res.data.startTime || 0
identifyState.value.faceEndTime = res.data.endTime || 0
} else {
// 处理文件上传(提取封面)
const file = hasUploadFile!
let coverBase64 = null
try {
const { extractVideoCover } = await import('@/utils/video-cover')
const cover = await extractVideoCover(file, {
maxWidth: 800,
quality: 0.8
})
const cover = await extractVideoCover(file, { maxWidth: 800, quality: 0.8 })
coverBase64 = cover.base64
} catch {
// 封面提取失败不影响主流程
}
// 使用useUpload Hook上传文件
const fileId = await upload(file, {
fileCategory: 'video',
groupId: null, // 数字人模块不使用groupId
groupId: null,
coverBase64,
onStart: () => {},
onProgress: () => {},
onSuccess: () => {
message.success('文件上传成功')
},
onError: (err: Error) => {
onStart: function() {},
onProgress: function() {},
onSuccess: function() {},
onError: function(err: Error) {
message.error(err.message || '上传失败')
}
})
// 生成播放链接
// TODO: 获取播放链接逻辑
res = {
success: true,
data: {
fileId,
videoUrl: '', // TODO: 需要获取实际URL
sessionId: '', // TODO: 需要实际识别
faceId: null,
startTime: 0,
endTime: 0
}
}
identifyState.value.videoFileId = fileId
identifyState.value.sessionId = ''
identifyState.value.faceId = ''
identifyState.value.faceStartTime = 0
identifyState.value.faceEndTime = 0
}
identifyState.value.sessionId = res.data.sessionId
identifyState.value.faceId = res.data.faceId
identifyState.value.faceStartTime = res.data.startTime || 0
identifyState.value.faceEndTime = res.data.endTime || 0
identifyState.value.identified = true
const durationSec = faceDuration.value / 1000
const suggestedMaxChars = Math.floor(durationSec * 3.5)
message.success(`识别完成!人脸出现时长约 ${durationSec.toFixed(1)} 秒,建议文案不超过 ${suggestedMaxChars}`)
} catch (error: any) {
message.error(error.message || '识别失败')
// 识别完成,不显示提示信息
} catch (error: unknown) {
const err = error as Error
message.error(err.message || '识别失败')
throw error
} finally {
identifyState.value.identifying = false
}
}
/**
* 重置视频状态
*/
const resetVideoState = (): void => {
function resetVideoState(): void {
videoState.value.uploadedVideo = ''
videoState.value.videoFile = null
videoState.value.selectedVideo = null
videoState.value.fileId = null
videoState.value.videoSource = null
videoState.value.previewVideoUrl = ''
videoState.value.selectorVisible = false
resetIdentifyState()
}
/**
* 获取视频预览 URL
*/
const getVideoPreviewUrl = (video: Video): string => {
function getVideoPreviewUrl(video: Video): string {
if (video.coverBase64) {
if (!video.coverBase64.startsWith('data:')) {
return `data:image/jpeg;base64,${video.coverBase64}`
}
return video.coverBase64
return video.coverBase64.startsWith('data:')
? video.coverBase64
: `data:image/jpeg;base64,${video.coverBase64}`
}
if (video.previewUrl) {
return video.previewUrl
}
if (video.coverUrl) {
return video.coverUrl
}
if (video.previewUrl) return video.previewUrl
if (video.coverUrl) return video.coverUrl
return 'data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjAwIiBoZWlnaHQ9IjExMCIgdmlld0JveD0iMCAwIDIwMCAxMTAiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxyZWN0IHdpZHRoPSIyMDAiIGhlaWdodD0iMTEwIiBmaWxsPSIjMzc0MTUxIi8+CjxwYXRoIGQ9Ik04NSA0NUwxMTUgNjVMMTA1IDg1TDc1IDc1TDg1IDQ1WiIgZmlsbD0iIzU3MjY1MSIvPgo8L3N2Zz4K'
}
/**
* 重置识别状态
*/
const resetIdentifyState = (): void => {
function resetIdentifyState(): void {
identifyState.value.identified = false
identifyState.value.sessionId = ''
identifyState.value.faceId = ''
@@ -217,14 +157,9 @@ export function useDigitalHumanGeneration(): UseDigitalHumanGeneration {
}
return {
// 响应式状态
videoState,
identifyState,
// 计算属性
faceDuration,
// 方法
handleFileUpload,
handleVideoSelect,
performFaceRecognition,

View File

@@ -1,416 +1,329 @@
/**
* @fileoverview useIdentifyFaceController Hook - 主控制器 Hook
* @author Claude Code
* @fileoverview useIdentifyFaceController Hook - 主控制器
*
* 职责协调语音、视频、Pipeline 各个子模块,提供统一的外部接口
*
* 模块依赖关系:
* ┌─────────────────────────────────────────────────┐
* │ useIdentifyFaceController │
* │ ┌──────────────┐ ┌──────────────┐ ┌───────────┐│
* │ │ Voice │ │ Digital │ │ Pipeline ││
* │ │ Generation │ │ Human │ │ ││
* │ │ │ │ Generation │ │ ││
* │ └──────────────┘ └──────────────┘ └───────────┘│
* └─────────────────────────────────────────────────┘
*/
import { ref, computed, watch } from 'vue'
import { computed } from 'vue'
import { message } from 'ant-design-vue'
import type {
UseIdentifyFaceController,
LipSyncTaskData,
MaterialValidation,
VoiceMeta,
} from '../types/identify-face'
// @ts-ignore
import { createLipSyncTask } from '@/api/kling'
// 导入子 Hooks
import { useVoiceGeneration } from './useVoiceGeneration'
import { useDigitalHumanGeneration } from './useDigitalHumanGeneration'
import { useSimplePipeline } from './pipeline/useSimplePipeline'
// ==================== 常量 ====================
const SPEECH_RATE_MARKS = { 0.5: '0.5x', 1: '1x', 1.5: '1.5x', 2: '2x' }
const MAX_TEXT_LENGTH = 4000
/**
* 识别控制器 Hook - 充当协调器
* 内部直接创建和管理两个子 Hook
* 控制器 Hook
*/
export function useIdentifyFaceController(): UseIdentifyFaceController {
// ==================== 创建子 Hooks 并解构 ====================
// 子 Hooks
const voice = useVoiceGeneration()
const digitalHuman = useDigitalHumanGeneration()
// 1. 语音生成 Hook - 解构响应式变量
const {
ttsText,
speechRate,
selectedVoiceMeta,
audioState,
canGenerateAudio,
suggestedMaxChars,
generateAudio,
resetAudioState,
} = useVoiceGeneration()
// 2. 数字人生成 Hook - 解构响应式变量
const {
videoState,
identifyState,
faceDuration,
performFaceRecognition,
handleFileUpload,
handleVideoSelect: _handleVideoSelect,
getVideoPreviewUrl,
resetVideoState,
resetIdentifyState,
} = useDigitalHumanGeneration()
// 3. Controller 统一管理跨 Hook 的状态
const materialValidation = ref<MaterialValidation>({
videoDuration: 0,
audioDuration: 0,
isValid: false,
showDetails: false,
// Pipeline 流程配置(使用新的极简状态机)
const pipeline = useSimplePipeline({
uploadVideo: async (_file: File) => {
// 上传已经在 handleFileUpload 中处理
// 这里直接返回 fileId
return digitalHuman.identifyState.value.videoFileId || ''
},
recognizeFromLibrary: async (video: any) => {
await digitalHuman.handleVideoSelect(video)
await digitalHuman.performFaceRecognition()
return {
sessionId: digitalHuman.identifyState.value.sessionId,
faceId: digitalHuman.identifyState.value.faceId,
startTime: digitalHuman.identifyState.value.faceStartTime,
endTime: digitalHuman.identifyState.value.faceEndTime,
duration: digitalHuman.faceDuration.value,
}
},
recognizeUploaded: async (_fileId: string | number) => {
await digitalHuman.performFaceRecognition()
return {
sessionId: digitalHuman.identifyState.value.sessionId,
faceId: digitalHuman.identifyState.value.faceId,
startTime: digitalHuman.identifyState.value.faceStartTime,
endTime: digitalHuman.identifyState.value.faceEndTime,
duration: digitalHuman.faceDuration.value,
}
},
generateAudio: async (text: string, voiceMeta: any, speechRate: number) => {
voice.ttsText.value = text
voice.selectedVoiceMeta.value = voiceMeta
voice.speechRate.value = speechRate
await voice.generateAudio()
const audio = voice.audioState.value.generated!
return {
audioBase64: audio.audioBase64,
format: audio.format || 'mp3',
durationMs: voice.audioState.value.durationMs,
}
},
createTask: async () => {
// 任务创建在 Pipeline 中处理
},
})
// 4. 监听音频状态变化,自动触发素材校验
watch(
() => audioState.value.generated && audioState.value.durationMs > 0,
(newVal, oldVal) => {
if (newVal && !oldVal) {
// 音频生成完成,获取视频时长并校验
const videoDurationMs = faceDuration.value || 0
const audioDurationMs = audioState.value.durationMs
if (videoDurationMs > 0) {
validateMaterialDuration(videoDurationMs, audioDurationMs)
}
}
},
{ flush: 'post' }
)
// 5. 监听人脸识别状态变化,更新素材校验的视频时长
watch(
() => identifyState.value.identified,
(newVal, oldVal) => {
if (newVal && !oldVal) {
// 人脸识别成功,获取视频时长
const videoDurationMs = faceDuration.value
// 如果已有音频,则重新校验
if (audioState.value.generated && audioState.value.durationMs > 0) {
const audioDurationMs = audioState.value.durationMs
validateMaterialDuration(videoDurationMs, audioDurationMs)
} else {
// 否则只更新视频时长
materialValidation.value.videoDuration = videoDurationMs
}
}
},
{ flush: 'post' }
)
// ==================== 计算属性 ====================
/**
* 是否可以生成数字人视频(综合检查)
*/
const canGenerate = computed(() => {
const hasText = ttsText.value.trim()
const hasVoice = selectedVoiceMeta.value
const hasVideo = videoState.value.uploadedVideo || videoState.value.selectedVideo
/** 是否可以生成数字人视频 */
const canGenerate = computed((): boolean => {
// Pipeline 运行中禁用
if (pipeline.isBusy.value) return false
// 音频校验:只有生成过音频后才需要校验通过
const audioValidated = !audioState.value.generated || audioState.value.validationPassed
// 素材校验:只有进行过校验后才需要校验通过
const materialValidated = materialValidation.value.videoDuration === 0 || materialValidation.value.isValid
const hasText = voice.ttsText.value.trim()
const hasVoice = voice.selectedVoiceMeta.value
const hasVideo = digitalHuman.videoState.value.uploadedVideo || digitalHuman.videoState.value.selectedVideo
const hasBasicConfig = hasText && hasVoice && hasVideo
return !!(hasText && hasVoice && hasVideo && audioValidated && materialValidated)
// 未识别时只需要基础配置
if (!digitalHuman.identifyState.value.identified) return !!hasBasicConfig
// 已识别后需要音频生成并通过校验
return !!(
hasBasicConfig &&
voice.audioState.value.generated &&
validationPassed.value
)
})
/**
* 最大的文本长度
*/
/** 最大文本长度(根据人脸时长动态计算) */
const maxTextLength = computed(() => {
if (!identifyState.value.identified || faceDuration.value <= 0) {
return 4000
}
return Math.min(4000, Math.floor(suggestedMaxChars.value * 1.2))
const isIdentified = digitalHuman.identifyState.value.identified
const faceDuration = digitalHuman.faceDuration.value
if (!isIdentified || faceDuration <= 0) return MAX_TEXT_LENGTH
return Math.min(MAX_TEXT_LENGTH, Math.floor(voice.suggestedMaxChars.value * 1.2))
})
/**
* 文本框占位符
*/
/** 文本框占位符提示 */
const textareaPlaceholder = computed(() => {
if (identifyState.value.identified && faceDuration.value > 0) {
return `请输入文案,建议不超过${suggestedMaxChars.value}字以确保与视频匹配`
const isIdentified = digitalHuman.identifyState.value.identified
const faceDuration = digitalHuman.faceDuration.value
if (isIdentified && faceDuration > 0) {
return `请输入文案,建议不超过${voice.suggestedMaxChars.value}字以确保与视频匹配`
}
return '请输入你想让角色说话的内容'
})
/**
* 语速标记
*/
const speechRateMarks = { 0.5: '0.5x', 1: '1x', 1.5: '1.5x', 2: '2x' }
/** 语速显示文本 */
const speechRateDisplay = computed(() => `${voice.speechRate.value.toFixed(1)}x`)
/** 人脸时长显示(秒) */
const faceDurationSec = computed(() => (digitalHuman.faceDuration.value / 1000).toFixed(1))
/** 音频时长显示(秒) */
const audioDurationSec = computed(() => (voice.audioState.value.durationMs / 1000).toFixed(1))
/** 是否显示生成提示 */
const showGenerateHint = computed(() =>
digitalHuman.identifyState.value.identified &&
(!voice.audioState.value.generated || !validationPassed.value)
)
/** 音频播放 URL */
const audioUrl = computed(() => {
const audio = voice.audioState.value.generated
if (!audio) return ''
return audio.audioBase64 ? `data:audio/mp3;base64,${audio.audioBase64}` : audio.audioUrl || ''
})
/**
* 语速显示
* 校验是否通过(计算属性)
* 规则:音频时长 <= 人脸时长Kling 要求音频不能超过人脸区间)
*/
const speechRateDisplay = computed(() => `${speechRate.value.toFixed(1)}x`)
const validationPassed = computed(() => {
const faceDuration = Number(faceDurationSec.value)
const audioDuration = Number(audioDurationSec.value)
return audioDuration <= faceDuration
})
// ==================== 业务流程方法 ====================
// ==================== 业务方法 ====================
/**
* 生成数字人视频
* 重置所有状态
*/
const generateDigitalHuman = async (): Promise<void> => {
function resetAllStates(): void {
voice.resetAudioState()
digitalHuman.resetVideoState()
digitalHuman.resetIdentifyState()
pipeline.reset()
}
/**
* 生成数字人视频 - 使用新的 Pipeline API
*/
async function generateDigitalHuman(): Promise<void> {
if (!canGenerate.value) {
message.warning('请先完成配置')
return
}
const text = ttsText.value.trim()
const text = voice.ttsText.value.trim()
const voiceMeta = voice.selectedVoiceMeta.value
if (!text) {
message.warning('请输入文案内容')
return
}
const voice = selectedVoiceMeta.value
if (!voice) {
if (!voiceMeta) {
message.warning('请选择音色')
return
}
try {
// 如果未识别,先进行人脸识别
if (!identifyState.value.identified) {
const hasUploadFile = videoState.value.videoFile
const hasSelectedVideo = videoState.value.selectedVideo
// 运行流程到 ready 状态
await pipeline.run({
videoFile: digitalHuman.videoState.value.videoFile,
selectedVideo: digitalHuman.videoState.value.selectedVideo,
text,
voice: voiceMeta,
speechRate: voice.speechRate.value,
})
if (!hasUploadFile && !hasSelectedVideo) {
message.warning('请先选择或上传视频')
return
}
try {
await performFaceRecognition()
message.success('人脸识别完成')
} catch (error) {
return
}
// 如果到达 ready 状态,自动创建任务
if (pipeline.isReady.value) {
await pipeline.createTask()
// 任务提交成功后,重置所有状态
resetAllStates()
}
const videoFileId = identifyState.value.videoFileId
const taskData: LipSyncTaskData = {
taskName: `数字人任务_${Date.now()}`,
videoFileId: videoFileId!,
inputText: ttsText.value,
speechRate: speechRate.value,
volume: 0,
guidanceScale: 1,
seed: 8888,
kling_session_id: identifyState.value.sessionId,
kling_face_id: identifyState.value.faceId,
kling_face_start_time: identifyState.value.faceStartTime,
kling_face_end_time: identifyState.value.faceEndTime,
ai_provider: 'kling',
voiceConfigId: voice.rawId || extractIdFromString(voice.id),
}
if (!taskData.voiceConfigId) {
message.warning('音色配置无效')
return
}
// 如果有预生成的音频,添加到任务数据中
if (audioState.value.generated && audioState.value.durationMs > 0) {
taskData.pre_generated_audio = {
audioBase64: audioState.value.generated.audioBase64,
format: audioState.value.generated.format || 'mp3',
}
taskData.sound_end_time = audioState.value.durationMs
}
const res = await createLipSyncTask(taskData)
if (res.code === 0) {
message.success('任务已提交到任务中心,请前往查看')
} else {
throw new Error(res.msg || '任务创建失败')
}
} catch (error: any) {
message.error(error.message || '任务提交失败')
} catch {
// 错误已在 Pipeline 中处理
}
}
/**
* 更换视频
*/
const replaceVideo = (): void => {
if (videoState.value.videoSource === 'upload') {
videoState.value.videoFile = null
videoState.value.uploadedVideo = ''
} else {
videoState.value.selectedVideo = null
videoState.value.videoFile = null
videoState.value.uploadedVideo = ''
}
// 重置所有状态
resetVideoState()
resetAudioState()
function replaceVideo(): void {
digitalHuman.resetVideoState()
voice.resetAudioState()
pipeline.reset()
}
/**
* 处理音色选择
*/
const handleVoiceSelect = (voice: any): void => {
selectedVoiceMeta.value = voice
// ==================== 事件处理方法 ====================
function handleVoiceSelect(voiceMeta: VoiceMeta): void {
voice.selectedVoiceMeta.value = voiceMeta
}
/**
* 处理文件选择
*/
const handleFileSelect = (event: Event): void => {
const input = event.target as HTMLInputElement
const file = input.files?.[0]
if (file) {
handleFileUpload(file)
}
function handleFileSelect(event: Event): void {
const file = (event.target as HTMLInputElement).files?.[0]
if (file) digitalHuman.handleFileUpload(file)
}
/**
* 处理拖拽上传
*/
const handleDrop = (event: DragEvent): void => {
function handleDrop(event: DragEvent): void {
event.preventDefault()
const file = event.dataTransfer?.files[0]
if (file) {
handleFileUpload(file)
}
if (file) digitalHuman.handleFileUpload(file)
}
/**
* 触发文件选择
*/
const triggerFileSelect = (): void => {
const fileInput = document.querySelector('input[type="file"]') as HTMLInputElement
fileInput?.click()
function triggerFileSelect(): void {
document.querySelector<HTMLInputElement>('input[type="file"]')?.click()
}
/**
* 选择上传模式
*/
const handleSelectUpload = (): void => {
videoState.value.videoSource = 'upload'
videoState.value.selectedVideo = null
resetIdentifyState()
function handleSelectUpload(): void {
digitalHuman.videoState.value.videoSource = 'upload'
digitalHuman.videoState.value.selectedVideo = null
digitalHuman.resetIdentifyState()
pipeline.reset()
}
/**
* 从素材库选择
*/
const handleSelectFromLibrary = (): void => {
videoState.value.videoSource = 'select'
videoState.value.videoFile = null
videoState.value.uploadedVideo = ''
videoState.value.selectorVisible = true
function handleSelectFromLibrary(): void {
digitalHuman.videoState.value.videoSource = 'select'
digitalHuman.videoState.value.videoFile = null
digitalHuman.videoState.value.uploadedVideo = ''
digitalHuman.videoState.value.selectorVisible = true
pipeline.reset()
}
/**
* 处理视频选择器选择
*/
const handleVideoSelect = (video: any): void => {
_handleVideoSelect(video)
async function handleVideoSelect(video: any): Promise<void> {
await digitalHuman.handleVideoSelect(video)
}
/**
* 简化文案
*/
const handleSimplifyScript = (): void => {
const textarea = document.querySelector('.tts-textarea textarea') as HTMLTextAreaElement
if (textarea) {
textarea.focus()
textarea.scrollIntoView({ behavior: 'smooth', block: 'center' })
}
function handleVideoLoaded(videoUrl: string): void {
digitalHuman.videoState.value.previewVideoUrl = videoUrl
}
/**
* 处理视频加载
*/
const handleVideoLoaded = (videoUrl: string): void => {
videoState.value.previewVideoUrl = videoUrl
}
// ==================== UI 工具方法 ====================
// ==================== UI 辅助方法 ====================
/**
* 格式化时长
*/
const formatDuration = (seconds: number): string => {
function formatDuration(seconds: number): string {
if (!seconds) return '--:--'
const minutes = Math.floor(seconds / 60)
const remainingSeconds = Math.floor(seconds % 60)
return `${String(minutes).padStart(2, '0')}:${String(remainingSeconds).padStart(2, '0')}`
const mins = Math.floor(seconds / 60)
const secs = Math.floor(seconds % 60)
return `${String(mins).padStart(2, '0')}:${String(secs).padStart(2, '0')}`
}
/**
* 格式化文件大小
*/
const formatFileSize = (bytes: number): string => {
function formatFileSize(bytes: number): string {
if (!bytes) return '0 B'
const units = ['B', 'KB', 'MB', 'GB']
let size = bytes
let unitIndex = 0
while (size >= 1024 && unitIndex < units.length - 1) {
let idx = 0
while (size >= 1024 && idx < units.length - 1) {
size /= 1024
unitIndex++
idx++
}
return `${size.toFixed(1)} ${units[unitIndex]}`
return `${size.toFixed(1)} ${units[idx]}`
}
/**
* 重置素材校验状态
*/
const resetMaterialValidation = (): void => {
materialValidation.value.videoDuration = 0
materialValidation.value.audioDuration = 0
materialValidation.value.isValid = false
}
// ==================== 返回接口 ====================
/**
* 验证素材时长
* 视频时长必须大于音频时长
* 包装的音频生成方法(延迟识别)
* 在生成音频前先执行人脸识别
*/
const validateMaterialDuration = (videoDurationMs: number, audioDurationMs: number): boolean => {
materialValidation.value.videoDuration = videoDurationMs
materialValidation.value.audioDuration = audioDurationMs
materialValidation.value.isValid = videoDurationMs > audioDurationMs
if (!materialValidation.value.isValid) {
const videoSec = (videoDurationMs / 1000).toFixed(1)
const audioSec = (audioDurationMs / 1000).toFixed(1)
message.warning(`素材校验失败:视频时长(${videoSec}s必须大于音频时长${audioSec}s`)
async function generateAudio(): Promise<void> {
// 如果有视频但未识别,先执行识别
const hasVideo = digitalHuman.videoState.value.uploadedVideo || digitalHuman.videoState.value.selectedVideo
if (hasVideo && !digitalHuman.identifyState.value.identified) {
await digitalHuman.performFaceRecognition()
}
return materialValidation.value.isValid
await voice.generateAudio()
}
return {
// ==================== 语音生成相关 ====================
ttsText,
speechRate,
selectedVoiceMeta,
audioState,
canGenerateAudio,
suggestedMaxChars,
// 语音生成模块
ttsText: voice.ttsText,
speechRate: voice.speechRate,
selectedVoiceMeta: voice.selectedVoiceMeta,
audioState: voice.audioState,
canGenerateAudio: voice.canGenerateAudio,
suggestedMaxChars: voice.suggestedMaxChars,
generateAudio,
resetAudioState,
resetAudioState: voice.resetAudioState,
// ==================== 数字人生成相关 ====================
videoState,
identifyState,
materialValidation,
faceDuration,
performFaceRecognition,
handleFileUpload,
getVideoPreviewUrl,
resetVideoState,
resetIdentifyState,
resetMaterialValidation,
validateMaterialDuration,
// 数字人生成模块
videoState: digitalHuman.videoState,
identifyState: digitalHuman.identifyState,
faceDuration: digitalHuman.faceDuration,
performFaceRecognition: digitalHuman.performFaceRecognition,
handleFileUpload: digitalHuman.handleFileUpload,
getVideoPreviewUrl: digitalHuman.getVideoPreviewUrl,
resetVideoState: digitalHuman.resetVideoState,
resetIdentifyState: digitalHuman.resetIdentifyState,
// ==================== 业务流程方法 ====================
// 业务方法
generateDigitalHuman,
replaceVideo,
// ==================== 事件处理方法 ====================
// 事件处理
handleVoiceSelect,
handleFileSelect,
handleDrop,
@@ -418,26 +331,38 @@ export function useIdentifyFaceController(): UseIdentifyFaceController {
handleSelectUpload,
handleSelectFromLibrary,
handleVideoSelect,
handleSimplifyScript,
handleVideoLoaded,
// ==================== UI 辅助方法 ====================
// UI 工具
formatDuration,
formatFileSize,
// ==================== 计算属性 ====================
// 计算属性
canGenerate,
maxTextLength,
textareaPlaceholder,
speechRateMarks,
speechRateMarks: SPEECH_RATE_MARKS,
speechRateDisplay,
faceDurationSec,
audioDurationSec,
showGenerateHint,
audioUrl,
validationPassed,
// Pipeline 状态
pipelineState: pipeline.state,
pipelineStateLabel: pipeline.stateLabel,
pipelineStateDescription: pipeline.stateDescription,
isPipelineBusy: pipeline.isBusy,
isPipelineReady: pipeline.isReady,
isPipelineFailed: pipeline.isFailed,
isPipelineCompleted: pipeline.isCompleted,
pipelineProgress: pipeline.progress,
pipelineCurrentStepIndex: pipeline.currentStepIndex,
pipelineError: pipeline.error,
runPipeline: pipeline.run,
createPipelineTask: pipeline.createTask,
retryPipeline: pipeline.retry,
resetPipeline: pipeline.reset,
}
}
/**
* 从字符串中提取ID
*/
function extractIdFromString(str: string): string {
const match = str.match(/[\w-]+$/)
return match ? match[0] : str
}

View File

@@ -1,6 +1,5 @@
/**
* @fileoverview useVoiceGeneration Hook - 语音生成逻辑封装
* @author Claude Code
* @fileoverview useVoiceGeneration Hook - 语音生成逻辑
*/
import { ref, computed } from 'vue'
@@ -11,59 +10,41 @@ import type {
VoiceMeta,
AudioData,
} from '../types/identify-face'
// @ts-ignore
import { VoiceService } from '@/api/voice'
import { DEFAULT_VOICE_PROVIDER } from '@/config/voiceConfig'
/**
* 语音生成 Hook
* 独立管理所有状态,不依赖外部状态
*/
export function useVoiceGeneration(): UseVoiceGeneration {
// ==================== 响应式状态 ====================
// ========== 常量 ==========
const DEFAULT_MAX_TEXT_LENGTH = 4000
const DEFAULT_SPEECH_RATE = 1.0
export function useVoiceGeneration(): UseVoiceGeneration {
// ========== 状态 ==========
const ttsText = ref<string>('')
const speechRate = ref<number>(1.0)
const speechRate = ref<number>(DEFAULT_SPEECH_RATE)
const selectedVoiceMeta = ref<VoiceMeta | null>(null)
const audioState = ref<AudioState>({
generated: null,
durationMs: 0,
validationPassed: false,
generating: false,
})
// ==================== 计算属性 ====================
/**
* 是否可以生成配音
*/
const canGenerateAudio = computed(() => {
const hasText = ttsText.value.trim()
const hasVoice = selectedVoiceMeta.value
const hasVideo = true // 语音生成不依赖视频状态
return !!(hasText && hasVoice && hasVideo && !audioState.value.generating)
// ========== 计算属性 ==========
const canGenerateAudio = computed(function() {
return !!(ttsText.value.trim() && selectedVoiceMeta.value && !audioState.value.generating)
})
/**
* 建议的最大字符数(需要从外部传入)
*/
const suggestedMaxChars = computed(() => {
// 默认为 4000需要从外部设置
return 4000
const suggestedMaxChars = computed(function() {
return DEFAULT_MAX_TEXT_LENGTH
})
// ==================== 核心方法 ====================
// ========== 方法 ==========
/**
* 生成配音
*/
const generateAudio = async (): Promise<void> => {
async function generateAudio(): Promise<void> {
const voice = selectedVoiceMeta.value
if (!voice) {
message.warning('请选择音色')
return
}
if (!ttsText.value.trim()) {
message.warning('请输入文案内容')
return
@@ -75,7 +56,7 @@ export function useVoiceGeneration(): UseVoiceGeneration {
const params = {
inputText: ttsText.value,
voiceConfigId: voice.rawId || extractIdFromString(voice.id),
speechRate: speechRate.value || 1.0,
speechRate: speechRate.value || DEFAULT_SPEECH_RATE,
audioFormat: 'mp3' as const,
providerType: DEFAULT_VOICE_PROVIDER,
}
@@ -84,144 +65,92 @@ export function useVoiceGeneration(): UseVoiceGeneration {
if (res.code === 0) {
const audioData = res.data as AudioData
if (!audioData.audioBase64) {
throw new Error('未收到音频数据,无法进行时长解析')
throw new Error('未收到音频数据')
}
audioState.value.generated = audioData
try {
// 解析音频时长
audioState.value.durationMs = await parseAudioDuration(audioData.audioBase64)
// 验证音频时长
validateAudioDuration()
message.success('配音生成成功!')
} catch (error) {
message.error('音频解析失败,请重新生成配音')
audioState.value.durationMs = 0
audioState.value.generated = null
audioState.value.validationPassed = false
}
audioState.value.durationMs = await parseAudioDuration(audioData.audioBase64)
message.success('配音生成成功!')
} else {
throw new Error(res.msg || '配音生成失败')
}
} catch (error: any) {
message.error(error.message || '配音生成失败')
} catch (error: unknown) {
const err = error as Error
message.error(err.message || '配音生成失败')
audioState.value.generated = null
audioState.value.durationMs = 0
} finally {
audioState.value.generating = false
}
}
/**
* 解析音频时长
*/
const parseAudioDuration = async (base64Data: string): Promise<number> => {
return new Promise((resolve, reject) => {
try {
const base64 = base64Data.includes(',') ? base64Data.split(',')[1] : base64Data
async function parseAudioDuration(base64Data: string): Promise<number> {
const base64 = base64Data.includes(',') ? base64Data.split(',')[1] : base64Data
const binaryString = window.atob(base64)
const bytes = new Uint8Array(binaryString.length)
for (let i = 0; i < binaryString.length; i++) {
bytes[i] = binaryString.charCodeAt(i)
}
const binaryString = window.atob(base64)
const bytes = new Uint8Array(binaryString.length)
for (let i = 0; i < binaryString.length; i++) {
bytes[i] = binaryString.charCodeAt(i)
return new Promise(function(resolve, reject) {
const blob = new Blob([bytes], { type: 'audio/mp3' })
const audio = new Audio()
const objectUrl = URL.createObjectURL(blob)
// 超时机制5秒后拒绝
const timeoutId = setTimeout(function() {
URL.revokeObjectURL(objectUrl)
reject(new Error('音频时长解析超时'))
}, 5000)
function onLoadedMetadata() {
clearTimeout(timeoutId)
URL.revokeObjectURL(objectUrl)
const duration = audio.duration
if (!isFinite(duration) || duration <= 0 || isNaN(duration)) {
reject(new Error(`音频时长无效: ${duration},请检查音频格式是否正确`))
return
}
const blob = new Blob([bytes], { type: 'audio/mp3' })
const audio = new Audio()
const objectUrl = URL.createObjectURL(blob)
audio.addEventListener('loadedmetadata', () => {
URL.revokeObjectURL(objectUrl)
const durationMs = Math.round(audio.duration * 1000)
resolve(durationMs)
})
audio.addEventListener('error', (error) => {
URL.revokeObjectURL(objectUrl)
reject(error)
})
audio.src = objectUrl
audio.load()
} catch (error) {
reject(error)
const durationMs = Math.round(duration * 1000)
console.log('[parseAudioDuration] 音频时长解析成功:', durationMs, 'ms')
resolve(durationMs)
}
function onError() {
clearTimeout(timeoutId)
URL.revokeObjectURL(objectUrl)
reject(new Error('音频解析失败,请检查音频格式'))
}
audio.addEventListener('loadedmetadata', onLoadedMetadata)
audio.addEventListener('error', onError)
audio.src = objectUrl
audio.load()
})
}
/**
* 验证音频与人脸区间的重合时长(外部调用时传入校验参数)
*/
const validateAudioDuration = (
faceStartTime: number = 0,
faceEndTime: number = 0,
minOverlapMs: number = 2000
): boolean => {
if (faceStartTime <= 0 || faceEndTime <= 0) {
audioState.value.validationPassed = false
return false
}
const faceDurationMs = faceEndTime - faceStartTime
const audioDuration = audioState.value.durationMs
const overlapStart = faceStartTime
const overlapEnd = Math.min(faceEndTime, faceStartTime + audioDuration)
const overlapDuration = Math.max(0, overlapEnd - overlapStart)
const isValid = overlapDuration >= minOverlapMs
audioState.value.validationPassed = isValid
if (!isValid) {
const overlapSec = (overlapDuration / 1000).toFixed(1)
message.warning(
`音频时长(${(audioDuration/1000).toFixed(1)}秒)与人脸区间(${(faceDurationMs/1000).toFixed(1)}秒)不匹配,重合部分仅${overlapSec}秒,至少需要${(minOverlapMs/1000)}`
)
} else {
message.success('时长校验通过!')
}
return isValid
}
/**
* 重置音频状态
*/
const resetAudioState = (): void => {
function resetAudioState(): void {
audioState.value.generated = null
audioState.value.durationMs = 0
audioState.value.validationPassed = false
audioState.value.generating = false
}
return {
// 响应式状态
ttsText,
speechRate,
selectedVoiceMeta,
audioState,
// 计算属性
canGenerateAudio,
suggestedMaxChars,
// 方法
generateAudio,
parseAudioDuration,
validateAudioDuration,
resetAudioState,
}
}
/**
* 从字符串中提取ID
*/
function extractIdFromString(str: string): string {
// 尝试从各种格式中提取ID
const match = str.match(/[\w-]+$/)
return match ? match[0] : str
}

View File

@@ -11,6 +11,7 @@ export interface VideoState {
videoFile: File | null
previewVideoUrl: string
selectedVideo: Video | null
fileId: string | number | null
videoSource: 'upload' | 'select' | null
selectorVisible: boolean
}
@@ -20,6 +21,7 @@ export interface VideoState {
*/
export interface Video {
id: string | number
fileId: string | number
fileName: string
fileUrl: string
fileSize: number
@@ -48,7 +50,6 @@ export interface IdentifyState {
export interface AudioState {
generated: AudioData | null
durationMs: number
validationPassed: boolean
generating: boolean
}
@@ -61,16 +62,6 @@ export interface AudioData {
format?: string
}
/**
* 素材校验接口
*/
export interface MaterialValidation {
videoDuration: number
audioDuration: number
isValid: boolean
showDetails: boolean
}
/**
* 音色元数据接口
*/
@@ -97,8 +88,6 @@ export interface UseVoiceGeneration {
// 方法
generateAudio: () => Promise<void>
parseAudioDuration: (base64Data: string) => Promise<number>
validateAudioDuration: () => boolean
resetAudioState: () => void
}
@@ -115,7 +104,7 @@ export interface UseDigitalHumanGeneration {
// 方法
handleFileUpload: (file: File) => Promise<void>
handleVideoSelect: (video: Video) => void
handleVideoSelect: (video: Video) => Promise<void>
performFaceRecognition: () => Promise<void>
resetVideoState: () => void
resetIdentifyState: () => void
@@ -140,29 +129,25 @@ export interface UseIdentifyFaceController {
// ==================== 数字人生成相关 ====================
videoState: import('vue').Ref<VideoState>
identifyState: import('vue').Ref<IdentifyState>
materialValidation: import('vue').Ref<MaterialValidation>
faceDuration: import('vue').ComputedRef<number>
performFaceRecognition: () => Promise<void>
handleFileUpload: (file: File) => Promise<void>
getVideoPreviewUrl: (video: Video) => string
resetVideoState: () => void
resetIdentifyState: () => void
resetMaterialValidation: () => void
validateMaterialDuration: (videoDurationMs: number, audioDurationMs: number) => boolean
// ==================== 业务流程方法 ====================
generateDigitalHuman: () => Promise<void>
replaceVideo: () => void
// ==================== 事件处理方法 ====================
handleVoiceSelect: (voice: VoiceMeta) => void
handleVoiceSelect: (voiceMeta: VoiceMeta) => void
handleFileSelect: (event: Event) => void
handleDrop: (event: DragEvent) => void
triggerFileSelect: () => void
handleSelectUpload: () => void
handleSelectFromLibrary: () => void
handleVideoSelect: (video: Video) => void
handleSimplifyScript: () => void
handleVideoSelect: (video: Video) => Promise<void>
handleVideoLoaded: (videoUrl: string) => void
// ==================== 计算属性 ====================
@@ -171,6 +156,32 @@ export interface UseIdentifyFaceController {
textareaPlaceholder: import('vue').ComputedRef<string>
speechRateMarks: Record<number, string>
speechRateDisplay: import('vue').ComputedRef<string>
faceDurationSec: import('vue').ComputedRef<string>
audioDurationSec: import('vue').ComputedRef<string>
showGenerateHint: import('vue').ComputedRef<boolean>
audioUrl: import('vue').ComputedRef<string>
validationPassed: import('vue').ComputedRef<boolean>
// ==================== 流程状态 ====================
pipelineState: import('vue').Ref<string>
pipelineStateLabel: import('vue').ComputedRef<string>
pipelineStateDescription: import('vue').ComputedRef<string>
isPipelineBusy: import('vue').ComputedRef<boolean>
isPipelineReady: import('vue').ComputedRef<boolean>
isPipelineFailed: import('vue').ComputedRef<boolean>
isPipelineCompleted: import('vue').ComputedRef<boolean>
pipelineProgress: import('vue').ComputedRef<number>
pipelineError: import('vue').Ref<string | null>
runPipeline: (params: {
videoFile: File | null
selectedVideo: any
text: string
voice: VoiceMeta
speechRate: number
}) => Promise<void>
createPipelineTask: () => Promise<void>
retryPipeline: () => void
resetPipeline: () => void
// ==================== UI 辅助方法 ====================
formatDuration: (seconds: number) => string