feat: 重构 IdentifyFace.vue 为 Hooks 架构

- 新增 hooks/ 目录,包含三个专用 Hook:
  * useVoiceGeneration - 语音生成和校验逻辑
  * useDigitalHumanGeneration - 数字人视频生成逻辑
  * useIdentifyFaceController - 协调两个子 Hook 的控制器

- 新增 types/identify-face.ts 完整类型定义

- 重构 IdentifyFace.vue 使用 hooks 架构:
  * 视图层与业务逻辑分离
  * 状态管理清晰化
  * 模块解耦,逻辑清晰

- 遵循单一职责原则,每个 Hook 只负责一个领域
- 提升代码可测试性和可维护性
- 支持两种视频素材来源:素材库选择和直接上传
- 实现语音生成优先校验的业务规则

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-12-28 00:19:17 +08:00
parent effbbc694c
commit 36195ea55a
46 changed files with 4258 additions and 3454 deletions

View File

@@ -16,7 +16,7 @@ function saveTokens(info) {
tokenManager.setTokens({
accessToken: info.accessToken || '',
refreshToken: info.refreshToken || '',
expiresIn: info.expiresTime || 7200, // expiresTime 是秒数
expiresTime: info.expiresTime || 0, // 直接传递,由 token-manager 处理格式转换
tokenType: info.tokenType || 'Bearer'
})
}
@@ -71,7 +71,7 @@ export async function loginByPassword(mobile, password) {
const { clearUserInfoCache } = await import('@gold/hooks/web/useUserInfo')
clearUserInfoCache()
} catch (e) {
console.error('清除用户信息缓存失败:', e)
// 清除缓存失败不影响登录流程
}
return info;
@@ -126,7 +126,7 @@ export async function loginBySms(mobile, code) {
const { clearUserInfoCache } = await import('@gold/hooks/web/useUserInfo')
clearUserInfoCache()
} catch (e) {
console.error('清除用户信息缓存失败:', e)
// 清除缓存失败不影响登录流程
}
return info;
@@ -151,7 +151,7 @@ export async function refreshToken() {
const { clearUserInfoCache } = await import('@gold/hooks/web/useUserInfo')
clearUserInfoCache()
} catch (e) {
console.error('清除用户信息缓存失败:', e)
// 清除缓存失败不影响登录流程
}
return info;

View File

@@ -56,16 +56,13 @@ export const ChatMessageApi = {
onmessage: onMessage,
onerror: (err) => {
retryCount++
console.error('SSE错误重试次数:', retryCount, err)
// 调用自定义错误处理
if (typeof onError === 'function') {
onError(err)
}
// 超过最大重试次数,停止重连
if (retryCount > maxRetries) {
throw err // 抛出错误,停止自动重连
throw err
}
},
onclose: () => {

View File

@@ -61,12 +61,11 @@ export const CommonService = {
onmessage: onMessage,
onerror: (err) => {
retryCount++
console.error('SSE错误重试次数:', retryCount, err)
if (typeof onError === 'function') {
onError(err)
}
if (retryCount > maxRetries) {
throw err
}

View File

@@ -33,23 +33,16 @@ export function createHttpClient(options = {}) {
// 默认处理尝试刷新token
try {
await refreshToken()
// 刷新成功标记错误已处理token已更新
error._handled = true
error._tokenRefreshed = true
console.info('Token刷新成功可以重试原请求')
// 不抛出错误,交给上层决定是否重试
} catch (refreshError) {
// 刷新失败使用router跳转避免整页刷新
console.error('刷新token失败:', refreshError)
router.push('/login')
}
},
on403: (error) => {
// 403没有权限直接跳转到登录页
if (on403) {
on403(error)
} else {
console.warn('403权限不足使用router跳转到登录页')
router.push('/login')
}
},

View File

@@ -6,8 +6,15 @@ import { message } from "ant-design-vue"
import { MaterialService } from './material'
/**
* 人脸识别
* 显示加载提示
*/
const showLoading = (text) => message.loading(text, 0)
/**
* 销毁加载提示
*/
const hideLoading = () => message.destroy()
export function identifyFace(data) {
return request({
url: '/webApi/api/tik/kling/identify-face',
@@ -16,9 +23,6 @@ export function identifyFace(data) {
})
}
/**
* 创建口型同步任务
*/
export function createLipSyncTask(data) {
return request({
url: '/webApi/api/tik/kling/task/create',
@@ -27,9 +31,6 @@ export function createLipSyncTask(data) {
})
}
/**
* 查询口型同步任务
*/
export function getLipSyncTask(taskId) {
return request({
url: `/webApi/api/tik/kling/lip-sync/${taskId}`,
@@ -37,13 +38,36 @@ export function getLipSyncTask(taskId) {
})
}
/**
* 创建可灵任务并识别(推荐方式)
*/
export async function createKlingTaskAndIdentify(file) {
export async function identifyUploadedVideo(videoFile) {
try {
// 1. 提取视频封面
message.loading('正在提取视频封面...', 0)
showLoading('正在识别视频中的人脸...')
const identifyRes = await identifyFace({ video_url: videoFile.fileUrl })
hideLoading()
if (identifyRes.code !== 0) {
throw new Error(identifyRes.msg || '识别失败')
}
return {
success: true,
data: {
fileId: videoFile.id,
videoUrl: videoFile.fileUrl,
sessionId: identifyRes.data.sessionId,
faceId: identifyRes.data.data.face_data[0].face_id || null,
startTime: identifyRes.data.data.face_data[0].start_time || 0,
endTime: identifyRes.data.data.face_data[0].end_time || 0
}
}
} catch (error) {
hideLoading()
throw error
}
}
export async function uploadAndIdentifyVideo(file) {
try {
showLoading('正在提取视频封面...')
let coverBase64 = null
try {
const { extractVideoCover } = await import('@/utils/video-cover')
@@ -52,46 +76,39 @@ export async function createKlingTaskAndIdentify(file) {
quality: 0.8
})
coverBase64 = cover.base64
console.log('视频封面提取成功')
} catch (coverError) {
console.warn('视频封面提取失败:', coverError)
// 封面提取失败不影响主流程
}
message.destroy()
hideLoading()
// 2. 上传视频到OSS包含封面
message.loading('正在上传视频...', 0)
showLoading('正在上传视频...')
const uploadRes = await MaterialService.uploadFile(file, 'video', coverBase64)
message.destroy()
hideLoading()
if (uploadRes.code !== 0) {
throw new Error(uploadRes.msg || '上传失败')
}
const fileId = uploadRes.data
console.log('文件上传成功ID:', fileId, '封面长度:', coverBase64?.length || 0)
// 3. 获取公网播放URL
message.loading('正在生成播放链接...', 0)
showLoading('正在生成播放链接...')
const urlRes = await MaterialService.getVideoPlayUrl(fileId)
message.destroy()
hideLoading()
if (urlRes.code !== 0) {
throw new Error(urlRes.msg || '获取播放链接失败')
}
const videoUrl = urlRes.data
console.log('视频URL:', videoUrl)
// 4. 调用识别API
message.loading('正在识别视频中的人脸...', 0)
const videoUrl = urlRes.data
showLoading('正在识别视频中的人脸...')
const identifyRes = await identifyFace({ video_url: videoUrl })
message.destroy()
hideLoading()
if (identifyRes.code !== 0) {
throw new Error(identifyRes.msg || '识别失败')
}
return {
success: true,
data: {
@@ -99,14 +116,12 @@ export async function createKlingTaskAndIdentify(file) {
videoUrl,
sessionId: identifyRes.data.sessionId,
faceId: identifyRes.data.data.face_data[0].face_id || null,
// 人脸时间信息,用于音频插入时间
startTime: identifyRes.data.data.face_data[0].start_time || 0,
endTime: identifyRes.data.data.face_data[0].end_time || 0
}
}
} catch (error) {
message.destroy()
console.error('可灵任务失败:', error)
hideLoading()
throw error
}
}

View File

@@ -34,8 +34,7 @@ function getVideoDuration(file) {
video.onerror = function() {
URL.revokeObjectURL(video.src);
console.warn('[视频时长] 获取失败使用默认值60秒');
resolve(60); // 返回默认值
resolve(60);
};
video.src = URL.createObjectURL(file);
@@ -71,32 +70,24 @@ export const MaterialService = {
* @returns {Promise}
*/
async uploadFile(file, fileCategory, coverBase64 = null, duration = null) {
// 如果没有提供时长且是视频文件,自动获取
if (duration === null && file.type.startsWith('video/')) {
duration = await getVideoDuration(file);
console.log('[上传] 获取到视频时长:', duration, '秒');
}
const formData = new FormData()
formData.append('file', file)
formData.append('fileCategory', fileCategory)
// 添加时长(如果是视频文件)
if (duration !== null) {
formData.append('duration', duration.toString());
console.log('[上传] 附加视频时长:', duration, '秒');
}
// 如果有封面 base64添加到表单数据
if (coverBase64) {
// base64 格式data:image/jpeg;base64,/9j/4AAQ...
// 后端会解析这个格式
formData.append('coverBase64', coverBase64)
}
// 大文件上传需要更长的超时时间30分钟
return http.post(`${BASE_URL}/upload`, formData, {
timeout: 30 * 60 * 1000 // 30分钟
timeout: 30 * 60 * 1000
})
},

View File

@@ -15,7 +15,6 @@ export const UserPromptApi = {
* @returns {Promise} 响应数据
*/
createUserPrompt: async (data) => {
console.log('[UserPromptApi] 发送请求参数:', JSON.stringify(data, null, 2))
return await http.post(`${SERVER_BASE_AI}/user-prompt/create`, data, {
headers: {
'Content-Type': 'application/json'

View File

@@ -0,0 +1,112 @@
<template>
<div class="result-panel">
<div v-if="!previewVideoUrl" class="result-placeholder">
<h3>生成的视频将在这里显示</h3>
</div>
<div v-else class="result-content">
<div class="result-section">
<h3>生成的数字人视频</h3>
<video :src="previewVideoUrl" controls class="generated-video"></video>
<div class="video-actions">
<a-button type="primary" @click="downloadVideo">下载视频</a-button>
</div>
</div>
</div>
</div>
</template>
<script setup>
import { ref, onMounted } from 'vue'
import { message } from 'ant-design-vue'
import { getDigitalHumanTask } from '@/api/digitalHuman'
const props = defineProps({
taskId: {
type: String,
default: ''
}
})
const emit = defineEmits(['videoLoaded'])
const previewVideoUrl = ref('')
// Restore the result of the most recent generation task: only show it
// when the stored task finished successfully and produced a video URL.
const loadLastTask = async () => {
  try {
    const lastTaskId = localStorage.getItem('digital_human_last_task_id')
    if (!lastTaskId) return
    const res = await getDigitalHumanTask(lastTaskId)
    if (res.code !== 0 || !res.data) return
    const { status, resultVideoUrl } = res.data
    if (status === 'SUCCESS' && resultVideoUrl) {
      previewVideoUrl.value = resultVideoUrl
      emit('videoLoaded', resultVideoUrl)
    }
  } catch (error) {
    // On lookup failure, drop the stored id so we do not keep
    // re-querying a stale/invalid task on every mount.
    localStorage.removeItem('digital_human_last_task_id')
  }
}
// Trigger a browser download of the current preview video via a temporary <a>.
const downloadVideo = () => {
  if (!previewVideoUrl.value) {
    return message.warning('没有可下载的视频')
  }
  const anchor = document.createElement('a')
  anchor.download = `数字人视频_${Date.now()}.mp4`
  anchor.href = previewVideoUrl.value
  anchor.click()
}
defineExpose({
loadLastTask,
previewVideoUrl
})
onMounted(async () => {
await loadLastTask()
})
</script>
<style scoped>
.result-panel {
background: rgba(255, 255, 255, 0.05);
border-radius: 16px;
padding: 24px;
}
.result-placeholder {
min-height: 400px;
display: flex;
align-items: center;
justify-content: center;
color: #94a3b8;
}
.result-content {
color: #fff;
}
.result-section {
margin-bottom: 24px;
}
.result-section h3 {
margin-bottom: 12px;
font-size: 18px;
}
.generated-video {
width: 100%;
max-height: 400px;
border-radius: 8px;
margin-top: 12px;
}
.video-actions {
margin-top: 16px;
display: flex;
justify-content: center;
}
</style>

View File

@@ -36,7 +36,7 @@ const items = computed(() => {
title: '数字人',
children: [
{ name: '人声克隆', label: '人声克隆', icon: 'mic' },
{ name: '可灵数字人', label: "可灵数字人", icon: "user" },
{ name: '数字人生成', label: "数字人", icon: "user" },
// { name: '数字人视频', label: '数字人视频', icon: 'video' },
]
},

View File

@@ -0,0 +1,406 @@
<template>
<a-modal
v-model:open="visible"
:title="modalTitle"
width="900px"
:footer="null"
:maskClosable="false"
class="video-selector-modal"
>
<div class="video-selector">
<!-- 搜索栏 -->
<div class="search-bar">
<a-input-search
v-model:value="searchKeyword"
placeholder="搜索视频名称"
allow-clear
@search="handleSearch"
class="search-input"
/>
</div>
<!-- 视频网格 -->
<div class="video-grid" v-loading="loading">
<div
v-for="video in videoList"
:key="video.id"
class="video-card"
:class="{ selected: selectedVideoId === video.id }"
@click="selectVideo(video)"
>
<div class="video-thumbnail">
<img
:src="getVideoPreviewUrl(video) || defaultCover"
:alt="video.fileName"
@error="handleImageError"
/>
<div class="video-duration">{{ formatDuration(video.duration) }}</div>
<div class="video-selected-mark" v-if="selectedVideoId === video.id">
<CheckOutlined />
</div>
</div>
<div class="video-info">
<div class="video-title" :title="video.fileName">{{ video.fileName }}</div>
<div class="video-meta">
<span class="meta-item">
<VideoCameraOutlined />
{{ formatFileSize(video.fileSize) }}
</span>
<span class="meta-item">
<ClockCircleOutlined />
{{ formatDuration(video.duration) }}
</span>
</div>
</div>
</div>
<!-- 空状态 -->
<div v-if="!loading && videoList.length === 0" class="empty-state">
<PictureOutlined class="empty-icon" />
<p>{{ searchKeyword ? '未找到匹配的视频' : '暂无视频,请先上传视频' }}</p>
</div>
</div>
<!-- 分页 -->
<div class="pagination-wrapper" v-if="total > pageSize">
<a-pagination
v-model:current="currentPage"
v-model:page-size="pageSize"
:total="total"
show-size-changer
show-quick-jumper
:show-total="(total, range) => `${range[0]}-${range[1]} 条,共 ${total}`"
@change="handlePageChange"
@show-size-change="handlePageSizeChange"
/>
</div>
<!-- 底部操作栏 -->
<div class="modal-footer">
<a-button @click="handleCancel">取消</a-button>
<a-button type="primary" @click="handleConfirm" :disabled="!selectedVideoId">
确认选择
</a-button>
</div>
</div>
</a-modal>
</template>
<script setup>
import { ref, computed, watch } from 'vue'
import { message } from 'ant-design-vue'
import { CheckOutlined, PictureOutlined, VideoCameraOutlined, ClockCircleOutlined } from '@ant-design/icons-vue'
import { MaterialService } from '@/api/material'
const props = defineProps({
open: {
type: Boolean,
default: false
}
})
const emit = defineEmits(['update:open', 'select'])
// 状态管理
const visible = computed({
get: () => props.open,
set: (val) => emit('update:open', val)
})
const loading = ref(false)
const videoList = ref([])
const selectedVideoId = ref(null)
const selectedVideo = ref(null)
const searchKeyword = ref('')
const currentPage = ref(1)
const pageSize = ref(20)
const total = ref(0)
// 默认封面
const defaultCover = 'data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjAwIiBoZWlnaHQ9IjExMCIgdmlld0JveD0iMCAwIDIwMCAxMTAiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxyZWN0IHdpZHRoPSIyMDAiIGhlaWdodD0iMTEwIiBmaWxsPSIjMzc0MTUxIi8+CjxwYXRoIGQ9Ik04NSA0NUwxMTUgNjVMMTA1IDg1TDc1IDc1TDg1IDQ1WiIgZmlsbD0iIzU3MjY1MSIvPgo8L3N2Zz4K'
// 模态框标题
const modalTitle = '选择视频'
// 获取视频列表
// Load the video list from the material library for the current
// page / page size / search keyword.
const fetchVideoList = async () => {
  loading.value = true
  try {
    const keyword = searchKeyword.value.trim()
    const res = await MaterialService.getFilePage({
      page: currentPage.value,
      pageSize: pageSize.value,
      fileCategory: 'video',
      // Omit the filter entirely when the keyword is empty.
      fileName: keyword || undefined
    })
    if (res.code !== 0) {
      message.error(res.msg || '获取视频列表失败')
      return
    }
    videoList.value = res.data.list || []
    total.value = res.data.total || 0
  } catch (error) {
    console.error('获取视频列表失败:', error)
    message.error('获取视频列表失败')
  } finally {
    loading.value = false
  }
}
// 搜索
const handleSearch = () => {
currentPage.value = 1
fetchVideoList()
}
// 分页变化
const handlePageChange = (page, size) => {
currentPage.value = page
if (size) {
pageSize.value = size
}
fetchVideoList()
}
// 每页数量变化
const handlePageSizeChange = (_current, size) => {
// _current 参数未使用,但需要保留以匹配事件处理器签名
currentPage.value = 1
pageSize.value = size
fetchVideoList()
}
// 选择视频
const selectVideo = (video) => {
selectedVideoId.value = video.id
selectedVideo.value = video
}
// 图片加载错误处理
const handleImageError = (event) => {
event.target.src = defaultCover
}
// 格式化时长
// Format a duration in seconds as "mm:ss"; falsy input (0 / null /
// undefined) renders as the "--:--" placeholder.
const formatDuration = (seconds) => {
  if (!seconds) return '--:--'
  const pad = (n) => String(n).padStart(2, '0')
  return `${pad(Math.floor(seconds / 60))}:${pad(Math.floor(seconds % 60))}`
}
// 格式化文件大小
// Render a byte count as a human-readable size with one decimal place,
// capped at GB. Falsy input renders as "0 B".
const formatFileSize = (bytes) => {
  if (!bytes) return '0 B'
  const units = ['B', 'KB', 'MB', 'GB']
  let value = bytes
  let index = 0
  for (; value >= 1024 && index < units.length - 1; index++) {
    value /= 1024
  }
  return `${value.toFixed(1)} ${units[index]}`
}
// 获取视频预览URL优先使用base64然后是URL
// Resolve the thumbnail source for a video card.
// Priority: coverBase64 > previewUrl > coverUrl > built-in default cover.
const getVideoPreviewUrl = (video) => {
  const base64 = video.coverBase64
  if (base64) {
    // The backend may store a bare base64 payload; an <img> src needs
    // the full data-URI prefix.
    return base64.startsWith('data:') ? base64 : `data:image/jpeg;base64,${base64}`
  }
  return video.previewUrl || video.coverUrl || defaultCover
}
// 取消
const handleCancel = () => {
visible.value = false
selectedVideoId.value = null
selectedVideo.value = null
searchKeyword.value = ''
}
// 确认
// Confirm the selection: emit the chosen video to the parent and close
// the modal (handleCancel also resets the selector state).
const handleConfirm = () => {
  const video = selectedVideo.value
  if (!video) {
    message.warning('请选择一个视频')
    return
  }
  emit('select', video)
  handleCancel()
}
// 监听visible变化
watch(() => props.open, (newVal) => {
if (newVal) {
selectedVideoId.value = null
selectedVideo.value = null
currentPage.value = 1
fetchVideoList()
}
})
</script>
<style scoped>
.video-selector {
display: flex;
flex-direction: column;
gap: 16px;
}
.search-bar {
padding: 16px;
background: rgba(0, 0, 0, 0.2);
border-radius: 8px;
}
.search-input {
width: 100%;
}
.video-grid {
display: grid;
grid-template-columns: repeat(auto-fill, minmax(200px, 1fr));
gap: 16px;
max-height: 500px;
overflow-y: auto;
padding: 4px;
}
.video-card {
background: rgba(0, 0, 0, 0.3);
border: 2px solid rgba(59, 130, 246, 0.2);
border-radius: 12px;
overflow: hidden;
cursor: pointer;
transition: all 0.3s;
}
.video-card:hover {
border-color: rgba(59, 130, 246, 0.5);
transform: translateY(-2px);
box-shadow: 0 4px 12px rgba(59, 130, 246, 0.3);
}
.video-card.selected {
border-color: #3B82F6;
background: rgba(59, 130, 246, 0.1);
box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.2);
}
.video-thumbnail {
position: relative;
width: 100%;
height: 112px;
overflow: hidden;
background: #374151;
}
.video-thumbnail img {
width: 100%;
height: 100%;
object-fit: cover;
}
.video-duration {
position: absolute;
bottom: 8px;
right: 8px;
background: rgba(0, 0, 0, 0.8);
color: #fff;
padding: 2px 6px;
border-radius: 4px;
font-size: 12px;
font-weight: 600;
}
.video-selected-mark {
position: absolute;
top: 8px;
right: 8px;
width: 24px;
height: 24px;
background: #3B82F6;
color: #fff;
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
font-size: 14px;
}
.video-info {
padding: 12px;
}
.video-title {
font-size: 14px;
font-weight: 600;
color: #fff;
margin-bottom: 8px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.video-meta {
display: flex;
gap: 12px;
font-size: 12px;
color: #94a3b8;
}
.meta-item {
display: flex;
align-items: center;
gap: 4px;
}
.empty-state {
grid-column: 1 / -1;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
padding: 60px 20px;
color: #94a3b8;
}
.empty-icon {
font-size: 48px;
margin-bottom: 16px;
color: #6b7280;
}
.pagination-wrapper {
display: flex;
justify-content: center;
padding: 16px 0;
border-top: 1px solid rgba(59, 130, 246, 0.1);
}
.modal-footer {
display: flex;
justify-content: flex-end;
gap: 12px;
padding-top: 16px;
border-top: 1px solid rgba(59, 130, 246, 0.1);
}
</style>

View File

@@ -0,0 +1,179 @@
<template>
<div class="voice-selector">
<div v-if="displayedVoices.length === 0" class="empty-voices">
还没有配音可先在"配音管理"中上传
</div>
<div v-else class="voice-selector-with-preview">
<a-select
v-model:value="selectedVoiceId"
placeholder="请选择音色"
class="voice-select"
:options="voiceOptions"
@change="handleVoiceChange"
style="width: calc(100% - 80px)"
/>
<a-button
class="preview-button"
size="small"
:disabled="!selectedVoiceId"
:loading="previewLoadingVoiceId === selectedVoiceId"
@click="handlePreviewCurrentVoice"
>
<template #icon>
<SoundOutlined />
</template>
试听
</a-button>
</div>
</div>
</template>
<script setup>
import { ref, computed, onMounted } from 'vue'
import { useVoiceCopyStore } from '@/stores/voiceCopy'
import { useTTS, TTS_PROVIDERS } from '@/composables/useTTS'
const voiceStore = useVoiceCopyStore()
const emit = defineEmits(['select'])
// 使用TTS Hook默认使用Qwen供应商
const {
previewLoadingVoiceId,
playingPreviewVoiceId,
ttsText,
speechRate,
playVoiceSample,
setText,
setSpeechRate,
resetPreviewState
} = useTTS({
provider: TTS_PROVIDERS.QWEN
})
// 当前选中的音色ID
const selectedVoiceId = ref('')
// 从store数据构建音色列表
const userVoiceCards = computed(() =>
(voiceStore.profiles || []).map(profile => ({
id: `user-${profile.id}`,
rawId: profile.id,
name: profile.name || '未命名',
category:'',
gender: profile.gender || 'female',
description: profile.note || '我的配音',
fileUrl: profile.fileUrl,
transcription: profile.transcription || '',
source: 'user',
voiceId: profile.voiceId
}))
)
const displayedVoices = computed(() => userVoiceCards.value)
// 转换为下拉框选项格式
const voiceOptions = computed(() =>
displayedVoices.value.map(voice => ({
value: voice.id,
label: voice.name,
data: voice // 保存完整数据
}))
)
// 音色选择变化处理
const handleVoiceChange = (value, option) => {
const voice = option.data
selectedVoiceId.value = value
emit('select', voice)
}
// Preview the voice currently selected in the dropdown.
const handlePreviewCurrentVoice = () => {
  const id = selectedVoiceId.value
  if (!id) return
  const voice = displayedVoices.value.find((item) => item.id === id)
  if (voice) handlePlayVoiceSample(voice)
}
/**
 * Play a voice sample through the useTTS hook's playVoiceSample method.
 * @param {Object} voice the voice card object to preview
 */
const handlePlayVoiceSample = (voice) => {
  playVoiceSample(
    voice,
    (audioData) => {
      // success callback
      console.log('音频播放成功', audioData)
    },
    (error) => {
      // error callback
      console.error('音频播放失败', error)
    }
  )
}
/**
* 设置要试听的文本(供父组件调用)
* @param {string} text 要试听的文本
*/
const setPreviewText = (text) => {
setText(text)
}
/**
* 设置语速(供父组件调用)
* @param {number} rate 语速倍率
*/
const setPreviewSpeechRate = (rate) => {
setSpeechRate(rate)
}
defineExpose({
setPreviewText,
setPreviewSpeechRate
})
onMounted(async () => {
await voiceStore.refresh()
})
</script>
<style scoped>
.voice-selector {
width: 100%;
}
.empty-voices {
padding: 8px 12px;
font-size: 12px;
color: var(--color-text-secondary);
background: rgba(0, 0, 0, 0.3);
border: 1px dashed rgba(59, 130, 246, 0.3);
border-radius: var(--radius-card);
}
/* 音色选择器和试听按钮的容器 */
.voice-selector-with-preview {
display: flex;
gap: 8px;
align-items: center;
width: 100%;
}
/* 下拉框样式 */
.voice-select {
flex: 1;
}
/* 试听按钮样式 */
.preview-button {
height: 32px;
white-space: nowrap;
}
</style>

View File

@@ -0,0 +1,355 @@
/**
* TTS (Text-to-Speech) 公共Hook
* 支持多个供应商Qwen, Azure, AWS等
*/
import { ref, computed } from 'vue'
import { message } from 'ant-design-vue'
import { VoiceService } from '@/api/voice'
// 供应商配置
const TTS_PROVIDERS = {
QWEN: 'qwen',
AZURE: 'azure',
AWS: 'aws'
}
// 默认配置
const DEFAULT_CONFIG = {
qwen: {
apiEndpoint: '/api/tik/voice/tts',
audioFormat: 'mp3',
supportedFormats: ['mp3', 'wav']
},
azure: {
apiEndpoint: '/api/tik/voice/azure/tts',
audioFormat: 'mp3',
supportedFormats: ['mp3', 'wav', 'ogg']
},
aws: {
apiEndpoint: '/api/tik/voice/aws/tts',
audioFormat: 'mp3',
supportedFormats: ['mp3', 'wav', 'ogg']
}
}
/**
* TTS Hook主函数
* @param {Object} options 配置选项
* @param {string} options.provider 供应商名称,默认'qwen'
* @param {Object} options.customConfig 自定义配置
* @returns {Object} TTS相关的方法和状态
*/
export function useTTS(options = {}) {
const {
provider = TTS_PROVIDERS.QWEN,
customConfig = {}
} = options
// 状态管理
const previewAudioCache = new Map()
const MAX_PREVIEW_CACHE_SIZE = 50
const previewLoadingVoiceId = ref(null)
const playingPreviewVoiceId = ref(null)
const ttsText = ref('')
const speechRate = ref(1.0)
// 音频实例
let previewAudio = null
let previewObjectUrl = ''
// 获取当前供应商配置
const getProviderConfig = () => {
const config = DEFAULT_CONFIG[provider] || DEFAULT_CONFIG[TTS_PROVIDERS.QWEN]
return { ...config, ...customConfig }
}
/**
 * Play an audio preview from a URL.
 *
 * Fix: onended/onerror must be attached BEFORE play() is initiated.
 * Previously they were attached inside play().then(), so an "ended" or
 * "error" event firing before the promise settled (very short clips,
 * decode errors right after start) was silently missed and the playing
 * state was never reset. A `finished` guard prevents double cleanup when
 * both onerror fires and the play() promise rejects.
 *
 * @param {string} url audio URL (http(s) or blob:)
 * @param {Object} options { revokeOnEnd, onEnded }
 */
const playAudioPreview = (url, options = {}) => {
  if (!url) return message.warning('暂无可试听的音频')
  // Stop any previous preview so only one audio plays at a time.
  try {
    previewAudio?.pause?.()
    previewAudio = null
  } catch (error) {
    // A failing pause() on a stale element is harmless; ignore.
  }
  const audio = new Audio(url)
  let finished = false
  const cleanup = () => {
    if (finished) return // guard against double cleanup (onerror + rejected play())
    finished = true
    if (options.revokeOnEnd && url.startsWith('blob:')) {
      URL.revokeObjectURL(url)
      if (previewObjectUrl === url) previewObjectUrl = ''
    }
    previewAudio = null
    options.onEnded && options.onEnded()
  }
  // Attach handlers before starting playback so no event is lost.
  audio.onended = cleanup
  audio.onerror = () => {
    cleanup()
    message.error('播放失败')
  }
  audio.play()
    .then(() => {
      previewAudio = audio
    })
    .catch(() => {
      cleanup()
      message.error('播放失败')
    })
}
/**
* 生成预览缓存键
* @param {Object} voice 音色对象
* @returns {string} 缓存键
*/
const generatePreviewCacheKey = (voice) => {
const voiceId = voice.voiceId || voice.rawId || voice.id
const text = ttsText.value.trim()
const rate = speechRate.value
return `${voiceId}:${text}:${rate}`
}
/**
 * Decode a base64 audio payload into a Blob, create an object URL, and
 * store it in the preview cache (insertion-order LRU, capped at
 * MAX_PREVIEW_CACHE_SIZE).
 *
 * Fix: when evicting the oldest cache entry, skip revoking its objectUrl
 * if it is the URL currently being played (previewObjectUrl) — revoking
 * it mid-playback would break the active audio. The playback teardown
 * path owns that URL's lifetime instead.
 *
 * @param {string} audioBase64 base64-encoded audio data
 * @param {string} format audio format (default 'mp3')
 * @param {string} cacheKey cache key from generatePreviewCacheKey
 * @returns {Promise<Object>} { blob, objectUrl, format }
 */
const decodeAndCacheBase64 = async (audioBase64, format = 'mp3', cacheKey) => {
  // base64 -> raw bytes
  const byteCharacters = window.atob(audioBase64)
  const byteNumbers = new Uint8Array(byteCharacters.length)
  for (let i = 0; i < byteCharacters.length; i++) {
    byteNumbers[i] = byteCharacters.charCodeAt(i)
  }
  // mp3's canonical MIME type is audio/mpeg; other formats map to audio/<format>.
  const mime = format === 'mp3' ? 'audio/mpeg' : `audio/${format}`
  const blob = new Blob([byteNumbers], { type: mime })
  const objectUrl = URL.createObjectURL(blob)
  const audioData = { blob, objectUrl, format }
  previewAudioCache.set(cacheKey, audioData)
  if (previewAudioCache.size > MAX_PREVIEW_CACHE_SIZE) {
    // Map iterates in insertion order, so the first key is the oldest entry.
    const firstKey = previewAudioCache.keys().next().value
    const oldData = previewAudioCache.get(firstKey)
    if (oldData.objectUrl !== previewObjectUrl) {
      URL.revokeObjectURL(oldData.objectUrl)
    }
    previewAudioCache.delete(firstKey)
  }
  return audioData
}
/**
 * Play an audio entry that came from the preview cache.
 *
 * Fix: the previous implementation revoked the prior previewObjectUrl
 * when switching voices, but that URL is still stored inside
 * previewAudioCache — a later cache hit for that voice would then try to
 * play a dead blob URL and fail. Cached object URLs are owned by the
 * cache: they are revoked only on LRU eviction (decodeAndCacheBase64)
 * or by clearAudioCache. This function only switches and plays.
 *
 * @param {Object} audioData { blob, objectUrl, format }
 * @param {Function} onEnded callback invoked when playback finishes
 */
const playCachedAudio = (audioData, onEnded) => {
  previewObjectUrl = audioData.objectUrl
  playAudioPreview(previewObjectUrl, {
    revokeOnEnd: false,
    onEnded: () => {
      onEnded && onEnded()
    }
  })
}
/**
* 重置预览状态
*/
const resetPreviewState = () => {
previewLoadingVoiceId.value = null
playingPreviewVoiceId.value = null
}
/**
 * Parse the numeric ID out of a "user-<id>" prefixed string.
 * @param {string} idStr an ID string such as "user-123"
 * @returns {number|null} the parsed numeric ID, or null when the input
 *   is not a string, lacks the "user-" prefix, or is not numeric
 */
const extractIdFromString = (idStr) => {
  const isUserId = typeof idStr === 'string' && idStr.startsWith('user-')
  if (!isUserId) return null
  const parsed = parseInt(idStr.replace('user-', ''))
  return Number.isNaN(parsed) ? null : parsed
}
/**
* 构建预览参数
* @param {Object} voice 音色对象
* @returns {Object|null} 预览参数
*/
const buildPreviewParams = (voice) => {
const configId = voice.rawId || extractIdFromString(voice.id)
if (!configId) {
message.error('配音配置无效')
return null
}
const providerConfig = getProviderConfig()
return {
voiceConfigId: configId,
inputText: ttsText.value,
speechRate: speechRate.value || 1.0,
audioFormat: providerConfig.audioFormat,
timestamp: Date.now(),
provider: provider
}
}
/**
 * Play a preview sample for a voice, using the cache when possible.
 *
 * Flow: ignore duplicate clicks on the same voice -> stop any other
 * playing preview -> mark loading/playing -> serve from cache, or fetch
 * from VoiceService.preview and play either the returned audioUrl or the
 * decoded base64 payload. State is reset via resetPreviewState when
 * playback ends or any step fails.
 *
 * NOTE(review): previewLoadingVoiceId stays set until resetPreviewState
 * runs at playback end, so a button bound to it shows a loading spinner
 * for the whole playback, not just the fetch — confirm this is intended.
 *
 * @param {Object} voice voice card object (id plus rawId/voiceId fields)
 * @param {Function} onSuccess called with the audio data on success
 * @param {Function} onError called with an Error on any failure
 */
const playVoiceSample = async (voice, onSuccess, onError) => {
  if (!voice) return
  // Ignore repeated clicks while this same voice is loading or playing.
  if (previewLoadingVoiceId.value === voice.id || playingPreviewVoiceId.value === voice.id) {
    return
  }
  // Switching to a different voice: stop the currently playing audio first.
  if (playingPreviewVoiceId.value && playingPreviewVoiceId.value !== voice.id) {
    try {
      previewAudio?.pause?.()
      previewAudio = null
    } catch (error) {
      // best-effort stop; a failing pause() is harmless
    }
  }
  previewLoadingVoiceId.value = voice.id
  playingPreviewVoiceId.value = voice.id
  // Cache hit: play directly and skip the TTS request.
  const cacheKey = generatePreviewCacheKey(voice)
  const cachedAudio = previewAudioCache.get(cacheKey)
  if (cachedAudio) {
    playCachedAudio(cachedAudio, resetPreviewState)
    onSuccess && onSuccess(cachedAudio)
    return
  }
  try {
    const params = buildPreviewParams(voice)
    if (!params) {
      // buildPreviewParams already surfaced the error message.
      resetPreviewState()
      onError && onError(new Error('参数构建失败'))
      return
    }
    const res = await VoiceService.preview(params)
    if (res.code !== 0) {
      message.error(res.msg || '试听失败')
      resetPreviewState()
      onError && onError(new Error(res.msg || '试听失败'))
      return
    }
    // The backend may return either a directly playable URL or base64 audio.
    if (res.data?.audioUrl) {
      playAudioPreview(res.data.audioUrl, { onEnded: resetPreviewState })
      onSuccess && onSuccess(res.data)
    } else if (res.data?.audioBase64) {
      const audioData = await decodeAndCacheBase64(res.data.audioBase64, res.data.format, cacheKey)
      playCachedAudio(audioData, resetPreviewState)
      onSuccess && onSuccess(audioData)
    } else {
      // Neither URL nor base64 present: treat as failure.
      message.error('试听失败')
      resetPreviewState()
      onError && onError(new Error('未收到音频数据'))
    }
  } catch (error) {
    message.error('试听失败')
    resetPreviewState()
    onError && onError(error)
  }
}
/**
* TTS文本转语音
* @param {Object} params TTS参数
* @returns {Promise<Object>} TTS结果
*/
const synthesize = async (params) => {
const providerConfig = getProviderConfig()
const ttsParams = {
inputText: params.inputText || ttsText.value,
voiceConfigId: params.voiceConfigId,
speechRate: params.speechRate || speechRate.value,
audioFormat: params.audioFormat || providerConfig.audioFormat,
provider: provider
}
return await VoiceService.synthesize(ttsParams)
}
/**
* 设置文本
* @param {string} text 要设置的文本
*/
const setText = (text) => {
ttsText.value = text
}
/**
* 设置语速
* @param {number} rate 语速倍率
*/
const setSpeechRate = (rate) => {
speechRate.value = rate
}
/**
* 清除音频缓存
*/
const clearAudioCache = () => {
previewAudioCache.forEach((audioData) => {
URL.revokeObjectURL(audioData.objectUrl)
})
previewAudioCache.clear()
}
/**
* 停止当前播放
*/
const stopCurrentPlayback = () => {
try {
previewAudio?.pause?.()
previewAudio = null
} catch (error) {
}
}
return {
// 状态
previewLoadingVoiceId,
playingPreviewVoiceId,
ttsText,
speechRate,
// 方法
playVoiceSample,
synthesize,
setText,
setSpeechRate,
playAudioPreview,
clearAudioCache,
stopCurrentPlayback,
resetPreviewState,
// 配置
getProviderConfig,
TTS_PROVIDERS,
DEFAULT_CONFIG
}
}
export { TTS_PROVIDERS }

View File

@@ -43,10 +43,8 @@ const routes = [
name: '数字人',
children: [
{ path: '', redirect: '/digital-human/voice-copy' },
{ path: 'kling', name: '可灵数字人', component: () => import('../views/kling/IdentifyFace.vue') },
{ path: 'kling', name: '数字人生成', component: () => import('../views/kling/IdentifyFace.vue') },
{ path: 'voice-copy', name: '人声克隆', component: () => import('../views/dh/VoiceCopy.vue') },
{ path: 'avatar', name: '生成数字人', component: () => import('../views/dh/Avatar.vue') },
{ path: 'video', name: '数字人视频', component: () => import('../views/dh/Video.vue') },
]
},
{

View File

@@ -1,25 +0,0 @@
<script setup>
</script>
<template>
<div class="space-y-4">
<h2 class="text-xl font-bold">生成数字人</h2>
<div class="grid grid-cols-1 gap-4 lg:grid-cols-3">
<section class="p-4 bg-white rounded shadow lg:col-span-1">
<div class="space-y-3">
<div class="text-sm text-gray-600">形象背景脚本分辨率字幕等配置</div>
<button class="px-4 py-2 text-white bg-purple-600 rounded">生成视频</button>
</div>
</section>
<section class="p-4 bg-white rounded shadow lg:col-span-2">
<div class="text-gray-500">视频预览任务队列渲染进度</div>
</section>
</div>
</div>
</template>
<style scoped>
</style>

View File

@@ -61,7 +61,7 @@ const userVoiceCards = computed(() =>
id: `user-${profile.id}`,
rawId: profile.id,
name: profile.name || '未命名',
category: profile.gender === 'male' ? '男青年' : '女青',
category: '',
gender: profile.gender || 'female',
description: profile.note || '我的配音',
fileUrl: profile.fileUrl,

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,239 @@
/**
* @fileoverview useDigitalHumanGeneration Hook - 数字人生成逻辑封装
* @author Claude Code
*/
import { ref, computed } from 'vue'
import { message } from 'ant-design-vue'
import type {
UseDigitalHumanGeneration,
VideoState,
IdentifyState,
MaterialValidation,
Video,
AudioState,
} from '../types/identify-face'
import { identifyUploadedVideo, uploadAndIdentifyVideo } from '@/api/kling'
/**
* 数字人生成 Hook
* @param audioState 音频状态(来自父 Hook
*/
export function useDigitalHumanGeneration(
audioState: AudioState
): UseDigitalHumanGeneration {
// ==================== 响应式状态 ====================
const videoState = ref<VideoState>({
uploadedVideo: '',
videoFile: null,
previewVideoUrl: '',
selectedVideo: null,
videoSource: null,
selectorVisible: false,
})
const identifyState = ref<IdentifyState>({
identifying: false,
identified: false,
sessionId: '',
faceId: '',
faceStartTime: 0,
faceEndTime: 0,
videoFileId: null,
})
const materialValidation = ref<MaterialValidation>({
videoDuration: 0,
audioDuration: 0,
isValid: false,
showDetails: false,
})
// ==================== 计算属性 ====================
/**
* 人脸出现时长
*/
const faceDuration = computed(() => {
return identifyState.value.faceEndTime - identifyState.value.faceStartTime
})
/**
* 是否可以生成数字人视频
*/
const canGenerate = computed(() => {
const hasVideo = videoState.value.uploadedVideo || videoState.value.selectedVideo
const audioValidated = audioState.validationPassed
const materialValidated = materialValidation.value.isValid
return !!(hasVideo && audioValidated && materialValidated)
})
// ==================== 核心方法 ====================
/**
 * Handle a direct video file upload: validate the extension, build a
 * local blob preview, reset identification state, then run face
 * recognition.
 *
 * Fix: revoke the object URL created by a previous upload before
 * creating a new one — otherwise each re-upload leaks a blob URL for
 * the lifetime of the page.
 */
const handleFileUpload = async (file: File): Promise<void> => {
  if (!file.name.match(/\.(mp4|mov)$/i)) {
    message.error('仅支持 MP4 和 MOV')
    return
  }
  // Release the previous local preview URL (library-sourced URLs are
  // not blob: URLs and must not be revoked).
  const previousUrl = videoState.value.uploadedVideo
  if (previousUrl && previousUrl.startsWith('blob:')) {
    URL.revokeObjectURL(previousUrl)
  }
  videoState.value.videoFile = file
  videoState.value.uploadedVideo = URL.createObjectURL(file)
  videoState.value.selectedVideo = null
  videoState.value.previewVideoUrl = ''
  videoState.value.videoSource = 'upload'
  resetIdentifyState()
  resetMaterialValidation()
  await performFaceRecognition()
}
/**
 * Handle picking a video from the material library.
 *
 * Fix: performFaceRecognition() is async and rethrows on failure; the
 * previous code ignored the returned promise, producing an unhandled
 * rejection. The error is already surfaced to the user via
 * message.error inside performFaceRecognition, so it is safe to absorb
 * the rejection here.
 */
const handleVideoSelect = (video: Video): void => {
  videoState.value.selectedVideo = video
  videoState.value.uploadedVideo = video.fileUrl
  videoState.value.videoFile = null
  videoState.value.videoSource = 'select'
  videoState.value.selectorVisible = false
  resetIdentifyState()
  identifyState.value.videoFileId = video.id
  // Library durations are in seconds; validation state uses milliseconds.
  materialValidation.value.videoDuration = (video.duration || 0) * 1000
  performFaceRecognition().catch(() => {
    // Failure already reported inside performFaceRecognition; swallow
    // here only to avoid an unhandled promise rejection.
  })
}
/**
* 执行人脸识别
*/
const performFaceRecognition = async (): Promise<void> => {
const hasUploadFile = videoState.value.videoFile
const hasSelectedVideo = videoState.value.selectedVideo
if (!hasUploadFile && !hasSelectedVideo) {
return
}
identifyState.value.identifying = true
try {
let res
if (hasSelectedVideo) {
res = await identifyUploadedVideo(hasSelectedVideo)
identifyState.value.videoFileId = hasSelectedVideo.id
} else {
res = await uploadAndIdentifyVideo(hasUploadFile!)
identifyState.value.videoFileId = res.data.fileId
}
identifyState.value.sessionId = res.data.sessionId
identifyState.value.faceId = res.data.faceId
identifyState.value.faceStartTime = res.data.startTime || 0
identifyState.value.faceEndTime = res.data.endTime || 0
identifyState.value.identified = true
const durationSec = faceDuration.value / 1000
const suggestedMaxChars = Math.floor(durationSec * 3.5)
message.success(`识别完成!人脸出现时长约 ${durationSec.toFixed(1)} 秒,建议文案不超过 ${suggestedMaxChars}`)
} catch (error: any) {
message.error(error.message || '识别失败')
throw error
} finally {
identifyState.value.identifying = false
}
}
/**
* 验证素材时长
*/
const validateMaterialDuration = (videoDurationMs: number, audioDurationMs: number): boolean => {
const isValid = videoDurationMs > audioDurationMs
materialValidation.value.videoDuration = videoDurationMs
materialValidation.value.audioDuration = audioDurationMs
materialValidation.value.isValid = isValid
return isValid
}
/**
* 重置视频状态
*/
const resetVideoState = (): void => {
videoState.value.uploadedVideo = ''
videoState.value.videoFile = null
videoState.value.selectedVideo = null
videoState.value.videoSource = null
videoState.value.previewVideoUrl = ''
videoState.value.selectorVisible = false
resetIdentifyState()
resetMaterialValidation()
}
/**
* 获取视频预览 URL
*/
const getVideoPreviewUrl = (video: Video): string => {
if (video.coverBase64) {
if (!video.coverBase64.startsWith('data:')) {
return `data:image/jpeg;base64,${video.coverBase64}`
}
return video.coverBase64
}
if (video.previewUrl) {
return video.previewUrl
}
if (video.coverUrl) {
return video.coverUrl
}
return 'data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjAwIiBoZWlnaHQ9IjExMCIgdmlld0JveD0iMCAwIDIwMCAxMTAiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxyZWN0IHdpZHRoPSIyMDAiIGhlaWdodD0iMTEwIiBmaWxsPSIjMzc0MTUxIi8+CjxwYXRoIGQ9Ik04NSA0NUwxMTUgNjVMMTA1IDg1TDc1IDc1TDg1IDQ1WiIgZmlsbD0iIzU3MjY1MSIvPgo8L3N2Zz4K'
}
/**
* 重置识别状态
*/
const resetIdentifyState = (): void => {
identifyState.value.identified = false
identifyState.value.sessionId = ''
identifyState.value.faceId = ''
identifyState.value.videoFileId = null
}
/**
* 重置素材校验状态
*/
const resetMaterialValidation = (): void => {
materialValidation.value.videoDuration = 0
materialValidation.value.audioDuration = 0
materialValidation.value.isValid = false
}
return {
// 响应式状态
videoState,
identifyState,
materialValidation,
// 计算属性
faceDuration,
canGenerate,
// 方法
handleFileUpload,
handleVideoSelect,
performFaceRecognition,
validateMaterialDuration,
resetVideoState,
getVideoPreviewUrl,
}
}

View File

@@ -0,0 +1,326 @@
/**
* @fileoverview useIdentifyFaceController Hook - 主控制器 Hook
* @author Claude Code
*/
import { computed } from 'vue'
import { message } from 'ant-design-vue'
import type {
UseIdentifyFaceController,
UseVoiceGeneration,
UseDigitalHumanGeneration,
LipSyncTaskData,
} from '../types/identify-face'
import { createLipSyncTask } from '@/api/kling'
/**
* 识别控制器 Hook
* @param voiceGeneration 语音生成 Hook
* @param digitalHuman 数字人生成 Hook
*/
/**
 * 识别控制器 Hook:协调语音生成与数字人生成两个子 Hook,
 * 承载页面级业务流程(任务提交)与 UI 事件处理。
 *
 * @param voiceGeneration 语音生成 Hook 实例
 * @param digitalHuman 数字人生成 Hook 实例
 */
export function useIdentifyFaceController(
  voiceGeneration: UseVoiceGeneration,
  digitalHuman: UseDigitalHumanGeneration
): UseIdentifyFaceController {
  // ==================== 计算属性 ====================

  /**
   * 是否可以生成数字人视频(文案 + 音色 + 视频 + 音频校验 + 素材校验)
   */
  const canGenerate = computed(() => {
    const hasText = voiceGeneration.ttsText.value.trim()
    const hasVoice = voiceGeneration.selectedVoiceMeta.value
    const hasVideo = digitalHuman.videoState.value.uploadedVideo || digitalHuman.videoState.value.selectedVideo
    const audioValidated = voiceGeneration.audioState.value.validationPassed
    const materialValidated = digitalHuman.materialValidation.value.isValid
    return !!(hasText && hasVoice && hasVideo && audioValidated && materialValidated)
  })

  /**
   * 文案最大长度:未识别时上限 4000;识别后按建议字数上浮 20% 且不超过 4000
   */
  const maxTextLength = computed(() => {
    if (!digitalHuman.identifyState.value.identified || digitalHuman.faceDuration.value <= 0) {
      return 4000
    }
    return Math.min(4000, Math.floor(voiceGeneration.suggestedMaxChars.value * 1.2))
  })

  /**
   * 文本框占位符(识别完成后提示建议字数)
   */
  const textareaPlaceholder = computed(() => {
    if (digitalHuman.identifyState.value.identified && digitalHuman.faceDuration.value > 0) {
      return `请输入文案,建议不超过${voiceGeneration.suggestedMaxChars.value}字以确保与视频匹配`
    }
    return '请输入你想让角色说话的内容'
  })

  // 语速滑块刻度
  const speechRateMarks = { 0.5: '0.5x', 1: '1x', 1.5: '1.5x', 2: '2x' }

  // 语速展示文本,如 "1.0x"
  const speechRateDisplay = computed(() => `${voiceGeneration.speechRate.value.toFixed(1)}x`)

  // ==================== 业务流程方法 ====================

  /**
   * 生成数字人视频:校验前置条件 → (必要时)补做人脸识别 → 组装并提交口型同步任务。
   */
  const generateDigitalHuman = async (): Promise<void> => {
    if (!canGenerate.value) {
      message.warning('请先完成配置')
      return
    }
    const text = voiceGeneration.ttsText.value.trim()
    if (!text) {
      message.warning('请输入文案内容')
      return
    }
    const voice = voiceGeneration.selectedVoiceMeta.value
    if (!voice) {
      message.warning('请选择音色')
      return
    }
    try {
      // 如果未识别,先进行人脸识别
      if (!digitalHuman.identifyState.value.identified) {
        message.loading('正在进行人脸识别...', 0)
        const hasUploadFile = digitalHuman.videoState.value.videoFile
        const hasSelectedVideo = digitalHuman.videoState.value.selectedVideo
        if (!hasUploadFile && !hasSelectedVideo) {
          message.destroy()
          message.warning('请先选择或上传视频')
          return
        }
        try {
          await digitalHuman.performFaceRecognition()
          message.destroy()
          message.success('人脸识别完成')
        } catch (error) {
          // 识别失败的错误提示已由 performFaceRecognition 展示,这里只收尾
          message.destroy()
          return
        }
      }
      const videoFileId = digitalHuman.identifyState.value.videoFileId
      const taskData: LipSyncTaskData = {
        taskName: `数字人任务_${Date.now()}`,
        videoFileId: videoFileId!,
        inputText: voiceGeneration.ttsText.value,
        speechRate: voiceGeneration.speechRate.value,
        // NOTE(review): volume 固定为 0 看起来可疑 —— 请确认后端对 0 的语义(静音还是默认音量)
        volume: 0,
        guidanceScale: 1,
        seed: 8888,
        kling_session_id: digitalHuman.identifyState.value.sessionId,
        kling_face_id: digitalHuman.identifyState.value.faceId,
        kling_face_start_time: digitalHuman.identifyState.value.faceStartTime,
        kling_face_end_time: digitalHuman.identifyState.value.faceEndTime,
        ai_provider: 'kling',
        voiceConfigId: voice.rawId || extractIdFromString(voice.id),
      }
      if (!taskData.voiceConfigId) {
        message.warning('音色配置无效')
        return
      }
      // 如果有预生成的音频,添加到任务数据中
      if (voiceGeneration.audioState.value.generated && voiceGeneration.audioState.value.durationMs > 0) {
        taskData.pre_generated_audio = {
          audioBase64: voiceGeneration.audioState.value.generated.audioBase64,
          format: voiceGeneration.audioState.value.generated.format || 'mp3',
        }
        taskData.sound_end_time = voiceGeneration.audioState.value.durationMs
      }
      const res = await createLipSyncTask(taskData)
      if (res.code === 0) {
        message.success('任务已提交到任务中心,请前往查看')
      } else {
        throw new Error(res.msg || '任务创建失败')
      }
    } catch (error: any) {
      message.error(error.message || '任务提交失败')
    }
  }

  /**
   * 更换视频:清空视频与音频相关的全部状态。
   * 说明:原实现先按来源分支手动清字段再调用 resetVideoState,
   * 而 resetVideoState 本身会重置所有视频字段,分支属冗余,这里统一重置。
   */
  const replaceVideo = (): void => {
    digitalHuman.resetVideoState()
    voiceGeneration.resetAudioState()
  }

  /**
   * 处理音色选择
   */
  const handleVoiceSelect = (voice: any): void => {
    voiceGeneration.selectedVoiceMeta.value = voice
  }

  /**
   * 处理文件选择(来自 <input type="file"> 的 change 事件)
   */
  const handleFileSelect = (event: Event): void => {
    const input = event.target as HTMLInputElement
    const file = input.files?.[0]
    if (file) {
      digitalHuman.handleFileUpload(file)
    }
  }

  /**
   * 处理拖拽上传
   */
  const handleDrop = (event: DragEvent): void => {
    event.preventDefault()
    const file = event.dataTransfer?.files[0]
    if (file) {
      digitalHuman.handleFileUpload(file)
    }
  }

  /**
   * 触发隐藏的文件选择框。
   * 修复:querySelector 返回 Element,其上不存在 click();
   * 指定 HTMLInputElement 泛型以通过严格类型检查。
   */
  const triggerFileSelect = (): void => {
    document.querySelector<HTMLInputElement>('input[type="file"]')?.click()
  }

  /**
   * 选择上传模式。
   * 修复:UseDigitalHumanGeneration 接口并未暴露 resetIdentifyState /
   * resetMaterialValidation,原实现直接调用会编译失败且运行时为 undefined;
   * 改为就地重置对应状态字段。
   */
  const handleSelectUpload = (): void => {
    digitalHuman.videoState.value.videoSource = 'upload'
    digitalHuman.videoState.value.selectedVideo = null
    const identify = digitalHuman.identifyState.value
    identify.identified = false
    identify.sessionId = ''
    identify.faceId = ''
    identify.videoFileId = null
    const validation = digitalHuman.materialValidation.value
    validation.videoDuration = 0
    validation.audioDuration = 0
    validation.isValid = false
  }

  /**
   * 从素材库选择:切换来源并打开选择弹窗
   */
  const handleSelectFromLibrary = (): void => {
    digitalHuman.videoState.value.videoSource = 'select'
    digitalHuman.videoState.value.videoFile = null
    digitalHuman.videoState.value.uploadedVideo = ''
    digitalHuman.videoState.value.selectorVisible = true
  }

  /**
   * 处理视频选择器选择(转发给子 Hook)
   */
  const handleVideoSelect = (video: any): void => {
    digitalHuman.handleVideoSelect(video)
  }

  /**
   * 简化文案:聚焦并滚动到文案输入框,引导用户自行精简
   */
  const handleSimplifyScript = (): void => {
    const textarea = document.querySelector('.tts-textarea textarea') as HTMLTextAreaElement
    if (textarea) {
      textarea.focus()
      textarea.scrollIntoView({ behavior: 'smooth', block: 'center' })
    }
  }

  /**
   * 处理视频加载,记录预览 URL
   */
  const handleVideoLoaded = (videoUrl: string): void => {
    digitalHuman.videoState.value.previewVideoUrl = videoUrl
  }

  // ==================== UI 辅助方法 ====================

  /**
   * 将秒数格式化为 mm:ss;无值时返回 "--:--"
   */
  const formatDuration = (seconds: number): string => {
    if (!seconds) return '--:--'
    const minutes = Math.floor(seconds / 60)
    const remainingSeconds = Math.floor(seconds % 60)
    return `${String(minutes).padStart(2, '0')}:${String(remainingSeconds).padStart(2, '0')}`
  }

  /**
   * 将字节数格式化为人类可读大小(B/KB/MB/GB,保留一位小数)
   */
  const formatFileSize = (bytes: number): string => {
    if (!bytes) return '0 B'
    const units = ['B', 'KB', 'MB', 'GB']
    let size = bytes
    let unitIndex = 0
    while (size >= 1024 && unitIndex < units.length - 1) {
      size /= 1024
      unitIndex++
    }
    return `${size.toFixed(1)} ${units[unitIndex]}`
  }

  return {
    // 组合子 Hooks
    voiceGeneration,
    digitalHuman,
    // 业务流程方法
    generateDigitalHuman,
    replaceVideo,
    // 事件处理方法
    handleVoiceSelect,
    handleFileSelect,
    handleDrop,
    triggerFileSelect,
    handleSelectUpload,
    handleSelectFromLibrary,
    handleVideoSelect,
    handleSimplifyScript,
    handleVideoLoaded,
    // UI 辅助方法
    formatDuration,
    formatFileSize,
    // 计算属性
    canGenerate,
    maxTextLength,
    textareaPlaceholder,
    speechRateMarks,
    speechRateDisplay,
  }
}
/**
 * 从字符串中提取尾部 ID(结尾处连续的字母/数字/下划线/连字符片段)。
 * 无法匹配时原样返回输入。
 */
function extractIdFromString(str: string): string {
  const tailMatch = /[\w-]+$/.exec(str)
  return tailMatch === null ? str : tailMatch[0]
}

View File

@@ -0,0 +1,228 @@
/**
* @fileoverview useVoiceGeneration Hook - 语音生成逻辑封装
* @author Claude Code
*/
import { ref, computed } from 'vue'
import { message } from 'ant-design-vue'
import type {
UseVoiceGeneration,
AudioState,
VoiceMeta,
IdentifyState,
AudioData,
} from '../types/identify-face'
import { VoiceService } from '@/api/voice'
/**
* 语音生成 Hook
* @param identifyState 人脸识别状态(来自父 Hook
* @param faceDuration 人脸出现时长(毫秒)
*/
/**
 * 语音生成 Hook:封装 TTS 文案、音色选择、配音生成与时长校验逻辑。
 *
 * @param identifyState 人脸识别状态(来自父 Hook)
 * @param faceDuration 人脸出现时长(毫秒)
 *   NOTE(review): faceDuration 以普通 number 传入,suggestedMaxChars 等计算属性
 *   不会随识别结果变化而更新;若需要响应式,应改为传入 Ref/ComputedRef —— 请与调用方确认。
 */
export function useVoiceGeneration(
  identifyState: IdentifyState,
  faceDuration: number
): UseVoiceGeneration {
  // ==================== 响应式状态 ====================

  // TTS 文案内容
  const ttsText = ref<string>('')
  // 语速倍率(1.0 为原速)
  const speechRate = ref<number>(1.0)
  // 当前选中的音色元数据
  const selectedVoiceMeta = ref<VoiceMeta | null>(null)
  // 配音生成状态(durationMs 单位毫秒)
  const audioState = ref<AudioState>({
    generated: null,
    durationMs: 0,
    validationPassed: false,
    generating: false,
  })

  // ==================== 计算属性 ====================

  /**
   * 是否可以生成配音:需有文案、音色、已识别的视频,且当前不在生成中
   */
  const canGenerateAudio = computed(() => {
    const hasText = ttsText.value.trim()
    const hasVoice = selectedVoiceMeta.value
    const hasVideo = identifyState.identified
    return !!(hasText && hasVoice && hasVideo && !audioState.value.generating)
  })

  /**
   * 建议的最大字符数(约 3.5 字/秒,按语速倍率缩放)
   */
  const suggestedMaxChars = computed(() => {
    const durationSec = faceDuration / 1000
    const adjustedRate = speechRate.value || 1.0
    return Math.floor(durationSec * 3.5 * adjustedRate)
  })

  // ==================== 核心方法 ====================

  /**
   * 生成配音并解析/校验音频时长。
   * 失败时通过 message 提示,不向外抛出。
   */
  const generateAudio = async (): Promise<void> => {
    const voice = selectedVoiceMeta.value
    if (!voice) {
      message.warning('请选择音色')
      return
    }
    if (!ttsText.value.trim()) {
      message.warning('请输入文案内容')
      return
    }
    audioState.value.generating = true
    try {
      const params = {
        inputText: ttsText.value,
        voiceConfigId: voice.rawId || extractIdFromString(voice.id),
        speechRate: speechRate.value || 1.0,
        audioFormat: 'mp3' as const,
      }
      const res = await VoiceService.synthesize(params)
      if (res.code === 0) {
        const audioData = res.data as AudioData
        if (!audioData.audioBase64) {
          throw new Error('未收到音频数据,无法进行时长解析')
        }
        audioState.value.generated = audioData
        try {
          // 解析音频时长(毫秒)
          audioState.value.durationMs = await parseAudioDuration(audioData.audioBase64)
          // 验证音频与人脸区间的重合时长
          validateAudioDuration()
          message.success('配音生成成功!')
        } catch (error) {
          // 解析失败则整体回滚,避免残留半成品状态
          message.error('音频解析失败,请重新生成配音')
          audioState.value.durationMs = 0
          audioState.value.generated = null
          audioState.value.validationPassed = false
        }
      } else {
        throw new Error(res.msg || '配音生成失败')
      }
    } catch (error: any) {
      message.error(error.message || '配音生成失败')
    } finally {
      audioState.value.generating = false
    }
  }

  /**
   * 将 base64 音频解码为 Blob,并用 <audio> 元数据解析时长。
   * @param base64Data 可带或不带 data: 前缀的 base64 字符串
   * @returns 时长(毫秒)
   * @throws 解码失败、加载失败或时长不可用时 reject
   */
  const parseAudioDuration = async (base64Data: string): Promise<number> => {
    return new Promise((resolve, reject) => {
      try {
        const base64 = base64Data.includes(',') ? base64Data.split(',')[1] : base64Data
        const binaryString = window.atob(base64)
        const bytes = new Uint8Array(binaryString.length)
        for (let i = 0; i < binaryString.length; i++) {
          bytes[i] = binaryString.charCodeAt(i)
        }
        // 修复:MP3 的标准 MIME 类型为 audio/mpeg(audio/mp3 为非标准写法)
        const blob = new Blob([bytes], { type: 'audio/mpeg' })
        const audio = new Audio()
        const objectUrl = URL.createObjectURL(blob)
        audio.addEventListener('loadedmetadata', () => {
          URL.revokeObjectURL(objectUrl)
          // 修复:部分浏览器对无法确定时长的音频会给出 NaN/Infinity,
          // 原实现会把非法值写入 durationMs;这里显式拒绝
          if (!Number.isFinite(audio.duration)) {
            reject(new Error('无法解析音频时长'))
            return
          }
          resolve(Math.round(audio.duration * 1000))
        })
        audio.addEventListener('error', (error) => {
          URL.revokeObjectURL(objectUrl)
          reject(error)
        })
        audio.src = objectUrl
        audio.load()
      } catch (error) {
        reject(error)
      }
    })
  }

  /**
   * 验证音频与人脸区间的重合时长(音频视为从人脸区间起点开始播放,
   * 重合部分至少需要 2 秒)。结果写入 audioState.validationPassed 并给出提示。
   */
  const validateAudioDuration = (): boolean => {
    if (!identifyState.identified || faceDuration <= 0) {
      audioState.value.validationPassed = false
      return false
    }
    const faceStart = identifyState.faceStartTime
    const faceEnd = identifyState.faceEndTime
    const faceDurationMs = faceEnd - faceStart
    const audioDuration = audioState.value.durationMs
    // 重合区间 = [faceStart, min(faceEnd, faceStart + 音频时长)]
    const overlapStart = faceStart
    const overlapEnd = Math.min(faceEnd, faceStart + audioDuration)
    const overlapDuration = Math.max(0, overlapEnd - overlapStart)
    const isValid = overlapDuration >= 2000
    audioState.value.validationPassed = isValid
    if (!isValid) {
      const overlapSec = (overlapDuration / 1000).toFixed(1)
      message.warning(
        `音频时长(${(audioDuration/1000).toFixed(1)}秒)与人脸区间(${(faceDurationMs/1000).toFixed(1)}秒)不匹配,重合部分仅${overlapSec}秒,至少需要2秒`
      )
    } else {
      message.success('时长校验通过!')
    }
    return isValid
  }

  /**
   * 重置音频状态
   */
  const resetAudioState = (): void => {
    audioState.value.generated = null
    audioState.value.durationMs = 0
    audioState.value.validationPassed = false
    audioState.value.generating = false
  }

  return {
    // 响应式状态
    ttsText,
    speechRate,
    selectedVoiceMeta,
    audioState,
    // 计算属性
    canGenerateAudio,
    suggestedMaxChars,
    // 方法
    generateAudio,
    parseAudioDuration,
    validateAudioDuration,
    resetAudioState,
  }
}
/**
 * 从字符串中提取 ID。
 * 取结尾处由字母、数字、下划线或连字符组成的片段;匹配失败时退回原字符串。
 */
function extractIdFromString(str: string): string {
  const found = str.match(/[\w-]+$/)
  if (found) {
    return found[0]
  }
  return str
}

View File

@@ -0,0 +1,175 @@
/**
* @fileoverview IdentifyFace 组件类型定义
* @author Claude Code
*/
/**
 * 视频状态接口:记录当前视频的来源(本地上传/素材库)、预览与选择器状态。
 */
export interface VideoState {
  // 当前视频的可播放地址(本地上传为 blob URL,素材库为 fileUrl)
  uploadedVideo: string
  // 本地上传的原始文件;素材库来源时为 null
  videoFile: File | null
  // 视频预览 URL(由视频加载回调写入)
  previewVideoUrl: string
  // 素材库选中的视频对象;本地上传时为 null
  selectedVideo: Video | null
  // 视频来源:'upload' 本地上传 / 'select' 素材库 / null 未选择
  videoSource: 'upload' | 'select' | null
  // 素材库选择弹窗是否可见
  selectorVisible: boolean
}
/**
 * 视频对象接口(来自素材库)
 */
export interface Video {
  id: string | number
  fileName: string
  fileUrl: string
  // 文件大小 —— presumably 单位为字节,TODO 与素材库接口确认
  fileSize: number
  // 时长,单位秒(使用处会 ×1000 换算为毫秒)
  duration: number
  // 封面图 base64(可能不带 data: 前缀,使用处会补全)
  coverBase64?: string
  previewUrl?: string
  coverUrl?: string
}
/**
 * 人脸识别状态接口(时间字段单位均为毫秒)
 */
export interface IdentifyState {
  // 是否正在识别中
  identifying: boolean
  // 是否已完成识别
  identified: boolean
  // 识别服务返回的会话 ID
  sessionId: string
  // 识别服务返回的人脸 ID
  faceId: string
  // 人脸出现起始时间(毫秒)
  faceStartTime: number
  // 人脸出现结束时间(毫秒)
  faceEndTime: number
  // 关联的视频文件 ID(素材库 video.id 或上传接口返回的 fileId)
  videoFileId: string | number | null
}
/**
 * 音频状态接口
 */
export interface AudioState {
  // 已生成的音频数据;未生成或生成失败时为 null
  generated: AudioData | null
  // 解析出的音频时长(毫秒)
  durationMs: number
  // 音频与人脸区间的时长校验是否通过
  validationPassed: boolean
  // 是否正在生成配音
  generating: boolean
}
/**
 * 音频数据接口(TTS 合成结果)
 */
export interface AudioData {
  // 音频内容的 base64 编码
  audioBase64: string
  audioUrl?: string
  // 音频格式,缺省按 'mp3' 处理
  format?: string
}
/**
 * 素材校验接口(时长单位均为毫秒)
 */
export interface MaterialValidation {
  // 视频时长(毫秒)
  videoDuration: number
  // 音频时长(毫秒)
  audioDuration: number
  // 校验是否通过(要求视频时长大于音频时长)
  isValid: boolean
  // NOTE(review): 展开校验详情的 UI 标志 —— 本模块代码中未见读取,推测由视图层使用,待确认
  showDetails: boolean
}
/**
 * 音色元数据接口
 */
export interface VoiceMeta {
  // 音色标识(可能含前缀,使用处会提取尾部 ID)
  id: string
  // 原始音色配置 ID,优先于从 id 中提取
  rawId?: string
  name?: string
  // 允许携带其他来自音色服务的任意字段
  [key: string]: any
}
/**
* useVoiceGeneration Hook 返回接口
*/
export interface UseVoiceGeneration {
// 响应式状态
ttsText: import('vue').Ref<string>
speechRate: import('vue').Ref<number>
selectedVoiceMeta: import('vue').Ref<VoiceMeta | null>
audioState: import('vue').Ref<AudioState>
// 计算属性
canGenerateAudio: import('vue').ComputedRef<boolean>
suggestedMaxChars: import('vue').ComputedRef<number>
// 方法
generateAudio: () => Promise<void>
parseAudioDuration: (base64Data: string) => Promise<number>
validateAudioDuration: () => boolean
resetAudioState: () => void
}
/**
* useDigitalHumanGeneration Hook 返回接口
*/
export interface UseDigitalHumanGeneration {
// 响应式状态
videoState: import('vue').Ref<VideoState>
identifyState: import('vue').Ref<IdentifyState>
materialValidation: import('vue').Ref<MaterialValidation>
// 计算属性
faceDuration: import('vue').ComputedRef<number>
canGenerate: import('vue').ComputedRef<boolean>
// 方法
handleFileUpload: (file: File) => Promise<void>
handleVideoSelect: (video: Video) => void
performFaceRecognition: () => Promise<void>
validateMaterialDuration: (videoMs: number, audioMs: number) => boolean
resetVideoState: () => void
getVideoPreviewUrl: (video: Video) => string
}
/**
* useIdentifyFaceController Hook 返回接口
*/
export interface UseIdentifyFaceController {
// 组合子 Hooks
voiceGeneration: UseVoiceGeneration
digitalHuman: UseDigitalHumanGeneration
// 业务流程方法
generateDigitalHuman: () => Promise<void>
replaceVideo: () => void
// UI 辅助方法
formatDuration: (seconds: number) => string
formatFileSize: (bytes: number) => string
}
/**
 * Kling API 响应接口(code 为 0 表示成功)
 */
export interface KlingApiResponse<T = any> {
  // 业务状态码,0 为成功
  code: number
  data: T
  // 失败时的提示信息
  msg?: string
}
/**
 * 数字人生成(口型同步)任务数据接口
 */
export interface LipSyncTaskData {
  // 任务名称(调用方以 `数字人任务_${时间戳}` 生成)
  taskName: string
  // 视频文件 ID(素材库 video.id 或上传接口返回的 fileId)
  videoFileId: string | number
  // TTS 文案内容
  inputText: string
  // 语速倍率
  speechRate: number
  // NOTE(review): 调用方固定传 0 —— 音量字段语义(静音/默认)待与后端确认
  volume: number
  guidanceScale: number
  seed: number
  // 人脸识别会话 ID
  kling_session_id: string
  // 人脸 ID
  kling_face_id: string
  // 人脸出现起始时间(毫秒)
  kling_face_start_time: number
  // 人脸出现结束时间(毫秒)
  kling_face_end_time: number
  // AI 服务商标识(当前固定为 'kling')
  ai_provider: string
  // 音色配置 ID
  voiceConfigId: string
  // 预生成的音频(可选;存在时后端可跳过 TTS)
  pre_generated_audio?: {
    audioBase64: string
    format: string
  }
  // 预生成音频的时长(毫秒),与 pre_generated_audio 配套传入
  sound_end_time?: number
}