优化功能

This commit is contained in:
2025-11-22 00:25:29 +08:00
parent bd367c645b
commit a3cc6c6db0
22 changed files with 595 additions and 258 deletions

View File

@@ -8,7 +8,7 @@ import request from './http'
*/
export function createDigitalHumanTask(data) {
return request({
url: '/api/tik/digital-human/task/create',
url: '/webApi/api/tik/digital-human/task/create',
method: 'post',
data
})
@@ -19,7 +19,7 @@ export function createDigitalHumanTask(data) {
*/
export function getDigitalHumanTask(taskId) {
return request({
url: '/api/tik/digital-human/task/get',
url: '/webApi/api/tik/digital-human/task/get',
method: 'get',
params: { taskId }
})
@@ -30,7 +30,7 @@ export function getDigitalHumanTask(taskId) {
*/
export function getDigitalHumanTaskPage(params) {
return request({
url: '/api/tik/digital-human/task/page',
url: '/webApi/api/tik/digital-human/task/page',
method: 'get',
params
})
@@ -41,7 +41,7 @@ export function getDigitalHumanTaskPage(params) {
*/
export function getTaskStatistics() {
return request({
url: '/api/tik/digital-human/task/statistics',
url: '/webApi/api/tik/digital-human/task/statistics',
method: 'get'
})
}
@@ -51,7 +51,7 @@ export function getTaskStatistics() {
*/
export function cancelTask(taskId) {
return request({
url: `/api/tik/digital-human/task/${taskId}/cancel`,
url: `/webApi/api/tik/digital-human/task/${taskId}/cancel`,
method: 'post'
})
}
@@ -61,7 +61,7 @@ export function cancelTask(taskId) {
*/
export function retryTask(taskId) {
return request({
url: `/api/tik/digital-human/task/${taskId}/retry`,
url: `/webApi/api/tik/digital-human/task/${taskId}/retry`,
method: 'post'
})
}
@@ -71,7 +71,7 @@ export function retryTask(taskId) {
*/
export function deleteTask(taskId) {
return request({
url: `/api/tik/digital-human/task/${taskId}`,
url: `/webApi/api/tik/digital-human/task/${taskId}`,
method: 'delete'
})
}

View File

@@ -26,19 +26,23 @@ const isPlayingPreview = ref(false) // 是否正在播放试听音频
const isPlayingSynthesized = ref(false) // 是否正在播放已合成的音频
const pollingInterval = ref(null) // 轮询间隔ID
// Base64音频缓存
const audioBase64Cache = new Map()
const AUDIO_CACHE_MAX_SIZE = 10 // 最多缓存10个音频
// TTS 配置
const ttsText = ref('')
const selectedTtsVoice = ref('')
const speechRate = ref(1.0)
const emotion = ref('neutral')
const instruction = ref('neutral') // 指令参数,用于控制音色风格
const voiceSource = ref('user')
// 系统音色库
// 系统音色库,使用CosyVoice v3-flash模型
const SYSTEM_VOICES = [
{ id: 'sys-pro-01', name: '星悦·知性女声', gender: 'female', category: '职业', description: '温柔专业', voiceId: 'cosyvoice-v2-sys-pro-01' },
{ id: 'sys-boy-01', name: '澄澄·少男音', gender: 'male', category: '少男', description: '年轻清爽', voiceId: 'cosyvoice-v2-sys-boy-01' },
{ id: 'sys-girl-01', name: '沁雪·少女音', gender: 'female', category: '少女', description: '活泼甜美', voiceId: 'cosyvoice-v2-sys-girl-01' },
{ id: 'sys-man-01', name: '寰宇·男青年', gender: 'male', category: '男青年', description: '磁性沉稳', voiceId: 'cosyvoice-v2-sys-man-01' }
{ id: 'sys-pro-01', name: '星悦·知性女声', gender: 'female', category: '职业', description: '温柔专业', voiceId: 'cosyvoice-v3-flash-sys-pro-01', defaultInstruction: '请用温柔专业的语调朗读' },
{ id: 'sys-boy-01', name: '澄澄·少男音', gender: 'male', category: '少男', description: '年轻清爽', voiceId: 'cosyvoice-v3-flash-sys-boy-01', defaultInstruction: '请用年轻清爽的语调朗读' },
{ id: 'sys-girl-01', name: '沁雪·少女音', gender: 'female', category: '少女', description: '活泼甜美', voiceId: 'cosyvoice-v3-flash-sys-girl-01', defaultInstruction: '请用活泼甜美的语调朗读' },
{ id: 'sys-man-01', name: '寰宇·男青年', gender: 'male', category: '男青年', description: '磁性沉稳', voiceId: 'cosyvoice-v3-flash-sys-man-01', defaultInstruction: '请用磁性沉稳的语调朗读' }
]
// 用户音色列表
@@ -57,7 +61,15 @@ const userVoiceCards = computed(() =>
}))
)
const displayedVoices = computed(() => userVoiceCards.value)
const displayedVoices = computed(() => {
if (voiceSource.value === 'system') {
return SYSTEM_VOICES.map(voice => ({
...voice,
source: 'system'
}))
}
return userVoiceCards.value
})
const selectedVoiceMeta = computed(() =>
displayedVoices.value.find(voice => `${voice.source}-${voice.id}` === selectedTtsVoice.value)
@@ -66,7 +78,16 @@ const selectedVoiceMeta = computed(() =>
// UI 状态
const speechRateMarks = { 0.5: '0.5x', 1: '1x', 1.5: '1.5x', 2: '2x' }
const speechRateDisplay = computed(() => `${speechRate.value.toFixed(1)}x`)
const canGenerate = computed(() => !!(synthesizedAudio.value?.fileId && uploadedVideo.value && !isGenerating.value))
// 生成数字人的条件:选中了音色 + 上传了视频 + 没有正在生成
// 注意:不需要先合成语音,可以直接使用音色配置
const canGenerate = computed(() => {
const hasText = ttsText.value.trim() // 文案必填
const hasVoice = selectedVoiceMeta.value // 必须选中音色
const hasVideo = uploadedVideo.value // 必须上传视频
const notGenerating = !isGenerating.value // 不能正在生成
return !!(hasText && hasVoice && hasVideo && notGenerating)
})
// 音色选择
const setVoiceSource = (source) => {
@@ -75,6 +96,8 @@ const setVoiceSource = (source) => {
selectedTtsVoice.value = ''
if (source === 'user' && userVoiceCards.value.length > 0) {
selectVoiceProfile(userVoiceCards.value[0])
} else if (source === 'system' && SYSTEM_VOICES.length > 0) {
selectVoiceProfile({ ...SYSTEM_VOICES[0], source: 'system' })
}
}
@@ -89,12 +112,8 @@ const playVoiceSample = async (voice) => {
if (previewLoadingVoiceId.value === voice.id || isPlayingPreview.value) {
return
}
if (voice.source === 'user' || (voice.source === 'system' && voice.voiceId)) {
return triggerVoicePreview(voice)
}
const url = voice.previewUrl || voice.fileUrl
if (!url) return message.warning('暂无可试听的音频')
playAudioPreview(url)
// 用户音色和系统音色都走实时试听流程
return triggerVoicePreview(voice)
}
const triggerVoicePreview = async (voice) => {
@@ -137,6 +156,7 @@ const triggerVoicePreview = async (voice) => {
const buildPreviewParams = (voice) => {
if (voice.source === 'user') {
// 使用voiceConfigId让后端查询数据库获取文件URL和transcriptionText
// 用户音色不传instruction
const configId = voice.rawId || extractIdFromString(voice.id)
if (!configId) {
message.error('配音配置无效')
@@ -145,15 +165,15 @@ const buildPreviewParams = (voice) => {
return {
voiceConfigId: configId,
inputText: ttsText.value, // 传递用户输入的文本
emotion: emotion.value || 'neutral',
speechRate: speechRate.value || 1.0,
audioFormat: 'mp3'
}
} else {
// 系统音色使用用户选择的instruction
return {
voiceId: voice.voiceId,
inputText: ttsText.value, // 传递用户输入的文本
emotion: emotion.value || 'neutral',
instruction: instruction.value && instruction.value !== 'neutral' ? instruction.value : (voice.defaultInstruction || '请用自然流畅的语调朗读'),
speechRate: speechRate.value || 1.0,
audioFormat: 'mp3'
}
@@ -177,11 +197,10 @@ const handleSynthesizeVoice = async () => {
const params = {
inputText: ttsText.value,
speechRate: speechRate.value,
emotion: emotion.value,
audioFormat: 'mp3'
}
// 如果是用户配音,使用voiceConfigId让后端查询
// 如果是用户配音,使用voiceConfigId让后端查询,不传instruction
if (voice.source === 'user') {
const configId = voice.rawId || extractIdFromString(voice.id)
if (!configId) {
@@ -190,14 +209,14 @@ const handleSynthesizeVoice = async () => {
}
params.voiceConfigId = configId
} else {
// 使用系统音色voiceId
// 使用系统音色voiceId和用户选择的instruction
const voiceId = voice.voiceId || voice.rawId
if (!voiceId) {
message.warning('音色配置无效')
return
}
params.voiceId = voiceId
params.model = voice.model
params.instruction = instruction.value && instruction.value !== 'neutral' ? instruction.value : (voice.defaultInstruction || '请用自然流畅的语调朗读')
}
const res = await VoiceService.synthesize(params)
@@ -206,7 +225,7 @@ const handleSynthesizeVoice = async () => {
synthesizedAudio.value = res.data
message.success('语音合成成功')
} else {
message.error(res.msg || '合成失败')
message.error(res.message || '合成失败')
}
} catch (error) {
console.error('synthesize error:', error)
@@ -289,22 +308,8 @@ const generateVideo = async () => {
currentTaskStep.value = 'prepare_files'
try {
// 1. 首先上传音频和视频文件到后端
message.loading('正在上传文件...', 0)
// 上传音频(使用合成后的音频或原始音频)
let audioFileId = null
let audioUrl = null
if (synthesizedAudio.value?.fileId) {
// 如果有已合成的音频,使用其fileId
audioFileId = synthesizedAudio.value.fileId
} else {
// 否则使用voiceConfigId让后端处理
audioFileId = voice.rawId || extractIdFromString(voice.id)
}
// 上传视频文件
// 1. 上传视频文件(只上传视频,音频由后端实时合成)
message.loading('正在上传视频...', 0)
const videoFileId = await uploadVideoFile(uploadedVideoFile.value)
if (!videoFileId) {
throw new Error('视频上传失败')
@@ -312,13 +317,15 @@ const generateVideo = async () => {
message.destroy()
// 2. 创建数字人任务
// 2. 创建数字人任务(简化:只使用voiceId,后端实时TTS)
const taskData = {
taskName: `数字人任务_${Date.now()}`,
audioFileId: audioFileId,
videoFileId: videoFileId,
// 音频由后端实时合成,使用voiceId
voiceId: voice.voiceId || voice.rawId,
inputText: ttsText.value, // 文本内容,用于TTS合成
speechRate: speechRate.value,
emotion: emotion.value,
instruction: voice.source === 'user' ? undefined : (instruction.value && instruction.value !== 'neutral' ? instruction.value : (voice.defaultInstruction || '请用自然流畅的语调朗读')),
guidanceScale: 1,
seed: 8888
}
@@ -350,10 +357,10 @@ const generateVideo = async () => {
const uploadVideoFile = async (file) => {
try {
const res = await MaterialService.uploadFile(file, 'video')
if (res.code === 0 && res.data?.id) {
return res.data.id
if (res.code === 0) {
return res.data // res.data就是文件ID
} else {
throw new Error(res.msg || '上传失败')
throw new Error(res.message || '上传失败')
}
} catch (error) {
console.error('uploadVideoFile error:', error)
@@ -528,17 +535,42 @@ const playAudioPreview = (url, options = {}) => {
const playAudioFromBase64 = (audioBase64, format = 'mp3', onEnded = null) => {
try {
previewObjectUrl && URL.revokeObjectURL(previewObjectUrl)
const byteCharacters = window.atob(audioBase64)
const byteNumbers = new Array(byteCharacters.length)
for (let i = 0; i < byteCharacters.length; i++) {
byteNumbers[i] = byteCharacters.charCodeAt(i)
// 检查缓存
const cacheKey = `${audioBase64.substring(0, 32)}_${format}` // 使用base64前32位作为缓存键
let objectUrl = audioBase64Cache.get(cacheKey)
if (!objectUrl) {
// 解码base64并创建blob
const byteCharacters = window.atob(audioBase64)
const byteNumbers = new Array(byteCharacters.length)
for (let i = 0; i < byteCharacters.length; i++) {
byteNumbers[i] = byteCharacters.charCodeAt(i)
}
const mime = format === 'mp3' ? 'audio/mpeg' : `audio/${format}`
const blob = new Blob([new Uint8Array(byteNumbers)], { type: mime })
objectUrl = URL.createObjectURL(blob)
// 管理缓存大小
if (audioBase64Cache.size >= AUDIO_CACHE_MAX_SIZE) {
// 清理最早的缓存
const firstKey = audioBase64Cache.keys().next().value
const oldUrl = audioBase64Cache.get(firstKey)
URL.revokeObjectURL(oldUrl)
audioBase64Cache.delete(firstKey)
}
// 存储到缓存
audioBase64Cache.set(cacheKey, objectUrl)
}
const mime = format === 'mp3' ? 'audio/mpeg' : `audio/${format}`
const blob = new Blob([new Uint8Array(byteNumbers)], { type: mime })
previewObjectUrl = URL.createObjectURL(blob)
// 清理旧的previewObjectUrl
if (previewObjectUrl && previewObjectUrl !== objectUrl) {
URL.revokeObjectURL(previewObjectUrl)
}
previewObjectUrl = objectUrl
playAudioPreview(previewObjectUrl, {
revokeOnEnd: true,
revokeOnEnd: false, // 缓存模式下不立即释放
onEnded: () => {
isPlayingPreview.value = false
onEnded && onEnded()
@@ -555,12 +587,20 @@ const playAudioFromBase64 = (audioBase64, format = 'mp3', onEnded = null) => {
// 生命周期
onMounted(async () => {
await voiceStore.load()
userVoiceCards.value.length > 0 && selectVoiceProfile(userVoiceCards.value[0])
// 默认选择第一个音色
if (voiceSource.value === 'user' && userVoiceCards.value.length > 0) {
selectVoiceProfile(userVoiceCards.value[0])
} else if (voiceSource.value === 'system' && SYSTEM_VOICES.length > 0) {
selectVoiceProfile({ ...SYSTEM_VOICES[0], source: 'system' })
}
})
onUnmounted(() => {
previewAudio?.pause?.()
previewAudio = null
// 清理所有缓存的ObjectURL
audioBase64Cache.forEach(url => URL.revokeObjectURL(url))
audioBase64Cache.clear()
previewObjectUrl && URL.revokeObjectURL(previewObjectUrl)
// 重置播放状态
isPlayingPreview.value = false
@@ -575,12 +615,17 @@ onUnmounted(() => {
// 监听器
watch(voiceSource, () => {
selectedTtsVoice.value = ''
userVoiceCards.value.length > 0 && selectVoiceProfile(userVoiceCards.value[0])
if (voiceSource.value === 'user' && userVoiceCards.value.length > 0) {
selectVoiceProfile(userVoiceCards.value[0])
} else if (voiceSource.value === 'system' && SYSTEM_VOICES.length > 0) {
selectVoiceProfile({ ...SYSTEM_VOICES[0], source: 'system' })
}
})
watch(() => voiceStore.profiles, () => {
voiceSource.value === 'user' && userVoiceCards.value.length > 0 &&
!selectedTtsVoice.value && selectVoiceProfile(userVoiceCards.value[0])
if (voiceSource.value === 'user' && userVoiceCards.value.length > 0 && !selectedTtsVoice.value) {
selectVoiceProfile(userVoiceCards.value[0])
}
})
watch([ttsText, selectedTtsVoice], () => {
@@ -613,7 +658,7 @@ let previewObjectUrl = ''
<div class="voice-source-toggle">
<button
v-for="source in ['user']"
v-for="source in ['user', 'system']"
:key="source"
class="source-btn"
:class="{ active: voiceSource === source }"
@@ -623,8 +668,8 @@ let previewObjectUrl = ''
</button>
</div>
<div v-if="userVoiceCards.length === 0" class="empty-voices">
还没有配音可先在"配音管理"中上传
<div v-if="displayedVoices.length === 0" class="empty-voices">
{{ voiceSource === 'user' ? '还没有配音可先在"配音管理"中上传' : '暂无可用的系统音色' }}
</div>
<div class="voice-list">
@@ -678,25 +723,17 @@ let previewObjectUrl = ''
</div>
</div>
<div class="control-group">
<div class="control-label">情感</div>
<div v-if="voiceSource === 'system'" class="control-group">
<div class="control-label">指令</div>
<div class="emotion-buttons">
<button
v-for="em in ['neutral', 'happy', 'angry', 'sad', 'scared', 'disgusted', 'surprised']"
:key="em"
v-for="inst in ['neutral', '请用自然流畅的语调朗读', '请用温柔专业的语调朗读', '请用热情洋溢的语调朗读', '请用低沉磁性的语调朗读', '请用活泼生动的语调朗读']"
:key="inst"
class="emotion-btn"
:class="{ active: emotion === em }"
@click="emotion = em"
:class="{ active: instruction === inst }"
@click="instruction = inst"
>
{{ {
neutral: '中性',
happy: '高兴',
angry: '愤怒',
sad: '悲伤',
scared: '害怕',
disgusted: '厌恶',
surprised: '惊讶'
}[em] }}
{{ inst === 'neutral' ? '中性' : inst }}
</button>
</div>
</div>