feat: 优化

This commit is contained in:
2026-02-26 18:52:09 +08:00
parent c2e4fde218
commit b76e3ff47d
17 changed files with 1027 additions and 1630 deletions

View File

@@ -0,0 +1,19 @@
import http from './http'
import { API_BASE } from '@gold/config/api'
/**
 * Point-record API.
 */
/**
 * Fetch the current user's point-change records, paginated.
 * @param {Object} params - Pagination / filter parameters
 * @param {number} params.pageNo - Page number
 * @param {number} params.pageSize - Items per page
 * @param {string} params.type - Change direction: increase/decrease
 * @param {string} params.bizType - Business type of the change
 * @returns {Promise} Resolves with the paginated record list
 */
export function getPointRecordPage(params = {}) {
  const url = `${API_BASE.APP_MEMBER}/tik/point-record/page`
  return http.get(url, { params })
}

View File

@@ -1,112 +0,0 @@
<template>
  <div class="pipeline-progress">
    <!-- State label and progress -->
    <div class="progress-header">
      <span class="state-text">{{ stateLabel }}</span>
      <span v-if="stateDescription" class="state-desc">{{ stateDescription }}</span>
    </div>
    <a-progress
      :percent="displayProgress"
      :status="progressStatus"
      :stroke-color="progressColor"
    />
    <!-- Shown only when the pipeline failed with a message -->
    <div v-if="isFailed && error" class="error-section">
      <span class="error-text">{{ error }}</span>
      <a-button size="small" @click="handleRetry">重试</a-button>
    </div>
  </div>
</template>
<script setup lang="ts">
import { computed } from 'vue'
import type { PipelineState } from '@/views/kling/hooks/pipeline/types'
import { STATE_CONFIG } from '@/views/kling/hooks/pipeline/states'
interface Props {
  state: PipelineState | string
  progress: number
  isBusy: boolean
  isReady: boolean
  isFailed: boolean
  isCompleted: boolean
  error: string | null
}
const props = defineProps<Props>()
const emit = defineEmits<{
  retry: []
  reset: []
}>()
// Config entry for the current state. May be undefined: props.state is
// typed `PipelineState | string`, so an unknown state string is possible
// and must not crash the render — hence the fallbacks below.
const stateConfig = computed(() => STATE_CONFIG[props.state as PipelineState])
// Fall back to the raw state string when no config entry exists.
const stateLabel = computed(() => stateConfig.value?.label ?? String(props.state))
const stateDescription = computed(() => stateConfig.value?.description ?? '')
// ant-design progress status: exception on failure, success on completion,
// otherwise animated "active".
const progressStatus = computed(() => {
  if (props.isFailed) return 'exception'
  if (props.isCompleted) return 'success'
  return 'active'
})
// Stroke color matching the status (red / green / blue).
const progressColor = computed(() => {
  if (props.isFailed) return '#ff4d4f'
  if (props.isCompleted) return '#52c41a'
  return '#1890ff'
})
// Failed runs render as 0% regardless of how far they got.
const displayProgress = computed(() => {
  if (props.isFailed) return 0
  return props.progress
})
/** Forward the retry click to the parent. */
function handleRetry() {
  emit('retry')
}
</script>
<style scoped lang="less">
.pipeline-progress {
  padding: 12px 16px;
  background: var(--bg-primary);
  border-radius: 8px;
  border: 1px solid var(--border-light);
  margin-bottom: 16px;
}
.progress-header {
  display: flex;
  align-items: center;
  justify-content: space-between;
  margin-bottom: 8px;
  font-size: 13px;
  .state-text {
    font-weight: 600;
    color: var(--text-primary);
  }
  .state-desc {
    color: var(--text-secondary);
  }
}
.error-section {
  display: flex;
  align-items: center;
  justify-content: space-between;
  margin-top: 12px;
  padding: 8px 12px;
  background: rgba(255, 77, 79, 0.1);
  border-radius: 6px;
  .error-text {
    flex: 1;
    color: #ff4d4f;
    font-size: 13px;
  }
}
</style>

View File

@@ -18,7 +18,7 @@
<a-button
class="preview-button"
size="small"
:disabled="!selectedVoiceId"
:disabled="!selectedVoiceId || isPlayerInitializing"
:loading="previewLoadingVoiceId === selectedVoiceId"
@click="handleSynthesize"
>
@@ -74,6 +74,7 @@ let player = null
const playerContainer = ref(null)
const audioUrl = ref('')
const currentVoiceName = ref('')
const isPlayerInitializing = ref(false)
// 默认封面图片(音频波形图标)
const defaultCover = `data:image/svg+xml;base64,${btoa(`
@@ -154,6 +155,8 @@ const handleVoiceChange = (value, option) => {
const handleSynthesize = () => {
if (!selectedVoiceId.value) return
// 防止在播放器初始化过程中重复点击
if (isPlayerInitializing.value) return
const voice = userVoiceCards.value.find(v => v.id === selectedVoiceId.value)
if (!voice) return
@@ -171,23 +174,17 @@ watch(() => props.speechRate, (newRate) => {
setSpeechRate(newRate)
}, { immediate: true })
/**
* 处理音色
*/
const handlePlayVoiceSample = (voice) => {
currentVoiceName.value = voice.name
playVoiceSample(
voice,
(data) => {
const url = data.audioUrl || data.objectUrl
if (!url) {
console.error('无效的音频数据格式', data)
return
}
if (!url) return
initPlayer(url)
},
(error) => {
console.error('音频播放失败', error)
// 音频播放失败,静默处理
},
{ autoPlay: false } // 禁用自动播放,由 APlayer 控制
)
@@ -197,31 +194,46 @@ const handlePlayVoiceSample = (voice) => {
* 初始化 APlayer
*/
const initPlayer = (url) => {
// 防止并发初始化
if (isPlayerInitializing.value) {
return
}
isPlayerInitializing.value = true
destroyPlayer()
audioUrl.value = url
nextTick(() => {
player = new APlayer({
container: playerContainer.value,
autoplay: true,
theme: '#3b82f6',
volume: 0.7,
loop: 'none',
audio: [{
name: currentVoiceName.value || '语音合成',
artist: '合成',
url: url,
cover: defaultCover
}]
})
try {
player = new APlayer({
container: playerContainer.value,
autoplay: true,
theme: '#3b82f6',
volume: 0.7,
loop: 'none',
audio: [{
name: currentVoiceName.value || '语音合成',
artist: '合成',
url: url,
cover: defaultCover
}]
})
player.on('ended', () => {
player.seek(0)
})
player.on('ended', () => {
player.seek(0)
})
player.on('error', (e) => {
console.error('APlayer 播放错误:', e)
})
player.on('error', (e) => {
console.error('APlayer 播放错误:', e)
})
player.on('canplay', () => {
isPlayerInitializing.value = false
})
} catch (e) {
console.error('APlayer 初始化失败:', e)
isPlayerInitializing.value = false
}
})
}
@@ -244,8 +256,11 @@ const downloadAudio = () => {
* 销毁播放器
*/
const destroyPlayer = () => {
isPlayerInitializing.value = false
if (player) {
try {
// 先暂停播放,防止销毁过程中出错
player.pause()
player.destroy()
} catch (e) {
console.error('销毁播放器失败:', e)
@@ -259,8 +274,6 @@ const destroyPlayer = () => {
audioUrl.value = ''
}
defineExpose({})
onMounted(async () => {
await voiceStore.refresh()
})

View File

@@ -89,15 +89,15 @@ export function useTTS(options = {}) {
}
audio.play()
.then(function() {
.then(() => {
previewAudio = audio
audio.onended = cleanup
audio.onerror = function() {
audio.onerror = () => {
cleanup()
message.error('播放失败')
}
})
.catch(function() {
.catch(() => {
cleanup()
message.error('播放失败')
})
@@ -159,11 +159,11 @@ export function useTTS(options = {}) {
playAudioPreview(previewObjectUrl, {
revokeOnEnd: false,
onEnded: function() {
onEnded() {
if (audioData.objectUrl?.startsWith('blob:')) {
URL.revokeObjectURL(audioData.objectUrl)
}
onEnded && onEnded()
onEnded?.()
}
})
}
@@ -244,7 +244,7 @@ export function useTTS(options = {}) {
} else {
resetPreviewState()
}
onSuccess && onSuccess(cachedAudio)
onSuccess?.(cachedAudio)
return
}
@@ -252,11 +252,11 @@ export function useTTS(options = {}) {
const params = buildPreviewParams(voice)
if (!params) {
resetPreviewState()
onError && onError(new Error('参数构建失败'))
onError?.(new Error('参数构建失败'))
return
}
const res = await VoiceService.preview(params)
const res = await VoiceService.synthesize(params)
if (res.code !== 0) {
message.error(res.msg || '试听失败')
resetPreviewState()
@@ -269,7 +269,7 @@ export function useTTS(options = {}) {
if (options.autoPlay !== false) {
playAudioPreview(res.data.audioUrl, {
revokeOnEnd: true,
onEnded: function() {
onEnded() {
URL.revokeObjectURL(res.data.audioUrl)
}
})
@@ -279,7 +279,7 @@ export function useTTS(options = {}) {
const audioData = await decodeAndCacheBase64(res.data.audioBase64, res.data.format, cacheKey)
resetPreviewState()
if (options.autoPlay !== false) {
playCachedAudio(audioData, function() {
playCachedAudio(audioData, () => {
URL.revokeObjectURL(audioData.objectUrl)
})
}
@@ -292,7 +292,7 @@ export function useTTS(options = {}) {
} catch (error) {
message.error('试听失败')
resetPreviewState()
onError && onError(error)
onError?.(error)
}
}
@@ -335,7 +335,7 @@ export function useTTS(options = {}) {
* 清除音频缓存
*/
function clearAudioCache() {
previewAudioCache.forEach(function(audioData) {
previewAudioCache.forEach((audioData) => {
URL.revokeObjectURL(audioData.objectUrl)
})
previewAudioCache.clear()

View File

@@ -10,17 +10,17 @@
<h3 class="card-title">输入播文案</h3>
<a-textarea
v-model:value="ttsText"
:placeholder="textareaPlaceholder"
v-model:value="store.text"
:placeholder="placeholder"
:rows="4"
:maxlength="maxTextLength"
:maxlength="4000"
:show-count="true"
class="text-input"
:bordered="false"
/>
<div class="input-meta">
<span>当前字数{{ ttsText?.length || 0 }}</span>
<span>当前字数{{ store.text?.length || 0 }}</span>
</div>
</div>
@@ -33,9 +33,9 @@
<div class="setting-group">
<label class="setting-label">选择音色</label>
<VoiceSelector
:synth-text="ttsText"
:speech-rate="speechRate"
@select="handleVoiceSelect"
:synth-text="store.text"
:speech-rate="store.speechRate"
@select="store.setVoice"
/>
</div>
@@ -44,13 +44,13 @@
<div class="model-options">
<button
class="model-btn"
:class="{ 'model-btn--active': speechRate <= 1 }"
:class="{ 'model-btn--active': store.speechRate <= 1 }"
>
标准版 (1x积分)
</button>
<button
class="model-btn model-btn--pro"
:class="{ 'model-btn--active': speechRate > 1 }"
:class="{ 'model-btn--active': store.speechRate > 1 }"
>
Pro 旗舰版 (3x积分)
<CrownFilled class="pro-icon" />
@@ -69,8 +69,8 @@
<!-- 上传新视频 -->
<div
class="video-option-card"
:class="{ 'video-option-card--selected': videoState.videoSource === 'upload' }"
@click="handleSelectUpload"
:class="{ 'video-option-card--selected': store.videoSource === 'upload' }"
@click="store.selectUploadMode"
>
<div class="video-option-icon">
<CloudUploadOutlined />
@@ -84,8 +84,8 @@
<!-- 从素材库选择 -->
<div
class="video-option-card"
:class="{ 'video-option-card--selected': videoState.videoSource === 'select' }"
@click="handleSelectFromLibrary"
:class="{ 'video-option-card--selected': store.videoSource === 'select' }"
@click="store.selectLibraryMode"
>
<div class="video-option-icon">
<PictureOutlined />
@@ -98,32 +98,32 @@
</div>
<!-- 已选择视频预览 -->
<div v-if="videoState.selectedVideo" class="selected-video">
<div v-if="store.selectedVideo" class="selected-video">
<div class="video-preview-thumb">
<img
:src="getVideoPreviewUrl(videoState.selectedVideo)"
:alt="videoState.selectedVideo.fileName"
:src="getVideoPreviewUrl(store.selectedVideo)"
:alt="store.selectedVideo.fileName"
/>
</div>
<div class="video-preview-info">
<div class="video-name">{{ videoState.selectedVideo.fileName }}</div>
<div class="video-meta">{{ formatDuration(videoState.selectedVideo.duration) }}</div>
<div class="video-name">{{ store.selectedVideo.fileName }}</div>
<div class="video-meta">{{ formatDuration(store.selectedVideo.duration) }}</div>
</div>
<button class="change-video-btn" @click="replaceVideo">更换</button>
<button class="change-video-btn" @click="store.reset">更换</button>
</div>
<!-- 上传区域 -->
<div
v-if="videoState.videoSource === 'upload'"
v-if="store.videoSource === 'upload'"
class="upload-zone"
:class="{ 'upload-zone--dragover': dragOver }"
@drop.prevent="handleDrop"
@dragover.prevent="dragOver = true"
@dragleave.prevent="dragOver = false"
>
<input ref="fileInput" type="file" accept=".mp4,.mov" class="file-input" @change="handleFileSelectWrapper" />
<input ref="fileInput" type="file" accept=".mp4,.mov" class="file-input" @change="handleFileSelect" />
<div v-if="!videoState.uploadedVideo" class="upload-placeholder">
<div v-if="!store.videoPreviewUrl" class="upload-placeholder">
<CloudUploadOutlined class="upload-icon" />
<span class="upload-text">点击上传新视频</span>
<span class="upload-hint">支持 MP4MOV ( >3)</span>
@@ -132,52 +132,51 @@
<div v-else class="upload-preview">
<video
:src="videoState.uploadedVideo"
:src="store.videoPreviewUrl"
controls
playsinline
preload="metadata"
class="preview-video-player"
@error="handleVideoError"
></video>
<p class="upload-filename">{{ videoState.videoFile?.name }}</p>
<button class="change-video-btn" @click="replaceVideo">更换</button>
<p class="upload-filename">{{ store.videoFile?.name }}</p>
<button class="change-video-btn" @click="store.reset">更换</button>
</div>
</div>
</div>
<!-- Pipeline 进度 -->
<PipelineProgress
v-if="isPipelineBusy || isPipelineReady || isPipelineFailed || isPipelineCompleted"
:state="pipelineState"
:progress="pipelineProgress"
:is-busy="isPipelineBusy"
:is-ready="isPipelineReady"
:is-failed="isPipelineFailed"
:is-completed="isPipelineCompleted"
:error="pipelineError"
@retry="retryPipeline"
@reset="resetPipeline"
/>
<!-- 进度显示 -->
<div v-if="store.isBusy || store.isFailed || store.isDone" class="progress-card">
<div class="progress-header">
<span class="progress-label">{{ store.stepLabel }}</span>
<span class="progress-percent">{{ store.progress }}%</span>
</div>
<a-progress :percent="store.progress" :status="progressStatus" :show-info="false" />
<div v-if="store.isFailed" class="error-actions">
<span class="error-text">{{ store.error }}</span>
<a-button size="small" @click="store.retry">重试</a-button>
</div>
<div v-if="store.isDone" class="success-actions">
<span class="success-text">任务已提交成功</span>
<a-button size="small" @click="store.reset">重新生成</a-button>
</div>
</div>
<!-- 操作按钮 -->
<div class="action-section">
<a-button
v-if="!isPipelineCompleted"
v-if="!store.isDone"
type="primary"
size="large"
:disabled="!canGenerate"
:loading="isPipelineBusy"
:disabled="!store.canGenerate"
:loading="store.isBusy"
block
@click="generateAudio"
@click="store.generate"
class="action-btn"
>
{{ isPipelineBusy ? pipelineStateLabel + '...' : '生成数字人视频' }}
{{ store.isBusy ? store.stepLabel + '...' : '生成数字人视频' }}
</a-button>
<div v-else class="completed-tip">
<span>任务已提交成功</span>
<a-button @click="resetPipeline" class="reset-btn">重新生成</a-button>
</div>
</div>
</section>
@@ -207,90 +206,85 @@
</div>
<!-- 视频选择器弹窗 -->
<VideoSelector v-model:open="videoState.selectorVisible" @select="handleVideoSelect" />
<VideoSelector
v-model:open="store.videoSelectorVisible"
@select="store.selectVideo"
/>
</FullWidthLayout>
</template>
<script setup lang="ts">
import { ref, onMounted } from 'vue'
import { CloudUploadOutlined, CrownFilled } from '@ant-design/icons-vue'
import { ref, computed, onMounted } from 'vue'
import { CloudUploadOutlined, CrownFilled, PictureOutlined } from '@ant-design/icons-vue'
import { useVoiceCopyStore } from '@/stores/voiceCopy'
import { useUserStore } from '@/stores/user'
import VideoSelector from '@/components/VideoSelector.vue'
import VoiceSelector from '@/components/VoiceSelector.vue'
import ResultPanel from '@/components/ResultPanel.vue'
import FullWidthLayout from '@/layouts/components/FullWidthLayout.vue'
import PipelineProgress from '@/components/PipelineProgress.vue'
// Controller Hook
import { useIdentifyFaceController } from './hooks/useIdentifyFaceController'
import { useDigitalHumanStore } from './stores/useDigitalHumanStore'
// ==================== Store ====================
const store = useDigitalHumanStore()
const voiceStore = useVoiceCopyStore()
const userStore = useUserStore()
// ==================== 本地状态 ====================
const dragOver = ref(false)
// ==================== 初始化 Controller ====================
// Controller 内部直接创建和管理两个子 Hook
const controller = useIdentifyFaceController()
// 解构 controller 以简化模板调用
const {
// 语音生成相关
ttsText,
speechRate,
generateAudio,
// 数字人生成相关
videoState,
getVideoPreviewUrl,
// 计算属性
canGenerate,
maxTextLength,
textareaPlaceholder,
// Pipeline 状态(单一状态源)
pipelineState,
pipelineStateLabel,
isPipelineBusy,
isPipelineReady,
isPipelineFailed,
isPipelineCompleted,
pipelineProgress,
pipelineError,
retryPipeline,
resetPipeline,
// 事件处理方法
handleVoiceSelect,
handleDrop,
handleSelectUpload,
handleSelectFromLibrary,
handleVideoSelect,
handleVideoLoaded,
handleVideoError,
replaceVideo,
// UI 辅助方法
formatDuration,
} = controller
// ==================== 生命周期 ====================
// 引用 fileInput 用于手动触发点击
const fileInput = ref<HTMLInputElement | null>(null)
// 触发文件选择
const triggerFileSelect = () => {
// ==================== 计算属性 ====================
const placeholder = computed(() => {
if (store.faceDurationMs > 0) {
const maxChars = Math.floor(store.faceDurationMs / 1000 * 4)
return `请输入文案,建议不超过${maxChars}字以确保与视频匹配`
}
return '请输入你想让角色说话的内容'
})
const progressStatus = computed(() => {
if (store.isFailed) return 'exception'
if (store.isDone) return 'success'
return 'active'
})
// ==================== 方法 ====================
function triggerFileSelect() {
fileInput.value?.click()
}
// 覆盖 controller 中的方法,使用 ref
const handleFileSelectWrapper = (e: Event) => {
controller.handleFileSelect(e)
function handleFileSelect(e: Event) {
const file = (e.target as HTMLInputElement).files?.[0]
if (file) store.handleFileUpload(file)
}
function handleDrop(e: DragEvent) {
dragOver.value = false
const file = e.dataTransfer?.files[0]
if (file) store.handleFileUpload(file)
}
function handleVideoLoaded(_url: string) {
// 可用于更新预览
}
function formatDuration(seconds: number): string {
if (!seconds) return '--:--'
const mins = Math.floor(seconds / 60)
const secs = Math.floor(seconds % 60)
return `${String(mins).padStart(2, '0')}:${String(secs).padStart(2, '0')}`
}
function getVideoPreviewUrl(video: any): string {
if (video.coverBase64) {
return video.coverBase64.startsWith('data:')
? video.coverBase64
: `data:image/jpeg;base64,${video.coverBase64}`
}
return video.previewUrl || video.coverUrl || ''
}
// ==================== 生命周期 ====================
onMounted(async () => {
await Promise.all([
voiceStore.refresh(),
@@ -309,10 +303,9 @@ onMounted(async () => {
padding: 24px;
}
// 布局容器
.config-panel {
flex: 1;
padding:0 20px;
padding: 0 20px;
max-width: 100%;
}
@@ -333,7 +326,6 @@ onMounted(async () => {
}
}
// 配置卡片
.config-card {
position: relative;
background: #fff;
@@ -369,7 +361,6 @@ onMounted(async () => {
padding-left: 44px;
}
// 文案输入
.text-input {
width: 100%;
@@ -384,10 +375,6 @@ onMounted(async () => {
background: #fff;
box-shadow: 0 0 0 1px #E2E8F0;
}
:deep(.ant-input-textarea-show-count) {
bottom: 8px;
}
}
}
@@ -399,12 +386,6 @@ onMounted(async () => {
margin-top: 12px;
}
.meta-value {
font-weight: 600;
color: #1E293B;
}
// 语音设置
.voice-settings {
display: grid;
grid-template-columns: 1fr 1fr;
@@ -468,7 +449,6 @@ onMounted(async () => {
color: #EAB308;
}
// 视频选项
.video-options {
display: grid;
grid-template-columns: 1fr 1fr;
@@ -518,22 +498,21 @@ onMounted(async () => {
.video-option-content {
flex: 1;
h4 {
font-size: 13px;
font-weight: 500;
color: #1E293B;
margin: 0 0 4px 0;
}
p {
font-size: 11px;
color: #94A3B8;
margin: 0;
}
}
.video-option-content h4 {
font-size: 13px;
font-weight: 500;
color: #1E293B;
margin: 0 0 4px 0;
}
.video-option-content p {
font-size: 11px;
color: #94A3B8;
margin: 0;
}
// 已选择视频
.selected-video {
display: flex;
align-items: center;
@@ -593,7 +572,6 @@ onMounted(async () => {
}
}
// 上传区域
.upload-zone {
min-height: 160px;
display: flex;
@@ -643,10 +621,9 @@ onMounted(async () => {
font-size: 12px;
font-weight: 500;
color: #3B82F6;
border: none;
border: 1px solid #3B82F6;
background: transparent;
cursor: pointer;
border: 1px solid #3B82F6;
border-radius: 6px;
transition: all 0.2s ease;
@@ -681,7 +658,61 @@ onMounted(async () => {
border-radius: 6px;
}
// 操作按钮区
// 进度卡片
.progress-card {
background: #fff;
border: 1px solid #E2E8F0;
border-radius: 12px;
padding: 16px;
margin-bottom: 16px;
}
.progress-header {
display: flex;
justify-content: space-between;
margin-bottom: 8px;
}
.progress-label {
font-size: 14px;
font-weight: 500;
color: #1E293B;
}
.progress-percent {
font-size: 14px;
color: #64748B;
}
.error-actions {
display: flex;
align-items: center;
justify-content: space-between;
margin-top: 12px;
padding-top: 12px;
border-top: 1px solid #FEE2E2;
}
.error-text {
font-size: 13px;
color: #DC2626;
}
.success-actions {
display: flex;
align-items: center;
justify-content: space-between;
margin-top: 12px;
padding-top: 12px;
border-top: 1px solid #86EFAC;
}
.success-text {
font-size: 13px;
color: #166534;
}
// 操作按钮
.action-section {
margin-top: 20px;
padding-top: 16px;
@@ -708,35 +739,6 @@ onMounted(async () => {
}
}
.completed-tip {
display: flex;
align-items: center;
justify-content: center;
gap: 16px;
padding: 12px 16px;
background: #F0FDF4;
border: 1px solid #86EFAC;
border-radius: 8px;
color: #166534;
font-size: 14px;
font-weight: 500;
.reset-btn {
padding: 4px 12px;
font-size: 13px;
color: #3B82F6;
border: 1px solid #3B82F6;
border-radius: 4px;
background: transparent;
cursor: pointer;
transition: all 0.2s;
&:hover {
background: rgba(59, 130, 246, 0.1);
}
}
}
// 预览面板
.preview-card {
position: sticky;
@@ -797,7 +799,7 @@ onMounted(async () => {
color: #64748B;
}
.meta-row .meta-value {
.meta-value {
font-weight: 600;
color: #1E293B;
}

View File

@@ -1,124 +0,0 @@
/**
 * @fileoverview State-machine configuration: per-state display data and
 * small predicates over the pipeline state set.
 */
import type { PipelineState, StateConfig } from './types'
/**
 * Display configuration (label, progress percent, description) for every
 * pipeline state.
 */
export const STATE_CONFIG: Record<PipelineState, StateConfig> = {
  idle: { label: '等待开始', progress: 0, description: '请先选择视频并输入文案' },
  uploading: { label: '上传视频中', progress: 15, description: '正在上传视频文件...' },
  recognizing: { label: '识别人脸中', progress: 35, description: '正在分析视频中的人脸信息...' },
  generating: { label: '生成配音中', progress: 55, description: '正在合成语音...' },
  validating: { label: '校验时长中', progress: 70, description: '正在校验音频与视频时长...' },
  ready: { label: '准备就绪', progress: 80, description: '校验通过,可以创建数字人视频' },
  creating: { label: '创建任务中', progress: 95, description: '正在提交数字人视频生成任务...' },
  completed: { label: '已完成', progress: 100, description: '任务已提交成功' },
  failed: { label: '失败', progress: 0, description: '操作失败,请重试' },
}
/**
 * Linear ordering of states (drives step-indicator rendering).
 * Note: 'failed' is deliberately absent — it is not a step.
 */
export const STATE_ORDER: PipelineState[] = [
  'idle',
  'uploading',
  'recognizing',
  'generating',
  'validating',
  'ready',
  'creating',
  'completed',
]
/**
 * States in which the pipeline is actively working.
 */
export const BUSY_STATES: PipelineState[] = [
  'uploading',
  'recognizing',
  'generating',
  'validating',
  'creating',
]
/**
 * Terminal states — no further transitions happen from these.
 */
export const TERMINAL_STATES: PipelineState[] = [
  'completed',
  'failed',
]
/**
 * Position of a state within STATE_ORDER (-1 when absent, e.g. 'failed').
 */
export function getStateIndex(state: PipelineState): number {
  return STATE_ORDER.findIndex((s) => s === state)
}
/**
 * Progress percentage configured for a state.
 */
export function getStateProgress(state: PipelineState): number {
  const { progress } = STATE_CONFIG[state]
  return progress
}
/**
 * Whether the given state is one of the busy (working) states.
 */
export function isBusyState(state: PipelineState): boolean {
  return BUSY_STATES.some((s) => s === state)
}
/**
 * Whether the given state is terminal (completed or failed).
 */
export function isTerminalState(state: PipelineState): boolean {
  return TERMINAL_STATES.some((s) => s === state)
}
/**
 * Retrying is only permitted from the failed state.
 */
export function canRetryFrom(state: PipelineState): boolean {
  return state === 'failed'
}

View File

@@ -1,126 +0,0 @@
/**
* @fileoverview 数字人生成流程状态机 - 类型定义
*/
/**
* 状态机所有可能的状态
*/
export type PipelineState =
| 'idle' // 空闲
| 'uploading' // 上传视频中
| 'recognizing' // 人脸识别中
| 'generating' // 生成配音中
| 'validating' // 校验时长中
| 'ready' // 准备就绪
| 'creating' // 创建任务中
| 'completed' // 已完成
| 'failed' // 失败
/**
* 状态配置
*/
export interface StateConfig {
/** 状态标签 */
label: string
/** 进度百分比 */
progress: number
/** 描述 */
description: string
/** 图标(可选) */
icon?: string
}
/**
* 步骤执行结果
*/
export interface StepResult<T = any> {
/** 是否成功 */
success: boolean
/** 返回数据 */
data?: T
/** 错误信息 */
error?: Error
}
/**
* Pipeline 上下文数据
*/
export interface PipelineContext {
/** 视频文件 */
videoFile: File | null
/** 已选择的视频 */
selectedVideo: any
/** 文案内容 */
text: string
/** 音色 */
voice: any
/** 语速 */
speechRate: number
/** 视频文件ID */
videoFileId: string | number | null
/** 会话ID */
sessionId: string
/** 人脸ID */
faceId: string
/** 人脸开始时间 */
faceStartTime: number
/** 人脸结束时间 */
faceEndTime: number
/** 音频 Base64 */
audioBase64: string
/** 音频格式 */
audioFormat: string
/** 音频时长(毫秒) */
audioDurationMs: number
/** 视频时长(毫秒) */
videoDurationMs: number
/** 校验是否通过 */
validationPassed: boolean
}
/**
* Pipeline 执行参数
*/
export interface PipelineParams {
videoFile: File | null
selectedVideo: any
text: string
voice: any
speechRate: number
}
/**
* Pipeline 选项配置
*/
export interface PipelineOptions {
/** 上传视频 */
uploadVideo: (file: File) => Promise<string | number>
/** 从库中识别 */
recognizeFromLibrary: (video: any) => Promise<any>
/** 识别已上传视频 */
recognizeUploaded: (fileId: string | number) => Promise<any>
/** 生成音频 */
generateAudio: (text: string, voice: any, speechRate: number) => Promise<{
audioBase64: string
format?: string
durationMs?: number
}>
/** 创建任务 */
createTask: (data: any) => Promise<void>
}
/**
* 状态机执行状态
*/
export interface ExecutionState {
/** 当前状态 */
current: PipelineState
/** 历史状态 */
history: PipelineState[]
/** 上下文数据 */
context: Partial<PipelineContext>
/** 是否可以继续下一步 */
canNext: boolean
/** 是否可以重试 */
canRetry: boolean
}

View File

@@ -1,293 +0,0 @@
/**
* @fileoverview 极简状态机 Hook - 数字人生成流程
*
* 设计理念:
* 1. 简单直观 - 用普通 JS/TS 代码,无需学习复杂概念
* 2. 易于调试 - 打断点即可查看状态
* 3. 功能完整 - 支持状态管理、进度显示、错误处理、重试
*/
import { ref, computed } from 'vue'
import { message } from 'ant-design-vue'
import type { LipSyncTaskData } from '../../types/identify-face'
import { createLipSyncTask } from '@/api/kling'
import type {
PipelineState,
PipelineContext,
PipelineParams,
PipelineOptions,
ExecutionState,
} from './types'
import {
STATE_CONFIG,
getStateIndex,
isBusyState,
isTerminalState,
canRetryFrom,
} from './states'
/**
* 初始上下文
*/
const INITIAL_CONTEXT: Partial<PipelineContext> = {
videoFile: null,
selectedVideo: null,
text: '',
voice: null,
speechRate: 1,
videoFileId: null,
sessionId: '',
faceId: '',
faceStartTime: 0,
faceEndTime: 0,
audioBase64: '',
audioFormat: 'mp3',
audioDurationMs: 0,
videoDurationMs: 0,
validationPassed: false,
}
/**
* 极简状态机 Hook
*/
export function useSimplePipeline(options: PipelineOptions) {
// ========== 状态管理 ==========
const state = ref<PipelineState>('idle')
const context = ref<Partial<PipelineContext>>({ ...INITIAL_CONTEXT })
const error = ref<string | null>(null)
const history = ref<PipelineState[]>(['idle'])
// ========== 计算属性 ==========
const stateLabel = computed(() => STATE_CONFIG[state.value].label)
const stateDescription = computed(() => STATE_CONFIG[state.value].description)
const progress = computed(() => STATE_CONFIG[state.value].progress)
const currentStepIndex = computed(() => getStateIndex(state.value))
const isBusy = computed(() => isBusyState(state.value))
const isReady = computed(() => state.value === 'ready')
const isFailed = computed(() => state.value === 'failed')
const isCompleted = computed(() => state.value === 'completed')
const isTerminal = computed(() => isTerminalState(state.value))
const canRetry = computed(() => canRetryFrom(state.value))
// ========== 内部方法 ==========
/**
* 更新状态
*/
function setState(newState: PipelineState) {
const oldState = state.value
state.value = newState
history.value.push(newState)
console.log(`[Pipeline] ${oldState} -> ${newState}`)
}
/**
* 设置错误状态
*/
function setError(err: Error | string) {
const errorMsg = typeof err === 'string' ? err : err.message
error.value = errorMsg
setState('failed')
message.error(errorMsg)
}
/**
* 执行步骤(带错误处理)
*/
async function executeStep<T>(
newState: PipelineState,
fn: () => Promise<T>
): Promise<T> {
setState(newState)
try {
return await fn()
} catch (err) {
setError(err as Error)
throw err
}
}
// ========== 公开方法 ==========
/**
* 运行完整流程(到 ready 状态)
*/
async function run(params: PipelineParams): Promise<void> {
// 重置上下文数据,但保持状态在即将开始工作的状态
context.value = { ...INITIAL_CONTEXT }
error.value = null
history.value = ['idle']
// 立即设置忙碌状态,让 UI 显示 loading
// 根据是否有上传文件决定初始状态
const initialState: PipelineState = params.videoFile && !params.selectedVideo
? 'uploading'
: 'recognizing'
setState(initialState)
try {
// 保存参数到上下文
context.value.videoFile = params.videoFile
context.value.selectedVideo = params.selectedVideo
context.value.text = params.text
context.value.voice = params.voice
context.value.speechRate = params.speechRate
// 步骤1: 上传视频(如果是上传模式)
if (params.videoFile && !params.selectedVideo) {
try {
const fileId = await options.uploadVideo(params.videoFile!)
context.value.videoFileId = fileId
} catch (err) {
setError(err as Error)
throw err
}
} else if (params.selectedVideo) {
context.value.videoFileId = params.selectedVideo.fileId
}
// 步骤2: 识别人脸
setState('recognizing')
let recognizeData
try {
recognizeData = params.selectedVideo
? await options.recognizeFromLibrary(params.selectedVideo)
: await options.recognizeUploaded(context.value.videoFileId!)
} catch (err) {
setError(err as Error)
throw err
}
context.value.sessionId = recognizeData.sessionId
context.value.faceId = recognizeData.faceId
context.value.faceStartTime = recognizeData.startTime || 0
context.value.faceEndTime = recognizeData.endTime || 0
context.value.videoDurationMs = recognizeData.duration || 0
// 步骤3: 生成音频
setState('generating')
let audioData
try {
audioData = await options.generateAudio(params.text, params.voice, params.speechRate)
} catch (err) {
setError(err as Error)
throw err
}
context.value.audioBase64 = audioData.audioBase64
context.value.audioFormat = audioData.format || 'mp3'
context.value.audioDurationMs = audioData.durationMs || 0
// 步骤4: 校验时长
setState('validating')
const videoDurationMs = context.value.videoDurationMs ?? 0
if (context.value.audioDurationMs > videoDurationMs) {
const errorMsg = `校验失败:音频时长(${(context.value.audioDurationMs / 1000).toFixed(1)}秒) 超过人脸时长(${(videoDurationMs / 1000).toFixed(1)}秒)`
setError(new Error(errorMsg))
return
}
context.value.validationPassed = true
// 步骤5: 自动创建任务(校验通过后直接继续)
setState('creating')
const taskData: LipSyncTaskData = {
taskName: `数字人任务_${Date.now()}`,
videoFileId: context.value.videoFileId!,
inputText: context.value.text!,
speechRate: context.value.speechRate!,
volume: 0,
guidanceScale: 1,
seed: 8888,
kling_session_id: context.value.sessionId!,
kling_face_id: context.value.faceId!,
kling_face_start_time: context.value.faceStartTime!,
kling_face_end_time: context.value.faceEndTime!,
ai_provider: 'kling',
voiceConfigId: context.value.voice!.rawId || context.value.voice!.id.match(/[\w-]+$/)?.[0] || context.value.voice!.id,
pre_generated_audio: {
audioBase64: context.value.audioBase64!,
format: context.value.audioFormat!,
},
sound_end_time: context.value.audioDurationMs!,
}
try {
const res = await createLipSyncTask(taskData)
if (res.code !== 0) {
throw new Error(res.msg || '任务创建失败')
}
setState('completed')
message.success('任务已提交,请在任务中心查看生成进度')
} catch (err) {
setError(err as Error)
}
} catch {
// 错误已在各步骤中处理
}
}
/**
* 重试(从 failed 状态恢复)
*/
function retry(): void {
if (!canRetry.value) {
message.warning('当前状态无法重试')
return
}
error.value = null
// 回到 idle 重新开始
setState('idle')
}
/**
* 重置到初始状态
*/
function reset(): void {
state.value = 'idle'
context.value = { ...INITIAL_CONTEXT }
error.value = null
history.value = ['idle']
}
/**
* 获取执行状态(用于调试)
*/
function getExecutionState(): ExecutionState {
return {
current: state.value,
history: [...history.value],
context: { ...context.value },
canNext: state.value === 'ready',
canRetry: canRetry.value,
}
}
// ========== 返回 API ==========
return {
// 状态
state,
context,
error,
history,
// 计算属性
stateLabel,
stateDescription,
progress,
currentStepIndex,
isBusy,
isReady,
isFailed,
isCompleted,
isTerminal,
canRetry,
// 方法
run,
retry,
reset,
getExecutionState,
}
}

View File

@@ -1,171 +0,0 @@
/**
* @fileoverview useDigitalHumanGeneration Hook - 数字人生成逻辑
*
* 重构后:不管理识别状态,只提供数据和操作方法
* 状态由 Pipeline 统一管理
*/
import { ref, computed } from 'vue'
import { message } from 'ant-design-vue'
import type {
VideoState,
IdentifyResult,
Video,
} from '../types/identify-face'
import { identifyUploadedVideo, uploadAndIdentifyVideo } from '@/api/kling'
import { useUpload } from '@/composables/useUpload'
export function useDigitalHumanGeneration() {
// ========== 状态 ==========
const videoState = ref<VideoState>({
uploadedVideo: '',
videoFile: null,
previewVideoUrl: '',
selectedVideo: null,
fileId: null,
videoSource: null,
selectorVisible: false,
})
// 识别结果数据(不含状态标志)
const identifyResult = ref<IdentifyResult>({
sessionId: '',
faceId: '',
faceStartTime: 0,
faceEndTime: 0,
videoFileId: null,
})
const { upload } = useUpload()
// ========== 计算属性 ==========
const faceDuration = computed(function() {
return identifyResult.value.faceEndTime - identifyResult.value.faceStartTime
})
const hasVideo = computed(function() {
return !!videoState.value.uploadedVideo || !!videoState.value.selectedVideo
})
const isIdentified = computed(function() {
return !!identifyResult.value.sessionId
})
// ========== 方法 ==========
/**
 * Handle a locally-chosen video file for the "upload" source.
 *
 * Validates the extension (MP4/MOV only), swaps the preview to a fresh
 * object URL for the new file, clears any library selection, and resets
 * the previous recognition result (via resetIdentifyResult).
 *
 * @param file - File picked via the file input or drag-and-drop.
 */
async function handleFileUpload(file: File): Promise<void> {
  // Only .mp4 / .mov are accepted (case-insensitive extension check).
  if (!file.name.match(/\.(mp4|mov)$/i)) {
    message.error('仅支持 MP4 和 MOV')
    return
  }
  // Revoke the previous preview's blob URL to avoid leaking object URLs.
  if (videoState.value.uploadedVideo && videoState.value.uploadedVideo.startsWith('blob:')) {
    URL.revokeObjectURL(videoState.value.uploadedVideo)
  }
  videoState.value.videoFile = file
  videoState.value.uploadedVideo = URL.createObjectURL(file)
  // Switching to upload mode invalidates any library selection and any
  // earlier face-recognition result.
  videoState.value.selectedVideo = null
  videoState.value.previewVideoUrl = ''
  videoState.value.videoSource = 'upload'
  resetIdentifyResult()
}
async function handleVideoSelect(video: Video): Promise<void> {
  // Stage the library pick and close the selector dialog.
  const vs = videoState.value
  vs.videoFile = null
  vs.videoSource = 'select'
  vs.selectedVideo = video
  vs.uploadedVideo = video.fileUrl
  vs.selectorVisible = false
  // Drop stale recognition data, then remember the library file id.
  resetIdentifyResult()
  identifyResult.value.videoFileId = video.fileId
}
/**
 * Run face recognition for the currently chosen video.
 *
 * Two paths: a library video is sent straight to the identify endpoint,
 * while a freshly uploaded file goes through upload + identify in one call.
 * The hook's `identifyResult` ref is mutated in place and a shallow copy is
 * returned so the pipeline can consume the result without holding the ref.
 *
 * @returns snapshot of the recognition result
 * @throws Error when no video has been selected yet
 */
async function performFaceRecognition(): Promise<IdentifyResult> {
  const hasUploadFile = videoState.value.videoFile
  const hasSelectedVideo = videoState.value.selectedVideo
  if (!hasUploadFile && !hasSelectedVideo) {
    throw new Error('请先选择视频')
  }
  if (hasSelectedVideo) {
    // Library video: already uploaded, call the identify endpoint directly.
    const res = await identifyUploadedVideo(hasSelectedVideo) as {
      success: boolean;
      data: { sessionId: string; faceId: string | null; startTime: number; endTime: number }
    }
    identifyResult.value.videoFileId = hasSelectedVideo.fileId
    identifyResult.value.sessionId = res.data.sessionId
    identifyResult.value.faceId = res.data.faceId || ''
    // NOTE(review): start/end look like milliseconds (faceDuration is divided
    // by 1000 for display elsewhere in this feature) — confirm with the API.
    identifyResult.value.faceStartTime = res.data.startTime || 0
    identifyResult.value.faceEndTime = res.data.endTime || 0
  } else {
    // New upload: uploadAndIdentifyVideo performs upload + recognition in one request.
    const file = hasUploadFile!
    const res = await uploadAndIdentifyVideo(file) as {
      success: boolean;
      data: { fileId: string; sessionId: string; faceId: string | null; startTime: number; endTime: number }
    }
    identifyResult.value.videoFileId = res.data.fileId
    identifyResult.value.sessionId = res.data.sessionId
    identifyResult.value.faceId = res.data.faceId || ''
    identifyResult.value.faceStartTime = res.data.startTime || 0
    identifyResult.value.faceEndTime = res.data.endTime || 0
  }
  return { ...identifyResult.value }
}
function resetVideoState(): void {
  // Revoke blob: URLs so replaced uploads do not leak memory.
  const currentUrl = videoState.value.uploadedVideo
  if (currentUrl.startsWith('blob:')) {
    URL.revokeObjectURL(currentUrl)
  }
  const vs = videoState.value
  vs.uploadedVideo = ''
  vs.previewVideoUrl = ''
  vs.videoFile = null
  vs.selectedVideo = null
  vs.fileId = null
  vs.videoSource = null
  vs.selectorVisible = false
  resetIdentifyResult()
}
function getVideoPreviewUrl(video: Video): string {
  const { coverBase64, previewUrl, coverUrl } = video
  // Inline covers win; add the data-URL header when the API returned raw base64.
  if (coverBase64) {
    if (coverBase64.startsWith('data:')) return coverBase64
    return `data:image/jpeg;base64,${coverBase64}`
  }
  // Otherwise fall back to remote preview/cover, then a static placeholder SVG.
  return previewUrl || coverUrl || 'data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjAwIiBoZWlnaHQ9IjExMCIgdmlld0JveD0iMCAwIDIwMCAxMTAiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxyZWN0IHdpZHRoPSIyMDAiIGhlaWdodD0iMTEwIiBmaWxsPSIjMzc0MTUxIi8+CjxwYXRoIGQ9Ik04NSA0NUwxMTUgNjVMMTA1IDg1TDc1IDc1TDg1IDQ1WiIgZmlsbD0iIzU3MjY1MSIvPgo8L3N2Zz4K'
}
/**
 * Clear all face-recognition data.
 *
 * Bug fix: faceStartTime/faceEndTime were previously left untouched, so a
 * stale non-zero faceDuration survived a reset and leaked into the next
 * video's suggested text length and audio-vs-face duration validation.
 */
function resetIdentifyResult(): void {
  identifyResult.value.sessionId = ''
  identifyResult.value.faceId = ''
  identifyResult.value.faceStartTime = 0
  identifyResult.value.faceEndTime = 0
  identifyResult.value.videoFileId = null
}
return {
videoState,
identifyResult,
hasVideo,
isIdentified,
faceDuration,
handleFileUpload,
handleVideoSelect,
performFaceRecognition,
resetVideoState,
resetIdentifyResult,
getVideoPreviewUrl,
}
}

View File

@@ -1,386 +0,0 @@
/**
* @fileoverview useIdentifyFaceController Hook - 主控制器(重构版)
*
* 设计理念:
* - 所有操作统一通过 Pipeline 状态机
* - 移除独立的 identifyState使用 pipeline 状态
* - 点击"生成配音" → 运行到 ready 状态
* - 点击"生成数字人视频" → 从 ready 继续 → completed
*
* 模块依赖关系:
* ┌─────────────────────────────────────────────────┐
* │ useIdentifyFaceController │
* │ ┌──────────────┐ ┌──────────────┐ ┌───────────┐│
* │ │ Voice │ │ Digital │ │ Pipeline ││
* │ │ Generation │ │ Human │ │ ││
* │ │ │ │ Generation │ │ 状态机 ││
* │ └──────────────┘ └──────────────┘ └───────────┘│
* └─────────────────────────────────────────────────┘
*/
import { computed } from 'vue'
import { message } from 'ant-design-vue'
import type {
VoiceMeta,
} from '../types/identify-face'
import { useVoiceGeneration } from './useVoiceGeneration'
import { useDigitalHumanGeneration } from './useDigitalHumanGeneration'
import { useSimplePipeline } from './pipeline/useSimplePipeline'
// ==================== 常量 ====================
const SPEECH_RATE_MARKS = { 0.5: '0.5x', 1: '1x', 1.5: '1.5x', 2: '2x' }
const MAX_TEXT_LENGTH = 4000
/**
* 主控制器 Hook
*/
export function useIdentifyFaceController() {
// 子 Hooks
const voice = useVoiceGeneration()
const digitalHuman = useDigitalHumanGeneration()
// Pipeline 流程配置
const pipeline = useSimplePipeline({
uploadVideo: async (_file: File) => {
// 上传已经在 handleFileUpload 中处理
return digitalHuman.identifyResult.value.videoFileId || ''
},
recognizeFromLibrary: async (video: any) => {
await digitalHuman.handleVideoSelect(video)
const result = await digitalHuman.performFaceRecognition()
return {
sessionId: result.sessionId,
faceId: result.faceId,
startTime: result.faceStartTime,
endTime: result.faceEndTime,
duration: digitalHuman.faceDuration.value,
}
},
recognizeUploaded: async (_fileId: string | number) => {
const result = await digitalHuman.performFaceRecognition()
return {
sessionId: result.sessionId,
faceId: result.faceId,
startTime: result.faceStartTime,
endTime: result.faceEndTime,
duration: digitalHuman.faceDuration.value,
}
},
generateAudio: async (text: string, voiceMeta: any, speechRate: number) => {
voice.ttsText.value = text
voice.selectedVoiceMeta.value = voiceMeta
voice.speechRate.value = speechRate
await voice.generateAudio()
const audio = voice.audioState.value.generated!
return {
audioBase64: audio.audioBase64,
format: audio.format || 'mp3',
durationMs: voice.audioState.value.durationMs,
}
},
createTask: async () => {
// 任务创建在 Pipeline 中处理
},
})
// ==================== 计算属性 ====================
/** 是否可以生成数字人视频 */
const canGenerate = computed((): boolean => {
// Pipeline 运行中禁用
if (pipeline.isBusy.value) return false
const hasText = voice.ttsText.value.trim()
const hasVoice = voice.selectedVoiceMeta.value
const hasVideo = digitalHuman.videoState.value.uploadedVideo || digitalHuman.videoState.value.selectedVideo
const hasBasicConfig = hasText && hasVoice && hasVideo
// 未识别或未到 ready 状态需要基础配置
if (!pipeline.isReady.value) return !!hasBasicConfig
// 已到 ready 状态可以生成
return true
})
/** 最大文本长度(根据人脸时长动态计算) */
const maxTextLength = computed(() => {
const faceDuration = digitalHuman.faceDuration.value
if (faceDuration <= 0) return MAX_TEXT_LENGTH
return Math.min(MAX_TEXT_LENGTH, Math.floor(voice.suggestedMaxChars.value * 1.2))
})
/** 文本框占位符提示 */
const textareaPlaceholder = computed(() => {
const faceDuration = digitalHuman.faceDuration.value
if (faceDuration > 0) {
return `请输入文案,建议不超过${voice.suggestedMaxChars.value}字以确保与视频匹配`
}
return '请输入你想让角色说话的内容'
})
/** 语速显示文本 */
const speechRateDisplay = computed(() => `${voice.speechRate.value.toFixed(1)}x`)
/** 人脸时长显示(秒) */
const faceDurationSec = computed(() => (digitalHuman.faceDuration.value / 1000).toFixed(1))
/** 音频时长显示(秒) */
const audioDurationSec = computed(() => (voice.audioState.value.durationMs / 1000).toFixed(1))
/** 音频播放 URL */
const audioUrl = computed(() => {
const audio = voice.audioState.value.generated
if (!audio) return ''
return audio.audioBase64 ? `data:audio/mp3;base64,${audio.audioBase64}` : audio.audioUrl || ''
})
/**
* 校验是否通过
* 规则:音频时长 <= 人脸时长
*/
const validationPassed = computed(() => {
const faceDuration = digitalHuman.faceDuration.value
const audioDuration = voice.audioState.value.durationMs
return audioDuration <= faceDuration
})
// ==================== 业务方法 ====================
/**
* 重置所有状态
*/
function resetAllStates(): void {
voice.resetAudioState()
digitalHuman.resetVideoState()
pipeline.reset()
}
/**
* 生成配音 - 运行 Pipeline 到 ready 状态
*/
async function generateAudio(): Promise<void> {
const hasVideo = digitalHuman.videoState.value.uploadedVideo || digitalHuman.videoState.value.selectedVideo
const hasText = voice.ttsText.value.trim()
const hasVoice = voice.selectedVoiceMeta.value
if (!hasText) {
message.warning('请输入文案内容')
return
}
if (!hasVoice) {
message.warning('请选择音色')
return
}
if (!hasVideo) {
message.warning('请先选择视频')
return
}
try {
// 运行流程到 ready 状态(包含识别、生成、校验)
await pipeline.run({
videoFile: digitalHuman.videoState.value.videoFile,
selectedVideo: digitalHuman.videoState.value.selectedVideo,
text: voice.ttsText.value,
voice: voice.selectedVoiceMeta.value,
speechRate: voice.speechRate.value,
})
} catch {
// 错误已在 Pipeline 中处理
}
}
/**
* 生成数字人视频 - 从 ready 状态继续到 completed
*/
async function generateDigitalHuman(): Promise<void> {
if (!canGenerate.value) {
message.warning('请先完成配置')
return
}
const text = voice.ttsText.value.trim()
const voiceMeta = voice.selectedVoiceMeta.value
if (!text) {
message.warning('请输入文案内容')
return
}
if (!voiceMeta) {
message.warning('请选择音色')
return
}
try {
// 如果还没到 ready 状态,先运行到 ready
if (!pipeline.isReady.value) {
await pipeline.run({
videoFile: digitalHuman.videoState.value.videoFile,
selectedVideo: digitalHuman.videoState.value.selectedVideo,
text,
voice: voiceMeta,
speechRate: voice.speechRate.value,
})
}
// 如果到达 ready 状态,创建任务
if (pipeline.isReady.value) {
await pipeline.createTask()
// 任务提交成功后,重置所有状态
resetAllStates()
}
} catch {
// 错误已在 Pipeline 中处理
}
}
/**
* 更换视频
*/
function replaceVideo(): void {
digitalHuman.resetVideoState()
voice.resetAudioState()
pipeline.reset()
}
// ==================== 事件处理方法 ====================
function handleVoiceSelect(voiceMeta: VoiceMeta): void {
voice.selectedVoiceMeta.value = voiceMeta
}
function handleFileSelect(event: Event): void {
const file = (event.target as HTMLInputElement).files?.[0]
if (file) digitalHuman.handleFileUpload(file)
}
function handleDrop(event: DragEvent): void {
event.preventDefault()
const file = event.dataTransfer?.files[0]
if (file) digitalHuman.handleFileUpload(file)
}
function triggerFileSelect(): void {
document.querySelector<HTMLInputElement>('input[type="file"]')?.click()
}
function handleSelectUpload(): void {
digitalHuman.videoState.value.videoSource = 'upload'
digitalHuman.videoState.value.selectedVideo = null
digitalHuman.resetIdentifyResult()
pipeline.reset()
}
function handleSelectFromLibrary(): void {
digitalHuman.videoState.value.videoSource = 'select'
digitalHuman.videoState.value.videoFile = null
digitalHuman.videoState.value.uploadedVideo = ''
digitalHuman.videoState.value.selectorVisible = true
pipeline.reset()
}
async function handleVideoSelect(video: any): Promise<void> {
await digitalHuman.handleVideoSelect(video)
}
function handleVideoLoaded(videoUrl: string): void {
digitalHuman.videoState.value.previewVideoUrl = videoUrl
}
function handleVideoError(event: Event): void {
console.error('视频加载失败:', event)
message.error('视频无法播放,请尝试其他视频文件')
}
// ==================== UI 工具方法 ====================
function formatDuration(seconds: number): string {
if (!seconds) return '--:--'
const mins = Math.floor(seconds / 60)
const secs = Math.floor(seconds % 60)
return `${String(mins).padStart(2, '0')}:${String(secs).padStart(2, '0')}`
}
function formatFileSize(bytes: number): string {
if (!bytes) return '0 B'
const units = ['B', 'KB', 'MB', 'GB']
let size = bytes
let idx = 0
while (size >= 1024 && idx < units.length - 1) {
size /= 1024
idx++
}
return `${size.toFixed(1)} ${units[idx]}`
}
// ==================== 返回接口 ====================
return {
// 语音生成模块
ttsText: voice.ttsText,
speechRate: voice.speechRate,
selectedVoiceMeta: voice.selectedVoiceMeta,
audioState: voice.audioState,
canGenerateAudio: voice.canGenerateAudio,
suggestedMaxChars: voice.suggestedMaxChars,
generateAudio,
resetAudioState: voice.resetAudioState,
// 数字人生成模块
videoState: digitalHuman.videoState,
identifyResult: digitalHuman.identifyResult,
isIdentified: digitalHuman.isIdentified,
faceDuration: digitalHuman.faceDuration,
handleFileUpload: digitalHuman.handleFileUpload,
getVideoPreviewUrl: digitalHuman.getVideoPreviewUrl,
resetVideoState: digitalHuman.resetVideoState,
resetIdentifyResult: digitalHuman.resetIdentifyResult,
// 业务方法
generateDigitalHuman,
replaceVideo,
// 事件处理
handleVoiceSelect,
handleFileSelect,
handleDrop,
triggerFileSelect,
handleSelectUpload,
handleSelectFromLibrary,
handleVideoSelect,
handleVideoLoaded,
handleVideoError,
// UI 工具
formatDuration,
formatFileSize,
// 计算属性
canGenerate,
maxTextLength,
textareaPlaceholder,
speechRateMarks: SPEECH_RATE_MARKS,
speechRateDisplay,
faceDurationSec,
audioDurationSec,
audioUrl,
validationPassed,
// Pipeline 状态(单一状态源)
pipelineState: pipeline.state,
pipelineStateLabel: pipeline.stateLabel,
pipelineStateDescription: pipeline.stateDescription,
isPipelineBusy: pipeline.isBusy,
isPipelineReady: pipeline.isReady,
isPipelineFailed: pipeline.isFailed,
isPipelineCompleted: pipeline.isCompleted,
pipelineProgress: pipeline.progress,
pipelineCurrentStepIndex: pipeline.currentStepIndex,
pipelineError: pipeline.error,
retryPipeline: pipeline.retry,
resetPipeline: pipeline.reset,
}
}

View File

@@ -1,167 +0,0 @@
/**
* @fileoverview useVoiceGeneration Hook - 语音生成逻辑
*/
import { ref, computed } from 'vue'
import { message } from 'ant-design-vue'
import type {
UseVoiceGeneration,
AudioState,
VoiceMeta,
AudioData,
} from '../types/identify-face'
import { VoiceService } from '@/api/voice'
import { DEFAULT_VOICE_PROVIDER } from '@/config/voiceConfig'
// ========== 常量 ==========
const DEFAULT_MAX_TEXT_LENGTH = 4000
const DEFAULT_SPEECH_RATE = 1.0
export function useVoiceGeneration(): UseVoiceGeneration {
// ========== 状态 ==========
const ttsText = ref<string>('')
const speechRate = ref<number>(DEFAULT_SPEECH_RATE)
const selectedVoiceMeta = ref<VoiceMeta | null>(null)
const audioState = ref<AudioState>({
generated: null,
durationMs: 0,
generating: false,
})
// ========== 计算属性 ==========
const canGenerateAudio = computed(function() {
return !!(ttsText.value.trim() && selectedVoiceMeta.value && !audioState.value.generating)
})
const suggestedMaxChars = computed(function() {
return DEFAULT_MAX_TEXT_LENGTH
})
// ========== 方法 ==========
/**
 * Synthesize speech for the current text with the selected voice.
 *
 * On success the base64 audio and its client-measured duration are stored in
 * `audioState`; on any failure the audio state is cleared and a toast shown.
 * The `generating` flag (consumed by `canGenerateAudio`) guards re-entry.
 */
async function generateAudio(): Promise<void> {
  const voice = selectedVoiceMeta.value
  if (!voice) {
    message.warning('请选择音色')
    return
  }
  if (!ttsText.value.trim()) {
    message.warning('请输入文案内容')
    return
  }
  audioState.value.generating = true
  try {
    const params = {
      inputText: ttsText.value,
      // Prefer the raw backend id; otherwise parse it out of the display id.
      voiceConfigId: voice.rawId ?? extractIdFromString(voice.id),
      speechRate: speechRate.value,
      audioFormat: 'mp3' as const,
      providerType: DEFAULT_VOICE_PROVIDER,
    }
    const res = await VoiceService.synthesize(params)
    if (res.code === 0) {
      const audioData = res.data as AudioData
      if (!audioData.audioBase64) {
        throw new Error('未收到音频数据')
      }
      audioState.value.generated = audioData
      // Measure the playable duration client-side (see parseAudioDuration).
      audioState.value.durationMs = await parseAudioDuration(audioData.audioBase64)
    } else {
      throw new Error(res.msg || '配音生成失败')
    }
  } catch (error: unknown) {
    const err = error as Error
    message.error(err.message || '配音生成失败')
    // Roll back to a clean "nothing generated" state on any failure.
    audioState.value.generated = null
    audioState.value.durationMs = 0
  } finally {
    audioState.value.generating = false
  }
}
/**
 * Measure the duration (in ms) of a base64 MP3 using an off-screen Audio
 * element. A 200 ms safety margin is subtracted so the downstream
 * sound_end_time never exceeds the real clip length due to decode imprecision.
 *
 * @param base64Data raw base64, or a full "data:" URL (prefix is stripped)
 * @returns duration in milliseconds, already reduced by the margin
 * @throws Error on timeout (10 s), invalid duration, or decode failure
 */
async function parseAudioDuration(base64Data: string): Promise<number> {
  // Strip the "data:...;base64," prefix when present.
  const base64 = base64Data.includes(',') ? base64Data.split(',')[1] : base64Data
  const binaryString = window.atob(base64)
  const bytes = new Uint8Array(binaryString.length)
  for (let i = 0; i < bytes.length; i++) {
    bytes[i] = binaryString.charCodeAt(i)
  }
  return new Promise<number>(function(resolve, reject) {
    const blob = new Blob([bytes], { type: 'audio/mp3' })
    const audio = new Audio()
    const objectUrl = URL.createObjectURL(blob)
    const timeoutId = setTimeout(function() {
      cleanup()
      reject(new Error('音频时长解析超时'))
    }, 10000)
    // Detach everything exactly once, whichever path settles the promise.
    function cleanup() {
      clearTimeout(timeoutId)
      URL.revokeObjectURL(objectUrl)
      audio.removeEventListener('loadedmetadata', onLoadedMetadata)
      audio.removeEventListener('error', onError)
      audio.removeEventListener('canplay', onLoadedMetadata)
    }
    function onLoadedMetadata() {
      const duration = audio.duration
      if (!isFinite(duration) || duration <= 0) {
        cleanup()
        reject(new Error(`音频时长无效: ${duration}`))
        return
      }
      // Subtract the 200 ms safety margin so sound_end_time stays inside the clip.
      const durationMs = Math.floor(duration * 1000) - 200
      const rawDurationMs = Math.floor(duration * 1000)
      console.log('[parseAudioDuration] 解析成功:', durationMs, 'ms (原始:', rawDurationMs, 'ms)')
      cleanup()
      resolve(durationMs)
    }
    function onError() {
      cleanup()
      reject(new Error('音频解析失败,请检查音频格式'))
    }
    // NOTE(review): the same handler is registered for both loadedmetadata and
    // canplay ({ once: true }); whichever fires first wins and cleanup()
    // detaches the other — presumably a fallback for streams where
    // loadedmetadata reports a non-finite duration. Confirm before simplifying.
    audio.addEventListener('loadedmetadata', onLoadedMetadata)
    audio.addEventListener('error', onError)
    audio.addEventListener('canplay', onLoadedMetadata, { once: true })
    audio.src = objectUrl
    audio.load()
  })
}
function resetAudioState(): void {
  // Drop the generated clip and return to the pre-generation state.
  Object.assign(audioState.value, {
    generated: null,
    durationMs: 0,
    generating: false,
  })
}
return {
ttsText,
speechRate,
selectedVoiceMeta,
audioState,
canGenerateAudio,
suggestedMaxChars,
generateAudio,
resetAudioState,
}
}
/**
 * Pull the trailing id segment (word characters and dashes) out of a
 * composite string such as "provider:voice-id"; when nothing at the end
 * matches, the input is returned unchanged.
 */
function extractIdFromString(str: string): string {
  const tail = /[\w-]+$/.exec(str)
  return tail ? tail[0] : str
}

View File

@@ -0,0 +1,454 @@
/**
* @fileoverview 数字人合成 Store - 单一状态管理
*
* 设计理念:
* 1. 单一状态源 - 所有状态集中管理
* 2. 简单直观 - 一个 generate() 方法完成全流程
* 3. 易于调试 - 断点打在这里即可
*/
import { ref, computed } from 'vue'
import { defineStore } from 'pinia'
import { message } from 'ant-design-vue'
import { VoiceService } from '@/api/voice'
import { uploadAndIdentifyVideo, identifyUploadedVideo, createLipSyncTask } from '@/api/kling'
import { DEFAULT_VOICE_PROVIDER } from '@/config/voiceConfig'
import type { VoiceMeta, Video } from '../types/identify-face'
// ========== 类型定义 ==========
/** 流程步骤 */
export type GenerateStep = 'idle' | 'uploading' | 'recognizing' | 'generating' | 'creating' | 'done' | 'error'
/** 音频数据 */
interface AudioData {
audioBase64: string
format: string
durationMs: number
}
/** 识别结果 */
interface IdentifyData {
fileId: string
sessionId: string
faceId: string
faceStartTime: number
faceEndTime: number
}
// ========== Store 定义 ==========
export const useDigitalHumanStore = defineStore('digitalHuman', () => {
// ==================== 状态 ====================
/** 文案内容 */
const text = ref('')
/** 语速 */
const speechRate = ref(1.0)
/** 选中的音色 */
const voice = ref<VoiceMeta | null>(null)
/** 视频来源 */
const videoSource = ref<'upload' | 'select' | null>(null)
/** 上传的视频文件 */
const videoFile = ref<File | null>(null)
/** 从素材库选择的视频 */
const selectedVideo = ref<Video | null>(null)
/** 视频预览URL */
const videoPreviewUrl = ref('')
/** 当前步骤 */
const step = ref<GenerateStep>('idle')
/** 错误信息 */
const error = ref('')
/** 识别结果 */
const identifyData = ref<IdentifyData | null>(null)
/** 生成的音频 */
const audioData = ref<AudioData | null>(null)
/** 视频选择器可见性 */
const videoSelectorVisible = ref(false)
// ==================== 计算属性 ====================
/** 是否有视频 */
const hasVideo = computed(() => !!(videoFile.value || selectedVideo.value))
/** 是否可以生成 */
const canGenerate = computed(() => {
if (step.value !== 'idle') return false
return !!(text.value.trim() && voice.value && hasVideo.value)
})
/** 是否正在处理 */
const isBusy = computed(() =>
['uploading', 'recognizing', 'generating', 'creating'].includes(step.value)
)
/** 是否完成 */
const isDone = computed(() => step.value === 'done')
/** 是否失败 */
const isFailed = computed(() => step.value === 'error')
/** 人脸时长(ms) */
const faceDurationMs = computed(() => {
if (!identifyData.value) return 0
return identifyData.value.faceEndTime - identifyData.value.faceStartTime
})
/** 步骤进度 (0-100) */
const progress = computed(() => {
const stepProgress: Record<GenerateStep, number> = {
idle: 0,
uploading: 20,
recognizing: 40,
generating: 60,
creating: 80,
done: 100,
error: 0,
}
return stepProgress[step.value]
})
/** 步骤标签 */
const stepLabel = computed(() => {
const labels: Record<GenerateStep, string> = {
idle: '准备就绪',
uploading: '上传视频',
recognizing: '识别人脸',
generating: '生成音频',
creating: '创建任务',
done: '完成',
error: '失败',
}
return labels[step.value]
})
// ==================== 方法 ====================
/** Remember the voice chosen in the picker. */
function setVoice(next: VoiceMeta) {
  voice.value = next
}
/** Switch to "upload a local file" mode, discarding any library pick. */
function selectUploadMode() {
  selectedVideo.value = null
  videoSource.value = 'upload'
  resetProcess()
}
/** Switch to "pick from the media library" mode and open the selector. */
function selectLibraryMode() {
  videoFile.value = null
  videoPreviewUrl.value = ''
  videoSource.value = 'select'
  videoSelectorVisible.value = true
  resetProcess()
}
/** Accept a local video file and stage it for preview. */
async function handleFileUpload(file: File) {
  // Only MP4/MOV containers are accepted.
  if (!/\.(mp4|mov)$/i.test(file.name)) {
    message.error('仅支持 MP4 和 MOV 格式')
    return
  }
  // Release the previous preview blob before replacing it.
  if (videoPreviewUrl.value?.startsWith('blob:')) {
    URL.revokeObjectURL(videoPreviewUrl.value)
  }
  selectedVideo.value = null
  videoSource.value = 'upload'
  videoFile.value = file
  videoPreviewUrl.value = URL.createObjectURL(file)
  resetProcess()
}
/** Stage a video picked from the media library and close the selector. */
function selectVideo(video: Video) {
  videoFile.value = null
  videoSource.value = 'select'
  selectedVideo.value = video
  videoPreviewUrl.value = video.fileUrl
  videoSelectorVisible.value = false
  resetProcess()
}
/** Clear only the in-flight generation state, keeping the user's inputs. */
function resetProcess() {
  error.value = ''
  step.value = 'idle'
  audioData.value = null
  identifyData.value = null
}
/** 完全重置 */
function reset() {
// 释放 blob URL
if (videoPreviewUrl.value?.startsWith('blob:')) {
URL.revokeObjectURL(videoPreviewUrl.value)
}
text.value = ''
speechRate.value = 1.0
voice.value = null
videoSource.value = null
videoFile.value = null
selectedVideo.value = null
videoPreviewUrl.value = ''
step.value = 'idle'
error.value = ''
identifyData.value = null
audioData.value = null
videoSelectorVisible.value = false
}
/**
 * Decode a base64 MP3 and measure its duration via an off-screen Audio element.
 *
 * VBR MP3 duration estimates from early media events can be inaccurate, so the
 * value observed at `canplaythrough` is preferred. A 200 ms safety margin is
 * subtracted so sound_end_time stays within the real clip length.
 *
 * Fix: the 15 s timeout previously rejected even when a plausible duration had
 * already been observed from `durationchange`/`canplay`; it now falls back to
 * that value (mirroring the existing onerror fallback) and only rejects when
 * nothing usable was ever seen.
 *
 * @param base64Data raw base64, or a full "data:" URL (prefix is stripped)
 * @returns duration in milliseconds, already reduced by the margin
 */
async function parseAudioDuration(base64Data: string): Promise<number> {
  const base64 = base64Data.includes(',') ? base64Data.split(',')[1] : base64Data
  const binaryString = window.atob(base64)
  const bytes = new Uint8Array(binaryString.length)
  for (let i = 0; i < bytes.length; i++) {
    bytes[i] = binaryString.charCodeAt(i)
  }
  return new Promise((resolve, reject) => {
    const blob = new Blob([bytes], { type: 'audio/mp3' })
    const audio = new Audio()
    const objectUrl = URL.createObjectURL(blob)
    let resolved = false
    let lastDuration = 0
    function cleanup() {
      clearTimeout(timeoutId)
      URL.revokeObjectURL(objectUrl)
    }
    // Resolve exactly once, applying the 200 ms safety margin.
    function settle(duration: number) {
      if (resolved) return
      resolved = true
      cleanup()
      resolve(Math.floor(duration * 1000) - 200)
    }
    function fail(err: Error) {
      if (resolved) return
      resolved = true
      cleanup()
      reject(err)
    }
    const timeoutId = setTimeout(() => {
      // Prefer the best estimate seen so far over failing outright.
      if (lastDuration > 0) {
        settle(lastDuration)
      } else {
        fail(new Error('音频解析超时'))
      }
    }, 15000)
    function tryResolve(duration: number, source: string) {
      if (resolved) return
      if (!isFinite(duration) || duration <= 0) return
      lastDuration = duration
      // Only canplaythrough is trusted for the final value (most accurate).
      if (source === 'canplaythrough') {
        settle(duration)
      }
    }
    // VBR MP3s report rough durations on early events; wait for canplaythrough.
    audio.ondurationchange = () => tryResolve(audio.duration, 'durationchange')
    audio.oncanplay = () => tryResolve(audio.duration, 'canplay')
    audio.oncanplaythrough = () => tryResolve(audio.duration, 'canplaythrough')
    audio.onerror = () => {
      if (lastDuration > 0) {
        settle(lastDuration)
      } else {
        fail(new Error('音频解析失败'))
      }
    }
    audio.src = objectUrl
    audio.load()
  })
}
/**
 * End-to-end digital-human generation flow:
 * identify face → synthesize audio → validate duration → create lip-sync task.
 *
 * `step` tracks progress for the UI; any failure lands in the 'error' step
 * with a user-facing message (see the catch at the bottom).
 */
async function generate() {
  // Input validation — bail out with a toast rather than throwing.
  if (!text.value.trim()) {
    message.warning('请输入文案内容')
    return
  }
  if (!voice.value) {
    message.warning('请选择音色')
    return
  }
  if (!hasVideo.value) {
    message.warning('请先选择视频')
    return
  }
  try {
    // ===== Step 1: upload (if a local file) and recognize the face =====
    step.value = videoFile.value ? 'uploading' : 'recognizing'
    let identifyResult: IdentifyData
    if (selectedVideo.value) {
      // Library video — already uploaded, call the identify endpoint directly.
      step.value = 'recognizing'
      const res = await identifyUploadedVideo(selectedVideo.value) as any
      identifyResult = {
        fileId: String(selectedVideo.value.fileId),
        sessionId: res.data.sessionId,
        faceId: res.data.faceId || '',
        faceStartTime: res.data.startTime || 0,
        faceEndTime: res.data.endTime || 0,
      }
    } else {
      // Fresh upload — uploadAndIdentifyVideo does upload + recognition in one call.
      const res = await uploadAndIdentifyVideo(videoFile.value!) as any
      identifyResult = {
        fileId: String(res.data.fileId),
        sessionId: res.data.sessionId,
        faceId: res.data.faceId || '',
        faceStartTime: res.data.startTime || 0,
        faceEndTime: res.data.endTime || 0,
      }
    }
    identifyData.value = identifyResult
    // ===== Step 2: synthesize the voice-over =====
    step.value = 'generating'
    const voiceId = voice.value.rawId ?? extractId(voice.value.id)
    const res = await VoiceService.synthesize({
      inputText: text.value,
      voiceConfigId: voiceId,
      speechRate: speechRate.value,
      audioFormat: 'mp3',
      providerType: DEFAULT_VOICE_PROVIDER,
    } as any)
    if (res.code !== 0 || !res.data?.audioBase64) {
      throw new Error(res.msg || '音频生成失败')
    }
    // Client-side duration measurement (includes the 200 ms safety margin).
    const durationMs = await parseAudioDuration(res.data.audioBase64)
    audioData.value = {
      audioBase64: res.data.audioBase64,
      format: 'mp3',
      durationMs,
    }
    // ===== Step 3: the audio must fit inside the detected face window =====
    const videoDurationMs = identifyResult.faceEndTime - identifyResult.faceStartTime
    if (durationMs > videoDurationMs) {
      throw new Error(`音频时长(${(durationMs/1000).toFixed(1)}秒)超过人脸时长(${(videoDurationMs/1000).toFixed(1)}秒)`)
    }
    // ===== Step 4: submit the lip-sync task =====
    step.value = 'creating'
    const taskRes = await createLipSyncTask({
      taskName: `数字人任务_${Date.now()}`,
      videoFileId: identifyResult.fileId,
      inputText: text.value,
      speechRate: speechRate.value,
      volume: 0,
      guidanceScale: 1,
      // NOTE(review): fixed seed — presumably for reproducible output; confirm intent.
      seed: 8888,
      kling_session_id: identifyResult.sessionId,
      kling_face_id: identifyResult.faceId,
      kling_face_start_time: identifyResult.faceStartTime,
      kling_face_end_time: identifyResult.faceEndTime,
      ai_provider: 'kling',
      voiceConfigId: voiceId,
      pre_generated_audio: {
        audioBase64: audioData.value.audioBase64,
        format: audioData.value.format,
      },
      sound_end_time: audioData.value.durationMs,
    })
    if (taskRes.code !== 0) {
      throw new Error(taskRes.msg || '任务创建失败')
    }
    step.value = 'done'
    message.success('任务已提交,请在任务中心查看生成进度')
  } catch (err: any) {
    step.value = 'error'
    error.value = err.message || '生成失败'
    message.error(error.value)
  }
}
/** After a failure, clear the process state so the user can try again. */
function retry() {
  if (step.value !== 'error') return
  resetProcess()
}
// ==================== 导出 ====================
return {
// 状态
text,
speechRate,
voice,
videoSource,
videoFile,
selectedVideo,
videoPreviewUrl,
step,
error,
identifyData,
audioData,
videoSelectorVisible,
// 计算属性
hasVideo,
canGenerate,
isBusy,
isDone,
isFailed,
faceDurationMs,
progress,
stepLabel,
// 方法
setVoice,
selectUploadMode,
selectLibraryMode,
handleFileUpload,
selectVideo,
resetProcess,
reset,
generate,
retry,
}
})
// ========== 工具函数 ==========
/**
 * Extract the trailing word/dash segment from a composite id string;
 * returns the input unchanged when nothing at the end matches.
 */
function extractId(str: string): string {
  const tail = /[\w-]+$/.exec(str)
  return tail ? tail[0] : str
}

View File

@@ -1,17 +1,29 @@
<script setup>
import { computed, onMounted } from 'vue'
import { computed, onMounted, ref, reactive } from 'vue'
import { useUserStore } from '@/stores/user'
import {
UserOutlined,
DatabaseOutlined,
WalletOutlined,
import { getPointRecordPage } from '@/api/pointRecord'
import {
UserOutlined,
DatabaseOutlined,
WalletOutlined,
PayCircleOutlined,
ClockCircleOutlined,
SafetyCertificateOutlined
SafetyCertificateOutlined,
PlusOutlined,
MinusOutlined
} from '@ant-design/icons-vue'
const userStore = useUserStore()
// 积分记录数据
const pointRecords = ref([])
const recordsLoading = ref(false)
const recordsPagination = reactive({
current: 1,
pageSize: 10,
total: 0
})
// 存储空间数据
const GB_TO_MB = 1024
const totalStorage = computed(() => userStore.totalStorage * GB_TO_MB)
@@ -54,6 +66,67 @@ function maskMobile(mobile) {
return mobile.replace(/(\d{3})\d{4}(\d{4})/, '$1****$2')
}
// Load the current page of point records into local state.
async function fetchPointRecords() {
  recordsLoading.value = true
  try {
    const { current, pageSize } = recordsPagination
    const res = await getPointRecordPage({ pageNo: current, pageSize })
    const page = res.data
    if (page) {
      pointRecords.value = page.list || []
      recordsPagination.total = page.total || 0
    }
  } catch (e) {
    // Best-effort: keep whatever was displayed, just log the failure.
    console.error('获取积分记录失败:', e)
  } finally {
    recordsLoading.value = false
  }
}
// Sync pagination state from the table event, then refetch.
function handleTableChange(pagination) {
  const { current, pageSize } = pagination
  recordsPagination.current = current
  recordsPagination.pageSize = pageSize
  fetchPointRecords()
}
// Render a compact "MM/DD HH:mm"-style timestamp (zh-CN locale) for the list.
function formatRecordTime(dateStr) {
  if (!dateStr) return ''
  const options = {
    month: '2-digit',
    day: '2-digit',
    hour: '2-digit',
    minute: '2-digit'
  }
  return new Date(dateStr).toLocaleString('zh-CN', options)
}
// Map a biz-type code to its display label; unknown codes fall through as-is,
// and empty/missing codes show the generic "other" label.
function getBizTypeName(bizType) {
  switch (bizType) {
    case 'signin': return '签到'
    case 'recharge': return '充值'
    case 'exchange': return '兑换'
    case 'admin': return '后台调整'
    case 'gift': return '礼包赠送'
    case 'digital_human': return '数字人生成'
    default: return bizType || '其他'
  }
}
// Map a point-record status to its tag label and color; unknown statuses
// are shown verbatim with the default color.
function getStatusInfo(status) {
  switch (status) {
    case 'pending': return { text: '处理中', color: 'orange' }
    case 'confirmed': return { text: '已完成', color: 'green' }
    case 'canceled': return { text: '已取消', color: 'default' }
    default: return { text: status, color: 'default' }
  }
}
onMounted(async () => {
if (userStore.isLoggedIn) {
// 获取用户基本信息和档案信息
@@ -63,6 +136,8 @@ onMounted(async () => {
if (!userStore.profile) {
await userStore.fetchUserProfile()
}
// 获取积分记录
await fetchPointRecords()
}
})
</script>
@@ -160,20 +235,64 @@ onMounted(async () => {
</a-col>
</a-row>
<!-- 最近活动/设置占位 -->
<a-card title="最近活动" :bordered="false" class="activity-card mt-6">
<!-- 积分记录 -->
<a-card title="积分记录" :bordered="false" class="activity-card mt-6">
<template #extra>
<a href="#">查看全部</a>
<span class="record-count"> {{ recordsPagination.total }} 条记录</span>
</template>
<a-list item-layout="horizontal" :data-source="[]">
<template #renderItem="{ item }">
<!-- 这里是空列表暂时留白 -->
</template>
<div class="empty-state">
<ClockCircleOutlined class="empty-icon" />
<p>暂无最近活动记录</p>
</div>
</a-list>
<a-spin :spinning="recordsLoading">
<a-list
v-if="pointRecords.length > 0"
item-layout="horizontal"
:data-source="pointRecords"
class="point-record-list"
>
<template #renderItem="{ item }">
<a-list-item>
<a-list-item-meta>
<template #avatar>
<div :class="['record-icon', item.type === 'increase' ? 'increase' : 'decrease']">
<PlusOutlined v-if="item.type === 'increase'" />
<MinusOutlined v-else />
</div>
</template>
<template #title>
<div class="record-title">
<span class="record-reason">{{ item.reason || getBizTypeName(item.bizType) }}</span>
<a-tag :color="getStatusInfo(item.status).color" size="small">
{{ getStatusInfo(item.status).text }}
</a-tag>
</div>
</template>
<template #description>
<div class="record-desc">
<span>{{ formatRecordTime(item.createTime) }}</span>
<span v-if="item.bizType" class="record-biz-type">{{ getBizTypeName(item.bizType) }}</span>
</div>
</template>
</a-list-item-meta>
<div :class="['record-amount', item.type === 'increase' ? 'increase' : 'decrease']">
{{ item.type === 'increase' ? '+' : '-' }}{{ Math.abs(item.pointAmount) }}
</div>
</a-list-item>
</template>
</a-list>
<a-pagination
v-if="recordsPagination.total > recordsPagination.pageSize"
v-model:current="recordsPagination.current"
v-model:page-size="recordsPagination.pageSize"
:total="recordsPagination.total"
:show-size-changer="false"
:show-total="total => `${total}`"
size="small"
class="record-pagination"
@change="handleTableChange"
/>
<div v-if="pointRecords.length === 0 && !recordsLoading" class="empty-state">
<ClockCircleOutlined class="empty-icon" />
<p>暂无积分记录</p>
</div>
</a-spin>
</a-card>
</a-col>
@@ -382,4 +501,82 @@ onMounted(async () => {
color: #52c41a;
margin-right: 12px;
}
/* Point Record List */
.activity-card {
border-radius: 12px;
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.03);
}
.record-count {
color: var(--color-text-secondary);
font-size: 13px;
}
.point-record-list {
max-height: 400px;
overflow-y: auto;
}
.record-icon {
width: 36px;
height: 36px;
border-radius: 50%;
display: flex;
align-items: center;
justify-content: center;
font-size: 16px;
}
.record-icon.increase {
background: rgba(82, 196, 26, 0.1);
color: #52c41a;
}
.record-icon.decrease {
background: rgba(255, 77, 79, 0.1);
color: #ff4d4f;
}
.record-title {
display: flex;
align-items: center;
gap: 8px;
}
.record-reason {
font-weight: 500;
color: var(--color-text);
}
.record-desc {
display: flex;
gap: 12px;
font-size: 12px;
color: var(--color-text-secondary);
}
.record-biz-type {
padding: 1px 6px;
background: rgba(0, 0, 0, 0.04);
border-radius: 4px;
}
.record-amount {
font-size: 18px;
font-weight: 600;
}
.record-amount.increase {
color: #52c41a;
}
.record-amount.decrease {
color: #ff4d4f;
}
.record-pagination {
margin-top: 16px;
text-align: center;
}
</style>

View File

@@ -0,0 +1,43 @@
package cn.iocoder.yudao.module.tik.muye.pointrecord;
import cn.iocoder.yudao.framework.common.pojo.CommonResult;
import cn.iocoder.yudao.framework.common.pojo.PageResult;
import cn.iocoder.yudao.framework.security.core.util.SecurityFrameworkUtils;
import cn.iocoder.yudao.module.tik.muye.pointrecord.dal.PointRecordDO;
import cn.iocoder.yudao.module.tik.muye.pointrecord.service.PointRecordService;
import cn.iocoder.yudao.module.tik.muye.pointrecord.vo.PointRecordPageReqVO;
import cn.iocoder.yudao.module.tik.muye.pointrecord.vo.PointRecordRespVO;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.tags.Tag;
import jakarta.annotation.Resource;
import jakarta.validation.Valid;
import org.springframework.validation.annotation.Validated;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import static cn.iocoder.yudao.framework.common.pojo.CommonResult.success;
import static cn.iocoder.yudao.framework.common.util.object.BeanUtils.toBean;
/**
 * App-facing endpoint exposing the signed-in member's point (积分) change
 * records as a paged list. Results are always scoped to the current user.
 */
@Tag(name = "用户 App - 积分记录")
@RestController
@RequestMapping("/api/tik/point-record")
@Validated
public class AppPointRecordController {

    @Resource
    private PointRecordService pointRecordService;

    @GetMapping("/page")
    @Operation(summary = "获取当前用户积分记录分页")
    public CommonResult<PageResult<PointRecordRespVO>> getPointRecordPage(@Valid PointRecordPageReqVO pageReqVO) {
        // Overwrite any caller-supplied userId with the authenticated user's id,
        // so one user can never page through another user's records.
        pageReqVO.setUserId(SecurityFrameworkUtils.getLoginUserId());
        final PageResult<PointRecordDO> page = pointRecordService.getPointRecordPage(pageReqVO);
        return success(toBean(page, PointRecordRespVO.class));
    }
}

View File

@@ -52,6 +52,10 @@ public class PointRecordRespVO {
@ExcelProperty("备注")
private String remark;
@Schema(description = "状态pending-预扣 confirmed-已确认 canceled-已取消", requiredMode = Schema.RequiredMode.REQUIRED)
@ExcelProperty("状态")
private String status;
@Schema(description = "创建时间", requiredMode = Schema.RequiredMode.REQUIRED)
@ExcelProperty("创建时间")
private LocalDateTime createTime;

View File

@@ -9,6 +9,7 @@ import cn.iocoder.yudao.module.tik.voice.vo.AppTikLatentsyncResultRespVO;
import cn.iocoder.yudao.module.tik.kling.service.KlingService;
import cn.iocoder.yudao.module.tik.kling.dto.KlingLipSyncQueryResponse;
import cn.iocoder.yudao.module.tik.kling.vo.response.KlingLipSyncVideoVO;
import cn.iocoder.yudao.module.tik.muye.points.service.PointsService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.data.redis.core.StringRedisTemplate;
@@ -35,6 +36,7 @@ public class LatentsyncPollingService {
private final LatentsyncService latentsyncService;
private final StringRedisTemplate stringRedisTemplate;
private final KlingService klingService;
private final PointsService pointsService;
// ========== 常量 ==========
private static final String REDIS_POLLING_PREFIX = "latentsync:polling:";
@@ -218,6 +220,10 @@ public class LatentsyncPollingService {
@Transactional(rollbackFor = Exception.class)
private void completeTask(Long taskId, String videoUrl, String requestId) {
try {
// 1. 先查询任务获取预扣记录ID
TikDigitalHumanTaskDO task = TenantUtils.executeIgnore(() -> taskMapper.selectById(taskId));
// 2. 更新任务状态
TikDigitalHumanTaskDO updateObj = new TikDigitalHumanTaskDO();
updateObj.setId(taskId);
updateObj.setStatus("SUCCESS");
@@ -228,7 +234,17 @@ public class LatentsyncPollingService {
TenantUtils.executeIgnore(() -> taskMapper.updateById(updateObj));
// 缓存结果
// 3. 确认预扣(任务成功,实际扣费)
if (task != null && task.getPendingRecordId() != null) {
try {
pointsService.confirmPendingDeduct(task.getPendingRecordId());
log.info("[completeTask][任务({})成功确认扣费预扣记录ID({})]", taskId, task.getPendingRecordId());
} catch (Exception e) {
log.error("[completeTask][确认扣费失败taskId={}recordId={}]", taskId, task.getPendingRecordId(), e);
}
}
// 4. 缓存结果
String resultKey = REDIS_RESULT_PREFIX + taskId;
stringRedisTemplate.opsForValue().set(resultKey, videoUrl, RESULT_CACHE_TIME);
@@ -248,6 +264,10 @@ public class LatentsyncPollingService {
@Transactional(rollbackFor = Exception.class)
private void markTaskFailed(Long taskId, String errorMessage) {
try {
// 1. 先查询任务获取预扣记录ID
TikDigitalHumanTaskDO task = TenantUtils.executeIgnore(() -> taskMapper.selectById(taskId));
// 2. 更新任务状态
TikDigitalHumanTaskDO updateObj = new TikDigitalHumanTaskDO();
updateObj.setId(taskId);
updateObj.setStatus("FAILED");
@@ -256,6 +276,16 @@ public class LatentsyncPollingService {
TenantUtils.executeIgnore(() -> taskMapper.updateById(updateObj));
// 3. 取消预扣(任务失败,不扣费)
if (task != null && task.getPendingRecordId() != null) {
try {
pointsService.cancelPendingDeduct(task.getPendingRecordId());
log.info("[markTaskFailed][任务({})失败取消预扣预扣记录ID({})]", taskId, task.getPendingRecordId());
} catch (Exception e) {
log.error("[markTaskFailed][取消预扣失败taskId={}recordId={}]", taskId, task.getPendingRecordId(), e);
}
}
log.warn("[markTaskFailed][任务失败][taskId={}, error={}]", taskId, errorMessage);
} catch (Exception e) {
log.error("[markTaskFailed][标记任务失败失败][taskId={}]", taskId, e);
@@ -266,6 +296,10 @@ public class LatentsyncPollingService {
* 更新任务状态
*/
private void updateTaskStatus(Long taskId, String status, String currentStep, Integer progress, String errorMessage) {
// 1. 先查询任务获取预扣记录ID
TikDigitalHumanTaskDO task = TenantUtils.executeIgnore(() -> taskMapper.selectById(taskId));
// 2. 更新任务状态
TikDigitalHumanTaskDO updateObj = new TikDigitalHumanTaskDO();
updateObj.setId(taskId);
updateObj.setStatus(status);
@@ -274,11 +308,29 @@ public class LatentsyncPollingService {
if ("SUCCESS".equals(status)) {
updateObj.setFinishTime(LocalDateTime.now());
// 确认预扣(任务成功)
if (task != null && task.getPendingRecordId() != null) {
try {
pointsService.confirmPendingDeduct(task.getPendingRecordId());
log.info("[updateTaskStatus][任务({})成功,确认扣费]", taskId);
} catch (Exception e) {
log.error("[updateTaskStatus][确认扣费失败taskId={}]", taskId, e);
}
}
} else if ("PROCESSING".equals(status)) {
updateObj.setStartTime(LocalDateTime.now());
} else if ("FAILED".equals(status)) {
updateObj.setErrorMessage(errorMessage);
updateObj.setFinishTime(LocalDateTime.now());
// 取消预扣(任务失败)
if (task != null && task.getPendingRecordId() != null) {
try {
pointsService.cancelPendingDeduct(task.getPendingRecordId());
log.info("[updateTaskStatus][任务({})失败,取消预扣]", taskId);
} catch (Exception e) {
log.error("[updateTaskStatus][取消预扣失败taskId={}]", taskId, e);
}
}
}
TenantUtils.executeIgnore(() -> taskMapper.updateById(updateObj));

View File

@@ -391,10 +391,11 @@ public class TikUserVoiceServiceImpl implements TikUserVoiceService {
transcriptionText = reqVO.getTranscriptionText();
}
String finalText = determineSynthesisText(
transcriptionText,
reqVO.getInputText(),
false);
// transcriptionText 仅用于提高克隆质量,不拼接到合成文本
String finalText = reqVO.getInputText();
if (StrUtil.isBlank(finalText)) {
throw exception(VOICE_TTS_FAILED, "请提供需要合成的文本内容");
}
String cacheKey = buildCacheKey(SYNTH_CACHE_PREFIX,
voiceId,
@@ -444,7 +445,7 @@ public class TikUserVoiceServiceImpl implements TikUserVoiceService {
log.error("[synthesizeVoice][积分扣减失败: {}]", e.getMessage());
}
// 【安全方案】不暴露OSS链接直接返回Base64编码的音频数据
// 不暴露OSS链接直接返回Base64编码的音频数据
String audioBase64 = Base64.getEncoder().encodeToString(ttsResult.getAudio());
log.info("[synthesizeVoice][合成成功,配音编号({})voiceId({})format({})audioSize={}]",
voiceConfigId, finalVoiceId, format, ttsResult.getAudio().length);
@@ -617,9 +618,6 @@ public class TikUserVoiceServiceImpl implements TikUserVoiceService {
/**
* 从URL中提取原始URL去除查询参数和锚点
*
* @param url 可能包含查询参数的URL
* @return 原始URL去除查询参数和锚点
*/
private String extractRawUrl(String url) {
if (StrUtil.isBlank(url)) {
@@ -627,10 +625,8 @@ public class TikUserVoiceServiceImpl implements TikUserVoiceService {
}
try {
java.net.URL urlObj = new java.net.URL(url);
// 只使用协议、主机、路径部分,忽略查询参数和锚点
return urlObj.getProtocol() + "://" + urlObj.getHost() + urlObj.getPath();
} catch (Exception e) {
// 如果URL解析失败使用简单方式去除查询参数
return url.split("\\?")[0].split("#")[0];
}
}
@@ -644,18 +640,15 @@ public class TikUserVoiceServiceImpl implements TikUserVoiceService {
String instruction,
String audioFormat,
Integer sampleRate) {
// 构建标识符优先使用voiceId如果没有则使用fileUrl的稳定部分去除查询参数
String identifier;
if (StrUtil.isNotBlank(voiceId)) {
identifier = voiceId;
} else if (StrUtil.isNotBlank(fileUrl)) {
// 对于fileUrl提取稳定部分去除预签名URL的查询参数避免缓存key不稳定
identifier = extractRawUrl(fileUrl);
} else {
identifier = "no-voice";
}
// 获取默认配置
String defaultFormat = getDefaultFormat();
Integer defaultSampleRate = getDefaultSampleRate();
@@ -667,8 +660,7 @@ public class TikUserVoiceServiceImpl implements TikUserVoiceService {
instruction,
StrUtil.blankToDefault(audioFormat, defaultFormat),
sampleRate != null ? sampleRate : defaultSampleRate);
String hash = cn.hutool.crypto.SecureUtil.sha256(payload);
return prefix + hash;
return prefix + cn.hutool.crypto.SecureUtil.sha256(payload);
}
private PreviewCacheEntry getPreviewCache(String key) {