012-kaopeilian/frontend/src/composables/useVoiceCall.ts
yuliang_guo 64f5d567fa
feat: implement the KPL system feature improvement plan
1. Course learning progress tracking
   - Add UserCourseProgress and UserMaterialProgress models
   - Add /api/v1/progress/* progress-tracking APIs
   - Update admin.py to use real course completion-rate data

2. Route permission check improvements
   - Add the frontend permissionChecker.ts permission-checking utility
   - Update router/guard.ts to verify team and course permissions (see the guard sketch after this list)
   - Add the backend permission_service.py

3. AI practice-partner audio-to-text
   - Add the speech_recognition.py speech recognition service
   - Add /api/v1/speech/* APIs
   - Update ai-practice-coze.vue to support voice input

4. Pair-practice report generation
   - Update practice_room_service.py to add report generation
   - Add the /rooms/{room_code}/report API (see the client-side sketch after this list)
   - Update duo-practice-report.vue to call the real API

5. Learning reminder push
   - Add the notification_service.py notification service
   - Add the scheduler_service.py scheduled-task service
   - Support DingTalk, WeCom, and in-app message delivery

6. Smart learning recommendations
   - Add the recommendation_service.py recommendation service
   - Add /api/v1/recommendations/* APIs
   - Support recommendations across wrong-answer, ability, progress, and popularity dimensions

7. Security fixes
   - Change the DEBUG default to False
   - Add a SECRET_KEY security warning
   - Add a check_security_settings() check function

8. Certificate PDF generation
   - Update certificate_service.py to add PDF generation
   - Add weasyprint, Pillow, and qrcode dependencies
   - Update the download API to support PDF and PNG formats
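
For item 2, a minimal sketch of the kind of guard router/guard.ts could install. The helper names (checkTeamPermission, checkCoursePermission), the '@/utils/permissionChecker' import path, the route-meta flags, and the 'forbidden' route are assumptions for illustration only; the commit states only that the guard verifies team and course permissions using permissionChecker.ts.

    import type { Router } from 'vue-router'
    import { checkTeamPermission, checkCoursePermission } from '@/utils/permissionChecker'

    // Register a global guard that blocks navigation when the current user
    // lacks access to the team or course referenced by the target route.
    export function installPermissionGuard(router: Router) {
      router.beforeEach(async (to) => {
        if (to.meta.requiresTeam && !(await checkTeamPermission(String(to.params.teamId)))) {
          return { name: 'forbidden' } // hypothetical error route
        }
        if (to.meta.requiresCourse && !(await checkCoursePermission(String(to.params.courseId)))) {
          return { name: 'forbidden' }
        }
        return true
      })
    }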
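
For item 4, a sketch of how duo-practice-report.vue might call the new endpoint. The /api/v1/practice prefix mirrors the signalling call in useVoiceCall.ts below; the prefix, the GET verb, and the response shape are assumptions, since the commit names only the /rooms/{room_code}/report route.

    import request from '@/api/request'

    // Fetch the pair-practice report generated by practice_room_service.py.
    export function fetchDuoPracticeReport(roomCode: string) {
      // The response shape is defined by the backend and is not shown here.
      return request.get(`/api/v1/practice/rooms/${roomCode}/report`)
    }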
2026-01-30 14:22:35 +08:00

/**
 * Voice-call composable.
 *
 * Responsibilities:
 * - Integrate the WebRTC manager with the room signalling service
 * - Manage call state
 * - Handle speech-to-text transcription
 */
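
/*
 * Usage sketch (illustrative only, not part of this module's API): a
 * practice-room component creates the composable with its room code, renders
 * the call state, and forwards incoming signalling messages to handleSignal.
 * The `route`, `transcriptLines`, and message field names below are assumptions.
 *
 *   const voice = useVoiceCall({
 *     roomCode: route.params.roomCode as string,
 *     onTranscript: (text, isFinal) => { if (isFinal) transcriptLines.push(text) }
 *   })
 *   // From the room's signalling channel (e.g. WebSocket):
 *   //   voice.handleSignal(msg.signal_type, msg.payload, msg.from_user_id)
 *   // UI actions: voice.startCall(), voice.toggleMute(), voice.endCall()
 */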
import { ref, computed, onUnmounted } from 'vue'
import { ElMessage } from 'element-plus'
import { WebRTCManager, createWebRTCManager, type ConnectionState } from '@/utils/webrtc'
import request from '@/api/request'
export type VoiceCallState = 'idle' | 'requesting' | 'ringing' | 'connecting' | 'connected' | 'ended'
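
// Call-state flow implemented below:
//   caller: idle → requesting → ringing (offer sent) → connected → ended → idle
//   callee: idle → ringing (voice_start) → connecting (answering) → connected → ended → idle
// 'ended' is transient; the state falls back to 'idle' about one second later.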
export interface UseVoiceCallOptions {
  roomCode: string
  onTranscript?: (text: string, isFinal: boolean) => void
  onRemoteTranscript?: (text: string) => void
}

export function useVoiceCall(options: UseVoiceCallOptions) {
  const { roomCode, onTranscript, onRemoteTranscript } = options

  // ==================== State ====================
  const callState = ref<VoiceCallState>('idle')
  const connectionState = ref<ConnectionState>('idle')
  const isMuted = ref(false)
  const isRemoteMuted = ref(false)
  const localAudioLevel = ref(0)
  const remoteAudioLevel = ref(0)
  const callDuration = ref(0)
  const errorMessage = ref<string | null>(null)

  // Speech-recognition state
  const isTranscribing = ref(false)
  const currentTranscript = ref('')

  // Internal state (not exposed to the caller)
  let webrtcManager: WebRTCManager | null = null
  let recognition: any = null // SpeechRecognition instance (Web Speech API)
  let callTimer: number | null = null
  let audioLevelTimer: number | null = null

  // ==================== Computed ====================
  const isCallActive = computed(() =>
    ['requesting', 'ringing', 'connecting', 'connected'].includes(callState.value)
  )
  const canStartCall = computed(() => callState.value === 'idle')
  const canEndCall = computed(() => isCallActive.value)
  // ==================== Signalling API ====================
  async function sendSignal(signalType: string, payload: any) {
    try {
      await request.post(`/api/v1/practice/rooms/${roomCode}/signal`, {
        signal_type: signalType,
        payload
      })
    } catch (error) {
      console.error('[VoiceCall] 发送信令失败:', error)
      throw error
    }
  }
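
  // Note: the backend is assumed to relay each signal to the other participant
  // in the room (e.g. over the room's realtime channel) so that the peer's
  // handleSignal() receives the same signal_type / payload pair; that relay is
  // server-side and not part of this file.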
  // ==================== Call control ====================

  /**
   * Start a voice call (caller side).
   */
  async function startCall() {
    if (!canStartCall.value) {
      console.warn('[VoiceCall] 无法发起通话,当前状态:', callState.value)
      return
    }
    try {
      callState.value = 'requesting'
      errorMessage.value = null

      // Create the WebRTC manager
      webrtcManager = createWebRTCManager({
        onConnectionStateChange: handleConnectionStateChange,
        onIceCandidate: handleIceCandidate,
        onRemoteStream: handleRemoteStream,
        onError: handleError
      })

      // Create the offer
      const offer = await webrtcManager.createOffer()

      // Send the call-start signal
      await sendSignal('voice_start', {})

      // Send the offer
      await sendSignal('voice_offer', {
        type: offer.type,
        sdp: offer.sdp
      })

      callState.value = 'ringing'
      ElMessage.info('正在呼叫对方...')
    } catch (error: any) {
      console.error('[VoiceCall] 发起通话失败:', error)
      errorMessage.value = error.message || '发起通话失败'
      callState.value = 'idle'
      webrtcManager?.close()
      webrtcManager = null
      ElMessage.error(errorMessage.value)
    }
  }
  /**
   * Answer an incoming voice call (callee side).
   */
  async function answerCall(offer: RTCSessionDescriptionInit) {
    try {
      callState.value = 'connecting'
      errorMessage.value = null

      // Create the WebRTC manager
      webrtcManager = createWebRTCManager({
        onConnectionStateChange: handleConnectionStateChange,
        onIceCandidate: handleIceCandidate,
        onRemoteStream: handleRemoteStream,
        onError: handleError
      })

      // Handle the offer and create an answer
      const answer = await webrtcManager.handleOffer(offer)

      // Send the answer
      await sendSignal('voice_answer', {
        type: answer.type,
        sdp: answer.sdp
      })

      ElMessage.success('已接听通话')
    } catch (error: any) {
      console.error('[VoiceCall] 接听通话失败:', error)
      errorMessage.value = error.message || '接听通话失败'
      callState.value = 'idle'
      webrtcManager?.close()
      webrtcManager = null
      ElMessage.error(errorMessage.value)
    }
  }

  /**
   * Reject an incoming call.
   */
  async function rejectCall() {
    try {
      await sendSignal('voice_end', { reason: 'rejected' })
      callState.value = 'idle'
    } catch (error) {
      console.error('[VoiceCall] 拒绝通话失败:', error)
    }
  }

  /**
   * End the current call.
   */
  async function endCall() {
    try {
      await sendSignal('voice_end', { reason: 'ended' })
    } catch (error) {
      console.error('[VoiceCall] 发送结束信令失败:', error)
    }
    cleanup()
    callState.value = 'ended'
    // Fall back to 'idle' shortly afterwards
    setTimeout(() => {
      callState.value = 'idle'
    }, 1000)
  }

  /**
   * Toggle the local microphone mute state.
   */
  function toggleMute() {
    if (webrtcManager) {
      isMuted.value = !isMuted.value
      webrtcManager.setMuted(isMuted.value)
    }
  }
  // ==================== Signal handling ====================

  /**
   * Handle a signalling message received from the peer.
   */
  async function handleSignal(signalType: string, payload: any, fromUserId: number) {
    console.log('[VoiceCall] 收到信令:', signalType)
    switch (signalType) {
      case 'voice_start':
        // Incoming call request
        if (callState.value === 'idle') {
          callState.value = 'ringing'
          ElMessage.info('收到语音通话请求')
        }
        break

      case 'voice_offer':
        // Received an offer; answer automatically
        if (callState.value === 'ringing' || callState.value === 'idle') {
          await answerCall({
            type: payload.type,
            sdp: payload.sdp
          })
        }
        break

      case 'voice_answer':
        // Received the answer to our offer
        if (webrtcManager && callState.value === 'ringing') {
          await webrtcManager.handleAnswer({
            type: payload.type,
            sdp: payload.sdp
          })
        }
        break

      case 'ice_candidate':
        // Received an ICE candidate
        if (webrtcManager && payload.candidate) {
          await webrtcManager.addIceCandidate(payload)
        }
        break

      case 'voice_end':
        // The peer ended the call
        cleanup()
        callState.value = 'ended'
        ElMessage.info('通话已结束')
        setTimeout(() => {
          callState.value = 'idle'
        }, 1000)
        break
    }
  }
  // ==================== WebRTC callbacks ====================

  function handleConnectionStateChange(state: ConnectionState) {
    connectionState.value = state
    if (state === 'connected') {
      callState.value = 'connected'
      startCallTimer()
      startAudioLevelMonitor()
      startSpeechRecognition()
      ElMessage.success('语音通话已连接')
    } else if (state === 'failed' || state === 'disconnected') {
      if (callState.value === 'connected') {
        ElMessage.warning('通话连接断开')
      }
    }
  }

  async function handleIceCandidate(candidate: RTCIceCandidate) {
    try {
      await sendSignal('ice_candidate', candidate.toJSON())
    } catch (error) {
      console.error('[VoiceCall] 发送 ICE 候选失败:', error)
    }
  }

  function handleRemoteStream(stream: MediaStream) {
    console.log('[VoiceCall] 收到远程音频流')
    // Play the remote audio stream
    const audio = new Audio()
    audio.srcObject = stream
    audio.play().catch(e => console.error('[VoiceCall] 播放远程音频失败:', e))
  }

  function handleError(error: Error) {
    console.error('[VoiceCall] WebRTC 错误:', error)
    errorMessage.value = error.message
  }
  // ==================== Speech recognition ====================

  function startSpeechRecognition() {
    // Check browser support (Web Speech API; some browsers expose it via the webkit prefix)
    const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition
    if (!SpeechRecognition) {
      console.warn('[VoiceCall] 浏览器不支持语音识别')
      return
    }

    recognition = new SpeechRecognition()
    recognition.continuous = true
    recognition.interimResults = true
    recognition.lang = 'zh-CN'

    recognition.onstart = () => {
      isTranscribing.value = true
      console.log('[VoiceCall] 语音识别已启动')
    }

    recognition.onresult = (event: any) => {
      let interimTranscript = ''
      let finalTranscript = ''
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript
        if (event.results[i].isFinal) {
          finalTranscript += transcript
        } else {
          interimTranscript += transcript
        }
      }
      currentTranscript.value = interimTranscript || finalTranscript
      if (finalTranscript) {
        onTranscript?.(finalTranscript, true)
      } else if (interimTranscript) {
        onTranscript?.(interimTranscript, false)
      }
    }

    recognition.onerror = (event: any) => {
      console.error('[VoiceCall] 语音识别错误:', event.error)
      if (event.error !== 'no-speech') {
        isTranscribing.value = false
      }
    }

    recognition.onend = () => {
      // If the call is still in progress, restart recognition; guard against
      // the instance having already been torn down by stopSpeechRecognition()
      if (recognition && callState.value === 'connected' && !isMuted.value) {
        recognition.start()
      } else {
        isTranscribing.value = false
      }
    }

    try {
      recognition.start()
    } catch (error) {
      console.error('[VoiceCall] 启动语音识别失败:', error)
    }
  }

  function stopSpeechRecognition() {
    if (recognition) {
      recognition.stop()
      recognition = null
    }
    isTranscribing.value = false
  }
  // ==================== Helpers ====================

  function startCallTimer() {
    callDuration.value = 0
    callTimer = window.setInterval(() => {
      callDuration.value++
    }, 1000)
  }

  function stopCallTimer() {
    if (callTimer) {
      clearInterval(callTimer)
      callTimer = null
    }
  }

  function startAudioLevelMonitor() {
    audioLevelTimer = window.setInterval(async () => {
      if (webrtcManager) {
        const localStream = webrtcManager.getLocalStream()
        const remoteStream = webrtcManager.getRemoteStream()
        if (localStream) {
          localAudioLevel.value = await webrtcManager.getAudioLevel(localStream)
        }
        if (remoteStream) {
          remoteAudioLevel.value = await webrtcManager.getAudioLevel(remoteStream)
        }
      }
    }, 100)
  }

  function stopAudioLevelMonitor() {
    if (audioLevelTimer) {
      clearInterval(audioLevelTimer)
      audioLevelTimer = null
    }
  }

  function formatDuration(seconds: number): string {
    const mins = Math.floor(seconds / 60)
    const secs = seconds % 60
    return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`
  }
  // ==================== Cleanup ====================

  function cleanup() {
    stopCallTimer()
    stopAudioLevelMonitor()
    stopSpeechRecognition()
    webrtcManager?.close()
    webrtcManager = null
    isMuted.value = false
    isRemoteMuted.value = false
    localAudioLevel.value = 0
    remoteAudioLevel.value = 0
    currentTranscript.value = ''
  }

  // Clean up when the host component unmounts
  onUnmounted(() => {
    if (isCallActive.value) {
      endCall()
    }
    cleanup()
  })

  // ==================== Return ====================
  return {
    // State
    callState,
    connectionState,
    isMuted,
    isRemoteMuted,
    localAudioLevel,
    remoteAudioLevel,
    callDuration,
    errorMessage,
    isTranscribing,
    currentTranscript,
    // Computed
    isCallActive,
    canStartCall,
    canEndCall,
    // Methods
    startCall,
    answerCall,
    rejectCall,
    endCall,
    toggleMute,
    handleSignal,
    formatDuration
  }
}