feat: add voice-call support for duo practice rooms

- Backend: extend SSE to relay WebRTC signaling messages (see the flow sketch below)
- Frontend: add a WebRTC connection manager (webrtc.ts)
- Frontend: add a useVoiceCall composable
- Frontend: add the voice-call UI to the practice room
- Integrate the Web Speech API for speech-to-text
yuliang_guo authored 2026-01-28 15:45:47 +08:00
parent c27ad55e95, commit c5d460b413
6 changed files with 1254 additions and 19 deletions
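Before the per-file diffs, here is a minimal sketch of the handshake these changes implement, expressed against the `sendSignal` helper added in `duoPractice.ts` below; the walkthrough function itself is hypothetical:

```ts
import { sendSignal } from '@/api/duoPractice'

// Hypothetical caller-side walkthrough of one call setup. Signals are stored
// as room messages on the backend and reach the peer over the existing SSE feed.
async function walkThroughHandshake(roomCode: string, offer: RTCSessionDescriptionInit) {
  await sendSignal(roomCode, 'voice_start', {})  // peer's UI switches to 'ringing'
  await sendSignal(roomCode, 'voice_offer', { type: offer.type, sdp: offer.sdp })
  // ...the peer replies with 'voice_answer' { type, sdp }, then both sides
  // exchange 'ice_candidate' payloads until the connection is established...
  await sendSignal(roomCode, 'voice_end', { reason: 'ended' })  // either side hangs up
}
```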

View File

@@ -48,6 +48,13 @@ class JoinRoomRequest(BaseModel):
class SendMessageRequest(BaseModel):
    """Send-message request"""
    content: str = Field(..., description="Message content")
    source: Optional[str] = Field("text", description="Message source: text/voice")
class WebRTCSignalRequest(BaseModel):
    """WebRTC signaling request"""
    signal_type: str = Field(..., description="Signal type: voice_offer/voice_answer/ice_candidate/voice_start/voice_end")
    payload: dict = Field(..., description="Signal payload (SDP, ICE candidate, etc.)")
class RoomResponse(BaseModel):
@@ -399,6 +406,60 @@ async def send_message(
}
@router.post("/{room_code}/signal", summary="发送WebRTC信令")
async def send_signal(
room_code: str,
request: WebRTCSignalRequest,
db: AsyncSession = Depends(get_db),
current_user: User = Depends(get_current_user)
):
"""
发送 WebRTC 信令消息
信令类型:
- voice_start: 发起语音通话
- voice_offer: SDP Offer
- voice_answer: SDP Answer
- ice_candidate: ICE 候选
- voice_end: 结束语音通话
"""
service = PracticeRoomService(db)
# 获取房间
room = await service.get_room_by_code(room_code.upper())
if not room:
raise HTTPException(status_code=404, detail="房间不存在")
# 检查用户是否在房间中
user_role = room.get_user_role(current_user.id)
if not user_role:
raise HTTPException(status_code=403, detail="您不是房间参与者")
# 验证信令类型
valid_signal_types = ["voice_start", "voice_offer", "voice_answer", "ice_candidate", "voice_end"]
if request.signal_type not in valid_signal_types:
raise HTTPException(status_code=400, detail=f"无效的信令类型,必须是: {', '.join(valid_signal_types)}")
# 发送信令消息(作为系统消息存储,用于 SSE 推送)
message = await service.send_message(
room_id=room.id,
user_id=current_user.id,
content=None, # 信令消息不需要文本内容
role_name=None,
message_type=request.signal_type,
extra_data=request.payload
)
return {
"code": 200,
"message": "信令发送成功",
"data": {
"signal_type": request.signal_type,
"sequence": message.sequence
}
}
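For reference, a client-side sketch of a request this endpoint accepts. The raw fetch call and token handling are illustrative assumptions (the frontend actually goes through its request wrapper); the body shape follows WebRTCSignalRequest above:

```ts
// Post one ICE candidate to the signaling endpoint.
async function postIceCandidate(roomCode: string, candidate: RTCIceCandidate, token: string) {
  const res = await fetch(`/api/v1/practice/rooms/${roomCode}/signal`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
    body: JSON.stringify({ signal_type: 'ice_candidate', payload: candidate.toJSON() })
  })
  if (!res.ok) throw new Error(`Signal failed: ${res.status}`)
  return res.json() // { code: 200, message, data: { signal_type, sequence } }
}
```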
@router.get("/{room_code}/messages", summary="获取消息列表")
async def get_messages(
room_code: str,

View File

@@ -271,36 +271,48 @@ class PracticeRoomService:
self,
room_id: int,
user_id: int,
content: str,
role_name: Optional[str] = None
content: Optional[str],
role_name: Optional[str] = None,
message_type: Optional[str] = None,
extra_data: Optional[dict] = None
) -> PracticeRoomMessage:
"""
发送聊天消息
发送聊天消息或信令消息
Args:
room_id: 房间ID
user_id: 发送者ID
content: 消息内容
role_name: 角色名称
message_type: 消息类型(默认为 chat
extra_data: 额外数据(用于 WebRTC 信令等)
Returns:
PracticeRoomMessage: 消息对象
"""
        import json
        # Get the next message sequence number
        sequence = await self._get_next_sequence(room_id)
        # For signaling messages, serialize extra_data into content
        actual_content = content
        if extra_data and not content:
            actual_content = json.dumps(extra_data)
message = PracticeRoomMessage(
room_id=room_id,
user_id=user_id,
message_type=self.MSG_TYPE_CHAT,
content=content,
message_type=message_type or self.MSG_TYPE_CHAT,
content=actual_content,
role_name=role_name,
sequence=sequence
)
self.db.add(message)
        # Update room statistics
        # Only chat messages update the room statistics
if (message_type or self.MSG_TYPE_CHAT) == self.MSG_TYPE_CHAT:
room = await self.get_room_by_id(room_id)
if room:
room.total_turns += 1

View File

@@ -76,17 +76,35 @@ export interface RoomDetailResponse {
is_host: boolean
}
export type MessageType =
| 'chat'
| 'system'
| 'join'
| 'leave'
| 'start'
| 'end'
| 'voice_start'
| 'voice_offer'
| 'voice_answer'
| 'ice_candidate'
| 'voice_end'
export interface RoomMessage {
id: number
room_id: number
user_id?: number
message_type: 'chat' | 'system' | 'join' | 'leave' | 'start' | 'end'
message_type: MessageType
content?: string
role_name?: string
sequence: number
created_at: string
}
export interface WebRTCSignalRequest {
signal_type: 'voice_start' | 'voice_offer' | 'voice_answer' | 'ice_candidate' | 'voice_end'
payload: Record<string, any>
}
export interface MessagesResponse {
messages: RoomMessage[]
room_status: string
@@ -185,3 +203,20 @@ export function generateShareLink(roomCode: string): string {
const baseUrl = window.location.origin
return `${baseUrl}/trainee/duo-practice/join/${roomCode}`
}
/**
 * Send a WebRTC signal
 */
export function sendSignal(roomCode: string, signalType: string, payload: Record<string, any>) {
return request.post(`/api/v1/practice/rooms/${roomCode}/signal`, {
signal_type: signalType,
payload
})
}
/**
 * Fetch the practice report
 */
export function getPracticeReport(roomCode: string) {
return request.get(`/api/v1/practice/rooms/${roomCode}/report`)
}
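A small usage sketch combining the new signaling helper with the existing report call; the wrapper function is hypothetical:

```ts
import { sendSignal, getPracticeReport } from '@/api/duoPractice'

// Hang up an active call, then fetch the room's practice report.
async function hangUpAndFetchReport(roomCode: string) {
  await sendSignal(roomCode, 'voice_end', { reason: 'ended' })
  return getPracticeReport(roomCode)
}
```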

View File

@@ -0,0 +1,462 @@
/**
 * Voice-call composable
 *
 * Responsibilities:
 * - Glue the WebRTC manager to the signaling service
 * - Manage call state
 * - Handle speech-to-text
 */
import { ref, computed, onUnmounted } from 'vue'
import { ElMessage } from 'element-plus'
import { WebRTCManager, createWebRTCManager, type ConnectionState } from '@/utils/webrtc'
import request from '@/api/request'
export type VoiceCallState = 'idle' | 'requesting' | 'ringing' | 'connecting' | 'connected' | 'ended'
export interface UseVoiceCallOptions {
roomCode: string
onTranscript?: (text: string, isFinal: boolean) => void
onRemoteTranscript?: (text: string) => void
}
export function useVoiceCall(options: UseVoiceCallOptions) {
const { roomCode, onTranscript, onRemoteTranscript } = options
  // ==================== State ====================
  const callState = ref<VoiceCallState>('idle')
  const connectionState = ref<ConnectionState>('idle')
  const isMuted = ref(false)
  const isRemoteMuted = ref(false)
  const localAudioLevel = ref(0)
  const remoteAudioLevel = ref(0)
  const callDuration = ref(0)
  const errorMessage = ref<string | null>(null)
  // Speech recognition
  const isTranscribing = ref(false)
  const currentTranscript = ref('')
  // Internal state
  let webrtcManager: WebRTCManager | null = null
  let recognition: any = null // SpeechRecognition instance
  let callTimer: number | null = null
  let audioLevelTimer: number | null = null
  let pendingOffer: RTCSessionDescriptionInit | null = null // last offer received via signaling
  let pendingCandidates: RTCIceCandidateInit[] = [] // candidates that arrive before the manager exists
  // ==================== Computed ====================
  const isCallActive = computed(() =>
    ['requesting', 'ringing', 'connecting', 'connected'].includes(callState.value)
  )
  const canStartCall = computed(() => callState.value === 'idle')
  const canEndCall = computed(() => isCallActive.value)
  // ==================== Signaling API ====================
async function sendSignal(signalType: string, payload: any) {
try {
await request.post(`/api/v1/practice/rooms/${roomCode}/signal`, {
signal_type: signalType,
payload
})
} catch (error) {
      console.error('[VoiceCall] Failed to send signal:', error)
throw error
}
}
  // ==================== Call control ====================
  /**
   * Start a voice call
   */
  async function startCall() {
    if (!canStartCall.value) {
      console.warn('[VoiceCall] Cannot start a call in the current state:', callState.value)
      return
    }
    try {
      callState.value = 'requesting'
      errorMessage.value = null
      // Create the WebRTC manager
      webrtcManager = createWebRTCManager({
        onConnectionStateChange: handleConnectionStateChange,
        onIceCandidate: handleIceCandidate,
        onRemoteStream: handleRemoteStream,
        onError: handleError
      })
      // Create the offer
      const offer = await webrtcManager.createOffer()
      // Announce the call
      await sendSignal('voice_start', {})
      // Send the offer
      await sendSignal('voice_offer', {
        type: offer.type,
        sdp: offer.sdp
      })
      callState.value = 'ringing'
      ElMessage.info('Calling the other participant...')
    } catch (error: any) {
      console.error('[VoiceCall] Failed to start call:', error)
      errorMessage.value = error.message || 'Failed to start call'
      callState.value = 'idle'
      webrtcManager?.close()
      webrtcManager = null
      ElMessage.error(errorMessage.value)
    }
  }
  /**
   * Answer an incoming voice call. When called without an argument (e.g.
   * from a click handler), fall back to the last offer received via signaling.
   */
  async function answerCall(offer?: RTCSessionDescriptionInit) {
    const sdpOffer = offer && offer.sdp ? offer : pendingOffer
    if (!sdpOffer) {
      console.warn('[VoiceCall] No offer to answer')
      return
    }
    try {
      callState.value = 'connecting'
      errorMessage.value = null
      // Create the WebRTC manager
      webrtcManager = createWebRTCManager({
        onConnectionStateChange: handleConnectionStateChange,
        onIceCandidate: handleIceCandidate,
        onRemoteStream: handleRemoteStream,
        onError: handleError
      })
      // Handle the offer and create an answer
      const answer = await webrtcManager.handleOffer(sdpOffer)
      // Flush ICE candidates that arrived before the manager existed
      for (const candidate of pendingCandidates) {
        await webrtcManager.addIceCandidate(candidate)
      }
      pendingCandidates = []
      // Send the answer
      await sendSignal('voice_answer', {
        type: answer.type,
        sdp: answer.sdp
      })
      ElMessage.success('Call answered')
    } catch (error: any) {
      console.error('[VoiceCall] Failed to answer call:', error)
      errorMessage.value = error.message || 'Failed to answer call'
      callState.value = 'idle'
      webrtcManager?.close()
      webrtcManager = null
      ElMessage.error(errorMessage.value)
    }
  }
  /**
   * Reject an incoming call
   */
  async function rejectCall() {
    try {
      await sendSignal('voice_end', { reason: 'rejected' })
      pendingOffer = null
      callState.value = 'idle'
    } catch (error) {
      console.error('[VoiceCall] Failed to reject call:', error)
    }
  }
  /**
   * End the call
   */
  async function endCall() {
    try {
      await sendSignal('voice_end', { reason: 'ended' })
    } catch (error) {
      console.error('[VoiceCall] Failed to send end signal:', error)
    }
    cleanup()
    callState.value = 'ended'
    // Return to idle after a short delay
    setTimeout(() => {
      callState.value = 'idle'
    }, 1000)
  }
  /**
   * Toggle mute
   */
  function toggleMute() {
    if (webrtcManager) {
      isMuted.value = !isMuted.value
      webrtcManager.setMuted(isMuted.value)
    }
  }
  // ==================== Signal handling ====================
  /**
   * Handle an incoming signaling message
   */
  async function handleSignal(signalType: string, payload: any, fromUserId: number) {
    console.log('[VoiceCall] Signal received:', signalType)
    switch (signalType) {
      case 'voice_start':
        // Incoming call request
        if (callState.value === 'idle') {
          callState.value = 'ringing'
          ElMessage.info('Incoming voice call')
        }
        break
      case 'voice_offer':
        // Offer received: remember it, then auto-answer
        if (callState.value === 'ringing' || callState.value === 'idle') {
          pendingOffer = { type: payload.type, sdp: payload.sdp }
          await answerCall(pendingOffer)
        }
        break
      case 'voice_answer':
        // Answer received
        if (webrtcManager && callState.value === 'ringing') {
          await webrtcManager.handleAnswer({
            type: payload.type,
            sdp: payload.sdp
          })
        }
        break
      case 'ice_candidate':
        // ICE candidate received; queue it if the manager does not exist yet
        if (payload.candidate) {
          if (webrtcManager) {
            await webrtcManager.addIceCandidate(payload)
          } else {
            pendingCandidates.push(payload)
          }
        }
        break
      case 'voice_end':
        // The other side ended the call
        cleanup()
        callState.value = 'ended'
        ElMessage.info('Call ended')
        setTimeout(() => {
          callState.value = 'idle'
        }, 1000)
        break
    }
  }
  // ==================== WebRTC callbacks ====================
  function handleConnectionStateChange(state: ConnectionState) {
    connectionState.value = state
    if (state === 'connected') {
      callState.value = 'connected'
      startCallTimer()
      startAudioLevelMonitor()
      startSpeechRecognition()
      ElMessage.success('Voice call connected')
    } else if (state === 'failed' || state === 'disconnected') {
      if (callState.value === 'connected') {
        ElMessage.warning('Call connection lost')
      }
    }
  }
  async function handleIceCandidate(candidate: RTCIceCandidate) {
    try {
      await sendSignal('ice_candidate', candidate.toJSON())
    } catch (error) {
      console.error('[VoiceCall] Failed to send ICE candidate:', error)
    }
  }
  function handleRemoteStream(stream: MediaStream) {
    console.log('[VoiceCall] Remote audio stream received')
    // Play the remote audio
    const audio = new Audio()
    audio.srcObject = stream
    audio.play().catch(e => console.error('[VoiceCall] Failed to play remote audio:', e))
  }
  function handleError(error: Error) {
    console.error('[VoiceCall] WebRTC error:', error)
    errorMessage.value = error.message
  }
  // ==================== Speech recognition ====================
  function startSpeechRecognition() {
    // Check browser support (the Web Speech API is prefixed in Chromium)
    const SpeechRecognition = (window as any).SpeechRecognition || (window as any).webkitSpeechRecognition
    if (!SpeechRecognition) {
      console.warn('[VoiceCall] Speech recognition is not supported in this browser')
      return
    }
    recognition = new SpeechRecognition()
    recognition.continuous = true
    recognition.interimResults = true
    recognition.lang = 'zh-CN'
    recognition.onstart = () => {
      isTranscribing.value = true
      console.log('[VoiceCall] Speech recognition started')
    }
    recognition.onresult = (event: any) => {
      let interimTranscript = ''
      let finalTranscript = ''
      for (let i = event.resultIndex; i < event.results.length; i++) {
        const transcript = event.results[i][0].transcript
        if (event.results[i].isFinal) {
          finalTranscript += transcript
        } else {
          interimTranscript += transcript
        }
      }
      currentTranscript.value = interimTranscript || finalTranscript
      if (finalTranscript) {
        onTranscript?.(finalTranscript, true)
      } else if (interimTranscript) {
        onTranscript?.(interimTranscript, false)
      }
    }
    recognition.onerror = (event: any) => {
      console.error('[VoiceCall] Speech recognition error:', event.error)
      if (event.error !== 'no-speech') {
        isTranscribing.value = false
      }
    }
    recognition.onend = () => {
      // Restart recognition while the call is still active
      if (recognition && callState.value === 'connected' && !isMuted.value) {
        try {
          recognition.start()
        } catch (error) {
          console.warn('[VoiceCall] Failed to restart speech recognition:', error)
        }
      } else {
        isTranscribing.value = false
      }
    }
    try {
      recognition.start()
    } catch (error) {
      console.error('[VoiceCall] Failed to start speech recognition:', error)
    }
  }
  function stopSpeechRecognition() {
    if (recognition) {
      recognition.stop()
      recognition = null
    }
    isTranscribing.value = false
  }
  // ==================== Helpers ====================
function startCallTimer() {
callDuration.value = 0
callTimer = window.setInterval(() => {
callDuration.value++
}, 1000)
}
function stopCallTimer() {
if (callTimer) {
clearInterval(callTimer)
callTimer = null
}
}
function startAudioLevelMonitor() {
audioLevelTimer = window.setInterval(async () => {
if (webrtcManager) {
const localStream = webrtcManager.getLocalStream()
const remoteStream = webrtcManager.getRemoteStream()
if (localStream) {
localAudioLevel.value = await webrtcManager.getAudioLevel(localStream)
}
if (remoteStream) {
remoteAudioLevel.value = await webrtcManager.getAudioLevel(remoteStream)
}
}
}, 100)
}
function stopAudioLevelMonitor() {
if (audioLevelTimer) {
clearInterval(audioLevelTimer)
audioLevelTimer = null
}
}
function formatDuration(seconds: number): string {
const mins = Math.floor(seconds / 60)
const secs = seconds % 60
return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`
}
  // ==================== Cleanup ====================
  function cleanup() {
    stopCallTimer()
    stopAudioLevelMonitor()
    stopSpeechRecognition()
    webrtcManager?.close()
    webrtcManager = null
    pendingOffer = null
    pendingCandidates = []
    isMuted.value = false
    isRemoteMuted.value = false
    localAudioLevel.value = 0
    remoteAudioLevel.value = 0
    currentTranscript.value = ''
  }
  // Clean up when the component unmounts
onUnmounted(() => {
if (isCallActive.value) {
endCall()
}
cleanup()
})
  // ==================== Returned API ====================
  return {
    // state
    callState,
    connectionState,
    isMuted,
    isRemoteMuted,
    localAudioLevel,
    remoteAudioLevel,
    callDuration,
    errorMessage,
    isTranscribing,
    currentTranscript,
    // computed
    isCallActive,
    canStartCall,
    canEndCall,
    // methods
    startCall,
    answerCall,
    rejectCall,
    endCall,
    toggleMute,
    handleSignal,
    formatDuration
  }
}
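The composable stays transport-agnostic: the host component must feed incoming signaling messages into `handleSignal` and render from `callState`. A minimal wiring sketch, assuming some existing message feed; the `subscribe` callback is a stand-in for the room's SSE/polling subscription, and this must run inside a component setup because the composable registers `onUnmounted`:

```ts
import { useVoiceCall } from '@/composables/useVoiceCall'
import type { RoomMessage } from '@/api/duoPractice'

const SIGNAL_TYPES = ['voice_start', 'voice_offer', 'voice_answer', 'ice_candidate', 'voice_end']

// Call from a component's setup; `subscribe` is a hypothetical message feed.
export function setupVoiceCall(roomCode: string, subscribe: (cb: (msg: RoomMessage) => void) => void) {
  const voiceCall = useVoiceCall({
    roomCode,
    onTranscript: (text, isFinal) => {
      if (isFinal) console.log('[Demo] final transcript:', text)
    }
  })
  subscribe((msg) => {
    if (!SIGNAL_TYPES.includes(msg.message_type)) return
    const payload = msg.content ? JSON.parse(msg.content) : {}
    voiceCall.handleSignal(msg.message_type, payload, msg.user_id || 0)
  })
  return voiceCall
}
```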

View File

@@ -0,0 +1,324 @@
/**
 * WebRTC connection manager
 *
 * Responsibilities:
 * - Manage the RTCPeerConnection lifecycle
 * - Handle the SDP exchange
 * - Collect and apply ICE candidates
 * - Manage audio streams
 */
export type ConnectionState = 'idle' | 'connecting' | 'connected' | 'disconnected' | 'failed'
export interface WebRTCConfig {
iceServers?: RTCIceServer[]
onLocalStream?: (stream: MediaStream) => void
onRemoteStream?: (stream: MediaStream) => void
onConnectionStateChange?: (state: ConnectionState) => void
onIceCandidate?: (candidate: RTCIceCandidate) => void
onError?: (error: Error) => void
}
// Default ICE servers
const DEFAULT_ICE_SERVERS: RTCIceServer[] = [
{ urls: 'stun:stun.l.google.com:19302' },
{ urls: 'stun:stun1.l.google.com:19302' },
{ urls: 'stun:stun2.l.google.com:19302' }
]
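Note these defaults are STUN-only, which is enough for address discovery but not for peers behind symmetric NATs or strict firewalls; production setups usually add a TURN relay. A sketch of what that configuration would look like (the TURN URL and credentials are placeholders, not part of this commit):

```ts
// Placeholder TURN relay alongside a public STUN server.
const managerWithTurn = new WebRTCManager({
  iceServers: [
    { urls: 'stun:stun.l.google.com:19302' },
    { urls: 'turn:turn.example.com:3478', username: 'demo', credential: 'secret' }
  ]
})
```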
export class WebRTCManager {
  private peerConnection: RTCPeerConnection | null = null
  private localStream: MediaStream | null = null
  private remoteStream: MediaStream | null = null
  private config: WebRTCConfig
  private connectionState: ConnectionState = 'idle'
  private pendingIceCandidates: RTCIceCandidate[] = []
  // Cached audio analysers, one per stream (see getAudioLevel)
  private analysers = new Map<MediaStream, { audioContext: AudioContext; analyser: AnalyserNode }>()
constructor(config: WebRTCConfig = {}) {
this.config = {
iceServers: DEFAULT_ICE_SERVERS,
...config
}
}
  /**
   * Current connection state
   */
  getConnectionState(): ConnectionState {
    return this.connectionState
  }
  /**
   * Local audio stream
   */
  getLocalStream(): MediaStream | null {
    return this.localStream
  }
  /**
   * Remote audio stream
   */
  getRemoteStream(): MediaStream | null {
    return this.remoteStream
  }
  /**
   * Initialize the local audio stream
   */
  async initLocalStream(): Promise<MediaStream> {
    try {
      this.localStream = await navigator.mediaDevices.getUserMedia({
        audio: {
          echoCancellation: true,
          noiseSuppression: true,
          autoGainControl: true
        },
        video: false
      })
      this.config.onLocalStream?.(this.localStream)
      return this.localStream
    } catch (error) {
      const err = error instanceof Error ? error : new Error('Failed to access the microphone')
      this.config.onError?.(err)
      throw err
    }
  }
  /**
   * Create the PeerConnection
   */
  private createPeerConnection(): RTCPeerConnection {
    const pc = new RTCPeerConnection({
      iceServers: this.config.iceServers
    })
    // Surface ICE candidates to the caller
    pc.onicecandidate = (event) => {
      if (event.candidate) {
        console.log('[WebRTC] ICE candidate:', event.candidate.candidate?.substring(0, 50))
        this.config.onIceCandidate?.(event.candidate)
      }
    }
    // Track connection state changes
    pc.onconnectionstatechange = () => {
      console.log('[WebRTC] Connection state:', pc.connectionState)
      this.updateConnectionState(pc.connectionState)
    }
    // Track the ICE connection state
    pc.oniceconnectionstatechange = () => {
      console.log('[WebRTC] ICE connection state:', pc.iceConnectionState)
      if (pc.iceConnectionState === 'failed') {
        this.updateConnectionState('failed')
      }
    }
    // Receive the remote stream
    pc.ontrack = (event) => {
      console.log('[WebRTC] Remote track received')
      if (event.streams && event.streams[0]) {
        this.remoteStream = event.streams[0]
        this.config.onRemoteStream?.(this.remoteStream)
      }
    }
    return pc
  }
  /**
   * Update the connection state
   */
private updateConnectionState(state: RTCPeerConnectionState | string) {
const stateMap: Record<string, ConnectionState> = {
'new': 'connecting',
'connecting': 'connecting',
'connected': 'connected',
'disconnected': 'disconnected',
'failed': 'failed',
'closed': 'disconnected'
}
this.connectionState = stateMap[state] || 'idle'
this.config.onConnectionStateChange?.(this.connectionState)
}
  /**
   * Create an offer (called by the initiator)
   */
  async createOffer(): Promise<RTCSessionDescriptionInit> {
    if (!this.localStream) {
      await this.initLocalStream()
    }
    this.peerConnection = this.createPeerConnection()
    this.updateConnectionState('connecting')
    // Add the local audio tracks
    this.localStream!.getTracks().forEach(track => {
      this.peerConnection!.addTrack(track, this.localStream!)
    })
    // Create the offer
    const offer = await this.peerConnection.createOffer()
    await this.peerConnection.setLocalDescription(offer)
    console.log('[WebRTC] Offer created')
    return offer
  }
  /**
   * Handle an offer (called by the receiver)
   */
  async handleOffer(offer: RTCSessionDescriptionInit): Promise<RTCSessionDescriptionInit> {
    if (!this.localStream) {
      await this.initLocalStream()
    }
    this.peerConnection = this.createPeerConnection()
    this.updateConnectionState('connecting')
    // Add the local audio tracks
    this.localStream!.getTracks().forEach(track => {
      this.peerConnection!.addTrack(track, this.localStream!)
    })
    // Set the remote description
    await this.peerConnection.setRemoteDescription(new RTCSessionDescription(offer))
    // Apply any queued ICE candidates
    for (const candidate of this.pendingIceCandidates) {
      await this.peerConnection.addIceCandidate(candidate)
    }
    this.pendingIceCandidates = []
    // Create the answer
    const answer = await this.peerConnection.createAnswer()
    await this.peerConnection.setLocalDescription(answer)
    console.log('[WebRTC] Answer created')
    return answer
  }
  /**
   * Handle an answer (called by the initiator)
   */
  async handleAnswer(answer: RTCSessionDescriptionInit): Promise<void> {
    if (!this.peerConnection) {
      throw new Error('PeerConnection not initialized')
    }
    await this.peerConnection.setRemoteDescription(new RTCSessionDescription(answer))
    // Apply any queued ICE candidates
    for (const candidate of this.pendingIceCandidates) {
      await this.peerConnection.addIceCandidate(candidate)
    }
    this.pendingIceCandidates = []
    console.log('[WebRTC] Answer handled')
  }
  /**
   * Add an ICE candidate
   */
  async addIceCandidate(candidate: RTCIceCandidateInit): Promise<void> {
    const iceCandidate = new RTCIceCandidate(candidate)
    if (this.peerConnection && this.peerConnection.remoteDescription) {
      await this.peerConnection.addIceCandidate(iceCandidate)
      console.log('[WebRTC] ICE candidate added')
    } else {
      // Remote description not set yet; queue the candidate
      this.pendingIceCandidates.push(iceCandidate)
      console.log('[WebRTC] ICE candidate queued')
    }
  }
  /**
   * Mute or unmute the local audio
   */
setMuted(muted: boolean): void {
if (this.localStream) {
this.localStream.getAudioTracks().forEach(track => {
track.enabled = !muted
})
}
}
  /**
   * Whether the local audio is muted
   */
isMuted(): boolean {
if (this.localStream) {
const audioTrack = this.localStream.getAudioTracks()[0]
return audioTrack ? !audioTrack.enabled : true
}
return true
}
  /**
   * Get the audio level of a stream (for the volume indicator).
   * The analyser is cached per stream: creating a fresh AudioContext on
   * every poll is expensive, and sampling it immediately after creation
   * tends to read all zeros before any audio has been processed.
   */
  async getAudioLevel(stream: MediaStream): Promise<number> {
    let entry = this.analysers.get(stream)
    if (!entry) {
      const audioContext = new AudioContext()
      const analyser = audioContext.createAnalyser()
      analyser.fftSize = 256
      audioContext.createMediaStreamSource(stream).connect(analyser)
      entry = { audioContext, analyser }
      this.analysers.set(stream, entry)
    }
    const dataArray = new Uint8Array(entry.analyser.frequencyBinCount)
    entry.analyser.getByteFrequencyData(dataArray)
    // Average volume, normalized to 0-1
    const average = dataArray.reduce((a, b) => a + b, 0) / dataArray.length
    return average / 255
  }
  /**
   * Close the connection
   */
  close(): void {
    console.log('[WebRTC] Closing connection')
    // Stop the local stream
    if (this.localStream) {
      this.localStream.getTracks().forEach(track => track.stop())
      this.localStream = null
    }
    // Stop the remote stream
    if (this.remoteStream) {
      this.remoteStream.getTracks().forEach(track => track.stop())
      this.remoteStream = null
    }
    // Close the PeerConnection
    if (this.peerConnection) {
      this.peerConnection.close()
      this.peerConnection = null
    }
    // Release cached audio analysers
    this.analysers.forEach(({ audioContext }) => {
      audioContext.close().catch(() => {})
    })
    this.analysers.clear()
    this.pendingIceCandidates = []
    this.updateConnectionState('disconnected')
  }
  /**
   * Reset the manager
   */
reset(): void {
this.close()
this.connectionState = 'idle'
}
}
// Factory helper (each call creates a new manager)
export function createWebRTCManager(config?: WebRTCConfig): WebRTCManager {
return new WebRTCManager(config)
}
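Because signaling is fully externalized through callbacks, two managers can be wired back-to-back in a single page, which makes a convenient smoke test of the SDP/ICE plumbing without touching the backend. A sketch under the assumption that microphone permission is granted (both managers open the same mic):

```ts
import { WebRTCManager, createWebRTCManager } from '@/utils/webrtc'

// Loopback smoke test: relay SDP and ICE between two local managers.
async function loopbackTest() {
  let caller: WebRTCManager
  const callee = createWebRTCManager({
    onIceCandidate: (c) => caller.addIceCandidate(c.toJSON()),
    onConnectionStateChange: (s) => console.log('[Loopback] callee:', s)
  })
  caller = createWebRTCManager({
    onIceCandidate: (c) => callee.addIceCandidate(c.toJSON()),
    onConnectionStateChange: (s) => console.log('[Loopback] caller:', s)
  })
  const offer = await caller.createOffer()        // grabs the mic, starts ICE gathering
  const answer = await callee.handleOffer(offer)  // early candidates were queued, then flushed
  await caller.handleAnswer(answer)               // both sides should reach 'connected'
}
```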

View File

@@ -18,6 +18,16 @@
</div>
</div>
<div class="header-right">
<!-- 语音通话状态 -->
<div class="voice-status" v-if="voiceCall.isCallActive.value">
<el-icon class="voice-icon" :class="{ active: voiceCall.callState.value === 'connected' }">
<Microphone />
</el-icon>
<span class="call-duration" v-if="voiceCall.callState.value === 'connected'">
{{ voiceCall.formatDuration(voiceCall.callDuration.value) }}
</span>
<span class="call-status" v-else>{{ voiceCallStatusText }}</span>
</div>
<el-tag :type="statusType" size="large">{{ statusText }}</el-tag>
</div>
</div>
@@ -81,6 +91,100 @@
</el-button>
</div>
<!-- 语音通话控制 -->
<div class="voice-control" v-if="store.isPracticing">
<h4>语音通话</h4>
<!-- 未通话状态 -->
<div class="voice-idle" v-if="voiceCall.callState.value === 'idle'">
<el-button
type="success"
size="large"
round
@click="voiceCall.startCall"
>
<el-icon><Microphone /></el-icon>
发起语音
</el-button>
<p class="voice-hint">点击发起语音通话实时对话</p>
</div>
<!-- 呼叫中状态 -->
<div class="voice-ringing" v-else-if="voiceCall.callState.value === 'ringing'">
<div class="ringing-animation">
<el-icon :size="48" class="pulse"><Microphone /></el-icon>
</div>
<p>正在呼叫对方...</p>
<el-button type="danger" round @click="voiceCall.endCall">取消</el-button>
</div>
<!-- 来电状态 -->
<div class="voice-incoming" v-else-if="voiceCall.callState.value === 'ringing' && !isCallInitiator">
<div class="incoming-animation">
<el-icon :size="48" class="shake"><PhoneFilled /></el-icon>
</div>
<p>对方发起语音通话</p>
<div class="incoming-actions">
<el-button type="success" circle size="large" @click="voiceCall.answerCall">
<el-icon><Microphone /></el-icon>
</el-button>
<el-button type="danger" circle size="large" @click="voiceCall.rejectCall">
<el-icon><Close /></el-icon>
</el-button>
</div>
</div>
<!-- 连接中状态 -->
<div class="voice-connecting" v-else-if="voiceCall.callState.value === 'connecting'">
<el-icon :size="32" class="spin"><Loading /></el-icon>
<p>正在连接...</p>
</div>
<!-- 通话中状态 -->
<div class="voice-connected" v-else-if="voiceCall.callState.value === 'connected'">
<div class="call-info">
<div class="call-timer">{{ voiceCall.formatDuration(voiceCall.callDuration.value) }}</div>
<div class="audio-levels">
<div class="level-item">
<span></span>
<div class="level-bar">
<div class="level-fill" :style="{ width: (voiceCall.localAudioLevel.value * 100) + '%' }"></div>
</div>
</div>
<div class="level-item">
<span>对方</span>
<div class="level-bar">
<div class="level-fill remote" :style="{ width: (voiceCall.remoteAudioLevel.value * 100) + '%' }"></div>
</div>
</div>
</div>
</div>
<!-- 实时转写 -->
<div class="transcription" v-if="voiceCall.currentTranscript.value">
<el-icon><Edit /></el-icon>
<span>{{ voiceCall.currentTranscript.value }}</span>
</div>
<div class="call-controls">
<el-button
:type="voiceCall.isMuted.value ? 'danger' : 'default'"
circle
size="large"
@click="voiceCall.toggleMute"
>
<el-icon>
<MuteNotification v-if="voiceCall.isMuted.value" />
<Microphone v-else />
</el-icon>
</el-button>
<el-button type="danger" circle size="large" @click="voiceCall.endCall">
<el-icon><Close /></el-icon>
</el-button>
</div>
</div>
</div>
<!-- 操作按钮 -->
<div class="action-buttons">
<template v-if="store.isHost && store.isReady">
@@ -170,14 +274,46 @@
import { ref, computed, onMounted, onUnmounted, watch, nextTick } from 'vue'
import { useRoute, useRouter } from 'vue-router'
import { ElMessageBox } from 'element-plus'
import { ArrowLeft, CopyDocument, Plus, Share } from '@element-plus/icons-vue'
import {
ArrowLeft, CopyDocument, Plus, Share, Microphone,
Close, Loading, MuteNotification, Edit, PhoneFilled
} from '@element-plus/icons-vue'
import { useDuoPracticeStore } from '@/stores/duoPracticeStore'
import { useVoiceCall } from '@/composables/useVoiceCall'
import type { RoomMessage } from '@/api/duoPractice'
const route = useRoute()
const router = useRouter()
const store = useDuoPracticeStore()
const messagesContainer = ref<HTMLElement | null>(null)
const isCallInitiator = ref(false)
// Voice call (roomCode is captured once at setup; the room code does not change afterwards)
const voiceCall = useVoiceCall({
  roomCode: store.roomCode,
  onTranscript: handleTranscript
})
// Start a call and remember that this side initiated it
function handleStartCall() {
  isCallInitiator.value = true
  voiceCall.startCall()
}
// Reset the initiator flag whenever the call returns to idle
watch(() => voiceCall.callState.value, (state) => {
  if (state === 'idle') isCallInitiator.value = false
})
// Status text for the voice call
const voiceCallStatusText = computed(() => {
  const statusMap: Record<string, string> = {
    'idle': 'Not connected',
    'requesting': 'Requesting...',
    'ringing': 'Calling...',
    'connecting': 'Connecting...',
    'connected': 'In call',
    'ended': 'Ended'
  }
  return statusMap[voiceCall.callState.value] || ''
})
// Handle speech transcription
function handleTranscript(text: string, isFinal: boolean) {
  if (isFinal && text.trim()) {
    // Send the final transcript as a chat message
    store.sendMessage(text)
  }
}
// Computed
const statusType = computed(() => {
@@ -277,10 +413,28 @@ const scrollToBottom = () => {
})
}
// Watch message changes and auto-scroll to the bottom
watch(() => store.messages.length, () => {
// Watch message changes: dispatch signaling messages, then auto-scroll
watch(() => store.messages, (messages) => {
  // Handle the newest message
  const latestMsg = messages[messages.length - 1]
  if (latestMsg) {
    // Check whether it is a signaling message
    const signalTypes = ['voice_start', 'voice_offer', 'voice_answer', 'ice_candidate', 'voice_end']
    if (signalTypes.includes(latestMsg.message_type)) {
      // Ignore signals we sent ourselves
      const myUserId = store.isHost ? store.hostUser?.id : store.guestUser?.id
      if (latestMsg.user_id !== myUserId) {
        try {
          const payload = latestMsg.content ? JSON.parse(latestMsg.content) : {}
          voiceCall.handleSignal(latestMsg.message_type, payload, latestMsg.user_id || 0)
        } catch (e) {
          console.error('[DuoPracticeRoom] Failed to parse signal:', e)
        }
      }
    }
  }
  scrollToBottom()
})
}, { deep: true })
// Initialization
onMounted(async () => {
@@ -296,6 +450,10 @@ onMounted(async () => {
onUnmounted(() => {
store.stopPolling()
  // End the voice call if still active
if (voiceCall.isCallActive.value) {
voiceCall.endCall()
}
})
</script>
@@ -340,6 +498,41 @@ onUnmounted(() => {
}
}
}
.header-right {
display: flex;
align-items: center;
gap: 16px;
.voice-status {
display: flex;
align-items: center;
gap: 8px;
padding: 6px 12px;
background: #f0f9eb;
border-radius: 20px;
.voice-icon {
color: #67c23a;
&.active {
animation: pulse 1.5s infinite;
}
}
.call-duration {
font-family: monospace;
font-size: 14px;
font-weight: 500;
color: #67c23a;
}
.call-status {
font-size: 13px;
color: #67c23a;
}
}
}
}
.room-body {
@@ -442,6 +635,137 @@ onUnmounted(() => {
}
}
.voice-control {
margin-top: 24px;
padding: 20px;
background: #f8f9fc;
border-radius: 12px;
h4 {
margin: 0 0 16px 0;
font-size: 14px;
font-weight: 600;
color: #333;
}
.voice-idle {
text-align: center;
.voice-hint {
margin-top: 12px;
font-size: 12px;
color: #999;
}
}
.voice-ringing, .voice-incoming, .voice-connecting {
text-align: center;
p {
margin: 12px 0;
color: #666;
}
.ringing-animation, .incoming-animation {
.el-icon {
color: #67c23a;
}
.pulse {
animation: pulse 1.5s infinite;
}
.shake {
animation: shake 0.5s infinite;
}
}
.incoming-actions {
display: flex;
justify-content: center;
gap: 24px;
margin-top: 16px;
}
}
.voice-connecting {
.spin {
animation: spin 1s linear infinite;
color: #409eff;
}
}
.voice-connected {
.call-info {
.call-timer {
text-align: center;
font-size: 24px;
font-weight: 600;
font-family: monospace;
color: #67c23a;
margin-bottom: 16px;
}
.audio-levels {
.level-item {
display: flex;
align-items: center;
gap: 8px;
margin-bottom: 8px;
span {
width: 40px;
font-size: 12px;
color: #666;
}
.level-bar {
flex: 1;
height: 6px;
background: #e0e0e0;
border-radius: 3px;
overflow: hidden;
.level-fill {
height: 100%;
background: linear-gradient(90deg, #67c23a, #95d475);
transition: width 0.1s ease;
&.remote {
background: linear-gradient(90deg, #409eff, #79bbff);
}
}
}
}
}
}
.transcription {
display: flex;
align-items: flex-start;
gap: 8px;
padding: 12px;
background: #fff;
border-radius: 8px;
margin: 16px 0;
font-size: 13px;
color: #666;
.el-icon {
color: #409eff;
margin-top: 2px;
}
}
.call-controls {
display: flex;
justify-content: center;
gap: 24px;
margin-top: 16px;
}
}
}
.action-buttons {
margin-top: auto;
display: flex;
@@ -450,6 +774,23 @@ onUnmounted(() => {
}
}
// Animations
@keyframes pulse {
0%, 100% { transform: scale(1); opacity: 1; }
50% { transform: scale(1.1); opacity: 0.8; }
}
@keyframes shake {
0%, 100% { transform: translateX(0); }
25% { transform: translateX(-5px); }
75% { transform: translateX(5px); }
}
@keyframes spin {
from { transform: rotate(0deg); }
to { transform: rotate(360deg); }
}
.chat-panel {
flex: 1;
display: flex;