// voice.js — browser voice-channel client: WebRTC media with HTTP-polled PHP signaling.
// Startup marker: confirms the script tag actually loaded this file.
console.log('voice.js loaded');
|
|
|
|
class VoiceChannel {
|
|
/**
 * Builds the voice handler, wires global key listeners, loads whisper
 * bindings, and schedules an auto-rejoin of any channel recorded in
 * sessionStorage (survives page refresh).
 * @param {*} ws - legacy WebSocket handle; unused (signaling is HTTP polling).
 * @param {Object} [settings] - voice settings; falls back to defaults below.
 */
constructor(ws, settings) {
    // ws is ignored now as we use PHP signaling, but kept for compatibility
    this.settings = settings || {
        mode: 'vox',              // 'vox' (voice-activated) or 'ptt' (push-to-talk)
        pttKey: 'v',
        voxThreshold: 0.1,        // normalized 0..1 mic level that triggers VOX
        inputDevice: 'default',
        outputDevice: 'default',
        inputVolume: 1.0,
        outputVolume: 1.0,
        echoCancellation: true,
        noiseSuppression: true
    };
    console.log('VoiceChannel constructor called with settings:', this.settings);
    this.localStream = null;       // MediaStream sent to peers
    this.analysisStream = null;    // cloned stream used only for VOX level analysis
    this.peers = {}; // userId -> RTCPeerConnection
    this.participants = {}; // userId -> {name}
    this.currentChannelId = null;
    this.myPeerId = null;          // server-assigned peer id, set on join
    this.pollInterval = null;
    this.canSpeak = true;          // server-side speak permission flag
    this.remoteAudios = {}; // userId -> Audio element
    this.isSelfMuted = false;
    this.isDeafened = false;

    this.peerStates = {}; // userId -> { makingOffer, ignoreOffer }  (perfect-negotiation state)

    this.whisperSettings = []; // from DB
    this.whisperPeers = new Set(); // active whisper target peer_ids
    this.isWhispering = false;
    this.whisperListeners = [];

    // Web Audio graph nodes for VOX detection; created lazily in setupVOX().
    this.audioContext = null;
    this.analyser = null;
    this.microphone = null;
    this.scriptProcessor = null;
    this.inputGain = null;

    // Transmission state.
    this.isTalking = false;
    this.pttPressed = false;
    this.voxActive = false;
    this.lastVoiceTime = 0;
    this.voxHoldTime = 500;        // ms of silence before VOX releases

    // Track who is speaking to persist across UI refreshes
    this.speakingUsers = new Set();

    this.setupPTTListeners();
    this.loadWhisperSettings();
    window.addEventListener('beforeunload', () => {
        // We don't want to leave on page refresh if we want persistence
        // but we might want to tell the server we are "still here" soon.
        // Actually, for a simple refresh, we just let the session timeout or re-join.
    });

    // Auto-rejoin if we were in a channel
    // (delayed so the page's other scripts, e.g. window.currentUsername, are set up)
    setTimeout(() => {
        const savedChannelId = sessionStorage.getItem('activeVoiceChannel');
        const savedPeerId = sessionStorage.getItem('activeVoicePeerId');
        if (savedChannelId) {
            console.log('Auto-rejoining voice channel:', savedChannelId);
            // Reuse the old peer id so the server can match our prior session.
            if (savedPeerId) this.myPeerId = savedPeerId;
            this.join(savedChannelId, true); // Pass true to indicate auto-rejoin
        }
    }, 200);
}
|
|
|
|
/**
 * Installs global keydown/keyup handlers for push-to-talk and whisper
 * hotkeys. Keys typed into INPUT/TEXTAREA fields are ignored on keydown.
 * PTT keys match by e.key, e.code, or a special-case for Numpad0.
 */
setupPTTListeners() {
    window.addEventListener('keydown', (e) => {
        // Ignore if in input field
        if (e.target.tagName === 'INPUT' || e.target.tagName === 'TEXTAREA') return;

        // Normal PTT
        if (this.settings.mode === 'ptt') {
            const isMatch = e.key.toLowerCase() === this.settings.pttKey.toLowerCase() ||
                (e.code && e.code.toLowerCase() === this.settings.pttKey.toLowerCase()) ||
                (this.settings.pttKey === '0' && e.code === 'Numpad0');

            if (isMatch) {
                // Guard against keyboard auto-repeat firing repeatedly.
                if (!this.pttPressed) {
                    this.pttPressed = true;
                    this.updateMuteState();
                }
                // PTT key is exclusive: do not also treat it as a whisper key.
                return;
            }
        }

        // Whispers (startWhisper is idempotent while a whisper is active).
        this.whisperSettings.forEach(w => {
            if (e.key.toLowerCase() === w.whisper_key.toLowerCase()) {
                this.startWhisper(w);
            }
        });
    });

    window.addEventListener('keyup', (e) => {
        // NOTE(review): unlike keydown, keyup is not filtered for input
        // fields — presumably intentional so releasing PTT always unmutes.
        if (this.settings.mode === 'ptt') {
            const isMatch = e.key.toLowerCase() === this.settings.pttKey.toLowerCase() ||
                (e.code && e.code.toLowerCase() === this.settings.pttKey.toLowerCase()) ||
                (this.settings.pttKey === '0' && e.code === 'Numpad0');

            if (isMatch) {
                this.pttPressed = false;
                this.updateMuteState();
                return;
            }
        }

        // Whispers
        this.whisperSettings.forEach(w => {
            if (e.key.toLowerCase() === w.whisper_key.toLowerCase()) {
                this.stopWhisper(w);
            }
        });
    });
}
|
|
|
|
async loadWhisperSettings() {
|
|
try {
|
|
const resp = await fetch('api_v1_voice.php?action=get_whispers');
|
|
const data = await resp.json();
|
|
if (data.success) {
|
|
this.whisperSettings = data.whispers;
|
|
console.log('VoiceChannel: Loaded whispers:', this.whisperSettings);
|
|
}
|
|
} catch (e) {
|
|
console.error('Failed to load whispers in VoiceChannel:', e);
|
|
}
|
|
}
|
|
|
|
setupWhisperListeners() {
|
|
// This is called when settings are updated in the UI
|
|
this.loadWhisperSettings();
|
|
}
|
|
|
|
/**
 * Begins a whisper: asks the server which peers match the configured
 * target (user or group), records them as whisper targets, opens peer
 * connections to any we aren't already connected to, and unmutes.
 * No-op if a whisper is already in progress (e.g. key auto-repeat).
 * @param {Object} config - whisper binding row ({target_type, target_id, whisper_key}).
 */
async startWhisper(config) {
    if (this.isWhispering) return;
    console.log('Starting whisper to:', config.target_type, config.target_id);

    try {
        const resp = await fetch(`api_v1_voice.php?action=find_whisper_targets&target_type=${config.target_type}&target_id=${config.target_id}`);
        const data = await resp.json();

        if (data.success && data.targets.length > 0) {
            this.isWhispering = true;
            this.whisperPeers.clear();

            for (const target of data.targets) {
                // Never whisper to ourselves.
                if (target.peer_id === this.myPeerId) continue;
                this.whisperPeers.add(target.peer_id);

                // Establish connection if not exists
                // (targets may be in other channels, so no connection yet)
                if (!this.peers[target.peer_id]) {
                    console.log('Establishing temporary connection for whisper to:', target.peer_id);
                    this.createPeerConnection(target.peer_id, true);
                }
            }

            // Unmutes immediately since isWhispering forces shouldTalk.
            this.updateMuteState();
        } else {
            console.log('No active targets found for whisper.');
        }
    } catch (e) {
        console.error('Whisper start error:', e);
    }
}
|
|
|
|
stopWhisper(config) {
|
|
if (!this.isWhispering) return;
|
|
console.log('Stopping whisper');
|
|
this.isWhispering = false;
|
|
this.whisperPeers.clear();
|
|
this.updateMuteState();
|
|
|
|
// Optionally cleanup peers that are NOT in current channel
|
|
// For now, keep them for future whispers to avoid re-handshake
|
|
}
|
|
|
|
/**
 * Joins a voice channel: leaves the previous one, captures the
 * microphone, sets up VOX analysis, registers with the server, then
 * starts the signaling/presence poll loop.
 * @param {string|number} channelId - target voice channel id.
 * @param {boolean} [isAutoRejoin] - true when re-joining after a page
 *        refresh; bypasses the "already in this channel" short-circuit.
 */
async join(channelId, isAutoRejoin = false) {
    console.log('VoiceChannel.join process started for channel:', channelId, 'isAutoRejoin:', isAutoRejoin);
    if (this.currentChannelId === channelId && !isAutoRejoin) {
        console.log('Already in this channel');
        return;
    }
    // Loose != on purpose: channel ids arrive both as numbers and strings.
    if (this.currentChannelId && this.currentChannelId != channelId) {
        console.log('Leaving previous channel:', this.currentChannelId);
        this.leave();
    }

    // Persist the channel so a page refresh can auto-rejoin (constructor).
    this.currentChannelId = channelId;
    sessionStorage.setItem('activeVoiceChannel', channelId);

    try {
        console.log('Requesting microphone access with device:', this.settings.inputDevice);
        const constraints = {
            audio: {
                echoCancellation: this.settings.echoCancellation,
                noiseSuppression: this.settings.noiseSuppression,
                autoGainControl: true
            },
            video: false
        };
        if (this.settings.inputDevice !== 'default') {
            constraints.audio.deviceId = { exact: this.settings.inputDevice };
        }
        this.localStream = await navigator.mediaDevices.getUserMedia(constraints);
        console.log('Microphone access granted');
        this.setMute(false); // Join unmuted by default (self-mute off)

        // Always setup VOX logic for volume meter and detection
        this.setupVOX();

        // Join via PHP
        console.log('Calling API join...');
        // Reuse a saved peer_id (auto-rejoin) so the server keeps our session.
        const url = `api_v1_voice.php?action=join&room=${channelId}&name=${encodeURIComponent(window.currentUsername || 'Unknown')}${this.myPeerId ? '&peer_id='+this.myPeerId : ''}`;
        const resp = await fetch(url);
        const data = await resp.json();
        console.log('API join response:', data);

        if (data.success) {
            this.myPeerId = data.peer_id;
            // Only an explicit false revokes speaking; absent means allowed.
            this.canSpeak = data.can_speak !== false;
            sessionStorage.setItem('activeVoicePeerId', this.myPeerId);
            console.log('Joined room with peer_id:', this.myPeerId);

            // Start polling
            this.startPolling();
            this.updateVoiceUI();
        } else {
            console.error('API join failed:', data.error);
        }
    } catch (e) {
        // Most likely getUserMedia permission denial; roll back channel state.
        console.error('Failed to join voice:', e);
        alert('Microphone access required for voice channels. Error: ' + e.message);
        this.currentChannelId = null;
    }
}
|
|
|
|
startPolling() {
|
|
if (this.pollInterval) clearInterval(this.pollInterval);
|
|
this.pollInterval = setInterval(() => this.poll(), 500);
|
|
this.poll(); // Initial poll
|
|
}
|
|
|
|
/**
 * One poll cycle: reports our mute/deafen flags to the server, syncs the
 * participant list (opening connections to newcomers, tearing down
 * leavers), drains queued signaling messages, and refreshes the UI.
 * Silently no-ops when not joined to a room.
 */
async poll() {
    if (!this.myPeerId || !this.currentChannelId) return;

    try {
        const resp = await fetch(`api_v1_voice.php?action=poll&room=${this.currentChannelId}&peer_id=${this.myPeerId}&is_muted=${this.isSelfMuted ? 1 : 0}&is_deafened=${this.isDeafened ? 1 : 0}`);
        const data = await resp.json();

        if (data.success) {
            this.canSpeak = data.can_speak !== false;
            // Update participants (keep the old key set to diff against).
            const oldPs = Object.keys(this.participants);
            this.participants = data.participants;
            const newPs = Object.keys(this.participants);

            // If new people joined, initiate offer if I'm the "older" one (not really necessary here, can just offer to anyone I don't have a peer for)
            newPs.forEach(pid => {
                if (pid !== this.myPeerId && !this.peers[pid]) {
                    console.log('New peer found via poll:', pid);
                    this.createPeerConnection(pid, true);
                }
            });

            // Cleanup left peers — but keep connections still used for
            // whispers or whose user is still marked as speaking.
            oldPs.forEach(pid => {
                if (!this.participants[pid] && this.peers[pid] && !this.whisperPeers.has(pid) && !this.speakingUsers.has(pid)) {
                    console.log('Peer left or not in channel anymore:', pid);
                    this.peers[pid].close();
                    delete this.peers[pid];
                    if (this.remoteAudios[pid]) {
                        this.remoteAudios[pid].pause();
                        this.remoteAudios[pid].remove();
                        delete this.remoteAudios[pid];
                    }
                }
            });

            // Handle incoming signals sequentially — SDP/ICE order matters.
            if (data.signals && data.signals.length > 0) {
                for (const sig of data.signals) {
                    await this.handleSignaling(sig);
                }
            }

            this.updateVoiceUI();
        }
    } catch (e) {
        console.error('Polling error:', e);
    }
}
|
|
|
|
async sendSignal(to, data) {
|
|
if (!this.myPeerId || !this.currentChannelId) return;
|
|
await fetch(`api_v1_voice.php?action=signal&room=${this.currentChannelId}&peer_id=${this.myPeerId}&to=${to}&data=${encodeURIComponent(JSON.stringify(data))}`);
|
|
}
|
|
|
|
/**
 * Creates (or returns the existing) RTCPeerConnection to a peer, wires
 * up negotiation, ICE, and remote-track handlers, and attaches our local
 * audio tracks. Uses the "perfect negotiation" pattern via peerStates.
 * @param {string} userId - remote peer_id.
 * @param {boolean} isOfferor - if true and the connection is stable,
 *        kicks off an offer immediately.
 * @returns {RTCPeerConnection}
 */
createPeerConnection(userId, isOfferor) {
    if (this.peers[userId]) return this.peers[userId];

    console.log('Creating PeerConnection for:', userId, 'as offeror:', isOfferor);

    // Per-peer negotiation bookkeeping for collision handling.
    if (!this.peerStates[userId]) {
        this.peerStates[userId] = { makingOffer: false, ignoreOffer: false };
    }

    // STUN only — no TURN, so symmetric-NAT pairs may fail to connect.
    const pc = new RTCPeerConnection({
        iceServers: [
            { urls: 'stun:stun.l.google.com:19302' },
            { urls: 'stun:stun1.l.google.com:19302' }
        ]
    });

    this.peers[userId] = pc;

    pc.oniceconnectionstatechange = () => {
        console.log(`ICE Connection State with ${userId}: ${pc.iceConnectionState}`);
        if (pc.iceConnectionState === 'failed' || pc.iceConnectionState === 'disconnected') {
            console.log(`ICE failure with ${userId}, attempting to restart...`);
            // If it failed, we could try to renegotiate, but for now let's just wait for poll to maybe clean it up
        }
    };

    pc.onnegotiationneeded = async () => {
        try {
            this.peerStates[userId].makingOffer = true;
            // Argument-less setLocalDescription creates the right offer/answer.
            await pc.setLocalDescription();
            this.sendSignal(userId, { type: 'offer', offer: pc.localDescription });
        } catch (err) {
            console.error('onnegotiationneeded error:', err);
        } finally {
            this.peerStates[userId].makingOffer = false;
        }
    };

    // Attach our mic track(s); this also triggers onnegotiationneeded.
    if (this.localStream) {
        this.localStream.getTracks().forEach(track => {
            console.log(`Adding track ${track.kind} to peer ${userId}`);
            pc.addTrack(track, this.localStream);
        });
    }

    // Trickle ICE: forward each candidate as it is gathered.
    pc.onicecandidate = (event) => {
        if (event.candidate) {
            this.sendSignal(userId, { type: 'ice_candidate', candidate: event.candidate });
        }
    };

    pc.ontrack = (event) => {
        console.log('Received remote track from:', userId, 'Stream count:', event.streams.length);
        const stream = event.streams[0] || new MediaStream([event.track]);

        // Ensure AudioContext is running (autoplay policies may suspend it).
        if (this.audioContext && this.audioContext.state === 'suspended') {
            this.audioContext.resume();
        }

        // Replace any stale audio element for this peer (renegotiation).
        if (this.remoteAudios[userId]) {
            console.log('Replacing existing audio element for:', userId);
            this.remoteAudios[userId].pause();
            this.remoteAudios[userId].srcObject = null;
            this.remoteAudios[userId].remove();
        }

        const remoteAudio = new Audio();
        remoteAudio.autoplay = true;
        remoteAudio.style.display = 'none';
        remoteAudio.srcObject = stream;
        remoteAudio.muted = this.isDeafened;
        remoteAudio.volume = this.settings.outputVolume || 1.0;
        // setSinkId is not available in all browsers (e.g. older Safari).
        if (this.settings.outputDevice !== 'default' && typeof remoteAudio.setSinkId === 'function') {
            remoteAudio.setSinkId(this.settings.outputDevice);
        }
        document.body.appendChild(remoteAudio);
        this.remoteAudios[userId] = remoteAudio;

        console.log('Playing remote audio for:', userId);
        remoteAudio.play().then(() => {
            console.log('Remote audio playing successfully for:', userId);
        }).catch(e => {
            console.warn('Autoplay prevented or play failed for:', userId, e);
            // In case of autoplay prevention, we might need a user gesture
        });
    };

    // Manual offer if explicitly requested (though onnegotiationneeded should handle it)
    if (isOfferor && pc.signalingState === 'stable') {
        pc.onnegotiationneeded();
    }

    return pc;
}
|
|
|
|
async handleSignaling(sig) {
|
|
const from = sig.from;
|
|
const data = sig.data;
|
|
|
|
console.log('Handling signaling from:', from, 'type:', data.type);
|
|
|
|
try {
|
|
switch (data.type) {
|
|
case 'offer':
|
|
await this.handleOffer(from, data.offer);
|
|
break;
|
|
case 'answer':
|
|
await this.handleAnswer(from, data.answer);
|
|
break;
|
|
case 'ice_candidate':
|
|
await this.handleCandidate(from, data.candidate);
|
|
break;
|
|
case 'voice_speaking':
|
|
this.updateSpeakingUI(data.user_id, data.speaking, data.is_whisper);
|
|
break;
|
|
}
|
|
} catch (err) {
|
|
console.error('Signaling error:', err);
|
|
}
|
|
}
|
|
|
|
/**
 * Applies a remote offer using the "perfect negotiation" collision rule:
 * when both sides offer simultaneously, the peer with the higher peer_id
 * (the "polite" one) yields by ignoring the incoming offer; the other
 * side's offer wins. Answers back when the description was an offer.
 * @param {string} from - remote peer_id.
 * @param {RTCSessionDescriptionInit} offer - remote session description.
 */
async handleOffer(from, offer) {
    const pc = this.createPeerConnection(from, false);
    const state = this.peerStates[from];

    // Collision: we are mid-offer ourselves, or not in a stable state.
    const offerCollision = (offer.type === "offer") &&
        (state.makingOffer || pc.signalingState !== "stable");

    // Politeness: higher peer_id is polite
    const isPolite = this.myPeerId > from;
    state.ignoreOffer = !isPolite && offerCollision;

    if (state.ignoreOffer) {
        console.log('Polite peer: ignoring offer from impolite peer to avoid collision', from);
        return;
    }

    await pc.setRemoteDescription(new RTCSessionDescription(offer));
    if (offer.type === "offer") {
        // Argument-less setLocalDescription generates the matching answer.
        await pc.setLocalDescription();
        this.sendSignal(from, { type: 'answer', answer: pc.localDescription });
    }
}
|
|
|
|
async handleAnswer(from, answer) {
|
|
const pc = this.peers[from];
|
|
if (pc) {
|
|
await pc.setRemoteDescription(new RTCSessionDescription(answer));
|
|
}
|
|
}
|
|
|
|
async handleCandidate(from, candidate) {
|
|
const pc = this.peers[from];
|
|
const state = this.peerStates[from];
|
|
try {
|
|
if (pc) {
|
|
await pc.addIceCandidate(new RTCIceCandidate(candidate));
|
|
}
|
|
} catch (err) {
|
|
if (!state || !state.ignoreOffer) {
|
|
console.warn('Failed to add ICE candidate', err);
|
|
}
|
|
}
|
|
}
|
|
|
|
/**
 * Builds the Web Audio analysis graph used for the input level meter and
 * voice-activation (VOX) detection. Analyzes a CLONE of the local stream
 * so level metering keeps working while the transmit track is disabled.
 * Graph: clone -> MediaStreamSource -> Analyser -> ScriptProcessor ->
 * zero-gain -> destination (the zero-gain sink prevents feedback while
 * keeping the processor scheduled).
 * NOTE(review): ScriptProcessorNode is deprecated in favor of
 * AudioWorklet, but is kept here for simplicity/compatibility.
 */
setupVOX() {
    if (!this.localStream) {
        console.warn('Cannot setup VOX: no localStream');
        return;
    }

    console.log('Setting up VOX logic...');
    try {
        // Reuse one AudioContext across joins (browsers cap the count).
        if (!this.audioContext) {
            this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
        }

        // Re-ensure context is running
        if (this.audioContext.state === 'suspended') {
            this.audioContext.resume().then(() => console.log('AudioContext resumed'));
        }

        // Cleanup old nodes (setupVOX is re-run on device/constraint change).
        if (this.scriptProcessor) {
            this.scriptProcessor.onaudioprocess = null;
            try { this.scriptProcessor.disconnect(); } catch(e) {}
        }
        if (this.microphone) {
            try { this.microphone.disconnect(); } catch(e) {}
        }

        this.analyser = this.audioContext.createAnalyser();
        this.analyser.fftSize = 512;

        // Use a cloned stream for analysis so VOX works even when localStream is muted/disabled
        if (this.analysisStream) {
            this.analysisStream.getTracks().forEach(t => t.stop());
        }
        this.analysisStream = this.localStream.clone();
        this.analysisStream.getAudioTracks().forEach(t => t.enabled = true); // Ensure analysis stream is NOT muted

        this.microphone = this.audioContext.createMediaStreamSource(this.analysisStream);
        this.scriptProcessor = this.audioContext.createScriptProcessor(2048, 1, 1);

        this.microphone.connect(this.analyser);
        this.analyser.connect(this.scriptProcessor);

        // Avoid feedback: connect to a gain node with 0 volume then to destination
        const silence = this.audioContext.createGain();
        silence.gain.value = 0;
        this.scriptProcessor.connect(silence);
        silence.connect(this.audioContext.destination);

        this.voxActive = false;
        this.currentVolume = 0;

        // Runs per audio buffer: compute the average spectrum magnitude
        // (normalized 0..1) and drive the VOX open/close state machine.
        this.scriptProcessor.onaudioprocess = () => {
            const array = new Uint8Array(this.analyser.frequencyBinCount);
            this.analyser.getByteFrequencyData(array);
            let values = 0;
            for (let i = 0; i < array.length; i++) values += array[i];
            const average = values / array.length;
            this.currentVolume = average / 255;

            // Level metering runs in all modes; gating applies only to VOX.
            if (this.settings.mode !== 'vox') {
                this.voxActive = false;
                return;
            }

            if (this.currentVolume > this.settings.voxThreshold) {
                this.lastVoiceTime = Date.now();
                if (!this.voxActive) {
                    this.voxActive = true;
                    this.updateMuteState();
                }
            } else {
                // Hold the gate open voxHoldTime ms after the last voice,
                // so brief pauses don't clip the transmission.
                if (this.voxActive && Date.now() - this.lastVoiceTime > this.voxHoldTime) {
                    this.voxActive = false;
                    this.updateMuteState();
                }
            }
        };
        console.log('VOX logic setup complete');
    } catch (e) {
        console.error('Failed to setup VOX:', e);
    }
}
|
|
|
|
getVolume() {
|
|
return this.currentVolume || 0;
|
|
}
|
|
|
|
/**
 * Recomputes whether we should be transmitting (PTT pressed / VOX open /
 * whispering, gated by canSpeak), applies the result to the audio
 * tracks, and broadcasts a voice_speaking signal to peers — but only
 * when the talking or whisper state actually changed.
 */
updateMuteState() {
    if (!this.localStream) return;

    // If we are not in a channel, we can still whisper!
    // But for normal talking, we need currentChannelId.

    let shouldTalk = (this.settings.mode === 'ptt') ? this.pttPressed : this.voxActive;

    // Server-side permission overrides local intent...
    if (this.canSpeak === false) {
        shouldTalk = false;
    }

    // Always allow talking if whispering (...except whispers bypass it).
    if (this.isWhispering) {
        shouldTalk = true;
    }

    console.log('updateMuteState: shouldTalk =', shouldTalk, 'isWhispering =', this.isWhispering);
    // Only act on transitions to avoid spamming signals every VOX frame.
    if (this.isTalking !== shouldTalk || this.lastWhisperState !== this.isWhispering) {
        this.isTalking = shouldTalk;
        this.lastWhisperState = this.isWhispering;

        this.applyAudioState();
        this.updateSpeakingUI(window.currentUserId, shouldTalk, this.isWhispering);

        // Notify others in current channel
        const msg = {
            type: 'voice_speaking',
            channel_id: this.currentChannelId,
            user_id: window.currentUserId,
            speaking: shouldTalk,
            is_whisper: this.isWhispering
        };

        // Send to channel peers
        Object.keys(this.peers).forEach(pid => {
            // If we are whispering, only send voice_speaking to whisper targets
            // but actually it's better to notify channel peers that we are NOT talking to them
            if (this.isWhispering) {
                if (this.whisperPeers.has(pid)) {
                    this.sendSignal(pid, msg);
                } else {
                    // Tell channel peers we are silent to them
                    this.sendSignal(pid, { ...msg, speaking: false });
                }
            } else {
                this.sendSignal(pid, msg);
            }
        });

        // Also notify whisper peers that are NOT in the channel
        if (this.isWhispering) {
            this.whisperPeers.forEach(pid => {
                if (!this.peers[pid]) {
                    // This should have been established in startWhisper
                } else {
                    this.sendSignal(pid, msg);
                }
            });
        }
    }
}
|
|
|
|
/**
 * Pushes the computed transmit state onto the actual audio tracks:
 * enables/disables the local capture track(s), then sets per-peer sender
 * track enablement so whispers only reach whisper targets and normal
 * speech only reaches current-channel participants. Also refreshes the
 * mute/deafen buttons in the user panel.
 *
 * NOTE(review): each pc's sender.track is the SAME underlying local
 * track object (added from this.localStream), so the per-peer
 * `sender.track.enabled` assignments below all mutate one shared track —
 * the last iteration wins. True per-peer privacy would need per-peer
 * cloned tracks via replaceTrack(). Confirm before relying on whisper
 * privacy here.
 */
applyAudioState() {
    if (this.localStream) {
        // Whispers transmit even when canSpeak is false; self-mute always wins.
        const shouldTransmit = !this.isSelfMuted && this.isTalking && (this.canSpeak || this.isWhispering);
        console.log('applyAudioState: transmitting =', shouldTransmit, '(whisper=', this.isWhispering, ')');

        this.localStream.getAudioTracks().forEach(track => {
            track.enabled = shouldTransmit;
        });

        // We also need to ensure the audio only goes to the right peers
        // In P2P, we do this by enabling/disabling the track in the peer connection
        // or by simply enabling/disabling the local track (which affects all peers).
        // To be truly private, we should only enable the track for whisper peers.

        Object.entries(this.peers).forEach(([pid, pc]) => {
            const sender = pc.getSenders().find(s => s.track && s.track.kind === 'audio');
            if (sender) {
                if (this.isWhispering) {
                    sender.track.enabled = this.whisperPeers.has(pid);
                } else {
                    // Normal mode: only send to people in the current channel participants
                    sender.track.enabled = !!this.participants[pid];
                }
            }
        });
    }
    this.updateUserPanelButtons();
}
|
|
|
|
setMute(mute) {
|
|
this.isSelfMuted = mute;
|
|
this.applyAudioState();
|
|
}
|
|
|
|
toggleMute() {
|
|
if (this.canSpeak === false) return;
|
|
this.setMute(!this.isSelfMuted);
|
|
}
|
|
|
|
toggleDeafen() {
|
|
this.isDeafened = !this.isDeafened;
|
|
console.log('Setting deafen to:', this.isDeafened);
|
|
Object.values(this.remoteAudios).forEach(audio => {
|
|
audio.muted = this.isDeafened;
|
|
if (!this.isDeafened) audio.volume = this.settings.outputVolume || 1.0;
|
|
});
|
|
// If we deafen, we usually also mute in Discord
|
|
if (this.isDeafened && !this.isSelfMuted) {
|
|
this.setMute(true);
|
|
}
|
|
this.applyAudioState();
|
|
}
|
|
|
|
setOutputVolume(vol) {
|
|
this.settings.outputVolume = parseFloat(vol);
|
|
Object.values(this.remoteAudios).forEach(audio => {
|
|
audio.volume = this.settings.outputVolume;
|
|
});
|
|
}
|
|
|
|
setInputVolume(vol) {
|
|
this.settings.inputVolume = parseFloat(vol);
|
|
}
|
|
|
|
async setInputDevice(deviceId) {
|
|
this.settings.inputDevice = deviceId;
|
|
if (this.currentChannelId && this.localStream) {
|
|
const constraints = {
|
|
audio: {
|
|
echoCancellation: this.settings.echoCancellation,
|
|
noiseSuppression: this.settings.noiseSuppression,
|
|
autoGainControl: true
|
|
},
|
|
video: false
|
|
};
|
|
if (deviceId !== 'default') {
|
|
constraints.audio.deviceId = { exact: deviceId };
|
|
}
|
|
const newStream = await navigator.mediaDevices.getUserMedia(constraints);
|
|
const newTrack = newStream.getAudioTracks()[0];
|
|
|
|
Object.values(this.peers).forEach(pc => {
|
|
const sender = pc.getSenders().find(s => s.track && s.track.kind === 'audio');
|
|
if (sender) sender.replaceTrack(newTrack);
|
|
});
|
|
|
|
this.localStream.getTracks().forEach(t => t.stop());
|
|
this.localStream = newStream;
|
|
this.setupVOX();
|
|
this.applyAudioState();
|
|
}
|
|
}
|
|
|
|
async setOutputDevice(deviceId) {
|
|
this.settings.outputDevice = deviceId;
|
|
Object.values(this.remoteAudios).forEach(audio => {
|
|
if (typeof audio.setSinkId === 'function') {
|
|
audio.setSinkId(deviceId).catch(e => console.error('setSinkId failed:', e));
|
|
}
|
|
});
|
|
}
|
|
|
|
/**
 * Re-acquires the microphone with the current echoCancellation /
 * noiseSuppression settings and hot-swaps the new track onto every peer
 * connection via replaceTrack. Only acts while live in a channel.
 */
async updateAudioConstraints() {
    if (this.currentChannelId && this.localStream) {
        console.log('Updating audio constraints:', this.settings.echoCancellation, this.settings.noiseSuppression);
        const constraints = {
            audio: {
                echoCancellation: this.settings.echoCancellation,
                noiseSuppression: this.settings.noiseSuppression,
                autoGainControl: true
            },
            video: false
        };
        if (this.settings.inputDevice !== 'default') {
            constraints.audio.deviceId = { exact: this.settings.inputDevice };
        }
        try {
            const newStream = await navigator.mediaDevices.getUserMedia(constraints);
            const newTrack = newStream.getAudioTracks()[0];

            // replaceTrack avoids a full SDP renegotiation.
            Object.values(this.peers).forEach(pc => {
                const sender = pc.getSenders().find(s => s.track && s.track.kind === 'audio');
                if (sender) sender.replaceTrack(newTrack);
            });

            // Release the old capture device, then rebuild VOX on the new stream.
            this.localStream.getTracks().forEach(t => t.stop());
            this.localStream = newStream;
            this.setupVOX();
            this.applyAudioState();
        } catch (e) {
            console.error('Failed to update audio constraints:', e);
        }
    }
}
|
|
|
|
/**
 * Syncs the user-panel mute/deafen buttons (icon, color, tooltip) with
 * the current state. A server-side speak ban renders the mute button as
 * muted and dimmed regardless of the local self-mute flag.
 */
updateUserPanelButtons() {
    const btnMute = document.getElementById('btn-panel-mute');
    const btnDeafen = document.getElementById('btn-panel-deafen');

    let displayMuted = this.isSelfMuted;
    if (this.canSpeak === false) {
        displayMuted = true;
    }

    if (btnMute) {
        btnMute.classList.toggle('active', displayMuted);
        btnMute.style.color = displayMuted ? '#f23f43' : 'var(--text-muted)';
        // Slashed-mic icon when muted, plain mic otherwise.
        btnMute.innerHTML = displayMuted ?
            '<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><line x1="1" y1="1" x2="23" y2="23"></line><path d="M9 9v3a3 3 0 0 0 5.12 2.12M15 9.34V4a3 3 0 0 0-5.94-.6"></path><path d="M17 16.95A7 7 0 0 1 5 12v-2m14 0v2a7 7 0 0 1-.11 1.23"></path><line x1="12" y1="19" x2="12" y2="23"></line><line x1="8" y1="23" x2="16" y2="23"></line></svg>' :
            '<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M12 1a3 3 0 0 0-3 3v8a3 3 0 0 0 6 0V4a3 3 0 0 0-3-3z"></path><path d="M19 10v2a7 7 0 0 1-14 0v-2"></path><line x1="12" y1="19" x2="12" y2="23"></line><line x1="8" y1="23" x2="16" y2="23"></line></svg>';

        if (this.canSpeak === false) {
            btnMute.title = "You do not have permission to speak in this channel";
            btnMute.style.opacity = '0.5';
        } else {
            btnMute.title = "Mute";
            btnMute.style.opacity = '1';
        }
    }

    if (btnDeafen) {
        btnDeafen.classList.toggle('active', this.isDeafened);
        btnDeafen.style.color = this.isDeafened ? '#f23f43' : 'var(--text-muted)';
        // Slashed-headphones icon when deafened, plain headphones otherwise.
        btnDeafen.innerHTML = this.isDeafened ?
            '<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><line x1="1" y1="1" x2="23" y2="23"></line><path d="M8.85 4.11A9 9 0 1 1 20 12"></path><path d="M11.64 6.64A5 5 0 1 1 15 10"></path></svg>' :
            '<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M3 18v-6a9 9 0 0 1 18 0v6"></path><path d="M21 19a2 2 0 0 1-2 2h-1a2 2 0 0 1-2-2v-3a2 2 0 0 1 2-2h3zM3 19a2 2 0 0 0 2 2h1a2 2 0 0 0 2-2v-3a2 2 0 0 0-2-2H3z"></path></svg>';
    }
}
|
|
|
|
/**
 * Leaves the current voice channel: stops polling, notifies the server
 * (keepalive fetch so it survives page unload), stops and releases all
 * local/analysis media, tears down the Web Audio nodes (keeping the
 * AudioContext suspended for reuse), closes all peer connections and
 * remote audio elements, resets state, and refreshes the UI.
 */
leave() {
    if (!this.currentChannelId) {
        console.log('VoiceChannel.leave called but no active channel');
        return;
    }
    console.log('Leaving voice channel:', this.currentChannelId, 'myPeerId:', this.myPeerId);
    // Capture ids before state is reset below.
    const cid = this.currentChannelId;
    const pid = this.myPeerId;

    // Clear rejoin markers so a refresh won't auto-rejoin this channel.
    sessionStorage.removeItem('activeVoiceChannel');
    sessionStorage.removeItem('activeVoicePeerId');
    if (this.pollInterval) clearInterval(this.pollInterval);

    // Use keepalive for the leave fetch to ensure it reaches the server during page unload
    fetch(`api_v1_voice.php?action=leave&room=${cid}&peer_id=${pid}`, { keepalive: true });

    if (this.localStream) {
        console.log('Stopping local stream tracks');
        this.localStream.getTracks().forEach(track => {
            track.stop();
            console.log('Track stopped:', track.kind);
        });
        this.localStream = null;
    }
    if (this.analysisStream) {
        this.analysisStream.getTracks().forEach(track => track.stop());
        this.analysisStream = null;
    }

    if (this.scriptProcessor) {
        try {
            this.scriptProcessor.disconnect();
            this.scriptProcessor.onaudioprocess = null;
        } catch(e) {}
        this.scriptProcessor = null;
    }
    if (this.microphone) {
        try { this.microphone.disconnect(); } catch(e) {}
        this.microphone = null;
    }
    if (this.audioContext && this.audioContext.state !== 'closed') {
        // Keep AudioContext alive but suspended to reuse it
        this.audioContext.suspend();
    }

    Object.values(this.peers).forEach(pc => pc.close());
    Object.values(this.remoteAudios).forEach(audio => {
        audio.pause();
        audio.remove();
        audio.srcObject = null;
    });
    this.peers = {};
    this.remoteAudios = {};
    this.participants = {};
    this.currentChannelId = null;
    this.myPeerId = null;
    this.speakingUsers.clear();

    // Also remove 'active' class from all voice items
    document.querySelectorAll('.voice-item').forEach(el => el.classList.remove('active'));

    this.updateVoiceUI();
}
|
|
|
|
/**
 * Refreshes the channel-sidebar voice UI: redraws all channel user lists
 * via the shared static refresher, and creates (or removes) the bottom
 * "voice connected" control bar depending on whether we are in a channel.
 */
updateVoiceUI() {
    // We now use a global update mechanism for all channels
    VoiceChannel.refreshAllVoiceUsers();

    if (this.currentChannelId) {
        // Only build the control bar once; it persists across polls.
        if (!document.querySelector('.voice-controls')) {
            const controls = document.createElement('div');
            controls.className = 'voice-controls p-2 d-flex justify-content-between align-items-center border-top bg-dark';
            controls.style.backgroundColor = '#232428';
            controls.innerHTML = `
                <div class="d-flex align-items-center">
                    <div class="voice-status-icon text-success me-2" style="font-size: 8px;">●</div>
                    <div class="small fw-bold" style="font-size: 11px; color: #248046;">Voice (${this.settings.mode.toUpperCase()})</div>
                </div>
                <div>
                    <button class="btn btn-sm text-muted" id="btn-voice-leave" title="Disconnect">
                        <svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><path d="M10.68 13.31a16 16 0 0 0 3.41 2.6l1.27-1.27a2 2 0 0 1 2.11-.45 12.84 12.84 0 0 0 2.81.7 2 2 0 0 1 1.72 2v3a2 2 0 0 1-2.18 2 19.79 19.79 0 0 1-8.63-3.07 19.42 19.42 0 0 1-3.33-2.67m-2.67-3.34a19.79 19.79 0 0 1-3.07-8.63A2 2 0 0 1 4.11 2h3a2 2 0 0 1 2 1.72 12.84 12.84 0 0 0 .7 2.81 2 2 0 0 1-.45 2.11L8.09 9.91"></path><line x1="23" y1="1" x2="1" y2="23"></line></svg>
                    </button>
                </div>
            `;
            const sidebar = document.querySelector('.channels-sidebar');
            if (sidebar) sidebar.appendChild(controls);
            const btnLeave = document.getElementById('btn-voice-leave');
            if (btnLeave) btnLeave.onclick = () => this.leave();
        }
    } else {
        // Not connected: remove the control bar if it exists.
        const controls = document.querySelector('.voice-controls');
        if (controls) controls.remove();
    }
}
|
|
|
|
updateSpeakingUI(userId, isSpeaking, isWhisper = false) {
|
|
userId = String(userId);
|
|
if (isSpeaking) {
|
|
this.speakingUsers.add(userId);
|
|
} else {
|
|
this.speakingUsers.delete(userId);
|
|
}
|
|
|
|
const userEls = document.querySelectorAll(`.voice-user[data-user-id="${userId}"]`);
|
|
userEls.forEach(el => {
|
|
const avatar = el.querySelector('.message-avatar');
|
|
if (avatar) {
|
|
if (isSpeaking) {
|
|
avatar.style.boxShadow = isWhisper ? '0 0 0 2px #00a8fc' : '0 0 0 2px #23a559';
|
|
} else {
|
|
avatar.style.boxShadow = 'none';
|
|
}
|
|
}
|
|
|
|
// Show whisper indicator text if whispering to me
|
|
if (isWhisper && isSpeaking && userId !== String(window.currentUserId)) {
|
|
if (!el.querySelector('.whisper-label')) {
|
|
const label = document.createElement('span');
|
|
label.className = 'whisper-label badge bg-info ms-1';
|
|
label.style.fontSize = '8px';
|
|
label.innerText = 'WHISPER';
|
|
el.querySelector('span.text-truncate').after(label);
|
|
}
|
|
} else {
|
|
const label = el.querySelector('.whisper-label');
|
|
if (label) label.remove();
|
|
}
|
|
});
|
|
}
|
|
|
|
/**
 * Rebuilds every channel's voice-user list in the sidebar from the
 * server's global presence snapshot: clears all lists, highlights the
 * channel we are connected to, and re-renders each participant with
 * their current speaking/mute/deafen state.
 */
static async refreshAllVoiceUsers() {
    try {
        const resp = await fetch('api_v1_voice.php?action=list_all');
        const data = await resp.json();
        if (data.success) {
            // Clear all lists first
            document.querySelectorAll('.voice-users-list').forEach(el => el.innerHTML = '');

            // Remove connected highlight from all voice items
            document.querySelectorAll('.voice-item').forEach(el => {
                el.classList.remove('connected');
            });

            // Populate based on data
            const processedUserIds = new Set();
            Object.keys(data.channels).forEach(channelId => {
                const voiceItem = document.querySelector(`.voice-item[data-channel-id="${channelId}"]`);
                if (voiceItem) {
                    // Highlight channel as connected only if I am in it
                    // (loose == : ids may be string vs number).
                    if (window.voiceHandler && window.voiceHandler.currentChannelId == channelId) {
                        voiceItem.classList.add('connected');
                    }

                    const container = voiceItem.closest('.channel-item-container');
                    if (container) {
                        const listEl = container.querySelector('.voice-users-list');
                        if (listEl) {
                            data.channels[channelId].forEach(p => {
                                const pid = String(p.user_id);
                                processedUserIds.add(pid);
                                const isSpeaking = window.voiceHandler && window.voiceHandler.speakingUsers.has(pid);
                                VoiceChannel.renderUserToUI(listEl, p.user_id, p.display_name || p.username, p.avatar_url, isSpeaking, p.is_muted, p.is_deafened);
                            });
                        }
                    }
                }
            });

            // Handle users whispering to me from other channels or not in any channel
            // (currently a documented no-op — see comments inside the loop).
            if (window.voiceHandler && window.voiceHandler.speakingUsers.size > 0) {
                window.voiceHandler.speakingUsers.forEach(uid => {
                    if (!processedUserIds.has(uid)) {
                        // Find where to show this user. For now, let's put them in their own channel if possible,
                        // or just a "Whispers" section if we had one.
                        // Actually, let's just show them in whatever channel they are currently in.
                        // The `data.channels` already contains everyone.
                        // If they are not in `processedUserIds` it means their channel is not rendered or they are not in a channel.
                    }
                });
            }
        }
    } catch (e) {
        console.error('Failed to refresh voice users:', e);
    }
}
|
|
|
|
static renderUserToUI(container, userId, username, avatarUrl, isSpeaking = false, isMuted = false, isDeafened = false) {
|
|
const userEl = document.createElement('div');
|
|
userEl.className = 'voice-user small text-muted d-flex align-items-center mb-1';
|
|
userEl.dataset.userId = userId;
|
|
userEl.style.paddingLeft = '8px';
|
|
const avatarStyle = avatarUrl ? `background-image: url('${avatarUrl}'); background-size: cover;` : "background-color: #555;";
|
|
const boxShadow = isSpeaking ? 'box-shadow: 0 0 0 2px #23a559;' : '';
|
|
|
|
let icons = '';
|
|
if (isDeafened) {
|
|
icons += '<i class="fa-solid fa-volume-xmark ms-auto text-danger" style="font-size: 10px;"></i>';
|
|
} else if (isMuted) {
|
|
icons += '<i class="fa-solid fa-microphone-slash ms-auto text-danger" style="font-size: 10px;"></i>';
|
|
}
|
|
|
|
userEl.innerHTML = `
|
|
<div class="message-avatar me-2" style="width: 16px; height: 16px; border-radius: 50%; transition: box-shadow 0.2s; ${avatarStyle} ${boxShadow}"></div>
|
|
<span class="text-truncate" style="font-size: 13px; max-width: 100px;">${username}</span>
|
|
${icons}
|
|
`;
|
|
container.appendChild(userEl);
|
|
}
|
|
}
|