/* global
__filename,
MediaStreamTrack,
RTCIceCandidate: true,
RTCPeerConnection,
RTCSessionDescription: true
*/
import EventEmitter from 'events';
import { getLogger } from 'jitsi-meet-logger';
import clonedeep from 'lodash.clonedeep';
import JitsiTrackError from '../../JitsiTrackError';
import * as JitsiTrackErrors from '../../JitsiTrackErrors';
import CameraFacingMode from '../../service/RTC/CameraFacingMode';
import * as MediaType from '../../service/RTC/MediaType';
import RTCEvents from '../../service/RTC/RTCEvents';
import Resolutions from '../../service/RTC/Resolutions';
import VideoType from '../../service/RTC/VideoType';
import { AVAILABLE_DEVICE } from '../../service/statistics/AnalyticsEvents';
import browser from '../browser';
import Statistics from '../statistics/statistics';
import GlobalOnErrorHandler from '../util/GlobalOnErrorHandler';
import Listenable from '../util/Listenable';
import SDPUtil from '../xmpp/SDPUtil';
import screenObtainer from './ScreenObtainer';
const logger = getLogger(__filename);
// Require the adapter shim only for certain browsers. This is done for
// react-native, which has its own shims, and while browsers are still being
// migrated over to use adapter's shims.
if (browser.usesAdapter()) {
require('webrtc-adapter');
}
const eventEmitter = new EventEmitter();
const AVAILABLE_DEVICES_POLL_INTERVAL_TIME = 3000; // ms
/**
* Default resolution to obtain for video tracks if no resolution is specified.
 * This default is used for the old gum flow only, as the new gum flow uses
* {@link DEFAULT_CONSTRAINTS}.
*/
const OLD_GUM_DEFAULT_RESOLUTION = 720;
/**
* Default devices to obtain when no specific devices are specified. This
 * default is used for the old gum flow only.
*/
const OLD_GUM_DEFAULT_DEVICES = [ 'audio', 'video' ];
/**
* Default MediaStreamConstraints to use for calls to getUserMedia.
*
* @private
*/
const DEFAULT_CONSTRAINTS = {
video: {
height: {
ideal: 720,
max: 720,
min: 240
}
}
};
/**
* The default frame rate for Screen Sharing.
*/
const SS_DEFAULT_FRAME_RATE = 5;
// Currently, audio output device change is supported only in Chrome, and the
// default output always has the 'default' device ID.
let audioOutputDeviceId = 'default'; // default device
// whether user has explicitly set a device to use
let audioOutputChanged = false;
// Disables all audio processing
let disableAP = false;
// Disables Acoustic Echo Cancellation
let disableAEC = false;
// Disables Noise Suppression
let disableNS = false;
// Disables Automatic Gain Control
let disableAGC = false;
// Disables Highpass Filter
let disableHPF = false;
const featureDetectionAudioEl = document.createElement('audio');
const isAudioOutputDeviceChangeAvailable
= typeof featureDetectionAudioEl.setSinkId !== 'undefined';
let availableDevices;
let availableDevicesPollTimer;
/**
* An empty function.
*/
function emptyFuncton() {
// no-op
}
/**
* Initialize wrapper function for enumerating devices.
* TODO: remove this, it should no longer be needed.
*
* @returns {?Function}
*/
function initEnumerateDevicesWithCallback() {
if (navigator.mediaDevices && navigator.mediaDevices.enumerateDevices) {
return callback => {
navigator.mediaDevices.enumerateDevices()
.then(devices => {
updateKnownDevices(devices);
callback(devices);
})
.catch(error => {
logger.warn(`Failed to enumerate devices. ${error}`);
updateKnownDevices([]);
callback([]);
});
};
}
}
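// Illustrative usage of the wrapper returned above (a sketch; the real call
// sites live elsewhere in this module and may pass different callbacks):
//
//     const enumerateDevices = initEnumerateDevicesWithCallback();
//
//     if (enumerateDevices) {
//         enumerateDevices(devices => {
//             logger.info(`Enumerated ${devices.length} media devices`);
//         });
//     }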
/**
 * Sets the requested resolution on the given (old-style) video constraints.
 *
 * @param {Object} constraints - The constraints object to modify.
 * @param {boolean} isNewStyleConstraintsSupported - Whether the browser uses
 * the new (spec-compliant) constraints format.
 * @param {string} resolution - A key into the {@link Resolutions} map.
 */
function setResolutionConstraints(
constraints,
isNewStyleConstraintsSupported,
resolution) {
if (Resolutions[resolution]) {
if (isNewStyleConstraintsSupported) {
constraints.video.width = {
ideal: Resolutions[resolution].width
};
constraints.video.height = {
ideal: Resolutions[resolution].height
};
}
constraints.video.mandatory.minWidth = Resolutions[resolution].width;
constraints.video.mandatory.minHeight = Resolutions[resolution].height;
}
if (constraints.video.mandatory.minWidth) {
constraints.video.mandatory.maxWidth
= constraints.video.mandatory.minWidth;
}
if (constraints.video.mandatory.minHeight) {
constraints.video.mandatory.maxHeight
= constraints.video.mandatory.minHeight;
}
}
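// Illustrative effect of setResolutionConstraints (a sketch, assuming the
// Resolutions map contains a '720' entry with width 1280 and height 720):
//
//     const constraints = { video: { mandatory: {}, optional: [] } };
//
//     setResolutionConstraints(constraints, false, '720');
//     // constraints.video.mandatory is now
//     // { minWidth: 1280, minHeight: 720, maxWidth: 1280, maxHeight: 720 }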
/**
 * Creates the constraints object for a getUserMedia call (old gum flow).
 *
 * @param {string[]} um - Required user media types.
 * @param {Object} [options={}] - Optional parameters.
 * @param {string} options.resolution
 * @param {number} options.bandwidth
 * @param {number} options.fps
 * @param {string} options.desktopStream
 * @param {string} options.cameraDeviceId
 * @param {string} options.micDeviceId
 * @param {CameraFacingMode} options.facingMode
 * @param {boolean} options.firefox_fake_device
 * @param {Object} options.frameRate - Used only for desktop sharing.
 * @param {number} options.frameRate.min - Minimum fps.
 * @param {number} options.frameRate.max - Maximum fps.
 * @param {boolean} options.screenShareAudio - Used by electron clients to
 * enable system audio screen sharing.
 * @returns {Object} The constraints to be passed to getUserMedia.
 */
function getConstraints(um, options = {}) {
const constraints = {
audio: false,
video: false
};
// Don't mix new and old style settings for Chromium as this leads
// to a TypeError in newer Chromium versions. @see
// https://bugs.chromium.org/p/chromium/issues/detail?id=614716
// This is a temporary solution; in the future we will fully split the old
// and new style constraints once new versions of Chromium and Firefox have
// stable support for the new constraints format. For more information
// @see https://github.com/jitsi/lib-jitsi-meet/pull/136
const isNewStyleConstraintsSupported
= browser.isFirefox()
|| browser.isSafari()
|| browser.isReactNative();
if (um.indexOf('video') >= 0) {
// same behaviour as true
constraints.video = { mandatory: {},
optional: [] };
if (options.cameraDeviceId) {
if (isNewStyleConstraintsSupported) {
// New style of setting device id.
constraints.video.deviceId = options.cameraDeviceId;
}
// Old style.
constraints.video.mandatory.sourceId = options.cameraDeviceId;
} else {
// Prefer the front i.e. user-facing camera (to the back i.e.
// environment-facing camera, for example).
// TODO: Maybe use "exact" syntax if options.facingMode is defined,
// but this probably needs to be decided when updating other
// constraints, as we currently don't use "exact" syntax anywhere.
const facingMode = options.facingMode || CameraFacingMode.USER;
if (isNewStyleConstraintsSupported) {
constraints.video.facingMode = facingMode;
}
constraints.video.optional.push({
facingMode
});
}
if (options.minFps || options.maxFps || options.fps) {
// for some cameras it might be necessary to request 30fps
// so they choose 30fps mjpg over 10fps yuy2
if (options.minFps || options.fps) {
// Fall back to options.fps for backwards compatibility
options.minFps = options.minFps || options.fps;
constraints.video.mandatory.minFrameRate = options.minFps;
}
if (options.maxFps) {
constraints.video.mandatory.maxFrameRate = options.maxFps;
}
}
setResolutionConstraints(
constraints, isNewStyleConstraintsSupported, options.resolution);
}
if (um.indexOf('audio') >= 0) {
if (browser.isReactNative()) {
// The react-native-webrtc project that we're currently using
// expects the audio constraint to be a boolean.
constraints.audio = true;
} else if (browser.isFirefox()) {
if (options.micDeviceId) {
constraints.audio = {
mandatory: {},
deviceId: options.micDeviceId, // new style
optional: [ {
sourceId: options.micDeviceId // old style
} ] };
} else {
constraints.audio = true;
}
} else {
// same behaviour as true
constraints.audio = { mandatory: {},
optional: [] };
if (options.micDeviceId) {
if (isNewStyleConstraintsSupported) {
// New style of setting device id.
constraints.audio.deviceId = options.micDeviceId;
}
// Old style.
constraints.audio.optional.push({
sourceId: options.micDeviceId
});
}
// if it is good enough for hangouts...
constraints.audio.optional.push(
{ echoCancellation: !disableAEC && !disableAP },
{ googEchoCancellation: !disableAEC && !disableAP },
{ googAutoGainControl: !disableAGC && !disableAP },
{ googNoiseSuppression: !disableNS && !disableAP },
{ googHighpassFilter: !disableHPF && !disableAP },
{ googNoiseSuppression2: !disableNS && !disableAP },
{ googEchoCancellation2: !disableAEC && !disableAP },
{ googAutoGainControl2: !disableAGC && !disableAP }
);
}
}
if (um.indexOf('screen') >= 0) {
if (browser.isChrome()) {
constraints.video = {
mandatory: getSSConstraints({
...options,
source: 'screen'
}),
optional: []
};
} else if (browser.isFirefox()) {
constraints.video = {
mozMediaSource: 'window',
mediaSource: 'window',
frameRate: options.frameRate || {
min: SS_DEFAULT_FRAME_RATE,
max: SS_DEFAULT_FRAME_RATE
}
};
} else {
const errmsg
= '\'screen\' WebRTC media source is supported only in Chrome'
+ ' and Firefox';
GlobalOnErrorHandler.callErrorHandler(new Error(errmsg));
logger.error(errmsg);
}
}
if (um.indexOf('desktop') >= 0) {
constraints.video = {
mandatory: getSSConstraints({
...options,
source: 'desktop'
}),
optional: []
};
// Audio screen sharing for electron only works for screen type devices,
// i.e. when the user shares the whole desktop.
if (browser.isElectron() && options.screenShareAudio
&& (options.desktopStream.indexOf('screen') >= 0)) {
// Provide constraints as described by the electron desktop capturer
// documentation here:
// https://www.electronjs.org/docs/api/desktop-capturer
// Note: the documentation specifies that chromeMediaSourceId should not be
// present which, in the case a user has multiple monitors, leads to all of
// them being shared at once. However, we tested with chromeMediaSourceId
// present and it seems to work properly, and it also takes care of the
// previously mentioned issue.
constraints.audio = { mandatory: {
chromeMediaSource: constraints.video.mandatory.chromeMediaSource
} };
}
}
if (options.bandwidth) {
if (!constraints.video) {
// same behaviour as true
constraints.video = { mandatory: {},
optional: [] };
}
constraints.video.optional.push({ bandwidth: options.bandwidth });
}
// We turn on fake audio for both audio and video tracks: fake audio & video
// seem to work only when enabled in a single getUserMedia call, and we cannot
// get fake audio separately from fake video. This can later be a problem
// with some of the tests.
if (browser.isFirefox() && options.firefox_fake_device) {
// This seems to be fixed now, so the experimental fix has been removed, as
// having multiple audio tracks breaks the tests:
// constraints.audio = true;
constraints.fake = true;
}
return constraints;
}
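// Illustrative result of getConstraints (a sketch; the exact shape depends on
// the browser and the options). For example, on Chrome with default options
// and assuming CameraFacingMode.USER is 'user':
//
//     getConstraints([ 'audio', 'video' ]);
//     // => {
//     //     audio: { mandatory: {}, optional: [ /* goog* processing flags */ ] },
//     //     video: { mandatory: {}, optional: [ { facingMode: 'user' } ] }
//     // }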
/**
* Creates a constraints object to be passed into a call to getUserMedia.
*
* @param {Array} um - An array of user media types to get. The accepted
* types are "video", "audio", and "desktop."
* @param {Object} options - Various values to be added to the constraints.
* @param {string} options.cameraDeviceId - The device id for the video
* capture device to get video from.
* @param {Object} options.constraints - Default constraints object to use
* as a base for the returned constraints.
* @param {Object} options.desktopStream - The desktop source id from which
* to capture a desktop sharing video.
* @param {string} options.facingMode - Which direction the camera is
* pointing to.
* @param {string} options.micDeviceId - The device id for the audio capture
* device to get audio from.
 * @param {Object} options.frameRate - used only for desktop sharing.
 * @param {number} options.frameRate.min - Minimum fps.
 * @param {number} options.frameRate.max - Maximum fps.
* @private
* @returns {Object}
*/
function newGetConstraints(um = [], options = {}) {
// Create a deep copy of the constraints to avoid any modification of
// the passed in constraints object.
const constraints = clonedeep(options.constraints || DEFAULT_CONSTRAINTS);
if (um.indexOf('video') >= 0) {
if (!constraints.video) {
constraints.video = {};
}
// Override the constraints on Safari because of the following webkit bug.
// https://bugs.webkit.org/show_bug.cgi?id=210932
// Camera doesn't start on older macOS versions if min/max constraints are specified.
// TODO: remove this hack when the bug fix is available on Mojave, Sierra and High Sierra.
if (browser.isSafari()) {
if (constraints.video.height && constraints.video.height.ideal) {
constraints.video.height = { ideal: clonedeep(constraints.video.height.ideal) };
} else {
logger.warn('Ideal camera height missing, camera may not start properly');
}
if (constraints.video.width && constraints.video.width.ideal) {
constraints.video.width = { ideal: clonedeep(constraints.video.width.ideal) };
} else {
logger.warn('Ideal camera width missing, camera may not start properly');
}
}
if (options.cameraDeviceId) {
constraints.video.deviceId = options.cameraDeviceId;
} else {
const facingMode = options.facingMode || CameraFacingMode.USER;
constraints.video.facingMode = facingMode;
}
} else {
constraints.video = false;
}
if (um.indexOf('audio') >= 0) {
if (!constraints.audio || typeof constraints.audio === 'boolean') {
constraints.audio = {};
}
// Use the standard audio constraints on non-chromium browsers.
if (browser.isFirefox() || browser.isSafari()) {
constraints.audio = {
deviceId: options.micDeviceId,
autoGainControl: !disableAGC && !disableAP,
echoCancellation: !disableAEC && !disableAP,
noiseSuppression: !disableNS && !disableAP
};
} else {
// NOTE(brian): the new style ('advanced' instead of 'optional')
// doesn't seem to carry through the googXXX constraints.
// Changing back to 'optional' here (even with video using
// the 'advanced' style) allows them to be passed through,
// but it also requires the device id to capture to be set in
// 'optional' as sourceId, otherwise the constraints are
// considered malformed.
if (!constraints.audio.optional) {
constraints.audio.optional = [];
}
constraints.audio.optional.push(
{ sourceId: options.micDeviceId },
{ echoCancellation: !disableAEC && !disableAP },
{ googEchoCancellation: !disableAEC && !disableAP },
{ googAutoGainControl: !disableAGC && !disableAP },
{ googNoiseSuppression: !disableNS && !disableAP },
{ googHighpassFilter: !disableHPF && !disableAP },
{ googNoiseSuppression2: !disableNS && !disableAP },
{ googEchoCancellation2: !disableAEC && !disableAP },
{ googAutoGainControl2: !disableAGC && !disableAP }
);
}
} else {
constraints.audio = false;
}
if (um.indexOf('desktop') >= 0) {
if (!constraints.video || typeof constraints.video === 'boolean') {
constraints.video = {};
}
constraints.video = {
mandatory: getSSConstraints({
...options,
source: 'desktop'
})
};
}
return constraints;
}
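// Illustrative result of newGetConstraints (a sketch; the exact shape depends
// on the browser and on options.constraints). For example, on Firefox with
// hypothetical device ids 'cam1' and 'mic1' and audio processing enabled:
//
//     newGetConstraints([ 'audio', 'video' ], {
//         cameraDeviceId: 'cam1',
//         micDeviceId: 'mic1'
//     });
//     // => {
//     //     video: { height: { ideal: 720, max: 720, min: 240 }, deviceId: 'cam1' },
//     //     audio: { deviceId: 'mic1', autoGainControl: true,
//     //              echoCancellation: true, noiseSuppression: true }
//     // }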
/**
* Generates GUM constraints for screen sharing.
*
* @param {Object} options - The options passed to
* obtainAudioAndVideoPermissions.
* @returns {Object} - GUM constraints.
*
 * TODO: Currently only the new GUM flow and Chrome use this method. We
 * should make it work for all use cases.
*/
function getSSConstraints(options = {}) {
const {
desktopStream,
frameRate = {
min: SS_DEFAULT_FRAME_RATE,
max: SS_DEFAULT_FRAME_RATE
}
} = options;
const { max, min } = frameRate;
const constraints = {
chromeMediaSource: options.source,
maxWidth: window.screen.width,
maxHeight: window.screen.height
};
if (typeof min === 'number') {
constraints.minFrameRate = min;
}
if (typeof max === 'number') {
constraints.maxFrameRate = max;
}
if (typeof desktopStream !== 'undefined') {
constraints.chromeMediaSourceId = desktopStream;
}
return constraints;
}
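// Illustrative result of getSSConstraints (a sketch; the max width/height
// come from window.screen at runtime, 1920x1080 here is just an example):
//
//     getSSConstraints({ source: 'desktop', desktopStream: 'some-stream-id' });
//     // => {
//     //     chromeMediaSource: 'desktop',
//     //     maxWidth: 1920,
//     //     maxHeight: 1080,
//     //     minFrameRate: 5,
//     //     maxFrameRate: 5,
//     //     chromeMediaSourceId: 'some-stream-id'
//     // }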
/**
* Generates constraints for screen sharing when using getDisplayMedia.
* The constraints(MediaTrackConstraints) are applied to the resulting track.
*
* @returns {Object} - MediaTrackConstraints constraints.
*/
function getTrackSSConstraints(options = {}) {
// We used to set height and width in the constraints, but this can lead
// to inconsistencies if the browser is on a lower resolution screen and
// we share a screen with a bigger resolution, so they are no longer set.
const constraints = {
frameRate: SS_DEFAULT_FRAME_RATE
};
const { desktopSharingFrameRate } = options;
if (desktopSharingFrameRate && desktopSharingFrameRate.max) {
constraints.frameRate = desktopSharingFrameRate.max;
}
return constraints;
}
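// Illustrative result of getTrackSSConstraints (a sketch):
//
//     getTrackSSConstraints();
//     // => { frameRate: 5 }
//
//     getTrackSSConstraints({ desktopSharingFrameRate: { min: 5, max: 30 } });
//     // => { frameRate: 30 }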
/**
* Updates the granted permissions based on the options we requested and the
* streams we received.
* @param um the options we requested to getUserMedia.
* @param stream the stream we received from calling getUserMedia.
*/
function updateGrantedPermissions(um, stream) {
const audioTracksReceived
= Boolean(stream) && stream.getAudioTracks().length > 0;
const videoTracksReceived
= Boolean(stream) && stream.getVideoTracks().length > 0;
const grantedPermissions = {};
if (um.indexOf('video') !== -1) {
grantedPermissions.video = videoTracksReceived;
}
if (um.indexOf('audio') !== -1) {
grantedPermissions.audio = audioTracksReceived;
}
eventEmitter.emit(RTCEvents.PERMISSIONS_CHANGED, grantedPermissions);
}
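// Illustrative behaviour of updateGrantedPermissions (a sketch): when
// [ 'audio', 'video' ] was requested but the returned stream contains only an
// audio track, RTCEvents.PERMISSIONS_CHANGED is emitted with the payload
// { video: false, audio: true }.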
/**
 * Checks if the new list of available media devices differs from the
 * previous one.
 * @param {MediaDeviceInfo[]} newDevices - The list of new devices.
 * @returns {boolean} - True if the list is different, false otherwise.
*/
function compareAvailableMediaDevices(newDevices) {
if (newDevices.length !== availableDevices.length) {
return true;
}
/* eslint-disable newline-per-chained-call */
return (
newDevices.map(mediaDeviceInfoToJSON).sort().join('')
!== availableDevices
.map(mediaDeviceInfoToJSON).sort().join(''));
/* eslint-enable newline-per-chained-call */
/**
 * Serializes the comparison-relevant fields of a MediaDeviceInfo to JSON.
 *
 * @param {MediaDeviceInfo} info - The device info to serialize.
 * @returns {string} The JSON representation of the device.
 */
function mediaDeviceInfoToJSON(info) {
return JSON.stringify({
kind: info.kind,
deviceId: info.deviceId,
groupId: info.groupId,
label: info.label,
facing: info.facing
});
}
}
/**
* Sends analytics event with the passed device list.
*
* @param {Array} deviceList - List with info about the
* available devices.
* @returns {void}
*/
function sendDeviceListToAnalytics(deviceList) {
const audioInputDeviceCount
= deviceList.filter(d => d.kind === 'audioinput').length;
const audioOutputDeviceCount
= deviceList.filter(d => d.kind === 'audiooutput').length;
const videoInputDeviceCount
= deviceList.filter(d => d.kind === 'videoinput').length;
const videoOutputDeviceCount
= deviceList.filter(d => d.kind === 'videooutput').length;
deviceList.forEach(device => {
const attributes = {
'audio_input_device_count': audioInputDeviceCount,
'audio_output_device_count': audioOutputDeviceCount,
'video_input_device_count': videoInputDeviceCount,
'video_output_device_count': videoOutputDeviceCount,
'device_id': device.deviceId,
'device_group_id': device.groupId,
'device_kind': device.kind,
'device_label': device.label
};
Statistics.sendAnalytics(AVAILABLE_DEVICE, attributes);
});
}
/**
* Update known devices.
*
* @param {Array