
address code review

dev1
Andrei Gavrilescu committed 5 years ago
parent
commit 67688275ae

+5 -2  modules/detection/ActiveDeviceDetector.js

@@ -1,4 +1,5 @@
 import { getLogger } from 'jitsi-meet-logger';
+
 import * as JitsiTrackEvents from '../../JitsiTrackEvents';
 import RTC from '../RTC/RTC';
 import Statistics from '../statistics/statistics';
@@ -74,8 +75,10 @@ export default function getActiveAudioDevice() {
                 // Cancel the detection in case no devices was found with audioLevel > 0 in the set timeout.
                 setTimeout(() => {
                     stopActiveDevices(availableDevices);
-                    resolve({ deviceId: '',
-                        deviceLabel: '' });
+                    resolve({
+                        deviceId: '',
+                        deviceLabel: '' }
+                    );
                 }, DETECTION_TIMEOUT);

             });
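Note: the timeout branch above resolves the detection promise with empty strings rather than rejecting. A minimal consumer sketch, assuming the module's default export is imported directly (the import path and logging are illustrative, not part of this change):

    import getActiveAudioDevice from './modules/detection/ActiveDeviceDetector';

    getActiveAudioDevice().then(({ deviceId, deviceLabel }) => {
        if (deviceId === '') {
            // Detection timed out: no device produced audioLevel > 0 within DETECTION_TIMEOUT.
            console.log('No active audio device detected');
        } else {
            console.log(`Active audio device: ${deviceLabel} (${deviceId})`);
        }
    });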

+8 -29  modules/detection/NoAudioSignalDetection.js

@@ -1,5 +1,7 @@
 import EventEmitter from 'events';
+
 import * as JitsiConferenceEvents from '../../JitsiConferenceEvents';
+
 import * as DetectionEvents from './DetectionEvents';
 
 // We wait a certain time interval for constant silence input from the current device to account for
@@ -16,6 +18,8 @@ const SILENCE_PERIOD_MS = 4000;
  */
 export default class NoAudioSignalDetection extends EventEmitter {
     /**
+     * Creates new NoAudioSignalDetection.
+     *
      * @param conference the JitsiConference instance that created us.
      * @constructor
      */
@@ -42,32 +46,19 @@ export default class NoAudioSignalDetection extends EventEmitter {
     /**
      * Generated event triggered by a change in the current conference audio input state.
      *
-     * @param {*} audioLevel
+     * @param {*} audioLevel - The audio level of the ssrc.
      * @fires DetectionEvents.AUDIO_INPUT_STATE_CHANGE
      */
     _handleAudioInputStateChange(audioLevel) {
         // Current audio input state of the active local track in the conference, true for audio input false for no
         // audio input.
         const status = audioLevel !== 0;
-        let shouldTrigger;
 
-        // If we this is the first audio event picked up or the current status is different from the previous trigger
+        // If this is the first audio event picked up or the current status is different from the previous trigger
         // the event.
-        if (this._hasAudioInput === null) {
-            shouldTrigger = true;
-        } else if (this._hasAudioInput !== status) {
-            shouldTrigger = true;
-        }
-
-        if (shouldTrigger) {
+        if (this._hasAudioInput === null || this._hasAudioInput !== status) {
             this._hasAudioInput = status;
 
-            /**
-             * Event fired when the audio input state of the conference changes, true for audio input false otherwise.
-             *
-             * @event DetectionEvents.AUDIO_INPUT_STATE_CHANGE
-             * @type {boolean}
-             */
             this.emit(DetectionEvents.AUDIO_INPUT_STATE_CHANGE, this._hasAudioInput);
         }
     }
@@ -87,12 +78,6 @@ export default class NoAudioSignalDetection extends EventEmitter {
             this._timeoutTrigger = setTimeout(() => {
                 this._eventFired = true;
 
-                /**
-                 * Event fired when there is no audio input for a predefined period of time.
-                 *
-                 * @event DetectionEvents.AUDIO_INPUT_STATE_CHANGE
-                 * @type {void}
-                 */
                 this.emit(DetectionEvents.NO_AUDIO_INPUT);
             }, SILENCE_PERIOD_MS);
         } else if (audioLevel !== 0 && this._timeoutTrigger) {
@@ -118,16 +103,10 @@ export default class NoAudioSignalDetection extends EventEmitter {
         // Get currently active local tracks from the TraceablePeerConnection
         const localSSRCs = tpc.localSSRCs.get(this._audioTrack.rtcId);
 
-        // Check that currently selected audio stream has ssrc in the TraceablePeerConnection
-        if (!localSSRCs) {
-            return;
-        }
 
         // Only target the current active track in the tpc. For some reason audio levels for previous
         // devices are also picked up from the PeerConnection so we filter them out.
-        const isCurrentTrack = localSSRCs.ssrcs.includes(ssrc);
-
-        if (!isCurrentTrack) {
+        if (!localSSRCs || !localSSRCs.ssrcs.includes(ssrc)) {
             return;
         }
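With the inline @event JSDoc blocks removed above, the emitted events are easiest to see from a consumer's side. A minimal listener sketch, assuming conference is an existing JitsiConference instance (the wiring below is illustrative only, not part of this change):

    import NoAudioSignalDetection from './modules/detection/NoAudioSignalDetection';
    import * as DetectionEvents from './modules/detection/DetectionEvents';

    const noAudioDetection = new NoAudioSignalDetection(conference);

    // Boolean payload: true while the active local track produces audio input, false otherwise.
    noAudioDetection.on(DetectionEvents.AUDIO_INPUT_STATE_CHANGE, hasAudioInput => {
        console.log('Audio input state changed:', hasAudioInput);
    });

    // Fired once after SILENCE_PERIOD_MS of constant silence from the current device.
    noAudioDetection.on(DetectionEvents.NO_AUDIO_INPUT, () => {
        console.log('No audio input detected');
    });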
 

+7 -11  modules/detection/TrackVADEmitter.js

@@ -1,7 +1,8 @@
 import EventEmitter from 'events';
-import { VAD_SCORE_PUBLISHED } from './DetectionEvents';
+
 import RTC from '../RTC/RTC';
 
+import { VAD_SCORE_PUBLISHED } from './DetectionEvents';
 
 /**
  * Connects an audio JitsiLocalTrack to a vadProcessor using WebAudio ScriptProcessorNode.
@@ -101,7 +102,11 @@ export default class TrackVADEmitter extends EventEmitter {
     _initializeAudioContext() {
         this._audioSource = this._audioContext.createMediaStreamSource(this._localTrack.stream);
 
-        // TODO AudioProcessingNode is deprecated check and replace with alternative.
+        // TODO AudioProcessingNode is deprecated in the web audio specifications and the recommended replacement
+        // is audio worklet, however at the point of implementation AudioProcessingNode was still de de facto way
+        // of achieving this functionality and supported in all major browsers as opposed to audio worklet which
+        // was only available in Chrome. This todo is just a reminder that we should replace AudioProcessingNode
+        // with audio worklet when it's mature enough and has more browser support.
         // We don't need stereo for determining the VAD score so we create a single channel processing node.
         this._audioProcessingNode = this._audioContext.createScriptProcessor(this._procNodeSampleRate, 1, 1);
     }
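The expanded TODO above explains why ScriptProcessorNode is still used. For reference only, a rough sketch of what the audio worklet replacement it mentions could look like (file name, processor name, and the helper function are hypothetical; buffering up to the VAD frame size is omitted):

    // vad.worklet.js -- hypothetical worklet module, not part of this commit.
    class VadForwarder extends AudioWorkletProcessor {
        process(inputs) {
            // Forward the first input's first channel (one 128-sample render quantum) to the main thread.
            if (inputs[0] && inputs[0][0]) {
                this.port.postMessage(inputs[0][0]);
            }

            return true;
        }
    }
    registerProcessor('vad-forwarder', VadForwarder);

    // Main thread, roughly where createScriptProcessor is called today:
    async function initializeWorklet(audioContext, audioSource, vadProcessor) {
        await audioContext.audioWorklet.addModule('vad.worklet.js');

        const workletNode = new AudioWorkletNode(audioContext, 'vad-forwarder');

        audioSource.connect(workletNode);
        workletNode.port.onmessage = ({ data }) => vadProcessor.calculateAudioFrameVAD(data);
    }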
@@ -129,15 +134,6 @@
             const pcmSample = completeInData.slice(i, i + this._vadSampleSize);
             const vadScore = this._vadProcessor.calculateAudioFrameVAD(pcmSample);
 
-            /**
-             * VAD score publish event
-             *
-             * @event VAD_SCORE_PUBLISHED
-             * @type {Object}
-             * @property {Date}   timestamp - Exact time at which processed PCM sample was generated.
-             * @property {number} score - VAD score on a scale from 0 to 1 (i.e. 0.7)
-             * @property {string} deviceId - Device id of the associated track.
-             */
             this.emit(VAD_SCORE_PUBLISHED, {
                 timestamp: sampleTimestamp,
                 score: vadScore,
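The removed JSDoc above described the VAD_SCORE_PUBLISHED payload: timestamp of the processed PCM sample, score on a 0 to 1 scale, and the deviceId of the associated track. A minimal listener sketch, assuming vadEmitter is an existing TrackVADEmitter instance (how it is created is outside this diff):

    import { VAD_SCORE_PUBLISHED } from './modules/detection/DetectionEvents';

    vadEmitter.on(VAD_SCORE_PUBLISHED, ({ timestamp, score, deviceId }) => {
        // score is the VAD score (e.g. 0.7) for the PCM sample generated at timestamp.
        console.log(`VAD score ${score} for device ${deviceId} at ${timestamp}`);
    });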

+1 -0  modules/detection/VADReportingService.js

@@ -1,5 +1,6 @@
 import EventEmitter from 'events';
 import { getLogger } from 'jitsi-meet-logger';
+
 import * as DetectionEvents from './DetectionEvents';
 import TrackVADEmitter from './TrackVADEmitter';

+9 -43  modules/detection/VADTalkMutedDetection.js

@@ -1,6 +1,8 @@
 import { EventEmitter } from 'events';
-import * as JitsiConferenceEvents from '../../JitsiConferenceEvents';
 import { getLogger } from 'jitsi-meet-logger';
+
+import * as JitsiConferenceEvents from '../../JitsiConferenceEvents';
+
 import { VAD_SCORE_PUBLISHED, VAD_TALK_WHILE_MUTED } from './DetectionEvents';
 import TrackVADEmitter from './TrackVADEmitter';
 
@@ -79,7 +81,9 @@ export default class VADTalkMutedDetection extends EventEmitter {
          */
         this._vadInitTracker = null;
 
-
+        /**
+         * Listens for {@link TrackVADEmitter} events and processes them.
+         */
         this._processVADScore = this._processVADScore.bind(this);
 
         /**
@@ -88,30 +92,6 @@ export default class VADTalkMutedDetection extends EventEmitter {
         conference.on(JitsiConferenceEvents.TRACK_ADDED, this._trackAdded.bind(this));
         conference.on(JitsiConferenceEvents.TRACK_REMOVED, this._trackRemoved.bind(this));
         conference.on(JitsiConferenceEvents.TRACK_MUTE_CHANGED, this._trackMuteChanged.bind(this));
-
-        // TODO do we need to handle the case where tracks are removed, make sure this cleans up properly so
-        // we don't have any leeks i.e. stale JitsiLocalTracks
-    }
-
-    /**
-     * Determine if the current score is high enough that we should start the final score processing, and make sure
-     * there isn't already a process operation ongoing.
-     *
-     * @param {number} score - PCM sample VAD score.
-     * @return {boolean}
-     */
-    _shouldStartVADCompute(vadScore) {
-        return vadScore > VAD_VOICE_LEVEL && !this._processing;
-    }
-
-    /**
-     * Determine if the computed score over the configured timestamp should trigger an event.
-     *
-     * @param {number} computedScore - Computed VAD score.
-     * @returns {boolean} - Should or shouldn't trigger.
-     */
-    _shouldTriggerNotification(computedScore) {
-        return computedScore > VAD_AVG_THRESHOLD;
     }
 
     /**
@@ -139,15 +119,7 @@ export default class VADTalkMutedDetection extends EventEmitter {
      * @returns {number} - Score average.
      */
     _calculateAverage(scoreArray) {
-        let avg = 0;
-
-        if (scoreArray.length) {
-            const sum = scoreArray.reduce((a, b) => a + b);
-
-            avg = sum / scoreArray.length;
-        }
-
-        return avg;
+        return scoreArray.length > 0 ? scoreArray.reduce((a, b) => a + b) / scoreArray.length : 0;
     }
 
     /**
@@ -158,13 +130,7 @@ export default class VADTalkMutedDetection extends EventEmitter {
     _calculateVADScore() {
         const score = this._calculateAverage(this._scoreArray);
 
-        if (this._shouldTriggerNotification(score)) {
-            /**
-             * User is talking while the mic is muted, generate event.
-             *
-             * @event VAD_TALK_WHILE_MUTED.
-             * @type {Object}
-             */
+        if (score > VAD_AVG_THRESHOLD) {
             this.emit(VAD_TALK_WHILE_MUTED, {});
 
             // Event was fired. Stop event emitter and remove listeners so no residue events kick off after this point
@@ -188,7 +154,7 @@ export default class VADTalkMutedDetection extends EventEmitter {
     _processVADScore(vadScore) {
         // Because we remove all listeners on the vadEmitter once the main event is triggered,
        // there is no need to check for rogue events.
-        if (this._shouldStartVADCompute(vadScore.score)) {
+        if (vadScore.score > VAD_VOICE_LEVEL && !this._processing) {
             this._processing = true;
 
             // Start gathering VAD scores for the configured period of time.
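The helpers _shouldStartVADCompute and _shouldTriggerNotification are removed; their single-comparison bodies are inlined at the two call sites above, and _calculateAverage collapses to one reduce expression. A standalone sketch of the resulting logic (the 0.9 and 0.6 constants are placeholders; the real VAD_VOICE_LEVEL and VAD_AVG_THRESHOLD values are defined elsewhere in the file):

    const VAD_VOICE_LEVEL = 0.9;   // placeholder value
    const VAD_AVG_THRESHOLD = 0.6; // placeholder value

    // Average of an array of VAD scores, 0 for an empty array.
    const calculateAverage = scoreArray =>
        scoreArray.length > 0 ? scoreArray.reduce((a, b) => a + b) / scoreArray.length : 0;

    // A single sample above VAD_VOICE_LEVEL starts a scoring window (when not already processing);
    // an average above VAD_AVG_THRESHOLD over that window triggers VAD_TALK_WHILE_MUTED.
    const shouldStartCompute = (score, processing) => score > VAD_VOICE_LEVEL && !processing;
    const shouldNotify = scores => calculateAverage(scores) > VAD_AVG_THRESHOLD;

    console.log(shouldStartCompute(0.95, false)); // true
    console.log(shouldNotify([0.7, 0.8, 0.65]));  // true (average ≈ 0.72 > 0.6)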
