@@ -1,6 +1,6 @@
+// @flow
 
-import { drawBokehEffect } from '@tensorflow-models/body-pix';
-
+import * as bodyPix from '@tensorflow-models/body-pix';
 import {
     CLEAR_INTERVAL,
     INTERVAL_TIMEOUT,
@@ -14,57 +14,86 @@ import {
  * video stream.
  */
 export default class JitsiStreamBlurEffect {
+    _bpModel: Object;
+    _inputVideoElement: HTMLVideoElement;
+    _onMaskFrameTimer: Function;
+    _maskFrameTimerWorker: Worker;
+    _maskInProgress: boolean;
+    _outputCanvasElement: HTMLCanvasElement;
+    _renderMask: Function;
+    _segmentationData: Object;
+    isEnabled: Function;
+    startEffect: Function;
+    stopEffect: Function;
+
     /**
      * Represents a modified video MediaStream track.
      *
      * @class
      * @param {BodyPix} bpModel - BodyPix model.
      */
-    constructor(bpModel) {
+    constructor(bpModel: Object) {
         this._bpModel = bpModel;
 
         // Bind event handler so it is only bound once for every instance.
         this._onMaskFrameTimer = this._onMaskFrameTimer.bind(this);
-        this._onVideoFrameTimer = this._onVideoFrameTimer.bind(this);
-
-        this._outputCanvasElement = document.createElement('canvas');
 
         // Workaround for FF issue https://bugzilla.mozilla.org/show_bug.cgi?id=1388974
+        this._outputCanvasElement = document.createElement('canvas');
         this._outputCanvasElement.getContext('2d');
-
-        this._maskCanvasElement = document.createElement('canvas');
         this._inputVideoElement = document.createElement('video');
 
-        this._videoFrameTimerWorker = new Worker(timerWorkerScript);
         this._maskFrameTimerWorker = new Worker(timerWorkerScript);
-        this._videoFrameTimerWorker.onmessage = this._onVideoFrameTimer;
         this._maskFrameTimerWorker.onmessage = this._onMaskFrameTimer;
     }
 
     /**
-     * EventHandler onmessage for the videoFrameTimerWorker WebWorker.
+     * EventHandler onmessage for the maskFrameTimerWorker WebWorker.
      *
      * @private
      * @param {EventHandler} response - The onmessage EventHandler parameter.
      * @returns {void}
      */
-    _onVideoFrameTimer(response) {
+    async _onMaskFrameTimer(response: Object) {
         if (response.data.id === INTERVAL_TIMEOUT) {
-            this._renderVideo();
+            if (!this._maskInProgress) {
+                await this._renderMask();
+            }
         }
     }
 
     /**
-     * EventHandler onmessage for the maskFrameTimerWorker WebWorker.
+     * Loop function to render the background mask.
      *
      * @private
-     * @param {EventHandler} response - The onmessage EventHandler parameter.
      * @returns {void}
      */
-    _onMaskFrameTimer(response) {
-        if (response.data.id === INTERVAL_TIMEOUT) {
-            this._renderMask();
-        }
+    async _renderMask() {
+        this._maskInProgress = true;
+        this._segmentationData = await this._bpModel.segmentPerson(this._inputVideoElement, {
+            internalResolution: 'low', // resized to 0.25 times of the original resolution before inference
+            maxDetections: 1, // max. number of person poses to detect per image
+            segmentationThreshold: 0.7 // represents probability that a pixel belongs to a person
+        });
+        this._maskInProgress = false;
+        bodyPix.drawBokehEffect(
+            this._outputCanvasElement,
+            this._inputVideoElement,
+            this._segmentationData,
+            7, // Constant for background blur, integer values between 0-20
+            7 // Constant for edge blur, integer values between 0-20
+        );
+    }
+
+    /**
+     * Checks if the local track supports this effect.
+     *
+     * @param {JitsiLocalTrack} jitsiLocalTrack - Track to apply effect.
+     * @returns {boolean} - Returns true if this effect can run on the specified track
+     * false otherwise.
+     */
+    isEnabled(jitsiLocalTrack: Object) {
+        return jitsiLocalTrack.isVideoTrack() && jitsiLocalTrack.videoType === 'camera';
     }
 
     /**
@@ -73,37 +102,25 @@ export default class JitsiStreamBlurEffect {
      * @param {MediaStream} stream - Stream to be used for processing.
      * @returns {MediaStream} - The stream with the applied effect.
      */
-    startEffect(stream) {
+    startEffect(stream: MediaStream) {
         const firstVideoTrack = stream.getVideoTracks()[0];
         const { height, frameRate, width }
             = firstVideoTrack.getSettings ? firstVideoTrack.getSettings() : firstVideoTrack.getConstraints();
 
-        this._frameRate = frameRate;
-        this._height = height;
-        this._width = width;
-
-        this._outputCanvasElement.width = width;
-        this._outputCanvasElement.height = height;
-
-        this._maskCanvasElement.width = width;
-        this._maskCanvasElement.height = height;
-
-        this._maskCanvasContext = this._maskCanvasElement.getContext('2d');
-        this._inputVideoElement.width = width;
-        this._inputVideoElement.height = height;
+        this._outputCanvasElement.width = parseInt(width, 10);
+        this._outputCanvasElement.height = parseInt(height, 10);
+        this._inputVideoElement.width = parseInt(width, 10);
+        this._inputVideoElement.height = parseInt(height, 10);
         this._inputVideoElement.autoplay = true;
         this._inputVideoElement.srcObject = stream;
-
-        this._videoFrameTimerWorker.postMessage({
-            id: SET_INTERVAL,
-            timeMs: 1000 / this._frameRate
-        });
-        this._maskFrameTimerWorker.postMessage({
-            id: SET_INTERVAL,
-            timeMs: 50
-        });
-
-        return this._outputCanvasElement.captureStream(this._frameRate);
+        this._inputVideoElement.onloadeddata = () => {
+            this._maskFrameTimerWorker.postMessage({
+                id: SET_INTERVAL,
+                timeMs: 1000 / parseInt(frameRate, 10)
+            });
+        };
+
+        return this._outputCanvasElement.captureStream(parseInt(frameRate, 10));
     }
 
     /**
@@ -112,65 +129,8 @@ export default class JitsiStreamBlurEffect {
      * @returns {void}
      */
     stopEffect() {
-        this._videoFrameTimerWorker.postMessage({
-            id: CLEAR_INTERVAL
-        });
         this._maskFrameTimerWorker.postMessage({
             id: CLEAR_INTERVAL
         });
     }
-
-    /**
-     * Loop function to render the video frame input and draw blur effect.
-     *
-     * @private
-     * @returns {void}
-     */
-    _renderVideo() {
-        this._maskCanvasContext.drawImage(this._inputVideoElement, 0, 0, this._width, this._height);
-        if (this._segmentationData) {
-            drawBokehEffect(
-                this._outputCanvasElement,
-                this._inputVideoElement,
-                this._segmentationData,
-                7, // Constant for background blur, integer values between 0-20
-                7 // Constant for edge blur, integer values between 0-20
-            );
-
-            // Make sure we clear this buffer before feeding the segmentation data
-            // to drawBokehEffect for creating the blur. This fixes the memory leak
-            // that started happening in WebGL in Chrome 77 and up.
-            this._segmentationData = null;
-        }
-    }
-
-    /**
-     * Loop function to render the background mask.
-     *
-     * @private
-     * @returns {void}
-     */
-    _renderMask() {
-        this._bpModel.estimatePersonSegmentation(
-            this._maskCanvasElement,
-            32, // Chose 32 for better performance
-            0.75 // Represents probability that a pixel belongs to a person
-        )
-        .then(value => {
-            this._segmentationData = value;
-        });
-    }
-
-    /**
-     * Checks if the local track supports this effect.
-     *
-     * @param {JitsiLocalTrack} jitsiLocalTrack - Track to apply effect.
-     * @returns {boolean} - Returns true if this effect can run on the specified track
-     * false otherwise.
-     */
-    isEnabled(jitsiLocalTrack) {
-        return jitsiLocalTrack.isVideoTrack() && jitsiLocalTrack.videoType === 'camera';
-    }
 }
-
-
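
For quick reference while reviewing, a minimal usage sketch of the refactored class follows. It is illustrative only: it assumes the class above is the default export of './JitsiStreamBlurEffect', it substitutes a plain getUserMedia stream for the JitsiLocalTrack MediaStream that lib-jitsi-meet normally supplies, and the bodyPix.load() options shown are placeholder defaults rather than values taken from this change.

// Minimal sketch, not part of the patch. Assumes './JitsiStreamBlurEffect'
// exports the class above and that a raw camera MediaStream stands in for
// the stream of a JitsiLocalTrack.
import * as bodyPix from '@tensorflow-models/body-pix';

import JitsiStreamBlurEffect from './JitsiStreamBlurEffect';

async function startBlurredPreview(videoElement) {
    // Load a lightweight MobileNetV1 model; heavier configurations trade
    // CPU time for segmentation quality. These options are illustrative.
    const model = await bodyPix.load({
        architecture: 'MobileNetV1',
        outputStride: 16,
        multiplier: 0.75,
        quantBytes: 2
    });

    const cameraStream = await navigator.mediaDevices.getUserMedia({ video: true });
    const effect = new JitsiStreamBlurEffect(model);

    // startEffect() feeds the camera stream into the hidden <video> element,
    // starts the mask timer worker and returns the canvas capture stream.
    videoElement.srcObject = effect.startEffect(cameraStream);

    // Call effect.stopEffect() later to clear the timer worker interval.
}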