@@ -1,7 +1,4 @@
 // @flow
-
-import * as StackBlur from 'stackblur-canvas';
-
 import {
     CLEAR_TIMEOUT,
     TIMEOUT_TICK,
@@ -9,21 +6,28 @@ import {
     timerWorkerScript
 } from './TimerWorker';
 
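+// Resolution the segmentation model operates on; frames are downscaled to this size before inference.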
+const segmentationWidth = 256;
+const segmentationHeight = 144;
+const segmentationPixelCount = segmentationWidth * segmentationHeight;
+const blurValue = '25px';
+
 /**
  * Represents a modified MediaStream that adds blur to video background.
  * <tt>JitsiStreamBlurEffect</tt> does the processing of the original
  * video stream.
  */
 export default class JitsiStreamBlurEffect {
-    _bpModel: Object;
+    _model: Object;
     _inputVideoElement: HTMLVideoElement;
-    _inputVideoCanvasElement: HTMLCanvasElement;
     _onMaskFrameTimer: Function;
     _maskFrameTimerWorker: Worker;
-    _maskInProgress: boolean;
     _outputCanvasElement: HTMLCanvasElement;
+    _outputCanvasCtx: Object;
+    _segmentationMaskCtx: Object;
+    _segmentationMask: Object;
+    _segmentationMaskCanvas: Object;
     _renderMask: Function;
-    _segmentationData: Object;
     isEnabled: Function;
     startEffect: Function;
     stopEffect: Function;
@@ -35,7 +39,7 @@ export default class JitsiStreamBlurEffect {
      * @param {BodyPix} bpModel - BodyPix model.
      */
    constructor(bpModel: Object) {
-        this._bpModel = bpModel;
+        this._model = bpModel;
 
         // Bind event handler so it is only bound once for every instance.
         this._onMaskFrameTimer = this._onMaskFrameTimer.bind(this);
@@ -44,7 +48,6 @@ export default class JitsiStreamBlurEffect {
         this._outputCanvasElement = document.createElement('canvas');
         this._outputCanvasElement.getContext('2d');
         this._inputVideoElement = document.createElement('video');
-        this._inputVideoCanvasElement = document.createElement('canvas');
     }
 
     /**
@@ -61,61 +64,113 @@ export default class JitsiStreamBlurEffect {
     }
 
     /**
-     * Loop function to render the background mask.
+     * Runs the post processing: applies the segmentation mask and blurs the background.
      *
-     * @private
      * @returns {void}
      */
-    async _renderMask() {
-        if (!this._maskInProgress) {
-            this._maskInProgress = true;
-            this._bpModel.segmentPerson(this._inputVideoElement, {
-                internalResolution: 'low', // resized to 0.5 times of the original resolution before inference
-                maxDetections: 1, // max. number of person poses to detect per image
-                segmentationThreshold: 0.7, // represents probability that a pixel belongs to a person
-                flipHorizontal: false,
-                scoreThreshold: 0.2
-            }).then(data => {
-                this._segmentationData = data;
-                this._maskInProgress = false;
-            });
-        }
-        const inputCanvasCtx = this._inputVideoCanvasElement.getContext('2d');
-
-        inputCanvasCtx.drawImage(this._inputVideoElement, 0, 0);
+    runPostProcessing() {
+        this._outputCanvasCtx.globalCompositeOperation = 'copy';
 
-        const currentFrame = inputCanvasCtx.getImageData(
+        // Draw segmentation mask.
+        this._outputCanvasCtx.filter = `blur(${blurValue})`;
+        this._outputCanvasCtx.drawImage(
+            this._segmentationMaskCanvas,
+            0,
             0,
+            segmentationWidth,
+            segmentationHeight,
             0,
-            this._inputVideoCanvasElement.width,
-            this._inputVideoCanvasElement.height
+            0,
+            this._inputVideoElement.width,
+            this._inputVideoElement.height
         );
 
-        if (this._segmentationData) {
-            const blurData = new ImageData(currentFrame.data.slice(), currentFrame.width, currentFrame.height);
-
-            StackBlur.imageDataRGB(blurData, 0, 0, currentFrame.width, currentFrame.height, 12);
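+        // 'source-in' keeps the input frame only where the mask is opaque, i.e. the person.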
+        this._outputCanvasCtx.globalCompositeOperation = 'source-in';
+        this._outputCanvasCtx.filter = 'none';
+        this._outputCanvasCtx.drawImage(this._inputVideoElement, 0, 0);
 
-            for (let x = 0; x < this._outputCanvasElement.width; x++) {
-                for (let y = 0; y < this._outputCanvasElement.height; y++) {
-                    const n = (y * this._outputCanvasElement.width) + x;
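+        // 'destination-over' paints a blurred copy of the frame behind the person.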
+        this._outputCanvasCtx.globalCompositeOperation = 'destination-over';
+        this._outputCanvasCtx.filter = `blur(${blurValue})`; // FIXME Does not work on Safari.
+        this._outputCanvasCtx.drawImage(this._inputVideoElement, 0, 0);
+    }
 
-                    if (this._segmentationData.data[n] === 0) {
-                        currentFrame.data[n * 4] = blurData.data[n * 4];
-                        currentFrame.data[(n * 4) + 1] = blurData.data[(n * 4) + 1];
-                        currentFrame.data[(n * 4) + 2] = blurData.data[(n * 4) + 2];
-                        currentFrame.data[(n * 4) + 3] = blurData.data[(n * 4) + 3];
-                    }
-                }
-            }
+    /**
+     * Runs the TensorFlow inference and builds the segmentation mask.
+     *
+     * @returns {void}
+     */
+    runInference() {
+        this._model._runInference();
+        const outputMemoryOffset = this._model._getOutputMemoryOffset() / 4;
+
+        for (let i = 0; i < segmentationPixelCount; i++) {
+            const background = this._model.HEAPF32[outputMemoryOffset + (i * 2)];
+            const person = this._model.HEAPF32[outputMemoryOffset + (i * 2) + 1];
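+            // Numerically stable two-class softmax over the (background, person) scores.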
+            const shift = Math.max(background, person);
+            const backgroundExp = Math.exp(background - shift);
+            const personExp = Math.exp(person - shift);
+
+            // Sets only the alpha component of each pixel.
+            this._segmentationMask.data[(i * 4) + 3] = (255 * personExp) / (backgroundExp + personExp);
         }
-        this._outputCanvasElement.getContext('2d').putImageData(currentFrame, 0, 0);
+        this._segmentationMaskCtx.putImageData(this._segmentationMask, 0, 0);
+    }
+
+    /**
+     * Loop function to render the background mask.
+     *
+     * @private
+     * @returns {void}
+     */
+    _renderMask() {
+        this.resizeSource();
+        this.runInference();
+        this.runPostProcessing();
+
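+        // Schedule the next mask refresh at roughly 30 frames per second.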
         this._maskFrameTimerWorker.postMessage({
             id: SET_TIMEOUT,
             timeMs: 1000 / 30
         });
     }
 
+    /**
+     * Downscales the current input frame and copies it into the model's input buffer.
+     *
+     * @returns {void}
+     */
+    resizeSource() {
+        this._segmentationMaskCtx.drawImage(
+            this._inputVideoElement,
+            0,
+            0,
+            this._inputVideoElement.width,
+            this._inputVideoElement.height,
+            0,
+            0,
+            segmentationWidth,
+            segmentationHeight
+        );
+
+        const imageData = this._segmentationMaskCtx.getImageData(
+            0,
+            0,
+            segmentationWidth,
+            segmentationHeight
+        );
+        const inputMemoryOffset = this._model._getInputMemoryOffset() / 4;
+
|
|
|
161
|
+ for (let i = 0; i < segmentationPixelCount; i++) {
|
|
|
162
|
+ this._model.HEAPF32[inputMemoryOffset + (i * 3)] = imageData.data[i * 4] / 255;
|
|
|
163
|
+ this._model.HEAPF32[inputMemoryOffset + (i * 3) + 1] = imageData.data[(i * 4) + 1] / 255;
|
|
|
164
|
+ this._model.HEAPF32[inputMemoryOffset + (i * 3) + 2] = imageData.data[(i * 4) + 2] / 255;
|
|
|
165
|
+ }
|
|
|
166
|
+ }
|
|
|
167
|
+
|
|
119
|
168
|
/**
|
|
120
|
169
|
* Checks if the local track supports this effect.
|
|
121
|
170
|
*
|
|
|
@@ -136,15 +185,18 @@ export default class JitsiStreamBlurEffect {
|
|
136
|
185
|
startEffect(stream: MediaStream) {
|
|
137
|
186
|
this._maskFrameTimerWorker = new Worker(timerWorkerScript, { name: 'Blur effect worker' });
|
|
138
|
187
|
this._maskFrameTimerWorker.onmessage = this._onMaskFrameTimer;
|
|
139
|
|
-
|
|
140
|
188
|
const firstVideoTrack = stream.getVideoTracks()[0];
|
|
141
|
189
|
const { height, frameRate, width }
|
|
142
|
190
|
= firstVideoTrack.getSettings ? firstVideoTrack.getSettings() : firstVideoTrack.getConstraints();
|
|
143
|
191
|
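+        // The mask is computed at the model's resolution and scaled up when composited.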
+        this._segmentationMask = new ImageData(segmentationWidth, segmentationHeight);
+        this._segmentationMaskCanvas = document.createElement('canvas');
+        this._segmentationMaskCanvas.width = segmentationWidth;
+        this._segmentationMaskCanvas.height = segmentationHeight;
+        this._segmentationMaskCtx = this._segmentationMaskCanvas.getContext('2d');
         this._outputCanvasElement.width = parseInt(width, 10);
         this._outputCanvasElement.height = parseInt(height, 10);
-        this._inputVideoCanvasElement.width = parseInt(width, 10);
-        this._inputVideoCanvasElement.height = parseInt(height, 10);
+        this._outputCanvasCtx = this._outputCanvasElement.getContext('2d');
         this._inputVideoElement.width = parseInt(width, 10);
         this._inputVideoElement.height = parseInt(height, 10);
         this._inputVideoElement.autoplay = true;