// @flow

import { getLocalVideoTrack } from '../base/tracks';

import 'image-capture';
import './createImageBitmap';

import {
    ADD_FACIAL_EXPRESSION,
    ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
    CLEAR_FACIAL_EXPRESSIONS_BUFFER,
    SET_DETECTION_TIME_INTERVAL,
    START_FACIAL_RECOGNITION,
    STOP_FACIAL_RECOGNITION
} from './actionTypes';
import {
    CPU_TIME_INTERVAL,
    WEBGL_TIME_INTERVAL,
    WEBHOOK_SEND_TIME_INTERVAL
} from './constants';
import { sendDataToWorker, sendFacialExpressionsWebhook } from './functions';
import logger from './logger';

/**
 * Object containing an image capture of the local track.
 */
let imageCapture;

/**
 * Object where the facial expression worker is stored.
 */
let worker;

/**
 * The last facial expression received from the worker.
 */
let lastFacialExpression;

/**
 * The timestamp of the last facial expression received from the worker.
 */
let lastFacialExpressionTimestamp;

/**
 * How many consecutive duplicates of the last facial expression have occurred.
 * It is reset to 0 whenever an expression different from the last one is received.
 */
let duplicateConsecutiveExpressions = 0;

/**
 * The interval used for sending expressions to the webhook.
 */
let sendInterval;

/**
 * Loads the worker that predicts the facial expression.
 *
 * @returns {Function}
 */
export function loadWorker() {
    return function(dispatch: Function) {
        if (!window.Worker) {
            logger.warn('Browser does not support web workers');

            return;
        }
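
        // Resolve the base URL of the deployment from the main app bundle's
        // <script> tag, so that the worker script (and later the models) can be
        // loaded from the same location as the rest of the application.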
        let baseUrl = '';
        const app: Object = document.querySelector('script[src*="app.bundle.min.js"]');

        if (app) {
            const idx = app.src.lastIndexOf('/');

            baseUrl = `${app.src.substring(0, idx)}/`;
        }
        let workerUrl = `${baseUrl}facial-expressions-worker.min.js`;
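
        // Wrap the worker script in a same-origin Blob that importScripts() the
        // actual bundle. This is a common way to create a Worker when the script
        // may be served from another origin (e.g. a CDN), since the Worker
        // constructor itself only accepts same-origin URLs.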
        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });

        workerUrl = window.URL.createObjectURL(workerBlob);
        worker = new Worker(workerUrl, { name: 'Facial Expression Worker' });
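
        // The worker posts back two kinds of messages: 'tf-backend', sent once in
        // response to the first message and carrying the TensorFlow.js backend that
        // was selected, and 'facial-expression', sent after every processed frame
        // with the predicted expression (possibly empty when there is no prediction).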
        worker.onmessage = function(e: Object) {
            const { type, value } = e.data;

            // Receives a message indicating what type of backend tfjs decided to use.
            // It is received as a response to the first message sent to the worker.
            if (type === 'tf-backend' && value) {
                let detectionTimeInterval = -1;

                if (value === 'webgl') {
                    detectionTimeInterval = WEBGL_TIME_INTERVAL;
                } else if (value === 'cpu') {
                    detectionTimeInterval = CPU_TIME_INTERVAL;
                }
                dispatch(setDetectionTimeInterval(detectionTimeInterval));
            }

            // Receives a message with the predicted facial expression.
            if (type === 'facial-expression') {
                sendDataToWorker(worker, imageCapture);
                if (!value) {
                    return;
                }
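
                // Identical consecutive predictions are not dispatched one by one:
                // they are counted and added as a single expression with the
                // accumulated duration once a different expression comes in.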
                if (value === lastFacialExpression) {
                    duplicateConsecutiveExpressions++;
                } else {
                    if (lastFacialExpression && lastFacialExpressionTimestamp) {
                        dispatch(
                            addFacialExpression(
                                lastFacialExpression,
                                duplicateConsecutiveExpressions + 1,
                                lastFacialExpressionTimestamp
                            )
                        );
                    }
                    lastFacialExpression = value;
                    lastFacialExpressionTimestamp = Date.now();
                    duplicateConsecutiveExpressions = 0;
                }
            }
        };
        worker.postMessage({
            id: 'SET_MODELS_URL',
            url: baseUrl
        });
        dispatch(startFacialRecognition());
    };
}

/**
 * Starts the recognition and detection of face expressions.
 *
 * @returns {Function}
 */
export function startFacialRecognition() {
    return async function(dispatch: Function, getState: Function) {
        if (worker === undefined || worker === null) {
            return;
        }
        const state = getState();
        const { recognitionActive } = state['features/facial-recognition'];

        if (recognitionActive) {
            return;
        }
        const localVideoTrack = getLocalVideoTrack(state['features/base/tracks']);

        if (localVideoTrack === undefined) {
            return;
        }
        const stream = localVideoTrack.jitsiTrack.getOriginalStream();

        if (stream === null) {
            return;
        }
        dispatch({ type: START_FACIAL_RECOGNITION });
        logger.log('Start face recognition');
        const firstVideoTrack = stream.getVideoTracks()[0];

        // $FlowFixMe
        imageCapture = new ImageCapture(firstVideoTrack);

        sendDataToWorker(worker, imageCapture);
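
        // Periodically flush the buffered facial expressions to the webhook and
        // clear the local buffer only when the webhook call reports success.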
        sendInterval = setInterval(async () => {
            const result = await sendFacialExpressionsWebhook(getState());

            if (result) {
                dispatch(clearFacialExpressionBuffer());
            }
        }, WEBHOOK_SEND_TIME_INTERVAL);
    };
}

/**
 * Stops the recognition and detection of face expressions.
 *
 * @returns {Function}
 */
export function stopFacialRecognition() {
    return function(dispatch: Function, getState: Function) {
        const state = getState();
        const { recognitionActive } = state['features/facial-recognition'];

        if (!recognitionActive) {
            imageCapture = null;

            return;
        }
        imageCapture = null;
        worker.postMessage({
            id: 'CLEAR_TIMEOUT'
        });
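
        // Flush the expression that was still being accumulated when recognition
        // stopped, so its duration is not lost.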
        if (lastFacialExpression && lastFacialExpressionTimestamp) {
            dispatch(
                addFacialExpression(
                    lastFacialExpression,
                    duplicateConsecutiveExpressions + 1,
                    lastFacialExpressionTimestamp
                )
            );
        }
        duplicateConsecutiveExpressions = 0;

        if (sendInterval) {
            clearInterval(sendInterval);
            sendInterval = null;
        }
        dispatch({ type: STOP_FACIAL_RECOGNITION });
        logger.log('Stop face recognition');
    };
}

/**
 * Resets the track in the image capture.
 *
 * @returns {Function}
 */
export function resetTrack() {
    return function(dispatch: Function, getState: Function) {
        const state = getState();
        const { jitsiTrack: localVideoTrack } = getLocalVideoTrack(state['features/base/tracks']);
        const stream = localVideoTrack.getOriginalStream();
        const firstVideoTrack = stream.getVideoTracks()[0];

        // $FlowFixMe
        imageCapture = new ImageCapture(firstVideoTrack);
    };
}

/**
 * Replaces the track in the image capture with a given one.
 *
 * @param {Object} track - The track that will be in the new image capture.
 * @returns {void}
 */
export function changeTrack(track: Object) {
    const { jitsiTrack } = track;
    const stream = jitsiTrack.getOriginalStream();
    const firstVideoTrack = stream.getVideoTracks()[0];

    // $FlowFixMe
    imageCapture = new ImageCapture(firstVideoTrack);
}

/**
 * Adds a new facial expression and its duration.
 *
 * @param {string} facialExpression - Facial expression to be added.
 * @param {number} duration - Duration of the facial expression, expressed as the
 * number of consecutive detections.
 * @param {number} timestamp - Timestamp of when the facial expression was first detected.
 * @returns {Function}
 */
function addFacialExpression(facialExpression: string, duration: number, timestamp: number) {
    return function(dispatch: Function, getState: Function) {
        const { detectionTimeInterval } = getState()['features/facial-recognition'];
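
        // The duration passed in is a detection count; when the detection time
        // interval is known, convert it to seconds before storing.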
        let finalDuration = duration;

        if (detectionTimeInterval !== -1) {
            finalDuration *= detectionTimeInterval / 1000;
        }
        dispatch({
            type: ADD_FACIAL_EXPRESSION,
            facialExpression,
            duration: finalDuration,
            timestamp
        });
    };
}

/**
 * Sets the time interval for the detection worker post message.
 *
 * @param {number} time - The time interval.
 * @returns {Object}
 */
function setDetectionTimeInterval(time: number) {
    return {
        type: SET_DETECTION_TIME_INTERVAL,
        time
    };
}

/**
 * Adds a facial expression with its timestamp to the facial expression buffer.
 *
 * @param {Object} facialExpression - Object containing facial expression string and its timestamp.
 * @returns {Object}
 */
export function addToFacialExpressionsBuffer(facialExpression: Object) {
    return {
        type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
        facialExpression
    };
}

/**
 * Clears the facial expressions array in the state.
 *
 * @returns {Object}
 */
function clearFacialExpressionBuffer() {
    return {
        type: CLEAR_FACIAL_EXPRESSIONS_BUFFER
    };
}