
actions.js

// @flow

import 'image-capture';
import './createImageBitmap';

import { getCurrentConference } from '../base/conference';
import { getLocalParticipant, getParticipantCount } from '../base/participants';
import { getLocalVideoTrack } from '../base/tracks';
import { getBaseUrl } from '../base/util';

import {
    ADD_FACE_EXPRESSION,
    ADD_TO_FACE_EXPRESSIONS_BUFFER,
    CLEAR_FACE_EXPRESSIONS_BUFFER,
    START_FACE_LANDMARKS_DETECTION,
    STOP_FACE_LANDMARKS_DETECTION,
    UPDATE_FACE_COORDINATES
} from './actionTypes';
import {
    DETECTION_TYPES,
    INIT_WORKER,
    WEBHOOK_SEND_TIME_INTERVAL
} from './constants';
import {
    getDetectionInterval,
    sendDataToWorker,
    sendFaceBoxToParticipants,
    sendFaceExpressionsWebhook
} from './functions';
import logger from './logger';

declare var APP: Object;
/**
 * Object containing an image capture of the local track.
 */
let imageCapture;

/**
 * Object where the face landmarks worker is stored.
 */
let worker;

/**
 * The last face expression received from the worker.
 */
let lastFaceExpression;

/**
 * The last face expression timestamp.
 */
let lastFaceExpressionTimestamp;

/**
 * How many consecutive duplicates of the same expression have occurred.
 * It is reset to 0 when an expression different from the last one is received.
 */
let duplicateConsecutiveExpressions = 0;

/**
 * Variable that keeps the interval for sending expressions to the webhook.
 */
let webhookSendInterval;

/**
 * Variable that keeps the interval for detecting faces in a frame.
 */
let detectionInterval;
/**
 * Loads the worker that detects the face landmarks.
 *
 * @returns {void}
 */
export function loadWorker() {
    return function(dispatch: Function, getState: Function) {
        if (worker) {
            logger.info('Worker has already been initialized');

            return;
        }

        if (navigator.product === 'ReactNative') {
            logger.warn('Unsupported environment for face recognition');

            return;
        }

        const baseUrl = `${getBaseUrl()}libs/`;
        let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;

        // Load the actual worker script indirectly through a blob URL, so that it can
        // be pulled in with importScripts() even when the libs are hosted on another origin.
        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });

        workerUrl = window.URL.createObjectURL(workerBlob);
        worker = new Worker(workerUrl, { name: 'Face Recognition Worker' });
        worker.onmessage = function(e: Object) {
            const { faceExpression, faceBox } = e.data;

            if (faceExpression) {
                if (faceExpression === lastFaceExpression) {
                    duplicateConsecutiveExpressions++;
                } else {
                    if (lastFaceExpression && lastFaceExpressionTimestamp) {
                        dispatch(addFaceExpression(
                            lastFaceExpression,
                            duplicateConsecutiveExpressions + 1,
                            lastFaceExpressionTimestamp
                        ));
                    }

                    lastFaceExpression = faceExpression;
                    lastFaceExpressionTimestamp = Date.now();
                    duplicateConsecutiveExpressions = 0;
                }
            }

            if (faceBox) {
                const state = getState();
                const conference = getCurrentConference(state);
                const localParticipant = getLocalParticipant(state);

                if (getParticipantCount(state) > 1) {
                    sendFaceBoxToParticipants(conference, faceBox);
                }

                dispatch({
                    type: UPDATE_FACE_COORDINATES,
                    faceBox,
                    id: localParticipant.id
                });
            }

            APP.API.notifyFaceLandmarkDetected(faceBox, faceExpression);
        };

        const { faceLandmarks } = getState()['features/base/config'];
        const detectionTypes = [
            faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
            faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
        ].filter(Boolean);

        worker.postMessage({
            type: INIT_WORKER,
            baseUrl,
            detectionTypes,
            maxFacesDetected: faceLandmarks?.maxFacesDetected
        });

        dispatch(startFaceLandmarksDetection());
    };
}
/**
 * Starts the recognition and detection of face expressions.
 *
 * @param {Track | undefined} track - Track for which to start detecting faces.
 * @returns {Function}
 */
export function startFaceLandmarksDetection(track) {
    return async function(dispatch: Function, getState: Function) {
        if (!worker) {
            return;
        }

        const state = getState();
        const { recognitionActive } = state['features/face-landmarks'];

        if (recognitionActive) {
            logger.log('Face recognition already active.');

            return;
        }

        const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);

        if (localVideoTrack === undefined) {
            logger.warn('Face landmarks detection is disabled due to missing local track.');

            return;
        }

        const stream = localVideoTrack.jitsiTrack.getOriginalStream();

        dispatch({ type: START_FACE_LANDMARKS_DETECTION });
        logger.log('Start face recognition');

        const firstVideoTrack = stream.getVideoTracks()[0];
        const { faceLandmarks } = state['features/base/config'];

        imageCapture = new ImageCapture(firstVideoTrack);

        detectionInterval = setInterval(() => {
            sendDataToWorker(
                worker,
                imageCapture,
                faceLandmarks?.faceCenteringThreshold
            );
        }, getDetectionInterval(state));

        if (faceLandmarks?.enableFaceExpressionsDetection) {
            webhookSendInterval = setInterval(async () => {
                const result = await sendFaceExpressionsWebhook(getState());

                if (result) {
                    dispatch(clearFaceExpressionBuffer());
                }
            }, WEBHOOK_SEND_TIME_INTERVAL);
        }
    };
}
/**
 * Stops the recognition and detection of face expressions.
 *
 * @returns {void}
 */
export function stopFaceLandmarksDetection() {
    return function(dispatch: Function) {
        if (lastFaceExpression && lastFaceExpressionTimestamp) {
            dispatch(
                addFaceExpression(
                    lastFaceExpression,
                    duplicateConsecutiveExpressions + 1,
                    lastFaceExpressionTimestamp
                )
            );
        }

        clearInterval(webhookSendInterval);
        clearInterval(detectionInterval);

        duplicateConsecutiveExpressions = 0;
        webhookSendInterval = null;
        detectionInterval = null;
        imageCapture = null;

        dispatch({ type: STOP_FACE_LANDMARKS_DETECTION });
        logger.log('Stop face recognition');
    };
}
/**
 * Adds a new face expression and its duration.
 *
 * @param {string} faceExpression - Face expression to be added.
 * @param {number} duration - Duration in seconds of the face expression.
 * @param {number} timestamp - Timestamp of when the face expression was first detected.
 * @returns {Object}
 */
function addFaceExpression(faceExpression: string, duration: number, timestamp: number) {
    return function(dispatch: Function, getState: Function) {
        const finalDuration = duration * getDetectionInterval(getState()) / 1000;

        dispatch({
            type: ADD_FACE_EXPRESSION,
            faceExpression,
            duration: finalDuration,
            timestamp
        });
    };
}
/**
 * Adds a face expression with its timestamp to the face expression buffer.
 *
 * @param {Object} faceExpression - Object containing face expression string and its timestamp.
 * @returns {Object}
 */
export function addToFaceExpressionsBuffer(faceExpression: Object) {
    return {
        type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
        faceExpression
    };
}
/**
 * Clears the face expressions array in the state.
 *
 * @returns {Object}
 */
function clearFaceExpressionBuffer() {
    return {
        type: CLEAR_FACE_EXPRESSIONS_BUFFER
    };
}