
actions.js

// @flow

import 'image-capture';
import './createImageBitmap';

import { getCurrentConference } from '../base/conference';
import { getLocalParticipant, getParticipantCount } from '../base/participants';
import { getLocalVideoTrack } from '../base/tracks';
import { getBaseUrl } from '../base/util';

import {
    ADD_FACE_EXPRESSION,
    ADD_TO_FACE_EXPRESSIONS_BUFFER,
    CLEAR_FACE_EXPRESSIONS_BUFFER,
    START_FACE_LANDMARKS_DETECTION,
    STOP_FACE_LANDMARKS_DETECTION,
    UPDATE_FACE_COORDINATES
} from './actionTypes';
import {
    DETECTION_TYPES,
    INIT_WORKER,
    WEBHOOK_SEND_TIME_INTERVAL
} from './constants';
import {
    getDetectionInterval,
    sendDataToWorker,
    sendFaceBoxToParticipants,
    sendFaceExpressionsWebhook
} from './functions';
import logger from './logger';

declare var APP: Object;
/**
 * Object containing an image capture of the local track.
 */
let imageCapture;

/**
 * Object where the face landmarks worker is stored.
 */
let worker;

/**
 * The last face expression received from the worker.
 */
let lastFaceExpression;

/**
 * The timestamp of the last face expression.
 */
let lastFaceExpressionTimestamp;

/**
 * How many consecutive duplicates of the same expression have occurred.
 * Reset to 0 whenever an expression differs from the last one.
 */
let duplicateConsecutiveExpressions = 0;

/**
 * Handle for the interval that sends expressions to the webhook.
 */
let webhookSendInterval;

/**
 * Handle for the interval that detects faces in a frame.
 */
let detectionInterval;

/**
 * Loads the worker that detects the face landmarks.
 *
 * @returns {Function}
 */
export function loadWorker() {
    return function(dispatch: Function, getState: Function) {
        if (worker) {
            logger.info('Worker has already been initialized');

            return;
        }

        if (navigator.product === 'ReactNative') {
            logger.warn('Unsupported environment for face recognition');

            return;
        }

        const baseUrl = `${getBaseUrl()}libs/`;
        let workerUrl = `${baseUrl}face-landmarks-worker.min.js`;

        // Wrap the worker script in an importScripts() call inside a Blob:
        // the Worker constructor only accepts same-origin URLs, while
        // importScripts() can load the script from anywhere (e.g. a CDN).
        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });

        workerUrl = window.URL.createObjectURL(workerBlob);
        worker = new Worker(workerUrl, { name: 'Face Recognition Worker' });
        worker.onmessage = function(e: Object) {
            const { faceExpression, faceBox } = e.data;

            if (faceExpression) {
                if (faceExpression === lastFaceExpression) {
                    // Same expression as before: just count the duplicate.
                    duplicateConsecutiveExpressions++;
                } else {
                    // The expression changed: flush the previous one together
                    // with how many consecutive times it was detected.
                    if (lastFaceExpression && lastFaceExpressionTimestamp) {
                        dispatch(addFaceExpression(
                            lastFaceExpression,
                            duplicateConsecutiveExpressions + 1,
                            lastFaceExpressionTimestamp
                        ));
                    }
                    lastFaceExpression = faceExpression;
                    lastFaceExpressionTimestamp = Date.now();
                    duplicateConsecutiveExpressions = 0;
                }
            }

            if (faceBox) {
                const state = getState();
                const conference = getCurrentConference(state);
                const localParticipant = getLocalParticipant(state);

                if (getParticipantCount(state) > 1) {
                    sendFaceBoxToParticipants(conference, faceBox);
                }

                dispatch({
                    type: UPDATE_FACE_COORDINATES,
                    faceBox,
                    id: localParticipant.id
                });
            }

            APP.API.notifyFaceLandmarkDetected(faceBox, faceExpression);
        };
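
        // A worker message carries either or both of the destructured fields.
        // A hypothetical example shape (field values are illustrative only,
        // not the worker's documented contract):
        //   { faceExpression: 'happy', faceBox: { left: 30, right: 70, width: 40 } }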
        const { faceLandmarks } = getState()['features/base/config'];
        const detectionTypes = [
            faceLandmarks?.enableFaceCentering && DETECTION_TYPES.FACE_BOX,
            faceLandmarks?.enableFaceExpressionsDetection && DETECTION_TYPES.FACE_EXPRESSIONS
        ].filter(Boolean);

        worker.postMessage({
            type: INIT_WORKER,
            baseUrl,
            detectionTypes
        });

        dispatch(startFaceLandmarksDetection());
    };
}
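
// A minimal usage sketch, assuming a Redux store wired with thunk middleware
// (the `store` name is hypothetical):
//   store.dispatch(loadWorker());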
/**
 * Starts the recognition and detection of face expressions.
 *
 * @param {Track | undefined} track - Track for which to start detecting faces.
 * @returns {Function}
 */
export function startFaceLandmarksDetection(track) {
    return async function(dispatch: Function, getState: Function) {
        if (!worker) {
            return;
        }

        const state = getState();
        const { recognitionActive } = state['features/face-landmarks'];

        if (recognitionActive) {
            logger.log('Face recognition already active.');

            return;
        }

        const localVideoTrack = track || getLocalVideoTrack(state['features/base/tracks']);

        if (localVideoTrack === undefined) {
            logger.warn('Face landmarks detection is disabled due to missing local track.');

            return;
        }

        const stream = localVideoTrack.jitsiTrack.getOriginalStream();

        dispatch({ type: START_FACE_LANDMARKS_DETECTION });
        logger.log('Start face recognition');

        const firstVideoTrack = stream.getVideoTracks()[0];
        const { faceLandmarks } = state['features/base/config'];

        imageCapture = new ImageCapture(firstVideoTrack);

        detectionInterval = setInterval(() => {
            sendDataToWorker(
                worker,
                imageCapture,
                faceLandmarks?.faceCenteringThreshold
            );
        }, getDetectionInterval(state));

        if (faceLandmarks?.enableFaceExpressionsDetection) {
            webhookSendInterval = setInterval(async () => {
                const result = await sendFaceExpressionsWebhook(getState());

                if (result) {
                    dispatch(clearFaceExpressionBuffer());
                }
            }, WEBHOOK_SEND_TIME_INTERVAL);
        }
    };
}
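
// A minimal usage sketch, assuming the same hypothetical store and a local
// video track already present in the Redux state:
//   store.dispatch(startFaceLandmarksDetection());
// The detection loop then captures a frame every getDetectionInterval(state)
// milliseconds and posts it to the worker via sendDataToWorker().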
/**
 * Stops the recognition and detection of face expressions.
 *
 * @returns {Function}
 */
export function stopFaceLandmarksDetection() {
    return function(dispatch: Function) {
        if (lastFaceExpression && lastFaceExpressionTimestamp) {
            dispatch(
                addFaceExpression(
                    lastFaceExpression,
                    duplicateConsecutiveExpressions + 1,
                    lastFaceExpressionTimestamp
                )
            );
        }

        clearInterval(webhookSendInterval);
        clearInterval(detectionInterval);

        duplicateConsecutiveExpressions = 0;
        webhookSendInterval = null;
        detectionInterval = null;
        imageCapture = null;

        dispatch({ type: STOP_FACE_LANDMARKS_DETECTION });
        logger.log('Stop face recognition');
    };
}
/**
 * Adds a new face expression and its duration.
 *
 * @param {string} faceExpression - Face expression to be added.
 * @param {number} duration - Duration of the face expression, expressed as the
 * number of consecutive detection intervals it was observed for.
 * @param {number} timestamp - Timestamp at which the face expression was first detected.
 * @returns {Function}
 */
function addFaceExpression(faceExpression: string, duration: number, timestamp: number) {
    return function(dispatch: Function, getState: Function) {
        // Convert the detection count into seconds.
        const finalDuration = duration * getDetectionInterval(getState()) / 1000;

        dispatch({
            type: ADD_FACE_EXPRESSION,
            faceExpression,
            duration: finalDuration,
            timestamp
        });
    };
}
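
// Worked example (the 1000 ms interval is an assumed value for illustration,
// not a documented default): with getDetectionInterval() returning 1000 and an
// expression seen in 3 consecutive detections,
// finalDuration = 3 * 1000 / 1000 = 3 seconds.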
/**
 * Adds a face expression with its timestamp to the face expression buffer.
 *
 * @param {Object} faceExpression - Object containing face expression string and its timestamp.
 * @returns {Object}
 */
export function addToFaceExpressionsBuffer(faceExpression: Object) {
    return {
        type: ADD_TO_FACE_EXPRESSIONS_BUFFER,
        faceExpression
    };
}

/**
 * Clears the face expressions array in the state.
 *
 * @returns {Object}
 */
function clearFaceExpressionBuffer() {
    return {
        type: CLEAR_FACE_EXPRESSIONS_BUFFER
    };
}
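
// End-to-end lifecycle sketch, assuming a thunk-enabled Redux store and the
// usual Jitsi Meet app wiring (the `store` name is hypothetical):
//
//   store.dispatch(loadWorker());                  // initialize the worker once
//   store.dispatch(startFaceLandmarksDetection()); // begin the detection loop
//   // ...
//   store.dispatch(stopFaceLandmarksDetection());  // flush and clean up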