actions.js

// @flow
import { getLocalVideoTrack } from '../base/tracks';

import 'image-capture';
import './createImageBitmap';

import {
    ADD_FACIAL_EXPRESSION,
    ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
    CLEAR_FACIAL_EXPRESSIONS_BUFFER,
    SET_DETECTION_TIME_INTERVAL,
    START_FACIAL_RECOGNITION,
    STOP_FACIAL_RECOGNITION
} from './actionTypes';
import {
    CLEAR_TIMEOUT,
    FACIAL_EXPRESSION_MESSAGE,
    INIT_WORKER,
    INTERVAL_MESSAGE,
    WEBHOOK_SEND_TIME_INTERVAL
} from './constants';
import { sendDataToWorker, sendFacialExpressionsWebhook } from './functions';
import logger from './logger';

/**
 * Object containing an image capture of the local track.
 */
let imageCapture;

/**
 * Object where the facial expression worker is stored.
 */
let worker;

/**
 * The last facial expression received from the worker.
 */
let lastFacialExpression;

/**
 * The last facial expression timestamp.
 */
let lastFacialExpressionTimestamp;

/**
 * How many consecutive duplicate expressions have occurred.
 * Reset to 0 when an expression different from the last one is received.
 */
let duplicateConsecutiveExpressions = 0;

/**
 * Variable that keeps the interval for sending expressions to the webhook.
 */
let sendInterval;

/**
 * Loads the worker that predicts the facial expression.
 *
 * @returns {Function}
 */
export function loadWorker() {
    return function(dispatch: Function) {
        if (!window.Worker) {
            logger.warn('Browser does not support web workers');

            return;
        }

        let baseUrl = '';
        const app: Object = document.querySelector('script[src*="app.bundle.min.js"]');

        if (app) {
            const idx = app.src.lastIndexOf('/');

            baseUrl = `${app.src.substring(0, idx)}/`;
        }
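
        // The worker script may live on a different origin than the page
        // (e.g. a CDN). Wrapping it in a same-origin Blob URL that simply
        // calls importScripts() is a common workaround for the same-origin
        // restriction on `new Worker(url)`.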
        let workerUrl = `${baseUrl}facial-expressions-worker.min.js`;

        const workerBlob = new Blob([ `importScripts("${workerUrl}");` ], { type: 'application/javascript' });

        workerUrl = window.URL.createObjectURL(workerBlob);
        worker = new Worker(workerUrl, { name: 'Facial Expression Worker' });
        worker.onmessage = function(e: Object) {
            const { type, value } = e.data;

            // Receives a message indicating what type of backend tfjs decided
            // to use; it is received as a response to the first message sent
            // to the worker. The value carries the detection time interval.
            if (type === INTERVAL_MESSAGE) {
                value && dispatch(setDetectionTimeInterval(value));
            }

            // Receives a message with the predicted facial expression.
            if (type === FACIAL_EXPRESSION_MESSAGE) {
                sendDataToWorker(worker, imageCapture);

                if (!value) {
                    return;
                }
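
                // Consecutive identical predictions are counted rather than
                // dispatched one by one; the aggregated expression is added
                // with its total duration once a different one arrives.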
                if (value === lastFacialExpression) {
                    duplicateConsecutiveExpressions++;
                } else {
                    if (lastFacialExpression && lastFacialExpressionTimestamp) {
                        dispatch(
                            addFacialExpression(
                                lastFacialExpression,
                                duplicateConsecutiveExpressions + 1,
                                lastFacialExpressionTimestamp
                            )
                        );
                    }
                    lastFacialExpression = value;
                    lastFacialExpressionTimestamp = Date.now();
                    duplicateConsecutiveExpressions = 0;
                }
            }
        };
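
        // Initialize the worker, handing it the base URL (presumably so it
        // can fetch its model files relative to the deployment) and the
        // screen size, which the worker can use when sizing captured frames.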
        worker.postMessage({
            type: INIT_WORKER,
            url: baseUrl,
            windowScreenSize: window.screen ? {
                width: window.screen.width,
                height: window.screen.height
            } : undefined
        });
        dispatch(startFacialRecognition());
    };
}

/**
 * Starts the recognition and detection of face expressions.
 *
 * @returns {Function}
 */
export function startFacialRecognition() {
    return async function(dispatch: Function, getState: Function) {
        if (worker === undefined || worker === null) {
            return;
        }

        const state = getState();
        const { recognitionActive } = state['features/facial-recognition'];

        if (recognitionActive) {
            return;
        }

        const localVideoTrack = getLocalVideoTrack(state['features/base/tracks']);

        if (localVideoTrack === undefined) {
            return;
        }

        const stream = localVideoTrack.jitsiTrack.getOriginalStream();

        if (stream === null) {
            return;
        }

        dispatch({ type: START_FACIAL_RECOGNITION });
        logger.log('Start face recognition');
        const firstVideoTrack = stream.getVideoTracks()[0];

        // $FlowFixMe
        imageCapture = new ImageCapture(firstVideoTrack);
        sendDataToWorker(worker, imageCapture);
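
        // Periodically flush the buffered expressions to the webhook; the
        // buffer is cleared only when the send succeeds, so a failed send is
        // retried with the same data on the next tick.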
        sendInterval = setInterval(async () => {
            const result = await sendFacialExpressionsWebhook(getState());

            if (result) {
                dispatch(clearFacialExpressionBuffer());
            }
        }, WEBHOOK_SEND_TIME_INTERVAL);
    };
}

/**
 * Stops the recognition and detection of face expressions.
 *
 * @returns {Function}
 */
export function stopFacialRecognition() {
    return function(dispatch: Function, getState: Function) {
        const state = getState();
        const { recognitionActive } = state['features/facial-recognition'];

        if (!recognitionActive) {
            imageCapture = null;

            return;
        }

        imageCapture = null;
        worker.postMessage({
            type: CLEAR_TIMEOUT
        });
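
        // Flush the expression that is still being aggregated so its
        // duration is not lost when recognition stops.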
        if (lastFacialExpression && lastFacialExpressionTimestamp) {
            dispatch(
                addFacialExpression(
                    lastFacialExpression,
                    duplicateConsecutiveExpressions + 1,
                    lastFacialExpressionTimestamp
                )
            );
        }
        duplicateConsecutiveExpressions = 0;

        if (sendInterval) {
            clearInterval(sendInterval);
            sendInterval = null;
        }
        dispatch({ type: STOP_FACIAL_RECOGNITION });
        logger.log('Stop face recognition');
    };
}

/**
 * Resets the track in the image capture.
 *
 * @returns {Function}
 */
export function resetTrack() {
    return function(dispatch: Function, getState: Function) {
        const state = getState();
        const { jitsiTrack: localVideoTrack } = getLocalVideoTrack(state['features/base/tracks']);
        const stream = localVideoTrack.getOriginalStream();
        const firstVideoTrack = stream.getVideoTracks()[0];

        // $FlowFixMe
        imageCapture = new ImageCapture(firstVideoTrack);
    };
}

/**
 * Replaces the track in the image capture with a given one.
 *
 * @param {Object} track - The track that will be in the new image capture.
 * @returns {void}
 */
export function changeTrack(track: Object) {
    const { jitsiTrack } = track;
    const stream = jitsiTrack.getOriginalStream();
    const firstVideoTrack = stream.getVideoTracks()[0];

    // $FlowFixMe
    imageCapture = new ImageCapture(firstVideoTrack);
}

/**
 * Adds a new facial expression and its duration.
 *
 * @param {string} facialExpression - Facial expression to be added.
 * @param {number} duration - Duration in seconds of the facial expression.
 * @param {number} timestamp - Timestamp of when the facial expression was first detected.
 * @returns {Function}
 */
function addFacialExpression(facialExpression: string, duration: number, timestamp: number) {
    return function(dispatch: Function, getState: Function) {
        const { detectionTimeInterval } = getState()['features/facial-recognition'];
        let finalDuration = duration;
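
        // `duration` arrives as a count of consecutive detections;
        // multiplying by the detection interval (in milliseconds, hence the
        // division by 1000) converts it into seconds.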
        if (detectionTimeInterval !== -1) {
            finalDuration *= detectionTimeInterval / 1000;
        }
        dispatch({
            type: ADD_FACIAL_EXPRESSION,
            facialExpression,
            duration: finalDuration,
            timestamp
        });
    };
}

/**
 * Sets the time interval for the detection worker post message.
 *
 * @param {number} time - The time interval.
 * @returns {Object}
 */
function setDetectionTimeInterval(time: number) {
    return {
        type: SET_DETECTION_TIME_INTERVAL,
        time
    };
}

/**
 * Adds a facial expression with its timestamp to the facial expression buffer.
 *
 * @param {Object} facialExpression - Object containing facial expression string and its timestamp.
 * @returns {Object}
 */
export function addToFacialExpressionsBuffer(facialExpression: Object) {
    return {
        type: ADD_TO_FACIAL_EXPRESSIONS_BUFFER,
        facialExpression
    };
}

/**
 * Clears the facial expressions array in the state.
 *
 * @returns {Object}
 */
function clearFacialExpressionBuffer() {
    return {
        type: CLEAR_FACIAL_EXPRESSIONS_BUFFER
    };
}
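
// A minimal usage sketch, assuming a Redux store with thunk middleware
// (`store` is hypothetical and not part of this module):
//
//     store.dispatch(loadWorker());            // load worker, start recognition
//     store.dispatch(stopFacialRecognition()); // flush pending expression, stop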