
actions.js

// @flow

import { getLocalVideoTrack } from '../base/tracks';

import 'image-capture';
import './createImageBitmap';

import {
    ADD_FACIAL_EXPRESSION,
    SET_DETECTION_TIME_INTERVAL,
    START_FACIAL_RECOGNITION,
    STOP_FACIAL_RECOGNITION
} from './actionTypes';
import { sendDataToWorker } from './functions';
import logger from './logger';

/**
 * Time used for the detection interval when the facial expression worker uses
 * the WebGL backend.
 */
const WEBGL_TIME_INTERVAL = 1000;

/**
 * Time used for the detection interval when the facial expression worker uses
 * the CPU backend.
 */
const CPU_TIME_INTERVAL = 6000;

/**
 * Object containing the image capture of the local video track.
 */
let imageCapture;

/**
 * Object where the facial expression worker is stored.
 */
let worker;

/**
 * The last facial expression received from the worker.
 */
let lastFacialExpression;

/**
 * How many consecutive duplicates of the last expression occurred. Reset to 0
 * when an expression different from the last one arrives.
 */
let duplicateConsecutiveExpressions = 0;

/**
 * Loads the worker that predicts the facial expression.
 *
 * @returns {Function}
 */
export function loadWorker() {
    return function(dispatch: Function) {
        if (!window.Worker) {
            logger.warn('Browser does not support web workers');

            return;
        }
        worker = new Worker('libs/facial-expressions-worker.min.js', { name: 'Facial Expression Worker' });
        worker.onmessage = function(e: Object) {
            const { type, value } = e.data;

            // Receives a message indicating which backend tfjs decided to use;
            // it arrives as a response to the first message sent to the worker.
            if (type === 'tf-backend' && value) {
                let detectionTimeInterval = -1;

                if (value === 'webgl') {
                    detectionTimeInterval = WEBGL_TIME_INTERVAL;
                } else if (value === 'cpu') {
                    detectionTimeInterval = CPU_TIME_INTERVAL;
                }
                dispatch(setDetectionTimeInterval(detectionTimeInterval));
            }

            // Receives a message with the predicted facial expression.
            if (type === 'facial-expression') {
                sendDataToWorker(worker, imageCapture);
                if (!value) {
                    return;
                }
                if (value === lastFacialExpression) {
                    duplicateConsecutiveExpressions++;
                } else {
                    lastFacialExpression
                        && dispatch(addFacialExpression(lastFacialExpression, duplicateConsecutiveExpressions + 1));
                    lastFacialExpression = value;
                    duplicateConsecutiveExpressions = 0;
                }
            }
        };
    };
}
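
/*
 * For reference, a minimal sketch of the message protocol that the handler
 * above implies for 'libs/facial-expressions-worker.min.js'. The worker
 * source is not part of this file, so the tfjs calls, the `predict` helper
 * and the `imageBitmap` payload field are assumptions, not the actual
 * implementation:
 *
 *     let backendSent = false;
 *
 *     onmessage = async e => {
 *         await tf.ready();
 *
 *         if (!backendSent) {
 *             // First reply: which backend tfjs picked ('webgl' or 'cpu'),
 *             // so the main thread can choose a detection interval.
 *             postMessage({ type: 'tf-backend', value: tf.getBackend() });
 *             backendSent = true;
 *         }
 *
 *         // Every received frame is answered with a prediction; `predict`
 *         // stands in for the actual (unknown) model call and may resolve
 *         // to undefined when no face is detected.
 *         postMessage({ type: 'facial-expression', value: await predict(e.data.imageBitmap) });
 *     };
 */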

/**
 * Starts the recognition and detection of face expressions.
 *
 * @returns {Function}
 */
export function startFacialRecognition() {
    return async function(dispatch: Function, getState: Function) {
        if (worker === undefined || worker === null) {
            return;
        }
        const state = getState();
        const { recognitionActive } = state['features/facial-recognition'];

        if (recognitionActive) {
            return;
        }
        const localVideoTrack = getLocalVideoTrack(state['features/base/tracks']);

        if (localVideoTrack === undefined) {
            return;
        }
        const stream = localVideoTrack.jitsiTrack.getOriginalStream();

        if (stream === null) {
            return;
        }
        dispatch({ type: START_FACIAL_RECOGNITION });
        logger.log('Start face recognition');
        const firstVideoTrack = stream.getVideoTracks()[0];

        // $FlowFixMe
        imageCapture = new ImageCapture(firstVideoTrack);
        sendDataToWorker(worker, imageCapture);
    };
}
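
/*
 * `sendDataToWorker` is imported from './functions' and not shown here. Based
 * only on its call sites (worker + imageCapture), a plausible sketch is that
 * it grabs a frame from the local track and posts it to the worker; the
 * 'SET_TIMEOUT' id below is an assumption, mirroring the 'CLEAR_TIMEOUT'
 * message posted in stopFacialRecognition():
 *
 *     export async function sendDataToWorker(worker: Worker, imageCapture: Object) {
 *         if (!imageCapture) {
 *             return;
 *         }
 *
 *         // grabFrame() is part of the Image Capture API, polyfilled by the
 *         // 'image-capture' import at the top of this file.
 *         const imageBitmap = await imageCapture.grabFrame();
 *
 *         worker.postMessage({ id: 'SET_TIMEOUT', imageBitmap });
 *     }
 */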

/**
 * Stops the recognition and detection of face expressions.
 *
 * @returns {Function}
 */
export function stopFacialRecognition() {
    return function(dispatch: Function, getState: Function) {
        const state = getState();
        const { recognitionActive } = state['features/facial-recognition'];

        if (!recognitionActive) {
            imageCapture = null;

            return;
        }
        imageCapture = null;
        worker.postMessage({
            id: 'CLEAR_TIMEOUT'
        });
        lastFacialExpression
            && dispatch(addFacialExpression(lastFacialExpression, duplicateConsecutiveExpressions + 1));
        duplicateConsecutiveExpressions = 0;
        dispatch({ type: STOP_FACIAL_RECOGNITION });
        logger.log('Stop face recognition');
    };
}

/**
 * Resets the track in the image capture.
 *
 * @returns {Function}
 */
export function resetTrack() {
    return function(dispatch: Function, getState: Function) {
        const state = getState();
        const { jitsiTrack: localVideoTrack } = getLocalVideoTrack(state['features/base/tracks']);
        const stream = localVideoTrack.getOriginalStream();
        const firstVideoTrack = stream.getVideoTracks()[0];

        // $FlowFixMe
        imageCapture = new ImageCapture(firstVideoTrack);
    };
}

/**
 * Replaces the track in the image capture with a given one.
 *
 * @param {Object} track - The track that will be in the new image capture.
 * @returns {void}
 */
export function changeTrack(track: Object) {
    const { jitsiTrack } = track;
    const stream = jitsiTrack.getOriginalStream();
    const firstVideoTrack = stream.getVideoTracks()[0];

    // $FlowFixMe
    imageCapture = new ImageCapture(firstVideoTrack);
}

/**
 * Adds a new facial expression and its duration.
 *
 * @param {string} facialExpression - Facial expression to be added.
 * @param {number} duration - Number of consecutive detections of the facial
 * expression, converted to seconds below using the detection time interval.
 * @returns {Function}
 */
function addFacialExpression(facialExpression: string, duration: number) {
    return function(dispatch: Function, getState: Function) {
        const { detectionTimeInterval } = getState()['features/facial-recognition'];
        let finalDuration = duration;

        if (detectionTimeInterval !== -1) {
            finalDuration *= detectionTimeInterval / 1000;
        }
        dispatch({
            type: ADD_FACIAL_EXPRESSION,
            facialExpression,
            duration: finalDuration
        });
    };
}
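
/*
 * Worked example of the conversion above, assuming the worker is polled once
 * per detection interval: with the webgl backend (1000 ms) and duration = 5,
 * finalDuration = 5 * (1000 / 1000) = 5 seconds; with the cpu backend
 * (6000 ms), the same count yields 5 * (6000 / 1000) = 30 seconds. When no
 * interval was ever set (detectionTimeInterval === -1), the raw count is
 * dispatched unchanged.
 */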

/**
 * Sets the time interval between the detection messages posted to the worker.
 *
 * @param {number} time - The time interval.
 * @returns {Object}
 */
function setDetectionTimeInterval(time: number) {
    return {
        type: SET_DETECTION_TIME_INTERVAL,
        time
    };
}
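
/*
 * A minimal usage sketch (hypothetical wiring; the actual call sites live
 * elsewhere in the feature and are not shown in this file):
 *
 *     store.dispatch(loadWorker());              // once, to spin up the worker
 *     store.dispatch(startFacialRecognition());
 *     // ...when the underlying media stream changes:
 *     store.dispatch(resetTrack());
 *     // ...when the conference ends:
 *     store.dispatch(stopFacialRecognition());
 */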