You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

middleware.js 3.1KB

  1. // @flow
  2. import {
  3. CONFERENCE_JOINED,
  4. CONFERENCE_WILL_LEAVE,
  5. getCurrentConference
  6. } from '../base/conference';
  7. import { getParticipantCount } from '../base/participants';
  8. import { MiddlewareRegistry } from '../base/redux';
  9. import { TRACK_UPDATED, TRACK_ADDED, TRACK_REMOVED } from '../base/tracks';
  10. import { VIRTUAL_BACKGROUND_TRACK_CHANGED } from '../virtual-background/actionTypes';
  11. import { ADD_FACIAL_EXPRESSION } from './actionTypes';
  12. import {
  13. addToFacialExpressionsBuffer,
  14. changeTrack,
  15. loadWorker,
  16. resetTrack,
  17. stopFacialRecognition,
  18. startFacialRecognition
  19. } from './actions';
  20. import { sendFacialExpressionToParticipants, sendFacialExpressionToServer } from './functions';
  21. MiddlewareRegistry.register(({ dispatch, getState }) => next => action => {
  22. const { enableFacialRecognition } = getState()['features/base/config'];
  23. if (!enableFacialRecognition) {
  24. return next(action);
  25. }
  26. if (action.type === CONFERENCE_JOINED) {
  27. dispatch(loadWorker());
  28. return next(action);
  29. }
  30. if (!getCurrentConference(getState())) {
  31. return next(action);
  32. }
  33. switch (action.type) {
  34. case CONFERENCE_WILL_LEAVE : {
  35. dispatch(stopFacialRecognition());
  36. return next(action);
  37. }
  38. case TRACK_UPDATED: {
  39. const { videoType, type } = action.track.jitsiTrack;
  40. if (videoType === 'camera') {
  41. const { muted, videoStarted } = action.track;
  42. if (videoStarted === true) {
  43. dispatch(startFacialRecognition());
  44. }
  45. if (muted !== undefined) {
  46. if (muted) {
  47. dispatch(stopFacialRecognition());
  48. } else {
  49. dispatch(startFacialRecognition());
  50. type === 'presenter' && changeTrack(action.track);
  51. }
  52. }
  53. }
  54. return next(action);
  55. }
  56. case TRACK_ADDED: {
  57. const { mediaType, videoType } = action.track;
  58. if (mediaType === 'presenter' && videoType === 'camera') {
  59. dispatch(startFacialRecognition());
  60. changeTrack(action.track);
  61. }
  62. return next(action);
  63. }
  64. case TRACK_REMOVED: {
  65. const { videoType } = action.track.jitsiTrack;
  66. if ([ 'camera', 'desktop' ].includes(videoType)) {
  67. dispatch(stopFacialRecognition());
  68. }
  69. return next(action);
  70. }
  71. case VIRTUAL_BACKGROUND_TRACK_CHANGED: {
  72. dispatch(resetTrack());
  73. return next(action);
  74. }
  75. case ADD_FACIAL_EXPRESSION: {
  76. const state = getState();
  77. const conference = getCurrentConference(state);
  78. if (getParticipantCount(state) > 1) {
  79. sendFacialExpressionToParticipants(conference, action.facialExpression, action.duration);
  80. }
  81. sendFacialExpressionToServer(conference, action.facialExpression, action.duration);
  82. dispatch(addToFacialExpressionsBuffer({
  83. emotion: action.facialExpression,
  84. timestamp: action.timestamp
  85. }));
  86. return next(action);
  87. }
  88. }
  89. return next(action);
  90. });