
faceLandmarksWorker.ts

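/**
 * A web worker that runs face detection (face box and/or face expressions) with the human library
 * on video frames received from the main thread and posts the results back.
 */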
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
import { Human, Config, FaceResult } from '@vladmandic/human';

import { DETECTION_TYPES, DETECT_FACE, INIT_WORKER, FACE_EXPRESSIONS_NAMING_MAPPING } from './constants';

type Detection = {
    detections: Array<FaceResult>,
    threshold?: number
};

type DetectInput = {
    image: ImageBitmap | ImageData,
    threshold: number
};

type FaceBox = {
    left: number,
    right: number,
    width?: number
};

type InitInput = {
    baseUrl: string,
    detectionTypes: string[]
};
/**
 * The instance of the human library used for detection.
 */
let human: Human;

/**
 * The detection types to be applied.
 */
let faceDetectionTypes: string[] = [];

/**
 * Flag indicating whether a face detection flow is currently in progress.
 */
let detectionInProgress = false;

/**
 * The last valid face bounding box (one that passed the threshold validation) that was sent to the main thread.
 */
let lastValidFaceBox: FaceBox;

/**
 * Configuration for the human library.
 */
const config: Partial<Config> = {
    backend: 'humangl',
    async: true,
    warmup: 'none',
    cacheModels: true,
    cacheSensitivity: 0,
    debug: false,
    deallocate: true,
    filter: { enabled: false },
    face: {
        enabled: true,
        detector: {
            enabled: true,
            rotation: false
        },
        mesh: { enabled: false },
        iris: { enabled: false },
        emotion: { enabled: false },
        description: { enabled: false }
    },
    hand: { enabled: false },
    gesture: { enabled: false },
    body: { enabled: false },
    segmentation: { enabled: false }
};
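/**
 * Computes a horizontal face box (left/right edges as percentages of the frame width) from the
 * given detections. Returns null when there are no detections or when the box has not moved by
 * at least the given threshold since the last reported one.
 */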
const detectFaceBox = async ({ detections, threshold }: Detection) => {
    if (!detections.length) {
        return null;
    }

    const faceBox: FaceBox = {
        // normalize to percentage based
        left: Math.round(Math.min(...detections.map(d => d.boxRaw[0])) * 100),
        right: Math.round(Math.max(...detections.map(d => d.boxRaw[0] + d.boxRaw[2])) * 100)
    };

    faceBox.width = Math.round(faceBox.right - faceBox.left);

    if (lastValidFaceBox && threshold && Math.abs(lastValidFaceBox.left - faceBox.left) < threshold) {
        return null;
    }

    lastValidFaceBox = faceBox;

    return faceBox;
};
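/**
 * Maps the dominant emotion of the first detected face to the application's face expression
 * naming, if emotion data is available.
 */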
const detectFaceExpression = async ({ detections }: Detection) =>
    detections[0]?.emotion && FACE_EXPRESSIONS_NAMING_MAPPING[detections[0]?.emotion[0].emotion];
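/**
 * Runs the requested detections on the received frame and posts the resulting face box and/or
 * face expression back to the main thread.
 */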
const detect = async ({ image, threshold } : DetectInput) => {
    let detections;
    let faceExpression;
    let faceBox;

    detectionInProgress = true;

    const imageTensor = human.tf.browser.fromPixels(image);

    if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS)) {
        const { face } = await human.detect(imageTensor, config);

        detections = face;
        faceExpression = await detectFaceExpression({ detections });
    }

    if (faceDetectionTypes.includes(DETECTION_TYPES.FACE_BOX)) {
        if (!detections) {
            const { face } = await human.detect(imageTensor, config);

            detections = face;
        }

        faceBox = await detectFaceBox({
            detections,
            threshold
        });
    }

    if (faceBox || faceExpression) {
        self.postMessage({
            faceBox,
            faceExpression
        });
    }

    // Dispose the input tensor so it does not leak memory between processed frames.
    human.tf.dispose(imageTensor);

    detectionInProgress = false;
};
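/**
 * Initializes the human library: selects the backend (humangl, or wasm when OffscreenCanvas is
 * not available), enables emotion detection when face expressions are requested and loads the
 * models.
 */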
const init = async ({ baseUrl, detectionTypes }: InitInput) => {
    faceDetectionTypes = detectionTypes;

    if (!human) {
        config.modelBasePath = baseUrl;

        if (!self.OffscreenCanvas) {
            config.backend = 'wasm';
            config.wasmPath = baseUrl;
            setWasmPaths(baseUrl);
        }

        if (detectionTypes.includes(DETECTION_TYPES.FACE_EXPRESSIONS) && config.face) {
            config.face.emotion = { enabled: true };
        }

        const initialHuman = new Human(config);

        try {
            await initialHuman.load();
        } catch (err) {
            console.error(err);
        }

        human = initialHuman;
    }
};
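/**
 * Handles the messages received from the main thread.
 */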
onmessage = function(message: MessageEvent<any>) {
    switch (message.data.type) {
        case DETECT_FACE: {
            if (!human || detectionInProgress) {
                return;
            }

            detect(message.data);
            break;
        }

        case INIT_WORKER: {
            init(message.data);
            break;
        }
    }
};