Skip to content
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .babelrc
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"presets": ["@babel/preset-env"]
}
9 changes: 9 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,15 @@ yarn run build

This will create a production version of the library in `/dist` directory.


## Unit Tests

To run the unit tests, run the following command:

```
yarn test
```

## Making Releases

_This section is a temporary guide for contributors who want to make an alpha release manually._
Expand Down
3 changes: 3 additions & 0 deletions babel.config.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
{
"presets": ["@babel/preset-env"]
}
19 changes: 19 additions & 0 deletions jest.config.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
/**
 * For a detailed explanation regarding each configuration property, visit:
 * https://jestjs.io/docs/configuration
 */

/** @type {import('jest').Config} */
const config = {
  // Always gather code-coverage data when the suite runs; report under /coverage.
  collectCoverage: true,
  coverageDirectory: "coverage",
  // "v8" uses the engine's built-in coverage instead of babel instrumentation.
  coverageProvider: "v8",
  // Runs once before the whole suite, in the main process (see setupTests.js).
  globalSetup: "./setupTests.js",
  // Exit with success even when no test files are found.
  passWithNoTests: true,
  // Simulate a browser-like DOM for the tests (jest-environment-jsdom).
  testEnvironment: "jsdom",
  testEnvironmentOptions: {
    resources: "usable", // Load image resources
  },
};

module.exports = config;
15 changes: 13 additions & 2 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@
"build": "webpack --config webpack.config.js --mode production",
"start": "webpack serve --config webpack.config.js --mode development",
"format": "prettier --write \"**/*.js\"",
"postinstall": "patch-package"
"postinstall": "patch-package",
"test": "jest"
},
"files": [
"dist"
Expand All @@ -26,7 +27,15 @@
"url": "https://github.com/ml5js/ml5-next-gen/issues"
},
"devDependencies": {
"@babel/core": "^7.23.2",
"@babel/preset-env": "^7.23.2",
"@tensorflow/tfjs-node": "^4.17.0",
"babel-jest": "^29.7.0",
"canvas": "^2.11.2",
"cross-fetch": "^4.0.0",
"html-webpack-plugin": "^5.5.3",
"jest": "^29.7.0",
"jest-environment-jsdom": "^29.7.0",
"patch-package": "^8.0.0",
"postinstall-postinstall": "^2.1.0",
"prettier": "2.8.8",
Expand All @@ -47,8 +56,10 @@
"@tensorflow-models/mobilenet": "^2.1.0",
"@tensorflow-models/pose-detection": "^2.1.0",
"@tensorflow/tfjs": "^4.2.0",
"@tensorflow/tfjs-backend-webgpu": "^4.12.0",
"@tensorflow/tfjs-vis": "^1.5.1",
"axios": "^1.3.4",
"canvas": "^2.11.2",
"webpack-merge": "^5.9.0"
}
}
}
27 changes: 27 additions & 0 deletions setupTests.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
// Jest global setup (wired via `globalSetup` in jest.config.js).
// Configures a Node-friendly TensorFlow backend and polyfills browser
// globals (ImageData, fetch) that the library code expects to exist.
//
// NOTE: previously this file mixed CommonJS `require` with ESM `import`;
// it now uses `import` consistently (babel transpiles them to requires).
import "@tensorflow/tfjs-node"; // loads the tensorflow/node backend to the registry
import { ImageData } from "canvas";
import crossFetch from "cross-fetch";
import * as tf from "@tensorflow/tfjs";

/**
 * Runs once before the whole test suite.
 * Switches tfjs to the native Node backend and installs browser polyfills.
 *
 * @returns {Promise<void>}
 */
async function setupTests() {
  console.log("Beginning setup");

  // Use the native TensorFlow backend registered by @tensorflow/tfjs-node.
  await tf.setBackend("tensorflow");
  // Tell tfjs it is running under Node, not a browser.
  tf.env().set("IS_BROWSER", false);
  tf.env().set("IS_NODE", true);

  // Use the node-canvas ImageData polyfill
  if (!global.ImageData) {
    global.ImageData = ImageData;
  }

  // Use cross-fetch as a polyfill for the browser fetch
  if (!global.fetch) {
    global.fetch = crossFetch;
  }

  console.log("Setup complete");
}

// Jest's globalSetup expects a CommonJS export of an async function.
module.exports = setupTests;
2 changes: 1 addition & 1 deletion src/BodyPose/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@ class BodyPose {
this.model = await poseDetection.createDetector(pipeline, modelConfig);

// for compatibility with p5's preload()
if (this.p5PreLoadExists) window._decrementPreload();
if (this.p5PreLoadExists()) window._decrementPreload();

return this;
}
Expand Down
75 changes: 39 additions & 36 deletions src/BodyPose/index.test.js
Original file line number Diff line number Diff line change
@@ -1,60 +1,63 @@
// Copyright (c) 2018 ml5
// Copyright (c) 2018-2024 ml5
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT

import { asyncLoadImage } from "../utils/testingUtils";
import poseNet from "./index";
import bodyPose from "./index";
import crossFetch from "cross-fetch";

const POSENET_IMG =
"https://github.com/ml5js/ml5-adjacent/raw/master/02_ImageClassification_Video/starter.png";

const POSENET_DEFAULTS = {
architecture: "MobileNetV1",
outputStride: 16,
flipHorizontal: false,
minConfidence: 0.5,
maxPoseDetections: 5,
scoreThreshold: 0.5,
nmsRadius: 20,
detectionType: "multiple",
inputResolution: 256,
multiplier: 0.75,
quantBytes: 2,
};

describe("PoseNet", () => {
let net;
describe("bodypose", () => {
let myBodyPose;
let image;

beforeAll(async () => {
jest.setTimeout(10000);
net = await poseNet();

// TODO: this should not be necessary! Should already be handled by setupTests.js.
if (!global.fetch) {
global.fetch = crossFetch;
}

myBodyPose = bodyPose();
await myBodyPose.ready;

image = await asyncLoadImage(POSENET_IMG);
});

it("instantiates poseNet", () => {
expect(net.architecture).toBe(POSENET_DEFAULTS.architecture);
expect(net.outputStride).toBe(POSENET_DEFAULTS.outputStride);
expect(net.inputResolution).toBe(POSENET_DEFAULTS.inputResolution);
expect(net.multiplier).toBe(POSENET_DEFAULTS.multiplier);
expect(net.quantBytes).toBe(POSENET_DEFAULTS.quantBytes);
it("instantiates bodyPose", () => {
expect(myBodyPose).toBeDefined()
expect(myBodyPose.model).toBeDefined();
});

it("detects poses in image", async () => {
const image = await asyncLoadImage(POSENET_IMG);

// Result should be an array with a single object containing pose and skeleton.
const result = await net.singlePose(image);
// Result should be an array with a single object containing the detection.
const result = await myBodyPose.detect(image);
expect(result).toHaveLength(1);
expect(result[0]).toHaveProperty("pose");
expect(result[0]).toHaveProperty("skeleton");
expect(result[0]).toHaveProperty("box");
expect(result[0]).toHaveProperty("score");
expect(result[0].keypoints.length).toBeGreaterThanOrEqual(5);

// Verify a known outcome.
const nose = result[0].pose.keypoints.find(
(keypoint) => keypoint.part === "nose"
const nose = result[0].keypoints.find(
(keypoint) => keypoint.name === "nose"
);
// Should be {"name": "nose", "score": 0.7217329144477844, "x": 454.1112813949585, "y": 256.606980448618}
expect(nose).toBeTruthy();
expect(nose.position.x).toBeCloseTo(448.6, 0);
expect(nose.position.y).toBeCloseTo(255.9, 0);
expect(nose.score).toBeCloseTo(0.999);
expect(nose.x).toBeCloseTo(454.1, 0);
expect(nose.y).toBeCloseTo(256.6, 0);
expect(nose.score).toBeCloseTo(0.721, 2);
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As an aside - It is slightly concerning that the old model had a 0.999 confidence score for detecting the nose and the new model is only 0.721.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I am wondering if the confidence score will change when running it with the tfjs runtime rather than mediapipe 🤔

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, maybe it would make more sense to just validate that a floating point confidence score is outputted rather than a specific value? But we can address this later!

});

it("calls the user's callback",(done) => {
expect.assertions(1);
const callback = (result) => {
expect(result).toHaveLength(1); // don't need to repeat the rest
done();
}
myBodyPose.detect(image, callback);
});
});
12 changes: 10 additions & 2 deletions src/HandPose/index.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT

import crossFetch from 'cross-fetch';
import { asyncLoadImage } from "../utils/testingUtils";
import handpose from "./index";

Expand All @@ -14,10 +15,17 @@ describe("Handpose", () => {

beforeAll(async () => {
jest.setTimeout(10000);
handposeInstance = await handpose();

// TODO: this should not be necessary! Should already be handled by setupTests.js.
if (!global.fetch) {
global.fetch = crossFetch;
}

handposeInstance = handpose();
await handposeInstance.ready;
});

it("detects poses in image", async () => {
it("detects hands in image", async () => {
testImage = await asyncLoadImage(HANDPOSE_IMG);
const handPredictions = await handposeInstance.predict(testImage);
expect(handPredictions).not.toHaveLength(0);
Expand Down
36 changes: 36 additions & 0 deletions src/utils/testingUtils/index.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
// eslint-disable-next-line import/no-extraneous-dependencies
import { createImageData, ImageData } from "canvas";

/**
 * Loads an image asynchronously and resolves once it is ready for use.
 *
 * @param {string} src - Image source: an http(s) URL, local path, or data URI.
 * @returns {Promise<Image>} the fully loaded image element.
 * @throws {Error} if the image fails to load (previously this hung forever:
 *   the promise had no `onerror` path and could never reject).
 */
export const asyncLoadImage = async (src) => {
  const img = new Image();
  if (src.startsWith("http")) {
    // "anonymous" is the valid enumerated CORS value ("true" was treated as
    // "anonymous" anyway, since unknown values fall back to it).
    img.crossOrigin = "anonymous";
  }
  // Attach the handlers BEFORE assigning `src`: node-canvas can fire `onload`
  // synchronously from the `src` setter, which would miss a late-attached
  // handler and deadlock the await below.
  const loaded = new Promise((resolve, reject) => {
    img.onload = resolve;
    img.onerror = () =>
      reject(new Error(`asyncLoadImage: failed to load image from "${src}"`));
  });
  img.src = src;
  await loaded;
  return img;
};

/**
 * Convenience helper: loads a known test photograph of a robin (bird)
 * hosted on the ml5 library CDN.
 *
 * @returns {Promise<Image>} the loaded bird image.
 */
export const getRobin = async () => {
  const robinUrl =
    "https://cdn.jsdelivr.net/gh/ml5js/ml5-library@main/assets/bird.jpg";
  return asyncLoadImage(robinUrl);
};

/**
 * Builds an ImageData object filled with random RGBA pixel values.
 *
 * @param {number} [width=200] - image width in pixels.
 * @param {number} [height=100] - image height in pixels.
 * @returns {ImageData} randomly filled image data (node-canvas implementation).
 */
export const randomImageData = (width = 200, height = 100) => {
  // Each pixel occupies 4 bytes: R, G, B, A.
  const byteCount = width * height * 4;
  const pixels = new Uint8ClampedArray(byteCount);
  for (let i = 0; i < byteCount; i += 1) {
    pixels[i] = Math.floor(Math.random() * 256);
  }
  // Initialize a new ImageData object
  return createImageData(pixels, width, height);
};

/**
 * Installs the node-canvas ImageData constructor as a global, mirroring the
 * browser environment. Does nothing if a global ImageData already exists.
 */
export const polyfillImageData = () => {
  if (global.ImageData) return;
  global.ImageData = ImageData;
};
Loading