From 1eb189070c15d562c8de3c42f3b6df89385f70ba Mon Sep 17 00:00:00 2001 From: mop9047 Date: Mon, 1 Jul 2024 23:53:55 +0800 Subject: [PATCH 01/13] Initial Commit, added files and tests --- .../timeSeries-mousexy-keypoints/index.html | 23 ++++ .../timeSeries-mousexy-keypoints/sketch.js | 105 ++++++++++++++++++ src/LSTM/index.js | 78 +++++++++++++ src/index.js | 3 + 4 files changed, 209 insertions(+) create mode 100644 examples/timeSeries-mousexy-keypoints/index.html create mode 100644 examples/timeSeries-mousexy-keypoints/sketch.js create mode 100644 src/LSTM/index.js diff --git a/examples/timeSeries-mousexy-keypoints/index.html b/examples/timeSeries-mousexy-keypoints/index.html new file mode 100644 index 00000000..38789782 --- /dev/null +++ b/examples/timeSeries-mousexy-keypoints/index.html @@ -0,0 +1,23 @@ + + + + + + + + ml5.js Time Series Mouse classification Example + + + + + + + + diff --git a/examples/timeSeries-mousexy-keypoints/sketch.js b/examples/timeSeries-mousexy-keypoints/sketch.js new file mode 100644 index 00000000..db4ad823 --- /dev/null +++ b/examples/timeSeries-mousexy-keypoints/sketch.js @@ -0,0 +1,105 @@ +let sequence = []; +let datasets = []; + +let pressedOnce = true; + +let boxes = + +[[{"name":"box","mouse":[81,102]},{"name":"box","mouse":[85,101]},{"name":"box","mouse":[134,105]},{"name":"box","mouse":[182,106]},{"name":"box","mouse":[219,108]},{"name":"box","mouse":[244,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[263,154]},{"name":"box","mouse":[267,215]},{"name":"box","mouse":[269,235]},{"name":"box","mouse":[269,239]},{"name":"box","mouse":[265,239]},{"name":"box","mouse":[195,236]},{"name":"box","mouse":[147,243]},{"name":"box","mouse":[113,243]},{"name":"box","mouse":[111,242]},{"name":"box","mouse":[106,236]},{"name":"box","mouse":[101,191]},{"name":"box","mouse":[95,162]}], + 
+[{"name":"box","mouse":[105,97]},{"name":"box","mouse":[149,99]},{"name":"box","mouse":[191,99]},{"name":"box","mouse":[245,99]},{"name":"box","mouse":[257,99]},{"name":"box","mouse":[260,100]},{"name":"box","mouse":[255,144]},{"name":"box","mouse":[249,215]},{"name":"box","mouse":[249,239]},{"name":"box","mouse":[248,240]},{"name":"box","mouse":[200,234]},{"name":"box","mouse":[134,232]},{"name":"box","mouse":[105,232]},{"name":"box","mouse":[105,229]},{"name":"box","mouse":[96,179]},{"name":"box","mouse":[96,121]},{"name":"box","mouse":[97,97]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]}], + +[{"name":"box","mouse":[94,94]},{"name":"box","mouse":[133,95]},{"name":"box","mouse":[194,100]},{"name":"box","mouse":[245,100]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[257,132]},{"name":"box","mouse":[253,196]},{"name":"box","mouse":[253,227]},{"name":"box","mouse":[253,228]},{"name":"box","mouse":[241,227]},{"name":"box","mouse":[183,221]},{"name":"box","mouse":[137,219]},{"name":"box","mouse":[105,219]},{"name":"box","mouse":[104,219]},{"name":"box","mouse":[98,188]},{"name":"box","mouse":[97,157]},{"name":"box","mouse":[97,132]},{"name":"box","mouse":[97,112]},{"name":"box","mouse":[98,101]}], + +[{"name":"box","mouse":[88,92]},{"name":"box","mouse":[94,91]},{"name":"box","mouse":[158,94]},{"name":"box","mouse":[209,96]},{"name":"box","mouse":[244,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[244,140]},{"name":"box","mouse":[238,203]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[202,222]},{"name":"box","mouse":[154,220]},{"name":"box","mouse":[96,220]},{"name":"box","mouse":[92,220]},{"name":"box","mouse":[90,217]},{"name":"box","mouse":[89,183]},{"name":"box","mouse":[89,149]},{"name":"box","mouse":[89,124]},{"name":"box","mouse":[89,98]}], + 
+[{"name":"box","mouse":[98,100]},{"name":"box","mouse":[169,101]},{"name":"box","mouse":[278,104]},{"name":"box","mouse":[310,104]},{"name":"box","mouse":[312,104]},{"name":"box","mouse":[312,148]},{"name":"box","mouse":[313,244]},{"name":"box","mouse":[314,248]},{"name":"box","mouse":[313,250]},{"name":"box","mouse":[222,235]},{"name":"box","mouse":[151,237]},{"name":"box","mouse":[85,239]},{"name":"box","mouse":[82,231]},{"name":"box","mouse":[79,185]},{"name":"box","mouse":[84,140]},{"name":"box","mouse":[93,113]},{"name":"box","mouse":[97,104]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]}], + +[{"name":"box","mouse":[69,98]},{"name":"box","mouse":[149,97]},{"name":"box","mouse":[263,97]},{"name":"box","mouse":[303,98]},{"name":"box","mouse":[303,100]},{"name":"box","mouse":[300,201]},{"name":"box","mouse":[306,296]},{"name":"box","mouse":[307,300]},{"name":"box","mouse":[293,296]},{"name":"box","mouse":[199,275]},{"name":"box","mouse":[145,272]},{"name":"box","mouse":[109,272]},{"name":"box","mouse":[99,269]},{"name":"box","mouse":[95,266]},{"name":"box","mouse":[87,206]},{"name":"box","mouse":[82,140]},{"name":"box","mouse":[82,116]},{"name":"box","mouse":[82,102]},{"name":"box","mouse":[82,100]},{"name":"box","mouse":[82,100]}], + +[{"name":"box","mouse":[61,103]},{"name":"box","mouse":[68,100]},{"name":"box","mouse":[145,101]},{"name":"box","mouse":[221,104]},{"name":"box","mouse":[264,104]},{"name":"box","mouse":[291,107]},{"name":"box","mouse":[291,109]},{"name":"box","mouse":[289,177]},{"name":"box","mouse":[281,248]},{"name":"box","mouse":[281,271]},{"name":"box","mouse":[278,272]},{"name":"box","mouse":[230,270]},{"name":"box","mouse":[179,268]},{"name":"box","mouse":[112,268]},{"name":"box","mouse":[83,268]},{"name":"box","mouse":[79,261]},{"name":"box","mouse":[77,211]},{"name":"box","mouse":[77,168]},{"name":"box","mouse":[77,129]},{"name":"box","mouse":[76,117]}], + 
+[{"name":"box","mouse":[79,108]},{"name":"box","mouse":[98,105]},{"name":"box","mouse":[178,105]},{"name":"box","mouse":[257,106]},{"name":"box","mouse":[277,108]},{"name":"box","mouse":[285,110]},{"name":"box","mouse":[286,119]},{"name":"box","mouse":[285,218]},{"name":"box","mouse":[285,263]},{"name":"box","mouse":[284,265]},{"name":"box","mouse":[273,264]},{"name":"box","mouse":[197,264]},{"name":"box","mouse":[131,262]},{"name":"box","mouse":[92,257]},{"name":"box","mouse":[88,256]},{"name":"box","mouse":[85,222]},{"name":"box","mouse":[81,172]},{"name":"box","mouse":[79,137]},{"name":"box","mouse":[79,113]},{"name":"box","mouse":[79,105]}], + +[{"name":"box","mouse":[81,122]},{"name":"box","mouse":[91,118]},{"name":"box","mouse":[190,124]},{"name":"box","mouse":[291,129]},{"name":"box","mouse":[309,131]},{"name":"box","mouse":[313,152]},{"name":"box","mouse":[305,201]},{"name":"box","mouse":[298,255]},{"name":"box","mouse":[297,274]},{"name":"box","mouse":[292,276]},{"name":"box","mouse":[238,268]},{"name":"box","mouse":[185,266]},{"name":"box","mouse":[129,266]},{"name":"box","mouse":[101,265]},{"name":"box","mouse":[97,264]},{"name":"box","mouse":[97,219]},{"name":"box","mouse":[97,165]},{"name":"box","mouse":[95,134]},{"name":"box","mouse":[93,122]},{"name":"box","mouse":[93,116]}], + +[{"name":"box","mouse":[101,102]},{"name":"box","mouse":[134,100]},{"name":"box","mouse":[237,104]},{"name":"box","mouse":[275,104]},{"name":"box","mouse":[283,105]},{"name":"box","mouse":[285,132]},{"name":"box","mouse":[285,197]},{"name":"box","mouse":[285,237]},{"name":"box","mouse":[286,278]},{"name":"box","mouse":[286,285]},{"name":"box","mouse":[249,284]},{"name":"box","mouse":[186,274]},{"name":"box","mouse":[121,263]},{"name":"box","mouse":[111,262]},{"name":"box","mouse":[108,234]},{"name":"box","mouse":[105,176]},{"name":"box","mouse":[101,130]},{"name":"box","mouse":[97,98]},{"name":"box","mouse":[97,88]},{"name":"box","mouse":[97,88]}]] + +let circles = + 
+[[{"name":"circle","mouse":[163,93]},{"name":"circle","mouse":[197,92]},{"name":"circle","mouse":[270,122]},{"name":"circle","mouse":[292,160]},{"name":"circle","mouse":[292,226]},{"name":"circle","mouse":[257,278]},{"name":"circle","mouse":[169,300]},{"name":"circle","mouse":[75,283]},{"name":"circle","mouse":[61,234]},{"name":"circle","mouse":[64,192]},{"name":"circle","mouse":[80,134]},{"name":"circle","mouse":[101,97]},{"name":"circle","mouse":[133,82]},{"name":"circle","mouse":[148,81]},{"name":"circle","mouse":[172,83]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[202,90]},{"name":"circle","mouse":[181,85]}], + +[{"name":"circle","mouse":[168,63]},{"name":"circle","mouse":[265,93]},{"name":"circle","mouse":[312,152]},{"name":"circle","mouse":[318,206]},{"name":"circle","mouse":[293,265]},{"name":"circle","mouse":[258,294]},{"name":"circle","mouse":[206,301]},{"name":"circle","mouse":[148,278]},{"name":"circle","mouse":[105,240]},{"name":"circle","mouse":[97,193]},{"name":"circle","mouse":[106,153]},{"name":"circle","mouse":[125,111]},{"name":"circle","mouse":[149,81]},{"name":"circle","mouse":[164,73]},{"name":"circle","mouse":[191,68]},{"name":"circle","mouse":[249,73]},{"name":"circle","mouse":[275,91]},{"name":"circle","mouse":[301,126]},{"name":"circle","mouse":[314,169]},{"name":"circle","mouse":[313,222]}], + 
+[{"name":"circle","mouse":[169,87]},{"name":"circle","mouse":[222,95]},{"name":"circle","mouse":[276,125]},{"name":"circle","mouse":[309,186]},{"name":"circle","mouse":[309,249]},{"name":"circle","mouse":[286,281]},{"name":"circle","mouse":[212,307]},{"name":"circle","mouse":[170,304]},{"name":"circle","mouse":[123,262]},{"name":"circle","mouse":[95,217]},{"name":"circle","mouse":[90,158]},{"name":"circle","mouse":[109,113]},{"name":"circle","mouse":[130,85]},{"name":"circle","mouse":[153,81]},{"name":"circle","mouse":[207,82]},{"name":"circle","mouse":[235,93]},{"name":"circle","mouse":[253,109]},{"name":"circle","mouse":[293,188]},{"name":"circle","mouse":[289,217]},{"name":"circle","mouse":[219,290]}], + +[{"name":"circle","mouse":[165,85]},{"name":"circle","mouse":[235,98]},{"name":"circle","mouse":[266,129]},{"name":"circle","mouse":[293,184]},{"name":"circle","mouse":[298,237]},{"name":"circle","mouse":[289,281]},{"name":"circle","mouse":[261,316]},{"name":"circle","mouse":[214,330]},{"name":"circle","mouse":[170,329]},{"name":"circle","mouse":[129,307]},{"name":"circle","mouse":[102,264]},{"name":"circle","mouse":[96,221]},{"name":"circle","mouse":[105,172]},{"name":"circle","mouse":[124,125]},{"name":"circle","mouse":[141,95]},{"name":"circle","mouse":[161,85]},{"name":"circle","mouse":[221,92]},{"name":"circle","mouse":[258,111]},{"name":"circle","mouse":[283,137]},{"name":"circle","mouse":[299,169]}], + 
+[{"name":"circle","mouse":[187,94]},{"name":"circle","mouse":[255,109]},{"name":"circle","mouse":[296,157]},{"name":"circle","mouse":[321,231]},{"name":"circle","mouse":[317,284]},{"name":"circle","mouse":[293,331]},{"name":"circle","mouse":[259,339]},{"name":"circle","mouse":[169,329]},{"name":"circle","mouse":[129,305]},{"name":"circle","mouse":[101,242]},{"name":"circle","mouse":[98,196]},{"name":"circle","mouse":[117,145]},{"name":"circle","mouse":[144,121]},{"name":"circle","mouse":[180,106]},{"name":"circle","mouse":[207,105]},{"name":"circle","mouse":[239,111]},{"name":"circle","mouse":[261,123]},{"name":"circle","mouse":[285,144]},{"name":"circle","mouse":[302,185]},{"name":"circle","mouse":[307,228]}], + +[{"name":"circle","mouse":[181,73]},{"name":"circle","mouse":[242,84]},{"name":"circle","mouse":[301,117]},{"name":"circle","mouse":[336,189]},{"name":"circle","mouse":[344,253]},{"name":"circle","mouse":[321,297]},{"name":"circle","mouse":[270,319]},{"name":"circle","mouse":[204,319]},{"name":"circle","mouse":[139,317]},{"name":"circle","mouse":[73,280]},{"name":"circle","mouse":[57,237]},{"name":"circle","mouse":[58,181]},{"name":"circle","mouse":[76,125]},{"name":"circle","mouse":[97,99]},{"name":"circle","mouse":[125,83]},{"name":"circle","mouse":[159,79]},{"name":"circle","mouse":[179,79]},{"name":"circle","mouse":[197,79]},{"name":"circle","mouse":[218,81]},{"name":"circle","mouse":[237,85]}], + 
+[{"name":"circle","mouse":[194,41]},{"name":"circle","mouse":[261,46]},{"name":"circle","mouse":[305,80]},{"name":"circle","mouse":[333,133]},{"name":"circle","mouse":[345,185]},{"name":"circle","mouse":[340,252]},{"name":"circle","mouse":[316,290]},{"name":"circle","mouse":[286,312]},{"name":"circle","mouse":[239,320]},{"name":"circle","mouse":[164,305]},{"name":"circle","mouse":[115,273]},{"name":"circle","mouse":[82,221]},{"name":"circle","mouse":[69,181]},{"name":"circle","mouse":[70,129]},{"name":"circle","mouse":[89,91]},{"name":"circle","mouse":[106,69]},{"name":"circle","mouse":[129,50]},{"name":"circle","mouse":[168,39]},{"name":"circle","mouse":[197,38]},{"name":"circle","mouse":[211,38]}], + +[{"name":"circle","mouse":[172,36]},{"name":"circle","mouse":[240,42]},{"name":"circle","mouse":[311,70]},{"name":"circle","mouse":[365,129]},{"name":"circle","mouse":[384,188]},{"name":"circle","mouse":[376,238]},{"name":"circle","mouse":[353,285]},{"name":"circle","mouse":[311,322]},{"name":"circle","mouse":[248,330]},{"name":"circle","mouse":[163,304]},{"name":"circle","mouse":[121,265]},{"name":"circle","mouse":[97,183]},{"name":"circle","mouse":[101,132]},{"name":"circle","mouse":[114,91]},{"name":"circle","mouse":[130,69]},{"name":"circle","mouse":[145,58]},{"name":"circle","mouse":[158,53]},{"name":"circle","mouse":[171,44]},{"name":"circle","mouse":[176,41]},{"name":"circle","mouse":[177,41]}], + 
+[{"name":"circle","mouse":[191,33]},{"name":"circle","mouse":[262,34]},{"name":"circle","mouse":[317,66]},{"name":"circle","mouse":[347,133]},{"name":"circle","mouse":[350,215]},{"name":"circle","mouse":[328,301]},{"name":"circle","mouse":[289,346]},{"name":"circle","mouse":[241,341]},{"name":"circle","mouse":[191,324]},{"name":"circle","mouse":[141,277]},{"name":"circle","mouse":[120,225]},{"name":"circle","mouse":[124,167]},{"name":"circle","mouse":[133,128]},{"name":"circle","mouse":[146,100]},{"name":"circle","mouse":[157,72]},{"name":"circle","mouse":[164,58]},{"name":"circle","mouse":[173,45]},{"name":"circle","mouse":[180,41]},{"name":"circle","mouse":[182,38]},{"name":"circle","mouse":[187,37]}], + +[{"name":"circle","mouse":[150,39]},{"name":"circle","mouse":[205,36]},{"name":"circle","mouse":[274,62]},{"name":"circle","mouse":[304,110]},{"name":"circle","mouse":[327,197]},{"name":"circle","mouse":[311,282]},{"name":"circle","mouse":[279,332]},{"name":"circle","mouse":[257,345]},{"name":"circle","mouse":[217,333]},{"name":"circle","mouse":[167,296]},{"name":"circle","mouse":[133,249]},{"name":"circle","mouse":[117,201]},{"name":"circle","mouse":[117,150]},{"name":"circle","mouse":[120,120]},{"name":"circle","mouse":[129,85]},{"name":"circle","mouse":[133,69]},{"name":"circle","mouse":[141,52]},{"name":"circle","mouse":[149,41]},{"name":"circle","mouse":[152,37]},{"name":"circle","mouse":[153,37]}]] + +let classifer; + +function setup() { + ml5.setBackend("webgl"); + createCanvas(400, 400); + frameRate(10); + background(220); + classifer = ml5.timeSeries(); +} + +function draw() { + if (keyIsDown(67) && pressedOnce){ + sequence.push({"name":"circle","mouse": [mouseX,mouseY]}); + ellipse(mouseX,mouseY,10); + if (sequence.length == 20){ + pressedOnce = false; + datasets.push(sequence); + sequence = []; + console.log("finished"); + background(220); + } + + } else if (keyIsDown(66) && pressedOnce){ + sequence.push({"name":"box","mouse": [mouseX,mouseY]}); + 
ellipse(mouseX,mouseY,10); + if (sequence.length == 20){ + pressedOnce = false; + datasets.push(sequence); + sequence = []; + console.log("finished"); + background(220); + } + } + + if (datasets.length == 10){ + file = JSON.stringify(datasets); + console.log(file); + } +} + +function keyReleased(){ + pressedOnce = true; +} + +function keyPressed(){ + if (key == 't'){ + classifer.createArchitecture(); + console.log('done architecture'); + } else if (key == 'y'){ + classifer.compileModel(); + console.log('done compiling the thing'); + } else if (key == 'u'){ + classifer.summarizeModel(); + console.log('done summarizing'); + } +} \ No newline at end of file diff --git a/src/LSTM/index.js b/src/LSTM/index.js new file mode 100644 index 00000000..548d9965 --- /dev/null +++ b/src/LSTM/index.js @@ -0,0 +1,78 @@ + +import * as tf from "@tensorflow/tfjs"; +// import '@tensorflow/tfjs-node'; +// import callCallback from "../utils/callcallback"; + +class LSTMify{ + + constructor (options, callback){ + // sample architecture just to try + this.model = tf.sequential(); + + } + + createArchitecture() { + this.model.add(tf.layers.lstm({ + units: 40, + inputShape: [20,2] + })); + + this.model.add(tf.layers.dense({ + units: 20, + activation: 'relu', + })); + + this.model.add(tf.layers.dense({ + units: 2, + activation: 'softmax', + })); + } + + compileModel(){ + this.model.compile({ + optimizer: 'adam', + loss: 'categoricalCrossentropy', + metrics: ['accuracy'] + }); + } + + fitModel(xs,ys){ + this.model.fit(xs, ys,{ + epochs: 20, + batchSize: 32, + callbacks: { + onEpochEnd: (epoch, logs) => { + console.log(`Epoch ${epoch + 1}: loss = ${logs.loss}, accuracy = ${logs.acc}`); + }} + }) + } + + summarizeModel(){ + this.model.summary() + } + +} + +const timeSeries = (inputsOrOptions, outputsOrCallback, callback) => { + // let options; + // let cb; + + // if (inputsOrOptions instanceof Object) { + // options = inputsOrOptions; + // cb = outputsOrCallback; + // } else { + // options = { + 
// inputs: inputsOrOptions, + // outputs: outputsOrCallback, + // }; + // cb = callback; + // } + + // const instance = new LSTMify(options, cb); + // return instance; + + const instance = new LSTMify(); + return instance; + }; + + export default timeSeries; \ No newline at end of file diff --git a/src/index.js b/src/index.js index 09416c1c..5e771d34 100644 --- a/src/index.js +++ b/src/index.js @@ -7,6 +7,7 @@ import imageClassifier from "./ImageClassifier"; import soundClassifier from "./SoundClassifier"; import setBackend from "./utils/setBackend"; import bodySegmentation from "./BodySegmentation"; +import timeSeries from "./LSTM"; import communityStatement from "./utils/communityStatement"; import * as tf from "@tensorflow/tfjs"; import * as tfvis from "@tensorflow/tfjs-vis"; @@ -22,6 +23,7 @@ const withPreload = { neuralNetwork, sentiment, soundClassifier, + }; const ml5 = Object.assign({ p5Utils }, withPreload, { @@ -30,6 +32,7 @@ const ml5 = Object.assign({ p5Utils }, withPreload, { setBackend, version: packageInfo.version, setP5: p5Utils.setP5.bind(p5Utils), + timeSeries }); p5Utils.shouldPreload(ml5, Object.keys(withPreload)); From b8685d23a9f311cb54bf1161858f7250a6c6e59d Mon Sep 17 00:00:00 2001 From: mop9047 Date: Fri, 5 Jul 2024 23:49:05 +0800 Subject: [PATCH 02/13] Created methods based on circle or square mousexy --- .../sketch-old.js | 108 ++++++++++++++++ .../timeSeries-mousexy-keypoints/sketch.js | 116 +++++++++--------- src/LSTM/index.js | 112 ++++++++++++++--- 3 files changed, 264 insertions(+), 72 deletions(-) create mode 100644 examples/timeSeries-mousexy-keypoints/sketch-old.js diff --git a/examples/timeSeries-mousexy-keypoints/sketch-old.js b/examples/timeSeries-mousexy-keypoints/sketch-old.js new file mode 100644 index 00000000..9f45c409 --- /dev/null +++ b/examples/timeSeries-mousexy-keypoints/sketch-old.js @@ -0,0 +1,108 @@ +let sequence = []; +let datasets = []; + +let pressedOnce = true; + +let boxes = + 
+[[{"name":"box","mouse":[81,102]},{"name":"box","mouse":[85,101]},{"name":"box","mouse":[134,105]},{"name":"box","mouse":[182,106]},{"name":"box","mouse":[219,108]},{"name":"box","mouse":[244,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[263,154]},{"name":"box","mouse":[267,215]},{"name":"box","mouse":[269,235]},{"name":"box","mouse":[269,239]},{"name":"box","mouse":[265,239]},{"name":"box","mouse":[195,236]},{"name":"box","mouse":[147,243]},{"name":"box","mouse":[113,243]},{"name":"box","mouse":[111,242]},{"name":"box","mouse":[106,236]},{"name":"box","mouse":[101,191]},{"name":"box","mouse":[95,162]}], + +[{"name":"box","mouse":[105,97]},{"name":"box","mouse":[149,99]},{"name":"box","mouse":[191,99]},{"name":"box","mouse":[245,99]},{"name":"box","mouse":[257,99]},{"name":"box","mouse":[260,100]},{"name":"box","mouse":[255,144]},{"name":"box","mouse":[249,215]},{"name":"box","mouse":[249,239]},{"name":"box","mouse":[248,240]},{"name":"box","mouse":[200,234]},{"name":"box","mouse":[134,232]},{"name":"box","mouse":[105,232]},{"name":"box","mouse":[105,229]},{"name":"box","mouse":[96,179]},{"name":"box","mouse":[96,121]},{"name":"box","mouse":[97,97]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]}], + +[{"name":"box","mouse":[94,94]},{"name":"box","mouse":[133,95]},{"name":"box","mouse":[194,100]},{"name":"box","mouse":[245,100]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[257,132]},{"name":"box","mouse":[253,196]},{"name":"box","mouse":[253,227]},{"name":"box","mouse":[253,228]},{"name":"box","mouse":[241,227]},{"name":"box","mouse":[183,221]},{"name":"box","mouse":[137,219]},{"name":"box","mouse":[105,219]},{"name":"box","mouse":[104,219]},{"name":"box","mouse":[98,188]},{"name":"box","mouse":[97,157]},{"name":"box","mouse":[97,132]},{"name":"box","mouse":[97,112]},{"name":"box","mouse":[98,101]}], + 
+[{"name":"box","mouse":[88,92]},{"name":"box","mouse":[94,91]},{"name":"box","mouse":[158,94]},{"name":"box","mouse":[209,96]},{"name":"box","mouse":[244,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[244,140]},{"name":"box","mouse":[238,203]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[202,222]},{"name":"box","mouse":[154,220]},{"name":"box","mouse":[96,220]},{"name":"box","mouse":[92,220]},{"name":"box","mouse":[90,217]},{"name":"box","mouse":[89,183]},{"name":"box","mouse":[89,149]},{"name":"box","mouse":[89,124]},{"name":"box","mouse":[89,98]}], + +[{"name":"box","mouse":[98,100]},{"name":"box","mouse":[169,101]},{"name":"box","mouse":[278,104]},{"name":"box","mouse":[310,104]},{"name":"box","mouse":[312,104]},{"name":"box","mouse":[312,148]},{"name":"box","mouse":[313,244]},{"name":"box","mouse":[314,248]},{"name":"box","mouse":[313,250]},{"name":"box","mouse":[222,235]},{"name":"box","mouse":[151,237]},{"name":"box","mouse":[85,239]},{"name":"box","mouse":[82,231]},{"name":"box","mouse":[79,185]},{"name":"box","mouse":[84,140]},{"name":"box","mouse":[93,113]},{"name":"box","mouse":[97,104]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]}], + +[{"name":"box","mouse":[69,98]},{"name":"box","mouse":[149,97]},{"name":"box","mouse":[263,97]},{"name":"box","mouse":[303,98]},{"name":"box","mouse":[303,100]},{"name":"box","mouse":[300,201]},{"name":"box","mouse":[306,296]},{"name":"box","mouse":[307,300]},{"name":"box","mouse":[293,296]},{"name":"box","mouse":[199,275]},{"name":"box","mouse":[145,272]},{"name":"box","mouse":[109,272]},{"name":"box","mouse":[99,269]},{"name":"box","mouse":[95,266]},{"name":"box","mouse":[87,206]},{"name":"box","mouse":[82,140]},{"name":"box","mouse":[82,116]},{"name":"box","mouse":[82,102]},{"name":"box","mouse":[82,100]},{"name":"box","mouse":[82,100]}], + 
+[{"name":"box","mouse":[61,103]},{"name":"box","mouse":[68,100]},{"name":"box","mouse":[145,101]},{"name":"box","mouse":[221,104]},{"name":"box","mouse":[264,104]},{"name":"box","mouse":[291,107]},{"name":"box","mouse":[291,109]},{"name":"box","mouse":[289,177]},{"name":"box","mouse":[281,248]},{"name":"box","mouse":[281,271]},{"name":"box","mouse":[278,272]},{"name":"box","mouse":[230,270]},{"name":"box","mouse":[179,268]},{"name":"box","mouse":[112,268]},{"name":"box","mouse":[83,268]},{"name":"box","mouse":[79,261]},{"name":"box","mouse":[77,211]},{"name":"box","mouse":[77,168]},{"name":"box","mouse":[77,129]},{"name":"box","mouse":[76,117]}], + +[{"name":"box","mouse":[79,108]},{"name":"box","mouse":[98,105]},{"name":"box","mouse":[178,105]},{"name":"box","mouse":[257,106]},{"name":"box","mouse":[277,108]},{"name":"box","mouse":[285,110]},{"name":"box","mouse":[286,119]},{"name":"box","mouse":[285,218]},{"name":"box","mouse":[285,263]},{"name":"box","mouse":[284,265]},{"name":"box","mouse":[273,264]},{"name":"box","mouse":[197,264]},{"name":"box","mouse":[131,262]},{"name":"box","mouse":[92,257]},{"name":"box","mouse":[88,256]},{"name":"box","mouse":[85,222]},{"name":"box","mouse":[81,172]},{"name":"box","mouse":[79,137]},{"name":"box","mouse":[79,113]},{"name":"box","mouse":[79,105]}], + +[{"name":"box","mouse":[81,122]},{"name":"box","mouse":[91,118]},{"name":"box","mouse":[190,124]},{"name":"box","mouse":[291,129]},{"name":"box","mouse":[309,131]},{"name":"box","mouse":[313,152]},{"name":"box","mouse":[305,201]},{"name":"box","mouse":[298,255]},{"name":"box","mouse":[297,274]},{"name":"box","mouse":[292,276]},{"name":"box","mouse":[238,268]},{"name":"box","mouse":[185,266]},{"name":"box","mouse":[129,266]},{"name":"box","mouse":[101,265]},{"name":"box","mouse":[97,264]},{"name":"box","mouse":[97,219]},{"name":"box","mouse":[97,165]},{"name":"box","mouse":[95,134]},{"name":"box","mouse":[93,122]},{"name":"box","mouse":[93,116]}], + 
+[{"name":"box","mouse":[101,102]},{"name":"box","mouse":[134,100]},{"name":"box","mouse":[237,104]},{"name":"box","mouse":[275,104]},{"name":"box","mouse":[283,105]},{"name":"box","mouse":[285,132]},{"name":"box","mouse":[285,197]},{"name":"box","mouse":[285,237]},{"name":"box","mouse":[286,278]},{"name":"box","mouse":[286,285]},{"name":"box","mouse":[249,284]},{"name":"box","mouse":[186,274]},{"name":"box","mouse":[121,263]},{"name":"box","mouse":[111,262]},{"name":"box","mouse":[108,234]},{"name":"box","mouse":[105,176]},{"name":"box","mouse":[101,130]},{"name":"box","mouse":[97,98]},{"name":"box","mouse":[97,88]},{"name":"box","mouse":[97,88]}]] + +let circles = + +[[{"name":"circle","mouse":[163,93]},{"name":"circle","mouse":[197,92]},{"name":"circle","mouse":[270,122]},{"name":"circle","mouse":[292,160]},{"name":"circle","mouse":[292,226]},{"name":"circle","mouse":[257,278]},{"name":"circle","mouse":[169,300]},{"name":"circle","mouse":[75,283]},{"name":"circle","mouse":[61,234]},{"name":"circle","mouse":[64,192]},{"name":"circle","mouse":[80,134]},{"name":"circle","mouse":[101,97]},{"name":"circle","mouse":[133,82]},{"name":"circle","mouse":[148,81]},{"name":"circle","mouse":[172,83]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[202,90]},{"name":"circle","mouse":[181,85]}], + 
+[{"name":"circle","mouse":[168,63]},{"name":"circle","mouse":[265,93]},{"name":"circle","mouse":[312,152]},{"name":"circle","mouse":[318,206]},{"name":"circle","mouse":[293,265]},{"name":"circle","mouse":[258,294]},{"name":"circle","mouse":[206,301]},{"name":"circle","mouse":[148,278]},{"name":"circle","mouse":[105,240]},{"name":"circle","mouse":[97,193]},{"name":"circle","mouse":[106,153]},{"name":"circle","mouse":[125,111]},{"name":"circle","mouse":[149,81]},{"name":"circle","mouse":[164,73]},{"name":"circle","mouse":[191,68]},{"name":"circle","mouse":[249,73]},{"name":"circle","mouse":[275,91]},{"name":"circle","mouse":[301,126]},{"name":"circle","mouse":[314,169]},{"name":"circle","mouse":[313,222]}], + +[{"name":"circle","mouse":[169,87]},{"name":"circle","mouse":[222,95]},{"name":"circle","mouse":[276,125]},{"name":"circle","mouse":[309,186]},{"name":"circle","mouse":[309,249]},{"name":"circle","mouse":[286,281]},{"name":"circle","mouse":[212,307]},{"name":"circle","mouse":[170,304]},{"name":"circle","mouse":[123,262]},{"name":"circle","mouse":[95,217]},{"name":"circle","mouse":[90,158]},{"name":"circle","mouse":[109,113]},{"name":"circle","mouse":[130,85]},{"name":"circle","mouse":[153,81]},{"name":"circle","mouse":[207,82]},{"name":"circle","mouse":[235,93]},{"name":"circle","mouse":[253,109]},{"name":"circle","mouse":[293,188]},{"name":"circle","mouse":[289,217]},{"name":"circle","mouse":[219,290]}], + 
+[{"name":"circle","mouse":[165,85]},{"name":"circle","mouse":[235,98]},{"name":"circle","mouse":[266,129]},{"name":"circle","mouse":[293,184]},{"name":"circle","mouse":[298,237]},{"name":"circle","mouse":[289,281]},{"name":"circle","mouse":[261,316]},{"name":"circle","mouse":[214,330]},{"name":"circle","mouse":[170,329]},{"name":"circle","mouse":[129,307]},{"name":"circle","mouse":[102,264]},{"name":"circle","mouse":[96,221]},{"name":"circle","mouse":[105,172]},{"name":"circle","mouse":[124,125]},{"name":"circle","mouse":[141,95]},{"name":"circle","mouse":[161,85]},{"name":"circle","mouse":[221,92]},{"name":"circle","mouse":[258,111]},{"name":"circle","mouse":[283,137]},{"name":"circle","mouse":[299,169]}], + +[{"name":"circle","mouse":[187,94]},{"name":"circle","mouse":[255,109]},{"name":"circle","mouse":[296,157]},{"name":"circle","mouse":[321,231]},{"name":"circle","mouse":[317,284]},{"name":"circle","mouse":[293,331]},{"name":"circle","mouse":[259,339]},{"name":"circle","mouse":[169,329]},{"name":"circle","mouse":[129,305]},{"name":"circle","mouse":[101,242]},{"name":"circle","mouse":[98,196]},{"name":"circle","mouse":[117,145]},{"name":"circle","mouse":[144,121]},{"name":"circle","mouse":[180,106]},{"name":"circle","mouse":[207,105]},{"name":"circle","mouse":[239,111]},{"name":"circle","mouse":[261,123]},{"name":"circle","mouse":[285,144]},{"name":"circle","mouse":[302,185]},{"name":"circle","mouse":[307,228]}], + 
+[{"name":"circle","mouse":[181,73]},{"name":"circle","mouse":[242,84]},{"name":"circle","mouse":[301,117]},{"name":"circle","mouse":[336,189]},{"name":"circle","mouse":[344,253]},{"name":"circle","mouse":[321,297]},{"name":"circle","mouse":[270,319]},{"name":"circle","mouse":[204,319]},{"name":"circle","mouse":[139,317]},{"name":"circle","mouse":[73,280]},{"name":"circle","mouse":[57,237]},{"name":"circle","mouse":[58,181]},{"name":"circle","mouse":[76,125]},{"name":"circle","mouse":[97,99]},{"name":"circle","mouse":[125,83]},{"name":"circle","mouse":[159,79]},{"name":"circle","mouse":[179,79]},{"name":"circle","mouse":[197,79]},{"name":"circle","mouse":[218,81]},{"name":"circle","mouse":[237,85]}], + +[{"name":"circle","mouse":[194,41]},{"name":"circle","mouse":[261,46]},{"name":"circle","mouse":[305,80]},{"name":"circle","mouse":[333,133]},{"name":"circle","mouse":[345,185]},{"name":"circle","mouse":[340,252]},{"name":"circle","mouse":[316,290]},{"name":"circle","mouse":[286,312]},{"name":"circle","mouse":[239,320]},{"name":"circle","mouse":[164,305]},{"name":"circle","mouse":[115,273]},{"name":"circle","mouse":[82,221]},{"name":"circle","mouse":[69,181]},{"name":"circle","mouse":[70,129]},{"name":"circle","mouse":[89,91]},{"name":"circle","mouse":[106,69]},{"name":"circle","mouse":[129,50]},{"name":"circle","mouse":[168,39]},{"name":"circle","mouse":[197,38]},{"name":"circle","mouse":[211,38]}], + 
+[{"name":"circle","mouse":[172,36]},{"name":"circle","mouse":[240,42]},{"name":"circle","mouse":[311,70]},{"name":"circle","mouse":[365,129]},{"name":"circle","mouse":[384,188]},{"name":"circle","mouse":[376,238]},{"name":"circle","mouse":[353,285]},{"name":"circle","mouse":[311,322]},{"name":"circle","mouse":[248,330]},{"name":"circle","mouse":[163,304]},{"name":"circle","mouse":[121,265]},{"name":"circle","mouse":[97,183]},{"name":"circle","mouse":[101,132]},{"name":"circle","mouse":[114,91]},{"name":"circle","mouse":[130,69]},{"name":"circle","mouse":[145,58]},{"name":"circle","mouse":[158,53]},{"name":"circle","mouse":[171,44]},{"name":"circle","mouse":[176,41]},{"name":"circle","mouse":[177,41]}], + +[{"name":"circle","mouse":[191,33]},{"name":"circle","mouse":[262,34]},{"name":"circle","mouse":[317,66]},{"name":"circle","mouse":[347,133]},{"name":"circle","mouse":[350,215]},{"name":"circle","mouse":[328,301]},{"name":"circle","mouse":[289,346]},{"name":"circle","mouse":[241,341]},{"name":"circle","mouse":[191,324]},{"name":"circle","mouse":[141,277]},{"name":"circle","mouse":[120,225]},{"name":"circle","mouse":[124,167]},{"name":"circle","mouse":[133,128]},{"name":"circle","mouse":[146,100]},{"name":"circle","mouse":[157,72]},{"name":"circle","mouse":[164,58]},{"name":"circle","mouse":[173,45]},{"name":"circle","mouse":[180,41]},{"name":"circle","mouse":[182,38]},{"name":"circle","mouse":[187,37]}], + 
+[{"name":"circle","mouse":[150,39]},{"name":"circle","mouse":[205,36]},{"name":"circle","mouse":[274,62]},{"name":"circle","mouse":[304,110]},{"name":"circle","mouse":[327,197]},{"name":"circle","mouse":[311,282]},{"name":"circle","mouse":[279,332]},{"name":"circle","mouse":[257,345]},{"name":"circle","mouse":[217,333]},{"name":"circle","mouse":[167,296]},{"name":"circle","mouse":[133,249]},{"name":"circle","mouse":[117,201]},{"name":"circle","mouse":[117,150]},{"name":"circle","mouse":[120,120]},{"name":"circle","mouse":[129,85]},{"name":"circle","mouse":[133,69]},{"name":"circle","mouse":[141,52]},{"name":"circle","mouse":[149,41]},{"name":"circle","mouse":[152,37]},{"name":"circle","mouse":[153,37]}]] + +let classifer; + +function setup() { + ml5.setBackend("webgl"); + createCanvas(400, 400); + frameRate(10); + background(220); + classifer = ml5.timeSeries(); +} + +function draw() { + if (keyIsDown(67) && pressedOnce){ + sequence.push({"name":"circle","mouse": [mouseX,mouseY]}); + ellipse(mouseX,mouseY,10); + if (sequence.length == 20){ + pressedOnce = false; + datasets.push(sequence); + sequence = []; + console.log("finished"); + background(220); + } + + } else if (keyIsDown(66) && pressedOnce){ + sequence.push({"name":"box","mouse": [mouseX,mouseY]}); + ellipse(mouseX,mouseY,10); + if (sequence.length == 20){ + pressedOnce = false; + datasets.push(sequence); + sequence = []; + console.log("finished"); + background(220); + } + } + + if (datasets.length == 10){ + file = JSON.stringify(datasets); + console.log(file); + } +} + +function keyReleased(){ + pressedOnce = true; +} + +function keyPressed(){ + if (key == 't'){ + classifer.createArchitecture(); + console.log('done architecture'); + } else if (key == 'y'){ + classifer.compileModel(); + console.log('done compiling the thing'); + } else if (key == 'u'){ + classifer.summarizeModel(); + console.log('done summarizing'); + } else if (key == 'i'){ + classifer.fitModel(); + console.log('fitting done'); + } +} \ 
No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/sketch.js b/examples/timeSeries-mousexy-keypoints/sketch.js index db4ad823..1447e76e 100644 --- a/examples/timeSeries-mousexy-keypoints/sketch.js +++ b/examples/timeSeries-mousexy-keypoints/sketch.js @@ -1,57 +1,30 @@ + let sequence = []; let datasets = []; -let pressedOnce = true; - -let boxes = - -[[{"name":"box","mouse":[81,102]},{"name":"box","mouse":[85,101]},{"name":"box","mouse":[134,105]},{"name":"box","mouse":[182,106]},{"name":"box","mouse":[219,108]},{"name":"box","mouse":[244,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[263,154]},{"name":"box","mouse":[267,215]},{"name":"box","mouse":[269,235]},{"name":"box","mouse":[269,239]},{"name":"box","mouse":[265,239]},{"name":"box","mouse":[195,236]},{"name":"box","mouse":[147,243]},{"name":"box","mouse":[113,243]},{"name":"box","mouse":[111,242]},{"name":"box","mouse":[106,236]},{"name":"box","mouse":[101,191]},{"name":"box","mouse":[95,162]}], - -[{"name":"box","mouse":[105,97]},{"name":"box","mouse":[149,99]},{"name":"box","mouse":[191,99]},{"name":"box","mouse":[245,99]},{"name":"box","mouse":[257,99]},{"name":"box","mouse":[260,100]},{"name":"box","mouse":[255,144]},{"name":"box","mouse":[249,215]},{"name":"box","mouse":[249,239]},{"name":"box","mouse":[248,240]},{"name":"box","mouse":[200,234]},{"name":"box","mouse":[134,232]},{"name":"box","mouse":[105,232]},{"name":"box","mouse":[105,229]},{"name":"box","mouse":[96,179]},{"name":"box","mouse":[96,121]},{"name":"box","mouse":[97,97]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]}], - 
-[{"name":"box","mouse":[94,94]},{"name":"box","mouse":[133,95]},{"name":"box","mouse":[194,100]},{"name":"box","mouse":[245,100]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[257,132]},{"name":"box","mouse":[253,196]},{"name":"box","mouse":[253,227]},{"name":"box","mouse":[253,228]},{"name":"box","mouse":[241,227]},{"name":"box","mouse":[183,221]},{"name":"box","mouse":[137,219]},{"name":"box","mouse":[105,219]},{"name":"box","mouse":[104,219]},{"name":"box","mouse":[98,188]},{"name":"box","mouse":[97,157]},{"name":"box","mouse":[97,132]},{"name":"box","mouse":[97,112]},{"name":"box","mouse":[98,101]}], - -[{"name":"box","mouse":[88,92]},{"name":"box","mouse":[94,91]},{"name":"box","mouse":[158,94]},{"name":"box","mouse":[209,96]},{"name":"box","mouse":[244,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[244,140]},{"name":"box","mouse":[238,203]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[202,222]},{"name":"box","mouse":[154,220]},{"name":"box","mouse":[96,220]},{"name":"box","mouse":[92,220]},{"name":"box","mouse":[90,217]},{"name":"box","mouse":[89,183]},{"name":"box","mouse":[89,149]},{"name":"box","mouse":[89,124]},{"name":"box","mouse":[89,98]}], - -[{"name":"box","mouse":[98,100]},{"name":"box","mouse":[169,101]},{"name":"box","mouse":[278,104]},{"name":"box","mouse":[310,104]},{"name":"box","mouse":[312,104]},{"name":"box","mouse":[312,148]},{"name":"box","mouse":[313,244]},{"name":"box","mouse":[314,248]},{"name":"box","mouse":[313,250]},{"name":"box","mouse":[222,235]},{"name":"box","mouse":[151,237]},{"name":"box","mouse":[85,239]},{"name":"box","mouse":[82,231]},{"name":"box","mouse":[79,185]},{"name":"box","mouse":[84,140]},{"name":"box","mouse":[93,113]},{"name":"box","mouse":[97,104]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]}], - 
-[{"name":"box","mouse":[69,98]},{"name":"box","mouse":[149,97]},{"name":"box","mouse":[263,97]},{"name":"box","mouse":[303,98]},{"name":"box","mouse":[303,100]},{"name":"box","mouse":[300,201]},{"name":"box","mouse":[306,296]},{"name":"box","mouse":[307,300]},{"name":"box","mouse":[293,296]},{"name":"box","mouse":[199,275]},{"name":"box","mouse":[145,272]},{"name":"box","mouse":[109,272]},{"name":"box","mouse":[99,269]},{"name":"box","mouse":[95,266]},{"name":"box","mouse":[87,206]},{"name":"box","mouse":[82,140]},{"name":"box","mouse":[82,116]},{"name":"box","mouse":[82,102]},{"name":"box","mouse":[82,100]},{"name":"box","mouse":[82,100]}], - -[{"name":"box","mouse":[61,103]},{"name":"box","mouse":[68,100]},{"name":"box","mouse":[145,101]},{"name":"box","mouse":[221,104]},{"name":"box","mouse":[264,104]},{"name":"box","mouse":[291,107]},{"name":"box","mouse":[291,109]},{"name":"box","mouse":[289,177]},{"name":"box","mouse":[281,248]},{"name":"box","mouse":[281,271]},{"name":"box","mouse":[278,272]},{"name":"box","mouse":[230,270]},{"name":"box","mouse":[179,268]},{"name":"box","mouse":[112,268]},{"name":"box","mouse":[83,268]},{"name":"box","mouse":[79,261]},{"name":"box","mouse":[77,211]},{"name":"box","mouse":[77,168]},{"name":"box","mouse":[77,129]},{"name":"box","mouse":[76,117]}], - -[{"name":"box","mouse":[79,108]},{"name":"box","mouse":[98,105]},{"name":"box","mouse":[178,105]},{"name":"box","mouse":[257,106]},{"name":"box","mouse":[277,108]},{"name":"box","mouse":[285,110]},{"name":"box","mouse":[286,119]},{"name":"box","mouse":[285,218]},{"name":"box","mouse":[285,263]},{"name":"box","mouse":[284,265]},{"name":"box","mouse":[273,264]},{"name":"box","mouse":[197,264]},{"name":"box","mouse":[131,262]},{"name":"box","mouse":[92,257]},{"name":"box","mouse":[88,256]},{"name":"box","mouse":[85,222]},{"name":"box","mouse":[81,172]},{"name":"box","mouse":[79,137]},{"name":"box","mouse":[79,113]},{"name":"box","mouse":[79,105]}], - 
-[{"name":"box","mouse":[81,122]},{"name":"box","mouse":[91,118]},{"name":"box","mouse":[190,124]},{"name":"box","mouse":[291,129]},{"name":"box","mouse":[309,131]},{"name":"box","mouse":[313,152]},{"name":"box","mouse":[305,201]},{"name":"box","mouse":[298,255]},{"name":"box","mouse":[297,274]},{"name":"box","mouse":[292,276]},{"name":"box","mouse":[238,268]},{"name":"box","mouse":[185,266]},{"name":"box","mouse":[129,266]},{"name":"box","mouse":[101,265]},{"name":"box","mouse":[97,264]},{"name":"box","mouse":[97,219]},{"name":"box","mouse":[97,165]},{"name":"box","mouse":[95,134]},{"name":"box","mouse":[93,122]},{"name":"box","mouse":[93,116]}], - -[{"name":"box","mouse":[101,102]},{"name":"box","mouse":[134,100]},{"name":"box","mouse":[237,104]},{"name":"box","mouse":[275,104]},{"name":"box","mouse":[283,105]},{"name":"box","mouse":[285,132]},{"name":"box","mouse":[285,197]},{"name":"box","mouse":[285,237]},{"name":"box","mouse":[286,278]},{"name":"box","mouse":[286,285]},{"name":"box","mouse":[249,284]},{"name":"box","mouse":[186,274]},{"name":"box","mouse":[121,263]},{"name":"box","mouse":[111,262]},{"name":"box","mouse":[108,234]},{"name":"box","mouse":[105,176]},{"name":"box","mouse":[101,130]},{"name":"box","mouse":[97,98]},{"name":"box","mouse":[97,88]},{"name":"box","mouse":[97,88]}]] - -let circles = - 
-[[{"name":"circle","mouse":[163,93]},{"name":"circle","mouse":[197,92]},{"name":"circle","mouse":[270,122]},{"name":"circle","mouse":[292,160]},{"name":"circle","mouse":[292,226]},{"name":"circle","mouse":[257,278]},{"name":"circle","mouse":[169,300]},{"name":"circle","mouse":[75,283]},{"name":"circle","mouse":[61,234]},{"name":"circle","mouse":[64,192]},{"name":"circle","mouse":[80,134]},{"name":"circle","mouse":[101,97]},{"name":"circle","mouse":[133,82]},{"name":"circle","mouse":[148,81]},{"name":"circle","mouse":[172,83]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[202,90]},{"name":"circle","mouse":[181,85]}], - -[{"name":"circle","mouse":[168,63]},{"name":"circle","mouse":[265,93]},{"name":"circle","mouse":[312,152]},{"name":"circle","mouse":[318,206]},{"name":"circle","mouse":[293,265]},{"name":"circle","mouse":[258,294]},{"name":"circle","mouse":[206,301]},{"name":"circle","mouse":[148,278]},{"name":"circle","mouse":[105,240]},{"name":"circle","mouse":[97,193]},{"name":"circle","mouse":[106,153]},{"name":"circle","mouse":[125,111]},{"name":"circle","mouse":[149,81]},{"name":"circle","mouse":[164,73]},{"name":"circle","mouse":[191,68]},{"name":"circle","mouse":[249,73]},{"name":"circle","mouse":[275,91]},{"name":"circle","mouse":[301,126]},{"name":"circle","mouse":[314,169]},{"name":"circle","mouse":[313,222]}], - 
-[{"name":"circle","mouse":[169,87]},{"name":"circle","mouse":[222,95]},{"name":"circle","mouse":[276,125]},{"name":"circle","mouse":[309,186]},{"name":"circle","mouse":[309,249]},{"name":"circle","mouse":[286,281]},{"name":"circle","mouse":[212,307]},{"name":"circle","mouse":[170,304]},{"name":"circle","mouse":[123,262]},{"name":"circle","mouse":[95,217]},{"name":"circle","mouse":[90,158]},{"name":"circle","mouse":[109,113]},{"name":"circle","mouse":[130,85]},{"name":"circle","mouse":[153,81]},{"name":"circle","mouse":[207,82]},{"name":"circle","mouse":[235,93]},{"name":"circle","mouse":[253,109]},{"name":"circle","mouse":[293,188]},{"name":"circle","mouse":[289,217]},{"name":"circle","mouse":[219,290]}], - -[{"name":"circle","mouse":[165,85]},{"name":"circle","mouse":[235,98]},{"name":"circle","mouse":[266,129]},{"name":"circle","mouse":[293,184]},{"name":"circle","mouse":[298,237]},{"name":"circle","mouse":[289,281]},{"name":"circle","mouse":[261,316]},{"name":"circle","mouse":[214,330]},{"name":"circle","mouse":[170,329]},{"name":"circle","mouse":[129,307]},{"name":"circle","mouse":[102,264]},{"name":"circle","mouse":[96,221]},{"name":"circle","mouse":[105,172]},{"name":"circle","mouse":[124,125]},{"name":"circle","mouse":[141,95]},{"name":"circle","mouse":[161,85]},{"name":"circle","mouse":[221,92]},{"name":"circle","mouse":[258,111]},{"name":"circle","mouse":[283,137]},{"name":"circle","mouse":[299,169]}], - 
-[{"name":"circle","mouse":[187,94]},{"name":"circle","mouse":[255,109]},{"name":"circle","mouse":[296,157]},{"name":"circle","mouse":[321,231]},{"name":"circle","mouse":[317,284]},{"name":"circle","mouse":[293,331]},{"name":"circle","mouse":[259,339]},{"name":"circle","mouse":[169,329]},{"name":"circle","mouse":[129,305]},{"name":"circle","mouse":[101,242]},{"name":"circle","mouse":[98,196]},{"name":"circle","mouse":[117,145]},{"name":"circle","mouse":[144,121]},{"name":"circle","mouse":[180,106]},{"name":"circle","mouse":[207,105]},{"name":"circle","mouse":[239,111]},{"name":"circle","mouse":[261,123]},{"name":"circle","mouse":[285,144]},{"name":"circle","mouse":[302,185]},{"name":"circle","mouse":[307,228]}], +let classifer; +let tensorData; -[{"name":"circle","mouse":[181,73]},{"name":"circle","mouse":[242,84]},{"name":"circle","mouse":[301,117]},{"name":"circle","mouse":[336,189]},{"name":"circle","mouse":[344,253]},{"name":"circle","mouse":[321,297]},{"name":"circle","mouse":[270,319]},{"name":"circle","mouse":[204,319]},{"name":"circle","mouse":[139,317]},{"name":"circle","mouse":[73,280]},{"name":"circle","mouse":[57,237]},{"name":"circle","mouse":[58,181]},{"name":"circle","mouse":[76,125]},{"name":"circle","mouse":[97,99]},{"name":"circle","mouse":[125,83]},{"name":"circle","mouse":[159,79]},{"name":"circle","mouse":[179,79]},{"name":"circle","mouse":[197,79]},{"name":"circle","mouse":[218,81]},{"name":"circle","mouse":[237,85]}], +let pressedOnce = true; +let w =400 +let h = 400; +let type = ["box","circle"] 
-[{"name":"circle","mouse":[194,41]},{"name":"circle","mouse":[261,46]},{"name":"circle","mouse":[305,80]},{"name":"circle","mouse":[333,133]},{"name":"circle","mouse":[345,185]},{"name":"circle","mouse":[340,252]},{"name":"circle","mouse":[316,290]},{"name":"circle","mouse":[286,312]},{"name":"circle","mouse":[239,320]},{"name":"circle","mouse":[164,305]},{"name":"circle","mouse":[115,273]},{"name":"circle","mouse":[82,221]},{"name":"circle","mouse":[69,181]},{"name":"circle","mouse":[70,129]},{"name":"circle","mouse":[89,91]},{"name":"circle","mouse":[106,69]},{"name":"circle","mouse":[129,50]},{"name":"circle","mouse":[168,39]},{"name":"circle","mouse":[197,38]},{"name":"circle","mouse":[211,38]}], +let circle = [[[0.48,0.165],[0.555,0.175],[0.615,0.1875],[0.715,0.2475],[0.78,0.35],[0.7825,0.545],[0.7225,0.6375],[0.615,0.7175],[0.5175,0.74],[0.395,0.715],[0.25,0.6425],[0.2,0.5375],[0.205,0.44],[0.2425,0.345],[0.3,0.245],[0.3425,0.2025],[0.43,0.1675],[0.495,0.165],[0.5025,0.165],[0.5025,0.165]],[[0.47,0.16],[0.4925,0.1575],[0.6,0.1725],[0.6875,0.235],[0.7925,0.35],[0.8275,0.455],[0.7825,0.585],[0.715,0.695],[0.5825,0.725],[0.3725,0.695],[0.2275,0.6075],[0.1875,0.4775],[0.205,0.3975],[0.2625,0.3],[0.3525,0.21],[0.39,0.18],[0.4275,0.17],[0.4625,0.16],[0.4725,0.1575],[0.475,0.1575]],[[0.4675,0.145],[0.5375,0.15],[0.64,0.205],[0.7375,0.2925],[0.7875,0.4275],[0.7625,0.53],[0.6725,0.65],[0.55,0.7025],[0.4025,0.6975],[0.2975,0.6325],[0.25,0.5375],[0.24,0.4225],[0.265,0.3575],[0.3125,0.2875],[0.3675,0.225],[0.4025,0.1875],[0.4225,0.1625],[0.45,0.1475],[0.4675,0.1475],[0.4775,0.1475]],[[0.5525,0.1475],[0.6825,0.195],[0.7825,0.2775],[0.83,0.39],[0.8225,0.5075],[0.7825,0.62],[0.705,0.6925],[0.4875,0.76],[0.315,0.7625],[0.23,0.7125],[0.2025,0.605],[0.21,0.4625],[0.2575,0.3125],[0.36,0.1875],[0.4625,0.1425],[0.51,0.14],[0.5375,0.14],[0.5525,0.14],[0.5625,0.14],[0.5625,0.14]],[[0.5725,0.1325],[0.6825,0.155],[0.7625,0.2275],[0.8275,0.3525],[0.8325,0.48],[0.81,0.5925],[0.7675,0.6
85],[0.6425,0.735],[0.4725,0.76],[0.3375,0.715],[0.2775,0.625],[0.27,0.5225],[0.27,0.4325],[0.3025,0.35],[0.3625,0.27],[0.42,0.1975],[0.4625,0.165],[0.5175,0.1375],[0.5575,0.1275],[0.57,0.1275]],[[0.5225,0.1425],[0.62,0.1375],[0.7725,0.19],[0.8325,0.2775],[0.87,0.4275],[0.8375,0.6075],[0.81,0.67],[0.755,0.7075],[0.5525,0.715],[0.4175,0.7075],[0.2775,0.5975],[0.2625,0.48],[0.275,0.405],[0.3225,0.31],[0.3625,0.2525],[0.3925,0.2175],[0.425,0.1875],[0.4675,0.1575],[0.49,0.1475],[0.5025,0.145]],[[0.4875,0.135],[0.6625,0.1525],[0.765,0.2175],[0.8325,0.345],[0.8525,0.495],[0.845,0.64],[0.805,0.7075],[0.715,0.7525],[0.575,0.7775],[0.45,0.7775],[0.3475,0.7375],[0.285,0.675],[0.2625,0.555],[0.2825,0.4125],[0.32,0.315],[0.3625,0.2275],[0.3925,0.1975],[0.425,0.175],[0.4425,0.1675],[0.455,0.1575]],[[0.4025,0.1675],[0.455,0.1525],[0.5625,0.1525],[0.665,0.185],[0.7625,0.2475],[0.8225,0.355],[0.8475,0.5075],[0.83,0.6475],[0.7425,0.725],[0.5775,0.7925],[0.435,0.79],[0.2775,0.7325],[0.2075,0.6275],[0.2025,0.53],[0.2025,0.4525],[0.2225,0.36],[0.2575,0.3],[0.3,0.2475],[0.33,0.225],[0.36,0.1975]],[[0.4925,0.1525],[0.5925,0.1425],[0.69,0.1825],[0.7825,0.2425],[0.8325,0.3125],[0.87,0.45],[0.8825,0.6275],[0.8575,0.76],[0.7825,0.82],[0.5825,0.865],[0.435,0.865],[0.3525,0.8275],[0.2825,0.7075],[0.2675,0.6075],[0.2775,0.4825],[0.2975,0.3925],[0.335,0.31],[0.3725,0.2375],[0.4025,0.2025],[0.43,0.1775]],[[0.4725,0.15],[0.5825,0.1475],[0.6875,0.2],[0.7775,0.2875],[0.82,0.4],[0.8225,0.5575],[0.7525,0.7075],[0.665,0.7725],[0.555,0.8025],[0.395,0.7675],[0.305,0.68],[0.255,0.56],[0.245,0.42],[0.2675,0.34],[0.3325,0.235],[0.365,0.2],[0.4025,0.1725],[0.445,0.15],[0.475,0.1425],[0.5125,0.1425]],[[0.485,0.1375],[0.6175,0.1375],[0.73,0.205],[0.795,0.2875],[0.8175,0.4275],[0.8125,0.5525],[0.7825,0.635],[0.675,0.7125],[0.52,0.7425],[0.33,0.6975],[0.2625,0.5975],[0.2525,0.4975],[0.2675,0.3675],[0.2975,0.2775],[0.3425,0.2175],[0.4125,0.17],[0.4525,0.155],[0.495,0.14],[0.5125,0.14],[0.515,0.14]],[[0.5125,0.132
5],[0.6425,0.15],[0.7425,0.225],[0.81,0.43],[0.81,0.6025],[0.74,0.7875],[0.6075,0.8425],[0.4375,0.86],[0.3,0.8125],[0.225,0.71],[0.1875,0.605],[0.2125,0.4825],[0.2625,0.3775],[0.3575,0.2625],[0.4025,0.205],[0.4425,0.1675],[0.515,0.1425],[0.5525,0.135],[0.5625,0.135],[0.5625,0.135]],[[0.5375,0.1375],[0.6275,0.16],[0.7475,0.235],[0.8025,0.3775],[0.8225,0.5725],[0.77,0.7225],[0.6675,0.835],[0.5475,0.8675],[0.38,0.835],[0.2525,0.7],[0.2125,0.5675],[0.2225,0.4375],[0.2625,0.3275],[0.3225,0.2425],[0.395,0.1875],[0.475,0.1625],[0.5275,0.15],[0.5425,0.1475],[0.5525,0.145],[0.5575,0.145]],[[0.53,0.1375],[0.635,0.1525],[0.725,0.2175],[0.8225,0.39],[0.825,0.6025],[0.76,0.79],[0.6625,0.8375],[0.545,0.84],[0.4075,0.8225],[0.3425,0.775],[0.285,0.6675],[0.28,0.5375],[0.295,0.3975],[0.345,0.315],[0.3775,0.2675],[0.4175,0.2175],[0.4575,0.1675],[0.4875,0.1475],[0.5075,0.1375],[0.52,0.1375]],[[0.4775,0.165],[0.5475,0.1575],[0.63,0.1775],[0.6675,0.2025],[0.6975,0.235],[0.7375,0.3225],[0.7525,0.465],[0.7225,0.5775],[0.655,0.6525],[0.5525,0.67],[0.425,0.655],[0.3275,0.59],[0.28,0.4775],[0.275,0.35],[0.305,0.2825],[0.34,0.235],[0.3775,0.1975],[0.455,0.1575],[0.475,0.15],[0.495,0.1475]],[[0.4375,0.165],[0.5425,0.1475],[0.635,0.1725],[0.685,0.2175],[0.735,0.305],[0.7475,0.4],[0.7125,0.4825],[0.64,0.5375],[0.5325,0.555],[0.3825,0.535],[0.3,0.48],[0.2825,0.41],[0.2725,0.3325],[0.2775,0.2825],[0.3,0.245],[0.34,0.2075],[0.3575,0.195],[0.4,0.17],[0.4225,0.1575],[0.465,0.1475]],[[0.4625,0.145],[0.5575,0.15],[0.6575,0.1975],[0.7275,0.2625],[0.755,0.3525],[0.7425,0.4775],[0.6825,0.595],[0.595,0.635],[0.375,0.64],[0.275,0.6175],[0.205,0.54],[0.1825,0.4075],[0.1925,0.3025],[0.2325,0.255],[0.2825,0.22],[0.3525,0.1775],[0.3875,0.1575],[0.4325,0.1425],[0.4725,0.14],[0.5025,0.1475]],[[0.4625,0.1175],[0.5325,0.1125],[0.6425,0.1425],[0.715,0.21],[0.765,0.3175],[0.7775,0.46],[0.7325,0.5775],[0.6175,0.63],[0.4775,0.655],[0.3675,0.635],[0.2725,0.58],[0.2075,0.4875],[0.22,0.385],[0.275,0.2725],[0.3525,0.2],[0.
3825,0.1675],[0.42,0.1375],[0.455,0.1175],[0.475,0.1125],[0.4925,0.1125]],[[0.5425,0.125],[0.7125,0.165],[0.7925,0.26],[0.8275,0.415],[0.8175,0.5225],[0.725,0.6125],[0.615,0.675],[0.465,0.7175],[0.3725,0.6825],[0.32,0.6225],[0.2975,0.515],[0.31,0.4],[0.3325,0.33],[0.37,0.2775],[0.4275,0.225],[0.465,0.1925],[0.535,0.1375],[0.5625,0.1225],[0.5725,0.12],[0.5725,0.12]],[[0.505,0.1225],[0.5725,0.13],[0.6825,0.1975],[0.74,0.2925],[0.7375,0.485],[0.67,0.5875],[0.545,0.6775],[0.3825,0.6875],[0.265,0.6375],[0.2125,0.52],[0.215,0.41],[0.24,0.32],[0.28,0.2375],[0.33,0.17],[0.3625,0.1475],[0.3925,0.1425],[0.4475,0.1375],[0.485,0.1275],[0.5025,0.1175],[0.51,0.1175]],[[0.5525,0.1475],[0.6875,0.1825],[0.7425,0.245],[0.7825,0.3475],[0.765,0.475],[0.6625,0.59],[0.545,0.655],[0.385,0.6525],[0.3125,0.6025],[0.28,0.4675],[0.2925,0.35],[0.33,0.2775],[0.3525,0.2425],[0.3925,0.2025],[0.4325,0.1825],[0.485,0.17],[0.5225,0.165],[0.5525,0.1575],[0.56,0.1575],[0.56,0.1575]],[[0.5,0.1675],[0.605,0.17],[0.6825,0.2025],[0.7325,0.2675],[0.7625,0.3875],[0.7525,0.5175],[0.6975,0.6025],[0.6075,0.6475],[0.4625,0.66],[0.3475,0.61],[0.295,0.505],[0.2825,0.375],[0.31,0.2675],[0.36,0.1875],[0.41,0.1525],[0.4725,0.1475],[0.5175,0.1575],[0.5475,0.1625],[0.5525,0.1625],[0.5525,0.165]],[[0.4375,0.1525],[0.4775,0.1525],[0.5875,0.1825],[0.6425,0.255],[0.69,0.385],[0.6375,0.5825],[0.5425,0.68],[0.4375,0.7175],[0.3275,0.6875],[0.2625,0.5975],[0.2325,0.4875],[0.24,0.3175],[0.2575,0.25],[0.3125,0.2],[0.375,0.175],[0.4125,0.165],[0.4375,0.1575],[0.4575,0.155],[0.46,0.155],[0.4625,0.1525]],[[0.41,0.1475],[0.5425,0.145],[0.6275,0.1675],[0.7125,0.24],[0.7625,0.3625],[0.75,0.5125],[0.6625,0.6125],[0.4525,0.655],[0.38,0.6375],[0.3075,0.54],[0.2825,0.4175],[0.2975,0.3225],[0.345,0.2475],[0.375,0.2],[0.405,0.17],[0.4325,0.1525],[0.4625,0.145],[0.495,0.145],[0.5225,0.1475],[0.5325,0.15]],[[0.445,0.1375],[0.545,0.135],[0.6225,0.1575],[0.695,0.1975],[0.7325,0.2875],[0.7675,0.4275],[0.75,0.5725],[0.6075,0.705],[0.49,0.7075],[
0.2875,0.65],[0.1725,0.5225],[0.1625,0.415],[0.21,0.2925],[0.2625,0.2275],[0.3225,0.1775],[0.37,0.1625],[0.4025,0.1575],[0.445,0.1575],[0.4925,0.155],[0.53,0.1525]],[[0.5825,0.1325],[0.7375,0.1675],[0.8,0.255],[0.81,0.44],[0.75,0.5975],[0.6425,0.685],[0.4825,0.7175],[0.3375,0.6775],[0.2425,0.595],[0.1875,0.495],[0.1775,0.3925],[0.2175,0.3125],[0.265,0.2475],[0.3225,0.1975],[0.365,0.1725],[0.42,0.1425],[0.4875,0.1],[0.59,0.0825],[0.6325,0.0825],[0.635,0.0825]],[[0.445,0.1675],[0.5625,0.16],[0.65,0.195],[0.72,0.295],[0.745,0.425],[0.7175,0.5625],[0.6125,0.6675],[0.475,0.7],[0.3525,0.685],[0.1625,0.61],[0.1125,0.5575],[0.105,0.475],[0.1625,0.3775],[0.2125,0.3175],[0.2925,0.2475],[0.3575,0.2025],[0.4275,0.1775],[0.475,0.1625],[0.4925,0.1575],[0.5025,0.1575]],[[0.515,0.155],[0.5725,0.1625],[0.71,0.2375],[0.78,0.3375],[0.795,0.4375],[0.77,0.5875],[0.665,0.6975],[0.5,0.7225],[0.4075,0.7025],[0.3025,0.6025],[0.2425,0.485],[0.2425,0.385],[0.285,0.2975],[0.325,0.2475],[0.38,0.2075],[0.4275,0.1875],[0.445,0.18],[0.5,0.1675],[0.55,0.165],[0.5725,0.1625]],[[0.4675,0.15],[0.5525,0.14],[0.6675,0.185],[0.7525,0.2675],[0.7925,0.37],[0.7925,0.4875],[0.6925,0.6175],[0.52,0.69],[0.36,0.6725],[0.2275,0.595],[0.1825,0.4775],[0.18,0.37],[0.2125,0.2975],[0.2725,0.2275],[0.355,0.175],[0.41,0.15],[0.4525,0.1425],[0.495,0.1375],[0.5125,0.1375],[0.515,0.1375]],[[0.5075,0.1375],[0.6,0.1375],[0.6825,0.1775],[0.7525,0.2575],[0.7875,0.3725],[0.7875,0.52],[0.6925,0.6375],[0.545,0.7],[0.395,0.66],[0.3025,0.5375],[0.2825,0.425],[0.31,0.2975],[0.3575,0.2125],[0.4025,0.1775],[0.46,0.1625],[0.52,0.1575],[0.555,0.1575],[0.5725,0.1575],[0.5725,0.1575],[0.5725,0.1575]]]; 
-[{"name":"circle","mouse":[172,36]},{"name":"circle","mouse":[240,42]},{"name":"circle","mouse":[311,70]},{"name":"circle","mouse":[365,129]},{"name":"circle","mouse":[384,188]},{"name":"circle","mouse":[376,238]},{"name":"circle","mouse":[353,285]},{"name":"circle","mouse":[311,322]},{"name":"circle","mouse":[248,330]},{"name":"circle","mouse":[163,304]},{"name":"circle","mouse":[121,265]},{"name":"circle","mouse":[97,183]},{"name":"circle","mouse":[101,132]},{"name":"circle","mouse":[114,91]},{"name":"circle","mouse":[130,69]},{"name":"circle","mouse":[145,58]},{"name":"circle","mouse":[158,53]},{"name":"circle","mouse":[171,44]},{"name":"circle","mouse":[176,41]},{"name":"circle","mouse":[177,41]}], +let boxes = [[[0.4425,0.75],[0.46,0.7475],[0.545,0.7475],[0.6525,0.7475],[0.6825,0.7475],[0.68,0.78],[0.675,0.8875],[0.675,0.92],[0.665,0.93],[0.4825,0.9175],[0.41,0.925],[0.405,0.925],[0.4125,0.8375],[0.4275,0.7875],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7525]],[[0.4475,0.7375],[0.5425,0.74],[0.6525,0.74],[0.6925,0.74],[0.6925,0.7775],[0.69,0.86],[0.6925,0.915],[0.6875,0.915],[0.5875,0.9125],[0.4775,0.9125],[0.4525,0.9125],[0.4525,0.8875],[0.45,0.8075],[0.4525,0.7675],[0.4525,0.7525],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475]],[[0.4575,0.7425],[0.485,0.74],[0.6025,0.7425],[0.7025,0.7425],[0.7325,0.745],[0.7375,0.8175],[0.7425,0.9175],[0.7425,0.9225],[0.7225,0.92],[0.56,0.91],[0.4375,0.9125],[0.43,0.915],[0.4275,0.8825],[0.4375,0.8175],[0.445,0.77],[0.4475,0.7625],[0.45,0.7575],[0.45,0.7525],[0.45,0.75],[0.45,0.75]],[[0.43,0.7375],[0.4425,0.73],[0.565,0.73],[0.6575,0.73],[0.705,0.73],[0.7075,0.7325],[0.7075,0.82],[0.7025,0.8825],[0.7,0.8875],[0.66,0.885],[0.5475,0.8825],[0.425,0.8825],[0.4,0.8825],[0.3975,0.86],[0.395,0.81],[0.4075,0.77],[0.42,0.7425],[0.4225,0.7375],[0.4225,0.7375],[0.4225,0.7375]],[[0.3825,0.7025],[0.3875,0.7025],[0.56,0.7],[0.68,0.6975],[0.73,0.6925],[0.7325,0
.6925],[0.7175,0.7575],[0.7025,0.8475],[0.7,0.8925],[0.6875,0.895],[0.58,0.875],[0.4675,0.87],[0.41,0.87],[0.4025,0.8725],[0.39,0.8075],[0.3875,0.76],[0.3825,0.7225],[0.38,0.7025],[0.38,0.6975],[0.38,0.6975]],[[0.4275,0.6975],[0.6425,0.6975],[0.75,0.6975],[0.7675,0.6975],[0.7675,0.72],[0.7625,0.8025],[0.76,0.865],[0.7525,0.8775],[0.665,0.86],[0.55,0.8575],[0.4725,0.8575],[0.435,0.86],[0.43,0.8625],[0.4175,0.7925],[0.415,0.7275],[0.415,0.71],[0.415,0.6975],[0.415,0.6975],[0.415,0.6975],[0.415,0.6975]],[[0.415,0.6975],[0.485,0.6925],[0.5925,0.6925],[0.7325,0.695],[0.7425,0.6975],[0.7425,0.73],[0.735,0.81],[0.7325,0.87],[0.73,0.895],[0.6925,0.8975],[0.6025,0.8925],[0.515,0.8925],[0.455,0.8875],[0.44,0.8875],[0.435,0.855],[0.4375,0.79],[0.43,0.7475],[0.42,0.705],[0.4175,0.7],[0.4175,0.6975]],[[0.4375,0.69],[0.5725,0.6875],[0.6725,0.6875],[0.7525,0.6875],[0.7625,0.6875],[0.7625,0.7075],[0.755,0.775],[0.7525,0.815],[0.7425,0.8475],[0.7275,0.8675],[0.66,0.86],[0.5625,0.855],[0.4725,0.85],[0.445,0.8525],[0.4375,0.8375],[0.4275,0.7675],[0.4225,0.7275],[0.425,0.7025],[0.4275,0.6975],[0.4275,0.6975]],[[0.4175,0.7125],[0.44,0.7075],[0.5625,0.7075],[0.6875,0.7075],[0.7575,0.7075],[0.76,0.7075],[0.7625,0.745],[0.7625,0.8125],[0.7575,0.8625],[0.7425,0.8875],[0.7325,0.8875],[0.645,0.8675],[0.5525,0.865],[0.4725,0.8675],[0.415,0.875],[0.4125,0.8675],[0.4125,0.8075],[0.4125,0.7675],[0.4125,0.74],[0.4125,0.7275]],[[0.41,0.7225],[0.42,0.72],[0.5575,0.72],[0.665,0.7225],[0.695,0.7225],[0.695,0.7275],[0.695,0.785],[0.7025,0.8575],[0.7025,0.9],[0.7,0.9025],[0.65,0.89],[0.54,0.8775],[0.4525,0.88],[0.435,0.885],[0.4175,0.86],[0.405,0.795],[0.4,0.7525],[0.4,0.7375],[0.4025,0.7225],[0.4025,0.7225]],[[0.405,0.7175],[0.47,0.7125],[0.6175,0.7125],[0.6875,0.7125],[0.715,0.7125],[0.715,0.7125],[0.705,0.765],[0.7025,0.815],[0.6975,0.865],[0.685,0.89],[0.6575,0.8875],[0.5575,0.8825],[0.4825,0.8825],[0.42,0.885],[0.415,0.8825],[0.415,0.815],[0.4075,0.7525],[0.405,0.7275],[0.405,0.7175],[0.405,0.7175]
],[[0.405,0.7175],[0.4375,0.705],[0.5525,0.705],[0.66,0.705],[0.6875,0.705],[0.7,0.7075],[0.7025,0.725],[0.7,0.7925],[0.7,0.85],[0.695,0.8825],[0.6925,0.8875],[0.6625,0.8775],[0.5775,0.8675],[0.4825,0.8675],[0.3975,0.875],[0.3775,0.875],[0.3775,0.8375],[0.38,0.785],[0.3825,0.7475],[0.39,0.7275]],[[0.355,0.735],[0.3625,0.7325],[0.44,0.7275],[0.525,0.7275],[0.665,0.73],[0.6775,0.7325],[0.6775,0.7325],[0.675,0.7625],[0.6725,0.83],[0.6725,0.875],[0.6675,0.895],[0.6625,0.905],[0.6425,0.9025],[0.545,0.8875],[0.4625,0.8875],[0.4225,0.89],[0.4025,0.895],[0.375,0.8275],[0.365,0.7725],[0.3625,0.7575]],[[0.38,0.7475],[0.4125,0.7475],[0.47,0.7475],[0.545,0.745],[0.6425,0.745],[0.675,0.745],[0.6775,0.75],[0.685,0.8175],[0.695,0.8675],[0.695,0.9],[0.695,0.905],[0.635,0.8975],[0.54,0.895],[0.42,0.9075],[0.385,0.9075],[0.3725,0.8975],[0.3725,0.8525],[0.3775,0.815],[0.38,0.7925],[0.38,0.7675]],[[0.375,0.7625],[0.395,0.7575],[0.4825,0.7525],[0.56,0.7525],[0.6425,0.7525],[0.675,0.755],[0.685,0.7575],[0.6925,0.7875],[0.6925,0.875],[0.695,0.9025],[0.6825,0.9075],[0.6,0.9075],[0.5125,0.9075],[0.41,0.9075],[0.3775,0.9075],[0.3725,0.885],[0.38,0.84],[0.385,0.7875],[0.385,0.7575],[0.385,0.7575]],[[0.39,0.7525],[0.46,0.745],[0.5525,0.74],[0.6525,0.7375],[0.6625,0.7375],[0.6625,0.755],[0.65,0.815],[0.6475,0.8675],[0.645,0.8875],[0.64,0.9075],[0.6325,0.9075],[0.5925,0.8925],[0.5325,0.8875],[0.42,0.8875],[0.3775,0.8875],[0.3725,0.87],[0.3825,0.8225],[0.3925,0.7875],[0.3925,0.7675],[0.3925,0.76]],[[0.41,0.74],[0.4575,0.7375],[0.5125,0.7375],[0.625,0.7375],[0.65,0.7375],[0.6775,0.7425],[0.6825,0.7475],[0.6825,0.76],[0.6775,0.8125],[0.6725,0.8475],[0.6675,0.87],[0.6625,0.875],[0.5825,0.8675],[0.4925,0.8675],[0.4275,0.87],[0.415,0.87],[0.4125,0.8675],[0.4125,0.815],[0.41,0.7875],[0.405,0.7575]],[[0.405,0.7175],[0.4675,0.715],[0.575,0.715],[0.6475,0.7175],[0.7025,0.7175],[0.705,0.725],[0.705,0.7675],[0.7,0.8475],[0.6875,0.8875],[0.6775,0.9],[0.625,0.8975],[0.5525,0.8875],[0.47,0.8875],[0.4075,0.88],
[0.3925,0.855],[0.385,0.8075],[0.3825,0.7675],[0.3925,0.7225],[0.395,0.71],[0.395,0.71]],[[0.395,0.7225],[0.4125,0.7175],[0.4775,0.7175],[0.5675,0.7175],[0.635,0.7175],[0.65,0.7175],[0.655,0.725],[0.66,0.7725],[0.66,0.8125],[0.655,0.8425],[0.65,0.855],[0.6175,0.8475],[0.5325,0.84],[0.475,0.84],[0.415,0.84],[0.375,0.84],[0.37,0.8175],[0.37,0.7775],[0.3725,0.7475],[0.375,0.7275]],[[0.37,0.7375],[0.3925,0.7325],[0.525,0.7275],[0.5825,0.7275],[0.6325,0.7275],[0.655,0.7275],[0.655,0.74],[0.655,0.7875],[0.6475,0.825],[0.645,0.855],[0.645,0.885],[0.6375,0.8925],[0.5725,0.8825],[0.455,0.8775],[0.3925,0.8775],[0.355,0.875],[0.3525,0.865],[0.3625,0.8125],[0.375,0.7775],[0.3825,0.7475]],[[0.39,0.7075],[0.4725,0.705],[0.5575,0.705],[0.67,0.715],[0.6925,0.7175],[0.6975,0.72],[0.695,0.78],[0.6925,0.845],[0.6925,0.89],[0.6925,0.8975],[0.6225,0.8875],[0.525,0.88],[0.445,0.8775],[0.4125,0.8775],[0.3975,0.8575],[0.395,0.7875],[0.3925,0.7425],[0.39,0.7025],[0.39,0.6925],[0.39,0.6925]],[[0.3925,0.73],[0.3975,0.7325],[0.5075,0.73],[0.5875,0.73],[0.6525,0.73],[0.7,0.7275],[0.71,0.7275],[0.71,0.7775],[0.6975,0.85],[0.6875,0.8875],[0.685,0.89],[0.615,0.885],[0.515,0.88],[0.445,0.88],[0.4225,0.88],[0.4175,0.85],[0.4125,0.7925],[0.41,0.7675],[0.4025,0.7425],[0.4025,0.7325]],[[0.4075,0.73],[0.4825,0.7275],[0.585,0.7275],[0.6975,0.7275],[0.7125,0.73],[0.7225,0.7325],[0.725,0.7675],[0.7225,0.8275],[0.7225,0.865],[0.7225,0.875],[0.715,0.8775],[0.61,0.86],[0.5525,0.86],[0.48,0.86],[0.445,0.86],[0.4325,0.8275],[0.4275,0.7775],[0.4225,0.7575],[0.415,0.7375],[0.415,0.73]],[[0.4125,0.7275],[0.425,0.7275],[0.4625,0.7275],[0.515,0.725],[0.5625,0.7225],[0.61,0.7225],[0.6425,0.7225],[0.6525,0.7425],[0.655,0.7975],[0.655,0.845],[0.655,0.875],[0.6275,0.88],[0.555,0.88],[0.4825,0.88],[0.44,0.88],[0.4225,0.855],[0.415,0.7975],[0.4125,0.7525],[0.4125,0.7275],[0.4125,0.7275]],[[0.4225,0.7425],[0.4825,0.7375],[0.5825,0.7325],[0.6425,0.7325],[0.6675,0.7325],[0.6775,0.74],[0.6775,0.795],[0.6725,0.845],[0.6675,0.8
775],[0.665,0.8875],[0.6425,0.89],[0.5575,0.88],[0.4725,0.8775],[0.4275,0.875],[0.4125,0.86],[0.405,0.805],[0.405,0.7775],[0.4125,0.7475],[0.4125,0.7375],[0.4125,0.7375]],[[0.4125,0.7575],[0.4225,0.7475],[0.495,0.7425],[0.5825,0.7425],[0.6625,0.74],[0.695,0.74],[0.7025,0.7475],[0.7025,0.7975],[0.7025,0.845],[0.6975,0.865],[0.6925,0.87],[0.65,0.8675],[0.555,0.86],[0.4825,0.86],[0.41,0.86],[0.395,0.8625],[0.39,0.82],[0.3975,0.775],[0.4025,0.7575],[0.405,0.7475]],[[0.3775,0.7375],[0.4025,0.73],[0.4725,0.7275],[0.5725,0.7275],[0.6275,0.7275],[0.6425,0.7275],[0.6575,0.73],[0.66,0.7575],[0.66,0.8075],[0.65,0.8775],[0.6475,0.895],[0.605,0.8825],[0.53,0.875],[0.4375,0.87],[0.405,0.87],[0.39,0.855],[0.3825,0.8075],[0.3825,0.7725],[0.3875,0.75],[0.39,0.7475]],[[0.3475,0.75],[0.4325,0.745],[0.52,0.745],[0.59,0.75],[0.605,0.7525],[0.6125,0.78],[0.6125,0.835],[0.61,0.8775],[0.61,0.89],[0.56,0.8925],[0.4625,0.8775],[0.385,0.8775],[0.37,0.8775],[0.3625,0.8775],[0.3525,0.8275],[0.3475,0.7675],[0.35,0.7475],[0.35,0.7475],[0.35,0.7475],[0.3575,0.7525]],[[0.4,0.7475],[0.455,0.745],[0.5575,0.745],[0.675,0.745],[0.6925,0.7475],[0.7025,0.76],[0.7025,0.7975],[0.7,0.875],[0.7,0.915],[0.6975,0.92],[0.62,0.8975],[0.51,0.885],[0.44,0.885],[0.4225,0.89],[0.41,0.8825],[0.4125,0.8175],[0.4125,0.7775],[0.41,0.7475],[0.4075,0.745],[0.4075,0.745]],[[0.3575,0.755],[0.44,0.75],[0.565,0.75],[0.635,0.7525],[0.67,0.7525],[0.675,0.76],[0.6725,0.835],[0.6675,0.8925],[0.6675,0.9075],[0.6675,0.91],[0.6125,0.8975],[0.525,0.89],[0.455,0.8875],[0.42,0.8875],[0.39,0.885],[0.3525,0.865],[0.3475,0.8575],[0.355,0.8175],[0.3625,0.7625],[0.365,0.7375]]]; 
-[{"name":"circle","mouse":[191,33]},{"name":"circle","mouse":[262,34]},{"name":"circle","mouse":[317,66]},{"name":"circle","mouse":[347,133]},{"name":"circle","mouse":[350,215]},{"name":"circle","mouse":[328,301]},{"name":"circle","mouse":[289,346]},{"name":"circle","mouse":[241,341]},{"name":"circle","mouse":[191,324]},{"name":"circle","mouse":[141,277]},{"name":"circle","mouse":[120,225]},{"name":"circle","mouse":[124,167]},{"name":"circle","mouse":[133,128]},{"name":"circle","mouse":[146,100]},{"name":"circle","mouse":[157,72]},{"name":"circle","mouse":[164,58]},{"name":"circle","mouse":[173,45]},{"name":"circle","mouse":[180,41]},{"name":"circle","mouse":[182,38]},{"name":"circle","mouse":[187,37]}], +let labels = [[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0], +[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0], +[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0], +[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0]] -[{"name":"circle","mouse":[150,39]},{"name":"circle","mouse":[205,36]},{"name":"circle","mouse":[274,62]},{"name":"circle","mouse":[304,110]},{"name":"circle","mouse":[327,197]},{"name":"circle","mouse":[311,282]},{"name":"circle","mouse":[279,332]},{"name":"circle","mouse":[257,345]},{"name":"circle","mouse":[217,333]},{"name":"circle","mouse":[167,296]},{"name":"circle","mouse":[133,249]},{"name":"circle","mouse":[117,201]},{"name":"circle","mouse":[117,150]},{"name":"circle","mouse":[120,120]},{"name":"circle","mouse":[129,85]},{"name":"circle","mouse":[133,69]},{"name":"circle","mouse":[141,52]},{"name":"circle","mouse":[149,41]},{"name":"circle","mouse":[152,37]},{"name":"circle","mouse":[153,37]}]] +let test 
=[[[0.2625,0.2075],[0.275,0.205],[0.3925,0.2175],[0.665,0.2175],[0.8325,0.2175],[0.8375,0.2175],[0.8375,0.2275],[0.815,0.3475],[0.8125,0.585],[0.805,0.675],[0.8,0.6775],[0.5425,0.695],[0.3425,0.7425],[0.3325,0.7425],[0.325,0.725],[0.2925,0.5575],[0.2775,0.4],[0.285,0.2875],[0.29,0.23],[0.29,0.2125]]]; -let classifer; function setup() { ml5.setBackend("webgl"); - createCanvas(400, 400); + createCanvas(w, h); frameRate(10); background(220); classifer = ml5.timeSeries(); @@ -59,7 +32,7 @@ function setup() { function draw() { if (keyIsDown(67) && pressedOnce){ - sequence.push({"name":"circle","mouse": [mouseX,mouseY]}); + sequence.push([mouseX/w,mouseY/h]); ellipse(mouseX,mouseY,10); if (sequence.length == 20){ pressedOnce = false; @@ -70,7 +43,7 @@ function draw() { } } else if (keyIsDown(66) && pressedOnce){ - sequence.push({"name":"box","mouse": [mouseX,mouseY]}); + sequence.push([mouseX/w,mouseY/h]); ellipse(mouseX,mouseY,10); if (sequence.length == 20){ pressedOnce = false; @@ -79,7 +52,24 @@ function draw() { console.log("finished"); background(220); } - } + } else if (keyIsDown(65) && pressedOnce){ + sequence.push([mouseX/w,mouseY/h]); + ellipse(mouseX,mouseY,10); + if (sequence.length == 20){ + pressedOnce = false; + datasets.push(sequence); + sequence = []; + console.log("finished"); + background(220); + + tensortrial = classifer.toTensors(datasets,datasets); + predictions = classifer.predict(tensortrial[0]); + console.log(type[predictions], 'dito', predictions); + + text(type[predictions],200,200); + // console.log(predictions[predictions.length-1]) + } + } if (datasets.length == 10){ file = JSON.stringify(datasets); @@ -92,14 +82,28 @@ function keyReleased(){ } function keyPressed(){ - if (key == 't'){ - classifer.createArchitecture(); - console.log('done architecture'); - } else if (key == 'y'){ - classifer.compileModel(); - console.log('done compiling the thing'); - } else if (key == 'u'){ - classifer.summarizeModel(); - console.log('done summarizing'); - 
} -} \ No newline at end of file + if (key == 't'){ + classifer.createArchitecture(); + console.log('done architecture'); + } else if (key == 'y'){ + classifer.compileModel(); + console.log('done compiling the thing'); + } else if (key == 'u'){ + classifer.summarizeModel(); + console.log('done summarizing'); + } else if (key == 'i'){ + alldata = circle.concat(boxes); + tensorData = classifer.toTensors(alldata,labels); + console.log('conversion done'); + console.log(tensorData[0]); + console.log(tensorData[1]); + } else if (key == 'o'){ + classifer.fitModel(tensorData[0],tensorData[1]); + } else if (key =='p'){ + classifer.modelSummary(); + } else if(key == 'k'){ + tensortrial = classifer.toTensors(test,test); + predictions = classifer.predict(tensortrial[0]); + console.log(predictions) + } + } \ No newline at end of file diff --git a/src/LSTM/index.js b/src/LSTM/index.js index 548d9965..59023fe7 100644 --- a/src/LSTM/index.js +++ b/src/LSTM/index.js @@ -1,5 +1,8 @@ import * as tf from "@tensorflow/tfjs"; +import * as tfvis from "@tensorflow/tfjs-vis"; +import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; + // import '@tensorflow/tfjs-node'; // import callCallback from "../utils/callcallback"; @@ -12,14 +15,28 @@ class LSTMify{ } createArchitecture() { + + // Create the model + this.model = tf.sequential(); + + // Add the LSTM layers with the initializer this.model.add(tf.layers.lstm({ - units: 40, - inputShape: [20,2] + units: 50, + inputShape: [20, 2], + activation: 'relu', + returnSequences: true, + kernelInitializer: tf.initializers.glorotNormal(), + recurrentInitializer: tf.initializers.glorotNormal(), + biasInitializer: tf.initializers.glorotNormal(), + })); - this.model.add(tf.layers.dense({ - units: 20, - activation: 'relu', + + this.model.add(tf.layers.lstm({ + units: 50, + kernelInitializer: tf.initializers.glorotNormal(), + recurrentInitializer: tf.initializers.glorotNormal(), + biasInitializer: tf.initializers.glorotNormal(), })); 
this.model.add(tf.layers.dense({ @@ -29,27 +46,90 @@ class LSTMify{ } compileModel(){ + const optimizer = tf.train.adam(0.002) + // const optimizer = tf.train.adadelta(0.05) + this.model.compile({ - optimizer: 'adam', - loss: 'categoricalCrossentropy', + optimizer: optimizer, + loss: 'binaryCrossentropy', metrics: ['accuracy'] }); } + + summarizeModel(){ + this.model.summary() + } - fitModel(xs,ys){ - this.model.fit(xs, ys,{ - epochs: 20, - batchSize: 32, + toTensors(x,y){ + const x_tensor = tf.tensor(x); + const y_tensor = tf.tensor(y); + + return [x_tensor,y_tensor] + } + + async fitModel(xs,ys){ + this.loggers = [] + this.history = await this.model.fit(xs, ys,{ + epochs: 50, + batchSize: 16, callbacks: { onEpochEnd: (epoch, logs) => { + this.loggers.push(logs) console.log(`Epoch ${epoch + 1}: loss = ${logs.loss}, accuracy = ${logs.acc}`); - }} - }) + } + // callbacks: { + // onEpochEnd: async (epoch, logs) => { + // // Display the loss and accuracy at the end of each epoch + // this.loggers.push(logs) + + // // Plot loss and accuracy + // tfvis.show.history( + // { name: 'Training Performance' }, + // this.loggers, + // ['loss', 'accuracy'] // or ['loss', 'acc'] based on your metrics + // ); + // }, + // } + }}) } - - summarizeModel(){ - this.model.summary() + + modelSummary() { + console.log(this.history); + tfvis.show.history({ name: 'Training Performance' }, this.loggers, ['loss', 'accuracy']); + } + + // async predict(data){ + // const predictions = this.model.predict(data) + // const predict = await predictions.array(); + // console.log(typeof predict) + // predict.array().then(array => { + // console.log(array); + // // return array + // }) + // // console.log("this is the one") + // // return array_ver + + // } + + predict(_inputs) { + const output = tf.tidy(() => { + return this.model.predict(_inputs); + }); + const result = output.arraySync(); + + output.dispose(); + _inputs.dispose(); + + console.log(result, 'here') + + const final = 
nnUtils.getMax(result[result.length-1]) + + console.log(result[result.length-1].indexOf(final),'lalal', result, final) + const word = [result[result.length-1].indexOf(final)] + + return word; } + } From 46c7dd10de40aa12602e430b60da4af84b5d2e09 Mon Sep 17 00:00:00 2001 From: mop9047 Date: Thu, 11 Jul 2024 13:25:57 +0800 Subject: [PATCH 03/13] Added new files for different experiments (instantiating classes vs copying methods in new file) --- .../sketch-new.js | 97 ++ .../timeSeries-mousexy-keypoints/sketch.js | 13 +- .../timeSeries-mousexy-keypoints/sketch111.js | 178 +++ src/LSTM/index-1.js | 302 +++++ src/LSTM/index111.js | 1084 +++++++++++++++++ src/LSTM/timeSeries.js | 304 +++++ src/LSTM/timeSeriesData.js | 873 +++++++++++++ src/LSTM/timeSeriesUtils.js | 95 ++ 8 files changed, 2937 insertions(+), 9 deletions(-) create mode 100644 examples/timeSeries-mousexy-keypoints/sketch-new.js create mode 100644 examples/timeSeries-mousexy-keypoints/sketch111.js create mode 100644 src/LSTM/index-1.js create mode 100644 src/LSTM/index111.js create mode 100644 src/LSTM/timeSeries.js create mode 100644 src/LSTM/timeSeriesData.js create mode 100644 src/LSTM/timeSeriesUtils.js diff --git a/examples/timeSeries-mousexy-keypoints/sketch-new.js b/examples/timeSeries-mousexy-keypoints/sketch-new.js new file mode 100644 index 00000000..bc305dd5 --- /dev/null +++ b/examples/timeSeries-mousexy-keypoints/sketch-new.js @@ -0,0 +1,97 @@ +let model; +let targetLabel = 'C'; + +let state = 'collection'; + +let notes = { + C: 261.6256, + D: 293.6648, + E: 329.6276 +} + +let env,wave; + +function setup() { + createCanvas(400, 400); + ml5.setBackend('webgl') + + + + let options = { + inputs: ['x', 'y'], + outputs: ['label'], + task: 'classification', + debug: 'true' + }; + model = ml5.timeSeries(options); + background(255); +} + +function keyPressed() { + + if (key == 't') { + state = 'training'; + console.log('starting training'); + model.normalizeData(); + let options = { + epochs: 200 + } + 
model.train(options, whileTraining, finishedTraining); + } else { + targetLabel = key.toUpperCase(); + } +} + +function whileTraining(epoch, loss) { + console.log(epoch); +} + +function finishedTraining() { + console.log('finished training.'); + state = 'prediction'; +} + + +function mousePressed() { + + let inputs = { + x: mouseX, + y: mouseY + } + + if (state == 'collection') { + let target = { + label: targetLabel + } + model.addData(inputs, target); + console.log('yeah') + stroke(0); + noFill(); + ellipse(mouseX, mouseY, 24); + fill(0); + noStroke(); + textAlign(CENTER, CENTER); + text(targetLabel, mouseX, mouseY); + + } else if (state == 'prediction') { + model.classify(inputs, gotResults); + + } + +} + +function gotResults(error, results) { + if (error) { + console.error(error); + return; + } + console.log(results); + stroke(0); + fill(0, 0, 255, 100); + ellipse(mouseX, mouseY, 24); + fill(0); + noStroke(); + textAlign(CENTER, CENTER); + let label = results[0].label; + text(label, mouseX, mouseY); +} \ No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/sketch.js b/examples/timeSeries-mousexy-keypoints/sketch.js index 1447e76e..84ddb1fb 100644 --- a/examples/timeSeries-mousexy-keypoints/sketch.js +++ b/examples/timeSeries-mousexy-keypoints/sketch.js @@ -12,6 +12,7 @@ let type = ["box","circle"] let circle = 
[[[0.48,0.165],[0.555,0.175],[0.615,0.1875],[0.715,0.2475],[0.78,0.35],[0.7825,0.545],[0.7225,0.6375],[0.615,0.7175],[0.5175,0.74],[0.395,0.715],[0.25,0.6425],[0.2,0.5375],[0.205,0.44],[0.2425,0.345],[0.3,0.245],[0.3425,0.2025],[0.43,0.1675],[0.495,0.165],[0.5025,0.165],[0.5025,0.165]],[[0.47,0.16],[0.4925,0.1575],[0.6,0.1725],[0.6875,0.235],[0.7925,0.35],[0.8275,0.455],[0.7825,0.585],[0.715,0.695],[0.5825,0.725],[0.3725,0.695],[0.2275,0.6075],[0.1875,0.4775],[0.205,0.3975],[0.2625,0.3],[0.3525,0.21],[0.39,0.18],[0.4275,0.17],[0.4625,0.16],[0.4725,0.1575],[0.475,0.1575]],[[0.4675,0.145],[0.5375,0.15],[0.64,0.205],[0.7375,0.2925],[0.7875,0.4275],[0.7625,0.53],[0.6725,0.65],[0.55,0.7025],[0.4025,0.6975],[0.2975,0.6325],[0.25,0.5375],[0.24,0.4225],[0.265,0.3575],[0.3125,0.2875],[0.3675,0.225],[0.4025,0.1875],[0.4225,0.1625],[0.45,0.1475],[0.4675,0.1475],[0.4775,0.1475]],[[0.5525,0.1475],[0.6825,0.195],[0.7825,0.2775],[0.83,0.39],[0.8225,0.5075],[0.7825,0.62],[0.705,0.6925],[0.4875,0.76],[0.315,0.7625],[0.23,0.7125],[0.2025,0.605],[0.21,0.4625],[0.2575,0.3125],[0.36,0.1875],[0.4625,0.1425],[0.51,0.14],[0.5375,0.14],[0.5525,0.14],[0.5625,0.14],[0.5625,0.14]],[[0.5725,0.1325],[0.6825,0.155],[0.7625,0.2275],[0.8275,0.3525],[0.8325,0.48],[0.81,0.5925],[0.7675,0.685],[0.6425,0.735],[0.4725,0.76],[0.3375,0.715],[0.2775,0.625],[0.27,0.5225],[0.27,0.4325],[0.3025,0.35],[0.3625,0.27],[0.42,0.1975],[0.4625,0.165],[0.5175,0.1375],[0.5575,0.1275],[0.57,0.1275]],[[0.5225,0.1425],[0.62,0.1375],[0.7725,0.19],[0.8325,0.2775],[0.87,0.4275],[0.8375,0.6075],[0.81,0.67],[0.755,0.7075],[0.5525,0.715],[0.4175,0.7075],[0.2775,0.5975],[0.2625,0.48],[0.275,0.405],[0.3225,0.31],[0.3625,0.2525],[0.3925,0.2175],[0.425,0.1875],[0.4675,0.1575],[0.49,0.1475],[0.5025,0.145]],[[0.4875,0.135],[0.6625,0.1525],[0.765,0.2175],[0.8325,0.345],[0.8525,0.495],[0.845,0.64],[0.805,0.7075],[0.715,0.7525],[0.575,0.7775],[0.45,0.7775],[0.3475,0.7375],[0.285,0.675],[0.2625,0.555],[0.2825,0.4125],[0.32,0.315],[0.3625
,0.2275],[0.3925,0.1975],[0.425,0.175],[0.4425,0.1675],[0.455,0.1575]],[[0.4025,0.1675],[0.455,0.1525],[0.5625,0.1525],[0.665,0.185],[0.7625,0.2475],[0.8225,0.355],[0.8475,0.5075],[0.83,0.6475],[0.7425,0.725],[0.5775,0.7925],[0.435,0.79],[0.2775,0.7325],[0.2075,0.6275],[0.2025,0.53],[0.2025,0.4525],[0.2225,0.36],[0.2575,0.3],[0.3,0.2475],[0.33,0.225],[0.36,0.1975]],[[0.4925,0.1525],[0.5925,0.1425],[0.69,0.1825],[0.7825,0.2425],[0.8325,0.3125],[0.87,0.45],[0.8825,0.6275],[0.8575,0.76],[0.7825,0.82],[0.5825,0.865],[0.435,0.865],[0.3525,0.8275],[0.2825,0.7075],[0.2675,0.6075],[0.2775,0.4825],[0.2975,0.3925],[0.335,0.31],[0.3725,0.2375],[0.4025,0.2025],[0.43,0.1775]],[[0.4725,0.15],[0.5825,0.1475],[0.6875,0.2],[0.7775,0.2875],[0.82,0.4],[0.8225,0.5575],[0.7525,0.7075],[0.665,0.7725],[0.555,0.8025],[0.395,0.7675],[0.305,0.68],[0.255,0.56],[0.245,0.42],[0.2675,0.34],[0.3325,0.235],[0.365,0.2],[0.4025,0.1725],[0.445,0.15],[0.475,0.1425],[0.5125,0.1425]],[[0.485,0.1375],[0.6175,0.1375],[0.73,0.205],[0.795,0.2875],[0.8175,0.4275],[0.8125,0.5525],[0.7825,0.635],[0.675,0.7125],[0.52,0.7425],[0.33,0.6975],[0.2625,0.5975],[0.2525,0.4975],[0.2675,0.3675],[0.2975,0.2775],[0.3425,0.2175],[0.4125,0.17],[0.4525,0.155],[0.495,0.14],[0.5125,0.14],[0.515,0.14]],[[0.5125,0.1325],[0.6425,0.15],[0.7425,0.225],[0.81,0.43],[0.81,0.6025],[0.74,0.7875],[0.6075,0.8425],[0.4375,0.86],[0.3,0.8125],[0.225,0.71],[0.1875,0.605],[0.2125,0.4825],[0.2625,0.3775],[0.3575,0.2625],[0.4025,0.205],[0.4425,0.1675],[0.515,0.1425],[0.5525,0.135],[0.5625,0.135],[0.5625,0.135]],[[0.5375,0.1375],[0.6275,0.16],[0.7475,0.235],[0.8025,0.3775],[0.8225,0.5725],[0.77,0.7225],[0.6675,0.835],[0.5475,0.8675],[0.38,0.835],[0.2525,0.7],[0.2125,0.5675],[0.2225,0.4375],[0.2625,0.3275],[0.3225,0.2425],[0.395,0.1875],[0.475,0.1625],[0.5275,0.15],[0.5425,0.1475],[0.5525,0.145],[0.5575,0.145]],[[0.53,0.1375],[0.635,0.1525],[0.725,0.2175],[0.8225,0.39],[0.825,0.6025],[0.76,0.79],[0.6625,0.8375],[0.545,0.84],[0.4075,0.8225],[0.3425
,0.775],[0.285,0.6675],[0.28,0.5375],[0.295,0.3975],[0.345,0.315],[0.3775,0.2675],[0.4175,0.2175],[0.4575,0.1675],[0.4875,0.1475],[0.5075,0.1375],[0.52,0.1375]],[[0.4775,0.165],[0.5475,0.1575],[0.63,0.1775],[0.6675,0.2025],[0.6975,0.235],[0.7375,0.3225],[0.7525,0.465],[0.7225,0.5775],[0.655,0.6525],[0.5525,0.67],[0.425,0.655],[0.3275,0.59],[0.28,0.4775],[0.275,0.35],[0.305,0.2825],[0.34,0.235],[0.3775,0.1975],[0.455,0.1575],[0.475,0.15],[0.495,0.1475]],[[0.4375,0.165],[0.5425,0.1475],[0.635,0.1725],[0.685,0.2175],[0.735,0.305],[0.7475,0.4],[0.7125,0.4825],[0.64,0.5375],[0.5325,0.555],[0.3825,0.535],[0.3,0.48],[0.2825,0.41],[0.2725,0.3325],[0.2775,0.2825],[0.3,0.245],[0.34,0.2075],[0.3575,0.195],[0.4,0.17],[0.4225,0.1575],[0.465,0.1475]],[[0.4625,0.145],[0.5575,0.15],[0.6575,0.1975],[0.7275,0.2625],[0.755,0.3525],[0.7425,0.4775],[0.6825,0.595],[0.595,0.635],[0.375,0.64],[0.275,0.6175],[0.205,0.54],[0.1825,0.4075],[0.1925,0.3025],[0.2325,0.255],[0.2825,0.22],[0.3525,0.1775],[0.3875,0.1575],[0.4325,0.1425],[0.4725,0.14],[0.5025,0.1475]],[[0.4625,0.1175],[0.5325,0.1125],[0.6425,0.1425],[0.715,0.21],[0.765,0.3175],[0.7775,0.46],[0.7325,0.5775],[0.6175,0.63],[0.4775,0.655],[0.3675,0.635],[0.2725,0.58],[0.2075,0.4875],[0.22,0.385],[0.275,0.2725],[0.3525,0.2],[0.3825,0.1675],[0.42,0.1375],[0.455,0.1175],[0.475,0.1125],[0.4925,0.1125]],[[0.5425,0.125],[0.7125,0.165],[0.7925,0.26],[0.8275,0.415],[0.8175,0.5225],[0.725,0.6125],[0.615,0.675],[0.465,0.7175],[0.3725,0.6825],[0.32,0.6225],[0.2975,0.515],[0.31,0.4],[0.3325,0.33],[0.37,0.2775],[0.4275,0.225],[0.465,0.1925],[0.535,0.1375],[0.5625,0.1225],[0.5725,0.12],[0.5725,0.12]],[[0.505,0.1225],[0.5725,0.13],[0.6825,0.1975],[0.74,0.2925],[0.7375,0.485],[0.67,0.5875],[0.545,0.6775],[0.3825,0.6875],[0.265,0.6375],[0.2125,0.52],[0.215,0.41],[0.24,0.32],[0.28,0.2375],[0.33,0.17],[0.3625,0.1475],[0.3925,0.1425],[0.4475,0.1375],[0.485,0.1275],[0.5025,0.1175],[0.51,0.1175]],[[0.5525,0.1475],[0.6875,0.1825],[0.7425,0.245],[0.7825,0.3475]
,[0.765,0.475],[0.6625,0.59],[0.545,0.655],[0.385,0.6525],[0.3125,0.6025],[0.28,0.4675],[0.2925,0.35],[0.33,0.2775],[0.3525,0.2425],[0.3925,0.2025],[0.4325,0.1825],[0.485,0.17],[0.5225,0.165],[0.5525,0.1575],[0.56,0.1575],[0.56,0.1575]],[[0.5,0.1675],[0.605,0.17],[0.6825,0.2025],[0.7325,0.2675],[0.7625,0.3875],[0.7525,0.5175],[0.6975,0.6025],[0.6075,0.6475],[0.4625,0.66],[0.3475,0.61],[0.295,0.505],[0.2825,0.375],[0.31,0.2675],[0.36,0.1875],[0.41,0.1525],[0.4725,0.1475],[0.5175,0.1575],[0.5475,0.1625],[0.5525,0.1625],[0.5525,0.165]],[[0.4375,0.1525],[0.4775,0.1525],[0.5875,0.1825],[0.6425,0.255],[0.69,0.385],[0.6375,0.5825],[0.5425,0.68],[0.4375,0.7175],[0.3275,0.6875],[0.2625,0.5975],[0.2325,0.4875],[0.24,0.3175],[0.2575,0.25],[0.3125,0.2],[0.375,0.175],[0.4125,0.165],[0.4375,0.1575],[0.4575,0.155],[0.46,0.155],[0.4625,0.1525]],[[0.41,0.1475],[0.5425,0.145],[0.6275,0.1675],[0.7125,0.24],[0.7625,0.3625],[0.75,0.5125],[0.6625,0.6125],[0.4525,0.655],[0.38,0.6375],[0.3075,0.54],[0.2825,0.4175],[0.2975,0.3225],[0.345,0.2475],[0.375,0.2],[0.405,0.17],[0.4325,0.1525],[0.4625,0.145],[0.495,0.145],[0.5225,0.1475],[0.5325,0.15]],[[0.445,0.1375],[0.545,0.135],[0.6225,0.1575],[0.695,0.1975],[0.7325,0.2875],[0.7675,0.4275],[0.75,0.5725],[0.6075,0.705],[0.49,0.7075],[0.2875,0.65],[0.1725,0.5225],[0.1625,0.415],[0.21,0.2925],[0.2625,0.2275],[0.3225,0.1775],[0.37,0.1625],[0.4025,0.1575],[0.445,0.1575],[0.4925,0.155],[0.53,0.1525]],[[0.5825,0.1325],[0.7375,0.1675],[0.8,0.255],[0.81,0.44],[0.75,0.5975],[0.6425,0.685],[0.4825,0.7175],[0.3375,0.6775],[0.2425,0.595],[0.1875,0.495],[0.1775,0.3925],[0.2175,0.3125],[0.265,0.2475],[0.3225,0.1975],[0.365,0.1725],[0.42,0.1425],[0.4875,0.1],[0.59,0.0825],[0.6325,0.0825],[0.635,0.0825]],[[0.445,0.1675],[0.5625,0.16],[0.65,0.195],[0.72,0.295],[0.745,0.425],[0.7175,0.5625],[0.6125,0.6675],[0.475,0.7],[0.3525,0.685],[0.1625,0.61],[0.1125,0.5575],[0.105,0.475],[0.1625,0.3775],[0.2125,0.3175],[0.2925,0.2475],[0.3575,0.2025],[0.4275,0.1775],[0.475,0
.1625],[0.4925,0.1575],[0.5025,0.1575]],[[0.515,0.155],[0.5725,0.1625],[0.71,0.2375],[0.78,0.3375],[0.795,0.4375],[0.77,0.5875],[0.665,0.6975],[0.5,0.7225],[0.4075,0.7025],[0.3025,0.6025],[0.2425,0.485],[0.2425,0.385],[0.285,0.2975],[0.325,0.2475],[0.38,0.2075],[0.4275,0.1875],[0.445,0.18],[0.5,0.1675],[0.55,0.165],[0.5725,0.1625]],[[0.4675,0.15],[0.5525,0.14],[0.6675,0.185],[0.7525,0.2675],[0.7925,0.37],[0.7925,0.4875],[0.6925,0.6175],[0.52,0.69],[0.36,0.6725],[0.2275,0.595],[0.1825,0.4775],[0.18,0.37],[0.2125,0.2975],[0.2725,0.2275],[0.355,0.175],[0.41,0.15],[0.4525,0.1425],[0.495,0.1375],[0.5125,0.1375],[0.515,0.1375]],[[0.5075,0.1375],[0.6,0.1375],[0.6825,0.1775],[0.7525,0.2575],[0.7875,0.3725],[0.7875,0.52],[0.6925,0.6375],[0.545,0.7],[0.395,0.66],[0.3025,0.5375],[0.2825,0.425],[0.31,0.2975],[0.3575,0.2125],[0.4025,0.1775],[0.46,0.1625],[0.52,0.1575],[0.555,0.1575],[0.5725,0.1575],[0.5725,0.1575],[0.5725,0.1575]]]; + let boxes = [[[0.4425,0.75],[0.46,0.7475],[0.545,0.7475],[0.6525,0.7475],[0.6825,0.7475],[0.68,0.78],[0.675,0.8875],[0.675,0.92],[0.665,0.93],[0.4825,0.9175],[0.41,0.925],[0.405,0.925],[0.4125,0.8375],[0.4275,0.7875],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7525]],[[0.4475,0.7375],[0.5425,0.74],[0.6525,0.74],[0.6925,0.74],[0.6925,0.7775],[0.69,0.86],[0.6925,0.915],[0.6875,0.915],[0.5875,0.9125],[0.4775,0.9125],[0.4525,0.9125],[0.4525,0.8875],[0.45,0.8075],[0.4525,0.7675],[0.4525,0.7525],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475]],[[0.4575,0.7425],[0.485,0.74],[0.6025,0.7425],[0.7025,0.7425],[0.7325,0.745],[0.7375,0.8175],[0.7425,0.9175],[0.7425,0.9225],[0.7225,0.92],[0.56,0.91],[0.4375,0.9125],[0.43,0.915],[0.4275,0.8825],[0.4375,0.8175],[0.445,0.77],[0.4475,0.7625],[0.45,0.7575],[0.45,0.7525],[0.45,0.75],[0.45,0.75]],[[0.43,0.7375],[0.4425,0.73],[0.565,0.73],[0.6575,0.73],[0.705,0.73],[0.7075,0.7325],[0.7075,0.82],[0.7025,0.8825],[0.7,0.8875],[0.66,0.885],[0.5475
,0.8825],[0.425,0.8825],[0.4,0.8825],[0.3975,0.86],[0.395,0.81],[0.4075,0.77],[0.42,0.7425],[0.4225,0.7375],[0.4225,0.7375],[0.4225,0.7375]],[[0.3825,0.7025],[0.3875,0.7025],[0.56,0.7],[0.68,0.6975],[0.73,0.6925],[0.7325,0.6925],[0.7175,0.7575],[0.7025,0.8475],[0.7,0.8925],[0.6875,0.895],[0.58,0.875],[0.4675,0.87],[0.41,0.87],[0.4025,0.8725],[0.39,0.8075],[0.3875,0.76],[0.3825,0.7225],[0.38,0.7025],[0.38,0.6975],[0.38,0.6975]],[[0.4275,0.6975],[0.6425,0.6975],[0.75,0.6975],[0.7675,0.6975],[0.7675,0.72],[0.7625,0.8025],[0.76,0.865],[0.7525,0.8775],[0.665,0.86],[0.55,0.8575],[0.4725,0.8575],[0.435,0.86],[0.43,0.8625],[0.4175,0.7925],[0.415,0.7275],[0.415,0.71],[0.415,0.6975],[0.415,0.6975],[0.415,0.6975],[0.415,0.6975]],[[0.415,0.6975],[0.485,0.6925],[0.5925,0.6925],[0.7325,0.695],[0.7425,0.6975],[0.7425,0.73],[0.735,0.81],[0.7325,0.87],[0.73,0.895],[0.6925,0.8975],[0.6025,0.8925],[0.515,0.8925],[0.455,0.8875],[0.44,0.8875],[0.435,0.855],[0.4375,0.79],[0.43,0.7475],[0.42,0.705],[0.4175,0.7],[0.4175,0.6975]],[[0.4375,0.69],[0.5725,0.6875],[0.6725,0.6875],[0.7525,0.6875],[0.7625,0.6875],[0.7625,0.7075],[0.755,0.775],[0.7525,0.815],[0.7425,0.8475],[0.7275,0.8675],[0.66,0.86],[0.5625,0.855],[0.4725,0.85],[0.445,0.8525],[0.4375,0.8375],[0.4275,0.7675],[0.4225,0.7275],[0.425,0.7025],[0.4275,0.6975],[0.4275,0.6975]],[[0.4175,0.7125],[0.44,0.7075],[0.5625,0.7075],[0.6875,0.7075],[0.7575,0.7075],[0.76,0.7075],[0.7625,0.745],[0.7625,0.8125],[0.7575,0.8625],[0.7425,0.8875],[0.7325,0.8875],[0.645,0.8675],[0.5525,0.865],[0.4725,0.8675],[0.415,0.875],[0.4125,0.8675],[0.4125,0.8075],[0.4125,0.7675],[0.4125,0.74],[0.4125,0.7275]],[[0.41,0.7225],[0.42,0.72],[0.5575,0.72],[0.665,0.7225],[0.695,0.7225],[0.695,0.7275],[0.695,0.785],[0.7025,0.8575],[0.7025,0.9],[0.7,0.9025],[0.65,0.89],[0.54,0.8775],[0.4525,0.88],[0.435,0.885],[0.4175,0.86],[0.405,0.795],[0.4,0.7525],[0.4,0.7375],[0.4025,0.7225],[0.4025,0.7225]],[[0.405,0.7175],[0.47,0.7125],[0.6175,0.7125],[0.6875,0.7125],[0.715,0.7125],
[0.715,0.7125],[0.705,0.765],[0.7025,0.815],[0.6975,0.865],[0.685,0.89],[0.6575,0.8875],[0.5575,0.8825],[0.4825,0.8825],[0.42,0.885],[0.415,0.8825],[0.415,0.815],[0.4075,0.7525],[0.405,0.7275],[0.405,0.7175],[0.405,0.7175]],[[0.405,0.7175],[0.4375,0.705],[0.5525,0.705],[0.66,0.705],[0.6875,0.705],[0.7,0.7075],[0.7025,0.725],[0.7,0.7925],[0.7,0.85],[0.695,0.8825],[0.6925,0.8875],[0.6625,0.8775],[0.5775,0.8675],[0.4825,0.8675],[0.3975,0.875],[0.3775,0.875],[0.3775,0.8375],[0.38,0.785],[0.3825,0.7475],[0.39,0.7275]],[[0.355,0.735],[0.3625,0.7325],[0.44,0.7275],[0.525,0.7275],[0.665,0.73],[0.6775,0.7325],[0.6775,0.7325],[0.675,0.7625],[0.6725,0.83],[0.6725,0.875],[0.6675,0.895],[0.6625,0.905],[0.6425,0.9025],[0.545,0.8875],[0.4625,0.8875],[0.4225,0.89],[0.4025,0.895],[0.375,0.8275],[0.365,0.7725],[0.3625,0.7575]],[[0.38,0.7475],[0.4125,0.7475],[0.47,0.7475],[0.545,0.745],[0.6425,0.745],[0.675,0.745],[0.6775,0.75],[0.685,0.8175],[0.695,0.8675],[0.695,0.9],[0.695,0.905],[0.635,0.8975],[0.54,0.895],[0.42,0.9075],[0.385,0.9075],[0.3725,0.8975],[0.3725,0.8525],[0.3775,0.815],[0.38,0.7925],[0.38,0.7675]],[[0.375,0.7625],[0.395,0.7575],[0.4825,0.7525],[0.56,0.7525],[0.6425,0.7525],[0.675,0.755],[0.685,0.7575],[0.6925,0.7875],[0.6925,0.875],[0.695,0.9025],[0.6825,0.9075],[0.6,0.9075],[0.5125,0.9075],[0.41,0.9075],[0.3775,0.9075],[0.3725,0.885],[0.38,0.84],[0.385,0.7875],[0.385,0.7575],[0.385,0.7575]],[[0.39,0.7525],[0.46,0.745],[0.5525,0.74],[0.6525,0.7375],[0.6625,0.7375],[0.6625,0.755],[0.65,0.815],[0.6475,0.8675],[0.645,0.8875],[0.64,0.9075],[0.6325,0.9075],[0.5925,0.8925],[0.5325,0.8875],[0.42,0.8875],[0.3775,0.8875],[0.3725,0.87],[0.3825,0.8225],[0.3925,0.7875],[0.3925,0.7675],[0.3925,0.76]],[[0.41,0.74],[0.4575,0.7375],[0.5125,0.7375],[0.625,0.7375],[0.65,0.7375],[0.6775,0.7425],[0.6825,0.7475],[0.6825,0.76],[0.6775,0.8125],[0.6725,0.8475],[0.6675,0.87],[0.6625,0.875],[0.5825,0.8675],[0.4925,0.8675],[0.4275,0.87],[0.415,0.87],[0.4125,0.8675],[0.4125,0.815],[0.41,0.7875],[
0.405,0.7575]],[[0.405,0.7175],[0.4675,0.715],[0.575,0.715],[0.6475,0.7175],[0.7025,0.7175],[0.705,0.725],[0.705,0.7675],[0.7,0.8475],[0.6875,0.8875],[0.6775,0.9],[0.625,0.8975],[0.5525,0.8875],[0.47,0.8875],[0.4075,0.88],[0.3925,0.855],[0.385,0.8075],[0.3825,0.7675],[0.3925,0.7225],[0.395,0.71],[0.395,0.71]],[[0.395,0.7225],[0.4125,0.7175],[0.4775,0.7175],[0.5675,0.7175],[0.635,0.7175],[0.65,0.7175],[0.655,0.725],[0.66,0.7725],[0.66,0.8125],[0.655,0.8425],[0.65,0.855],[0.6175,0.8475],[0.5325,0.84],[0.475,0.84],[0.415,0.84],[0.375,0.84],[0.37,0.8175],[0.37,0.7775],[0.3725,0.7475],[0.375,0.7275]],[[0.37,0.7375],[0.3925,0.7325],[0.525,0.7275],[0.5825,0.7275],[0.6325,0.7275],[0.655,0.7275],[0.655,0.74],[0.655,0.7875],[0.6475,0.825],[0.645,0.855],[0.645,0.885],[0.6375,0.8925],[0.5725,0.8825],[0.455,0.8775],[0.3925,0.8775],[0.355,0.875],[0.3525,0.865],[0.3625,0.8125],[0.375,0.7775],[0.3825,0.7475]],[[0.39,0.7075],[0.4725,0.705],[0.5575,0.705],[0.67,0.715],[0.6925,0.7175],[0.6975,0.72],[0.695,0.78],[0.6925,0.845],[0.6925,0.89],[0.6925,0.8975],[0.6225,0.8875],[0.525,0.88],[0.445,0.8775],[0.4125,0.8775],[0.3975,0.8575],[0.395,0.7875],[0.3925,0.7425],[0.39,0.7025],[0.39,0.6925],[0.39,0.6925]],[[0.3925,0.73],[0.3975,0.7325],[0.5075,0.73],[0.5875,0.73],[0.6525,0.73],[0.7,0.7275],[0.71,0.7275],[0.71,0.7775],[0.6975,0.85],[0.6875,0.8875],[0.685,0.89],[0.615,0.885],[0.515,0.88],[0.445,0.88],[0.4225,0.88],[0.4175,0.85],[0.4125,0.7925],[0.41,0.7675],[0.4025,0.7425],[0.4025,0.7325]],[[0.4075,0.73],[0.4825,0.7275],[0.585,0.7275],[0.6975,0.7275],[0.7125,0.73],[0.7225,0.7325],[0.725,0.7675],[0.7225,0.8275],[0.7225,0.865],[0.7225,0.875],[0.715,0.8775],[0.61,0.86],[0.5525,0.86],[0.48,0.86],[0.445,0.86],[0.4325,0.8275],[0.4275,0.7775],[0.4225,0.7575],[0.415,0.7375],[0.415,0.73]],[[0.4125,0.7275],[0.425,0.7275],[0.4625,0.7275],[0.515,0.725],[0.5625,0.7225],[0.61,0.7225],[0.6425,0.7225],[0.6525,0.7425],[0.655,0.7975],[0.655,0.845],[0.655,0.875],[0.6275,0.88],[0.555,0.88],[0.4825,0.88],[0.44
,0.88],[0.4225,0.855],[0.415,0.7975],[0.4125,0.7525],[0.4125,0.7275],[0.4125,0.7275]],[[0.4225,0.7425],[0.4825,0.7375],[0.5825,0.7325],[0.6425,0.7325],[0.6675,0.7325],[0.6775,0.74],[0.6775,0.795],[0.6725,0.845],[0.6675,0.8775],[0.665,0.8875],[0.6425,0.89],[0.5575,0.88],[0.4725,0.8775],[0.4275,0.875],[0.4125,0.86],[0.405,0.805],[0.405,0.7775],[0.4125,0.7475],[0.4125,0.7375],[0.4125,0.7375]],[[0.4125,0.7575],[0.4225,0.7475],[0.495,0.7425],[0.5825,0.7425],[0.6625,0.74],[0.695,0.74],[0.7025,0.7475],[0.7025,0.7975],[0.7025,0.845],[0.6975,0.865],[0.6925,0.87],[0.65,0.8675],[0.555,0.86],[0.4825,0.86],[0.41,0.86],[0.395,0.8625],[0.39,0.82],[0.3975,0.775],[0.4025,0.7575],[0.405,0.7475]],[[0.3775,0.7375],[0.4025,0.73],[0.4725,0.7275],[0.5725,0.7275],[0.6275,0.7275],[0.6425,0.7275],[0.6575,0.73],[0.66,0.7575],[0.66,0.8075],[0.65,0.8775],[0.6475,0.895],[0.605,0.8825],[0.53,0.875],[0.4375,0.87],[0.405,0.87],[0.39,0.855],[0.3825,0.8075],[0.3825,0.7725],[0.3875,0.75],[0.39,0.7475]],[[0.3475,0.75],[0.4325,0.745],[0.52,0.745],[0.59,0.75],[0.605,0.7525],[0.6125,0.78],[0.6125,0.835],[0.61,0.8775],[0.61,0.89],[0.56,0.8925],[0.4625,0.8775],[0.385,0.8775],[0.37,0.8775],[0.3625,0.8775],[0.3525,0.8275],[0.3475,0.7675],[0.35,0.7475],[0.35,0.7475],[0.35,0.7475],[0.3575,0.7525]],[[0.4,0.7475],[0.455,0.745],[0.5575,0.745],[0.675,0.745],[0.6925,0.7475],[0.7025,0.76],[0.7025,0.7975],[0.7,0.875],[0.7,0.915],[0.6975,0.92],[0.62,0.8975],[0.51,0.885],[0.44,0.885],[0.4225,0.89],[0.41,0.8825],[0.4125,0.8175],[0.4125,0.7775],[0.41,0.7475],[0.4075,0.745],[0.4075,0.745]],[[0.3575,0.755],[0.44,0.75],[0.565,0.75],[0.635,0.7525],[0.67,0.7525],[0.675,0.76],[0.6725,0.835],[0.6675,0.8925],[0.6675,0.9075],[0.6675,0.91],[0.6125,0.8975],[0.525,0.89],[0.455,0.8875],[0.42,0.8875],[0.39,0.885],[0.3525,0.865],[0.3475,0.8575],[0.355,0.8175],[0.3625,0.7625],[0.365,0.7375]]]; let labels = [[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0], @@ -85,25 
+86,19 @@ function keyPressed(){ if (key == 't'){ classifer.createArchitecture(); console.log('done architecture'); - } else if (key == 'y'){ classifer.compileModel(); console.log('done compiling the thing'); - } else if (key == 'u'){ classifer.summarizeModel(); console.log('done summarizing'); - } else if (key == 'i'){ alldata = circle.concat(boxes); tensorData = classifer.toTensors(alldata,labels); console.log('conversion done'); console.log(tensorData[0]); console.log(tensorData[1]); - } else if (key == 'o'){ - classifer.fitModel(tensorData[0],tensorData[1]); + + classifer.fitModel(tensorData[0],tensorData[1]); + } else if (key =='p'){ classifer.modelSummary(); - } else if(key == 'k'){ - tensortrial = classifer.toTensors(test,test); - predictions = classifer.predict(tensortrial[0]); - console.log(predictions) } } \ No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/sketch111.js b/examples/timeSeries-mousexy-keypoints/sketch111.js new file mode 100644 index 00000000..fd3dc1ba --- /dev/null +++ b/examples/timeSeries-mousexy-keypoints/sketch111.js @@ -0,0 +1,178 @@ +// https://editor.p5js.org/gohai/sketches/_KdpDSQzH + +let model; + +let curr_shape = 'None, press a button below' + +let state = 'collection'; +let pressedOnce = true; + + +let rec_duration = 2; +let num_seq = 20; +// assuming frameRate is 60, with record time of 2 seconds, there will be 120 datapoints total, which is huge! 
we use map to get 20 data points instead of 120 + +let count = 0; + + +let sequence = []; + +function setup() { + ml5.setBackend('webgl') + let options = { + inputs: ['x', 'y'], + outputs: ['label'], + task: 'classification', + debug: 'true', + learningRate: 0.5 + }; + model = ml5.timeSeries(options); + createCanvas(600, 400); + background(220); + UI(); + frameRate(60); + +} + +function draw() { + let datapoints = map(count,0,rec_duration*num_seq, 0,num_seq) + + if (mouseIsPressed && pressedOnce){ + + line(pmouseX, pmouseY, mouseX,mouseY); + let inputs = {x: mouseX/400,y: mouseY/400}; + count++; + + if (datapoints % 1 == 0){ + sequence.push(inputs); + } + + if (sequence.length == num_seq){ + + + pressedOnce = false; + count = 0 + + if (state == 'collection'){ + let target = {label: curr_shape}; + background(220); + text("Recording: " + curr_shape, 50,50); + // console.log(sequence, target) + model.addData(sequence, target); + } else if (state == 'prediction'){ + background(220); + model.classify(sequence, gotResults) + } else if (state == 'training') { + background(220); + text("You cannot record while training"); + } + + sequence = []; + } + } +} + +function gotResults(results) { + // if (error) { + // console.log(error); + // } + // console.log('hello', results); + stroke(0); + fill(0, 0, 255, 100); + let label = results[0].label; + text("Prediction: " + label, 50,50); + // let label = error[0].label; + +} + +function keyPressed(){ + if (key == 's') { + model.saveData('trial'); + } else if (key == 'd'){ + console.log(model.getData()); + } +} + +function mouseReleased(){ + pressedOnce = true; +} + +function UI(){ + + textSize(20) + + rec_circle = createButton('Record Circle'); + rec_circle.mouseClicked(recordCircle); + rec_circle.style("font-family", "Georgia"); + rec_circle.style("font-size", "20px"); + + rec_square = createButton('Record Square'); + rec_square.mouseClicked(recordSquare); + rec_square.style("font-family", "Georgia"); + 
rec_square.style("font-size", "20px"); + + train_but = createButton('Train Model'); + train_but.mouseClicked(trainModel); + train_but.style("font-family", "Georgia"); + train_but.style("font-size", "20px"); + + pred_sha = createButton('Predict Shape'); + pred_sha.mouseClicked(predictShape); + pred_sha.style("font-family", "Georgia"); + pred_sha.style("font-size", "20px"); + + function recordCircle(){ + background(220); + state = 'collection' + curr_shape = 'Circle' + text("Recording: Circle", 50,50); + rec_circle.style("background-color",'#f0f0f0') + rec_square.style('background-color', ''); + pred_sha.style('background-color', ''); + } + + function recordSquare(){ + background(220); + state = 'collection' + curr_shape = 'Square' + text("Recording: Square", 50,50); + rec_square.style("background-color",'#f0f0f0') + rec_circle.style('background-color', ''); + pred_sha.style('background-color', ''); + } + + function trainModel(){ + model.createArchitecture(); + model.compileModel(); + model.summarizeModel(); + background(220); + state = 'training'; + text("Training...", 50,50); + model.normalizeData(); + let options = { + epochs: 100 + } + model.train(options,whileTraining,finishedTraining); + } + + function whileTraining(epoch, loss) { + console.log(epoch); + } + + function finishedTraining() { + console.log('finished training.'); + state = 'prediction'; + } + + function predictShape(){ + background(220); + state = 'prediction' + text("Predicting Shape...", 50,50); + pred_sha.style("background-color",'#f0f0f0') + rec_square.style('background-color', ''); + rec_circle.style('background-color', ''); + + + } +} + diff --git a/src/LSTM/index-1.js b/src/LSTM/index-1.js new file mode 100644 index 00000000..64bfc478 --- /dev/null +++ b/src/LSTM/index-1.js @@ -0,0 +1,302 @@ +import * as tf from "@tensorflow/tfjs"; +import neuralNetwork from "../NeuralNetwork/index"; +import callCallback from "../utils/callcallback"; + + +/* +Since essentially LSTM is a layer and can be used 
the same way with neuralNetwork class, +this will inherit from the DIYNeuralNetwork Class, a list of modifications and overrides will be +in this list: + +1.) Architecture: + * Default Architecutre when no + +SaveData +formatRawData + +converting inputs to tensors + + +Maintain: +createModel +addlayer +copy + +*/ + + + +class LSTMify{ + constructor(options, callback){ + this.nnInst = neuralNetwork(options, callback); + + const methods = Object.getOwnPropertyNames(Object.getPrototypeOf(this.nnInst)); + for (const method of methods) { + if (method !== 'constructor' && !this[method]) { + this[method] = this.nnInst[method].bind(this.nnInst); + } + } + } + + // train() { + // console.log('Overridden train method in LSTMify'); + // // Optionally, you can still call the original method if needed + // // this.neuralNetworkInstance.train(); + // } + /** + * train + * @public + * @param {*} optionsOrCallback + * @param {*} optionsOrWhileTraining + * @param {*} callback + * @return {Promise} + */ + async train(optionsOrCallback, optionsOrWhileTraining, callback) { + let options; + let whileTrainingCb; + let finishedTrainingCb; + if ( + typeof optionsOrCallback === "object" && + typeof optionsOrWhileTraining === "function" && + typeof callback === "function" + ) { + options = optionsOrCallback; + whileTrainingCb = optionsOrWhileTraining; + finishedTrainingCb = callback; + } else if ( + typeof optionsOrCallback === "object" && + typeof optionsOrWhileTraining === "function" + ) { + options = optionsOrCallback; + whileTrainingCb = null; + finishedTrainingCb = optionsOrWhileTraining; + } else if ( + typeof optionsOrCallback === "function" && + typeof optionsOrWhileTraining === "function" + ) { + options = {}; + whileTrainingCb = optionsOrCallback; + finishedTrainingCb = optionsOrWhileTraining; + } else { + options = {}; + whileTrainingCb = null; + finishedTrainingCb = optionsOrCallback; + } + + return callCallback(this.trainInternal(options, whileTrainingCb), finishedTrainingCb); 
+ } + + /** + * train + * @param {Object} _options + * @param {function} [whileTrainingCb] + * @return {Promise} + */ + async trainInternal(_options, whileTrainingCb) { + const options = { + epochs: 10, + batchSize: 32, + validationSplit: 0.1, + whileTraining: null, + ..._options, + }; + + // if debug mode is true, then use tf vis + if (this.nnInst.options.debug === true || this.nnInst.options.debug === "true") { + options.whileTraining = [ + this.nnInst.neuralNetworkVis.trainingVis(), + { + onEpochEnd: whileTrainingCb, + }, + ]; + } else { + // if not use the default training + // options.whileTraining = whileTrainingCb === null ? [{ + // onEpochEnd: (epoch, loss) => { + // console.log(epoch, loss.loss) + // } + // }] : + // [{ + // onEpochEnd: whileTrainingCb + // }]; + options.whileTraining = [ + { + onEpochEnd: whileTrainingCb, + }, + ]; + } + + // if metadata needs to be generated about the data + if (!this.nnInst.neuralNetworkData.isMetadataReady) { + // if the inputs are defined as an array of [img_width, img_height, channels] + this.nnInst.createMetaData(); + } + + // if the data still need to be summarized, onehotencoded, etc + if (!this.nnInst.neuralNetworkData.isWarmedUp) { + this.nnInst.prepareForTraining(); + } + + // if inputs and outputs are not specified + // in the options, then create the tensors + // from the this.neuralNetworkData.data.raws + if (!options.inputs && !options.outputs) { + const { inputs, outputs } = this.convertTrainingDataToTensors(); + options.inputs = inputs; + options.outputs = outputs; + } + + // check to see if layers are passed into the constructor + // then use those to create your architecture + if (!this.nnInst.neuralNetwork.isLayered) { + // TODO: don't update this.options.layers - Linda + this.nnInst.options.layers = this.createNetworkLayers( + this.nnInst.options.layers + ); + } + + // if the model does not have any layers defined yet + // then use the default structure + if (!this.nnInst.neuralNetwork.isLayered) { + 
// TODO: don't update this.options.layers - Linda + this.nnInst.options.layers = this.addDefaultLayers(); + } + + if (!this.nnInst.neuralNetwork.isCompiled) { + // compile the model with defaults + this.nnInst.compile(); + } + + // train once the model is compiled + await this.nnInst.neuralNetwork.train(options); + } + + addDefaultLayers() { + const { inputs, outputs } = this.convertTrainingDataToTensors(); + const shape = [1]; + shape.push(...inputs.shape); + console.log(inputs) + console.log(outputs) + + console.log('default', shape) + let layers; + const task = this.nnInst.options.task; + switch (task.toLowerCase()) { + // if the task is classification + case "classification": + layers = [ + { + type: "lstm", + units: this.nnInst.options.hiddenUnits, + activation: "relu", + inputShape: shape, + returnSequences: true, + }, + { + type: "dense", + units: this.nnInst.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "softmax", + }, + ]; + + return this.createNetworkLayers(layers); + // if the task is regression + case "regression": + layers = [ + { + type: "dense", + units: this.nnInst.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", + }, + ]; + return this.createNetworkLayers(layers); + // if the task is imageClassification + case "imageclassification": + layers = [ + { + type: "conv2d", + filters: 8, + kernelSize: 5, + strides: 1, + activation: "relu", + kernelInitializer: "varianceScaling", + }, + { + type: "maxPooling2d", + poolSize: [2, 2], + strides: [2, 2], + }, + { + type: "conv2d", + filters: 16, + kernelSize: 5, + strides: 1, + activation: "relu", + kernelInitializer: "varianceScaling", + }, + { + type: "maxPooling2d", + poolSize: [2, 2], + strides: [2, 2], + }, + { + type: "flatten", + }, + { + type: "dense", + kernelInitializer: "varianceScaling", + activation: "softmax", + }, + ]; + return this.nnInst.createNetworkLayers(layers); + + default: + console.log("no imputUnits or 
outputUnits defined"); + layers = [ + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", + }, + ]; + return this.nnInst.createNetworkLayers(layers); + } + } + + getData(){ + return this.nnInst.neuralNetworkData.getData(); + } +} + +const timeSeries = (inputsOrOptions, outputsOrCallback, callback) => { + let options; + let cb; + + if (inputsOrOptions instanceof Object) { + options = inputsOrOptions; + cb = outputsOrCallback; + } else { + options = { + inputs: inputsOrOptions, + outputs: outputsOrCallback, + }; + cb = callback; + } + + const instance = new LSTMify(options, cb); + return instance; + }; + + export default timeSeries; \ No newline at end of file diff --git a/src/LSTM/index111.js b/src/LSTM/index111.js new file mode 100644 index 00000000..3919ea54 --- /dev/null +++ b/src/LSTM/index111.js @@ -0,0 +1,1084 @@ +import * as tf from "@tensorflow/tfjs"; +import callCallback from "../utils/callcallback"; +import handleArguments from "../utils/handleArguments"; +import { imgToPixelArray, isInstanceOfSupportedElement, } from "../utils/imageUtilities"; +import NeuralNetwork from "./timeSeries"; +import NeuralNetworkData from "./timeSeriesData"; + +import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; +import NeuralNetworkVis from "../NeuralNetwork/NeuralNetworkVis"; + +import tsUtils from "./timeSeriesUtils"; + +const DEFAULTS = { + inputs: [], + outputs: [], + dataUrl: null, + modelUrl: null, + layers: [], + task: null, + debug: false, + learningRate: 0.2, + hiddenUnits: 16, + neuroEvolution: false, +}; + + +/* +as far as the p5 sketch is concerned, it will directly call only a few functions in the class, +these are the following: + +model.addData +model.saveData, model etc +model.train +model.classify/predict etc + + + +*/ + + + +class timeSeries { + + //reviewed + constructor(options, callback) { + this.options = + { + ...DEFAULTS, + ...options, + } || DEFAULTS; + + 
this.neuralNetwork = new NeuralNetwork(); + this.neuralNetworkData = new NeuralNetworkData(); + this.neuralNetworkVis = new NeuralNetworkVis(); + + this.data = { + training: [], + }; + + // Methods + this.init = this.init.bind(this); + // adding data + this.addData = this.addData.bind(this); + this.loadDataFromUrl = this.loadDataFromUrl.bind(this); + // metadata prep + this.createMetaData = this.createMetaData.bind(this); + // data prep and handling + this.prepareForTraining = this.prepareForTraining.bind(this); + this.normalizeData = this.normalizeData.bind(this); + this.normalizeInput = this.normalizeInput.bind(this); + // this.searchAndFormat = this.searchAndFormat.bind(this); + // this.formatInputItem = this.formatInputItem.bind(this); + this.convertTrainingDataToTensors = + this.convertTrainingDataToTensors.bind(this); + this.formatInputsForPrediction = this.formatInputsForPrediction.bind(this); + this.formatInputsForPredictionAll = + this.formatInputsForPredictionAll.bind(this); + this.isOneHotEncodedOrNormalized = + this.isOneHotEncodedOrNormalized.bind(this); + // model prep + this.train = this.train.bind(this); + this.trainInternal = this.trainInternal.bind(this); + this.addLayer = this.addLayer.bind(this); + this.createNetworkLayers = this.createNetworkLayers.bind(this); + this.addDefaultLayers = this.addDefaultLayers.bind(this); + this.compile = this.compile.bind(this); + // prediction / classification + this.predict = this.predict.bind(this); + this.predictMultiple = this.predictMultiple.bind(this); + this.classify = this.classify.bind(this); + this.classifyMultiple = this.classifyMultiple.bind(this); + this.predictInternal = this.predictInternal.bind(this); + this.classifyInternal = this.classifyInternal.bind(this); + // save / load data + this.saveData = this.saveData.bind(this); + this.loadData = this.loadData.bind(this); + // save / load model + this.save = this.save.bind(this); + this.load = this.load.bind(this); + + // release model + this.dispose 
= this.dispose.bind(this); + + // neuroevolution + this.mutate = this.mutate.bind(this); + this.crossover = this.crossover.bind(this); + + // Initialize + this.ready = callCallback(this.init(), callback); + } + + // changed if else from dataURL and model URL, what if both are provided + //reviewed + async init() { + // check if the a static model should be built based on the inputs and output properties + if (this.options.neuroEvolution === true) { + this.createLayersNoTraining(); + } + + if (this.options.dataUrl) { + await this.loadDataFromUrl(); + } else if (this.options.modelUrl) { + // will take a URL to model.json, an object, or files array + await this.load(this.options.modelUrl); + } + return this; + } + + //calls nndata createmetadata, calls add default layers + //reviewed no idea purpose + createLayersNoTraining() { + // Create sample data based on options + const { inputs, outputs, task } = this.options; + if (task === "classification") { + for (let i = 0; i < outputs.length; i += 1) { + const inputSample = new Array(inputs).fill(0); + this.addData(inputSample, [outputs[i]]); + } + } else { + const inputSample = new Array(inputs).fill(0); + const outputSample = new Array(outputs).fill(0); + this.addData(inputSample, outputSample); + } + + // TODO: what about inputShape? 
+ this.neuralNetworkData.createMetadata(); + this.addDefaultLayers(); + } + + //calls timeSeries again, nn.model.getweights, setweights + //reviewed + copy() { + const nnCopy = new timeSeries(this.options); + return tf.tidy(() => { + const weights = this.neuralNetwork.model.getWeights(); + const weightCopies = []; + for (let i = 0; i < weights.length; i += 1) { + weightCopies[i] = weights[i].clone(); + } + nnCopy.neuralNetwork.model.setWeights(weightCopies); + return nnCopy; + }); + } + + + // addData(xInputs, yInputs, options = null) { + + // // ({inputLabels,outputLabels}=tsUtils.prepareLabels(xInputs, yInputs, options = null)); + + // console.log('raw', xInputs); + + // const xs = this.searchAndFormat(xInputs); + // const xs = nnUtils.formatDataAsObject(formattedInputs, inputLabels); + + // const ys = nnUtils.formatDataAsObject(yInputs, outputLabels); + + // console.log('xs and yx', xInputs); + // console.log('xs and yx',ys) + // //create formatted input first, since the data is time series, the format of the data should be the following + // /* [ + // { + // xs:[{x: ,y: },{x: ,y: },{x: ,y: },{x: ,y: }], + // ys: {'label': } + // }, + + // { + // xs:[{x: ,y: },{x: ,y: },{x: ,y: },{x: ,y: }], + // ys: {'label': } + // } + // ] + + // */ + + // this.neuralNetworkData.addData(xInputs, ys); + // } + addData(xInputs, yInputs, options = null) { + const { inputs, outputs } = this.options; + + // get the input and output labels + // or infer them from the data + let inputLabels; + let outputLabels; + + if (options !== null) { + // eslint-disable-next-line prefer-destructuring + inputLabels = options.inputLabels; + // eslint-disable-next-line prefer-destructuring + outputLabels = options.outputLabels; + } else if (inputs.length > 0 && outputs.length > 0) { + // if the inputs and outputs labels have been defined + // in the constructor + if (inputs.every((item) => typeof item === "string")) { + inputLabels = inputs; + } + if (outputs.every((item) => typeof item === 
"string")) { + outputLabels = outputs; + } + } else if (typeof xInputs === "object" && typeof yInputs === "object") { + inputLabels = Object.keys(xInputs); + outputLabels = Object.keys(yInputs); + } else { + inputLabels = nnUtils.createLabelsFromArrayValues(xInputs, "input"); + outputLabels = nnUtils.createLabelsFromArrayValues(yInputs, "output"); + } + + // Make sure that the inputLabels and outputLabels are arrays + if (!(inputLabels instanceof Array)) { + throw new Error("inputLabels must be an array"); + } + if (!(outputLabels instanceof Array)) { + throw new Error("outputLabels must be an array"); + } + + const formattedInputs = this.searchAndFormat(xInputs); + const xs = nnUtils.formatDataAsObject(formattedInputs, inputLabels); + + const ys = nnUtils.formatDataAsObject(yInputs, outputLabels); + + this.neuralNetworkData.addData(xs, ys); + } + + + async loadDataFromUrl() { + const { dataUrl, inputs, outputs } = this.options; + + await this.neuralNetworkData.loadDataFromUrl( + dataUrl, + inputs, + outputs + ); + + // once the data are loaded, create the metadata + // and prep the data for training + // if the inputs are defined as an array of [img_width, img_height, channels] + this.createMetaData(); + + this.prepareForTraining(); + } + + + createMetaData() { + const { inputs } = this.options; + + let inputShape; + if (Array.isArray(inputs) && inputs.length > 0) { + inputShape = + inputs.every((item) => typeof item === "number") && inputs.length > 0 + ? 
inputs + : null; + } + + this.neuralNetworkData.createMetadata(inputShape); + } + + + prepareForTraining() { + this.data.training = this.neuralNetworkData.applyOneHotEncodingsToDataRaw(); + this.neuralNetworkData.isWarmedUp = true; + } + + normalizeData() { + if (!this.neuralNetworkData.isMetadataReady) { + // if the inputs are defined as an array of [img_width, img_height, channels] + this.createMetaData(); + } + + if (!this.neuralNetworkData.isWarmedUp) { + this.prepareForTraining(); + } + + const trainingData = this.neuralNetworkData.normalizeDataRaw(); + + // set this equal to the training data + this.data.training = trainingData; + + // set isNormalized to true + this.neuralNetworkData.meta.isNormalized = true; + } + + normalizeInput(value, _key, _meta) { + const key = _key; + const { min, max } = _meta[key]; + return nnUtils.normalizeValue(value, min, max); + } + + searchAndFormat(input) { + let formattedInputs; + if (Array.isArray(input)) { + formattedInputs = input.map((item) => this.formatInputItem(item)); + } else if (typeof input === "object") { + const newXInputs = Object.assign({}, input); + Object.keys(input).forEach((k) => { + const val = input[k]; + newXInputs[k] = this.formatInputItem(val); + }); + formattedInputs = newXInputs; + } + return formattedInputs; + } + + + formatInputItem(input) { + let imgToPredict; + let formattedInputs; + if (isInstanceOfSupportedElement(input)) { + imgToPredict = input; + } else if ( + typeof input === "object" && + isInstanceOfSupportedElement(input.elt) + ) { + imgToPredict = input.elt; // Handle p5.js image and video. + } else if ( + typeof input === "object" && + isInstanceOfSupportedElement(input.canvas) + ) { + imgToPredict = input.canvas; // Handle p5.js image and video. 
+ } + + if (imgToPredict) { + formattedInputs = imgToPixelArray(imgToPredict); + } else { + formattedInputs = input; + } + + console.log("formatted input item", formattedInputs) + return formattedInputs; + } + + convertTrainingDataToTensors() { + return this.neuralNetworkData.convertRawToTensors(this.data.training); + } + + formatInputsForPrediction(_input) { + const { meta } = this.neuralNetworkData; + const inputHeaders = Object.keys(meta.inputs); + + let inputData = []; + + // TODO: check to see if it is a nested array + // to run predict or classify on a batch of data + + if (_input instanceof Array) { + inputData = inputHeaders.map((prop, idx) => { + return this.isOneHotEncodedOrNormalized(_input[idx], prop, meta.inputs); + }); + } else if (_input instanceof Object) { + // TODO: make sure that the input order is preserved! + inputData = inputHeaders.map((prop) => { + return this.isOneHotEncodedOrNormalized( + _input[prop], + prop, + meta.inputs + ); + }); + } + + // inputData = tf.tensor([inputData.flat()]) + inputData = inputData.flat(); + + return inputData; + } + + formatInputsForPredictionAll(_input) { + const { meta } = this.neuralNetworkData; + const inputHeaders = Object.keys(meta.inputs); + + let output; + + if (_input instanceof Array) { + if (_input.every((item) => Array.isArray(item))) { + output = _input.map((item) => { + return this.formatInputsForPrediction(item); + }); + + return tf.tensor(output, [_input.length, inputHeaders.length]); + } + output = this.formatInputsForPrediction(_input); + return tf.tensor([output]); + } + + output = this.formatInputsForPrediction(_input); + return tf.tensor([output]); + } + + isOneHotEncodedOrNormalized(_input, _key, _meta) { + const input = _input; + const key = _key; + + let output; + if (typeof _input !== "number") { + output = _meta[key].legend[input]; + } else { + output = _input; + if (this.neuralNetworkData.meta.isNormalized) { + // output = this.normalizeInput(_input, key, _meta); + } + } + return 
output; + } + + async train(optionsOrCallback, optionsOrWhileTraining, callback) { + let options; + let whileTrainingCb; + let finishedTrainingCb; + if ( + typeof optionsOrCallback === "object" && + typeof optionsOrWhileTraining === "function" && + typeof callback === "function" + ) { + options = optionsOrCallback; + whileTrainingCb = optionsOrWhileTraining; + finishedTrainingCb = callback; + } else if ( + typeof optionsOrCallback === "object" && + typeof optionsOrWhileTraining === "function" + ) { + options = optionsOrCallback; + whileTrainingCb = null; + finishedTrainingCb = optionsOrWhileTraining; + } else if ( + typeof optionsOrCallback === "function" && + typeof optionsOrWhileTraining === "function" + ) { + options = {}; + whileTrainingCb = optionsOrCallback; + finishedTrainingCb = optionsOrWhileTraining; + } else { + options = {}; + whileTrainingCb = null; + finishedTrainingCb = optionsOrCallback; + } + + return callCallback(this.trainInternal(options, whileTrainingCb), finishedTrainingCb); + } + + async trainInternal(_options, whileTrainingCb) { + const options = { + epochs: 10, + batchSize: 32, + validationSplit: 0.1, + whileTraining: null, + ..._options, + }; + + // if debug mode is true, then use tf vis + if (this.options.debug === true || this.options.debug === "true") { + options.whileTraining = [ + this.neuralNetworkVis.trainingVis(), + { + onEpochEnd: whileTrainingCb, + }, + ]; + } else { + // if not use the default training + // options.whileTraining = whileTrainingCb === null ? 
[{ + // onEpochEnd: (epoch, loss) => { + // console.log(epoch, loss.loss) + // } + // }] : + // [{ + // onEpochEnd: whileTrainingCb + // }]; + options.whileTraining = [ + { + onEpochEnd: whileTrainingCb, + }, + ]; + } + + // if metadata needs to be generated about the data + if (!this.neuralNetworkData.isMetadataReady) { + // if the inputs are defined as an array of [img_width, img_height, channels] + this.createMetaData(); + } + + // if the data still need to be summarized, onehotencoded, etc + if (!this.neuralNetworkData.isWarmedUp) { + this.prepareForTraining(); + } + + // if inputs and outputs are not specified + // in the options, then create the tensors + // from the this.neuralNetworkData.data.raws + if (!options.inputs && !options.outputs) { + const { inputs, outputs } = this.convertTrainingDataToTensors(); + options.inputs = inputs; + options.outputs = outputs; + } + + // check to see if layers are passed into the constructor + // then use those to create your architecture + if (!this.neuralNetwork.isLayered) { + // TODO: don't update this.options.layers - Linda + this.options.layers = this.createNetworkLayers( + this.options.layers + ); + } + + // if the model does not have any layers defined yet + // then use the default structure + if (!this.neuralNetwork.isLayered) { + // TODO: don't update this.options.layers - Linda + this.options.layers = this.addDefaultLayers(); + } + + if (!this.neuralNetwork.isCompiled) { + // compile the model with defaults + this.compile(); + } + + // train once the model is compiled + await this.neuralNetwork.train(options); + } + + addLayer(layer) { + this.neuralNetwork.addLayer(layer); + } + + createNetworkLayers(layerJsonArray) { + const layers = [...layerJsonArray]; + + const { inputUnits, outputUnits } = this.neuralNetworkData.meta; + const layersLength = layers.length; + + if (!(layers.length >= 2)) { + return false; + } + + // set the inputShape + layers[0].inputShape = layers[0].inputShape + ? 
layers[0].inputShape + : inputUnits; + // set the output units + const lastIndex = layersLength - 1; + const lastLayer = layers[lastIndex]; + lastLayer.units = lastLayer.units ? lastLayer.units : outputUnits; + + layers.forEach((layer) => { + this.addLayer(tf.layers[layer.type](layer)); + }); + + return layers; + } + + addDefaultLayers() { + let layers; + const task = this.options.task; + switch (task.toLowerCase()) { + // if the task is classification + case "classification": + layers = [ + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "softmax", + }, + ]; + + return this.createNetworkLayers(layers); + // if the task is regression + case "regression": + layers = [ + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", + }, + ]; + return this.createNetworkLayers(layers); + // if the task is imageClassification + case "imageclassification": + layers = [ + { + type: "conv2d", + filters: 8, + kernelSize: 5, + strides: 1, + activation: "relu", + kernelInitializer: "varianceScaling", + }, + { + type: "maxPooling2d", + poolSize: [2, 2], + strides: [2, 2], + }, + { + type: "conv2d", + filters: 16, + kernelSize: 5, + strides: 1, + activation: "relu", + kernelInitializer: "varianceScaling", + }, + { + type: "maxPooling2d", + poolSize: [2, 2], + strides: [2, 2], + }, + { + type: "flatten", + }, + { + type: "dense", + kernelInitializer: "varianceScaling", + activation: "softmax", + }, + ]; + return this.createNetworkLayers(layers); + + default: + console.log("no imputUnits or outputUnits defined"); + layers = [ + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", + }, + ]; + return this.createNetworkLayers(layers); + } + } + compile() { + const LEARNING_RATE = this.options.learningRate; + + let options = {}; + + if ( + this.options.task === "classification" || 
+ this.options.task === "imageClassification" + ) { + options = { + loss: "categoricalCrossentropy", + optimizer: tf.train.sgd, + metrics: ["accuracy"], + }; + } else if (this.options.task === "regression") { + options = { + loss: "meanSquaredError", + optimizer: tf.train.adam, + metrics: ["accuracy"], + }; + } + + options.optimizer = options.optimizer + ? this.neuralNetwork.setOptimizerFunction( + LEARNING_RATE, + options.optimizer + ) + : this.neuralNetwork.setOptimizerFunction(LEARNING_RATE, tf.train.sgd); + + this.neuralNetwork.compile(options); + + // if debug mode is true, then show the model summary + if (this.options.debug) { + this.neuralNetworkVis.modelSummary( + { + name: "Model Summary", + }, + this.neuralNetwork.model + ); + } + } + + + // prediction classification + + predictSync(_input) { + return this.predictSyncInternal(_input); + } + + predict(_input, _cb) { + return callCallback(this.predictInternal(_input), _cb); + } + + predictMultiple(_input, _cb) { + return callCallback(this.predictInternal(_input), _cb); + } + + classifySync(_input) { + return this.classifySyncInternal(_input); + } + + classify(_input, _cb) { + return callCallback(this.classifyInternal(_input), _cb); + } + + classifyMultiple(_input, _cb) { + return callCallback(this.classifyInternal(_input), _cb); + } + + predictSyncInternal(_input) { + const { meta } = this.neuralNetworkData; + + const inputData = this.formatInputsForPredictionAll(_input); + + const unformattedResults = this.neuralNetwork.predictSync(inputData); + inputData.dispose(); + + if (meta !== null) { + const labels = Object.keys(meta.outputs); + + const formattedResults = unformattedResults.map((unformattedResult) => { + return labels.map((item, idx) => { + // check to see if the data were normalized + // if not, then send back the values, otherwise + // unnormalize then return + let val; + let unNormalized; + if (meta.isNormalized) { + const { min, max } = meta.outputs[item]; + val = 
nnUtils.unnormalizeValue(unformattedResult[idx], min, max); + unNormalized = unformattedResult[idx]; + } else { + val = unformattedResult[idx]; + } + + const d = { + [labels[idx]]: val, + label: item, + value: val, + }; + + // if unNormalized is not undefined, then + // add that to the output + if (unNormalized) { + d.unNormalizedValue = unNormalized; + } + + return d; + }); + }); + + // return single array if the length is less than 2, + // otherwise return array of arrays + if (formattedResults.length < 2) { + return formattedResults[0]; + } + return formattedResults; + } + + // if no meta exists, then return unformatted results; + return unformattedResults; + } + + async predictInternal(_input) { + const { meta } = this.neuralNetworkData; + + const inputData = this.formatInputsForPredictionAll(_input); + + const unformattedResults = await this.neuralNetwork.predict(inputData); + inputData.dispose(); + + if (meta !== null) { + const labels = Object.keys(meta.outputs); + + const formattedResults = unformattedResults.map((unformattedResult) => { + return labels.map((item, idx) => { + // check to see if the data were normalized + // if not, then send back the values, otherwise + // unnormalize then return + let val; + let unNormalized; + if (meta.isNormalized) { + const { min, max } = meta.outputs[item]; + val = nnUtils.unnormalizeValue(unformattedResult[idx], min, max); + unNormalized = unformattedResult[idx]; + } else { + val = unformattedResult[idx]; + } + + const d = { + [labels[idx]]: val, + label: item, + value: val, + }; + + // if unNormalized is not undefined, then + // add that to the output + if (unNormalized) { + d.unNormalizedValue = unNormalized; + } + + return d; + }); + }); + + // return single array if the length is less than 2, + // otherwise return array of arrays + if (formattedResults.length < 2) { + return formattedResults[0]; + } + return formattedResults; + } + + // if no meta exists, then return unformatted results; + return 
unformattedResults; + } + + classifySyncInternal(_input) { + const { meta } = this.neuralNetworkData; + const headers = Object.keys(meta.inputs); + + let inputData; + + if (this.options.task === "imageClassification") { + // get the inputData for classification + // if it is a image type format it and + // flatten it + inputData = this.searchAndFormat(_input); + if (Array.isArray(inputData)) { + inputData = inputData.flat(); + } else { + inputData = inputData[headers[0]]; + } + + if (meta.isNormalized) { + // TODO: check to make sure this property is not static!!!! + const { min, max } = meta.inputs[headers[0]]; + inputData = this.neuralNetworkData.normalizeArray( + Array.from(inputData), + { min, max } + ); + } else { + inputData = Array.from(inputData); + } + + inputData = tf.tensor([inputData], [1, ...meta.inputUnits]); + } else { + inputData = this.formatInputsForPredictionAll(_input); + } + + const unformattedResults = this.neuralNetwork.classifySync(inputData); + inputData.dispose(); + + if (meta !== null) { + const label = Object.keys(meta.outputs)[0]; + const vals = Object.entries(meta.outputs[label].legend); + + const formattedResults = unformattedResults.map((unformattedResult) => { + return vals + .map((item, idx) => { + return { + [item[0]]: unformattedResult[idx], + label: item[0], + confidence: unformattedResult[idx], + }; + }) + .sort((a, b) => b.confidence - a.confidence); + }); + + // return single array if the length is less than 2, + // otherwise return array of arrays + if (formattedResults.length < 2) { + return formattedResults[0]; + } + return formattedResults; + } + + return unformattedResults; + } + + async classifyInternal(_input) { + const { meta } = this.neuralNetworkData; + const headers = Object.keys(meta.inputs); + + let inputData; + + if (this.options.task === "imageClassification") { + // get the inputData for classification + // if it is a image type format it and + // flatten it + inputData = this.searchAndFormat(_input); + if 
(Array.isArray(inputData)) { + inputData = inputData.flat(); + } else { + inputData = inputData[headers[0]]; + } + + if (meta.isNormalized) { + // TODO: check to make sure this property is not static!!!! + const { min, max } = meta.inputs[headers[0]]; + inputData = this.neuralNetworkData.normalizeArray( + Array.from(inputData), + { min, max } + ); + } else { + inputData = Array.from(inputData); + } + + inputData = tf.tensor([inputData], [1, ...meta.inputUnits]); + } else { + inputData = this.formatInputsForPredictionAll(_input); + } + + const unformattedResults = await this.neuralNetwork.classify(inputData); + inputData.dispose(); + + if (meta !== null) { + const label = Object.keys(meta.outputs)[0]; + const vals = Object.entries(meta.outputs[label].legend); + + const formattedResults = unformattedResults.map((unformattedResult) => { + return vals + .map((item, idx) => { + return { + [item[0]]: unformattedResult[idx], + label: item[0], + confidence: unformattedResult[idx], + }; + }) + .sort((a, b) => b.confidence - a.confidence); + }); + + // return single array if the length is less than 2, + // otherwise return array of arrays + if (formattedResults.length < 2) { + return formattedResults[0]; + } + return formattedResults; + } + + return unformattedResults; + } + + /** + * //////////////////////////////////////////////////////////// + * Save / Load Data + * //////////////////////////////////////////////////////////// + */ + + /** + * @public + * saves the training data to a JSON file. + * @param {string} [name] Optional - The name for the saved file. + * Should not include the file extension. + * Defaults to the current date and time. + * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. 
+ * @return {Promise} + */ + saveData(name, callback) { + const args = handleArguments(name, callback); + return callCallback(this.neuralNetworkData.saveData(args.name), args.callback); + } + + /** + * @public + * load data + * @param {string | FileList | Object} filesOrPath - The URL of the file to load, + * or a FileList object (.files) from an HTML element . + * @param {ML5Callback} [callback] Optional - A function to call when the loading is complete. + * @return {Promise} + */ + async loadData(filesOrPath, callback) { + return callCallback(this.neuralNetworkData.loadData(filesOrPath), callback); + } + + /** + * //////////////////////////////////////////////////////////// + * Save / Load Model + * //////////////////////////////////////////////////////////// + */ + + /** + * @public + * saves the model, weights, and metadata + * @param {string} [name] Optional - The name for the saved file. + * Should not include the file extension. + * Defaults to 'model'. + * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. + * @return {Promise} + */ + async save(name, callback) { + const args = handleArguments(name, callback); + const modelName = args.string || 'model'; + + // save the model + return callCallback(Promise.all([ + this.neuralNetwork.save(modelName), + this.neuralNetworkData.saveMeta(modelName) + ]), args.callback); + } + + /** + * @public - also called internally by init() when there is a modelUrl in the options + * load a model and metadata + * @param {string | FileList | Object} filesOrPath - The URL of the file to load, + * or a FileList object (.files) from an HTML element . + * @param {ML5Callback} [callback] Optional - A function to call when the loading is complete. 
+ * @return {Promise} + */ + async load(filesOrPath, callback) { + return callCallback(Promise.all([ + this.neuralNetwork.load(filesOrPath), + this.neuralNetworkData.loadMeta(filesOrPath) + ]), callback); + } + + /** + * dispose and release memory for a model + */ + dispose() { + this.neuralNetwork.dispose(); + } + + /** + * //////////////////////////////////////////////////////////// + * New methods for Neuro Evolution + * //////////////////////////////////////////////////////////// + */ + + /** + * mutate the weights of a model + * @param {*} rate + * @param {*} mutateFunction + */ + + mutate(rate, mutateFunction) { + this.neuralNetwork.mutate(rate, mutateFunction); + } + + /** + * create a new neural network with crossover + * @param {*} other + */ + + crossover(other) { + const nnCopy = this.copy(); + nnCopy.neuralNetwork.crossover(other.neuralNetwork); + return nnCopy; + } +} + +const neuralNetwork = (inputsOrOptions, outputsOrCallback, callback) => { + let options; + let cb; + + if (inputsOrOptions instanceof Object) { + options = inputsOrOptions; + cb = outputsOrCallback; + } else { + options = { + inputs: inputsOrOptions, + outputs: outputsOrCallback, + }; + cb = callback; + } + + const instance = new timeSeries(options, cb); + return instance; +}; + +export default neuralNetwork; diff --git a/src/LSTM/timeSeries.js b/src/LSTM/timeSeries.js new file mode 100644 index 00000000..9fe25e2b --- /dev/null +++ b/src/LSTM/timeSeries.js @@ -0,0 +1,304 @@ +import * as tf from "@tensorflow/tfjs"; +import { saveBlob } from "../utils/io"; +import { randomGaussian } from "../utils/random"; + +class NeuralNetwork { + constructor() { + // flags + this.isTrained = false; + this.isCompiled = false; + this.isLayered = false; + /** + * @type {tf.Sequential | null} - the TensorFlow model + */ + this.model = null; + + // methods + this.init = this.init.bind(this); + this.createModel = this.createModel.bind(this); + this.addLayer = this.addLayer.bind(this); + this.compile = 
this.compile.bind(this); + this.setOptimizerFunction = this.setOptimizerFunction.bind(this); + this.train = this.train.bind(this); + this.predict = this.predict.bind(this); + this.classify = this.classify.bind(this); + this.save = this.save.bind(this); + this.load = this.load.bind(this); + + // initialize + this.init(); + } + + /** + * initialize with create model + */ + init() { + this.createModel(); + } + + /** + * creates a sequential model + * uses switch/case for potential future where different formats are supported + * @param {*} _type + */ + createModel(_type = "sequential") { + switch (_type.toLowerCase()) { + case "sequential": + this.model = tf.sequential(); + return this.model; + default: + this.model = tf.sequential(); + return this.model; + } + } + + /** + * add layer to the model + * if the model has 2 or more layers switch the isLayered flag + * @param {tf.Layer} layer + * @void + */ + addLayer(layer) { + this.model.add(layer); + + // check if it has at least an input and output layer + if (this.model.layers.length >= 2) { + this.isLayered = true; + } + } + + /** + * Compile the model + * if the model is compiled, set the isCompiled flag to true + * @param {*} _modelOptions + */ + compile(_modelOptions) { + this.model.compile(_modelOptions); + this.isCompiled = true; + } + + /** + * Set the optimizer function given the learning rate + * as a parameter + * @param {*} learningRate + * @param {*} optimizer + */ + setOptimizerFunction(learningRate, optimizer) { + return optimizer.call(this, learningRate); + } + + /** + * Train the model + * @param {Object} _options + */ + async train(_options) { + const TRAINING_OPTIONS = _options; + + const xs = TRAINING_OPTIONS.inputs; + const ys = TRAINING_OPTIONS.outputs; + + const { batchSize, epochs, shuffle, validationSplit, whileTraining } = + TRAINING_OPTIONS; + + await this.model.fit(xs, ys, { + batchSize, + epochs, + shuffle, + validationSplit, + callbacks: whileTraining, + }); + + xs.dispose(); + 
ys.dispose(); + + this.isTrained = true; + } + + /** + * returns the prediction as an array synchronously + * @param {*} _inputs + */ + predictSync(_inputs) { + const output = tf.tidy(() => { + return this.model.predict(_inputs); + }); + const result = output.arraySync(); + + output.dispose(); + _inputs.dispose(); + + return result; + } + + /** + * returns the prediction as an array + * @param {*} _inputs + */ + async predict(_inputs) { + const output = tf.tidy(() => { + return this.model.predict(_inputs); + }); + const result = await output.array(); + + output.dispose(); + _inputs.dispose(); + + return result; + } + + /** + * classify is the same as .predict() + * @param {*} _inputs + */ + async classify(_inputs) { + return this.predict(_inputs); + } + + /** + * classify is the same as .predict() + * @param {*} _inputs + */ + classifySync(_inputs) { + return this.predictSync(_inputs); + } + + // predictMultiple + // classifyMultiple + // are the same as .predict() + + /** + * save the model.json and the weights.bin files + * @param {string} modelName + * @return {Promise} + */ + async save(modelName = "model") { + await this.model.save( + tf.io.withSaveHandler(async (data) => { + this.weightsManifest = { + modelTopology: data.modelTopology, + weightsManifest: [ + { + paths: [`./${modelName}.weights.bin`], + weights: data.weightSpecs, + }, + ], + }; + + await saveBlob( + data.weightData, + `${modelName}.weights.bin`, + "application/octet-stream" + ); + await saveBlob( + JSON.stringify(this.weightsManifest), + `${modelName}.json`, + "text/plain" + ); + }) + ); + } + + /** + * loads the model and weights + * @param {string | FileList | Object} filesOrPath + */ + async load(filesOrPath) { + if (filesOrPath instanceof FileList) { + const files = Array.from(filesOrPath); + // find the correct files + const model = files.find((file) => file.name.includes(".json") && !file.name.includes("_meta")); + const weights = files.find((file) => file.name.includes(".bin")); + // 
load the model + this.model = await tf.loadLayersModel( + tf.io.browserFiles([model, weights]) + ); + } else if (filesOrPath instanceof Object) { + this.model = await tf.loadLayersModel( + tf.io.http(filesOrPath.model, { + // Override the weights path from the JSON weightsManifest + weightUrlConverter: (weightFileName) => { + return filesOrPath.weights || weightFileName; + } + }) + ); + } else { + this.model = await tf.loadLayersModel(filesOrPath); + } + + this.isCompiled = true; + this.isLayered = true; + this.isTrained = true; + } + + /** + * dispose and release the memory for the model + */ + dispose() { + this.model.dispose(); + } + + // NeuroEvolution Functions + + /** + * mutate the weights of a model + * @param {*} rate + * @param {*} mutateFunction + */ + + mutate(rate = 0.1, mutateFunction) { + tf.tidy(() => { + const weights = this.model.getWeights(); + const mutatedWeights = []; + for (let i = 0; i < weights.length; i += 1) { + const tensor = weights[i]; + const { shape } = weights[i]; + // TODO: Evaluate if this should be sync or not + const values = tensor.dataSync().slice(); + for (let j = 0; j < values.length; j += 1) { + if (Math.random() < rate) { + if (mutateFunction) { + values[j] = mutateFunction(values[j]); + } else { + values[j] = Math.min( + Math.max(values[j] + randomGaussian(), -1), + 1 + ); + } + } + } + const newTensor = tf.tensor(values, shape); + mutatedWeights[i] = newTensor; + } + this.model.setWeights(mutatedWeights); + }); + } + + /** + * create a new neural network with crossover + * @param {*} other + */ + crossover(other) { + return tf.tidy(() => { + const weightsA = this.model.getWeights(); + const weightsB = other.model.getWeights(); + const childWeights = []; + for (let i = 0; i < weightsA.length; i += 1) { + const tensorA = weightsA[i]; + const tensorB = weightsB[i]; + const { shape } = weightsA[i]; + // TODO: Evaluate if this should be sync or not + const valuesA = tensorA.dataSync().slice(); + const valuesB = 
tensorB.dataSync().slice(); + for (let j = 0; j < valuesA.length; j += 1) { + if (Math.random() < 0.5) { + valuesA[j] = valuesB[j]; + } + } + const newTensor = tf.tensor(valuesA, shape); + childWeights[i] = newTensor; + } + this.model.setWeights(childWeights); + }); + } +} +export default NeuralNetwork; diff --git a/src/LSTM/timeSeriesData.js b/src/LSTM/timeSeriesData.js new file mode 100644 index 00000000..287552bd --- /dev/null +++ b/src/LSTM/timeSeriesData.js @@ -0,0 +1,873 @@ +import * as tf from "@tensorflow/tfjs"; +import axios from "axios"; +import { saveBlob } from "../utils/io"; +import modelLoader from '../utils/modelLoader'; +import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; + +class NeuralNetworkData { + constructor() { + this.meta = { + inputUnits: null, // Number + outputUnits: null, // Number + // objects describing input/output data by property name + inputs: {}, // { name1: {dtype}, name2: {dtype} } + outputs: {}, // { name1: {dtype} } + isNormalized: false, // Boolean - keep this in meta for model saving/loading + }; + + this.isMetadataReady = false; + this.isWarmedUp = false; + + this.data = { + raw: [], // array of {xs:{}, ys:{}} + }; + } + + /** + * //////////////////////////////////////////////////////// + * Summarize Data + * //////////////////////////////////////////////////////// + */ + + /** + * create the metadata from the data + * this covers: + * 1. getting the datatype from the data + * 2. getting the min and max from the data + * 3. getting the oneHot encoded values + * 4. 
getting the inputShape and outputUnits from the data + * @param {Array} [inputShape] + * @void + */ + createMetadata(inputShape = null) { + // get the data type for each property + this.getDTypesFromSeriesData(); + // get the stats - min, max + this.getDataStats(); + // onehot encode + this.getDataOneHot(); + // calculate the input units from the data + this.getDataUnits(inputShape); + + this.isMetadataReady = true; + } + + /* + * //////////////////////////////////////////////// + * data Summary + * //////////////////////////////////////////////// + */ + + /** + * get stats about the data + * @private + * @void + */ + getDataStats() { + this.meta.inputs = this.getInputMetaStats(this.meta.inputs, "xs"); + this.meta.outputs = this.getInputMetaStats(this.meta.outputs, "ys"); + } + + /** + * get back the min and max of each label + * @private + * @param {Object} inputOrOutputMeta + * @param {"xs" | "ys"} xsOrYs + * @return {Object} + */ + getInputMetaStats(inputOrOutputMeta, xsOrYs) { + const inputMeta = Object.assign({}, inputOrOutputMeta); + + Object.keys(inputMeta).forEach((k) => { + if (inputMeta[k].dtype === "string") { + inputMeta[k].min = 0; + inputMeta[k].max = 1; + } else if (inputMeta[k].dtype === "number") { + const dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]); + inputMeta[k].min = nnUtils.getMin(dataAsArray); + inputMeta[k].max = nnUtils.getMax(dataAsArray); + } else if (inputMeta[k].dtype === "array") { + const dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]).flat(); + inputMeta[k].min = nnUtils.getMin(dataAsArray); + inputMeta[k].max = nnUtils.getMax(dataAsArray); + } + }); + + return inputMeta; + } + + /** + * get the data units, inputshape and output units + * @private + * @param {Array} arrayShape + * @void + */ + getDataUnits(arrayShape = null) { + // if the data has a shape pass it in + if (arrayShape) { + this.meta.inputUnits = arrayShape; + } else { + this.meta.inputUnits = [this.getInputMetaUnits(this.meta.inputs)].flat(); 
+ } + + this.meta.outputUnits = this.getInputMetaUnits(this.meta.outputs); + } + + /** + * @private + * @param {Object} inputsMeta + * @return {number | Array} + */ + // eslint-disable-next-line class-methods-use-this + getInputMetaUnits(inputsMeta) { + let units = 0; + + Object.entries(inputsMeta).forEach((arr) => { + const { dtype } = arr[1]; + if (dtype === "number") { + units += 1; + } else if (dtype === "string") { + const { uniqueValues } = arr[1]; + + const uniqueCount = uniqueValues.length; + units += uniqueCount; + } else if (dtype === "array") { + // TODO: User must input the shape of the + // image size correctly. + units = []; + } + }); + + return units; + } + + /** + * getDTypesFromData + * gets the data types of the data we're using + * important for handling oneHot + * @private + * @void - updates this.meta + */ + getDTypesFromSeriesData() { + const meta = { + ...this.meta, + inputs: {}, + outputs: {}, + }; + + const sample = this.data.raw[0]; + const xs = Object.keys(sample.xs); + const ys = Object.keys(sample.ys); + + xs.forEach((prop) => { + meta.inputs[prop] = { + dtype: nnUtils.getDataType(sample.xs[prop]), + }; + }); + + ys.forEach((prop) => { + meta.outputs[prop] = { + dtype: nnUtils.getDataType(sample.ys[prop]), + }; + }); + + // TODO: check if all entries have the same dtype. 
+ // otherwise throw an error + + this.meta = meta; + } + + /** + * //////////////////////////////////////////////////////// + * Add Data + * //////////////////////////////////////////////////////// + */ + + /** + * Add Data + * @param {object} xInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array + * @param {*} yInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array + * @void - updates this.data + */ + addData(xInputObj, yInputObj) { + this.data.raw.push({ + xs: xInputObj, + ys: yInputObj, + }); + + console.log(this.data.raw); + } + + /** + * //////////////////////////////////////////////////////// + * Tensor handling + * //////////////////////////////////////////////////////// + */ + + /** + * convertRawToTensors + * converts array of {xs, ys} to tensors + * @param {*} dataRaw + * + * @return {{ inputs: tf.Tensor, outputs: tf.Tensor }} + */ + // eslint-disable-next-line class-methods-use-this, no-unused-vars + convertRawToTensors(dataRaw) { + const meta = Object.assign({}, this.meta); + const dataLength = dataRaw.length; + + return tf.tidy(() => { + const inputArr = []; + const outputArr = []; + + dataRaw.forEach((row) => { + // get xs + const xs = Object.keys(meta.inputs) + .map((k) => { + return row.xs[k]; + }) + .flat(); + + inputArr.push(xs); + + // get ys + const ys = Object.keys(meta.outputs) + .map((k) => { + return row.ys[k]; + }) + .flat(); + + outputArr.push(ys); + }); + + const inputs = tf.tensor(inputArr.flat(), [ + dataLength, + ...meta.inputUnits, + ]); + const outputs = tf.tensor(outputArr.flat(), [ + dataLength, + meta.outputUnits, + ]); + + return { + inputs, + outputs, + }; + }); + } + + /** + * //////////////////////////////////////////////////////// + * data normalization / unnormalization + * //////////////////////////////////////////////////////// + */ + + /** + * normalize the dataRaw input + * @return {Array} + */ + normalizeDataRaw() { + const 
normXs = this.normalizeInputData(this.meta.inputs, "xs"); + const normYs = this.normalizeInputData(this.meta.outputs, "ys"); + + const normalizedData = nnUtils.zipArrays(normXs, normYs); + + return normalizedData; + } + + /** + * @param {Object} inputOrOutputMeta + * @param {"xs" | "ys"} xsOrYs + * @return {Array} + */ + normalizeInputData(inputOrOutputMeta, xsOrYs) { + const dataRaw = this.data.raw; + // the data length + const dataLength = dataRaw.length; + // the copy of the inputs.meta[inputOrOutput] + const inputMeta = Object.assign({}, inputOrOutputMeta); + + // normalized output object + const normalized = {}; + Object.keys(inputMeta).forEach((k) => { + // get the min and max values + const options = { + min: inputMeta[k].min, + max: inputMeta[k].max, + }; + + const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); + // depending on the input type, normalize accordingly + if (inputMeta[k].dtype === "string") { + options.legend = inputMeta[k].legend; + normalized[k] = this.normalizeArray(dataAsArray, options); + } else if (inputMeta[k].dtype === "number") { + normalized[k] = this.normalizeArray(dataAsArray, options); + } else if (inputMeta[k].dtype === "array") { + normalized[k] = dataAsArray.map((item) => + this.normalizeArray(item, options) + ); + } + }); + + // create a normalized version of data.raws + const output = [...new Array(dataLength).fill(null)].map((item, idx) => { + const row = { + [xsOrYs]: {}, + }; + + Object.keys(inputMeta).forEach((k) => { + row[xsOrYs][k] = normalized[k][idx]; + }); + + return row; + }); + + return output; + } + + /** + * normalizeArray + * @param {*} _input + * @param {*} _options + */ + // eslint-disable-next-line no-unused-vars, class-methods-use-this + normalizeArray(inputArray, options) { + const { min, max } = options; + + // if the data are onehot encoded, replace the string + // value with the onehot array + // if none exists, return the given value + if (options.legend) { + const normalized = 
inputArray.map((v) => { + return options.legend[v] ? options.legend[v] : v; + }); + return normalized; + } + + // if the dtype is a number + if (inputArray.every((v) => typeof v === "number")) { + const normalized = inputArray.map((v) => + nnUtils.normalizeValue(v, min, max) + ); + return normalized; + } + + // otherwise return the input array + // return inputArray; + throw new Error("error in inputArray of normalizeArray() function"); + } + + /** + * unNormalizeArray + * @param {*} _input + * @param {*} _options + */ + // eslint-disable-next-line no-unused-vars, class-methods-use-this + unnormalizeArray(inputArray, options) { + const { min, max } = options; + + // if the data is onehot encoded then remap the + // values from those oneHot arrays + if (options.legend) { + const unnormalized = inputArray.map((v) => { + let res; + Object.entries(options.legend).forEach((item) => { + const key = item[0]; + const val = item[1]; + const matches = v + .map((num, idx) => num === val[idx]) + .every((truthy) => truthy === true); + if (matches) res = key; + }); + return res; + }); + + return unnormalized; + } + + // if the dtype is a number + if (inputArray.every((v) => typeof v === "number")) { + const unnormalized = inputArray.map((v) => + nnUtils.unnormalizeValue(v, min, max) + ); + return unnormalized; + } + + // otherwise return the input array + // return inputArray; + throw new Error("error in inputArray of normalizeArray() function"); + } + + /* + * //////////////////////////////////////////////// + * One hot encoding handling + * //////////////////////////////////////////////// + */ + + /** + * applyOneHotEncodingsToDataRaw + * does not set this.data.raws + * but rather returns them + */ + applyOneHotEncodingsToDataRaw() { + const meta = Object.assign({}, this.meta); + + const output = this.data.raw.map((row) => { + const xs = { + ...row.xs, + }; + const ys = { + ...row.ys, + }; + // get xs + Object.keys(meta.inputs).forEach((k) => { + if (meta.inputs[k].legend) { + 
xs[k] = meta.inputs[k].legend[row.xs[k]]; + } + }); + + Object.keys(meta.outputs).forEach((k) => { + if (meta.outputs[k].legend) { + ys[k] = meta.outputs[k].legend[row.ys[k]]; + } + }); + + + return { + xs, + ys, + }; + }); + console.log('onhot',output); + return output; + } + + /** + * getDataOneHot + * creates onehot encodings for the input and outputs + * and adds them to the meta info + * @private + * @void + */ + getDataOneHot() { + this.meta.inputs = this.getInputMetaOneHot(this.meta.inputs, "xs"); + this.meta.outputs = this.getInputMetaOneHot(this.meta.outputs, "ys"); + } + + /** + * getOneHotMeta + * @param {Object} _inputsMeta + * @param {"xs" | "ys"} xsOrYs + * @return {Object} + */ + getInputMetaOneHot(_inputsMeta, xsOrYs) { + const inputsMeta = Object.assign({}, _inputsMeta); + + Object.entries(inputsMeta).forEach((arr) => { + // the key + const key = arr[0]; + // the value + const { dtype } = arr[1]; + + if (dtype === "string") { + const uniqueVals = [ + ...new Set(this.data.raw.map((obj) => obj[xsOrYs][key])), + ]; + const oneHotMeta = this.createOneHotEncodings(uniqueVals); + inputsMeta[key] = { + ...inputsMeta[key], + ...oneHotMeta, + }; + } + }); + + return inputsMeta; + } + + /** + * Returns a legend mapping the + * data values to oneHot encoded values + * @private + * @param {Array} _uniqueValuesArray + * @return {Object} + */ + // eslint-disable-next-line class-methods-use-this, no-unused-vars + createOneHotEncodings(_uniqueValuesArray) { + return tf.tidy(() => { + const output = { + uniqueValues: _uniqueValuesArray, + legend: {}, + }; + + const uniqueVals = _uniqueValuesArray; // [...new Set(this.data.raw.map(obj => obj.xs[prop]))] + // get back values from 0 to the length of the uniqueVals array + const onehotValues = uniqueVals.map((item, idx) => idx); + // oneHot encode the values in the 1d tensor + const oneHotEncodedValues = tf.oneHot( + tf.tensor1d(onehotValues, "int32"), + uniqueVals.length + ); + // convert them from tensors back out to 
an array + const oneHotEncodedValuesArray = oneHotEncodedValues.arraySync(); + + // populate the legend with the key/values + uniqueVals.forEach((uVal, uIdx) => { + output.legend[uVal] = oneHotEncodedValuesArray[uIdx]; + }); + + return output; + }); + } + + /** + * //////////////////////////////////////////////// + * saving / loading data + * //////////////////////////////////////////////// + */ + + /** + * Loads data from a URL using the appropriate function + * @param {*} dataUrl + * @param {*} inputs + * @param {*} outputs + * @void + */ + async loadDataFromUrl(dataUrl, inputs, outputs) { + try { + + if (dataUrl.endsWith(".csv")) { + await this.loadCSV(dataUrl, inputs, outputs); + } else if (dataUrl.endsWith(".json")) { + await this.loadJSON(dataUrl, inputs, outputs); + } else if (dataUrl.includes("blob")) { + await this.loadBlob(dataUrl, inputs, outputs); + } else { + throw new Error("Not a valid data format. Must be csv or json"); + } + } catch (error) { + console.error(error); + throw new Error(error); + } + } + + /** + * loadJSON + * @param {*} dataUrlOrJson + * @param {*} inputLabels + * @param {*} outputLabels + * @void + */ + async loadJSON(dataUrlOrJson, inputLabels, outputLabels) { + try { + let json; + // handle loading parsedJson + if (dataUrlOrJson instanceof Object) { + json = Object.assign({}, dataUrlOrJson); + } else { + const { data } = await axios.get(dataUrlOrJson); + json = data; + } + + // format the data.raw array + this.formatRawData(json, inputLabels, outputLabels); + } catch (err) { + console.error("error loading json"); + throw new Error(err); + } + } + + /** + * loadCSV + * @param {*} dataUrl + * @param {*} inputLabels + * @param {*} outputLabels + * @void + */ + async loadCSV(dataUrl, inputLabels, outputLabels) { + try { + const myCsv = tf.data.csv(dataUrl); + const loadedData = await myCsv.toArray(); + const json = { + entries: loadedData, + }; + // format the data.raw array + this.formatRawData(json, inputLabels, outputLabels); + } 
catch (err) { + console.error("error loading csv", err); + throw new Error(err); + } + } + + /** + * loadBlob + * @param {*} dataUrlOrJson + * @param {*} inputLabels + * @param {*} outputLabels + * @void + */ + async loadBlob(dataUrlOrJson, inputLabels, outputLabels) { + try { + const { data } = await axios.get(dataUrlOrJson); + const text = data; // await data.text(); + + if (nnUtils.isJsonOrString(text)) { + const json = JSON.parse(text); + await this.loadJSON(json, inputLabels, outputLabels); + } else { + const json = this.csvToJSON(text); + await this.loadJSON(json, inputLabels, outputLabels); + } + } catch (err) { + console.log("mmm might be passing in a string or something!", err); + throw new Error(err); + } + } + + /** + * loadData from fileinput or path + * @param {string | FileList | Object} filesOrPath + * @return {Promise} + */ + async loadData(filesOrPath) { + try { + let loadedData; + + if (typeof filesOrPath !== "string") { + const file = filesOrPath[0]; + const fr = new FileReader(); + fr.readAsText(file); + if (file.name.includes(".json")) { + const temp = await file.text(); + loadedData = JSON.parse(temp); + } else { + console.log( + 'data must be a json object containing an array called "data" or "entries' + ); + } + } else { + loadedData = await axios.get(filesOrPath, { responseType: "text" }); + const text = JSON.stringify(loadedData.data); + if (nnUtils.isJsonOrString(text)) { + loadedData = JSON.parse(text); + } else { + console.log( + "Whoops! something went wrong. 
Either this kind of data is not supported yet or there is an issue with .loadData" + ); + } + } + + this.data.raw = this.findEntries(loadedData); + + // check if a data or entries property exists + if (!this.data.raw.length > 0) { + console.log( + 'data must be a json object containing an array called "data" ' + ); + } + } catch (error) { + throw new Error(error); + } + } + + /** + * saveData + * @param {string} [name] + * @return {Promise} + */ + async saveData(name) { + const today = new Date(); + const date = `${String(today.getFullYear())}-${String( + today.getMonth() + 1 + )}-${String(today.getDate())}`; + const time = `${String(today.getHours())}-${String( + today.getMinutes() + )}-${String(today.getSeconds())}`; + const datetime = `${date}_${time}`; + + let dataName = datetime; + if (name) dataName = name; + + const output = { + data: this.data.raw, + }; + + await saveBlob(JSON.stringify(output), `${dataName}.json`, "text/plain"); + } + + /** + * Saves metadata of the data + * @param {string} modelName + * @return {Promise} + */ + async saveMeta(modelName = "model") { + await saveBlob( + JSON.stringify(this.meta), + `${modelName}_meta.json`, + "text/plain" + ); + } + + /** + * load a model and metadata + * @param {string | FileList | Object} filesOrPath + * @return {Promise} + */ + async loadMeta(filesOrPath) { + if (filesOrPath instanceof FileList) { + const file = Array.from(filesOrPath).find((file) => + file.name.includes("_meta.json") + ); + if (!file) { + console.warn('no model_meta.json file found in FileList'); + return; + } + const text = await file.text(); + this.meta = JSON.parse(text); + } else if (filesOrPath instanceof Object) { + // filesOrPath = {model: URL, metadata: URL, weights: URL} + const metadataResult = await axios.get(filesOrPath.metadata); + this.meta = metadataResult.data; + } else { + const loader = modelLoader(filesOrPath); + // TODO: it is not always "model_meta.json", it is "{model_name}_meta.json" + const metaPath = 
loader.getPath("model_meta.json"); + // TODO: figure out how to use loader.loadMetadataJson() which has wrapped error messages + const metadataResult = await axios.get(metaPath); + this.meta = metadataResult.data; + } + + this.isMetadataReady = true; + this.isWarmedUp = true; + } + + /* + * //////////////////////////////////////////////// + * data loading helpers + * //////////////////////////////////////////////// + */ + + /** + * // TODO: convert ys into strings, if the task is classification + // if (this.config.architecture.task === "classification" && typeof output.ys[prop] !== "string") { + // output.ys[prop] += ""; + // } + * formatRawData + * takes a json and set the this.data.raw + * @param {*} json + * @param {Array} inputLabels + * @param {Array} outputLabels + * @void + */ + formatRawData(json, inputLabels, outputLabels) { + // Recurse through the json object to find + // an array containing `entries` or `data` + const dataArray = this.findEntries(json); + + if (!dataArray.length > 0) { + console.log(`your data must be contained in an array in \n + a property called 'entries' or 'data' of your json object`); + } + + // create an array of json objects [{xs,ys}] + const result = dataArray.map((item, idx) => { + const output = { + xs: {}, + ys: {}, + }; + + inputLabels.forEach((k) => { + if (item[k] !== undefined) { + output.xs[k] = item[k]; + } else { + console.error(`the input label ${k} does not exist at row ${idx}`); + } + }); + + outputLabels.forEach((k) => { + if (item[k] !== undefined) { + output.ys[k] = item[k]; + } else { + console.error(`the output label ${k} does not exist at row ${idx}`); + } + }); + + return output; + }); + + // set this.data.raw + this.data.raw = result; + } + + /** + * csvToJSON + * Creates a csv from a string + * @param {*} csv + */ + // via: http://techslides.com/convert-csv-to-json-in-javascript + // eslint-disable-next-line class-methods-use-this + csvToJSON(csv) { + // split the string by linebreak + const lines = 
csv.split("\n"); + const result = []; + // get the header row as an array + const headers = lines[0].split(","); + + // iterate through every row + for (let i = 1; i < lines.length; i += 1) { + // create a json object for each row + const row = {}; + // split the current line into an array + const currentline = lines[i].split(","); + + // for each header, create a key/value pair + headers.forEach((k, idx) => { + row[k] = currentline[idx]; + }); + // add this to the result array + result.push(row); + } + + return { + entries: result, + }; + } + + /** + * findEntries + * recursively attempt to find the entries + * or data array for the given json object + * @param {*} _data + */ + findEntries(_data) { + const parentCopy = Object.assign({}, _data); + + if (parentCopy.entries && parentCopy.entries instanceof Array) { + return parentCopy.entries; + } else if (parentCopy.data && parentCopy.data instanceof Array) { + return parentCopy.data; + } + + const keys = Object.keys(parentCopy); + // eslint-disable-next-line consistent-return + keys.forEach((k) => { + if (typeof parentCopy[k] === "object") { + return this.findEntries(parentCopy[k]); + } + }); + + return parentCopy; + } + + /** + * getData + * return data object's raw array + * to make getting raw data easier + */ + getData() { + const rawArray = this.data.raw; + return rawArray; + } +} + +export default NeuralNetworkData; diff --git a/src/LSTM/timeSeriesUtils.js b/src/LSTM/timeSeriesUtils.js new file mode 100644 index 00000000..3093b621 --- /dev/null +++ b/src/LSTM/timeSeriesUtils.js @@ -0,0 +1,95 @@ +import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; + +class TimeSeriesUtils { + constructor(options) { + this.options = options || {}; + } + + prepareLabels(xInputs, yInputs, options = null){ + const {inputs, outputs} = this.options; + + let inputLabels; + let outputLabels; + + // options-based values to assign + if (options !== null) { + ({inputLabels, outputLabels} = options) + } else if (inputs.length > 0 
&& outputs.length > 0) { + if (inputs.every((item) => typeof item === "string")) { + inputLabels = inputs; + } + if (outputs.every((item) => typeof item === "string")) { + outputLabels = outputs; + } + + // input-based values to assign + } else { + inputLabels = this.labelsFromNestedArray(xInputs); + if (typeof yInputs === "object") { + outputLabels = Object.keys(yInputs); + } else { + inputLabels = this.labelsFromNestedArray(yInputs); + } + } + + + // Make sure that the inputLabels and outputLabels are arrays + if (!(inputLabels instanceof Array)) { + throw new Error("inputLabels must be an array"); + } + if (!(outputLabels instanceof Array)) { + throw new Error("outputLabels must be an array"); + } + + return inputLabels, outputLabels + + } + + labelsFromNestedArray(data){ + function processData(data, prefix = 'label') { + // Recursive function to find the deepest level of the data and return the result + function traverse(value) { + if (Array.isArray(value)) { + if (value.length > 0 && typeof value[0] === 'string') { + // If the deepest unit is an array with strings + return { type: 'array', data: value }; + } else if (value.length > 0 && typeof value[0] === 'number') { + // If the deepest unit is an array with numbers + return { type: 'array', data: value }; + } else { + for (const item of value) { + const result = traverse(item); + if (result) return result; + } + } + } else if (value !== null && typeof value === 'object') { + return { type: 'object', data: value }; // If the deepest unit is an object + } + return null; + } + + const result = traverse(data); + + if (result) { + if (result.type === 'object') { + // If the deepest level is an object, get the unique keys + return Object.keys(result.data); + } else if (result.type === 'array') { + // If the deepest level is an array with strings or numbers, get the labels + return result.data.map((_, index) => `${prefix}_${index}`); + } + } else { + // No recognizable structure found + throw new Error('Data does 
not match expected structure for objects or arrays.'); + } + } + } +} + +const timeSeriesUtils = () => { + const instance = new TimeSeriesUtils(); + return instance; +}; + +export default timeSeriesUtils(); + \ No newline at end of file From 212e3a2ee3e86fd77168e2eb4b2b77ae3db85e1b Mon Sep 17 00:00:00 2001 From: mop9047 Date: Wed, 17 Jul 2024 15:15:01 +0800 Subject: [PATCH 04/13] added custom method for addData --- .../sketch-new.js | 97 -- .../sketch-old.js | 108 -- .../timeSeries-mousexy-keypoints/sketch.js | 228 ++-- .../timeSeries-mousexy-keypoints/sketch111.js | 178 --- src/LSTM/index-1.js | 302 ----- src/LSTM/index.js | 214 ++-- src/LSTM/index111.js | 1084 ----------------- src/LSTM/timeSeries.js | 75 +- src/LSTM/timeSeriesUtils.js | 177 ++- 9 files changed, 426 insertions(+), 2037 deletions(-) delete mode 100644 examples/timeSeries-mousexy-keypoints/sketch-new.js delete mode 100644 examples/timeSeries-mousexy-keypoints/sketch-old.js delete mode 100644 examples/timeSeries-mousexy-keypoints/sketch111.js delete mode 100644 src/LSTM/index-1.js delete mode 100644 src/LSTM/index111.js diff --git a/examples/timeSeries-mousexy-keypoints/sketch-new.js b/examples/timeSeries-mousexy-keypoints/sketch-new.js deleted file mode 100644 index bc305dd5..00000000 --- a/examples/timeSeries-mousexy-keypoints/sketch-new.js +++ /dev/null @@ -1,97 +0,0 @@ -let model; -let targetLabel = 'C'; - -let state = 'collection'; - -let notes = { - C: 261.6256, - D: 293.6648, - E: 329.6276 -} - -let env,wave; - -function setup() { - createCanvas(400, 400); - ml5.setBackend('webgl') - - - - let options = { - inputs: ['x', 'y'], - outputs: ['label'], - task: 'classification', - debug: 'true' - }; - model = ml5.timeSeries(options); - background(255); -} - -function keyPressed() { - - if (key == 't') { - state = 'training'; - console.log('starting training'); - model.normalizeData(); - let options = { - epochs: 200 - } - model.train(options, whileTraining, finishedTraining); - } else { - 
targetLabel = key.toUpperCase(); - } -} - -function whileTraining(epoch, loss) { - console.log(epoch); -} - -function finishedTraining() { - console.log('finished training.'); - state = 'prediction'; -} - - -function mousePressed() { - - let inputs = { - x: mouseX, - y: mouseY - } - - if (state == 'collection') { - let target = { - label: targetLabel - } - model.addData(inputs, target); - console.log('yeah') - stroke(0); - noFill(); - ellipse(mouseX, mouseY, 24); - fill(0); - noStroke(); - textAlign(CENTER, CENTER); - text(targetLabel, mouseX, mouseY); - - } else if (state == 'prediction') { - model.classify(inputs, gotResults); - - } - -} - -function gotResults(error, results) { - if (error) { - console.error(error); - return; - } - console.log(results); - stroke(0); - fill(0, 0, 255, 100); - ellipse(mouseX, mouseY, 24); - fill(0); - noStroke(); - textAlign(CENTER, CENTER); - let label = results[0].label; - text(label, mouseX, mouseY); -} \ No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/sketch-old.js b/examples/timeSeries-mousexy-keypoints/sketch-old.js deleted file mode 100644 index 9f45c409..00000000 --- a/examples/timeSeries-mousexy-keypoints/sketch-old.js +++ /dev/null @@ -1,108 +0,0 @@ -let sequence = []; -let datasets = []; - -let pressedOnce = true; - -let boxes = - -[[{"name":"box","mouse":[81,102]},{"name":"box","mouse":[85,101]},{"name":"box","mouse":[134,105]},{"name":"box","mouse":[182,106]},{"name":"box","mouse":[219,108]},{"name":"box","mouse":[244,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[258,108]},{"name":"box","mouse":[263,154]},{"name":"box","mouse":[267,215]},{"name":"box","mouse":[269,235]},{"name":"box","mouse":[269,239]},{"name":"box","mouse":[265,239]},{"name":"box","mouse":[195,236]},{"name":"box","mouse":[147,243]},{"name":"box","mouse":[113,243]},{"name":"box","mouse":[111,242]},{"name":"box","mouse":[106,236]},{"name":"box","mouse":[101,191]},{"name":"box","mouse":[95,162]}], - 
-[{"name":"box","mouse":[105,97]},{"name":"box","mouse":[149,99]},{"name":"box","mouse":[191,99]},{"name":"box","mouse":[245,99]},{"name":"box","mouse":[257,99]},{"name":"box","mouse":[260,100]},{"name":"box","mouse":[255,144]},{"name":"box","mouse":[249,215]},{"name":"box","mouse":[249,239]},{"name":"box","mouse":[248,240]},{"name":"box","mouse":[200,234]},{"name":"box","mouse":[134,232]},{"name":"box","mouse":[105,232]},{"name":"box","mouse":[105,229]},{"name":"box","mouse":[96,179]},{"name":"box","mouse":[96,121]},{"name":"box","mouse":[97,97]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]},{"name":"box","mouse":[97,96]}], - -[{"name":"box","mouse":[94,94]},{"name":"box","mouse":[133,95]},{"name":"box","mouse":[194,100]},{"name":"box","mouse":[245,100]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[261,102]},{"name":"box","mouse":[257,132]},{"name":"box","mouse":[253,196]},{"name":"box","mouse":[253,227]},{"name":"box","mouse":[253,228]},{"name":"box","mouse":[241,227]},{"name":"box","mouse":[183,221]},{"name":"box","mouse":[137,219]},{"name":"box","mouse":[105,219]},{"name":"box","mouse":[104,219]},{"name":"box","mouse":[98,188]},{"name":"box","mouse":[97,157]},{"name":"box","mouse":[97,132]},{"name":"box","mouse":[97,112]},{"name":"box","mouse":[98,101]}], - -[{"name":"box","mouse":[88,92]},{"name":"box","mouse":[94,91]},{"name":"box","mouse":[158,94]},{"name":"box","mouse":[209,96]},{"name":"box","mouse":[244,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[245,96]},{"name":"box","mouse":[244,140]},{"name":"box","mouse":[238,203]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[238,226]},{"name":"box","mouse":[202,222]},{"name":"box","mouse":[154,220]},{"name":"box","mouse":[96,220]},{"name":"box","mouse":[92,220]},{"name":"box","mouse":[90,217]},{"name":"box","mouse":[89,183]},{"name":"box","mouse":[89,149]},{"name":"box","mouse":[89,124]},{"name":"box","mouse":[89,98]}], - 
-[{"name":"box","mouse":[98,100]},{"name":"box","mouse":[169,101]},{"name":"box","mouse":[278,104]},{"name":"box","mouse":[310,104]},{"name":"box","mouse":[312,104]},{"name":"box","mouse":[312,148]},{"name":"box","mouse":[313,244]},{"name":"box","mouse":[314,248]},{"name":"box","mouse":[313,250]},{"name":"box","mouse":[222,235]},{"name":"box","mouse":[151,237]},{"name":"box","mouse":[85,239]},{"name":"box","mouse":[82,231]},{"name":"box","mouse":[79,185]},{"name":"box","mouse":[84,140]},{"name":"box","mouse":[93,113]},{"name":"box","mouse":[97,104]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]},{"name":"box","mouse":[97,100]}], - -[{"name":"box","mouse":[69,98]},{"name":"box","mouse":[149,97]},{"name":"box","mouse":[263,97]},{"name":"box","mouse":[303,98]},{"name":"box","mouse":[303,100]},{"name":"box","mouse":[300,201]},{"name":"box","mouse":[306,296]},{"name":"box","mouse":[307,300]},{"name":"box","mouse":[293,296]},{"name":"box","mouse":[199,275]},{"name":"box","mouse":[145,272]},{"name":"box","mouse":[109,272]},{"name":"box","mouse":[99,269]},{"name":"box","mouse":[95,266]},{"name":"box","mouse":[87,206]},{"name":"box","mouse":[82,140]},{"name":"box","mouse":[82,116]},{"name":"box","mouse":[82,102]},{"name":"box","mouse":[82,100]},{"name":"box","mouse":[82,100]}], - -[{"name":"box","mouse":[61,103]},{"name":"box","mouse":[68,100]},{"name":"box","mouse":[145,101]},{"name":"box","mouse":[221,104]},{"name":"box","mouse":[264,104]},{"name":"box","mouse":[291,107]},{"name":"box","mouse":[291,109]},{"name":"box","mouse":[289,177]},{"name":"box","mouse":[281,248]},{"name":"box","mouse":[281,271]},{"name":"box","mouse":[278,272]},{"name":"box","mouse":[230,270]},{"name":"box","mouse":[179,268]},{"name":"box","mouse":[112,268]},{"name":"box","mouse":[83,268]},{"name":"box","mouse":[79,261]},{"name":"box","mouse":[77,211]},{"name":"box","mouse":[77,168]},{"name":"box","mouse":[77,129]},{"name":"box","mouse":[76,117]}], - 
-[{"name":"box","mouse":[79,108]},{"name":"box","mouse":[98,105]},{"name":"box","mouse":[178,105]},{"name":"box","mouse":[257,106]},{"name":"box","mouse":[277,108]},{"name":"box","mouse":[285,110]},{"name":"box","mouse":[286,119]},{"name":"box","mouse":[285,218]},{"name":"box","mouse":[285,263]},{"name":"box","mouse":[284,265]},{"name":"box","mouse":[273,264]},{"name":"box","mouse":[197,264]},{"name":"box","mouse":[131,262]},{"name":"box","mouse":[92,257]},{"name":"box","mouse":[88,256]},{"name":"box","mouse":[85,222]},{"name":"box","mouse":[81,172]},{"name":"box","mouse":[79,137]},{"name":"box","mouse":[79,113]},{"name":"box","mouse":[79,105]}], - -[{"name":"box","mouse":[81,122]},{"name":"box","mouse":[91,118]},{"name":"box","mouse":[190,124]},{"name":"box","mouse":[291,129]},{"name":"box","mouse":[309,131]},{"name":"box","mouse":[313,152]},{"name":"box","mouse":[305,201]},{"name":"box","mouse":[298,255]},{"name":"box","mouse":[297,274]},{"name":"box","mouse":[292,276]},{"name":"box","mouse":[238,268]},{"name":"box","mouse":[185,266]},{"name":"box","mouse":[129,266]},{"name":"box","mouse":[101,265]},{"name":"box","mouse":[97,264]},{"name":"box","mouse":[97,219]},{"name":"box","mouse":[97,165]},{"name":"box","mouse":[95,134]},{"name":"box","mouse":[93,122]},{"name":"box","mouse":[93,116]}], - -[{"name":"box","mouse":[101,102]},{"name":"box","mouse":[134,100]},{"name":"box","mouse":[237,104]},{"name":"box","mouse":[275,104]},{"name":"box","mouse":[283,105]},{"name":"box","mouse":[285,132]},{"name":"box","mouse":[285,197]},{"name":"box","mouse":[285,237]},{"name":"box","mouse":[286,278]},{"name":"box","mouse":[286,285]},{"name":"box","mouse":[249,284]},{"name":"box","mouse":[186,274]},{"name":"box","mouse":[121,263]},{"name":"box","mouse":[111,262]},{"name":"box","mouse":[108,234]},{"name":"box","mouse":[105,176]},{"name":"box","mouse":[101,130]},{"name":"box","mouse":[97,98]},{"name":"box","mouse":[97,88]},{"name":"box","mouse":[97,88]}]] - -let circles = - 
-[[{"name":"circle","mouse":[163,93]},{"name":"circle","mouse":[197,92]},{"name":"circle","mouse":[270,122]},{"name":"circle","mouse":[292,160]},{"name":"circle","mouse":[292,226]},{"name":"circle","mouse":[257,278]},{"name":"circle","mouse":[169,300]},{"name":"circle","mouse":[75,283]},{"name":"circle","mouse":[61,234]},{"name":"circle","mouse":[64,192]},{"name":"circle","mouse":[80,134]},{"name":"circle","mouse":[101,97]},{"name":"circle","mouse":[133,82]},{"name":"circle","mouse":[148,81]},{"name":"circle","mouse":[172,83]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[205,90]},{"name":"circle","mouse":[202,90]},{"name":"circle","mouse":[181,85]}], - -[{"name":"circle","mouse":[168,63]},{"name":"circle","mouse":[265,93]},{"name":"circle","mouse":[312,152]},{"name":"circle","mouse":[318,206]},{"name":"circle","mouse":[293,265]},{"name":"circle","mouse":[258,294]},{"name":"circle","mouse":[206,301]},{"name":"circle","mouse":[148,278]},{"name":"circle","mouse":[105,240]},{"name":"circle","mouse":[97,193]},{"name":"circle","mouse":[106,153]},{"name":"circle","mouse":[125,111]},{"name":"circle","mouse":[149,81]},{"name":"circle","mouse":[164,73]},{"name":"circle","mouse":[191,68]},{"name":"circle","mouse":[249,73]},{"name":"circle","mouse":[275,91]},{"name":"circle","mouse":[301,126]},{"name":"circle","mouse":[314,169]},{"name":"circle","mouse":[313,222]}], - 
-[{"name":"circle","mouse":[169,87]},{"name":"circle","mouse":[222,95]},{"name":"circle","mouse":[276,125]},{"name":"circle","mouse":[309,186]},{"name":"circle","mouse":[309,249]},{"name":"circle","mouse":[286,281]},{"name":"circle","mouse":[212,307]},{"name":"circle","mouse":[170,304]},{"name":"circle","mouse":[123,262]},{"name":"circle","mouse":[95,217]},{"name":"circle","mouse":[90,158]},{"name":"circle","mouse":[109,113]},{"name":"circle","mouse":[130,85]},{"name":"circle","mouse":[153,81]},{"name":"circle","mouse":[207,82]},{"name":"circle","mouse":[235,93]},{"name":"circle","mouse":[253,109]},{"name":"circle","mouse":[293,188]},{"name":"circle","mouse":[289,217]},{"name":"circle","mouse":[219,290]}], - -[{"name":"circle","mouse":[165,85]},{"name":"circle","mouse":[235,98]},{"name":"circle","mouse":[266,129]},{"name":"circle","mouse":[293,184]},{"name":"circle","mouse":[298,237]},{"name":"circle","mouse":[289,281]},{"name":"circle","mouse":[261,316]},{"name":"circle","mouse":[214,330]},{"name":"circle","mouse":[170,329]},{"name":"circle","mouse":[129,307]},{"name":"circle","mouse":[102,264]},{"name":"circle","mouse":[96,221]},{"name":"circle","mouse":[105,172]},{"name":"circle","mouse":[124,125]},{"name":"circle","mouse":[141,95]},{"name":"circle","mouse":[161,85]},{"name":"circle","mouse":[221,92]},{"name":"circle","mouse":[258,111]},{"name":"circle","mouse":[283,137]},{"name":"circle","mouse":[299,169]}], - 
-[{"name":"circle","mouse":[187,94]},{"name":"circle","mouse":[255,109]},{"name":"circle","mouse":[296,157]},{"name":"circle","mouse":[321,231]},{"name":"circle","mouse":[317,284]},{"name":"circle","mouse":[293,331]},{"name":"circle","mouse":[259,339]},{"name":"circle","mouse":[169,329]},{"name":"circle","mouse":[129,305]},{"name":"circle","mouse":[101,242]},{"name":"circle","mouse":[98,196]},{"name":"circle","mouse":[117,145]},{"name":"circle","mouse":[144,121]},{"name":"circle","mouse":[180,106]},{"name":"circle","mouse":[207,105]},{"name":"circle","mouse":[239,111]},{"name":"circle","mouse":[261,123]},{"name":"circle","mouse":[285,144]},{"name":"circle","mouse":[302,185]},{"name":"circle","mouse":[307,228]}], - -[{"name":"circle","mouse":[181,73]},{"name":"circle","mouse":[242,84]},{"name":"circle","mouse":[301,117]},{"name":"circle","mouse":[336,189]},{"name":"circle","mouse":[344,253]},{"name":"circle","mouse":[321,297]},{"name":"circle","mouse":[270,319]},{"name":"circle","mouse":[204,319]},{"name":"circle","mouse":[139,317]},{"name":"circle","mouse":[73,280]},{"name":"circle","mouse":[57,237]},{"name":"circle","mouse":[58,181]},{"name":"circle","mouse":[76,125]},{"name":"circle","mouse":[97,99]},{"name":"circle","mouse":[125,83]},{"name":"circle","mouse":[159,79]},{"name":"circle","mouse":[179,79]},{"name":"circle","mouse":[197,79]},{"name":"circle","mouse":[218,81]},{"name":"circle","mouse":[237,85]}], - 
-[{"name":"circle","mouse":[194,41]},{"name":"circle","mouse":[261,46]},{"name":"circle","mouse":[305,80]},{"name":"circle","mouse":[333,133]},{"name":"circle","mouse":[345,185]},{"name":"circle","mouse":[340,252]},{"name":"circle","mouse":[316,290]},{"name":"circle","mouse":[286,312]},{"name":"circle","mouse":[239,320]},{"name":"circle","mouse":[164,305]},{"name":"circle","mouse":[115,273]},{"name":"circle","mouse":[82,221]},{"name":"circle","mouse":[69,181]},{"name":"circle","mouse":[70,129]},{"name":"circle","mouse":[89,91]},{"name":"circle","mouse":[106,69]},{"name":"circle","mouse":[129,50]},{"name":"circle","mouse":[168,39]},{"name":"circle","mouse":[197,38]},{"name":"circle","mouse":[211,38]}], - -[{"name":"circle","mouse":[172,36]},{"name":"circle","mouse":[240,42]},{"name":"circle","mouse":[311,70]},{"name":"circle","mouse":[365,129]},{"name":"circle","mouse":[384,188]},{"name":"circle","mouse":[376,238]},{"name":"circle","mouse":[353,285]},{"name":"circle","mouse":[311,322]},{"name":"circle","mouse":[248,330]},{"name":"circle","mouse":[163,304]},{"name":"circle","mouse":[121,265]},{"name":"circle","mouse":[97,183]},{"name":"circle","mouse":[101,132]},{"name":"circle","mouse":[114,91]},{"name":"circle","mouse":[130,69]},{"name":"circle","mouse":[145,58]},{"name":"circle","mouse":[158,53]},{"name":"circle","mouse":[171,44]},{"name":"circle","mouse":[176,41]},{"name":"circle","mouse":[177,41]}], - 
-[{"name":"circle","mouse":[191,33]},{"name":"circle","mouse":[262,34]},{"name":"circle","mouse":[317,66]},{"name":"circle","mouse":[347,133]},{"name":"circle","mouse":[350,215]},{"name":"circle","mouse":[328,301]},{"name":"circle","mouse":[289,346]},{"name":"circle","mouse":[241,341]},{"name":"circle","mouse":[191,324]},{"name":"circle","mouse":[141,277]},{"name":"circle","mouse":[120,225]},{"name":"circle","mouse":[124,167]},{"name":"circle","mouse":[133,128]},{"name":"circle","mouse":[146,100]},{"name":"circle","mouse":[157,72]},{"name":"circle","mouse":[164,58]},{"name":"circle","mouse":[173,45]},{"name":"circle","mouse":[180,41]},{"name":"circle","mouse":[182,38]},{"name":"circle","mouse":[187,37]}], - -[{"name":"circle","mouse":[150,39]},{"name":"circle","mouse":[205,36]},{"name":"circle","mouse":[274,62]},{"name":"circle","mouse":[304,110]},{"name":"circle","mouse":[327,197]},{"name":"circle","mouse":[311,282]},{"name":"circle","mouse":[279,332]},{"name":"circle","mouse":[257,345]},{"name":"circle","mouse":[217,333]},{"name":"circle","mouse":[167,296]},{"name":"circle","mouse":[133,249]},{"name":"circle","mouse":[117,201]},{"name":"circle","mouse":[117,150]},{"name":"circle","mouse":[120,120]},{"name":"circle","mouse":[129,85]},{"name":"circle","mouse":[133,69]},{"name":"circle","mouse":[141,52]},{"name":"circle","mouse":[149,41]},{"name":"circle","mouse":[152,37]},{"name":"circle","mouse":[153,37]}]] - -let classifer; - -function setup() { - ml5.setBackend("webgl"); - createCanvas(400, 400); - frameRate(10); - background(220); - classifer = ml5.timeSeries(); -} - -function draw() { - if (keyIsDown(67) && pressedOnce){ - sequence.push({"name":"circle","mouse": [mouseX,mouseY]}); - ellipse(mouseX,mouseY,10); - if (sequence.length == 20){ - pressedOnce = false; - datasets.push(sequence); - sequence = []; - console.log("finished"); - background(220); - } - - } else if (keyIsDown(66) && pressedOnce){ - sequence.push({"name":"box","mouse": [mouseX,mouseY]}); - 
ellipse(mouseX,mouseY,10); - if (sequence.length == 20){ - pressedOnce = false; - datasets.push(sequence); - sequence = []; - console.log("finished"); - background(220); - } - } - - if (datasets.length == 10){ - file = JSON.stringify(datasets); - console.log(file); - } -} - -function keyReleased(){ - pressedOnce = true; -} - -function keyPressed(){ - if (key == 't'){ - classifer.createArchitecture(); - console.log('done architecture'); - } else if (key == 'y'){ - classifer.compileModel(); - console.log('done compiling the thing'); - } else if (key == 'u'){ - classifer.summarizeModel(); - console.log('done summarizing'); - } else if (key == 'i'){ - classifer.fitModel(); - console.log('fitting done'); - } -} \ No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/sketch.js b/examples/timeSeries-mousexy-keypoints/sketch.js index 84ddb1fb..85ab6a0a 100644 --- a/examples/timeSeries-mousexy-keypoints/sketch.js +++ b/examples/timeSeries-mousexy-keypoints/sketch.js @@ -1,104 +1,180 @@ +// https://editor.p5js.org/gohai/sketches/_KdpDSQzH -let sequence = []; -let datasets = []; +let model; -let classifer; -let tensorData; +let curr_shape = 'None, press a button below' +let state = 'collection'; let pressedOnce = true; -let w =400 -let h = 400; -let type = ["box","circle"] - -let circle = 
[[[0.48,0.165],[0.555,0.175],[0.615,0.1875],[0.715,0.2475],[0.78,0.35],[0.7825,0.545],[0.7225,0.6375],[0.615,0.7175],[0.5175,0.74],[0.395,0.715],[0.25,0.6425],[0.2,0.5375],[0.205,0.44],[0.2425,0.345],[0.3,0.245],[0.3425,0.2025],[0.43,0.1675],[0.495,0.165],[0.5025,0.165],[0.5025,0.165]],[[0.47,0.16],[0.4925,0.1575],[0.6,0.1725],[0.6875,0.235],[0.7925,0.35],[0.8275,0.455],[0.7825,0.585],[0.715,0.695],[0.5825,0.725],[0.3725,0.695],[0.2275,0.6075],[0.1875,0.4775],[0.205,0.3975],[0.2625,0.3],[0.3525,0.21],[0.39,0.18],[0.4275,0.17],[0.4625,0.16],[0.4725,0.1575],[0.475,0.1575]],[[0.4675,0.145],[0.5375,0.15],[0.64,0.205],[0.7375,0.2925],[0.7875,0.4275],[0.7625,0.53],[0.6725,0.65],[0.55,0.7025],[0.4025,0.6975],[0.2975,0.6325],[0.25,0.5375],[0.24,0.4225],[0.265,0.3575],[0.3125,0.2875],[0.3675,0.225],[0.4025,0.1875],[0.4225,0.1625],[0.45,0.1475],[0.4675,0.1475],[0.4775,0.1475]],[[0.5525,0.1475],[0.6825,0.195],[0.7825,0.2775],[0.83,0.39],[0.8225,0.5075],[0.7825,0.62],[0.705,0.6925],[0.4875,0.76],[0.315,0.7625],[0.23,0.7125],[0.2025,0.605],[0.21,0.4625],[0.2575,0.3125],[0.36,0.1875],[0.4625,0.1425],[0.51,0.14],[0.5375,0.14],[0.5525,0.14],[0.5625,0.14],[0.5625,0.14]],[[0.5725,0.1325],[0.6825,0.155],[0.7625,0.2275],[0.8275,0.3525],[0.8325,0.48],[0.81,0.5925],[0.7675,0.685],[0.6425,0.735],[0.4725,0.76],[0.3375,0.715],[0.2775,0.625],[0.27,0.5225],[0.27,0.4325],[0.3025,0.35],[0.3625,0.27],[0.42,0.1975],[0.4625,0.165],[0.5175,0.1375],[0.5575,0.1275],[0.57,0.1275]],[[0.5225,0.1425],[0.62,0.1375],[0.7725,0.19],[0.8325,0.2775],[0.87,0.4275],[0.8375,0.6075],[0.81,0.67],[0.755,0.7075],[0.5525,0.715],[0.4175,0.7075],[0.2775,0.5975],[0.2625,0.48],[0.275,0.405],[0.3225,0.31],[0.3625,0.2525],[0.3925,0.2175],[0.425,0.1875],[0.4675,0.1575],[0.49,0.1475],[0.5025,0.145]],[[0.4875,0.135],[0.6625,0.1525],[0.765,0.2175],[0.8325,0.345],[0.8525,0.495],[0.845,0.64],[0.805,0.7075],[0.715,0.7525],[0.575,0.7775],[0.45,0.7775],[0.3475,0.7375],[0.285,0.675],[0.2625,0.555],[0.2825,0.4125],[0.32,0.315],[0.3625
,0.2275],[0.3925,0.1975],[0.425,0.175],[0.4425,0.1675],[0.455,0.1575]],[[0.4025,0.1675],[0.455,0.1525],[0.5625,0.1525],[0.665,0.185],[0.7625,0.2475],[0.8225,0.355],[0.8475,0.5075],[0.83,0.6475],[0.7425,0.725],[0.5775,0.7925],[0.435,0.79],[0.2775,0.7325],[0.2075,0.6275],[0.2025,0.53],[0.2025,0.4525],[0.2225,0.36],[0.2575,0.3],[0.3,0.2475],[0.33,0.225],[0.36,0.1975]],[[0.4925,0.1525],[0.5925,0.1425],[0.69,0.1825],[0.7825,0.2425],[0.8325,0.3125],[0.87,0.45],[0.8825,0.6275],[0.8575,0.76],[0.7825,0.82],[0.5825,0.865],[0.435,0.865],[0.3525,0.8275],[0.2825,0.7075],[0.2675,0.6075],[0.2775,0.4825],[0.2975,0.3925],[0.335,0.31],[0.3725,0.2375],[0.4025,0.2025],[0.43,0.1775]],[[0.4725,0.15],[0.5825,0.1475],[0.6875,0.2],[0.7775,0.2875],[0.82,0.4],[0.8225,0.5575],[0.7525,0.7075],[0.665,0.7725],[0.555,0.8025],[0.395,0.7675],[0.305,0.68],[0.255,0.56],[0.245,0.42],[0.2675,0.34],[0.3325,0.235],[0.365,0.2],[0.4025,0.1725],[0.445,0.15],[0.475,0.1425],[0.5125,0.1425]],[[0.485,0.1375],[0.6175,0.1375],[0.73,0.205],[0.795,0.2875],[0.8175,0.4275],[0.8125,0.5525],[0.7825,0.635],[0.675,0.7125],[0.52,0.7425],[0.33,0.6975],[0.2625,0.5975],[0.2525,0.4975],[0.2675,0.3675],[0.2975,0.2775],[0.3425,0.2175],[0.4125,0.17],[0.4525,0.155],[0.495,0.14],[0.5125,0.14],[0.515,0.14]],[[0.5125,0.1325],[0.6425,0.15],[0.7425,0.225],[0.81,0.43],[0.81,0.6025],[0.74,0.7875],[0.6075,0.8425],[0.4375,0.86],[0.3,0.8125],[0.225,0.71],[0.1875,0.605],[0.2125,0.4825],[0.2625,0.3775],[0.3575,0.2625],[0.4025,0.205],[0.4425,0.1675],[0.515,0.1425],[0.5525,0.135],[0.5625,0.135],[0.5625,0.135]],[[0.5375,0.1375],[0.6275,0.16],[0.7475,0.235],[0.8025,0.3775],[0.8225,0.5725],[0.77,0.7225],[0.6675,0.835],[0.5475,0.8675],[0.38,0.835],[0.2525,0.7],[0.2125,0.5675],[0.2225,0.4375],[0.2625,0.3275],[0.3225,0.2425],[0.395,0.1875],[0.475,0.1625],[0.5275,0.15],[0.5425,0.1475],[0.5525,0.145],[0.5575,0.145]],[[0.53,0.1375],[0.635,0.1525],[0.725,0.2175],[0.8225,0.39],[0.825,0.6025],[0.76,0.79],[0.6625,0.8375],[0.545,0.84],[0.4075,0.8225],[0.3425
,0.775],[0.285,0.6675],[0.28,0.5375],[0.295,0.3975],[0.345,0.315],[0.3775,0.2675],[0.4175,0.2175],[0.4575,0.1675],[0.4875,0.1475],[0.5075,0.1375],[0.52,0.1375]],[[0.4775,0.165],[0.5475,0.1575],[0.63,0.1775],[0.6675,0.2025],[0.6975,0.235],[0.7375,0.3225],[0.7525,0.465],[0.7225,0.5775],[0.655,0.6525],[0.5525,0.67],[0.425,0.655],[0.3275,0.59],[0.28,0.4775],[0.275,0.35],[0.305,0.2825],[0.34,0.235],[0.3775,0.1975],[0.455,0.1575],[0.475,0.15],[0.495,0.1475]],[[0.4375,0.165],[0.5425,0.1475],[0.635,0.1725],[0.685,0.2175],[0.735,0.305],[0.7475,0.4],[0.7125,0.4825],[0.64,0.5375],[0.5325,0.555],[0.3825,0.535],[0.3,0.48],[0.2825,0.41],[0.2725,0.3325],[0.2775,0.2825],[0.3,0.245],[0.34,0.2075],[0.3575,0.195],[0.4,0.17],[0.4225,0.1575],[0.465,0.1475]],[[0.4625,0.145],[0.5575,0.15],[0.6575,0.1975],[0.7275,0.2625],[0.755,0.3525],[0.7425,0.4775],[0.6825,0.595],[0.595,0.635],[0.375,0.64],[0.275,0.6175],[0.205,0.54],[0.1825,0.4075],[0.1925,0.3025],[0.2325,0.255],[0.2825,0.22],[0.3525,0.1775],[0.3875,0.1575],[0.4325,0.1425],[0.4725,0.14],[0.5025,0.1475]],[[0.4625,0.1175],[0.5325,0.1125],[0.6425,0.1425],[0.715,0.21],[0.765,0.3175],[0.7775,0.46],[0.7325,0.5775],[0.6175,0.63],[0.4775,0.655],[0.3675,0.635],[0.2725,0.58],[0.2075,0.4875],[0.22,0.385],[0.275,0.2725],[0.3525,0.2],[0.3825,0.1675],[0.42,0.1375],[0.455,0.1175],[0.475,0.1125],[0.4925,0.1125]],[[0.5425,0.125],[0.7125,0.165],[0.7925,0.26],[0.8275,0.415],[0.8175,0.5225],[0.725,0.6125],[0.615,0.675],[0.465,0.7175],[0.3725,0.6825],[0.32,0.6225],[0.2975,0.515],[0.31,0.4],[0.3325,0.33],[0.37,0.2775],[0.4275,0.225],[0.465,0.1925],[0.535,0.1375],[0.5625,0.1225],[0.5725,0.12],[0.5725,0.12]],[[0.505,0.1225],[0.5725,0.13],[0.6825,0.1975],[0.74,0.2925],[0.7375,0.485],[0.67,0.5875],[0.545,0.6775],[0.3825,0.6875],[0.265,0.6375],[0.2125,0.52],[0.215,0.41],[0.24,0.32],[0.28,0.2375],[0.33,0.17],[0.3625,0.1475],[0.3925,0.1425],[0.4475,0.1375],[0.485,0.1275],[0.5025,0.1175],[0.51,0.1175]],[[0.5525,0.1475],[0.6875,0.1825],[0.7425,0.245],[0.7825,0.3475]
,[0.765,0.475],[0.6625,0.59],[0.545,0.655],[0.385,0.6525],[0.3125,0.6025],[0.28,0.4675],[0.2925,0.35],[0.33,0.2775],[0.3525,0.2425],[0.3925,0.2025],[0.4325,0.1825],[0.485,0.17],[0.5225,0.165],[0.5525,0.1575],[0.56,0.1575],[0.56,0.1575]],[[0.5,0.1675],[0.605,0.17],[0.6825,0.2025],[0.7325,0.2675],[0.7625,0.3875],[0.7525,0.5175],[0.6975,0.6025],[0.6075,0.6475],[0.4625,0.66],[0.3475,0.61],[0.295,0.505],[0.2825,0.375],[0.31,0.2675],[0.36,0.1875],[0.41,0.1525],[0.4725,0.1475],[0.5175,0.1575],[0.5475,0.1625],[0.5525,0.1625],[0.5525,0.165]],[[0.4375,0.1525],[0.4775,0.1525],[0.5875,0.1825],[0.6425,0.255],[0.69,0.385],[0.6375,0.5825],[0.5425,0.68],[0.4375,0.7175],[0.3275,0.6875],[0.2625,0.5975],[0.2325,0.4875],[0.24,0.3175],[0.2575,0.25],[0.3125,0.2],[0.375,0.175],[0.4125,0.165],[0.4375,0.1575],[0.4575,0.155],[0.46,0.155],[0.4625,0.1525]],[[0.41,0.1475],[0.5425,0.145],[0.6275,0.1675],[0.7125,0.24],[0.7625,0.3625],[0.75,0.5125],[0.6625,0.6125],[0.4525,0.655],[0.38,0.6375],[0.3075,0.54],[0.2825,0.4175],[0.2975,0.3225],[0.345,0.2475],[0.375,0.2],[0.405,0.17],[0.4325,0.1525],[0.4625,0.145],[0.495,0.145],[0.5225,0.1475],[0.5325,0.15]],[[0.445,0.1375],[0.545,0.135],[0.6225,0.1575],[0.695,0.1975],[0.7325,0.2875],[0.7675,0.4275],[0.75,0.5725],[0.6075,0.705],[0.49,0.7075],[0.2875,0.65],[0.1725,0.5225],[0.1625,0.415],[0.21,0.2925],[0.2625,0.2275],[0.3225,0.1775],[0.37,0.1625],[0.4025,0.1575],[0.445,0.1575],[0.4925,0.155],[0.53,0.1525]],[[0.5825,0.1325],[0.7375,0.1675],[0.8,0.255],[0.81,0.44],[0.75,0.5975],[0.6425,0.685],[0.4825,0.7175],[0.3375,0.6775],[0.2425,0.595],[0.1875,0.495],[0.1775,0.3925],[0.2175,0.3125],[0.265,0.2475],[0.3225,0.1975],[0.365,0.1725],[0.42,0.1425],[0.4875,0.1],[0.59,0.0825],[0.6325,0.0825],[0.635,0.0825]],[[0.445,0.1675],[0.5625,0.16],[0.65,0.195],[0.72,0.295],[0.745,0.425],[0.7175,0.5625],[0.6125,0.6675],[0.475,0.7],[0.3525,0.685],[0.1625,0.61],[0.1125,0.5575],[0.105,0.475],[0.1625,0.3775],[0.2125,0.3175],[0.2925,0.2475],[0.3575,0.2025],[0.4275,0.1775],[0.475,0
.1625],[0.4925,0.1575],[0.5025,0.1575]],[[0.515,0.155],[0.5725,0.1625],[0.71,0.2375],[0.78,0.3375],[0.795,0.4375],[0.77,0.5875],[0.665,0.6975],[0.5,0.7225],[0.4075,0.7025],[0.3025,0.6025],[0.2425,0.485],[0.2425,0.385],[0.285,0.2975],[0.325,0.2475],[0.38,0.2075],[0.4275,0.1875],[0.445,0.18],[0.5,0.1675],[0.55,0.165],[0.5725,0.1625]],[[0.4675,0.15],[0.5525,0.14],[0.6675,0.185],[0.7525,0.2675],[0.7925,0.37],[0.7925,0.4875],[0.6925,0.6175],[0.52,0.69],[0.36,0.6725],[0.2275,0.595],[0.1825,0.4775],[0.18,0.37],[0.2125,0.2975],[0.2725,0.2275],[0.355,0.175],[0.41,0.15],[0.4525,0.1425],[0.495,0.1375],[0.5125,0.1375],[0.515,0.1375]],[[0.5075,0.1375],[0.6,0.1375],[0.6825,0.1775],[0.7525,0.2575],[0.7875,0.3725],[0.7875,0.52],[0.6925,0.6375],[0.545,0.7],[0.395,0.66],[0.3025,0.5375],[0.2825,0.425],[0.31,0.2975],[0.3575,0.2125],[0.4025,0.1775],[0.46,0.1625],[0.52,0.1575],[0.555,0.1575],[0.5725,0.1575],[0.5725,0.1575],[0.5725,0.1575]]]; -let boxes = [[[0.4425,0.75],[0.46,0.7475],[0.545,0.7475],[0.6525,0.7475],[0.6825,0.7475],[0.68,0.78],[0.675,0.8875],[0.675,0.92],[0.665,0.93],[0.4825,0.9175],[0.41,0.925],[0.405,0.925],[0.4125,0.8375],[0.4275,0.7875],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7575],[0.4375,0.7525]],[[0.4475,0.7375],[0.5425,0.74],[0.6525,0.74],[0.6925,0.74],[0.6925,0.7775],[0.69,0.86],[0.6925,0.915],[0.6875,0.915],[0.5875,0.9125],[0.4775,0.9125],[0.4525,0.9125],[0.4525,0.8875],[0.45,0.8075],[0.4525,0.7675],[0.4525,0.7525],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475],[0.4525,0.7475]],[[0.4575,0.7425],[0.485,0.74],[0.6025,0.7425],[0.7025,0.7425],[0.7325,0.745],[0.7375,0.8175],[0.7425,0.9175],[0.7425,0.9225],[0.7225,0.92],[0.56,0.91],[0.4375,0.9125],[0.43,0.915],[0.4275,0.8825],[0.4375,0.8175],[0.445,0.77],[0.4475,0.7625],[0.45,0.7575],[0.45,0.7525],[0.45,0.75],[0.45,0.75]],[[0.43,0.7375],[0.4425,0.73],[0.565,0.73],[0.6575,0.73],[0.705,0.73],[0.7075,0.7325],[0.7075,0.82],[0.7025,0.8825],[0.7,0.8875],[0.66,0.885],[0.5475,
0.8825],[0.425,0.8825],[0.4,0.8825],[0.3975,0.86],[0.395,0.81],[0.4075,0.77],[0.42,0.7425],[0.4225,0.7375],[0.4225,0.7375],[0.4225,0.7375]],[[0.3825,0.7025],[0.3875,0.7025],[0.56,0.7],[0.68,0.6975],[0.73,0.6925],[0.7325,0.6925],[0.7175,0.7575],[0.7025,0.8475],[0.7,0.8925],[0.6875,0.895],[0.58,0.875],[0.4675,0.87],[0.41,0.87],[0.4025,0.8725],[0.39,0.8075],[0.3875,0.76],[0.3825,0.7225],[0.38,0.7025],[0.38,0.6975],[0.38,0.6975]],[[0.4275,0.6975],[0.6425,0.6975],[0.75,0.6975],[0.7675,0.6975],[0.7675,0.72],[0.7625,0.8025],[0.76,0.865],[0.7525,0.8775],[0.665,0.86],[0.55,0.8575],[0.4725,0.8575],[0.435,0.86],[0.43,0.8625],[0.4175,0.7925],[0.415,0.7275],[0.415,0.71],[0.415,0.6975],[0.415,0.6975],[0.415,0.6975],[0.415,0.6975]],[[0.415,0.6975],[0.485,0.6925],[0.5925,0.6925],[0.7325,0.695],[0.7425,0.6975],[0.7425,0.73],[0.735,0.81],[0.7325,0.87],[0.73,0.895],[0.6925,0.8975],[0.6025,0.8925],[0.515,0.8925],[0.455,0.8875],[0.44,0.8875],[0.435,0.855],[0.4375,0.79],[0.43,0.7475],[0.42,0.705],[0.4175,0.7],[0.4175,0.6975]],[[0.4375,0.69],[0.5725,0.6875],[0.6725,0.6875],[0.7525,0.6875],[0.7625,0.6875],[0.7625,0.7075],[0.755,0.775],[0.7525,0.815],[0.7425,0.8475],[0.7275,0.8675],[0.66,0.86],[0.5625,0.855],[0.4725,0.85],[0.445,0.8525],[0.4375,0.8375],[0.4275,0.7675],[0.4225,0.7275],[0.425,0.7025],[0.4275,0.6975],[0.4275,0.6975]],[[0.4175,0.7125],[0.44,0.7075],[0.5625,0.7075],[0.6875,0.7075],[0.7575,0.7075],[0.76,0.7075],[0.7625,0.745],[0.7625,0.8125],[0.7575,0.8625],[0.7425,0.8875],[0.7325,0.8875],[0.645,0.8675],[0.5525,0.865],[0.4725,0.8675],[0.415,0.875],[0.4125,0.8675],[0.4125,0.8075],[0.4125,0.7675],[0.4125,0.74],[0.4125,0.7275]],[[0.41,0.7225],[0.42,0.72],[0.5575,0.72],[0.665,0.7225],[0.695,0.7225],[0.695,0.7275],[0.695,0.785],[0.7025,0.8575],[0.7025,0.9],[0.7,0.9025],[0.65,0.89],[0.54,0.8775],[0.4525,0.88],[0.435,0.885],[0.4175,0.86],[0.405,0.795],[0.4,0.7525],[0.4,0.7375],[0.4025,0.7225],[0.4025,0.7225]],[[0.405,0.7175],[0.47,0.7125],[0.6175,0.7125],[0.6875,0.7125],[0.715,0.7125],[
0.715,0.7125],[0.705,0.765],[0.7025,0.815],[0.6975,0.865],[0.685,0.89],[0.6575,0.8875],[0.5575,0.8825],[0.4825,0.8825],[0.42,0.885],[0.415,0.8825],[0.415,0.815],[0.4075,0.7525],[0.405,0.7275],[0.405,0.7175],[0.405,0.7175]],[[0.405,0.7175],[0.4375,0.705],[0.5525,0.705],[0.66,0.705],[0.6875,0.705],[0.7,0.7075],[0.7025,0.725],[0.7,0.7925],[0.7,0.85],[0.695,0.8825],[0.6925,0.8875],[0.6625,0.8775],[0.5775,0.8675],[0.4825,0.8675],[0.3975,0.875],[0.3775,0.875],[0.3775,0.8375],[0.38,0.785],[0.3825,0.7475],[0.39,0.7275]],[[0.355,0.735],[0.3625,0.7325],[0.44,0.7275],[0.525,0.7275],[0.665,0.73],[0.6775,0.7325],[0.6775,0.7325],[0.675,0.7625],[0.6725,0.83],[0.6725,0.875],[0.6675,0.895],[0.6625,0.905],[0.6425,0.9025],[0.545,0.8875],[0.4625,0.8875],[0.4225,0.89],[0.4025,0.895],[0.375,0.8275],[0.365,0.7725],[0.3625,0.7575]],[[0.38,0.7475],[0.4125,0.7475],[0.47,0.7475],[0.545,0.745],[0.6425,0.745],[0.675,0.745],[0.6775,0.75],[0.685,0.8175],[0.695,0.8675],[0.695,0.9],[0.695,0.905],[0.635,0.8975],[0.54,0.895],[0.42,0.9075],[0.385,0.9075],[0.3725,0.8975],[0.3725,0.8525],[0.3775,0.815],[0.38,0.7925],[0.38,0.7675]],[[0.375,0.7625],[0.395,0.7575],[0.4825,0.7525],[0.56,0.7525],[0.6425,0.7525],[0.675,0.755],[0.685,0.7575],[0.6925,0.7875],[0.6925,0.875],[0.695,0.9025],[0.6825,0.9075],[0.6,0.9075],[0.5125,0.9075],[0.41,0.9075],[0.3775,0.9075],[0.3725,0.885],[0.38,0.84],[0.385,0.7875],[0.385,0.7575],[0.385,0.7575]],[[0.39,0.7525],[0.46,0.745],[0.5525,0.74],[0.6525,0.7375],[0.6625,0.7375],[0.6625,0.755],[0.65,0.815],[0.6475,0.8675],[0.645,0.8875],[0.64,0.9075],[0.6325,0.9075],[0.5925,0.8925],[0.5325,0.8875],[0.42,0.8875],[0.3775,0.8875],[0.3725,0.87],[0.3825,0.8225],[0.3925,0.7875],[0.3925,0.7675],[0.3925,0.76]],[[0.41,0.74],[0.4575,0.7375],[0.5125,0.7375],[0.625,0.7375],[0.65,0.7375],[0.6775,0.7425],[0.6825,0.7475],[0.6825,0.76],[0.6775,0.8125],[0.6725,0.8475],[0.6675,0.87],[0.6625,0.875],[0.5825,0.8675],[0.4925,0.8675],[0.4275,0.87],[0.415,0.87],[0.4125,0.8675],[0.4125,0.815],[0.41,0.7875],[0
.405,0.7575]],[[0.405,0.7175],[0.4675,0.715],[0.575,0.715],[0.6475,0.7175],[0.7025,0.7175],[0.705,0.725],[0.705,0.7675],[0.7,0.8475],[0.6875,0.8875],[0.6775,0.9],[0.625,0.8975],[0.5525,0.8875],[0.47,0.8875],[0.4075,0.88],[0.3925,0.855],[0.385,0.8075],[0.3825,0.7675],[0.3925,0.7225],[0.395,0.71],[0.395,0.71]],[[0.395,0.7225],[0.4125,0.7175],[0.4775,0.7175],[0.5675,0.7175],[0.635,0.7175],[0.65,0.7175],[0.655,0.725],[0.66,0.7725],[0.66,0.8125],[0.655,0.8425],[0.65,0.855],[0.6175,0.8475],[0.5325,0.84],[0.475,0.84],[0.415,0.84],[0.375,0.84],[0.37,0.8175],[0.37,0.7775],[0.3725,0.7475],[0.375,0.7275]],[[0.37,0.7375],[0.3925,0.7325],[0.525,0.7275],[0.5825,0.7275],[0.6325,0.7275],[0.655,0.7275],[0.655,0.74],[0.655,0.7875],[0.6475,0.825],[0.645,0.855],[0.645,0.885],[0.6375,0.8925],[0.5725,0.8825],[0.455,0.8775],[0.3925,0.8775],[0.355,0.875],[0.3525,0.865],[0.3625,0.8125],[0.375,0.7775],[0.3825,0.7475]],[[0.39,0.7075],[0.4725,0.705],[0.5575,0.705],[0.67,0.715],[0.6925,0.7175],[0.6975,0.72],[0.695,0.78],[0.6925,0.845],[0.6925,0.89],[0.6925,0.8975],[0.6225,0.8875],[0.525,0.88],[0.445,0.8775],[0.4125,0.8775],[0.3975,0.8575],[0.395,0.7875],[0.3925,0.7425],[0.39,0.7025],[0.39,0.6925],[0.39,0.6925]],[[0.3925,0.73],[0.3975,0.7325],[0.5075,0.73],[0.5875,0.73],[0.6525,0.73],[0.7,0.7275],[0.71,0.7275],[0.71,0.7775],[0.6975,0.85],[0.6875,0.8875],[0.685,0.89],[0.615,0.885],[0.515,0.88],[0.445,0.88],[0.4225,0.88],[0.4175,0.85],[0.4125,0.7925],[0.41,0.7675],[0.4025,0.7425],[0.4025,0.7325]],[[0.4075,0.73],[0.4825,0.7275],[0.585,0.7275],[0.6975,0.7275],[0.7125,0.73],[0.7225,0.7325],[0.725,0.7675],[0.7225,0.8275],[0.7225,0.865],[0.7225,0.875],[0.715,0.8775],[0.61,0.86],[0.5525,0.86],[0.48,0.86],[0.445,0.86],[0.4325,0.8275],[0.4275,0.7775],[0.4225,0.7575],[0.415,0.7375],[0.415,0.73]],[[0.4125,0.7275],[0.425,0.7275],[0.4625,0.7275],[0.515,0.725],[0.5625,0.7225],[0.61,0.7225],[0.6425,0.7225],[0.6525,0.7425],[0.655,0.7975],[0.655,0.845],[0.655,0.875],[0.6275,0.88],[0.555,0.88],[0.4825,0.88],[0.44,
0.88],[0.4225,0.855],[0.415,0.7975],[0.4125,0.7525],[0.4125,0.7275],[0.4125,0.7275]],[[0.4225,0.7425],[0.4825,0.7375],[0.5825,0.7325],[0.6425,0.7325],[0.6675,0.7325],[0.6775,0.74],[0.6775,0.795],[0.6725,0.845],[0.6675,0.8775],[0.665,0.8875],[0.6425,0.89],[0.5575,0.88],[0.4725,0.8775],[0.4275,0.875],[0.4125,0.86],[0.405,0.805],[0.405,0.7775],[0.4125,0.7475],[0.4125,0.7375],[0.4125,0.7375]],[[0.4125,0.7575],[0.4225,0.7475],[0.495,0.7425],[0.5825,0.7425],[0.6625,0.74],[0.695,0.74],[0.7025,0.7475],[0.7025,0.7975],[0.7025,0.845],[0.6975,0.865],[0.6925,0.87],[0.65,0.8675],[0.555,0.86],[0.4825,0.86],[0.41,0.86],[0.395,0.8625],[0.39,0.82],[0.3975,0.775],[0.4025,0.7575],[0.405,0.7475]],[[0.3775,0.7375],[0.4025,0.73],[0.4725,0.7275],[0.5725,0.7275],[0.6275,0.7275],[0.6425,0.7275],[0.6575,0.73],[0.66,0.7575],[0.66,0.8075],[0.65,0.8775],[0.6475,0.895],[0.605,0.8825],[0.53,0.875],[0.4375,0.87],[0.405,0.87],[0.39,0.855],[0.3825,0.8075],[0.3825,0.7725],[0.3875,0.75],[0.39,0.7475]],[[0.3475,0.75],[0.4325,0.745],[0.52,0.745],[0.59,0.75],[0.605,0.7525],[0.6125,0.78],[0.6125,0.835],[0.61,0.8775],[0.61,0.89],[0.56,0.8925],[0.4625,0.8775],[0.385,0.8775],[0.37,0.8775],[0.3625,0.8775],[0.3525,0.8275],[0.3475,0.7675],[0.35,0.7475],[0.35,0.7475],[0.35,0.7475],[0.3575,0.7525]],[[0.4,0.7475],[0.455,0.745],[0.5575,0.745],[0.675,0.745],[0.6925,0.7475],[0.7025,0.76],[0.7025,0.7975],[0.7,0.875],[0.7,0.915],[0.6975,0.92],[0.62,0.8975],[0.51,0.885],[0.44,0.885],[0.4225,0.89],[0.41,0.8825],[0.4125,0.8175],[0.4125,0.7775],[0.41,0.7475],[0.4075,0.745],[0.4075,0.745]],[[0.3575,0.755],[0.44,0.75],[0.565,0.75],[0.635,0.7525],[0.67,0.7525],[0.675,0.76],[0.6725,0.835],[0.6675,0.8925],[0.6675,0.9075],[0.6675,0.91],[0.6125,0.8975],[0.525,0.89],[0.455,0.8875],[0.42,0.8875],[0.39,0.885],[0.3525,0.865],[0.3475,0.8575],[0.355,0.8175],[0.3625,0.7625],[0.365,0.7375]]]; +let rec_duration = 2; +let num_seq = 20; +// assuming frameRate is 60, with record time of 2 seconds, there will be 120 datapoints total, which 
is huge! we use map to get 20 data points instead of 120 -let labels = [[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[0, 1],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0], -[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0], -[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0], -[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0],[1, 0]] +let count = 0; -let test =[[[0.2625,0.2075],[0.275,0.205],[0.3925,0.2175],[0.665,0.2175],[0.8325,0.2175],[0.8375,0.2175],[0.8375,0.2275],[0.815,0.3475],[0.8125,0.585],[0.805,0.675],[0.8,0.6775],[0.5425,0.695],[0.3425,0.7425],[0.3325,0.7425],[0.325,0.725],[0.2925,0.5575],[0.2775,0.4],[0.285,0.2875],[0.29,0.23],[0.29,0.2125]]]; +let sequence = []; function setup() { - ml5.setBackend("webgl"); - createCanvas(w, h); - frameRate(10); + ml5.setBackend('webgl') + let options = { + inputs: ['x', 'y'], + outputs: ['label'], + task: 'classification', + debug: 'true', + learningRate: 0.5 + }; + model = ml5.timeSeries(options); + createCanvas(600, 400); background(220); - classifer = ml5.timeSeries(); + UI(); + frameRate(60); + } function draw() { - if (keyIsDown(67) && pressedOnce){ - sequence.push([mouseX/w,mouseY/h]); - ellipse(mouseX,mouseY,10); - if (sequence.length == 20){ - pressedOnce = false; - datasets.push(sequence); - sequence = []; - console.log("finished"); - background(220); - } + let datapoints = map(count,0,rec_duration*num_seq, 0,num_seq) + + if (mouseIsPressed && pressedOnce){ + + line(pmouseX, pmouseY, mouseX,mouseY); + let inputs = {x: mouseX,y: mouseY}; + count++; + + if (datapoints % 1 == 0){ + // sequence.push(inputs); + sequence.push(inputs); + } - } else if (keyIsDown(66) && pressedOnce){ - sequence.push([mouseX/w,mouseY/h]); - ellipse(mouseX,mouseY,10); - if (sequence.length == 20){ - pressedOnce = false; - datasets.push(sequence); - sequence = []; - 
console.log("finished"); + if (sequence.length == num_seq){ + + + pressedOnce = false; + count = 0 + + if (state == 'collection'){ + let target = {label: curr_shape}; + background(220); + text("Recording: " + curr_shape, 50,50); + // console.log(sequence, target) + options = {inputLabels:['x','y']} + model.addData(sequence, target, options); + } else if (state == 'prediction'){ + background(220); + model.classify(sequence, gotResults) + } else if (state == 'training') { background(220); + text("You cannot record while training"); } - } else if (keyIsDown(65) && pressedOnce){ - sequence.push([mouseX/w,mouseY/h]); - ellipse(mouseX,mouseY,10); - if (sequence.length == 20){ - pressedOnce = false; - datasets.push(sequence); - sequence = []; - console.log("finished"); - background(220); - - tensortrial = classifer.toTensors(datasets,datasets); - predictions = classifer.predict(tensortrial[0]); - console.log(type[predictions], 'dito', predictions); - text(type[predictions],200,200); - // console.log(predictions[predictions.length-1]) + sequence = []; } } - - if (datasets.length == 10){ - file = JSON.stringify(datasets); - console.log(file); +} + +function gotResults(results) { + // if (error) { + // console.log(error); + // } + // console.log('hello', results); + stroke(0); + fill(0, 0, 255, 100); + let label = results[0].label; + text("Prediction: " + label, 50,50); + // let label = error[0].label; + +} + +function keyPressed(){ + if (key == 's') { + model.saveData('trial'); + } else if (key == 'd'){ + console.log(model.getData()); } } -function keyReleased(){ +function mouseReleased(){ pressedOnce = true; } -function keyPressed(){ - if (key == 't'){ - classifer.createArchitecture(); - console.log('done architecture'); - classifer.compileModel(); - console.log('done compiling the thing'); - classifer.summarizeModel(); - console.log('done summarizing'); - alldata = circle.concat(boxes); - tensorData = classifer.toTensors(alldata,labels); - console.log('conversion done'); 
- console.log(tensorData[0]); - console.log(tensorData[1]); - - classifer.fitModel(tensorData[0],tensorData[1]); - - } else if (key =='p'){ - classifer.modelSummary(); +function UI(){ + + textSize(20) + + rec_circle = createButton('Record Circle'); + rec_circle.mouseClicked(recordCircle); + rec_circle.style("font-family", "Georgia"); + rec_circle.style("font-size", "20px"); + + rec_square = createButton('Record Square'); + rec_square.mouseClicked(recordSquare); + rec_square.style("font-family", "Georgia"); + rec_square.style("font-size", "20px"); + + train_but = createButton('Train Model'); + train_but.mouseClicked(trainModel); + train_but.style("font-family", "Georgia"); + train_but.style("font-size", "20px"); + + pred_sha = createButton('Predict Shape'); + pred_sha.mouseClicked(predictShape); + pred_sha.style("font-family", "Georgia"); + pred_sha.style("font-size", "20px"); + + function recordCircle(){ + background(220); + state = 'collection' + curr_shape = 'Circle' + text("Recording: Circle", 50,50); + rec_circle.style("background-color",'#f0f0f0') + rec_square.style('background-color', ''); + pred_sha.style('background-color', ''); + } + + function recordSquare(){ + background(220); + state = 'collection' + curr_shape = 'Square' + text("Recording: Square", 50,50); + rec_square.style("background-color",'#f0f0f0') + rec_circle.style('background-color', ''); + pred_sha.style('background-color', ''); + } + + function trainModel(){ + model.createArchitecture(); + model.compileModel(); + model.summarizeModel(); + background(220); + state = 'training'; + text("Training...", 50,50); + model.normalizeData(); + let options = { + epochs: 100 } - } \ No newline at end of file + model.train(options,whileTraining,finishedTraining); + } + + function whileTraining(epoch, loss) { + console.log(epoch); + } + + function finishedTraining() { + console.log('finished training.'); + state = 'prediction'; + } + + function predictShape(){ + background(220); + state = 'prediction' + 
text("Predicting Shape...", 50,50); + pred_sha.style("background-color",'#f0f0f0') + rec_square.style('background-color', ''); + rec_circle.style('background-color', ''); + + + } +} + diff --git a/examples/timeSeries-mousexy-keypoints/sketch111.js b/examples/timeSeries-mousexy-keypoints/sketch111.js deleted file mode 100644 index fd3dc1ba..00000000 --- a/examples/timeSeries-mousexy-keypoints/sketch111.js +++ /dev/null @@ -1,178 +0,0 @@ -// https://editor.p5js.org/gohai/sketches/_KdpDSQzH - -let model; - -let curr_shape = 'None, press a button below' - -let state = 'collection'; -let pressedOnce = true; - - -let rec_duration = 2; -let num_seq = 20; -// assuming frameRate is 60, with record time of 2 seconds, there will be 120 datapoints total, which is huge! we use map to get 20 data points instead of 120 - -let count = 0; - - -let sequence = []; - -function setup() { - ml5.setBackend('webgl') - let options = { - inputs: ['x', 'y'], - outputs: ['label'], - task: 'classification', - debug: 'true', - learningRate: 0.5 - }; - model = ml5.timeSeries(options); - createCanvas(600, 400); - background(220); - UI(); - frameRate(60); - -} - -function draw() { - let datapoints = map(count,0,rec_duration*num_seq, 0,num_seq) - - if (mouseIsPressed && pressedOnce){ - - line(pmouseX, pmouseY, mouseX,mouseY); - let inputs = {x: mouseX/400,y: mouseY/400}; - count++; - - if (datapoints % 1 == 0){ - sequence.push(inputs); - } - - if (sequence.length == num_seq){ - - - pressedOnce = false; - count = 0 - - if (state == 'collection'){ - let target = {label: curr_shape}; - background(220); - text("Recording: " + curr_shape, 50,50); - // console.log(sequence, target) - model.addData(sequence, target); - } else if (state == 'prediction'){ - background(220); - model.classify(sequence, gotResults) - } else if (state == 'training') { - background(220); - text("You cannot record while training"); - } - - sequence = []; - } - } -} - -function gotResults(results) { - // if (error) { - // 
console.log(error); - // } - // console.log('hello', results); - stroke(0); - fill(0, 0, 255, 100); - let label = results[0].label; - text("Prediction: " + label, 50,50); - // let label = error[0].label; - -} - -function keyPressed(){ - if (key == 's') { - model.saveData('trial'); - } else if (key == 'd'){ - console.log(model.getData()); - } -} - -function mouseReleased(){ - pressedOnce = true; -} - -function UI(){ - - textSize(20) - - rec_circle = createButton('Record Circle'); - rec_circle.mouseClicked(recordCircle); - rec_circle.style("font-family", "Georgia"); - rec_circle.style("font-size", "20px"); - - rec_square = createButton('Record Square'); - rec_square.mouseClicked(recordSquare); - rec_square.style("font-family", "Georgia"); - rec_square.style("font-size", "20px"); - - train_but = createButton('Train Model'); - train_but.mouseClicked(trainModel); - train_but.style("font-family", "Georgia"); - train_but.style("font-size", "20px"); - - pred_sha = createButton('Predict Shape'); - pred_sha.mouseClicked(predictShape); - pred_sha.style("font-family", "Georgia"); - pred_sha.style("font-size", "20px"); - - function recordCircle(){ - background(220); - state = 'collection' - curr_shape = 'Circle' - text("Recording: Circle", 50,50); - rec_circle.style("background-color",'#f0f0f0') - rec_square.style('background-color', ''); - pred_sha.style('background-color', ''); - } - - function recordSquare(){ - background(220); - state = 'collection' - curr_shape = 'Square' - text("Recording: Square", 50,50); - rec_square.style("background-color",'#f0f0f0') - rec_circle.style('background-color', ''); - pred_sha.style('background-color', ''); - } - - function trainModel(){ - model.createArchitecture(); - model.compileModel(); - model.summarizeModel(); - background(220); - state = 'training'; - text("Training...", 50,50); - model.normalizeData(); - let options = { - epochs: 100 - } - model.train(options,whileTraining,finishedTraining); - } - - function whileTraining(epoch, 
loss) { - console.log(epoch); - } - - function finishedTraining() { - console.log('finished training.'); - state = 'prediction'; - } - - function predictShape(){ - background(220); - state = 'prediction' - text("Predicting Shape...", 50,50); - pred_sha.style("background-color",'#f0f0f0') - rec_square.style('background-color', ''); - rec_circle.style('background-color', ''); - - - } -} - diff --git a/src/LSTM/index-1.js b/src/LSTM/index-1.js deleted file mode 100644 index 64bfc478..00000000 --- a/src/LSTM/index-1.js +++ /dev/null @@ -1,302 +0,0 @@ -import * as tf from "@tensorflow/tfjs"; -import neuralNetwork from "../NeuralNetwork/index"; -import callCallback from "../utils/callcallback"; - - -/* -Since essentially LSTM is a layer and can be used the same way with neuralNetwork class, -this will inherit from the DIYNeuralNetwork Class, a list of modifications and overrides will be -in this list: - -1.) Architecture: - * Default Architecutre when no - -SaveData -formatRawData - -converting inputs to tensors - - -Maintain: -createModel -addlayer -copy - -*/ - - - -class LSTMify{ - constructor(options, callback){ - this.nnInst = neuralNetwork(options, callback); - - const methods = Object.getOwnPropertyNames(Object.getPrototypeOf(this.nnInst)); - for (const method of methods) { - if (method !== 'constructor' && !this[method]) { - this[method] = this.nnInst[method].bind(this.nnInst); - } - } - } - - // train() { - // console.log('Overridden train method in LSTMify'); - // // Optionally, you can still call the original method if needed - // // this.neuralNetworkInstance.train(); - // } - /** - * train - * @public - * @param {*} optionsOrCallback - * @param {*} optionsOrWhileTraining - * @param {*} callback - * @return {Promise} - */ - async train(optionsOrCallback, optionsOrWhileTraining, callback) { - let options; - let whileTrainingCb; - let finishedTrainingCb; - if ( - typeof optionsOrCallback === "object" && - typeof optionsOrWhileTraining === "function" && - typeof 
callback === "function" - ) { - options = optionsOrCallback; - whileTrainingCb = optionsOrWhileTraining; - finishedTrainingCb = callback; - } else if ( - typeof optionsOrCallback === "object" && - typeof optionsOrWhileTraining === "function" - ) { - options = optionsOrCallback; - whileTrainingCb = null; - finishedTrainingCb = optionsOrWhileTraining; - } else if ( - typeof optionsOrCallback === "function" && - typeof optionsOrWhileTraining === "function" - ) { - options = {}; - whileTrainingCb = optionsOrCallback; - finishedTrainingCb = optionsOrWhileTraining; - } else { - options = {}; - whileTrainingCb = null; - finishedTrainingCb = optionsOrCallback; - } - - return callCallback(this.trainInternal(options, whileTrainingCb), finishedTrainingCb); - } - - /** - * train - * @param {Object} _options - * @param {function} [whileTrainingCb] - * @return {Promise} - */ - async trainInternal(_options, whileTrainingCb) { - const options = { - epochs: 10, - batchSize: 32, - validationSplit: 0.1, - whileTraining: null, - ..._options, - }; - - // if debug mode is true, then use tf vis - if (this.nnInst.options.debug === true || this.nnInst.options.debug === "true") { - options.whileTraining = [ - this.nnInst.neuralNetworkVis.trainingVis(), - { - onEpochEnd: whileTrainingCb, - }, - ]; - } else { - // if not use the default training - // options.whileTraining = whileTrainingCb === null ? 
[{ - // onEpochEnd: (epoch, loss) => { - // console.log(epoch, loss.loss) - // } - // }] : - // [{ - // onEpochEnd: whileTrainingCb - // }]; - options.whileTraining = [ - { - onEpochEnd: whileTrainingCb, - }, - ]; - } - - // if metadata needs to be generated about the data - if (!this.nnInst.neuralNetworkData.isMetadataReady) { - // if the inputs are defined as an array of [img_width, img_height, channels] - this.nnInst.createMetaData(); - } - - // if the data still need to be summarized, onehotencoded, etc - if (!this.nnInst.neuralNetworkData.isWarmedUp) { - this.nnInst.prepareForTraining(); - } - - // if inputs and outputs are not specified - // in the options, then create the tensors - // from the this.neuralNetworkData.data.raws - if (!options.inputs && !options.outputs) { - const { inputs, outputs } = this.convertTrainingDataToTensors(); - options.inputs = inputs; - options.outputs = outputs; - } - - // check to see if layers are passed into the constructor - // then use those to create your architecture - if (!this.nnInst.neuralNetwork.isLayered) { - // TODO: don't update this.options.layers - Linda - this.nnInst.options.layers = this.createNetworkLayers( - this.nnInst.options.layers - ); - } - - // if the model does not have any layers defined yet - // then use the default structure - if (!this.nnInst.neuralNetwork.isLayered) { - // TODO: don't update this.options.layers - Linda - this.nnInst.options.layers = this.addDefaultLayers(); - } - - if (!this.nnInst.neuralNetwork.isCompiled) { - // compile the model with defaults - this.nnInst.compile(); - } - - // train once the model is compiled - await this.nnInst.neuralNetwork.train(options); - } - - addDefaultLayers() { - const { inputs, outputs } = this.convertTrainingDataToTensors(); - const shape = [1]; - shape.push(...inputs.shape); - console.log(inputs) - console.log(outputs) - - console.log('default', shape) - let layers; - const task = this.nnInst.options.task; - switch (task.toLowerCase()) { - // if the 
task is classification - case "classification": - layers = [ - { - type: "lstm", - units: this.nnInst.options.hiddenUnits, - activation: "relu", - inputShape: shape, - returnSequences: true, - }, - { - type: "dense", - units: this.nnInst.options.hiddenUnits, - activation: "relu", - }, - { - type: "dense", - activation: "softmax", - }, - ]; - - return this.createNetworkLayers(layers); - // if the task is regression - case "regression": - layers = [ - { - type: "dense", - units: this.nnInst.options.hiddenUnits, - activation: "relu", - }, - { - type: "dense", - activation: "sigmoid", - }, - ]; - return this.createNetworkLayers(layers); - // if the task is imageClassification - case "imageclassification": - layers = [ - { - type: "conv2d", - filters: 8, - kernelSize: 5, - strides: 1, - activation: "relu", - kernelInitializer: "varianceScaling", - }, - { - type: "maxPooling2d", - poolSize: [2, 2], - strides: [2, 2], - }, - { - type: "conv2d", - filters: 16, - kernelSize: 5, - strides: 1, - activation: "relu", - kernelInitializer: "varianceScaling", - }, - { - type: "maxPooling2d", - poolSize: [2, 2], - strides: [2, 2], - }, - { - type: "flatten", - }, - { - type: "dense", - kernelInitializer: "varianceScaling", - activation: "softmax", - }, - ]; - return this.nnInst.createNetworkLayers(layers); - - default: - console.log("no imputUnits or outputUnits defined"); - layers = [ - { - type: "dense", - units: this.options.hiddenUnits, - activation: "relu", - }, - { - type: "dense", - activation: "sigmoid", - }, - ]; - return this.nnInst.createNetworkLayers(layers); - } - } - - getData(){ - return this.nnInst.neuralNetworkData.getData(); - } -} - -const timeSeries = (inputsOrOptions, outputsOrCallback, callback) => { - let options; - let cb; - - if (inputsOrOptions instanceof Object) { - options = inputsOrOptions; - cb = outputsOrCallback; - } else { - options = { - inputs: inputsOrOptions, - outputs: outputsOrCallback, - }; - cb = callback; - } - - const instance = new 
LSTMify(options, cb); - return instance; - }; - - export default timeSeries; \ No newline at end of file diff --git a/src/LSTM/index.js b/src/LSTM/index.js index 59023fe7..2f5a4a17 100644 --- a/src/LSTM/index.js +++ b/src/LSTM/index.js @@ -1,158 +1,118 @@ - import * as tf from "@tensorflow/tfjs"; -import * as tfvis from "@tensorflow/tfjs-vis"; +import callCallback from "../utils/callcallback"; +import handleArguments from "../utils/handleArguments"; +import { imgToPixelArray, isInstanceOfSupportedElement, } from "../utils/imageUtilities"; +import NeuralNetwork from "./timeSeries"; +import NeuralNetworkData from "./timeSeriesData"; + import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; +import NeuralNetworkVis from "../NeuralNetwork/NeuralNetworkVis"; -// import '@tensorflow/tfjs-node'; -// import callCallback from "../utils/callcallback"; +import tsUtils from "./timeSeriesUtils"; -class LSTMify{ - - constructor (options, callback){ - // sample architecture just to try - this.model = tf.sequential(); +const DEFAULTS = { + inputs: [], + outputs: [], + dataUrl: null, + modelUrl: null, + layers: [], + task: null, + debug: false, + learningRate: 0.2, + hiddenUnits: 16, + neuroEvolution: false, +}; - } - createArchitecture() { +/* +as far as the p5 sketch is concerned, it will directly call only a few functions in the class, +these are the following: - // Create the model - this.model = tf.sequential(); +model.addData +model.saveData, model etc +model.train +model.classify/predict etc - // Add the LSTM layers with the initializer - this.model.add(tf.layers.lstm({ - units: 50, - inputShape: [20, 2], - activation: 'relu', - returnSequences: true, - kernelInitializer: tf.initializers.glorotNormal(), - recurrentInitializer: tf.initializers.glorotNormal(), - biasInitializer: tf.initializers.glorotNormal(), - })); +*/ - this.model.add(tf.layers.lstm({ - units: 50, - kernelInitializer: tf.initializers.glorotNormal(), - recurrentInitializer: tf.initializers.glorotNormal(), 
- biasInitializer: tf.initializers.glorotNormal(), - })); +class timeSeries { - this.model.add(tf.layers.dense({ - units: 2, - activation: 'softmax', - })); - } + //reviewed + constructor(options, callback) { + this.options = + { + ...DEFAULTS, + ...options, + } || DEFAULTS; - compileModel(){ - const optimizer = tf.train.adam(0.002) - // const optimizer = tf.train.adadelta(0.05) + this.neuralNetwork = new NeuralNetwork(); + this.neuralNetworkData = new NeuralNetworkData(); + this.neuralNetworkVis = new NeuralNetworkVis(); - this.model.compile({ - optimizer: optimizer, - loss: 'binaryCrossentropy', - metrics: ['accuracy'] - }); + this.data = { + training: [], + }; } - - summarizeModel(){ - this.model.summary() - } - - toTensors(x,y){ - const x_tensor = tf.tensor(x); - const y_tensor = tf.tensor(y); - return [x_tensor,y_tensor] + // mainly for loading data - should be async + async init() { + return 0; } - async fitModel(xs,ys){ - this.loggers = [] - this.history = await this.model.fit(xs, ys,{ - epochs: 50, - batchSize: 16, - callbacks: { - onEpochEnd: (epoch, logs) => { - this.loggers.push(logs) - console.log(`Epoch ${epoch + 1}: loss = ${logs.loss}, accuracy = ${logs.acc}`); - } - // callbacks: { - // onEpochEnd: async (epoch, logs) => { - // // Display the loss and accuracy at the end of each epoch - // this.loggers.push(logs) - - // // Plot loss and accuracy - // tfvis.show.history( - // { name: 'Training Performance' }, - // this.loggers, - // ['loss', 'accuracy'] // or ['loss', 'acc'] based on your metrics - // ); - // }, - // } - }}) - } - modelSummary() { - console.log(this.history); - tfvis.show.history({ name: 'Training Performance' }, this.loggers, ['loss', 'accuracy']); - } + ///////////////////////////////////////////////////////////////////////////// + // ADD DATA // + ///////////////////////////////////////////////////////////////////////////// - // async predict(data){ - // const predictions = this.model.predict(data) - // const predict = await 
predictions.array(); - // console.log(typeof predict) - // predict.array().then(array => { - // console.log(array); - // // return array - // }) - // // console.log("this is the one") - // // return array_ver + /* adding data: can only accept the following formats: + - for xInputs: + 1. Sequence of objects (array of objects) + [{x: , y: },{x: , y: },{x: , y: },{x: , y: }] + 2. Sequence of arrays (array of array, order matters) + [[],[],[],[]] + 3. Sequence of values (inputlabels should be provided by user) + [[,,,,,]] e.g. shape = {inputLabels: ['x','y']} will become [{x: , y: },{x: , y: },{x: , y: },{x: , y: }] + + - for yInputs: + 1. similar to neural network, so use same logic + */ - // } + addData(xInputs, yInputs, options = null){ + // 1. verify format between the three possible types of xinputs + const xs = tsUtils.verifyAndFormatInputs(xInputs,options,this.options); - predict(_inputs) { - const output = tf.tidy(() => { - return this.model.predict(_inputs); - }); - const result = output.arraySync(); + // 2. 
format the yInput - same logic as NN class + const ys = tsUtils.verifyAndFormatOutputs(yInputs,options,this.options); + + this.neuralNetworkData.addData(xs,ys); + } - output.dispose(); - _inputs.dispose(); - console.log(result, 'here') - const final = nnUtils.getMax(result[result.length-1]) - console.log(result[result.length-1].indexOf(final),'lalal', result, final) - const word = [result[result.length-1].indexOf(final)] - return word; - } - } -const timeSeries = (inputsOrOptions, outputsOrCallback, callback) => { - // let options; - // let cb; - - // if (inputsOrOptions instanceof Object) { - // options = inputsOrOptions; - // cb = outputsOrCallback; - // } else { - // options = { - // inputs: inputsOrOptions, - // outputs: outputsOrCallback, - // }; - // cb = callback; - // } - - // const instance = new LSTMify(options, cb); - // return instance; +const TimeSeries = (inputsOrOptions, outputsOrCallback, callback) => { + let options; + let cb; + + if (inputsOrOptions instanceof Object) { + options = inputsOrOptions; + cb = outputsOrCallback; + } else { + options = { + inputs: inputsOrOptions, + outputs: outputsOrCallback, + }; + cb = callback; + } - const instance = new LSTMify(); - return instance; - }; - - export default timeSeries; \ No newline at end of file + const instance = new timeSeries(options, cb); + return instance; +}; + +export default TimeSeries; diff --git a/src/LSTM/index111.js b/src/LSTM/index111.js deleted file mode 100644 index 3919ea54..00000000 --- a/src/LSTM/index111.js +++ /dev/null @@ -1,1084 +0,0 @@ -import * as tf from "@tensorflow/tfjs"; -import callCallback from "../utils/callcallback"; -import handleArguments from "../utils/handleArguments"; -import { imgToPixelArray, isInstanceOfSupportedElement, } from "../utils/imageUtilities"; -import NeuralNetwork from "./timeSeries"; -import NeuralNetworkData from "./timeSeriesData"; - -import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; -import NeuralNetworkVis from 
"../NeuralNetwork/NeuralNetworkVis"; - -import tsUtils from "./timeSeriesUtils"; - -const DEFAULTS = { - inputs: [], - outputs: [], - dataUrl: null, - modelUrl: null, - layers: [], - task: null, - debug: false, - learningRate: 0.2, - hiddenUnits: 16, - neuroEvolution: false, -}; - - -/* -as far as the p5 sketch is concerned, it will directly call only a few functions in the class, -these are the following: - -model.addData -model.saveData, model etc -model.train -model.classify/predict etc - - - -*/ - - - -class timeSeries { - - //reviewed - constructor(options, callback) { - this.options = - { - ...DEFAULTS, - ...options, - } || DEFAULTS; - - this.neuralNetwork = new NeuralNetwork(); - this.neuralNetworkData = new NeuralNetworkData(); - this.neuralNetworkVis = new NeuralNetworkVis(); - - this.data = { - training: [], - }; - - // Methods - this.init = this.init.bind(this); - // adding data - this.addData = this.addData.bind(this); - this.loadDataFromUrl = this.loadDataFromUrl.bind(this); - // metadata prep - this.createMetaData = this.createMetaData.bind(this); - // data prep and handling - this.prepareForTraining = this.prepareForTraining.bind(this); - this.normalizeData = this.normalizeData.bind(this); - this.normalizeInput = this.normalizeInput.bind(this); - // this.searchAndFormat = this.searchAndFormat.bind(this); - // this.formatInputItem = this.formatInputItem.bind(this); - this.convertTrainingDataToTensors = - this.convertTrainingDataToTensors.bind(this); - this.formatInputsForPrediction = this.formatInputsForPrediction.bind(this); - this.formatInputsForPredictionAll = - this.formatInputsForPredictionAll.bind(this); - this.isOneHotEncodedOrNormalized = - this.isOneHotEncodedOrNormalized.bind(this); - // model prep - this.train = this.train.bind(this); - this.trainInternal = this.trainInternal.bind(this); - this.addLayer = this.addLayer.bind(this); - this.createNetworkLayers = this.createNetworkLayers.bind(this); - this.addDefaultLayers = 
this.addDefaultLayers.bind(this); - this.compile = this.compile.bind(this); - // prediction / classification - this.predict = this.predict.bind(this); - this.predictMultiple = this.predictMultiple.bind(this); - this.classify = this.classify.bind(this); - this.classifyMultiple = this.classifyMultiple.bind(this); - this.predictInternal = this.predictInternal.bind(this); - this.classifyInternal = this.classifyInternal.bind(this); - // save / load data - this.saveData = this.saveData.bind(this); - this.loadData = this.loadData.bind(this); - // save / load model - this.save = this.save.bind(this); - this.load = this.load.bind(this); - - // release model - this.dispose = this.dispose.bind(this); - - // neuroevolution - this.mutate = this.mutate.bind(this); - this.crossover = this.crossover.bind(this); - - // Initialize - this.ready = callCallback(this.init(), callback); - } - - // changed if else from dataURL and model URL, what if both are provided - //reviewed - async init() { - // check if the a static model should be built based on the inputs and output properties - if (this.options.neuroEvolution === true) { - this.createLayersNoTraining(); - } - - if (this.options.dataUrl) { - await this.loadDataFromUrl(); - } else if (this.options.modelUrl) { - // will take a URL to model.json, an object, or files array - await this.load(this.options.modelUrl); - } - return this; - } - - //calls nndata createmetadata, calls add default layers - //reviewed no idea purpose - createLayersNoTraining() { - // Create sample data based on options - const { inputs, outputs, task } = this.options; - if (task === "classification") { - for (let i = 0; i < outputs.length; i += 1) { - const inputSample = new Array(inputs).fill(0); - this.addData(inputSample, [outputs[i]]); - } - } else { - const inputSample = new Array(inputs).fill(0); - const outputSample = new Array(outputs).fill(0); - this.addData(inputSample, outputSample); - } - - // TODO: what about inputShape? 
- this.neuralNetworkData.createMetadata(); - this.addDefaultLayers(); - } - - //calls timeSeries again, nn.model.getweights, setweights - //reviewed - copy() { - const nnCopy = new timeSeries(this.options); - return tf.tidy(() => { - const weights = this.neuralNetwork.model.getWeights(); - const weightCopies = []; - for (let i = 0; i < weights.length; i += 1) { - weightCopies[i] = weights[i].clone(); - } - nnCopy.neuralNetwork.model.setWeights(weightCopies); - return nnCopy; - }); - } - - - // addData(xInputs, yInputs, options = null) { - - // // ({inputLabels,outputLabels}=tsUtils.prepareLabels(xInputs, yInputs, options = null)); - - // console.log('raw', xInputs); - - // const xs = this.searchAndFormat(xInputs); - // const xs = nnUtils.formatDataAsObject(formattedInputs, inputLabels); - - // const ys = nnUtils.formatDataAsObject(yInputs, outputLabels); - - // console.log('xs and yx', xInputs); - // console.log('xs and yx',ys) - // //create formatted input first, since the data is time series, the format of the data should be the following - // /* [ - // { - // xs:[{x: ,y: },{x: ,y: },{x: ,y: },{x: ,y: }], - // ys: {'label': } - // }, - - // { - // xs:[{x: ,y: },{x: ,y: },{x: ,y: },{x: ,y: }], - // ys: {'label': } - // } - // ] - - // */ - - // this.neuralNetworkData.addData(xInputs, ys); - // } - addData(xInputs, yInputs, options = null) { - const { inputs, outputs } = this.options; - - // get the input and output labels - // or infer them from the data - let inputLabels; - let outputLabels; - - if (options !== null) { - // eslint-disable-next-line prefer-destructuring - inputLabels = options.inputLabels; - // eslint-disable-next-line prefer-destructuring - outputLabels = options.outputLabels; - } else if (inputs.length > 0 && outputs.length > 0) { - // if the inputs and outputs labels have been defined - // in the constructor - if (inputs.every((item) => typeof item === "string")) { - inputLabels = inputs; - } - if (outputs.every((item) => typeof item === 
"string")) { - outputLabels = outputs; - } - } else if (typeof xInputs === "object" && typeof yInputs === "object") { - inputLabels = Object.keys(xInputs); - outputLabels = Object.keys(yInputs); - } else { - inputLabels = nnUtils.createLabelsFromArrayValues(xInputs, "input"); - outputLabels = nnUtils.createLabelsFromArrayValues(yInputs, "output"); - } - - // Make sure that the inputLabels and outputLabels are arrays - if (!(inputLabels instanceof Array)) { - throw new Error("inputLabels must be an array"); - } - if (!(outputLabels instanceof Array)) { - throw new Error("outputLabels must be an array"); - } - - const formattedInputs = this.searchAndFormat(xInputs); - const xs = nnUtils.formatDataAsObject(formattedInputs, inputLabels); - - const ys = nnUtils.formatDataAsObject(yInputs, outputLabels); - - this.neuralNetworkData.addData(xs, ys); - } - - - async loadDataFromUrl() { - const { dataUrl, inputs, outputs } = this.options; - - await this.neuralNetworkData.loadDataFromUrl( - dataUrl, - inputs, - outputs - ); - - // once the data are loaded, create the metadata - // and prep the data for training - // if the inputs are defined as an array of [img_width, img_height, channels] - this.createMetaData(); - - this.prepareForTraining(); - } - - - createMetaData() { - const { inputs } = this.options; - - let inputShape; - if (Array.isArray(inputs) && inputs.length > 0) { - inputShape = - inputs.every((item) => typeof item === "number") && inputs.length > 0 - ? 
inputs - : null; - } - - this.neuralNetworkData.createMetadata(inputShape); - } - - - prepareForTraining() { - this.data.training = this.neuralNetworkData.applyOneHotEncodingsToDataRaw(); - this.neuralNetworkData.isWarmedUp = true; - } - - normalizeData() { - if (!this.neuralNetworkData.isMetadataReady) { - // if the inputs are defined as an array of [img_width, img_height, channels] - this.createMetaData(); - } - - if (!this.neuralNetworkData.isWarmedUp) { - this.prepareForTraining(); - } - - const trainingData = this.neuralNetworkData.normalizeDataRaw(); - - // set this equal to the training data - this.data.training = trainingData; - - // set isNormalized to true - this.neuralNetworkData.meta.isNormalized = true; - } - - normalizeInput(value, _key, _meta) { - const key = _key; - const { min, max } = _meta[key]; - return nnUtils.normalizeValue(value, min, max); - } - - searchAndFormat(input) { - let formattedInputs; - if (Array.isArray(input)) { - formattedInputs = input.map((item) => this.formatInputItem(item)); - } else if (typeof input === "object") { - const newXInputs = Object.assign({}, input); - Object.keys(input).forEach((k) => { - const val = input[k]; - newXInputs[k] = this.formatInputItem(val); - }); - formattedInputs = newXInputs; - } - return formattedInputs; - } - - - formatInputItem(input) { - let imgToPredict; - let formattedInputs; - if (isInstanceOfSupportedElement(input)) { - imgToPredict = input; - } else if ( - typeof input === "object" && - isInstanceOfSupportedElement(input.elt) - ) { - imgToPredict = input.elt; // Handle p5.js image and video. - } else if ( - typeof input === "object" && - isInstanceOfSupportedElement(input.canvas) - ) { - imgToPredict = input.canvas; // Handle p5.js image and video. 
- } - - if (imgToPredict) { - formattedInputs = imgToPixelArray(imgToPredict); - } else { - formattedInputs = input; - } - - console.log("formatted input item", formattedInputs) - return formattedInputs; - } - - convertTrainingDataToTensors() { - return this.neuralNetworkData.convertRawToTensors(this.data.training); - } - - formatInputsForPrediction(_input) { - const { meta } = this.neuralNetworkData; - const inputHeaders = Object.keys(meta.inputs); - - let inputData = []; - - // TODO: check to see if it is a nested array - // to run predict or classify on a batch of data - - if (_input instanceof Array) { - inputData = inputHeaders.map((prop, idx) => { - return this.isOneHotEncodedOrNormalized(_input[idx], prop, meta.inputs); - }); - } else if (_input instanceof Object) { - // TODO: make sure that the input order is preserved! - inputData = inputHeaders.map((prop) => { - return this.isOneHotEncodedOrNormalized( - _input[prop], - prop, - meta.inputs - ); - }); - } - - // inputData = tf.tensor([inputData.flat()]) - inputData = inputData.flat(); - - return inputData; - } - - formatInputsForPredictionAll(_input) { - const { meta } = this.neuralNetworkData; - const inputHeaders = Object.keys(meta.inputs); - - let output; - - if (_input instanceof Array) { - if (_input.every((item) => Array.isArray(item))) { - output = _input.map((item) => { - return this.formatInputsForPrediction(item); - }); - - return tf.tensor(output, [_input.length, inputHeaders.length]); - } - output = this.formatInputsForPrediction(_input); - return tf.tensor([output]); - } - - output = this.formatInputsForPrediction(_input); - return tf.tensor([output]); - } - - isOneHotEncodedOrNormalized(_input, _key, _meta) { - const input = _input; - const key = _key; - - let output; - if (typeof _input !== "number") { - output = _meta[key].legend[input]; - } else { - output = _input; - if (this.neuralNetworkData.meta.isNormalized) { - // output = this.normalizeInput(_input, key, _meta); - } - } - return 
output; - } - - async train(optionsOrCallback, optionsOrWhileTraining, callback) { - let options; - let whileTrainingCb; - let finishedTrainingCb; - if ( - typeof optionsOrCallback === "object" && - typeof optionsOrWhileTraining === "function" && - typeof callback === "function" - ) { - options = optionsOrCallback; - whileTrainingCb = optionsOrWhileTraining; - finishedTrainingCb = callback; - } else if ( - typeof optionsOrCallback === "object" && - typeof optionsOrWhileTraining === "function" - ) { - options = optionsOrCallback; - whileTrainingCb = null; - finishedTrainingCb = optionsOrWhileTraining; - } else if ( - typeof optionsOrCallback === "function" && - typeof optionsOrWhileTraining === "function" - ) { - options = {}; - whileTrainingCb = optionsOrCallback; - finishedTrainingCb = optionsOrWhileTraining; - } else { - options = {}; - whileTrainingCb = null; - finishedTrainingCb = optionsOrCallback; - } - - return callCallback(this.trainInternal(options, whileTrainingCb), finishedTrainingCb); - } - - async trainInternal(_options, whileTrainingCb) { - const options = { - epochs: 10, - batchSize: 32, - validationSplit: 0.1, - whileTraining: null, - ..._options, - }; - - // if debug mode is true, then use tf vis - if (this.options.debug === true || this.options.debug === "true") { - options.whileTraining = [ - this.neuralNetworkVis.trainingVis(), - { - onEpochEnd: whileTrainingCb, - }, - ]; - } else { - // if not use the default training - // options.whileTraining = whileTrainingCb === null ? 
[{ - // onEpochEnd: (epoch, loss) => { - // console.log(epoch, loss.loss) - // } - // }] : - // [{ - // onEpochEnd: whileTrainingCb - // }]; - options.whileTraining = [ - { - onEpochEnd: whileTrainingCb, - }, - ]; - } - - // if metadata needs to be generated about the data - if (!this.neuralNetworkData.isMetadataReady) { - // if the inputs are defined as an array of [img_width, img_height, channels] - this.createMetaData(); - } - - // if the data still need to be summarized, onehotencoded, etc - if (!this.neuralNetworkData.isWarmedUp) { - this.prepareForTraining(); - } - - // if inputs and outputs are not specified - // in the options, then create the tensors - // from the this.neuralNetworkData.data.raws - if (!options.inputs && !options.outputs) { - const { inputs, outputs } = this.convertTrainingDataToTensors(); - options.inputs = inputs; - options.outputs = outputs; - } - - // check to see if layers are passed into the constructor - // then use those to create your architecture - if (!this.neuralNetwork.isLayered) { - // TODO: don't update this.options.layers - Linda - this.options.layers = this.createNetworkLayers( - this.options.layers - ); - } - - // if the model does not have any layers defined yet - // then use the default structure - if (!this.neuralNetwork.isLayered) { - // TODO: don't update this.options.layers - Linda - this.options.layers = this.addDefaultLayers(); - } - - if (!this.neuralNetwork.isCompiled) { - // compile the model with defaults - this.compile(); - } - - // train once the model is compiled - await this.neuralNetwork.train(options); - } - - addLayer(layer) { - this.neuralNetwork.addLayer(layer); - } - - createNetworkLayers(layerJsonArray) { - const layers = [...layerJsonArray]; - - const { inputUnits, outputUnits } = this.neuralNetworkData.meta; - const layersLength = layers.length; - - if (!(layers.length >= 2)) { - return false; - } - - // set the inputShape - layers[0].inputShape = layers[0].inputShape - ? 
layers[0].inputShape - : inputUnits; - // set the output units - const lastIndex = layersLength - 1; - const lastLayer = layers[lastIndex]; - lastLayer.units = lastLayer.units ? lastLayer.units : outputUnits; - - layers.forEach((layer) => { - this.addLayer(tf.layers[layer.type](layer)); - }); - - return layers; - } - - addDefaultLayers() { - let layers; - const task = this.options.task; - switch (task.toLowerCase()) { - // if the task is classification - case "classification": - layers = [ - { - type: "dense", - units: this.options.hiddenUnits, - activation: "relu", - }, - { - type: "dense", - activation: "softmax", - }, - ]; - - return this.createNetworkLayers(layers); - // if the task is regression - case "regression": - layers = [ - { - type: "dense", - units: this.options.hiddenUnits, - activation: "relu", - }, - { - type: "dense", - activation: "sigmoid", - }, - ]; - return this.createNetworkLayers(layers); - // if the task is imageClassification - case "imageclassification": - layers = [ - { - type: "conv2d", - filters: 8, - kernelSize: 5, - strides: 1, - activation: "relu", - kernelInitializer: "varianceScaling", - }, - { - type: "maxPooling2d", - poolSize: [2, 2], - strides: [2, 2], - }, - { - type: "conv2d", - filters: 16, - kernelSize: 5, - strides: 1, - activation: "relu", - kernelInitializer: "varianceScaling", - }, - { - type: "maxPooling2d", - poolSize: [2, 2], - strides: [2, 2], - }, - { - type: "flatten", - }, - { - type: "dense", - kernelInitializer: "varianceScaling", - activation: "softmax", - }, - ]; - return this.createNetworkLayers(layers); - - default: - console.log("no imputUnits or outputUnits defined"); - layers = [ - { - type: "dense", - units: this.options.hiddenUnits, - activation: "relu", - }, - { - type: "dense", - activation: "sigmoid", - }, - ]; - return this.createNetworkLayers(layers); - } - } - compile() { - const LEARNING_RATE = this.options.learningRate; - - let options = {}; - - if ( - this.options.task === "classification" || 
- this.options.task === "imageClassification" - ) { - options = { - loss: "categoricalCrossentropy", - optimizer: tf.train.sgd, - metrics: ["accuracy"], - }; - } else if (this.options.task === "regression") { - options = { - loss: "meanSquaredError", - optimizer: tf.train.adam, - metrics: ["accuracy"], - }; - } - - options.optimizer = options.optimizer - ? this.neuralNetwork.setOptimizerFunction( - LEARNING_RATE, - options.optimizer - ) - : this.neuralNetwork.setOptimizerFunction(LEARNING_RATE, tf.train.sgd); - - this.neuralNetwork.compile(options); - - // if debug mode is true, then show the model summary - if (this.options.debug) { - this.neuralNetworkVis.modelSummary( - { - name: "Model Summary", - }, - this.neuralNetwork.model - ); - } - } - - - // prediction classification - - predictSync(_input) { - return this.predictSyncInternal(_input); - } - - predict(_input, _cb) { - return callCallback(this.predictInternal(_input), _cb); - } - - predictMultiple(_input, _cb) { - return callCallback(this.predictInternal(_input), _cb); - } - - classifySync(_input) { - return this.classifySyncInternal(_input); - } - - classify(_input, _cb) { - return callCallback(this.classifyInternal(_input), _cb); - } - - classifyMultiple(_input, _cb) { - return callCallback(this.classifyInternal(_input), _cb); - } - - predictSyncInternal(_input) { - const { meta } = this.neuralNetworkData; - - const inputData = this.formatInputsForPredictionAll(_input); - - const unformattedResults = this.neuralNetwork.predictSync(inputData); - inputData.dispose(); - - if (meta !== null) { - const labels = Object.keys(meta.outputs); - - const formattedResults = unformattedResults.map((unformattedResult) => { - return labels.map((item, idx) => { - // check to see if the data were normalized - // if not, then send back the values, otherwise - // unnormalize then return - let val; - let unNormalized; - if (meta.isNormalized) { - const { min, max } = meta.outputs[item]; - val = 
nnUtils.unnormalizeValue(unformattedResult[idx], min, max); - unNormalized = unformattedResult[idx]; - } else { - val = unformattedResult[idx]; - } - - const d = { - [labels[idx]]: val, - label: item, - value: val, - }; - - // if unNormalized is not undefined, then - // add that to the output - if (unNormalized) { - d.unNormalizedValue = unNormalized; - } - - return d; - }); - }); - - // return single array if the length is less than 2, - // otherwise return array of arrays - if (formattedResults.length < 2) { - return formattedResults[0]; - } - return formattedResults; - } - - // if no meta exists, then return unformatted results; - return unformattedResults; - } - - async predictInternal(_input) { - const { meta } = this.neuralNetworkData; - - const inputData = this.formatInputsForPredictionAll(_input); - - const unformattedResults = await this.neuralNetwork.predict(inputData); - inputData.dispose(); - - if (meta !== null) { - const labels = Object.keys(meta.outputs); - - const formattedResults = unformattedResults.map((unformattedResult) => { - return labels.map((item, idx) => { - // check to see if the data were normalized - // if not, then send back the values, otherwise - // unnormalize then return - let val; - let unNormalized; - if (meta.isNormalized) { - const { min, max } = meta.outputs[item]; - val = nnUtils.unnormalizeValue(unformattedResult[idx], min, max); - unNormalized = unformattedResult[idx]; - } else { - val = unformattedResult[idx]; - } - - const d = { - [labels[idx]]: val, - label: item, - value: val, - }; - - // if unNormalized is not undefined, then - // add that to the output - if (unNormalized) { - d.unNormalizedValue = unNormalized; - } - - return d; - }); - }); - - // return single array if the length is less than 2, - // otherwise return array of arrays - if (formattedResults.length < 2) { - return formattedResults[0]; - } - return formattedResults; - } - - // if no meta exists, then return unformatted results; - return 
unformattedResults; - } - - classifySyncInternal(_input) { - const { meta } = this.neuralNetworkData; - const headers = Object.keys(meta.inputs); - - let inputData; - - if (this.options.task === "imageClassification") { - // get the inputData for classification - // if it is a image type format it and - // flatten it - inputData = this.searchAndFormat(_input); - if (Array.isArray(inputData)) { - inputData = inputData.flat(); - } else { - inputData = inputData[headers[0]]; - } - - if (meta.isNormalized) { - // TODO: check to make sure this property is not static!!!! - const { min, max } = meta.inputs[headers[0]]; - inputData = this.neuralNetworkData.normalizeArray( - Array.from(inputData), - { min, max } - ); - } else { - inputData = Array.from(inputData); - } - - inputData = tf.tensor([inputData], [1, ...meta.inputUnits]); - } else { - inputData = this.formatInputsForPredictionAll(_input); - } - - const unformattedResults = this.neuralNetwork.classifySync(inputData); - inputData.dispose(); - - if (meta !== null) { - const label = Object.keys(meta.outputs)[0]; - const vals = Object.entries(meta.outputs[label].legend); - - const formattedResults = unformattedResults.map((unformattedResult) => { - return vals - .map((item, idx) => { - return { - [item[0]]: unformattedResult[idx], - label: item[0], - confidence: unformattedResult[idx], - }; - }) - .sort((a, b) => b.confidence - a.confidence); - }); - - // return single array if the length is less than 2, - // otherwise return array of arrays - if (formattedResults.length < 2) { - return formattedResults[0]; - } - return formattedResults; - } - - return unformattedResults; - } - - async classifyInternal(_input) { - const { meta } = this.neuralNetworkData; - const headers = Object.keys(meta.inputs); - - let inputData; - - if (this.options.task === "imageClassification") { - // get the inputData for classification - // if it is a image type format it and - // flatten it - inputData = this.searchAndFormat(_input); - if 
(Array.isArray(inputData)) { - inputData = inputData.flat(); - } else { - inputData = inputData[headers[0]]; - } - - if (meta.isNormalized) { - // TODO: check to make sure this property is not static!!!! - const { min, max } = meta.inputs[headers[0]]; - inputData = this.neuralNetworkData.normalizeArray( - Array.from(inputData), - { min, max } - ); - } else { - inputData = Array.from(inputData); - } - - inputData = tf.tensor([inputData], [1, ...meta.inputUnits]); - } else { - inputData = this.formatInputsForPredictionAll(_input); - } - - const unformattedResults = await this.neuralNetwork.classify(inputData); - inputData.dispose(); - - if (meta !== null) { - const label = Object.keys(meta.outputs)[0]; - const vals = Object.entries(meta.outputs[label].legend); - - const formattedResults = unformattedResults.map((unformattedResult) => { - return vals - .map((item, idx) => { - return { - [item[0]]: unformattedResult[idx], - label: item[0], - confidence: unformattedResult[idx], - }; - }) - .sort((a, b) => b.confidence - a.confidence); - }); - - // return single array if the length is less than 2, - // otherwise return array of arrays - if (formattedResults.length < 2) { - return formattedResults[0]; - } - return formattedResults; - } - - return unformattedResults; - } - - /** - * //////////////////////////////////////////////////////////// - * Save / Load Data - * //////////////////////////////////////////////////////////// - */ - - /** - * @public - * saves the training data to a JSON file. - * @param {string} [name] Optional - The name for the saved file. - * Should not include the file extension. - * Defaults to the current date and time. - * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. 
- * @return {Promise} - */ - saveData(name, callback) { - const args = handleArguments(name, callback); - return callCallback(this.neuralNetworkData.saveData(args.name), args.callback); - } - - /** - * @public - * load data - * @param {string | FileList | Object} filesOrPath - The URL of the file to load, - * or a FileList object (.files) from an HTML element . - * @param {ML5Callback} [callback] Optional - A function to call when the loading is complete. - * @return {Promise} - */ - async loadData(filesOrPath, callback) { - return callCallback(this.neuralNetworkData.loadData(filesOrPath), callback); - } - - /** - * //////////////////////////////////////////////////////////// - * Save / Load Model - * //////////////////////////////////////////////////////////// - */ - - /** - * @public - * saves the model, weights, and metadata - * @param {string} [name] Optional - The name for the saved file. - * Should not include the file extension. - * Defaults to 'model'. - * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. - * @return {Promise} - */ - async save(name, callback) { - const args = handleArguments(name, callback); - const modelName = args.string || 'model'; - - // save the model - return callCallback(Promise.all([ - this.neuralNetwork.save(modelName), - this.neuralNetworkData.saveMeta(modelName) - ]), args.callback); - } - - /** - * @public - also called internally by init() when there is a modelUrl in the options - * load a model and metadata - * @param {string | FileList | Object} filesOrPath - The URL of the file to load, - * or a FileList object (.files) from an HTML element . - * @param {ML5Callback} [callback] Optional - A function to call when the loading is complete. 
- * @return {Promise} - */ - async load(filesOrPath, callback) { - return callCallback(Promise.all([ - this.neuralNetwork.load(filesOrPath), - this.neuralNetworkData.loadMeta(filesOrPath) - ]), callback); - } - - /** - * dispose and release memory for a model - */ - dispose() { - this.neuralNetwork.dispose(); - } - - /** - * //////////////////////////////////////////////////////////// - * New methods for Neuro Evolution - * //////////////////////////////////////////////////////////// - */ - - /** - * mutate the weights of a model - * @param {*} rate - * @param {*} mutateFunction - */ - - mutate(rate, mutateFunction) { - this.neuralNetwork.mutate(rate, mutateFunction); - } - - /** - * create a new neural network with crossover - * @param {*} other - */ - - crossover(other) { - const nnCopy = this.copy(); - nnCopy.neuralNetwork.crossover(other.neuralNetwork); - return nnCopy; - } -} - -const neuralNetwork = (inputsOrOptions, outputsOrCallback, callback) => { - let options; - let cb; - - if (inputsOrOptions instanceof Object) { - options = inputsOrOptions; - cb = outputsOrCallback; - } else { - options = { - inputs: inputsOrOptions, - outputs: outputsOrCallback, - }; - cb = callback; - } - - const instance = new timeSeries(options, cb); - return instance; -}; - -export default neuralNetwork; diff --git a/src/LSTM/timeSeries.js b/src/LSTM/timeSeries.js index 9fe25e2b..d0823857 100644 --- a/src/LSTM/timeSeries.js +++ b/src/LSTM/timeSeries.js @@ -2,6 +2,17 @@ import * as tf from "@tensorflow/tfjs"; import { saveBlob } from "../utils/io"; import { randomGaussian } from "../utils/random"; + + +/* + +Things changed from neural network class: + +1. 
No neuro evolution + + +*/ + class NeuralNetwork { constructor() { // flags @@ -236,69 +247,5 @@ class NeuralNetwork { dispose() { this.model.dispose(); } - - // NeuroEvolution Functions - - /** - * mutate the weights of a model - * @param {*} rate - * @param {*} mutateFunction - */ - - mutate(rate = 0.1, mutateFunction) { - tf.tidy(() => { - const weights = this.model.getWeights(); - const mutatedWeights = []; - for (let i = 0; i < weights.length; i += 1) { - const tensor = weights[i]; - const { shape } = weights[i]; - // TODO: Evaluate if this should be sync or not - const values = tensor.dataSync().slice(); - for (let j = 0; j < values.length; j += 1) { - if (Math.random() < rate) { - if (mutateFunction) { - values[j] = mutateFunction(values[j]); - } else { - values[j] = Math.min( - Math.max(values[j] + randomGaussian(), -1), - 1 - ); - } - } - } - const newTensor = tf.tensor(values, shape); - mutatedWeights[i] = newTensor; - } - this.model.setWeights(mutatedWeights); - }); - } - - /** - * create a new neural network with crossover - * @param {*} other - */ - crossover(other) { - return tf.tidy(() => { - const weightsA = this.model.getWeights(); - const weightsB = other.model.getWeights(); - const childWeights = []; - for (let i = 0; i < weightsA.length; i += 1) { - const tensorA = weightsA[i]; - const tensorB = weightsB[i]; - const { shape } = weightsA[i]; - // TODO: Evaluate if this should be sync or not - const valuesA = tensorA.dataSync().slice(); - const valuesB = tensorB.dataSync().slice(); - for (let j = 0; j < valuesA.length; j += 1) { - if (Math.random() < 0.5) { - valuesA[j] = valuesB[j]; - } - } - const newTensor = tf.tensor(valuesA, shape); - childWeights[i] = newTensor; - } - this.model.setWeights(childWeights); - }); - } } export default NeuralNetwork; diff --git a/src/LSTM/timeSeriesUtils.js b/src/LSTM/timeSeriesUtils.js index 3093b621..41bca9d9 100644 --- a/src/LSTM/timeSeriesUtils.js +++ b/src/LSTM/timeSeriesUtils.js @@ -1,3 +1,4 @@ +import { 
data, input } from "@tensorflow/tfjs"; import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; class TimeSeriesUtils { @@ -5,7 +6,159 @@ class TimeSeriesUtils { this.options = options || {}; } - prepareLabels(xInputs, yInputs, options = null){ + /* adding data: can only accept the following formats: + - for xInputs: + 1. Sequence of objects (array of objects) + [{x: , y: },{x: , y: },{x: , y: },{x: , y: }] + 2. Sequence of arrays (array of array, order matters) + [[],[],[],[]] + 3. Sequence of values (shape should be provided by user) + [[,,,,,]] e.g. shape = {steps: 4, values: 2} will become [{x: , y: },{x: , y: },{x: , y: },{x: , y: }] + */ + + verifyAndFormatInputs(xInputs, options = null,classOptions){ + const dataFormat = this.checkInputStructure(xInputs, options); + console.log(dataFormat); + return this.formatInputsToObjects(xInputs,options,classOptions,dataFormat); + } + + checkInputStructure(xInputs, options=null){ + if(!Array.isArray(xInputs)){ + throw new error('Syntax Error: Data Should be in an Array') + } + + let isObjects = true; + let isArrays = true; + let isValues = true; + + for (let i = 0; i < xInputs.length ; i++){ + if (nnUtils.getDataType(xInputs[i]) === 'object'){ + isArrays = false; + isValues = false; + if ( i > 0 ) { + if (Object.keys(xInputs[i-1]).length !== Object.keys(xInputs[i]).length || nnUtils.getDataType(xInputs[i-1]) === 'object'){ + throw new error('Data format is inconsistent') + } + } + } else if (Array.isArray(xInputs[i])){ + isObjects = false; + isValues = false; + if ( i > 0 ) { + if (xInputs[i-1].length !== xInputs[i].length || !Array.isArray(xInputs[i-1])){ + throw new error('Data format is inconsistent') + } + } + } else { + if (options.inputLabels){ + + isObjects = false; + isArrays = false; + + } else { + throw new error('inputLabels is needed for 1D array inputs') + } + } + + if (isObjects) { + return "ObjectSequence"; + } else if (isArrays) { + return "ArraySequence"; + } else if (isValues) { + return 
"ValueSequence"; + } else { + throw new error('Syntax Error: Input Structure is unknown') + } + } + } + + formatInputsToObjects(xInputs, options=null,classOptions, dataFormat){ + switch(dataFormat){ + case 'ObjectSequence': + return xInputs; + case 'ArraySequence': + return this.convertArraySequence(xInputs, options, classOptions); + case 'ValueSequence': + return this.convertValueSequence(xInputs,options); + default: + throw new error('Input Data Structure is unknown'); + } + } + + convertArraySequence(xInputs, options=null, classOptions){ + let label = '' + + if (options !== null){ + if (options.inputLabels){ + label = options.inputLabels + } + } else if (classOptions !== null){ + if (classOptions.inputs){ + label = classOptions.inputs; + } + } + + if (label === '') { + const label = this.getLabelFromNestedArray(xInputs); + } + + return xInputs.map((input)=>{ + const obj = {}; + input.forEach((value,ind) => { + obj[label[ind]] = value; + }); + return obj; + }) + } + + convertValueSequence(xInputs, options=null){ + const {inputLabels} = options; + if (xInputs.length % inputLabels.length !== 0){ + throw new error ("Invalid Input: Number of Labels don't match amount of values") + } + return xInputs.reduce((acc, _, index, array) => { + if (index % inputLabels.length === 0) { + // Create a new object for the current set of values + const obj = {}; + for (let i = 0; i < inputLabels.length; i++) { + obj[inputLabels[i]] = array[index + i]; + } + acc.push(obj); + } + return acc; + }, []); + } + + verifyAndFormatOutputs(yInputs, options=null,classOptions){ + const {outputs} = classOptions; + + let outputLabels; + + + if (options !== null) { + if (options.outputLabels){ + outputLabels = options.outputLabels; + } + } + + if (outputs.length > 0) { + if (outputs.every((item) => typeof item === "string")) { + outputLabels = outputs; + } + } else if ( typeof yInputs === "object") { + outputLabels = Object.keys(yInputs); + } else { + outputLabels = 
nnUtils.createLabelsFromArrayValues(yInputs, "output"); + } + + // Make sure that the inputLabels and outputLabels are arrays + if (!(outputLabels instanceof Array)) { + throw new Error("outputLabels must be an array"); + } + + return nnUtils.formatDataAsObject(yInputs, outputLabels); + } + + prepareLabels(xInputs, yInputs, options = null,classOptions){ const {inputs, outputs} = this.options; let inputLabels; @@ -45,6 +198,28 @@ class TimeSeriesUtils { } + getLabelFromNestedArray(xInputs,prefix = 'label'){ + // Recursive function to find the deepest level of the array + function traverseArray(array) { + if (array.length > 0 && (typeof array[0] === 'string' || typeof array[0] === 'number')) { + return array.map((_, index) => `${prefix}_${index}`); + } else { + for (const item of array) { + if (Array.isArray(item)) { + const result = traverseArray(item); + if (result) return result; + } + } + } + return null; + } + + if (Array.isArray(data)) { + return traverseArray(data); + } else { + throw new Error('Input data must be an array.'); + } + } labelsFromNestedArray(data){ function processData(data, prefix = 'label') { // Recursive function to find the deepest level of the data and return the result From 2d7ad7279b05cdf764176d252570d65971c14685 Mon Sep 17 00:00:00 2001 From: mop9047 Date: Mon, 22 Jul 2024 08:42:58 +0800 Subject: [PATCH 05/13] modified normalizeData() --- .../timeSeries-mousexy-keypoints/sketch.js | 6 +- src/LSTM/index.js | 320 +++++++++++++++++- src/LSTM/timeSeriesData.js | 313 +++++++++-------- src/LSTM/timeSeriesUtils.js | 17 + 4 files changed, 513 insertions(+), 143 deletions(-) diff --git a/examples/timeSeries-mousexy-keypoints/sketch.js b/examples/timeSeries-mousexy-keypoints/sketch.js index 85ab6a0a..84967c51 100644 --- a/examples/timeSeries-mousexy-keypoints/sketch.js +++ b/examples/timeSeries-mousexy-keypoints/sketch.js @@ -144,9 +144,9 @@ function UI(){ } function trainModel(){ - model.createArchitecture(); - model.compileModel(); - 
model.summarizeModel(); + // model.createArchitecture(); + // model.compileModel(); + // model.summarizeModel(); background(220); state = 'training'; text("Training...", 50,50); diff --git a/src/LSTM/index.js b/src/LSTM/index.js index 2f5a4a17..c69ac37e 100644 --- a/src/LSTM/index.js +++ b/src/LSTM/index.js @@ -62,9 +62,11 @@ class timeSeries { } - ///////////////////////////////////////////////////////////////////////////// - // ADD DATA // - ///////////////////////////////////////////////////////////////////////////// + /** + * //////////////////////////////////////////////////////////// + * Add and Format Data + * //////////////////////////////////////////////////////////// + */ /* adding data: can only accept the following formats: - for xInputs: @@ -86,12 +88,324 @@ class timeSeries { // 2. format the yInput - same logic as NN class const ys = tsUtils.verifyAndFormatOutputs(yInputs,options,this.options); + // 3. add data to raw this.neuralNetworkData.addData(xs,ys); } + /** + * //////////////////////////////////////////////////////////// + * Train Data + * //////////////////////////////////////////////////////////// + */ + async train(optionsOrCallback, optionsOrWhileTraining, callback) { + let options = {}; + let whileTrainingCb = null; + let finishedTrainingCb; + if (typeof optionsOrCallback === "object") { + options = optionsOrCallback; + if (typeof optionsOrWhileTraining === "function") { + whileTrainingCb = null; + finishedTrainingCb = callback || optionsOrWhileTraining; + } else { + finishedTrainingCb = optionsOrWhileTraining; + } + } else if (typeof optionsOrCallback === "function") { + whileTrainingCb = optionsOrCallback; + finishedTrainingCb = optionsOrWhileTraining; + } else { + finishedTrainingCb = optionsOrCallback; + } + + return callCallback(this.trainInternal(options, whileTrainingCb), finishedTrainingCb); + } + + async trainInternal(_options, whileTrainingCb) { + const options = { + epochs: 10, + batchSize: 32, + validationSplit: 0.1, + 
whileTraining: null, + ..._options, + }; + + // if debug mode is true, then use tf vis + if (this.options.debug === true || this.options.debug === "true") { + options.whileTraining = [ + this.neuralNetworkVis.trainingVis(), + { + onEpochEnd: whileTrainingCb, + }, + ]; + } else { + // if not use the default training + // options.whileTraining = whileTrainingCb === null ? [{ + // onEpochEnd: (epoch, loss) => { + // console.log(epoch, loss.loss) + // } + // }] : + // [{ + // onEpochEnd: whileTrainingCb + // }]; + options.whileTraining = [ + { + onEpochEnd: whileTrainingCb, + }, + ]; + } + + // if metadata needs to be generated about the data + if (!this.neuralNetworkData.isMetadataReady) { + // if the inputs are defined as an array of [img_width, img_height, channels] + this.createMetaData(); + } + + // if the data still need to be summarized, onehotencoded, etc + if (!this.neuralNetworkData.isWarmedUp) { + this.prepareForTraining(); + } + + // if inputs and outputs are not specified + // in the options, then create the tensors + // from the this.neuralNetworkData.data.raws + if (!options.inputs && !options.outputs) { + const { inputs, outputs } = this.convertTrainingDataToTensors(); + options.inputs = inputs; + options.outputs = outputs; + } + + // check to see if layers are passed into the constructor + // then use those to create your architecture + if (!this.neuralNetwork.isLayered) { + // TODO: don't update this.options.layers - Linda + this.options.layers = this.createNetworkLayers( + this.options.layers + ); + } + + // if the model does not have any layers defined yet + // then use the default structure + if (!this.neuralNetwork.isLayered) { + // TODO: don't update this.options.layers - Linda + this.options.layers = this.addDefaultLayers(); + } + + if (!this.neuralNetwork.isCompiled) { + // compile the model with defaults + this.compile(); + } + + // train once the model is compiled + await this.neuralNetwork.train(options); + } + + createMetaData() { + const { 
inputs } = this.options; + + let inputShape; + if (Array.isArray(inputs) && inputs.length > 0) { + inputShape = + inputs.every((item) => typeof item === "number") && inputs.length > 0 + ? inputs + : null; + } + + this.neuralNetworkData.createMetadata(inputShape); + } + + prepareForTraining() { + this.data.training = this.neuralNetworkData.applyOneHotEncodingsToDataRaw(); + this.neuralNetworkData.isWarmedUp = true; + } + + convertTrainingDataToTensors() { + return this.neuralNetworkData.convertRawToTensors(this.data.training); + } + + createNetworkLayers(layerJsonArray) { + const layers = [...layerJsonArray]; + + const { inputUnits, outputUnits } = this.neuralNetworkData.meta; + const layersLength = layers.length; + + if (!(layers.length >= 2)) { + return false; + } + + // set the inputShape + layers[0].inputShape = layers[0].inputShape + ? layers[0].inputShape + : inputUnits; + // set the output units + const lastIndex = layersLength - 1; + const lastLayer = layers[lastIndex]; + lastLayer.units = lastLayer.units ? 
lastLayer.units : outputUnits; + + layers.forEach((layer) => { + this.addLayer(tf.layers[layer.type](layer)); + }); + + return layers; + } + + addDefaultLayers() { + let layers; + const task = this.options.task; + switch (task.toLowerCase()) { + // if the task is classification + case "classification": + layers = [ + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "softmax", + }, + ]; + + return this.createNetworkLayers(layers); + // if the task is regression + case "regression": + layers = [ + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", + }, + ]; + return this.createNetworkLayers(layers); + // if the task is imageClassification + case "imageclassification": + layers = [ + { + type: "conv2d", + filters: 8, + kernelSize: 5, + strides: 1, + activation: "relu", + kernelInitializer: "varianceScaling", + }, + { + type: "maxPooling2d", + poolSize: [2, 2], + strides: [2, 2], + }, + { + type: "conv2d", + filters: 16, + kernelSize: 5, + strides: 1, + activation: "relu", + kernelInitializer: "varianceScaling", + }, + { + type: "maxPooling2d", + poolSize: [2, 2], + strides: [2, 2], + }, + { + type: "flatten", + }, + { + type: "dense", + kernelInitializer: "varianceScaling", + activation: "softmax", + }, + ]; + return this.createNetworkLayers(layers); + + default: + console.log("no imputUnits or outputUnits defined"); + layers = [ + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", + }, + ]; + return this.createNetworkLayers(layers); + } + } + + addLayer(layer) { + this.neuralNetwork.addLayer(layer); + } + + compile(){ + const LEARNING_RATE = this.options.learningRate; + + let options = {}; + + if ( + this.options.task === "classification" || + this.options.task === "imageClassification" + ) { + options = { + loss: "categoricalCrossentropy", + 
optimizer: tf.train.sgd, + metrics: ["accuracy"], + }; + } else if (this.options.task === "regression") { + options = { + loss: "meanSquaredError", + optimizer: tf.train.adam, + metrics: ["accuracy"], + }; + } + + options.optimizer = options.optimizer + ? this.neuralNetwork.setOptimizerFunction( + LEARNING_RATE, + options.optimizer + ) + : this.neuralNetwork.setOptimizerFunction(LEARNING_RATE, tf.train.sgd); + + this.neuralNetwork.compile(options); + + // if debug mode is true, then show the model summary + if (this.options.debug) { + this.neuralNetworkVis.modelSummary( + { + name: "Model Summary", + }, + this.neuralNetwork.model + ); + } + } + + normalizeData() { + if (!this.neuralNetworkData.isMetadataReady) { + // if the inputs are defined as an array of [img_width, img_height, channels] + this.createMetaData(); + } + + if (!this.neuralNetworkData.isWarmedUp) { + this.prepareForTraining(); + } + + const trainingData = this.neuralNetworkData.normalizeDataRaw(); + + console.log('normalized', trainingData); + + // set this equal to the training data + this.data.training = trainingData; + + // set isNormalized to true + this.neuralNetworkData.meta.isNormalized = true; + } } diff --git a/src/LSTM/timeSeriesData.js b/src/LSTM/timeSeriesData.js index 287552bd..da0aac4c 100644 --- a/src/LSTM/timeSeriesData.js +++ b/src/LSTM/timeSeriesData.js @@ -23,6 +23,29 @@ class NeuralNetworkData { }; } + /** + * //////////////////////////////////////////////////////// + * Add Data + * //////////////////////////////////////////////////////// + */ + + /** + * Add Data + * @param {object} xInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array + * @param {*} yInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array + * @void - updates this.data + */ + addData(xInputObj, yInputObj) { + this.data.raw.push({ + xs: xInputObj, + ys: yInputObj, + }); + + console.log(this.data.raw); + } + + + /** * 
//////////////////////////////////////////////////////// * Summarize Data @@ -52,11 +75,43 @@ class NeuralNetworkData { this.isMetadataReady = true; } - /* - * //////////////////////////////////////////////// - * data Summary - * //////////////////////////////////////////////// + /** + * getDTypesFromData + * gets the data types of the data we're using + * important for handling oneHot + * @private + * @void - updates this.meta */ + getDTypesFromSeriesData() { + const meta = { + ...this.meta, + inputs: {}, + outputs: {}, + }; + + const sample = this.data.raw[0]; + + + + const xs = Object.keys(sample.xs[0]); //since time series data is in form of array + const ys = Object.keys(sample.ys); + + xs.forEach((prop) => { + meta.inputs[prop] = { + dtype: nnUtils.getDataType(sample.xs[0][prop]), + }; + }); + + ys.forEach((prop) => { + meta.outputs[prop] = { + dtype: nnUtils.getDataType(sample.ys[prop]), + }; + }); + + // TODO: check if all entries have the same dtype. + // otherwise throw an error + this.meta = meta; + } /** * get stats about the data @@ -83,7 +138,8 @@ class NeuralNetworkData { inputMeta[k].min = 0; inputMeta[k].max = 1; } else if (inputMeta[k].dtype === "number") { - const dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]); + console.log('raw',this.data.raw) + const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); inputMeta[k].min = nnUtils.getMin(dataAsArray); inputMeta[k].max = nnUtils.getMax(dataAsArray); } else if (inputMeta[k].dtype === "array") { @@ -96,6 +152,83 @@ class NeuralNetworkData { return inputMeta; } + /** + * getDataOneHot + * creates onehot encodings for the input and outputs + * and adds them to the meta info + * @private + * @void + */ + getDataOneHot() { + this.meta.inputs = this.getInputMetaOneHot(this.meta.inputs, "xs"); + this.meta.outputs = this.getInputMetaOneHot(this.meta.outputs, "ys"); + } + + /** + * getOneHotMeta + * @param {Object} _inputsMeta + * @param {"xs" | "ys"} xsOrYs + * 
@return {Object} + */ + getInputMetaOneHot(_inputsMeta, xsOrYs) { + const inputsMeta = Object.assign({}, _inputsMeta); + + Object.entries(inputsMeta).forEach((arr) => { + // the key + const key = arr[0]; + // the value + const { dtype } = arr[1]; + + if (dtype === "string") { + const uniqueVals = [ + ...new Set(this.data.raw.map((obj) => obj[xsOrYs][key])), + ]; + const oneHotMeta = this.createOneHotEncodings(uniqueVals); + inputsMeta[key] = { + ...inputsMeta[key], + ...oneHotMeta, + }; + } + }); + return inputsMeta; + } + + /** + * Returns a legend mapping the + * data values to oneHot encoded values + * @private + * @param {Array} _uniqueValuesArray + * @return {Object} + */ + // eslint-disable-next-line class-methods-use-this, no-unused-vars + createOneHotEncodings(_uniqueValuesArray) { + return tf.tidy(() => { + const output = { + uniqueValues: _uniqueValuesArray, + legend: {}, + }; + + const uniqueVals = _uniqueValuesArray; // [...new Set(this.data.raw.map(obj => obj.xs[prop]))] + // get back values from 0 to the length of the uniqueVals array + const onehotValues = uniqueVals.map((item, idx) => idx); + // oneHot encode the values in the 1d tensor + const oneHotEncodedValues = tf.oneHot( + tf.tensor1d(onehotValues, "int32"), + uniqueVals.length + ); + // convert them from tensors back out to an array + const oneHotEncodedValuesArray = oneHotEncodedValues.arraySync(); + + // populate the legend with the key/values + uniqueVals.forEach((uVal, uIdx) => { + output.legend[uVal] = oneHotEncodedValuesArray[uIdx]; + }); + + return output; + }); + } + + /** * get the data units, inputshape and output units * @private @@ -141,62 +274,7 @@ class NeuralNetworkData { return units; } - /** - * getDTypesFromData - * gets the data types of the data we're using - * important for handling oneHot - * @private - * @void - updates this.meta - */ - getDTypesFromSeriesData() { - const meta = { - ...this.meta, - inputs: {}, - outputs: {}, - }; - - const sample = this.data.raw[0]; - 
const xs = Object.keys(sample.xs); - const ys = Object.keys(sample.ys); - - xs.forEach((prop) => { - meta.inputs[prop] = { - dtype: nnUtils.getDataType(sample.xs[prop]), - }; - }); - - ys.forEach((prop) => { - meta.outputs[prop] = { - dtype: nnUtils.getDataType(sample.ys[prop]), - }; - }); - - // TODO: check if all entries have the same dtype. - // otherwise throw an error - - this.meta = meta; - } - - /** - * //////////////////////////////////////////////////////// - * Add Data - * //////////////////////////////////////////////////////// - */ - - /** - * Add Data - * @param {object} xInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array - * @param {*} yInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array - * @void - updates this.data - */ - addData(xInputObj, yInputObj) { - this.data.raw.push({ - xs: xInputObj, - ys: yInputObj, - }); - console.log(this.data.raw); - } /** * //////////////////////////////////////////////////////// @@ -275,6 +353,30 @@ class NeuralNetworkData { return normalizedData; } + normalizer(inputOrOutputMeta,xsOrYs){ + const dataRaw = this.data.raw; + // the data length + const dataLength = dataRaw.length; + // the copy of the inputs.meta[inputOrOutput] + const inputMeta = Object.assign({}, inputOrOutputMeta); + + const normalized = {}; + Object.keys(inputMeta).forEach((k) => { + console.log(k); + // get the min and max values + const options = { + min: inputMeta[k].min, + max: inputMeta[k].max, + }; + + const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); + normalized[k] = this.normalizeArray(dataAsArray, options); + + }); + + + } + /** * @param {Object} inputOrOutputMeta * @param {"xs" | "ys"} xsOrYs @@ -287,29 +389,40 @@ class NeuralNetworkData { // the copy of the inputs.meta[inputOrOutput] const inputMeta = Object.assign({}, inputOrOutputMeta); + + + console.log('heremeta', inputOrOutputMeta); // normalized 
output object const normalized = {}; Object.keys(inputMeta).forEach((k) => { + console.log(k); // get the min and max values const options = { min: inputMeta[k].min, max: inputMeta[k].max, }; - const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); + + // depending on the input type, normalize accordingly if (inputMeta[k].dtype === "string") { + const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); options.legend = inputMeta[k].legend; normalized[k] = this.normalizeArray(dataAsArray, options); } else if (inputMeta[k].dtype === "number") { + const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); normalized[k] = this.normalizeArray(dataAsArray, options); + } else if (inputMeta[k].dtype === "array") { + const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); normalized[k] = dataAsArray.map((item) => this.normalizeArray(item, options) ); } + }); + console.log('opopp',normalized); // create a normalized version of data.raws const output = [...new Array(dataLength).fill(null)].map((item, idx) => { const row = { @@ -323,6 +436,8 @@ class NeuralNetworkData { return row; }); + + return output; } @@ -443,82 +558,6 @@ class NeuralNetworkData { return output; } - /** - * getDataOneHot - * creates onehot encodings for the input and outputs - * and adds them to the meta info - * @private - * @void - */ - getDataOneHot() { - this.meta.inputs = this.getInputMetaOneHot(this.meta.inputs, "xs"); - this.meta.outputs = this.getInputMetaOneHot(this.meta.outputs, "ys"); - } - - /** - * getOneHotMeta - * @param {Object} _inputsMeta - * @param {"xs" | "ys"} xsOrYs - * @return {Object} - */ - getInputMetaOneHot(_inputsMeta, xsOrYs) { - const inputsMeta = Object.assign({}, _inputsMeta); - - Object.entries(inputsMeta).forEach((arr) => { - // the key - const key = arr[0]; - // the value - const { dtype } = arr[1]; - - if (dtype === "string") { - const uniqueVals = [ - ...new Set(this.data.raw.map((obj) => obj[xsOrYs][key])), - ]; - const 
oneHotMeta = this.createOneHotEncodings(uniqueVals); - inputsMeta[key] = { - ...inputsMeta[key], - ...oneHotMeta, - }; - } - }); - - return inputsMeta; - } - - /** - * Returns a legend mapping the - * data values to oneHot encoded values - * @private - * @param {Array} _uniqueValuesArray - * @return {Object} - */ - // eslint-disable-next-line class-methods-use-this, no-unused-vars - createOneHotEncodings(_uniqueValuesArray) { - return tf.tidy(() => { - const output = { - uniqueValues: _uniqueValuesArray, - legend: {}, - }; - - const uniqueVals = _uniqueValuesArray; // [...new Set(this.data.raw.map(obj => obj.xs[prop]))] - // get back values from 0 to the length of the uniqueVals array - const onehotValues = uniqueVals.map((item, idx) => idx); - // oneHot encode the values in the 1d tensor - const oneHotEncodedValues = tf.oneHot( - tf.tensor1d(onehotValues, "int32"), - uniqueVals.length - ); - // convert them from tensors back out to an array - const oneHotEncodedValuesArray = oneHotEncodedValues.arraySync(); - - // populate the legend with the key/values - uniqueVals.forEach((uVal, uIdx) => { - output.legend[uVal] = oneHotEncodedValuesArray[uIdx]; - }); - - return output; - }); - } /** * //////////////////////////////////////////////// diff --git a/src/LSTM/timeSeriesUtils.js b/src/LSTM/timeSeriesUtils.js index 41bca9d9..e44ee16e 100644 --- a/src/LSTM/timeSeriesUtils.js +++ b/src/LSTM/timeSeriesUtils.js @@ -259,6 +259,23 @@ class TimeSeriesUtils { } } } + + + + // normalize utilities + reshapeTo3DArray(data, shape) { + let result = []; + let index = 0; + for (let i = 0; i < shape[0]; i++) { + let subArray = []; + for (let j = 0; j < shape[1]; j++) { + subArray.push(data[index]); + index++; + } + result.push(subArray); + } + return result; + } } const timeSeriesUtils = () => { From 55a45584d40d132cd9e9f35ffc2b9db289d589af Mon Sep 17 00:00:00 2001 From: mop9047 Date: Mon, 5 Aug 2024 16:13:10 +0800 Subject: [PATCH 06/13] fixed normalized, added hands example, 
implemented load data, removed redundant code --- examples/timeSeries-hand-gestures/index.html | 23 + examples/timeSeries-hand-gestures/sketch.js | 119 +++++ .../timeSeries-mousexy-keypoints/sketch.js | 16 +- package.json | 3 +- src/LSTM/index.js | 313 ++++++++++++- src/LSTM/timeSeries.js | 1 - src/LSTM/timeSeriesData.js | 419 ++++++++++-------- src/LSTM/timeSeriesUtils.js | 129 +++--- 8 files changed, 753 insertions(+), 270 deletions(-) create mode 100644 examples/timeSeries-hand-gestures/index.html create mode 100644 examples/timeSeries-hand-gestures/sketch.js diff --git a/examples/timeSeries-hand-gestures/index.html b/examples/timeSeries-hand-gestures/index.html new file mode 100644 index 00000000..38789782 --- /dev/null +++ b/examples/timeSeries-hand-gestures/index.html @@ -0,0 +1,23 @@ + + + + + + + + ml5.js Time Series Mouse classification Example + + + + + + + + diff --git a/examples/timeSeries-hand-gestures/sketch.js b/examples/timeSeries-hand-gestures/sketch.js new file mode 100644 index 00000000..ed76e818 --- /dev/null +++ b/examples/timeSeries-hand-gestures/sketch.js @@ -0,0 +1,119 @@ + +let handPose; +let video; +let hands = []; +let sequence = []; +const seqlength = 50; +let recording_finished = false; + +function preload() { + ml5.setBackend('webgl') + // Load the handPose model + handPose = ml5.handPose(); +} + +function setup() { + createCanvas(640, 480); + + video = createCapture(VIDEO); + video.size(640, 480); + video.hide(); + + handPose.detectStart(video, gotHands); + + let options = { + outputs: ['label'], + task: 'classification', + debug: 'true', + learningRate: 0.005, + dataUrl: "http://127.0.0.1:5500/2024-8-5_13-43-10.json", + }; + + model = ml5.timeSeries(options); + + nameField = createInput('') + nameField.attribute('placeholder', 'word to train') + nameField.position(100, 100) + nameField.size(250) +} + +function draw() { + + image(video, 0, 0, width, height); + + + if(hands.length>0 && recording_finished == false){ + if 
(sequence.length <= seqlength){ + handpoints = drawPoints(); + sequence.push(handpoints); + } else if (sequence.length>0){ + recording_finished = true; + + let word = nameField.value() + + if (word.length > 0){ + let target = {label:word} + model.addData(sequence, target); + } else { + model.classify(sequence, gotResults); + } + + sequence = []; + } + } else { + if (hands.length == 0){ + recording_finished = false; + } + } +} + +function drawPoints(){ + let handpoints = [] + for (let i = 0; i < hands.length; i++) { + let hand = hands[i]; + for (let j = 0; j < hand.keypoints.length; j++) { + let keypoint = hand.keypoints[j]; + fill(0, 255, 0); + noStroke(); + circle(keypoint.x, keypoint.y, 5); + handpoints.push(keypoint.x,keypoint.y) + } + } + const output = handpoints; + handpoints = []; return output; +} + +// Callback function for when handPose outputs data +function gotHands(results) { + // save the output to the hands variable + hands = results; +} + +function keyPressed(){ + if (key == 's'){ + model.save(); + } + if (key == 'z'){ + model.saveData(); + } + + if (key == 't'){ + model.normalizeData(); + let options = { + epochs: 20 + } + model.train(options,whileTraining,finishedTraining); + } +} + +function whileTraining(epoch, loss) { + console.log(epoch); +} + +function finishedTraining() { + console.log('finished training.'); +} + +function gotResults(results){ + console.log(results) +} \ No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/sketch.js b/examples/timeSeries-mousexy-keypoints/sketch.js index 84967c51..4bacf292 100644 --- a/examples/timeSeries-mousexy-keypoints/sketch.js +++ b/examples/timeSeries-mousexy-keypoints/sketch.js @@ -20,11 +20,11 @@ let sequence = []; function setup() { ml5.setBackend('webgl') let options = { - inputs: ['x', 'y'], + inputs: ['x','y'], outputs: ['label'], task: 'classification', debug: 'true', - learningRate: 0.5 + learningRate: 0.05 }; model = ml5.timeSeries(options); createCanvas(600, 400); @@ 
-78,11 +78,11 @@ function gotResults(results) { // if (error) { // console.log(error); // } - // console.log('hello', results); - stroke(0); - fill(0, 0, 255, 100); - let label = results[0].label; - text("Prediction: " + label, 50,50); + console.log('hello', results); + // stroke(0); + // fill(0, 0, 255, 100); + // let label = results[0].label; + // text("Prediction: " + label, 50,50); // let label = error[0].label; } @@ -152,7 +152,7 @@ function UI(){ text("Training...", 50,50); model.normalizeData(); let options = { - epochs: 100 + epochs: 20 } model.train(options,whileTraining,finishedTraining); } diff --git a/package.json b/package.json index 63b88db7..3aa05bba 100644 --- a/package.json +++ b/package.json @@ -71,5 +71,6 @@ "@babel/preset-env" ] }, - "prettier": {} + "prettier": {}, + "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e" } diff --git a/src/LSTM/index.js b/src/LSTM/index.js index c69ac37e..4f6efbd2 100644 --- a/src/LSTM/index.js +++ b/src/LSTM/index.js @@ -54,11 +54,23 @@ class timeSeries { this.data = { training: [], }; + + this.init = this.init.bind(this); + + this.ready = callCallback(this.init(), callback); } // mainly for loading data - should be async async init() { - return 0; + console.log('init yeah') + if (this.options.dataUrl) { + console.log('URL provided, will load data') + await this.loadDataFromUrl(this.options.dataUrl); + } else if (this.options.modelUrl) { + // will take a URL to model.json, an object, or files array + await this.load(this.options.modelUrl); + } + return this; } @@ -202,26 +214,31 @@ class timeSeries { } createMetaData() { + // this method does not get shape for images but instead for timesteps const { inputs } = this.options; - let inputShape; - if (Array.isArray(inputs) && inputs.length > 0) { - inputShape = - inputs.every((item) => typeof item === "number") && inputs.length > 0 - ? 
inputs - : null; - } + console.log('meta',inputs); + + let inputShape; + if (typeof inputs === 'number'){ + inputShape = inputs; + } else if (Array.isArray(inputs) && inputs.length > 0){ + inputShape = inputs.length; //will be fed into the tensors later + } + + console.log('inputshape',inputShape); this.neuralNetworkData.createMetadata(inputShape); } prepareForTraining() { - this.data.training = this.neuralNetworkData.applyOneHotEncodingsToDataRaw(); + // this.data.training = this.neuralNetworkData.applyOneHotEncodingsToDataRaw(); this.neuralNetworkData.isWarmedUp = true; } convertTrainingDataToTensors() { - return this.neuralNetworkData.convertRawToTensors(this.data.training); + console.log('training',this.data.training); + return this.neuralNetworkData.convertRawToTensors(this.data.training); } createNetworkLayers(layerJsonArray) { @@ -257,16 +274,87 @@ class timeSeries { // if the task is classification case "classification": layers = [ + { + type: "conv1d", + filters: 64, + kernelSize: 3, + activation: "relu", + inputShape: this.neuralNetworkData.meta.seriesShape, + }, + { + type: "maxPooling1d", + poolSize: 2, + }, + { + type: "conv1d", + filters: 128, + kernelSize: 3, + activation: "relu", + inputShape: this.neuralNetworkData.meta.seriesShape, + }, + { + type: "maxPooling1d", + poolSize: 2, + }, + { + type: "flatten", + }, { type: "dense", - units: this.options.hiddenUnits, + units: 128, activation: "relu", }, { type: "dense", + units:2, activation: "softmax", }, ]; + // let shape = this.neuralNetworkData.meta.seriesShape + // layers = [ + // { + // type: "input", + // shape: shape, + // }, + // { + // type: "reshape", + // targetShape: [shape[0],shape[1]*shape[2]], + // }, + // { + // type: "conv1d", + // filters: 64, + // kernelSize: 3, + // activation: "relu", + // inputShape: shape, + // }, + // { + // type: "maxPooling1d", + // poolSize: 2, + // }, + // { + // type: "conv1d", + // filters: 128, + // kernelSize: 3, + // activation: "relu", + // }, + // { 
+ // type: "maxPooling1d", + // poolSize: 2, + // }, + // { + // type: "flatten", + // }, + // { + // type: "dense", + // units: 128, + // activation: "relu", + // }, + // { + // type: "dense", + // units:2, + // activation: "softmax", + // }, + // ]; return this.createNetworkLayers(layers); // if the task is regression @@ -355,7 +443,7 @@ class timeSeries { ) { options = { loss: "categoricalCrossentropy", - optimizer: tf.train.sgd, + optimizer: tf.train.adam, metrics: ["accuracy"], }; } else if (this.options.task === "regression") { @@ -398,16 +486,213 @@ class timeSeries { const trainingData = this.neuralNetworkData.normalizeDataRaw(); - console.log('normalized', trainingData); - // set this equal to the training data this.data.training = trainingData; // set isNormalized to true this.neuralNetworkData.meta.isNormalized = true; + + console.log('train',this.data.training) + } + + classify(_input, _cb) { + return callCallback(this.classifyInternal(_input), _cb); + } + + async classifyInternal(_input){ + const { meta } = this.neuralNetworkData; + const inputData = this.formatInputsForPredictionAll(_input); + + const unformattedResults = await this.neuralNetwork.classify(inputData); + inputData.dispose(); + + return unformattedResults; + } + + // async classifyInternal(_input) { + // const { meta } = this.neuralNetworkData; + // const headers = Object.keys(meta.inputs); + + // let inputData; + // console.log(_input) + // // inputData = this.neuralNetworkData. 
+ // inputData = this.formatInputsForPredictionAll(_input); + + // const unformattedResults = await this.neuralNetwork.classify(inputData); + // inputData.dispose(); + + // if (meta !== null) { + // const label = Object.keys(meta.outputs)[0]; + // const vals = Object.entries(meta.outputs[label].legend); + + // const formattedResults = unformattedResults.map((unformattedResult) => { + // return vals + // .map((item, idx) => { + // return { + // [item[0]]: unformattedResult[idx], + // label: item[0], + // confidence: unformattedResult[idx], + // }; + // }) + // .sort((a, b) => b.confidence - a.confidence); + // }); + + // // return single array if the length is less than 2, + // // otherwise return array of arrays + // if (formattedResults.length < 2) { + // return formattedResults[0]; + // } + // return formattedResults; + // } + + // return unformattedResults; + // } + + + + formatInputsForPredictionAll(_input) { + const { meta } = this.neuralNetworkData; + const inputHeaders = Object.keys(meta.inputs); + + const formatted_inputs = tsUtils.verifyAndFormatInputs(_input,null,this.options); + const normalized_inputs = this.neuralNetworkData.normalizePredictData(formatted_inputs, meta.inputs); + const output = tf.tensor(normalized_inputs); + + return output; + } + + + /** + * //////////////////////////////////////////////////////////// + * Save / Load Data + * //////////////////////////////////////////////////////////// + */ + + /** + * @public + * saves the training data to a JSON file. + * @param {string} [name] Optional - The name for the saved file. + * Should not include the file extension. + * Defaults to the current date and time. + * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. 
+ * @return {Promise} + */ + saveData(name, callback) { + const args = handleArguments(name, callback); + return callCallback(this.neuralNetworkData.saveData(args.name), args.callback); + } + + /** + * @public + * load data + * @param {string | FileList | Object} filesOrPath - The URL of the file to load, + * or a FileList object (.files) from an HTML element . + * @param {ML5Callback} [callback] Optional - A function to call when the loading is complete. + * @return {Promise} + */ + async loadData(filesOrPath, callback) { + return callCallback(this.neuralNetworkData.loadData(filesOrPath), callback); + } + + /** + * Loads data from a URL using the appropriate function + * @param {*} dataUrl + * @param {*} inputs + * @param {*} outputs + * @void + */ + async loadDataFromUrl(dataUrl, inputs, outputs) { + let json; + let dataFromUrl + try { + if (dataUrl.endsWith(".csv")) { + dataFromUrl = await this.neuralNetworkData.loadCSV(dataUrl, inputs, outputs); + } else if (dataUrl.endsWith(".json")) { + dataFromUrl = await this.neuralNetworkData.loadJSON(dataUrl, inputs, outputs); + } else if (dataUrl.includes("blob")) { + dataFromUrl = await this.loadBlob(dataUrl, inputs, outputs); + } else { + throw new Error("Not a valid data format. 
Must be csv or json"); + } + } catch (error) { + console.error(error); + throw new Error(error); + } + + dataFromUrl.map((item) => { + this.addData(item.xs, item.ys) + }) + + this.createMetaData(); + + this.prepareForTraining(); } + // async loadDataFromUrl() { + // const { dataUrl, inputs, outputs } = this.options; + // console.log(this.options) + // await this.neuralNetworkData.loadDataFromUrl( + // dataUrl, + // inputs, + // outputs + // ); + + // // once the data are loaded, create the metadata + // // and prep the data for training + // // if the inputs are defined as an array of [img_width, img_height, channels] + // this.createMetaData(); + + // this.prepareForTraining(); + // } + + /** + * //////////////////////////////////////////////////////////// + * Save / Load Model + * //////////////////////////////////////////////////////////// + */ + + /** + * @public + * saves the model, weights, and metadata + * @param {string} [name] Optional - The name for the saved file. + * Should not include the file extension. + * Defaults to 'model'. + * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. + * @return {Promise} + */ + async save(name, callback) { + const args = handleArguments(name, callback); + const modelName = args.string || 'model'; + console.log("hello") + // save the model + return callCallback(Promise.all([ + this.neuralNetwork.save(modelName), + this.neuralNetworkData.saveMeta(modelName) + ]), args.callback); + } + + /** + * @public - also called internally by init() when there is a modelUrl in the options + * load a model and metadata + * @param {string | FileList | Object} filesOrPath - The URL of the file to load, + * or a FileList object (.files) from an HTML element . + * @param {ML5Callback} [callback] Optional - A function to call when the loading is complete. 
+ * @return {Promise} + */ + async load(filesOrPath, callback) { + return callCallback(Promise.all([ + this.neuralNetwork.load(filesOrPath), + this.neuralNetworkData.loadMeta(filesOrPath) + ]), callback); + } + + /** + * dispose and release memory for a model + */ + dispose() { + this.neuralNetwork.dispose(); + } } const TimeSeries = (inputsOrOptions, outputsOrCallback, callback) => { diff --git a/src/LSTM/timeSeries.js b/src/LSTM/timeSeries.js index d0823857..2be1d675 100644 --- a/src/LSTM/timeSeries.js +++ b/src/LSTM/timeSeries.js @@ -10,7 +10,6 @@ Things changed from neural network class: 1. No neuro evolution - */ class NeuralNetwork { diff --git a/src/LSTM/timeSeriesData.js b/src/LSTM/timeSeriesData.js index da0aac4c..b7fb9254 100644 --- a/src/LSTM/timeSeriesData.js +++ b/src/LSTM/timeSeriesData.js @@ -4,6 +4,8 @@ import { saveBlob } from "../utils/io"; import modelLoader from '../utils/modelLoader'; import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; +import tsUtils from "./timeSeriesUtils"; + class NeuralNetworkData { constructor() { this.meta = { @@ -19,7 +21,7 @@ class NeuralNetworkData { this.isWarmedUp = false; this.data = { - raw: [], // array of {xs:{}, ys:{}} + raw: [], // array of {xs:[{},{}], ys:{}} }; } @@ -40,8 +42,6 @@ class NeuralNetworkData { xs: xInputObj, ys: yInputObj, }); - - console.log(this.data.raw); } @@ -71,47 +71,44 @@ class NeuralNetworkData { this.getDataOneHot(); // calculate the input units from the data this.getDataUnits(inputShape); + // get the shape of batch this.isMetadataReady = true; } - /** + /** * getDTypesFromData * gets the data types of the data we're using * important for handling oneHot * @private * @void - updates this.meta */ - getDTypesFromSeriesData() { - const meta = { - ...this.meta, - inputs: {}, - outputs: {}, + getDTypesFromSeriesData() { + const meta = { + ...this.meta, + inputs: {}, + outputs: {}, + }; + + const sample = this.data.raw[0]; + + //consistent dTypes have already been checked at add data 
+ const xs = Object.keys(sample.xs[0]); //since time series data is in form of array + const ys = Object.keys(sample.ys); + xs.forEach((prop) => { + meta.inputs[prop] = { + dtype: nnUtils.getDataType(sample.xs[0][prop]), }; - - const sample = this.data.raw[0]; + }); - - - const xs = Object.keys(sample.xs[0]); //since time series data is in form of array - const ys = Object.keys(sample.ys); + ys.forEach((prop) => { + meta.outputs[prop] = { + dtype: nnUtils.getDataType(sample.ys[prop]), + }; + }); - xs.forEach((prop) => { - meta.inputs[prop] = { - dtype: nnUtils.getDataType(sample.xs[0][prop]), - }; - }); - - ys.forEach((prop) => { - meta.outputs[prop] = { - dtype: nnUtils.getDataType(sample.ys[prop]), - }; - }); - - // TODO: check if all entries have the same dtype. - // otherwise throw an error - this.meta = meta; - } + this.meta = meta; + } /** * get stats about the data @@ -138,7 +135,6 @@ class NeuralNetworkData { inputMeta[k].min = 0; inputMeta[k].max = 1; } else if (inputMeta[k].dtype === "number") { - console.log('raw',this.data.raw) const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); inputMeta[k].min = nnUtils.getMin(dataAsArray); inputMeta[k].max = nnUtils.getMax(dataAsArray); @@ -193,43 +189,7 @@ class NeuralNetworkData { return inputsMeta; } - /** - * Returns a legend mapping the - * data values to oneHot encoded values - * @private - * @param {Array} _uniqueValuesArray - * @return {Object} - */ - // eslint-disable-next-line class-methods-use-this, no-unused-vars - createOneHotEncodings(_uniqueValuesArray) { - return tf.tidy(() => { - const output = { - uniqueValues: _uniqueValuesArray, - legend: {}, - }; - - const uniqueVals = _uniqueValuesArray; // [...new Set(this.data.raw.map(obj => obj.xs[prop]))] - // get back values from 0 to the length of the uniqueVals array - const onehotValues = uniqueVals.map((item, idx) => idx); - // oneHot encode the values in the 1d tensor - const oneHotEncodedValues = tf.oneHot( - 
tf.tensor1d(onehotValues, "int32"), - uniqueVals.length - ); - // convert them from tensors back out to an array - const oneHotEncodedValuesArray = oneHotEncodedValues.arraySync(); - - // populate the legend with the key/values - uniqueVals.forEach((uVal, uIdx) => { - output.legend[uVal] = oneHotEncodedValuesArray[uIdx]; - }); - - return output; - }); - } - - - /** + /** * get the data units, inputshape and output units * @private * @param {Array} arrayShape @@ -274,6 +234,42 @@ class NeuralNetworkData { return units; } + /** + * Returns a legend mapping the + * data values to oneHot encoded values + * @private + * @param {Array} _uniqueValuesArray + * @return {Object} + */ + // eslint-disable-next-line class-methods-use-this, no-unused-vars + createOneHotEncodings(_uniqueValuesArray) { + return tf.tidy(() => { + const output = { + uniqueValues: _uniqueValuesArray, + legend: {}, + }; + + const uniqueVals = _uniqueValuesArray; // [...new Set(this.data.raw.map(obj => obj.xs[prop]))] + // get back values from 0 to the length of the uniqueVals array + const onehotValues = uniqueVals.map((item, idx) => idx); + // oneHot encode the values in the 1d tensor + const oneHotEncodedValues = tf.oneHot( + tf.tensor1d(onehotValues, "int32"), + uniqueVals.length + ); + // convert them from tensors back out to an array + const oneHotEncodedValuesArray = oneHotEncodedValues.arraySync(); + + // populate the legend with the key/values + uniqueVals.forEach((uVal, uIdx) => { + output.legend[uVal] = oneHotEncodedValuesArray[uIdx]; + }); + + return output; + }); + } + + /** @@ -298,14 +294,19 @@ class NeuralNetworkData { const inputArr = []; const outputArr = []; + + dataRaw.forEach((row) => { // get xs - const xs = Object.keys(meta.inputs) - .map((k) => { - return row.xs[k]; - }) - .flat(); + // const xs = Object.keys(meta.inputs) + // .map((k) => { + // return row.xs[k]; + // }) + // .flat(); + // inputArr.push(xs); + + const xs = row.xs; inputArr.push(xs); // get ys @@ -317,16 +318,22 
@@ class NeuralNetworkData { outputArr.push(ys); }); + + + // const inputs = tf.tensor(inputArr.flat(), [ + // dataLength, + // ...meta.inputUnits, + // ]); + const inputs = tf.tensor(inputArr); + - const inputs = tf.tensor(inputArr.flat(), [ - dataLength, - ...meta.inputUnits, - ]); const outputs = tf.tensor(outputArr.flat(), [ dataLength, meta.outputUnits, ]); + + return { inputs, outputs, @@ -345,36 +352,12 @@ class NeuralNetworkData { * @return {Array} */ normalizeDataRaw() { + const normXs = this.normalizeInputData(this.meta.inputs, "xs"); const normYs = this.normalizeInputData(this.meta.outputs, "ys"); - - const normalizedData = nnUtils.zipArrays(normXs, normYs); - - return normalizedData; - } - - normalizer(inputOrOutputMeta,xsOrYs){ - const dataRaw = this.data.raw; - // the data length - const dataLength = dataRaw.length; - // the copy of the inputs.meta[inputOrOutput] - const inputMeta = Object.assign({}, inputOrOutputMeta); - - const normalized = {}; - Object.keys(inputMeta).forEach((k) => { - console.log(k); - // get the min and max values - const options = { - min: inputMeta[k].min, - max: inputMeta[k].max, - }; - - const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); - normalized[k] = this.normalizeArray(dataAsArray, options); - - }); - + const normalizedData = tsUtils.zipArraySequence(normXs, normYs); + return normalizedData; } /** @@ -384,26 +367,22 @@ class NeuralNetworkData { */ normalizeInputData(inputOrOutputMeta, xsOrYs) { const dataRaw = this.data.raw; + // the data length const dataLength = dataRaw.length; + // the copy of the inputs.meta[inputOrOutput] const inputMeta = Object.assign({}, inputOrOutputMeta); - - - console.log('heremeta', inputOrOutputMeta); // normalized output object const normalized = {}; Object.keys(inputMeta).forEach((k) => { - console.log(k); // get the min and max values const options = { min: inputMeta[k].min, max: inputMeta[k].max, }; - - // depending on the input type, normalize 
accordingly if (inputMeta[k].dtype === "string") { const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); @@ -412,7 +391,6 @@ class NeuralNetworkData { } else if (inputMeta[k].dtype === "number") { const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); normalized[k] = this.normalizeArray(dataAsArray, options); - } else if (inputMeta[k].dtype === "array") { const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); normalized[k] = dataAsArray.map((item) => @@ -421,23 +399,45 @@ class NeuralNetworkData { } }); + - console.log('opopp',normalized); - // create a normalized version of data.raws - const output = [...new Array(dataLength).fill(null)].map((item, idx) => { - const row = { - [xsOrYs]: {}, - }; - - Object.keys(inputMeta).forEach((k) => { - row[xsOrYs][k] = normalized[k][idx]; + let output; + if (xsOrYs == "ys"){ + output = [...new Array(dataLength).fill(null)].map((item, idx) => { + const row = { + [xsOrYs]: {}, + }; + + Object.keys(inputMeta).forEach((k) => { + row[xsOrYs][k] = normalized[k][idx]; + }); + + return row; }); + } else if ((xsOrYs == "xs")){ + // reshape array - already ready for tensorconversion + const features = Object.keys(inputMeta); + const feature_length = features.length; + + const seriesStep = dataRaw[0]['xs'].length; + + const batch = normalized[features[0]].length / seriesStep; - return row; - }); + this.meta.seriesShape = [seriesStep,feature_length]; - + let zipped = []; + // zip arrays before reshaping + for (let idx =0; idx < seriesStep*feature_length*batch; idx++){ + features.forEach((k) => { + zipped.push(normalized[k][idx]) + }) + } + + // reshaping + output = tsUtils.reshapeTo3DArray(zipped,[batch,seriesStep,feature_length]) + } + return output; } @@ -473,46 +473,87 @@ class NeuralNetworkData { throw new Error("error in inputArray of normalizeArray() function"); } + normalizePredictData(dataRaw, inputOrOutputMeta){ + const inputMeta = Object.assign({}, inputOrOutputMeta); + const xsOrYs = 
"xs" + const predict_normalized = {}; + Object.keys(inputMeta).forEach((k) => { + // get the min and max values + const options = { + min: inputMeta[k].min, + max: inputMeta[k].max, + }; + if (inputMeta[k].dtype === "string") { + const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); + options.legend = inputMeta[k].legend; + predict_normalized[k] = this.normalizeArray(dataAsArray, options); + } else if (inputMeta[k].dtype === "number") { + const dataAsArray = Array(dataRaw).flatMap((item) => item.map((obj) => (obj[k]))); + console.log(dataAsArray); + predict_normalized[k] = this.normalizeArray(dataAsArray, options); + } + + }); + + console.log('done', predict_normalized); + + const features = Object.keys(inputMeta); + const feature_length = features.length; + + const seriesStep = dataRaw.length; + + const batch = 1; + let zipped = []; + + // zip arrays before reshaping + for (let idx =0; idx < seriesStep*feature_length*batch; idx++){ + features.forEach((k) => {zipped.push(predict_normalized[k][idx])}) + } + // reshaping + const output = tsUtils.reshapeTo3DArray(zipped,[batch,seriesStep,feature_length]) + return output + } + /** * unNormalizeArray * @param {*} _input * @param {*} _options */ // eslint-disable-next-line no-unused-vars, class-methods-use-this - unnormalizeArray(inputArray, options) { - const { min, max } = options; - - // if the data is onehot encoded then remap the - // values from those oneHot arrays - if (options.legend) { - const unnormalized = inputArray.map((v) => { - let res; - Object.entries(options.legend).forEach((item) => { - const key = item[0]; - const val = item[1]; - const matches = v - .map((num, idx) => num === val[idx]) - .every((truthy) => truthy === true); - if (matches) res = key; - }); - return res; - }); - - return unnormalized; - } - - // if the dtype is a number - if (inputArray.every((v) => typeof v === "number")) { - const unnormalized = inputArray.map((v) => - nnUtils.unnormalizeValue(v, min, max) - ); - return 
unnormalized; - } - - // otherwise return the input array - // return inputArray; - throw new Error("error in inputArray of normalizeArray() function"); - } + // unnormalizeArray(inputArray, options) { + // const { min, max } = options; + + // // if the data is onehot encoded then remap the + // // values from those oneHot arrays + // if (options.legend) { + // const unnormalized = inputArray.map((v) => { + // let res; + // Object.entries(options.legend).forEach((item) => { + // const key = item[0]; + // const val = item[1]; + // const matches = v + // .map((num, idx) => num === val[idx]) + // .every((truthy) => truthy === true); + // if (matches) res = key; + // }); + // return res; + // }); + + // return unnormalized; + // } + + // // if the dtype is a number + // if (inputArray.every((v) => typeof v === "number")) { + // const unnormalized = inputArray.map((v) => + // nnUtils.unnormalizeValue(v, min, max) + // ); + // return unnormalized; + // } + + // // otherwise return the input array + // // return inputArray; + // throw new Error("error in inputArray of normalizeArray() function"); + // } /* * //////////////////////////////////////////////// @@ -529,12 +570,15 @@ class NeuralNetworkData { const meta = Object.assign({}, this.meta); const output = this.data.raw.map((row) => { + + const xs = { ...row.xs, }; const ys = { ...row.ys, }; + // get xs Object.keys(meta.inputs).forEach((k) => { if (meta.inputs[k].legend) { @@ -554,7 +598,6 @@ class NeuralNetworkData { ys, }; }); - console.log('onhot',output); return output; } @@ -609,7 +652,9 @@ class NeuralNetworkData { } // format the data.raw array - this.formatRawData(json, inputLabels, outputLabels); + // this.formatRawData(json, inputLabels, outputLabels); + return this.findEntries(json); + } catch (err) { console.error("error loading json"); throw new Error(err); @@ -631,7 +676,9 @@ class NeuralNetworkData { entries: loadedData, }; // format the data.raw array - this.formatRawData(json, inputLabels, 
outputLabels); + // this.formatRawData(json, inputLabels, outputLabels); + return this.findEntries(json); + } catch (err) { console.error("error loading csv", err); throw new Error(err); @@ -689,6 +736,7 @@ class NeuralNetworkData { const text = JSON.stringify(loadedData.data); if (nnUtils.isJsonOrString(text)) { loadedData = JSON.parse(text); + console.log(loadedData); } else { console.log( "Whoops! something went wrong. Either this kind of data is not supported yet or there is an issue with .loadData" @@ -740,11 +788,13 @@ class NeuralNetworkData { * @return {Promise} */ async saveMeta(modelName = "model") { + console.log("meta saved"); await saveBlob( JSON.stringify(this.meta), `${modelName}_meta.json`, "text/plain" ); + } /** @@ -798,45 +848,22 @@ class NeuralNetworkData { * @param {Array} outputLabels * @void */ - formatRawData(json, inputLabels, outputLabels) { - // Recurse through the json object to find - // an array containing `entries` or `data` - const dataArray = this.findEntries(json); - - if (!dataArray.length > 0) { - console.log(`your data must be contained in an array in \n - a property called 'entries' or 'data' of your json object`); - } + // formatRawData(json, inputLabels, outputLabels) { - // create an array of json objects [{xs,ys}] - const result = dataArray.map((item, idx) => { - const output = { - xs: {}, - ys: {}, - }; + // // Recurse through the json object to find + // // an array containing `entries` or `data` + // const dataArray = this.findEntries(json); - inputLabels.forEach((k) => { - if (item[k] !== undefined) { - output.xs[k] = item[k]; - } else { - console.error(`the input label ${k} does not exist at row ${idx}`); - } - }); + // if (!dataArray.length > 0) { + // console.log(`your data must be contained in an array in \n + // a property called 'entries' or 'data' of your json object`); + // } - outputLabels.forEach((k) => { - if (item[k] !== undefined) { - output.ys[k] = item[k]; - } else { - console.error(`the output label ${k} 
does not exist at row ${idx}`); - } - }); - - return output; - }); + // //////////// - // set this.data.raw - this.data.raw = result; - } + // // set this.data.raw + // this.data.raw = result; + // } /** * csvToJSON diff --git a/src/LSTM/timeSeriesUtils.js b/src/LSTM/timeSeriesUtils.js index e44ee16e..9334cdfd 100644 --- a/src/LSTM/timeSeriesUtils.js +++ b/src/LSTM/timeSeriesUtils.js @@ -18,7 +18,6 @@ class TimeSeriesUtils { verifyAndFormatInputs(xInputs, options = null,classOptions){ const dataFormat = this.checkInputStructure(xInputs, options); - console.log(dataFormat); return this.formatInputsToObjects(xInputs,options,classOptions,dataFormat); } @@ -90,6 +89,7 @@ class TimeSeriesUtils { if (options !== null){ if (options.inputLabels){ label = options.inputLabels + console.log('here1') } } else if (classOptions !== null){ if (classOptions.inputs){ @@ -97,8 +97,9 @@ class TimeSeriesUtils { } } - if (label === '') { - const label = this.getLabelFromNestedArray(xInputs); + if ((typeof label === 'string' && label === '') || + (Array.isArray(label) && label.length === 0)) { + label = this.getLabelFromNestedArray(xInputs); } return xInputs.map((input)=>{ @@ -177,11 +178,11 @@ class TimeSeriesUtils { // input-based values to assign } else { - inputLabels = this.labelsFromNestedArray(xInputs); + inputLabels = this.getLabelFromNestedArray(xInputs); if (typeof yInputs === "object") { outputLabels = Object.keys(yInputs); } else { - inputLabels = this.labelsFromNestedArray(yInputs); + inputLabels = this.getLabelFromNestedArray(yInputs); } } @@ -214,68 +215,96 @@ class TimeSeriesUtils { return null; } - if (Array.isArray(data)) { - return traverseArray(data); + if (Array.isArray(xInputs)) { + return traverseArray(xInputs); } else { throw new Error('Input data must be an array.'); } } - labelsFromNestedArray(data){ - function processData(data, prefix = 'label') { - // Recursive function to find the deepest level of the data and return the result - function traverse(value) { - 
if (Array.isArray(value)) { - if (value.length > 0 && typeof value[0] === 'string') { - // If the deepest unit is an array with strings - return { type: 'array', data: value }; - } else if (value.length > 0 && typeof value[0] === 'number') { - // If the deepest unit is an array with numbers - return { type: 'array', data: value }; - } else { - for (const item of value) { - const result = traverse(item); - if (result) return result; - } - } - } else if (value !== null && typeof value === 'object') { - return { type: 'object', data: value }; // If the deepest unit is an object - } - return null; - } + + // labelsFromNestedArray(data){ + // function processData(data, prefix = 'label') { + // // Recursive function to find the deepest level of the data and return the result + // function traverse(value) { + // if (Array.isArray(value)) { + // if (value.length > 0 && typeof value[0] === 'string') { + // // If the deepest unit is an array with strings + // return { type: 'array', data: value }; + // } else if (value.length > 0 && typeof value[0] === 'number') { + // // If the deepest unit is an array with numbers + // return { type: 'array', data: value }; + // } else { + // for (const item of value) { + // const result = traverse(item); + // if (result) return result; + // } + // } + // } else if (value !== null && typeof value === 'object') { + // return { type: 'object', data: value }; // If the deepest unit is an object + // } + // return null; + // } - const result = traverse(data); + // const result = traverse(data); - if (result) { - if (result.type === 'object') { - // If the deepest level is an object, get the unique keys - return Object.keys(result.data); - } else if (result.type === 'array') { - // If the deepest level is an array with strings or numbers, get the labels - return result.data.map((_, index) => `${prefix}_${index}`); - } - } else { - // No recognizable structure found - throw new Error('Data does not match expected structure for objects or 
arrays.'); - } - } - } + // if (result) { + // if (result.type === 'object') { + // // If the deepest level is an object, get the unique keys + // return Object.keys(result.data); + // } else if (result.type === 'array') { + // // If the deepest level is an array with strings or numbers, get the labels + // return result.data.map((_, index) => `${prefix}_${index}`); + // } + // } else { + // // No recognizable structure found + // throw new Error('Data does not match expected structure for objects or arrays.'); + // } + // } + // output = processData(data, "label"); + + // console.log('labeling',output); + // return processData(data, "label"); + // } // normalize utilities reshapeTo3DArray(data, shape) { + const [batch, timeStep, feature] = shape; let result = []; let index = 0; - for (let i = 0; i < shape[0]; i++) { - let subArray = []; - for (let j = 0; j < shape[1]; j++) { - subArray.push(data[index]); - index++; + + for (let i = 0; i < batch; i++) { + let batchArray = []; + for (let j = 0; j < timeStep; j++) { + let timeStepArray = []; + for (let k = 0; k < feature; k++) { + timeStepArray.push(data[index]); + index++; + } + batchArray.push(timeStepArray); } - result.push(subArray); + result.push(batchArray); } + + return result; } + + zipArraySequence(arr1, arr2) { + if (arr1.length !== arr2.length) { + console.error("arrays do not have the same length"); + return []; + } + + return arr1.map((xs, idx) => { + const ys = arr2[idx].ys; // Extract the inner `ys` object + return { + xs: xs, + ys: ys + }; + }); + } } const timeSeriesUtils = () => { From 11c62200f5e4a94c08a7dd2978cadd9281098d33 Mon Sep 17 00:00:00 2001 From: mop9047 Date: Thu, 8 Aug 2024 18:44:03 +0800 Subject: [PATCH 07/13] changed folder name to TimeSeries, added new examples, code cleanup --- examples/timeSeries-hand-gestures/index.html | 2 +- examples/timeSeries-hand-gestures/sketch.js | 146 +++++-- .../index.html | 2 +- .../model/model.json | 1 + .../model/model.weights.bin | Bin 0 -> 17128 bytes 
.../model/model_meta.json | 1 + .../sketch.js | 145 +++++++ .../timeSeries-mousexy-keypoints/sketch.js | 180 -------- .../timeSeries-stock-prediction/index.html | 23 + .../timeSeries-stock-prediction/sketch.js | 213 +++++++++ .../stock_data.json | 36 ++ .../timeSeries-train-quickdraw/index.html | 23 + examples/timeSeries-train-quickdraw/sketch.js | 221 ++++++++++ src/{LSTM => TimeSeries}/index.js | 405 ++++++++++-------- src/{LSTM => TimeSeries}/timeSeries.js | 5 +- src/{LSTM => TimeSeries}/timeSeriesData.js | 18 +- src/{LSTM => TimeSeries}/timeSeriesUtils.js | 5 +- src/index.js | 2 +- 18 files changed, 1024 insertions(+), 404 deletions(-) rename examples/{timeSeries-mousexy-keypoints => timeSeries-load-model-hand-gestures}/index.html (92%) create mode 100644 examples/timeSeries-load-model-hand-gestures/model/model.json create mode 100644 examples/timeSeries-load-model-hand-gestures/model/model.weights.bin create mode 100644 examples/timeSeries-load-model-hand-gestures/model/model_meta.json create mode 100644 examples/timeSeries-load-model-hand-gestures/sketch.js delete mode 100644 examples/timeSeries-mousexy-keypoints/sketch.js create mode 100644 examples/timeSeries-stock-prediction/index.html create mode 100644 examples/timeSeries-stock-prediction/sketch.js create mode 100644 examples/timeSeries-stock-prediction/stock_data.json create mode 100644 examples/timeSeries-train-quickdraw/index.html create mode 100644 examples/timeSeries-train-quickdraw/sketch.js rename src/{LSTM => TimeSeries}/index.js (70%) rename src/{LSTM => TimeSeries}/timeSeries.js (97%) rename src/{LSTM => TimeSeries}/timeSeriesData.js (97%) rename src/{LSTM => TimeSeries}/timeSeriesUtils.js (99%) diff --git a/examples/timeSeries-hand-gestures/index.html b/examples/timeSeries-hand-gestures/index.html index 38789782..1a6026c9 100644 --- a/examples/timeSeries-hand-gestures/index.html +++ b/examples/timeSeries-hand-gestures/index.html @@ -12,7 +12,7 @@ - ml5.js Time Series Mouse classification 
Example + ml5.js Sign Language Neural Network Train and Save diff --git a/examples/timeSeries-hand-gestures/sketch.js b/examples/timeSeries-hand-gestures/sketch.js index ed76e818..a0c971d9 100644 --- a/examples/timeSeries-hand-gestures/sketch.js +++ b/examples/timeSeries-hand-gestures/sketch.js @@ -1,13 +1,27 @@ +/* + * 👋 Hello! This is an ml5.js example made and shared with ❤️. + * Learn more about the ml5.js project: https://ml5js.org/ + * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md + * + * This example demonstrates training a color classifier through ml5.neuralNetwork. + */ + +const seqlength = 50; let handPose; let video; + let hands = []; let sequence = []; -const seqlength = 50; + let recording_finished = false; +let predicted_word = ''; + +// UI variables +let training_words = {}; + function preload() { - ml5.setBackend('webgl') // Load the handPose model handPose = ml5.handPose(); } @@ -15,51 +29,69 @@ function preload() { function setup() { createCanvas(640, 480); + // setup video capture video = createCapture(VIDEO); video.size(640, 480); video.hide(); + // place UI elements + UI(); + + // set backend as either webgl or cpu + ml5.setBackend('webgl') + + // use handpose model on video handPose.detectStart(video, gotHands); + // setup the timeseries neural network let options = { outputs: ['label'], task: 'classification', + dataModality: 'spatial', debug: 'true', - learningRate: 0.005, - dataUrl: "http://127.0.0.1:5500/2024-8-5_13-43-10.json", + learningRate: 0.001, }; - model = ml5.timeSeries(options); - - nameField = createInput('') - nameField.attribute('placeholder', 'word to train') - nameField.position(100, 100) - nameField.size(250) + } function draw() { - + // draw video on frame image(video, 0, 0, width, height); - + drawPredictedWord(); + + // if hands are found then start recording if(hands.length>0 && recording_finished == false){ if (sequence.length <= seqlength){ + // get coordinates from 
hands (21 points) handpoints = drawPoints(); sequence.push(handpoints); + + // once sequence reaches the seqlength, add sequence as just one X value } else if (sequence.length>0){ - recording_finished = true; - - let word = nameField.value() + // get the training word from the input box + let train_word = nameField.value() - if (word.length > 0){ - let target = {label:word} + // if there is a word currently in the box then add data with that label + if (train_word.length > 0){ + // add data to the model + let target = {label:train_word} model.addData(sequence, target); + trainingWordsUpdate() + + // if there is no word in the box then classify instead } else { + // classify the data model.classify(sequence, gotResults); } - + + // reset the sequence sequence = []; + recording_finished = true; } + + // can only record again when hand is out of frame } else { if (hands.length == 0){ recording_finished = false; @@ -69,18 +101,24 @@ function draw() { function drawPoints(){ let handpoints = [] + // iterate through both hands for (let i = 0; i < hands.length; i++) { let hand = hands[i]; for (let j = 0; j < hand.keypoints.length; j++) { + // access the keypoints in the hand let keypoint = hand.keypoints[j]; + handpoints.push(keypoint.x,keypoint.y) + fill(0, 255, 0); noStroke(); circle(keypoint.x, keypoint.y, 5); - handpoints.push(keypoint.x,keypoint.y) } } + // assign to a different variable before clearing const output = handpoints; - handpoints = []; return output; + handpoints = []; + + return output; } // Callback function for when handPose outputs data @@ -91,7 +129,7 @@ function gotHands(results) { function keyPressed(){ if (key == 's'){ - model.save(); + model.save('hello'); } if (key == 'z'){ model.saveData(); @@ -100,20 +138,80 @@ function keyPressed(){ if (key == 't'){ model.normalizeData(); let options = { - epochs: 20 + epochs: 100 } model.train(options,whileTraining,finishedTraining); } } -function whileTraining(epoch, loss) { +function trainModelAndSave(){ + 
model.normalizeData(); + let options = { + epochs: 100 + } + model.train(options,whileTraining,finishedTraining); + nameField.value('') +} + +function whileTraining(epoch) { console.log(epoch); } function finishedTraining() { console.log('finished training.'); + model.save('model'); } function gotResults(results){ - console.log(results) + predicted_word = results[0].label + console.log(predicted_word) + text(predicted_word, 200,200) +} + +function UI(){ + nameField = createInput('') + nameField.attribute('placeholder', 'Type the word to train') + nameField.position(110, 500) + nameField.size(250) + + instructionP = createP( + 'I want to train:

1.) Type any word you want to pair with a gesture, e.g. "HELLO"
2.) Do the gesture associated to the word, make sure to do it until the points disappear.
3.) Move your hand out of the frame and repeat the gesture, do this multiple times
4.) Do the same for other words e.g. "BYE"
5.) Once all data is collected, press Train and Save

Tip: have at least 5 datasets for each word' + ); + instructionP.style("width", "640px"); + dataCountsP = createP( + "-> After the gesture a tally will appear here <-" + ); + + train_but = createButton('Train and Save'); + train_but.mouseClicked(trainModelAndSave); + train_but.style("font-family", "Georgia"); + train_but.style("font-size", "20px"); + train_but.position(500, 490) +} + +function drawPredictedWord(){ + textSize(100) + fill(255) + text(predicted_word, 100, height/2) +} + +function trainingWordsUpdate(){ + let temp_word = nameField.value(); + console.log(Object.keys(training_words)); + if (!(temp_word in training_words)){ + training_words[temp_word] = 1; + console.log('here') + } else { + console.log(training_words[temp_word]) + training_words[temp_word]++; + } + let counts = '' + let keys = Object.keys(training_words) + keys.forEach(element => { + counts += element + ' : ' + training_words[element] + "
" + }); + dataCountsP.html( + counts + ); + } \ No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/index.html b/examples/timeSeries-load-model-hand-gestures/index.html similarity index 92% rename from examples/timeSeries-mousexy-keypoints/index.html rename to examples/timeSeries-load-model-hand-gestures/index.html index 38789782..266905e1 100644 --- a/examples/timeSeries-mousexy-keypoints/index.html +++ b/examples/timeSeries-load-model-hand-gestures/index.html @@ -12,7 +12,7 @@ - ml5.js Time Series Mouse classification Example + ml5.js Sign Language Neural Network load model diff --git a/examples/timeSeries-load-model-hand-gestures/model/model.json b/examples/timeSeries-load-model-hand-gestures/model/model.json new file mode 100644 index 00000000..ad7c44f6 --- /dev/null +++ b/examples/timeSeries-load-model-hand-gestures/model/model.json @@ -0,0 +1 @@ +{"modelTopology":{"class_name":"Sequential","config":{"name":"sequential_1","layers":[{"class_name":"Conv1D","config":{"filters":8,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[3],"strides":[1],"padding":"valid","dilation_rate":[1],"activation":"relu","use_bias":true,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"conv1d_Conv1D1","trainable":true,"batch_input_shape":[null,51,42],"dtype":"float32"}},{"class_name":"MaxPooling1D","config":{"pool_size":[2],"padding":"valid","strides":[2],"name":"max_pooling1d_MaxPooling1D1","trainable":true}},{"class_name":"Conv1D","config":{"filters":16,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[3],"strides":[1],"padding":"valid","dilation_rate":[1],"activation":"relu","use_bias":tr
ue,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"conv1d_Conv1D2","trainable":true,"batch_input_shape":[null,51,42],"dtype":"float32"}},{"class_name":"MaxPooling1D","config":{"pool_size":[2],"padding":"valid","strides":[2],"name":"max_pooling1d_MaxPooling1D2","trainable":true}},{"class_name":"Flatten","config":{"name":"flatten_Flatten1","trainable":true}},{"class_name":"Dense","config":{"units":16,"activation":"relu","use_bias":true,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_regularizer":null,"bias_regularizer":null,"activity_regularizer":null,"kernel_constraint":null,"bias_constraint":null,"name":"dense_Dense1","trainable":true}},{"class_name":"Dense","config":{"units":2,"activation":"softmax","use_bias":true,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_regularizer":null,"bias_regularizer":null,"activity_regularizer":null,"kernel_constraint":null,"bias_constraint":null,"name":"dense_Dense2","trainable":true}}]},"keras_version":"tfjs-layers 4.8.0","backend":"tensor_flow.js"},"weightsManifest":[{"paths":["./hello.weights.bin"],"weights":[{"name":"conv1d_Conv1D1/kernel","shape":[3,42,8],"dtype":"float32"},{"name":"conv1d_Conv1D1/bias","shape":[8],"dtype":"float32"},{"name":"conv1d_Conv1D2/kernel","shape":[3,8,16],"dtype":"float32"},{"name":"conv1d_Conv1D2/bias","shape":[16],"dtype":"float32"},{"name":"dense_Dense1/kernel","shape":[176,16],"dtype":"float32"},{"name":"dense_Dense1/bias","shape":[16],"dtype":"float32"},{"name":"dense_Dense2/kernel","shape":[16,2],"dtype":"float32"},{"name":"dense_Dense2/bias","shape":[2],"dtype":"float32"}]}]} \ No newline at end of file diff --git 
a/examples/timeSeries-load-model-hand-gestures/model/model.weights.bin b/examples/timeSeries-load-model-hand-gestures/model/model.weights.bin new file mode 100644 index 0000000000000000000000000000000000000000..e57f181601f525897b348ac9ca8a03203e2c4420 GIT binary patch literal 17128 zcmW)nc{r8d*TxNH9ztX&MI=IKFr2+MDWnXkR2np@Xdp$?w<1Z>V8~FS(1a9`;_S6a zQc{MZD20+n(ws=|`Mvwk^T)Za=bZiAYpr{I?xH1@@F>s_eE;Ud7`T9&Y7f)#z1{JZ*iy5Yac_;Qpo zOb=G2GJV!`pHnW`nGuOw9LMte{a%79xga3p-?3ln=`ekLCs}aT7y3?7IOIPT{91Hq zTksnks?diPQTg!kBp30IQZjdtg3sDhLo z)PGfjl*kO6@xqNxel3Y}D%7CWI09F(Jg~pJ1(tN>vrPxhaOoC9Fs{_&5zeXb)*)N4+OSh@UJ} z-7XI|btK5B50mL&ZVFVq%7NZ`6+X1!0JyBuqMyEfspCRQesUzqf*K zxobICQ<~qi?gCqtW1zBBhh|?egW$y#P$IFFj#w$n6W2aq&I+6Hk#Q5c zE9ugOdxvrK(}Qr;MVM7mjStpn;=h`Sv@(7V_*FabvUBd#h1o!<>oK-qt_zg>+sXzW z=u?xp?ReVjF=|Oy32N2Cp?}z7n3^2MK93GW*}a2sQc4VQ=tWqyCYns_yN*Y@W-^27 zDKvKUT6(hVJsT6Z5c)Oz$t0CDxOmNoKe=d&DspyIDl#3`>v2d5xd-jB-I#moD_f*y zz`MSlW!^K#;wj|`JbJ-o=>03thXrn@*PxY+y}trqeQLqL|E@u>`wuvE-x!wvO~41- zfvWv?gnkwDvc%W7V02m!(Q2@P|Ew=SbMq&5XVG}r)%TsD{2y4GJc36~`HA2Et)BBf#2?H)m)Rt)+* zI!@Y7jN%g|W7+i?OZYaswfHdh3^>G%;U8><6Ve`z`+w}F&nvhv;DQMswpfq)SH$Cm zPajz7%Oc2-n?ap-%w+Taj;Bss8~^i^;)na6!Vr}rw(sH=TK-`V|8@NYIbbl6j(clM z{@G^2kDnUcGdGq@RXBn#pU8sa?p&5XVG{i}_6{V1armYX=m+pP2 zMfJm{bD#BEyi3~{Hu4U9w6~9_@>Te5d=M@!3&B6W`h3f}1X7SZ30!pL`P{7%Bx~eN z;OpDiL@!ldUfaNC|8|3tQPucM;TX*D$tRYP68uwr2Rs_B&DTgo!qd)z+^C+HOoAh76~@-w-YqEMUr)V`0@NeY(G=4X!?%jU~6v3+p;wzz~^t|1IEZZVTo-8jLJMlWZW*nm%hi~ zIaUWMe-%Q)t5mE@ih^xX@1V-PPPkzA4Q!rbLC+SA zJ$a86$Hw8g1X;cyyAu*?KY%gUK@Ybi(%|Zf3yxS3SJxosc=RIr`=1j`KV(KXSwF;} znNi@MGn&2jR_AUTr1<-Ndi-*qBJ~RDhWnvK=p#RgE`B|Qp4MMS2hI6yzVD&i6?+;vwref%o!N^xD^C+`D!VE59R5;p8&7SHDIvY9zW*J;1-^rz#~4D#Hg+1g=_TLlD0b-x92R;7*~Uz1=`qr z_*H4ZIt?26>m_>2d2sV!Yd){$1TNRG!RLKTg&pa~pvKgT_tpB~2CEXBw_k!bH@grG zzs2;KwFDb%52Ag~w$c@j37VMCHS#<5oGDHE@Ij68uo}O(VL4Lh|#sP!l=D^bmHBEV5pQ0v)FHVqMl3^Neba} zR|G5ze9eS=-2}7W&qJRZ(J+!+gO`q~yymS0J=If#Sxvocy4w=il`#P(&hCK|3;&j<=;opmKZ}T 
zUz~)jnfIY>b27Z%w-JVKj%GtvQ=D#VMKwRR;=u3*_;7eCTK8Urko0Qd=EZTS6Lbvk zyt~D`U%o}XtFEAKSw$ZIn8PE2efgVp$!L)y&)4iwg=N*J;Xik2+A~jZw6i==--nqe3$2qKgT&Vh}R5o`~y!iP8X;KBD%`0)&(k6O)z z)3#m3&sR6llUo{DZsJXLrLmZNYu4r;`|O$Is`*?)SDZIIw&KQq+922Jh(7l<>8Mxv zD8G9@X8#$=`_f*+q*>XhJ<}Vj<<1DTQuIk+fH%IW$-@tmMfJe%Ck}}nLoTN`lK!7E z%=2?UHg1;&qs(gjG50vZ)4F zTfc$H&o7aG!A@-G?XN^;*KnSs6NdUNCeYSnL=VV)A=yKUpmC%kqtC9nz%c$E0d)BmE6OY+$;8Q-#;hWU|fZ3Mibe@bH zu!qfd?0-1T7Y{m&W3X5TiCwu4@l}HfV7-0Rdcm~r>zH= zg-H(V=r-p6D!V{s7soB>hA8#=3_d@vOSdc-PIDw{SZ_=|SZ%%{7%^fKNWYY&vksaN z@#jhKFX@`_>J=rpkQ9x#p3Q7t++D%~Qw0xx&f#G@ zRj_)xE|of3fPMZWI6u}0*Y?k%MO9xg8{UKEw<$0x{V44sQI37iT!%Z3LZrMvF!)(4gnP?1pt|9Q8hAqPF`*TsM3N8$$9?qv!dlLx@MyZwaQG2os~|@2)=uE-m7{UXZ5!GXwO^&4{0)>|8$ ztc6&AY8kEnmx6a1WVyRHg|0FS2&?{r&rY4gq0W=&liD)!x4I8vuGvtz8~H>+iEzU` zX9XH7r||C?+O*2N8QqsH!;ovoVEHW}l;_cSz&n3=MJ{5*nuZi z51_cSBQyDZ6OUKyf)4j(ur%@yJFgf`F3TIy^&dURX}SVlLJwQBNSUwba^b$u-{QFs zlGx_I8n&)m3Cv3o+5T2Axqb$fU(e+?jq*{OiBtL64zMRpoTf?=`uIf_98NkZoZxg1 z3kMsS*Ul>3*?$G(77T|VYh7+JQj)s7zk(rUs&HoVTCyvR@F&Awpv_hn+HEi%vqAz$ z)(mle*X$gu%N_y}&7av1{sLY;)WBQA>X;*cTRJ%VBF0ZLfL>uVTh!nW=F083xI%~j zcSDbQ?DN3e5gcN7+0w^30gyC*9LMkG6~4xxc*&U1=rFQ^%(fzt!7`gRlpl_QeZg7cb=R zcgoTWC#tc0q$(}pGx!cC8UAA9DE>5hAa zWU#jZl=t}KHND>`tEmcXv8aCfj-{JN^uqmP6RBH8GiGM1Q-eF%DEDbRJl?+w8$4Ii z`T-LzKl>Nt4~pjP^d#`<&lSF(v4V(+&dmP3Wc$T>LbQ5IPDBeymx~R1I%OhJJ}t?m zhQ1}UZrrD;Hh~!W;Sluoc~Qe(v+?AMw;&cNAVi zx+T%r$xm6z$IrN{WG;6*A;s$@XF^EA6!@d84qv|2!Vs5fe6~^sDe@M=P!)HsF!g{y zPPmIt#)-TjK$ce|W#Xmzt6_V^6Vfrbik61hz;f>->V7B=7sri&H7}RK#BI5-rRq5> zzUD@!*on0EhZ9Y`wH^ZIzJ|MY>a??SI`|Jt(vXiFb-ko`VBJaTeDn}kUgyJ~-`L8` zK3WQj9j)j~?I_4ni=k-VLoEMUaKnY}RAG~lY#!~xGmKOrM{F}S_BEv!bj8r8+lS70 zdKcrq1dHz2LT++04a&|)@$#2S{KCZqXgt@=O1~|jkMc$d6n~85r^4iU&heG_H82~~ z|Lvj1PG@0me>A=G)e$}0|DpODMecSjhjxvANss(~1Y;i+<3G8tq|yHqT+XZ$3Xhcu z&jqaEw>C%fpI_Wi+h8mmqF9M5Kj)#t;v(UMz4Oqc<~SWb=QUIGzJisuDI_6cA8zp5 zz}tTvf@^{I$obeQkXOBfF8y_dHcRah_F7HG2BXzb8kvf=Y1RS>w_!v{Pt+GU&*yQO zMxa&p4xBHaV%xEe^l0uUD5qmY 
zUO{NlQDQnlK&Rj4urAS#q^^0z*zFWjdwe|xIV_|F9}xRvtT9w@6PMQIv9ogy!PRhW zK74{XFc`@y62{ZIOE%+1iF=a7**CovU0s}i;O*QVN=Q?(V>$=XW}{IuE-{J8QuocK1ECazr1Usn#JyM}M2Ys8n+$=j?+po0YU z%U8$gMm9JiW(DsvEFzzBg}BMjm&?Qt<^4Ad@fdF?J;Vmlw|^+TSMv=YK03lZeRPH1 zGfwmRvT*8@F^Lvs^uyV3A3kqk9A8_x64$qffq8Q8yr>F$+1VOYwbF!&$-0QzYDojuCzgnS-+sU86CV^#J`v2FC z|9$qqQ&px$}3m8=@wJDZ#I2{w@E2J=)6md+S9YNwQsbNWAEVB!a7gjueL!7;{ zpy#2I@Vk65GfDXg#_k_r&f#>lHjtzrGy`#I`4=+Yx*s;0_>rpLB5x{pEKBZ>$4nJt z@^)-kYETFt}$fgl3c-~g8isulF6dqWU$KXA8?Y+Ez}&5f^%jp03J7t2Odg<)qV23 zEl>v&)(i?~ty&1T_Gr?P>PTd7_OO3#pD^>D5q)L_M#UDT^_4*D5ebKBUl)KJS4hmH}$tw&5T#cUrKBtF2a*DG+6@<@Kd z-hn}$BDGSyg3(@?OlLXe#$)91k{Dw@W!kaW$_cAydjsCxgQf+U5PoYn47?}@y@!$b z=GAPFoM%m6XR4431^!SfI}2Ph58#NQov2o}0LN}Of_p|9T>5G^l2@v9jWdUaja|&f zuoSHt6|i-_6cC>!xV^p>Vm`RS%&9JLYS=lDld$H6jY2T7?#3NaM@Vtm2+VDog$sK2 z!`tn>f+Np7Ag)Xsm83QC_D3Nkm?W{3!%y)@R0Q$I0zqAsESk8O(3?M^Kqp3;RzEF= z0l#Z3GQg1h?tg|pc88$QZwiljr-0LHPQe2^d4A>Tb*2)bz{|y_<3GzJv|N4^G`1VV zJqI18AsGew?=O&Vi`sBwt2G@MBSt$OInbp`AHq1H4QUkh_v1c@iM+P)eCft!U_O#C z%6AG4?3hU9b31X7Mk>z8ccD2CwQ1VYI_$Yt32Ssxprqpt$c~ZbE7Wt~zvX6BeRL%e zq(s2LH!)Gqb_-(Hzd*fkYgilWjekJr2?jSX^PCR9Odhl8c?jY0U+}kX zI2rFQ#mmAa@nhCBx+qqSdgq_O(pwVrq|GNXkRii*T$>=ITasG7I!&s2Zm=as^Mvya7EvD*#3iEm$xwo&`G`C)4(+lVd$HeA5UU z9A~Wv%TKvMMv5-%4>X|K3*InQ6;ry3+yl!EWso&b6P2#~hJaxOWK)k9@qN6KZMSOx z$2B|Q%*L%;-oUr-+qM^94=zJ&YXl<3zy)gX9vOU%T9Wo*@9uz%I^orUY<(r-hGFLX`79GhryfJ7-E)oSLBsd3i4+>M%C^J z;-RaAi(A~#YrYfR)7n6O#wU}e8*wOmM2Wi|a9|#}?+{#f;M2%gB<86k3N&L-^;k6a zFIa}{odm3#tHAT+Buts2L*oyxgNE|wQ1WCkG<}MN+s3-MOW_I@-87>6f|?nInDcvc zDslTq9ipAHzvTGTSa!HOpB4Iw=HY)!1ksvSQOywEy&g_7j zX(wRo)~v*8|=nKqIsg0(D~G9P;+{S<*^g7OiqFuf2bmbmm=7pffXLv zcn8kD$YzJv#z9HT0Bn#s4`!7+arW2ASf+82bhgC_qKkD{@6HFf{hlV+-hBoRmoo9P z+Yr{!XU4alsb#}%IpYWUConJ63pzS(!TQ_2m?Zxbj;;9v5ARiDnP{Fh%bia3&Gw>H zk_|VNm8ae{pOE~N!Y^~@&|e)vczo#%>{l@&2b|>a&G{aDG37EbeJ3f@zIc)SJog0? 
zw~R$;|5-To&3`by+nYAeA^2{kJoMzxpa~b}gVwp-croif>IJCKp|jn=qIDdf@a!Rk zi8P>Tk}H?KF%*xk5+l-H14PyKBiPvvh4?*cR8Ogmw9Yk!?tM1|hw{$i*{6SDP(hbE zor=J+CGT*sHC1qNi5wkOHV+=joWa$Pn=vN-GqlVQ&9=Ma$kd0$*qJbvd~`Noflv?X z=Mv$q?H=LBp$f1da|e2V?ZYL0)?g9j1Cz}nF?mM?YIYfU;L)y4b|f?G$Vq< zJb48wp~HFED@(fn=UYe}qeo9#MU&MEJ*3CeABBFcc7A{Cv9QIG@1Gb9N1V*z-%mq2 zJKG+=+v)MXFlX-k{63bJcwv3A1n(ba3!Oj3AVTdETlD=TTm1GI&L~??OdZn6e@9m{ z{i#yW{5zFq)q-jA4)L6{$*45T0X*1Ck@@v_C2|Vxuh_=sSuMhmEk| zd@Oqrug(pJDL}WcBNZz@0m?)4>36l+_|8w6U6ye{^RE$jdQUTx&9}u{+v}NJyE>h5 zqYW$O$HHfyonSF=5wBZR3LeLPg4Sy@@$d2qoc>=Jj2o}ZYdWvO;nM)-yBcB5cz3FK zJ_$|Y=LpA3zGePz&FIppWBDVaKz6fOh9BRvn|Qcc!Fxdh#4s&x(-A}N{Pd(vlBe+d z9s}-hU7dz&wvkya$q>G&6wW^R0jJYcc}fc4SB(*UN{}FfjJ(#BsiEl zAJ-}#f{}y0(6#P4c69>`xqgfMIKK^4FX`X`@krdEF$;Da_JhmABxzpc5-@R<ll{bE?a4+X4zUCz3Sd!cUpR5qvT8m4^9CnLAo^BK1z zh^y^*D*5Xth|hlln;rbwP4#$qeasQIwcm!P<>u^~sVPm3ae;#1nV`5CK*RJ9e7NL9 zkH=NwMuY2cNMj7_*om0q5mXu%Y)d9@&%~Dh9O+$KRW@h8G7VqRKsNnlLJz}lf-N7F zc<;dwHrj47ymN_1I4@DUiE8pI)6(JcdmFy}Z8eh^B2Hrr$Dp}LL+z?_;nS1=wrSS^ z8a<)_w@oWz>G72Q@xBFx^KzKvS~)w5CnI?7<3Kd?c`AHjdj?-VI|XBPnEG8=~?1pDg%LEe)TFN>Ra>vl(;Ou(wqQ;rG!>5@IkBgYDDt zbnJ7oM7n^O|0%^p+cywBH2uPr&z&7L_@cL(&|c5V`w$)OSuEcDcX9fJ!my zdw(SveS8EjwqFN#D$kMCK6zv+Q{t!33?usc&fw7d6kV^MfKzpvaC#BK*I^#Kxy=YD5E;%elan5oGEr?{226G|XFd0r}kK@K(i+IYwGxi{M68`?~1*-of zP@*D%_?bG>ac4U4>1_&*m!0XlWLMDJ?SjcI&8+R_Awfg)IIbuw&fm3^lD`vvvmASW z_Dswc_Q&;sHhqX0qf+2-M+BL?{~ywIvta9)D98}|PV|?af^|oy(_g`5xVj(~KhKqc z+9moh?`{w({Cz}TTCAo~c8dY081SNcb8ZkRN$V}-x%6<#Jv*alp+Os@Xx3qs^<9jw zFUQRWk*vq<2%EC~3QRcYkFF|1_&|XwfA1qr#WK#p#yOfC9xTGCy5aC5_Mb4rCIlXu z09|Eu8F%L2!!jSnnhM9$|CCkex=9XveEVK3e>xo>+1CpLob2Jk1T_%6ljrlVUqaUn zZ$RAX8hRcXOJCm(6LhTGjhCwP@IrYZ%(s_;Xe%|^eP$eY8!ACVyDD*9-gU@T+Jd2r zuGoEgqCoTgCWAGd$RA7E;^wYcJe3-Tk_%Po%YQ!vhfMq6R+I)9AJ2i6@^9htRH0DH z!;S}8Xu;g;8Sr03G(JD~3FmHH0_=1Y-nUDG(cdIc@x^7R_!GvK&DkmJu#*LRunOCr zrvfY2IVFOut*-Vm`~(eV3SkDn|a19k_GgMsWE-1SF_7Wx-4C7BV}ys-h|8a-)g*$Cb# zBgJQSSVH9W%Mk8677qCyf?pSx!mNAR>|@<$JThoW(%p_g@&31Xx8WO%eiDmnqmszx 
z4fn|4#Y}8IFa;J&7)>U=FNQt-4=_pLJo8V{^iPo5Zi?$O4`YJTC#YH2 zfFIB8W77}zVDI*M^wmTiZj|+%EMK;Ox{e=5E4D~dq3%fV`uPFX*AxlzHnkBo!Bf0f zFp0jgoi3VL9O(E?JAg%vFzTQX^sgSkBToil#gd`4%W4o)W!(6}0}Q9Qeu6>^MmoM^ zuzkfsd^ut}HTCg@XZ>gKn65I5?oEK2pL@vF*A7C7fit+x)SV2S_XmcUB!P871zLUh z1LKmk`QXZ1EOn{|&7W`|%0zrrQtCwB{x1#O&Mty^$p@hPEQS5L#ZW%W1khz69VMkp z#6))Aq4B44_0I8V_UI(|-FbnY+A5H$FooPJP$owtN@0<|6wlC*=ZYui zK*c$ZKKd_kL(5}`J_s<}VFFY?pM-tuenIhuC^~*#B_6AJkK&11^y0j6up*{|L=^qT zEfH_Qv-2DFJh{(y7mLBJ(Q7f=J`ZxbHnTHhrlC!y7GJ;3fgW6ap6#4G6m+DA!JF=8 zw1)yTYf#~5!}>{TkRG~M?#DG#V%YUdTHw6nJ!uuqY2w#8@oR)n{KQ4G`7SUw@5LQ1N63S_dok{BB8%8PlO${zjfzdW z{F|LLE;wL@%UAuveGi8c6Q@{~wb_9d%~;PvO?8AHp0*Lzfi~gT5b9P+O2hicUnc;Q_y(qsN;5UZ_joIN0&QD;0S3 zvK@VzUJL_xpVTUa;Da1<5*xLau9p_j2gC_-t9|+B_x8~HbQ^tJbrd~TMj{??py|~% zaI{?;MvHpk^ogHvmuMH|EWLsSNbdx-*rP0A_H`njt3!_-y8=i4PUJ^*_o4j`J!*UC z05&;Gb5@is%qVrImkO3~|M+1)5wb zu>xFX>;t)wDln-o7HGQ{vb$qcxYy?m_;od~p!9_(D-_ z42c=Xv!nW;zCTjf=90;d`K|+@+Ix6&yN9gqyF=8TW}QF^0OiN+IrkHN}J5)rWm<-Bkb_eq#^^I ze0y025xdp+j;?deF)js1rAX7?6)T|nTRS=YV+^(3Yf6tAc=F$)o8ilrC_$3gJG|aL z9lkDnjB}*#f%b!5!Sc~LSmR#{iz?Nqe7!QiQ7b`d?q_%xbDjOyJcL%758=-zjpTyr zOQnHIJ6P-6pWvUWLd!(Gis`K*aA@lYy7<9Exc5$*F3hrq0h>xpJ-C-#v7U^+@*mOb z@)3+mx&tpC|Az~1MMJ4eK0eJa0ROsE(7NL`+%9wCFE_8n#0n2uYki4$`;Wxyv;`H_ z*1{W^vHZxJ1cAwASNd8#o+bA##I(KFaa7|aa;(q^T`q-X2oHKy&uY^ZX)Q`X(av%*`;!+O*lENk-HM>Uwn@>=Z{qPzi!L{E z*nrQ9>I9b}(#ZHuf97M93Wn{{7=s2o>{a{@NAVZkLZqHZ={?9xPZI<4T&esLaPV8GWW zn!}qCB`RsC$IFIyGvV2EREnqtYq+wr>>1_Xv*Z6NPL4n`Gj zp#P3ep;;Sj`RCE5ymOTm&41X>bS5;BJ>QbB!EZKQ^vW4(XAXmdZzZX_=@6QAIuebB zwc}2cC}@5*ifUY;DD&edZWEtM_dWjrHQQVzQ^)|j#+~pd$Z#3ku{i{(p7z>R?HX?r11J4DvquW7G zGA7Fs!fiD86=y?QyI8cpNi@MRsTEN7?04zWC(-zR@F;c!NMj~bL$h~&_}CX=jL}5) zIxrOdM{Xdmm$nLY-b?^L{{Y<8J`%+?P>``|0~K2_x;*Rxrn}t~EZC<;qbn!VPP1Eh zu52z}IZK6V70iOpZgXD1M9(-k4$^nY@Z6MM7#dAzUcqPfSKJe7vxnl!`BF@BPb^9P zDnlhbr_-@6r6Mk(sC1>9Cj@?yLz~;7@UJupPTm{C%+df{*QP_|8bS@q-op0{vp?ZJg~Q-BWCJz1?uzLO-55MsK%dX^;G5TH zgZC0`-lQ;@|1x_Hbea(UiF|_&#TZC9nF!;)t^%DXWpXF&04Be92Wt{!Xf{US(#4q& 
zm~#y*McUoHy?{)wNx8F1SW`isEG%9n>a#PR;Mu9;dFttD*!}MjR@^yE#wwapuPO_!I%g|Z8|MiQ$Vk&c zzf?HrcoThP$MCbFU1j**dt`6eaqzh*&z)E~c0w(ziH?K4vz*C`s3a)p%pa`?0&`f;Gd#&$bNB-9UOfdx}J`veo%pFt>z@)@k5Ze&|;g6 z7U8vE6WXP=0+XsLm~pN;S!25vex0uuJ{}TEO1`#=IJQctd;N@gPtfDqmp;Sd(1SQE zXB7EmB*AZ;h{F9>6!1!u3sv?K;1u(j=za#^OPr2inrtu`8I!`=LoNB6dr~xRTqLyY zie@Q!uKcfvpBNrK4RjaJe+E{a_A7_V* zf_@7((2yPr2Oqq{6AJ>^5TP2Kw=Ko)_ZSnr-xUCx!$(nPngj2Al=*or32NZ%NKgLx zhP~OwJo=O#)%`P;tFzza)NLn9U#F9giz2b1-jf*IiG-1wAK;MiyKs-jDi9Ox!g3x4 zf~SN6sBI5|-tf_QVaiV^?hIppo{R>Q-0d*zbrx*Q9!V2hH`D7at0)-PlP8s<`0WX@ zaP04E_`7@|L|ste8+TQ~1rR`)}pgTW$?hG`GFGpjAuW%@A zE;fptB693Gp3*ys3)`glcC|Ms^~#k#PHKYF31+Nlj{~;a9vA%nDM{iYKHF97Q^ON$ z(=cq&Axtz#BaXEh5LtH(*XXS*-8adT{D|zpo2^Lv=R3f%tU54j$`b6~E>HbBWvOL` z1I_+DmFARA$9QWma5F0uf|VMqs@7*`Pl@IW-?bPXZ3RJ&!Pxm#7vF@Z!JTbK$oRS# zjOeK*9S_AZXs#$j6r)7vZW+b*dY=ce1Y6LXaTLxgcQK9kpHM4oki5Dsz$3n`ur*PW z)&{m<+Okuqm~2R&`P_l5mof0gYaOoFI1J{=doj#82g+C2aJvl|!fJsr-M>Bq|Lu&& z_iHR+)5UDuE7EVJCBx~st6!j4e<~~tnF}Uq0?=Pt3iDT(;eV6Od70WbpjUOM?$dUm zc$hl`-~RwoMTWd&u?)~52&v_lP)m4)IO)6)Wtp_;=#+7AHPDgH4z3f(bRNK6TZ@Tb zT(@AAy&-jMRN@LH67=+JcY5mQM(X(54$p}G7uGKX=39@O!cx%e?P~h?s3{$JMV2MY zG+^w4Z)AeaV9EAnpUL=Z`D~Y~D>a%<`J5A7_$6r}6eu5pXcu>;t9=a@R^+qU3!V7i z?mukF+GS+beiQsYB9rCqy#OA^U8&VxNj^909)9yM6cm3;hoD!Y9xp{4?J~pZo#s*W zlbRT+4k-n5^(nA)qBif(egx5>btEy~n>P%MCJ?jAnmafFW35sa%IgB<8%+k z$Q@(Nh8lcTL?l(4GJtW5%JKSQSFAqgNu}x@;)Jzg#5iXe11D?j9N|k9r~9FM{9Qp@ z;RT%KQzhb97hsi^815V@OaG-!6Q*o!LE+jD0(B8zp}6x6Lw*#J7GDCvHYw;!J&hlt zw}YL6Hq8oZhYM;)aWJY@#OX%Dj}^7x5qul=w09zn`-v+84B@`+DkvHwfsVye=N#m1dFAIMh)8gzRofUA z?LW+YT^_@&H09FO>Bq5l@B?0XWJ%pGOvIt@7ja)l8+h?_8GSkL9(vx+!bc$*5caYG z_RmU(H*F$Tpk9|R+x`HG#Z%y$mnHQzpTgu1Z^53EYA_{C3A8r678Z6(@Vn)|!QIpw zdWT(xO*5it?(gw*bEgxFB>VW>$6?g|ts4Du(;M}SCh^yEhJ#jS6DtS_AufZttfyx+ z`y$E^Nj*1Uk2}Id*`QglPyG&@yRi+%-|~mmA5M}%aXotZo)~^~y3W=|%mssuzj1HG zY0$m!0O~~iw)uBiw(+|p>3n&eG|lqI?DR)?XT(;lsi=VsFaF>XWe2#iH5nW>T*Jrm z$~e9GDw%wzgGhCJCbjXNWaF7PEIqpsejJHs-SgF9#%^ae@2Mk;JbDAeM;r%z0VDH# 
zhtbP7^WnMVMbKF+3!5ziaQ%ubIG|@OSQ(mZ*Yp0V@DPM!`1PystME7ZcxR+=LhpAd zQ=d$N2gcJet0efPn;v92+X%Nd4v_rQKgdUOO`Kkkg)Ixu!g7%>(s%qQ_)EWsoX1nR z8%(0T&#sZnNq13h-#y!am~bj-IT36<6zExtH@MTOo)vIK@DV$KQ@mopQB0o{PN>D$ zapl;%KZzNrEhEWEFQ9+qXezG!1y9SRlHlA&ctYAz^XFH2`tw05*9njz?@^pQM)z^{wx1P4%v+p@xc$+6d8t> z-|LbR(VP)Au^yyp|sWv+rJkii|#(Y~=?(;}y8Qr6L`6un454 zP7r#S$FW5{4iGC7HQ?gUMNIyTiPUhqI#t{y4km9_(yV(7ibVe1Ugb25-<$zWXGM4T zt^`)VcFuF{Xh2vvm~LFjqx6Q;=Z}2p^Okw|B5?_nvL?Z6$>G#4ZUCO=-)C+IR|>VR znF@mLuEe1=A0hBU3GQ)QOS69JLXEf&&Xt|YU7B>+54AZ$&3GlcTDSydYwn`yk^!jI z&SxXd1E9-S3r$*e;FHWqJLRjfu&=%ddchQ2%p`?w&#$u(2`g?M>PB<*C-SX^FHl`e zj2ewiguF+8p?pa)3;lZ=hMMWoNmhoa?=8x5bd<9AnIoaF!keX(m&8 z1NW?tfjo;YxE$w2PCh%%3z;_eUB3!~a@FWFuW$JE=6Y@`Qx3K_pJC9MK<-!d9fzv@ zN0Z_npiAadI@Q9JDs(M}t`kYn@G2Z;@7a%iPezf;Ik{l@wF21#Y2=?MeOy*eTGax` z$J3gyCbAhO{Jtsj=i`NB>``d6z9G=L5CvTu&CtJEjPuO>^s$OP4Sm#z2FlMcOG=pr zq@08;ovZCaEquW^U?E+$<19aM+aKp|VIX}!2M;TY(*yNFa#A^oh-por#_}RYeXbQv z+8ux`V}FzNcW3gj+OweJdJU!aRg_kh%F`(_^B~mwJk$E3j>?L1B>dJay5!JF2wY?h z@?odJT$FFQw{irRgSU)mqi|uJuF%` zg3X@3j)@;#OT3NLXlY3(TQL}pf?y+5`!@~tP8)$YG?((_FUN}RcnA&zy@O4QR=|a9 zW$60T3dfCaV$#Ncr16^{n<}gZh8Ruug#PXmiG`Nn-du3JD%1k z)Z*fmmh`3FS$MJHFewm$9dlFRj7m* z=Ct0%Ka=KR<>_G}Hvcj%bWVc+qpPqvv=h3D=fXY78!+*~S9aE_5En$4QpHO@q5fbc z^A%r!nl3vbG^mzv;{w6?I#Ir*J&+3QXW-c*di=Ir8{70V7(#aidvzPpNQVtV%0JHjeiB!zhh7a%bu-naw{wG%q_O}I4 z>uf=VTXrzZ)uBu>%Yfd~yM;$c8xH&Og}4%V797+ES#2#iX~az4Z!m)TdoRUR1vS_p zu1X*AW7vBs34RFTap9z^_7kXT12T^){& zKO_0tlGCi^;TCH3@Un=PGbSG&&xB(g-on!l$7A)u4Dc*6p?w=W;mv9}{B+5RG&qlf z!w014C1o-26xGe9$Sne&#e2|Xf)ibG>mVAd=yR1nuV7)G7NlB!VA_+s`8LxIV)Is; zdcFR|zW54I*)$nk-$XNMgLZUWECq&Jvf=JCQ3kIng{;mtp`BG?lvOxT3F%jmn`S_V zS=O-oQ;pf-C2?S-kjtE=g$nk6un=TL9)q@obJ!H1z_WZ?$qNq?L5Oc8yuGBreZD7x z&p`_;5%G4R{lkgY_d(G45r{5-F2T#XldSDZ81}7oXTvg$h;VHVd9Bq3#d)Lol?xJ} zsV_%IU)v3S=e*(e0~_oPZ9%d9XN8*#wC#H5J%M|%_tCEI2{w*NhPvYS@F7r(OJ@CF z01^W2{ro#knUg*0=Muhb*XF%^!j3)K7hFAqH2FOkJ0!ljl@z`*+Uh;#b5_0I3q`#~ z5h%Vz9}m352GG4Aw2Zt6`vE?GfrdK;9`n758^FB;U#PuMtTsM<9@IVjO<2AhukO9z 
zgv34jih@2i&1}7V%D6q*kr%y^VXwSJ8rr=_OY}VrzE->wA@aS!y*Ip_8wS1~H`%)l zbCo^5;axo>pkzHyC7`|fy+yo!^(#J8!5lt=yo|kbK%YG*hwDAu|3tmMAp<^14s*S+ zGuFLeS@ylk4`n_{1(80hfh@jJI_bRt*sncQm^(hGQw6;p_!zz=uwy>Py^lS4Nj|<5 z_#8g^^U%DV{usWnh%Y_Uk9a+Xin6@yrjNZ2ln6e9&VfEDHXy!ige1KZT&X=3(5bzn zD-yoU!D&5R;pM$wNN>Km@~u4^yn8+;Fd95R#1K8+a0xtAFxxvPNB=vHnRh(t5{5fT zwvRk=@|-(P4yn9$WllT*0000S6h{^-70iy6GgpxnL*?oK}F zJ1o8TO~<~a8sk0+pNPJ?s$RZOYC%0FWdOh0bEQ70;2b{~e2hK(Ob|cFC$T;BAJM+s zoiROr!vDQBcLBbw8TUS#>?ps>0X4pIp-H}6tT4W`cBMXQ!Yn-tUG2VB?6W;p;i9~Y Sdi}j8ejY#HA~?I>A~-v7%Qid! literal 0 HcmV?d00001 diff --git a/examples/timeSeries-load-model-hand-gestures/model/model_meta.json b/examples/timeSeries-load-model-hand-gestures/model/model_meta.json new file mode 100644 index 00000000..1c0165c7 --- /dev/null +++ b/examples/timeSeries-load-model-hand-gestures/model/model_meta.json @@ -0,0 +1 @@ +{"inputUnits":[42],"outputUnits":2,"inputs":{"label_0":{"dtype":"number","min":4.151249399907168,"max":586.4725394909854},"label_1":{"dtype":"number","min":186.47223882383636,"max":496.34918695509003},"label_2":{"dtype":"number","min":12.818880217505907,"max":564.7860747522525},"label_3":{"dtype":"number","min":160.9460986889124,"max":478.89482602620234},"label_4":{"dtype":"number","min":20.681431005110262,"max":557.1173870582799},"label_5":{"dtype":"number","min":135.1274696802808,"max":454.0862355189599},"label_6":{"dtype":"number","min":29.375938053231934,"max":562.4826339023859},"label_7":{"dtype":"number","min":113.22511415628927,"max":455.15365538508894},"label_8":{"dtype":"number","min":37.27265551578051,"max":573.3838980891996},"label_9":{"dtype":"number","min":98.00531862273047,"max":473.4382341601794},"label_10":{"dtype":"number","min":2.706973037101564,"max":599.2858408346702},"label_11":{"dtype":"number","min":117.7350326456234,"max":453.76022921684716},"label_12":{"dtype":"number","min":11.635752695869659,"max":612.8243751678727},"label_13":{"dtype":"number","min":91.05094143918305,"max":481.6467136241304}
,"label_14":{"dtype":"number","min":22.9353041163117,"max":621.0127886598051},"label_15":{"dtype":"number","min":61.619264849841635,"max":499.63536096409143},"label_16":{"dtype":"number","min":33.53953084457643,"max":626.4181148091915},"label_17":{"dtype":"number","min":28.455718477478662,"max":512.7953875856006},"label_18":{"dtype":"number","min":-2.8065139589559984,"max":617.7828981986556},"label_19":{"dtype":"number","min":117.6886729722432,"max":459.5357193516273},"label_20":{"dtype":"number","min":3.7782929928570064,"max":633.7038985044576},"label_21":{"dtype":"number","min":86.77279076496669,"max":486.0751342925063},"label_22":{"dtype":"number","min":16.177018651157255,"max":642.8366376068107},"label_23":{"dtype":"number","min":51.687144639081325,"max":502.64037741142846},"label_24":{"dtype":"number","min":28.1461509145229,"max":650.2419536370577},"label_25":{"dtype":"number","min":15.922382743702723,"max":516.9301399988833},"label_26":{"dtype":"number","min":-6.382516546058305,"max":630.7077663350849},"label_27":{"dtype":"number","min":120.16376158664924,"max":461.0881814514869},"label_28":{"dtype":"number","min":-1.4074379536407533,"max":647.5041251714117},"label_29":{"dtype":"number","min":90.58035685591811,"max":485.04491883378125},"label_30":{"dtype":"number","min":10.174906800459325,"max":658.4893875478738},"label_31":{"dtype":"number","min":71.76407331703523,"max":500.55112323964187},"label_32":{"dtype":"number","min":21.11718120932074,"max":668.566957655395},"label_33":{"dtype":"number","min":39.557348432978586,"max":514.4287318106208},"label_34":{"dtype":"number","min":-7.9534800405596595,"max":641.3232619371444},"label_35":{"dtype":"number","min":126.31599791044414,"max":465.6320514399833},"label_36":{"dtype":"number","min":-3.8369034650104927,"max":658.2044139172733},"label_37":{"dtype":"number","min":103.73604938021917,"max":481.03793223993495},"label_38":{"dtype":"number","min":3.7075645592075435,"max":668.8017566330357},"label_39":{"dtype":"numbe
r","min":88.76136006394765,"max":494.63688258092407},"label_40":{"dtype":"number","min":6.9609311353376135,"max":676.9525074586147},"label_41":{"dtype":"number","min":75.97401514052241,"max":506.7948506427954}},"outputs":{"label":{"dtype":"string","min":0,"max":1,"uniqueValues":["hello","bye"],"legend":{"hello":[1,0],"bye":[0,1]}}},"isNormalized":true,"seriesShape":[51,42]} \ No newline at end of file diff --git a/examples/timeSeries-load-model-hand-gestures/sketch.js b/examples/timeSeries-load-model-hand-gestures/sketch.js new file mode 100644 index 00000000..9285ae71 --- /dev/null +++ b/examples/timeSeries-load-model-hand-gestures/sketch.js @@ -0,0 +1,145 @@ +/* + * 👋 Hello! This is an ml5.js example made and shared with ❤️. + * Learn more about the ml5.js project: https://ml5js.org/ + * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md + * + * This example demonstrates training a color classifier through ml5.neuralNetwork. + */ + +let handPose; +let video; +let hands = []; +let sequence = []; +const seqlength = 50; +let recording_finished = false; +let predicted_word = '' + +function preload() { + // Load the handPose model + handPose = ml5.handPose(); +} + +function setup() { + createCanvas(640, 480); + + video = createCapture(VIDEO); + video.size(640, 480); + video.hide(); + + ml5.setBackend('webgl') + + handPose.detectStart(video, gotHands); + + let options = { + outputs: ['label'], + task: 'classification', + debug: 'true', + learningRate: 0.001, + }; + + model = ml5.timeSeries(options); + + const modelDetails = { + model: "model/model.json", + metadata: "model/model_meta.json", + weights: "model/model.weights.bin", + }; + model.load(modelDetails, modelLoaded); + + nameField = createInput('') + nameField.attribute('placeholder', 'word to train') + nameField.position(100, 100) + nameField.size(250) +} + +function modelLoaded(){ + console.log('model loaded!') +} + +function draw() { + + + image(video, 0, 0, width, 
height); + + textSize(100) + fill(255) + text(predicted_word, 100, height/2) + + if(hands.length>0 && recording_finished == false){ + if (sequence.length <= seqlength){ + handpoints = drawPoints(); + sequence.push(handpoints); + } else if (sequence.length>0){ + recording_finished = true; + + let word = nameField.value() + + if (word.length > 0){ + let target = {label:word} + console.log(sequence, target); + model.addData(sequence, target); + } else { + model.classify(sequence, gotResults); + } + + sequence = []; + } + } else { + if (hands.length == 0){ + recording_finished = false; + } + } +} + +function drawPoints(){ + let handpoints = [] + for (let i = 0; i < hands.length; i++) { + let hand = hands[i]; + for (let j = 0; j < hand.keypoints.length; j++) { + let keypoint = hand.keypoints[j]; + fill(0, 255, 0); + noStroke(); + circle(keypoint.x, keypoint.y, 5); + handpoints.push(keypoint.x,keypoint.y) + } + } + const output = handpoints; + handpoints = []; return output; +} + +// Callback function for when handPose outputs data +function gotHands(results) { + // save the output to the hands variable + hands = results; +} + +function keyPressed(){ + if (key == 's'){ + model.save('hello'); + } + if (key == 'z'){ + model.saveData(); + } + + if (key == 't'){ + model.normalizeData(); + let options = { + epochs: 100 + } + model.train(options,whileTraining,finishedTraining); + } +} + +function whileTraining(epoch, loss) { + console.log(epoch); +} + +function finishedTraining() { + console.log('finished training.'); +} + +function gotResults(results){ + predicted_word = results[0].label + console.log(predicted_word) + text(predicted_word, 200,200) +} \ No newline at end of file diff --git a/examples/timeSeries-mousexy-keypoints/sketch.js b/examples/timeSeries-mousexy-keypoints/sketch.js deleted file mode 100644 index 4bacf292..00000000 --- a/examples/timeSeries-mousexy-keypoints/sketch.js +++ /dev/null @@ -1,180 +0,0 @@ -// https://editor.p5js.org/gohai/sketches/_KdpDSQzH - 
-let model; - -let curr_shape = 'None, press a button below' - -let state = 'collection'; -let pressedOnce = true; - - -let rec_duration = 2; -let num_seq = 20; -// assuming frameRate is 60, with record time of 2 seconds, there will be 120 datapoints total, which is huge! we use map to get 20 data points instead of 120 - -let count = 0; - - -let sequence = []; - -function setup() { - ml5.setBackend('webgl') - let options = { - inputs: ['x','y'], - outputs: ['label'], - task: 'classification', - debug: 'true', - learningRate: 0.05 - }; - model = ml5.timeSeries(options); - createCanvas(600, 400); - background(220); - UI(); - frameRate(60); - -} - -function draw() { - let datapoints = map(count,0,rec_duration*num_seq, 0,num_seq) - - if (mouseIsPressed && pressedOnce){ - - line(pmouseX, pmouseY, mouseX,mouseY); - let inputs = {x: mouseX,y: mouseY}; - count++; - - if (datapoints % 1 == 0){ - // sequence.push(inputs); - sequence.push(inputs); - } - - if (sequence.length == num_seq){ - - - pressedOnce = false; - count = 0 - - if (state == 'collection'){ - let target = {label: curr_shape}; - background(220); - text("Recording: " + curr_shape, 50,50); - // console.log(sequence, target) - options = {inputLabels:['x','y']} - model.addData(sequence, target, options); - } else if (state == 'prediction'){ - background(220); - model.classify(sequence, gotResults) - } else if (state == 'training') { - background(220); - text("You cannot record while training"); - } - - sequence = []; - } - } -} - -function gotResults(results) { - // if (error) { - // console.log(error); - // } - console.log('hello', results); - // stroke(0); - // fill(0, 0, 255, 100); - // let label = results[0].label; - // text("Prediction: " + label, 50,50); - // let label = error[0].label; - -} - -function keyPressed(){ - if (key == 's') { - model.saveData('trial'); - } else if (key == 'd'){ - console.log(model.getData()); - } -} - -function mouseReleased(){ - pressedOnce = true; -} - -function UI(){ - - 
textSize(20) - - rec_circle = createButton('Record Circle'); - rec_circle.mouseClicked(recordCircle); - rec_circle.style("font-family", "Georgia"); - rec_circle.style("font-size", "20px"); - - rec_square = createButton('Record Square'); - rec_square.mouseClicked(recordSquare); - rec_square.style("font-family", "Georgia"); - rec_square.style("font-size", "20px"); - - train_but = createButton('Train Model'); - train_but.mouseClicked(trainModel); - train_but.style("font-family", "Georgia"); - train_but.style("font-size", "20px"); - - pred_sha = createButton('Predict Shape'); - pred_sha.mouseClicked(predictShape); - pred_sha.style("font-family", "Georgia"); - pred_sha.style("font-size", "20px"); - - function recordCircle(){ - background(220); - state = 'collection' - curr_shape = 'Circle' - text("Recording: Circle", 50,50); - rec_circle.style("background-color",'#f0f0f0') - rec_square.style('background-color', ''); - pred_sha.style('background-color', ''); - } - - function recordSquare(){ - background(220); - state = 'collection' - curr_shape = 'Square' - text("Recording: Square", 50,50); - rec_square.style("background-color",'#f0f0f0') - rec_circle.style('background-color', ''); - pred_sha.style('background-color', ''); - } - - function trainModel(){ - // model.createArchitecture(); - // model.compileModel(); - // model.summarizeModel(); - background(220); - state = 'training'; - text("Training...", 50,50); - model.normalizeData(); - let options = { - epochs: 20 - } - model.train(options,whileTraining,finishedTraining); - } - - function whileTraining(epoch, loss) { - console.log(epoch); - } - - function finishedTraining() { - console.log('finished training.'); - state = 'prediction'; - } - - function predictShape(){ - background(220); - state = 'prediction' - text("Predicting Shape...", 50,50); - pred_sha.style("background-color",'#f0f0f0') - rec_square.style('background-color', ''); - rec_circle.style('background-color', ''); - - - } -} - diff --git 
a/examples/timeSeries-stock-prediction/index.html b/examples/timeSeries-stock-prediction/index.html new file mode 100644 index 00000000..1208fae9 --- /dev/null +++ b/examples/timeSeries-stock-prediction/index.html @@ -0,0 +1,23 @@ + + + + + + + + ml5.js Time Series Weather Example + + + + + + + + diff --git a/examples/timeSeries-stock-prediction/sketch.js b/examples/timeSeries-stock-prediction/sketch.js new file mode 100644 index 00000000..14271875 --- /dev/null +++ b/examples/timeSeries-stock-prediction/sketch.js @@ -0,0 +1,213 @@ +/* + * 👋 Hello! This is an ml5.js example made and shared with ❤️. + * Learn more about the ml5.js project: https://ml5js.org/ + * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md + * + * This example demonstrates training a color classifier through ml5.neuralNetwork. + */ + +let classifer; +let data; +let data_index; +let seqlength = 8; +let seq = []; +let x=0; +let y= 0; +let train = false; +let training_done = false; + +function preload(){ + json_data = loadJSON('stock_data.json'); +} + +function setup() { + data = json_data.data; + UI(); + frameRate(30); + createCanvas(640, 480); + background(220); + + ml5.setBackend("webgl"); + + let options = { + task: 'regression', + dataModality: "sequential", + debug: 'true', + learningRate: 0.01, + output:['label'] + }; + model = ml5.timeSeries(options); + + final = [] + + data_index = seqlength - 1; + while(data_index < data.length-1){ + for (let x = seqlength -1; x >= 0; x--){ + let curr = data[data_index - x]; + let inputs = { + Open: curr.Open, + High: curr.High, + Low: curr.Low, + Close: curr.Close, + Volume: curr.Volume + } + + seq.push(inputs) + } + + console.log(data[data_index + 1]); + + + let target = data[data_index + 1]; + let output = { + Open: target.Open, + High: target.High, + Low: target.Low, + Close: target.Close, + Volume: target.Volume + } + // let target = {label:data[data_index + 1]}; + // delete target.Date + 
model.addData(seq, output) + + seq = [] + + data_index++; + } + model.normalizeData() +} + + +let py = 300 +let px = 80 + +function draw() { + // background(200) + updatedUI() + + + // for (let x = 0; x < data.length; x ++){ + // point = data[x].close + + // } + +} + +function updatedUI(){ + if (y < data.length && train){ + push() + fill(220) + noStroke() + rect(100,300,300,70) + rect(50,350,600,70) + pop() + + console.log(training_done); + + + + text(" Date: " + data[y].Date + " Close value: " + data[y].Close.toFixed(1),150,350) + text('Open: ' + data[y].Open.toFixed(1),80,400) + text('High: ' + data[y].High.toFixed(1),180,400) + text('Low: ' + data[y].Low.toFixed(1),280,400) + text('Close: ' + data[y].Close.toFixed(1),380,400) + text('Volume: ' + data[y].Volume.toFixed(1),480,400) + + + point = data[y].Close; + cy = map(point, 90, 120, 250, 70) + cx = x+80 + push() + if(training_done){ + fill(144,238,144); + } else { + fill(0) + } + ellipse(cx,cy,5,5) + line(px,py,cx,cy); + line(x+80,300,x+80,cy) + pop() + + px = cx + py = cy + y +=1; + x += 8; + } + + + if (train){ + predict.removeAttribute('disabled'); + } else { + predict.attribute('disabled','true'); + } +} + +function UI(){ + + rec_circle = createButton('Open Data and Train'); + rec_circle.mouseClicked(() => {train = true; trainData()}); + rec_circle.style("font-family", "Georgia"); + rec_circle.style("font-size", "15px"); + rec_circle.position(20,20) + + predict = createButton('Predict Close for Next Day'); + predict.mouseClicked(predictData); + predict.style("font-family", "Georgia"); + predict.style("font-size", "15px"); + predict.position(200,440) + +} + +function trainData(){ + model.normalizeData() + let options = { + epochs: 60, + } + model.train(options, finishedTraining); +} + +function predictData(){ + console.log(data.length) + seq = []; + let latest = data.slice(-seqlength) + console.log('latest',latest); + for (let x = 0; x < seqlength ; x++){ + let curr = latest[x]; + let inputs = { + Open: 
curr.Open, + High: curr.High, + Low: curr.Low, + Close: curr.Close, + Volume: curr.Volume + } + seq.push(inputs) + } + + model.predict(seq, gotResults); +} + +function gotResults(results) { + console.log(results); + addNewData(results); +} + +function addNewData(results){ + let date_old = data[data.length-1].Date + let date = new Date(date_old); + date.setDate(date.getDate() + 1); + let nextDateStr = date.toISOString().split('T')[0]; + new_values = { + "Date": nextDateStr, + "Open": parseFloat(results[0].value), + "High": parseFloat(results[1].value), + "Low": parseFloat(results[2].value), + "Close": parseFloat(results[3].value), + "Volume": parseFloat(results[4].value), + }, + data.push(new_values) +} + +function finishedTraining(){ + console.log("Training Done!") + training_done = true; +} \ No newline at end of file diff --git a/examples/timeSeries-stock-prediction/stock_data.json b/examples/timeSeries-stock-prediction/stock_data.json new file mode 100644 index 00000000..7a928e11 --- /dev/null +++ b/examples/timeSeries-stock-prediction/stock_data.json @@ -0,0 +1,36 @@ +{ + "data": [ + {"Date": "2024-01-01", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.0, "Volume": 1500000}, + {"Date": "2024-01-02", "Open": 102.0, "High": 108.0, "Low": 97.0, "Close": 104.5, "Volume": 1550000}, + {"Date": "2024-01-03", "Open": 104.5, "High": 110.0, "Low": 99.0, "Close": 106.0, "Volume": 1600000}, + {"Date": "2024-01-04", "Open": 106.0, "High": 112.0, "Low": 100.0, "Close": 103.0, "Volume": 1650000}, + {"Date": "2024-01-05", "Open": 103.0, "High": 109.0, "Low": 98.0, "Close": 100.0, "Volume": 1700000}, + {"Date": "2024-01-06", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 99.0, "Volume": 1750000}, + {"Date": "2024-01-07", "Open": 99.0, "High": 104.0, "Low": 94.0, "Close": 101.5, "Volume": 1800000}, + {"Date": "2024-01-08", "Open": 101.5, "High": 107.0, "Low": 96.0, "Close": 104.0, "Volume": 1850000}, + {"Date": "2024-01-09", "Open": 104.0, "High": 109.0, "Low": 
99.0, "Close": 107.0, "Volume": 1900000}, + {"Date": "2024-01-10", "Open": 107.0, "High": 113.0, "Low": 102.0, "Close": 105.0, "Volume": 1950000}, + {"Date": "2024-01-11", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 103.5, "Volume": 2000000}, + {"Date": "2024-01-12", "Open": 103.5, "High": 108.0, "Low": 98.0, "Close": 100.0, "Volume": 2050000}, + {"Date": "2024-01-13", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.5, "Volume": 2100000}, + {"Date": "2024-01-14", "Open": 102.5, "High": 108.0, "Low": 98.0, "Close": 105.0, "Volume": 2150000}, + {"Date": "2024-01-15", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 107.5, "Volume": 2200000}, + {"Date": "2024-01-16", "Open": 107.5, "High": 113.0, "Low": 102.0, "Close": 104.0, "Volume": 2250000}, + {"Date": "2024-01-17", "Open": 104.0, "High": 109.0, "Low": 99.0, "Close": 101.0, "Volume": 2300000}, + {"Date": "2024-01-18", "Open": 101.0, "High": 106.0, "Low": 94.0, "Close": 100.0, "Volume": 2350000}, + {"Date": "2024-01-19", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.0, "Volume": 2400000}, + {"Date": "2024-01-20", "Open": 102.0, "High": 107.0, "Low": 97.0, "Close": 105.0, "Volume": 2450000}, + {"Date": "2024-01-21", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 107.5, "Volume": 2500000}, + {"Date": "2024-01-22", "Open": 107.5, "High": 113.0, "Low": 102.0, "Close": 104.0, "Volume": 2550000}, + {"Date": "2024-01-23", "Open": 104.0, "High": 109.0, "Low": 99.0, "Close": 102.0, "Volume": 2600000}, + {"Date": "2024-01-24", "Open": 102.0, "High": 108.0, "Low": 97.0, "Close": 104.5, "Volume": 2650000}, + {"Date": "2024-01-25", "Open": 104.5, "High": 110.0, "Low": 99.0, "Close": 107.0, "Volume": 2700000}, + {"Date": "2024-01-26", "Open": 107.0, "High": 113.0, "Low": 102.0, "Close": 105.0, "Volume": 2750000}, + {"Date": "2024-01-27", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 103.0, "Volume": 2800000}, + {"Date": "2024-01-28", "Open": 103.0, "High": 108.0, "Low": 98.0, "Close": 
101.5, "Volume": 2850000}, + {"Date": "2024-01-29", "Open": 101.5, "High": 106.0, "Low": 96.0, "Close": 100.0, "Volume": 2900000}, + {"Date": "2024-01-30", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.0, "Volume": 2950000}, + {"Date": "2024-01-31", "Open": 102.0, "High": 108.0, "Low": 97.0, "Close": 104.0, "Volume": 3000000} + ] + } + \ No newline at end of file diff --git a/examples/timeSeries-train-quickdraw/index.html b/examples/timeSeries-train-quickdraw/index.html new file mode 100644 index 00000000..31246e5f --- /dev/null +++ b/examples/timeSeries-train-quickdraw/index.html @@ -0,0 +1,23 @@ + + + + + + + + ml5.js Time Series Train Quickdraw Example + + + + + + + + diff --git a/examples/timeSeries-train-quickdraw/sketch.js b/examples/timeSeries-train-quickdraw/sketch.js new file mode 100644 index 00000000..219aa69e --- /dev/null +++ b/examples/timeSeries-train-quickdraw/sketch.js @@ -0,0 +1,221 @@ +/* + * 👋 Hello! This is an ml5.js example made and shared with ❤️. + * Learn more about the ml5.js project: https://ml5js.org/ + * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md + * + * This example demonstrates training a shape classifier based on mouse movements (coordinates) through ml5.timeSeries + */ + +let model; +let counts = { + Circle_datacount:0, + Square_datacount:0, +} +let curr_shape = 'Circle' +let state = 'collection'; +let pressedOnce = true; +let frame_count = 0; +let datapoints; +let sequence = []; + +// Training Data lenghts +let ink_multiplier = 3; +let num_seq = 20; + +function setup() { + // p5 js elements + createCanvas(600, 400); + background(220); + UI(); + + // set framerate to constant rate for constant data collection + frameRate(60); + + // set the backend to either webgl or cpu + ml5.setBackend('webgl'); + + // set the options to initialize timeSeries Neural Network + let options = { + inputs: ['x','y'], + outputs: ['label'], + task: 'classification', + dataModality: 'spatial', 
+ debug: 'true', + learningRate: 0.005 + }; + + model = ml5.timeSeries(options); +} + +function draw() { + // record data when the mouse is pressed inside the canvas + if (mouseIsPressed && pressedOnce && mouseY<400 && mouseX<600){ + + // draw lines through coordinates + line(pmouseX, pmouseY, mouseX,mouseY); + + frame_count++; + + let inputs = {x: mouseX,y: mouseY}; + sequence.push(inputs); + + if (sequence.length == num_seq*ink_multiplier){ + pressedOnce = false; + frame_count = 0 + + // if state is collection, add whole sequence as X, and shape as Y + if (state == 'collection'){ + let target = {label: curr_shape}; + model.addData(sequence, target); + + // add to the count for each + counts[`${curr_shape}_datacount`] += 1; + updateDataCountUI() + + // reset the screen + background(220); + textSize(20) + fill(0); + text("Recording: " + curr_shape, 50,50); + // if prediction, classify using the whole sequence + } else if (state == 'prediction'){ + model.classify(sequence, gotResults) + + background(220); + } + + // reset the sequence + sequence = []; + } + } + inkBar(); +} + +function trainModel(){ + // normalize Data first before Training + model.normalizeData(); + + // set the number of epochs for training + let options = { + epochs: 40, + } + model.train(options,whileTraining,finishedTraining); + + background(220); + state = 'training'; + text("Training...", 50,50); + rec_circle.style('background-color', ''); + rec_square.style("background-color",''); + train_but.style('background-color', '#f0f0f0'); +} + +function whileTraining(epoch, loss) { + console.log(epoch); +} + +function finishedTraining() { + background(220) + text("Training Finished, Draw again to predict", 50,50); + state = 'prediction'; +} + +function gotResults(results) { + const label = results[0].label; + + fill(0); + text("Prediction: " + label, 50,50); +} + +// code to signify drawing can be done again +function mouseReleased(){ + pressedOnce = true; +} + +////////////// UI Elements 
//////////// + +// code to visualize how much ink left +function inkBar(){ + datapoints = map(frame_count,0,ink_multiplier*num_seq, 0,num_seq) + + bar_height = 250 + height_miltiplier = bar_height/num_seq + push() + fill(0) + textSize(15) + text('Ink:', 550,90) + rect(550,100,25,num_seq*height_miltiplier) + fill(255) + rect(550,100,25,datapoints*height_miltiplier) + pop() +} + +// code for UI elements such as buttons +function UI(){ + textSize(20) + + rec_circle = createButton('Record Circle'); + rec_circle.mouseClicked(recordCircle); + rec_circle.style("font-family", "Georgia"); + rec_circle.style("font-size", "20px"); + rec_circle.style("background-color",'#f0f0f0'); + + rec_square = createButton('Record Square'); + rec_square.mouseClicked(recordSquare); + rec_square.style("font-family", "Georgia"); + rec_square.style("font-size", "20px"); + + train_but = createButton('Train and Predict'); + train_but.mouseClicked(trainModel); + train_but.style("font-family", "Georgia"); + train_but.style("font-size", "20px"); + + function recordCircle(){ + state = 'collection'; + curr_shape = 'Circle'; + + background(220); + text("Recording: Circle", 50,50); + rec_circle.style("background-color",'#f0f0f0'); + rec_square.style('background-color', ''); + train_but.style('background-color', ''); + } + + function recordSquare(){ + state = 'collection'; + curr_shape = 'Square'; + + background(220); + text("Recording: Square", 50,50); + rec_circle.style('background-color', ''); + rec_square.style("background-color",'#f0f0f0'); + train_but.style('background-color', ''); + } + + instructionP = createP( + 'Instructions:
1.) Press the "Record Circle" or "Record Square" and start drawing until the ink runs out
2.) Draw multiple times for each shape
3.) Press "Train and Predict" and wait for training to finish
4.) Draw again to predict drawn shape

Tip: Collect at least 5 drawings for each:' + ); + instructionP.style("width", "640px"); + dataCountsP = createP( + "circle data: " + + counts.Circle_datacount + + "
square data: " + + counts.Square_datacount + ); +} + +// Update the HTML UI with the current data counts +function updateDataCountUI() { + dataCountsP.html( + "circle data: " + + counts.Circle_datacount + + "
square data: " + + counts.Square_datacount + ); +} + +function keyPressed(){ + if (key == 's'){ + + model.save('hello'); + } +} diff --git a/src/LSTM/index.js b/src/TimeSeries/index.js similarity index 70% rename from src/LSTM/index.js rename to src/TimeSeries/index.js index 4f6efbd2..2f28efc6 100644 --- a/src/LSTM/index.js +++ b/src/TimeSeries/index.js @@ -1,10 +1,8 @@ import * as tf from "@tensorflow/tfjs"; import callCallback from "../utils/callcallback"; import handleArguments from "../utils/handleArguments"; -import { imgToPixelArray, isInstanceOfSupportedElement, } from "../utils/imageUtilities"; import NeuralNetwork from "./timeSeries"; import NeuralNetworkData from "./timeSeriesData"; - import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; import NeuralNetworkVis from "../NeuralNetwork/NeuralNetworkVis"; @@ -17,29 +15,26 @@ const DEFAULTS = { modelUrl: null, layers: [], task: null, + dataModality: null, debug: false, learningRate: 0.2, hiddenUnits: 16, - neuroEvolution: false, }; - - /* as far as the p5 sketch is concerned, it will directly call only a few functions in the class, these are the following: -model.addData +model.addData - Done model.saveData, model etc model.train model.classify/predict etc - +No image classification +No neural evolution */ class timeSeries { - - //reviewed constructor(options, callback) { this.options = { @@ -60,14 +55,10 @@ class timeSeries { this.ready = callCallback(this.init(), callback); } - // mainly for loading data - should be async async init() { - console.log('init yeah') if (this.options.dataUrl) { - console.log('URL provided, will load data') await this.loadDataFromUrl(this.options.dataUrl); } else if (this.options.modelUrl) { - // will take a URL to model.json, an object, or files array await this.load(this.options.modelUrl); } return this; @@ -91,6 +82,11 @@ class timeSeries { - for yInputs: 1. 
similar to neural network, so use same logic + + - at the end of the adding data, the data is formatted to a sequence of objects similar to 1 of xinputs + - + + - new parameter dataModality, either spatial or sequential, spatial uses cnn1d and sequential uses lstm */ addData(xInputs, yInputs, options = null){ @@ -217,8 +213,6 @@ class timeSeries { // this method does not get shape for images but instead for timesteps const { inputs } = this.options; - console.log('meta',inputs); - let inputShape; if (typeof inputs === 'number'){ inputShape = inputs; @@ -226,8 +220,6 @@ class timeSeries { inputShape = inputs.length; //will be fed into the tensors later } - console.log('inputshape',inputShape); - this.neuralNetworkData.createMetadata(inputShape); } @@ -237,7 +229,6 @@ class timeSeries { } convertTrainingDataToTensors() { - console.log('training',this.data.training); return this.neuralNetworkData.convertRawToTensors(this.data.training); } @@ -270,13 +261,16 @@ class timeSeries { addDefaultLayers() { let layers; const task = this.options.task; - switch (task.toLowerCase()) { - // if the task is classification - case "classification": + const dataModality = this.options.dataModality; + let taskConditions = `${task}_${dataModality}`; + console.log(this.neuralNetworkData.meta.seriesShape) + switch (taskConditions.toLowerCase()) { + // if the task is classification and spatial modality + case "classification_spatial": layers = [ { type: "conv1d", - filters: 64, + filters: 8, kernelSize: 3, activation: "relu", inputShape: this.neuralNetworkData.meta.seriesShape, @@ -287,7 +281,7 @@ class timeSeries { }, { type: "conv1d", - filters: 128, + filters: 16, kernelSize: 3, activation: "relu", inputShape: this.neuralNetworkData.meta.seriesShape, @@ -301,65 +295,32 @@ class timeSeries { }, { type: "dense", - units: 128, + units: this.options.hiddenUnits, activation: "relu", }, { type: "dense", - units:2, activation: "softmax", }, ]; - // let shape = 
this.neuralNetworkData.meta.seriesShape - // layers = [ - // { - // type: "input", - // shape: shape, - // }, - // { - // type: "reshape", - // targetShape: [shape[0],shape[1]*shape[2]], - // }, - // { - // type: "conv1d", - // filters: 64, - // kernelSize: 3, - // activation: "relu", - // inputShape: shape, - // }, - // { - // type: "maxPooling1d", - // poolSize: 2, - // }, - // { - // type: "conv1d", - // filters: 128, - // kernelSize: 3, - // activation: "relu", - // }, - // { - // type: "maxPooling1d", - // poolSize: 2, - // }, - // { - // type: "flatten", - // }, - // { - // type: "dense", - // units: 128, - // activation: "relu", - // }, - // { - // type: "dense", - // units:2, - // activation: "softmax", - // }, - // ]; return this.createNetworkLayers(layers); - // if the task is regression - case "regression": + // if the task is classification and sequential modality + case "classification_sequential": layers = [ + { + type: "lstm", + units: 16, + activation: "relu", + inputShape: this.neuralNetworkData.meta.seriesShape, + returnSequences: true, + }, + { + type: "lstm", + units: 8, + activation: "relu", + returnSequences: false, + }, { type: "dense", units: this.options.hiddenUnits, @@ -367,53 +328,94 @@ class timeSeries { }, { type: "dense", - activation: "sigmoid", + activation: "softmax", }, ]; + return this.createNetworkLayers(layers); - // if the task is imageClassification - case "imageclassification": + + // if the task is regression + case "regression_spatial": layers = [ { - type: "conv2d", + type: "conv1d", filters: 8, - kernelSize: 5, - strides: 1, + kernelSize: 3, activation: "relu", - kernelInitializer: "varianceScaling", + inputShape: this.neuralNetworkData.meta.seriesShape, }, { - type: "maxPooling2d", - poolSize: [2, 2], - strides: [2, 2], + type: "maxPooling1d", + poolSize: 2, }, { - type: "conv2d", + type: "conv1d", filters: 16, - kernelSize: 5, - strides: 1, + kernelSize: 3, activation: "relu", - kernelInitializer: "varianceScaling", + 
inputShape: this.neuralNetworkData.meta.seriesShape, }, { - type: "maxPooling2d", - poolSize: [2, 2], - strides: [2, 2], + type: "maxPooling1d", + poolSize: 2, }, { type: "flatten", }, { type: "dense", - kernelInitializer: "varianceScaling", - activation: "softmax", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", }, ]; + return this.createNetworkLayers(layers); + case "regression_sequential": + layers = [ + { + type: "lstm", + units: 16, + activation: "relu", + inputShape: this.neuralNetworkData.meta.seriesShape, + returnSequences: true, + }, + { + type: "lstm", + units: 8, + activation: "relu", + }, + { + type: "dense", + units: this.options.hiddenUnits, + activation: "relu", + }, + { + type: "dense", + activation: "sigmoid", + }, + ]; + + return this.createNetworkLayers(layers); + default: - console.log("no imputUnits or outputUnits defined"); + console.log("no inputUnits or outputUnits defined"); layers = [ + { + type: "lstm", + units: 16, + activation: "relu", + inputShape: this.neuralNetworkData.meta.seriesShape, + }, + { + type: "lstm", + units: 8, + activation: "relu", + }, { type: "dense", units: this.options.hiddenUnits, @@ -475,8 +477,10 @@ class timeSeries { } normalizeData() { + if (!this.neuralNetworkData.data.raw.length > 0){ + throw new Error('Empty Data Error: You Cannot Normalize/Train without adding any data! 
Please add data first') + } if (!this.neuralNetworkData.isMetadataReady) { - // if the inputs are defined as an array of [img_width, img_height, channels] this.createMetaData(); } @@ -492,64 +496,77 @@ class timeSeries { // set isNormalized to true this.neuralNetworkData.meta.isNormalized = true; - console.log('train',this.data.training) } + // //////// + classify(_input, _cb) { return callCallback(this.classifyInternal(_input), _cb); } - async classifyInternal(_input){ + async classifyInternal(_input) { const { meta } = this.neuralNetworkData; - const inputData = this.formatInputsForPredictionAll(_input); + const headers = Object.keys(meta.inputs); + + let inputData; + + if (this.options.task === "imageClassification") { + // get the inputData for classification + // if it is a image type format it and + // flatten it + inputData = this.searchAndFormat(_input); + if (Array.isArray(inputData)) { + inputData = inputData.flat(); + } else { + inputData = inputData[headers[0]]; + } + + if (meta.isNormalized) { + // TODO: check to make sure this property is not static!!!! 
+ const { min, max } = meta.inputs[headers[0]]; + inputData = this.neuralNetworkData.normalizeArray( + Array.from(inputData), + { min, max } + ); + } else { + inputData = Array.from(inputData); + } + + inputData = tf.tensor([inputData], [1, ...meta.inputUnits]); + } else { + inputData = this.formatInputsForPredictionAll(_input); + } const unformattedResults = await this.neuralNetwork.classify(inputData); inputData.dispose(); + if (meta !== null) { + const label = Object.keys(meta.outputs)[0]; + const vals = Object.entries(meta.outputs[label].legend); + + const formattedResults = unformattedResults.map((unformattedResult) => { + return vals + .map((item, idx) => { + return { + [item[0]]: unformattedResult[idx], + label: item[0], + confidence: unformattedResult[idx], + }; + }) + .sort((a, b) => b.confidence - a.confidence); + }); + + // return single array if the length is less than 2, + // otherwise return array of arrays + if (formattedResults.length < 2) { + return formattedResults[0]; + } + return formattedResults; + } + return unformattedResults; } - // async classifyInternal(_input) { - // const { meta } = this.neuralNetworkData; - // const headers = Object.keys(meta.inputs); - - // let inputData; - // console.log(_input) - // // inputData = this.neuralNetworkData. 
- // inputData = this.formatInputsForPredictionAll(_input); - - // const unformattedResults = await this.neuralNetwork.classify(inputData); - // inputData.dispose(); - - // if (meta !== null) { - // const label = Object.keys(meta.outputs)[0]; - // const vals = Object.entries(meta.outputs[label].legend); - - // const formattedResults = unformattedResults.map((unformattedResult) => { - // return vals - // .map((item, idx) => { - // return { - // [item[0]]: unformattedResult[idx], - // label: item[0], - // confidence: unformattedResult[idx], - // }; - // }) - // .sort((a, b) => b.confidence - a.confidence); - // }); - - // // return single array if the length is less than 2, - // // otherwise return array of arrays - // if (formattedResults.length < 2) { - // return formattedResults[0]; - // } - // return formattedResults; - // } - - // return unformattedResults; - // } - - - formatInputsForPredictionAll(_input) { const { meta } = this.neuralNetworkData; const inputHeaders = Object.keys(meta.inputs); @@ -561,6 +578,68 @@ class timeSeries { return output; } + predict(_input, _cb) { + return callCallback(this.predictInternal(_input), _cb); + } + + async predictInternal(_input) { + const { meta } = this.neuralNetworkData; + + const inputData = this.formatInputsForPredictionAll(_input); + + const unformattedResults = await this.neuralNetwork.predict(inputData); + inputData.dispose(); + + if (meta !== null) { + const labels = Object.keys(meta.outputs); + + const formattedResults = unformattedResults.map((unformattedResult) => { + return labels.map((item, idx) => { + // check to see if the data were normalized + // if not, then send back the values, otherwise + // unnormalize then return + let val; + let unNormalized; + if (meta.isNormalized) { + const { min, max } = meta.outputs[item]; + val = nnUtils.unnormalizeValue(unformattedResult[idx], min, max); + unNormalized = unformattedResult[idx]; + } else { + val = unformattedResult[idx]; + } + + const d = { + [labels[idx]]: 
val, + label: item, + value: val, + }; + + // if unNormalized is not undefined, then + // add that to the output + if (unNormalized) { + d.unNormalizedValue = unNormalized; + } + + return d; + }); + }); + + // return single array if the length is less than 2, + // otherwise return array of arrays + if (formattedResults.length < 2) { + return formattedResults[0]; + } + return formattedResults; + } + + // if no meta exists, then return unformatted results; + return unformattedResults; + } + + + + + /** * //////////////////////////////////////////////////////////// @@ -568,39 +647,15 @@ class timeSeries { * //////////////////////////////////////////////////////////// */ - /** - * @public - * saves the training data to a JSON file. - * @param {string} [name] Optional - The name for the saved file. - * Should not include the file extension. - * Defaults to the current date and time. - * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. - * @return {Promise} - */ saveData(name, callback) { const args = handleArguments(name, callback); return callCallback(this.neuralNetworkData.saveData(args.name), args.callback); } - /** - * @public - * load data - * @param {string | FileList | Object} filesOrPath - The URL of the file to load, - * or a FileList object (.files) from an HTML element . - * @param {ML5Callback} [callback] Optional - A function to call when the loading is complete. 
- * @return {Promise} - */ async loadData(filesOrPath, callback) { return callCallback(this.neuralNetworkData.loadData(filesOrPath), callback); } - /** - * Loads data from a URL using the appropriate function - * @param {*} dataUrl - * @param {*} inputs - * @param {*} outputs - * @void - */ async loadDataFromUrl(dataUrl, inputs, outputs) { let json; let dataFromUrl @@ -628,43 +683,15 @@ class timeSeries { this.prepareForTraining(); } - // async loadDataFromUrl() { - // const { dataUrl, inputs, outputs } = this.options; - - // console.log(this.options) - // await this.neuralNetworkData.loadDataFromUrl( - // dataUrl, - // inputs, - // outputs - // ); - - // // once the data are loaded, create the metadata - // // and prep the data for training - // // if the inputs are defined as an array of [img_width, img_height, channels] - // this.createMetaData(); - - // this.prepareForTraining(); - // } - /** * //////////////////////////////////////////////////////////// * Save / Load Model * //////////////////////////////////////////////////////////// */ - /** - * @public - * saves the model, weights, and metadata - * @param {string} [name] Optional - The name for the saved file. - * Should not include the file extension. - * Defaults to 'model'. - * @param {ML5Callback} [callback] Optional - A function to call when the save is complete. 
- * @return {Promise} - */ async save(name, callback) { const args = handleArguments(name, callback); const modelName = args.string || 'model'; - console.log("hello") // save the model return callCallback(Promise.all([ this.neuralNetwork.save(modelName), diff --git a/src/LSTM/timeSeries.js b/src/TimeSeries/timeSeries.js similarity index 97% rename from src/LSTM/timeSeries.js rename to src/TimeSeries/timeSeries.js index 2be1d675..9d7e8b29 100644 --- a/src/LSTM/timeSeries.js +++ b/src/TimeSeries/timeSeries.js @@ -106,7 +106,7 @@ class NeuralNetwork { const xs = TRAINING_OPTIONS.inputs; const ys = TRAINING_OPTIONS.outputs; - + console.log('train',xs,ys); const { batchSize, epochs, shuffle, validationSplit, whileTraining } = TRAINING_OPTIONS; @@ -193,12 +193,13 @@ class NeuralNetwork { }, ], }; - + console.log('data.weightData',data.weightData); await saveBlob( data.weightData, `${modelName}.weights.bin`, "application/octet-stream" ); + console.log('this.weightsManifest',this.weightsManifest) await saveBlob( JSON.stringify(this.weightsManifest), `${modelName}.json`, diff --git a/src/LSTM/timeSeriesData.js b/src/TimeSeries/timeSeriesData.js similarity index 97% rename from src/LSTM/timeSeriesData.js rename to src/TimeSeries/timeSeriesData.js index b7fb9254..3d7638c0 100644 --- a/src/LSTM/timeSeriesData.js +++ b/src/TimeSeries/timeSeriesData.js @@ -135,7 +135,12 @@ class NeuralNetworkData { inputMeta[k].min = 0; inputMeta[k].max = 1; } else if (inputMeta[k].dtype === "number") { - const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); + let dataAsArray; + if (xsOrYs === 'ys'){ + dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]); + } else if (xsOrYs === 'xs'){ + dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); + } inputMeta[k].min = nnUtils.getMin(dataAsArray); inputMeta[k].max = nnUtils.getMax(dataAsArray); } else if (inputMeta[k].dtype === "array") { @@ -389,7 +394,12 @@ class NeuralNetworkData { 
options.legend = inputMeta[k].legend; normalized[k] = this.normalizeArray(dataAsArray, options); } else if (inputMeta[k].dtype === "number") { - const dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); + let dataAsArray; + if (xsOrYs === 'ys'){ + dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]); + } else if (xsOrYs === 'xs'){ + dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); + } normalized[k] = this.normalizeArray(dataAsArray, options); } else if (inputMeta[k].dtype === "array") { const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]); @@ -424,7 +434,7 @@ class NeuralNetworkData { const batch = normalized[features[0]].length / seriesStep; this.meta.seriesShape = [seriesStep,feature_length]; - + console.log('series shape',this.meta.seriesShape) let zipped = []; // zip arrays before reshaping @@ -438,6 +448,7 @@ class NeuralNetworkData { output = tsUtils.reshapeTo3DArray(zipped,[batch,seriesStep,feature_length]) } + console.log('thismeta',this.meta) return output; } @@ -794,7 +805,6 @@ class NeuralNetworkData { `${modelName}_meta.json`, "text/plain" ); - } /** diff --git a/src/LSTM/timeSeriesUtils.js b/src/TimeSeries/timeSeriesUtils.js similarity index 99% rename from src/LSTM/timeSeriesUtils.js rename to src/TimeSeries/timeSeriesUtils.js index 9334cdfd..92e1f79d 100644 --- a/src/LSTM/timeSeriesUtils.js +++ b/src/TimeSeries/timeSeriesUtils.js @@ -25,13 +25,13 @@ class TimeSeriesUtils { if(!Array.isArray(xInputs)){ throw new error('Syntax Error: Data Should be in an Array') } - let isObjects = true; let isArrays = true; let isValues = true; - + for (let i = 0; i < xInputs.length ; i++){ if (nnUtils.getDataType(xInputs[i]) === 'object'){ + console.log('here') isArrays = false; isValues = false; if ( i > 0 ) { @@ -40,6 +40,7 @@ class TimeSeriesUtils { } } } else if (Array.isArray(xInputs[i])){ + console.log('here2') isObjects = false; isValues = false; if ( i > 0 ) { diff --git a/src/index.js 
b/src/index.js index 5e771d34..97c15a55 100644 --- a/src/index.js +++ b/src/index.js @@ -7,7 +7,7 @@ import imageClassifier from "./ImageClassifier"; import soundClassifier from "./SoundClassifier"; import setBackend from "./utils/setBackend"; import bodySegmentation from "./BodySegmentation"; -import timeSeries from "./LSTM"; +import timeSeries from "./TimeSeries"; import communityStatement from "./utils/communityStatement"; import * as tf from "@tensorflow/tfjs"; import * as tfvis from "@tensorflow/tfjs-vis"; From 658290c6140b0b9f22bd74a17147339270c70ecc Mon Sep 17 00:00:00 2001 From: mop9047 Date: Thu, 8 Aug 2024 20:35:11 +0800 Subject: [PATCH 08/13] added comments to example code --- examples/timeSeries-hand-gestures/index.html | 3 +- examples/timeSeries-hand-gestures/sketch.js | 2 +- .../index.html | 3 +- .../sketch.js | 87 ++++----- .../timeSeries-stock-prediction/index.html | 5 +- .../timeSeries-stock-prediction/sketch.js | 166 ++++++++++-------- .../timeSeries-train-quickdraw/index.html | 3 +- examples/timeSeries-train-quickdraw/sketch.js | 2 +- src/TimeSeries/index.js | 3 +- 9 files changed, 130 insertions(+), 144 deletions(-) diff --git a/examples/timeSeries-hand-gestures/index.html b/examples/timeSeries-hand-gestures/index.html index 1a6026c9..01a8da7d 100644 --- a/examples/timeSeries-hand-gestures/index.html +++ b/examples/timeSeries-hand-gestures/index.html @@ -3,8 +3,7 @@ Learn more about the ml5.js project: https://ml5js.org/ ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md - This example demonstrates Sound classification using Google's Teachable Machine and p5.js - Create your own custom model with Google's Teachable Machine! https://teachablemachine.withgoogle.com/ + This example demonstrates training a Sign Language classifier through ml5.TimeSeries. 
--> diff --git a/examples/timeSeries-hand-gestures/sketch.js b/examples/timeSeries-hand-gestures/sketch.js index a0c971d9..d70eb42f 100644 --- a/examples/timeSeries-hand-gestures/sketch.js +++ b/examples/timeSeries-hand-gestures/sketch.js @@ -3,7 +3,7 @@ * Learn more about the ml5.js project: https://ml5js.org/ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md * - * This example demonstrates training a color classifier through ml5.neuralNetwork. + * This example demonstrates training a Sign Language classifier through ml5.TimeSeries. */ const seqlength = 50; diff --git a/examples/timeSeries-load-model-hand-gestures/index.html b/examples/timeSeries-load-model-hand-gestures/index.html index 266905e1..b9aa65eb 100644 --- a/examples/timeSeries-load-model-hand-gestures/index.html +++ b/examples/timeSeries-load-model-hand-gestures/index.html @@ -3,8 +3,7 @@ Learn more about the ml5.js project: https://ml5js.org/ ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md - This example demonstrates Sound classification using Google's Teachable Machine and p5.js - Create your own custom model with Google's Teachable Machine! https://teachablemachine.withgoogle.com/ + This example demonstrates loading a Sign Language classifier through ml5.TimeSeries. --> diff --git a/examples/timeSeries-load-model-hand-gestures/sketch.js b/examples/timeSeries-load-model-hand-gestures/sketch.js index 9285ae71..61963a9e 100644 --- a/examples/timeSeries-load-model-hand-gestures/sketch.js +++ b/examples/timeSeries-load-model-hand-gestures/sketch.js @@ -3,14 +3,17 @@ * Learn more about the ml5.js project: https://ml5js.org/ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md * - * This example demonstrates training a color classifier through ml5.neuralNetwork. + * This example demonstrates loading a Sign Language classifier through ml5.TimeSeries. 
*/ +// change this to make the recording longer +const seqlength = 50; + + let handPose; let video; let hands = []; let sequence = []; -const seqlength = 50; let recording_finished = false; let predicted_word = '' @@ -22,6 +25,7 @@ function preload() { function setup() { createCanvas(640, 480); + // create video capture video = createCapture(VIDEO); video.size(640, 480); video.hide(); @@ -31,59 +35,52 @@ function setup() { handPose.detectStart(video, gotHands); let options = { - outputs: ['label'], task: 'classification', - debug: 'true', - learningRate: 0.001, + dataModality: 'spatial', }; model = ml5.timeSeries(options); + // setup the model files to load const modelDetails = { model: "model/model.json", metadata: "model/model_meta.json", weights: "model/model.weights.bin", }; + + // load the model and call modelLoaded once finished model.load(modelDetails, modelLoaded); - - nameField = createInput('') - nameField.attribute('placeholder', 'word to train') - nameField.position(100, 100) - nameField.size(250) } - +// call back for load model function modelLoaded(){ console.log('model loaded!') } function draw() { - - + // draw video on the canvas image(video, 0, 0, width, height); - textSize(100) - fill(255) - text(predicted_word, 100, height/2) + // put the text on screen after a prediction + placePredictedText() + // if hands are found then start recording if(hands.length>0 && recording_finished == false){ if (sequence.length <= seqlength){ + // get coordinates from hands (21 points) handpoints = drawPoints(); sequence.push(handpoints); + + // once sequence reaches the seqlength, add sequence as just one X value } else if (sequence.length>0){ - recording_finished = true; - - let word = nameField.value() - - if (word.length > 0){ - let target = {label:word} - console.log(sequence, target); - model.addData(sequence, target); - } else { - model.classify(sequence, gotResults); - } + // classify based on the collected data + model.classify(sequence, gotResults); + // 
reset the sequence sequence = []; + recording_finished = true; } + + // can only record again when hand is out of frame } else { if (hands.length == 0){ recording_finished = false; @@ -91,6 +88,7 @@ function draw() { } } +// draw the points on the hands function drawPoints(){ let handpoints = [] for (let i = 0; i < hands.length; i++) { @@ -113,33 +111,16 @@ function gotHands(results) { hands = results; } -function keyPressed(){ - if (key == 's'){ - model.save('hello'); - } - if (key == 'z'){ - model.saveData(); - } - - if (key == 't'){ - model.normalizeData(); - let options = { - epochs: 100 - } - model.train(options,whileTraining,finishedTraining); - } -} - -function whileTraining(epoch, loss) { - console.log(epoch); -} - -function finishedTraining() { - console.log('finished training.'); -} - +// call back for accessing the results function gotResults(results){ predicted_word = results[0].label console.log(predicted_word) - text(predicted_word, 200,200) + text(predicted_word, 100,100) +} + +// for drawing text on screen +function placePredictedText(){ + textSize(100) + fill(255) + text(predicted_word, 100, height/2) } \ No newline at end of file diff --git a/examples/timeSeries-stock-prediction/index.html b/examples/timeSeries-stock-prediction/index.html index 1208fae9..d27d162b 100644 --- a/examples/timeSeries-stock-prediction/index.html +++ b/examples/timeSeries-stock-prediction/index.html @@ -3,8 +3,7 @@ Learn more about the ml5.js project: https://ml5js.org/ ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md - This example demonstrates Sound classification using Google's Teachable Machine and p5.js - Create your own custom model with Google's Teachable Machine! https://teachablemachine.withgoogle.com/ + This example demonstrates Training a Stock Price Predictor through ml5.TimeSeries. 
--> @@ -12,7 +11,7 @@ - ml5.js Time Series Weather Example + ml5.js Time Series Stock Prediction Example diff --git a/examples/timeSeries-stock-prediction/sketch.js b/examples/timeSeries-stock-prediction/sketch.js index 14271875..43776191 100644 --- a/examples/timeSeries-stock-prediction/sketch.js +++ b/examples/timeSeries-stock-prediction/sketch.js @@ -3,7 +3,7 @@ * Learn more about the ml5.js project: https://ml5js.org/ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md * - * This example demonstrates training a color classifier through ml5.neuralNetwork. + * This example demonstrates Training a Stock Price Predictor through ml5.TimeSeries. */ let classifer; @@ -16,19 +16,29 @@ let y= 0; let train = false; let training_done = false; +// load JSON data with same formatting from the internet, this means +// loadData() cannot yet be used as it is formatted differently function preload(){ json_data = loadJSON('stock_data.json'); } + function setup() { + // just get the data property from json data = json_data.data; + + // p5 and UI elements UI(); - frameRate(30); createCanvas(640, 480); background(220); + // set framerate to a constant value + frameRate(30); + + // set the backend to either webgl or cpu ml5.setBackend("webgl"); + // set the options to initialize timeSeries Neural Network let options = { task: 'regression', dataModality: "sequential", @@ -38,12 +48,13 @@ function setup() { }; model = ml5.timeSeries(options); - final = [] - + // iterate through data using simple sliding window algorithm data_index = seqlength - 1; while(data_index < data.length-1){ + // get the values [seqlength] steps before current index, collect and add for (let x = seqlength -1; x >= 0; x--){ let curr = data[data_index - x]; + // choose from the raw data what you want to to feed to the model let inputs = { Open: curr.Open, High: curr.High, @@ -52,13 +63,16 @@ function setup() { Volume: curr.Volume } + // once collected all data into an 
array to make it into a sequence + // the format of the sequence is like this [{},{},...,{}] + // this is the X value seq.push(inputs) } - - console.log(data[data_index + 1]); - + // the Y value to train is the value that comes after the sequence let target = data[data_index + 1]; + + // select the outputs you want to get, multiple outputs are possible let output = { Open: target.Open, High: target.High, @@ -66,33 +80,87 @@ function setup() { Close: target.Close, Volume: target.Volume } - // let target = {label:data[data_index + 1]}; - // delete target.Date + + // feed data into the model model.addData(seq, output) + // reset the sequence so new values can be added seq = [] + // iterate through the whole dataset moving the sliding window in each iteration data_index++; } + // normalize the data after adding everything model.normalizeData() } +function trainData(){ + model.normalizeData() + let options = { + epochs: 60, + } + model.train(options, finishedTraining); +} + +function finishedTraining(){ + console.log("Training Done!") + training_done = true; +} -let py = 300 -let px = 80 +function predictData(){ + seq = []; -function draw() { - // background(200) - updatedUI() + // choose the most recent sequences + let latest = data.slice(-seqlength) + for (let x = 0; x < seqlength ; x++){ + let curr = latest[x]; + // select the same properties for inputs + let inputs = { + Open: curr.Open, + High: curr.High, + Low: curr.Low, + Close: curr.Close, + Volume: curr.Volume + } + // add them to one array to make them a sequence + seq.push(inputs) + } + // use the sequence to predict + model.predict(seq, gotResults); +} - // for (let x = 0; x < data.length; x ++){ - // point = data[x].close +// put the new data in the dataset so this will be considered for any new predictions +function gotResults(results) { + console.log(results); + addNewData(results); +} - // } - +// code for adding new data to the dataset to be used for future prediction +function addNewData(results){ + 
let date_old = data[data.length-1].Date + let date = new Date(date_old); + date.setDate(date.getDate() + 1); + let nextDateStr = date.toISOString().split('T')[0]; + new_values = { + "Date": nextDateStr, + "Open": parseFloat(results[0].value), + "High": parseFloat(results[1].value), + "Low": parseFloat(results[2].value), + "Close": parseFloat(results[3].value), + "Volume": parseFloat(results[4].value), + }, + data.push(new_values) +} + +function draw() { + // draw some helpful visualizations + updatedUI() } +// create custom line graph for stock close prices +let py = 300; +let px = 80; function updatedUI(){ if (y < data.length && train){ push() @@ -102,10 +170,6 @@ function updatedUI(){ rect(50,350,600,70) pop() - console.log(training_done); - - - text(" Date: " + data[y].Date + " Close value: " + data[y].Close.toFixed(1),150,350) text('Open: ' + data[y].Open.toFixed(1),80,400) text('High: ' + data[y].High.toFixed(1),180,400) @@ -113,7 +177,6 @@ function updatedUI(){ text('Close: ' + data[y].Close.toFixed(1),380,400) text('Volume: ' + data[y].Volume.toFixed(1),480,400) - point = data[y].Close; cy = map(point, 90, 120, 250, 70) cx = x+80 @@ -134,7 +197,7 @@ function updatedUI(){ x += 8; } - + // if you havent trained yet, you cannot predict if (train){ predict.removeAttribute('disabled'); } else { @@ -142,6 +205,7 @@ function updatedUI(){ } } +// Buttons on screen function UI(){ rec_circle = createButton('Open Data and Train'); @@ -156,58 +220,4 @@ function UI(){ predict.style("font-size", "15px"); predict.position(200,440) -} - -function trainData(){ - model.normalizeData() - let options = { - epochs: 60, - } - model.train(options, finishedTraining); -} - -function predictData(){ - console.log(data.length) - seq = []; - let latest = data.slice(-seqlength) - console.log('latest',latest); - for (let x = 0; x < seqlength ; x++){ - let curr = latest[x]; - let inputs = { - Open: curr.Open, - High: curr.High, - Low: curr.Low, - Close: curr.Close, - Volume: curr.Volume - 
} - seq.push(inputs) - } - - model.predict(seq, gotResults); -} - -function gotResults(results) { - console.log(results); - addNewData(results); -} - -function addNewData(results){ - let date_old = data[data.length-1].Date - let date = new Date(date_old); - date.setDate(date.getDate() + 1); - let nextDateStr = date.toISOString().split('T')[0]; - new_values = { - "Date": nextDateStr, - "Open": parseFloat(results[0].value), - "High": parseFloat(results[1].value), - "Low": parseFloat(results[2].value), - "Close": parseFloat(results[3].value), - "Volume": parseFloat(results[4].value), - }, - data.push(new_values) -} - -function finishedTraining(){ - console.log("Training Done!") - training_done = true; } \ No newline at end of file diff --git a/examples/timeSeries-train-quickdraw/index.html b/examples/timeSeries-train-quickdraw/index.html index 31246e5f..dd31406d 100644 --- a/examples/timeSeries-train-quickdraw/index.html +++ b/examples/timeSeries-train-quickdraw/index.html @@ -3,8 +3,7 @@ Learn more about the ml5.js project: https://ml5js.org/ ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md - This example demonstrates Sound classification using Google's Teachable Machine and p5.js - Create your own custom model with Google's Teachable Machine! https://teachablemachine.withgoogle.com/ + This example demonstrates How to train your own quickdraw classifier through ml5.TimeSeries. 
--> diff --git a/examples/timeSeries-train-quickdraw/sketch.js b/examples/timeSeries-train-quickdraw/sketch.js index 219aa69e..aa241eed 100644 --- a/examples/timeSeries-train-quickdraw/sketch.js +++ b/examples/timeSeries-train-quickdraw/sketch.js @@ -3,7 +3,7 @@ * Learn more about the ml5.js project: https://ml5js.org/ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md * - * This example demonstrates training a shape classifier based on mouse movements (coordinates) through ml5.timeSeries + * This example demonstrates How to train your own quickdraw classifier through ml5.TimeSeries. */ let model; diff --git a/src/TimeSeries/index.js b/src/TimeSeries/index.js index 2f28efc6..170f7931 100644 --- a/src/TimeSeries/index.js +++ b/src/TimeSeries/index.js @@ -84,8 +84,7 @@ class timeSeries { 1. similar to neural network, so use same logic - at the end of the adding data, the data is formatted to a sequence of objects similar to 1 of xinputs - - - + - new parameter dataModality, either spatial or sequential, spatial uses cnn1d and sequential uses lstm */ From bbbcc92a4ce2517285135e61b4e6723bf3c47009 Mon Sep 17 00:00:00 2001 From: mop9047 Date: Wed, 21 Aug 2024 06:36:00 +0800 Subject: [PATCH 09/13] code changes based on gottfried's comments --- examples/timeSeries-hand-gestures/index.html | 2 +- examples/timeSeries-hand-gestures/sketch.js | 173 ++++----- .../index.html | 25 +- .../sketch.js | 92 ++--- .../timeSeries-stock-prediction/sketch.js | 223 ----------- .../stock_data.json | 36 -- .../timeSeries-train-mouse-gesture/index.html | 43 +++ .../timeSeries-train-mouse-gesture/sketch.js | 203 ++++++++++ .../timeSeries-train-quickdraw/index.html | 22 -- examples/timeSeries-train-quickdraw/sketch.js | 221 ----------- .../index.html | 2 +- .../weather_data.json | 196 ++++++++++ src/TimeSeries/index.js | 150 ++++---- src/TimeSeries/timeSeries.js | 14 +- src/TimeSeries/timeSeriesData.js | 160 ++++---- 
src/TimeSeries/timeSeriesUtils.js | 361 +++++++++++------- src/index.js | 3 +- webpack.config.js | 8 +- 18 files changed, 977 insertions(+), 957 deletions(-) delete mode 100644 examples/timeSeries-stock-prediction/sketch.js delete mode 100644 examples/timeSeries-stock-prediction/stock_data.json create mode 100644 examples/timeSeries-train-mouse-gesture/index.html create mode 100644 examples/timeSeries-train-mouse-gesture/sketch.js delete mode 100644 examples/timeSeries-train-quickdraw/index.html delete mode 100644 examples/timeSeries-train-quickdraw/sketch.js rename examples/{timeSeries-stock-prediction => timeSeries-weather-prediction}/index.html (91%) create mode 100644 examples/timeSeries-weather-prediction/weather_data.json diff --git a/examples/timeSeries-hand-gestures/index.html b/examples/timeSeries-hand-gestures/index.html index 01a8da7d..90d238c2 100644 --- a/examples/timeSeries-hand-gestures/index.html +++ b/examples/timeSeries-hand-gestures/index.html @@ -11,7 +11,7 @@ - ml5.js Sign Language Neural Network Train and Save + ml5.js Time Series Hand Gesture Train and Save diff --git a/examples/timeSeries-hand-gestures/sketch.js b/examples/timeSeries-hand-gestures/sketch.js index d70eb42f..8f1346e1 100644 --- a/examples/timeSeries-hand-gestures/sketch.js +++ b/examples/timeSeries-hand-gestures/sketch.js @@ -3,10 +3,10 @@ * Learn more about the ml5.js project: https://ml5js.org/ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md * - * This example demonstrates training a Sign Language classifier through ml5.TimeSeries. + * This example demonstrates training a Hand Gesture classifier through ml5.TimeSeries. 
*/ -const seqlength = 50; +let seqLength = 50; let handPose; let video; @@ -14,16 +14,25 @@ let video; let hands = []; let sequence = []; -let recording_finished = false; -let predicted_word = ''; +let recordingFinished = false; +let predictedWord = ""; // UI variables -let training_words = {}; - +let trainingWords = {}; function preload() { // Load the handPose model handPose = ml5.handPose(); + + // setup the timeseries neural network + let options = { + outputs: ["label"], + task: "classification", + spatialData: "true", + debug: "true", + learningRate: 0.001, + }; + model = ml5.timeSeries(options); } function setup() { @@ -37,50 +46,36 @@ function setup() { // place UI elements UI(); - // set backend as either webgl or cpu - ml5.setBackend('webgl') - // use handpose model on video handPose.detectStart(video, gotHands); - - // setup the timeseries neural network - let options = { - outputs: ['label'], - task: 'classification', - dataModality: 'spatial', - debug: 'true', - learningRate: 0.001, - }; - model = ml5.timeSeries(options); - } function draw() { // draw video on frame image(video, 0, 0, width, height); - + drawPredictedWord(); // if hands are found then start recording - if(hands.length>0 && recording_finished == false){ - if (sequence.length <= seqlength){ + if (hands.length > 0 && recordingFinished == false) { + if (sequence.length <= seqLength) { // get coordinates from hands (21 points) handpoints = drawPoints(); sequence.push(handpoints); - // once sequence reaches the seqlength, add sequence as just one X value - } else if (sequence.length>0){ + // once sequence reaches the seqLength, add sequence as just one X value + } else if (sequence.length > 0) { // get the training word from the input box - let train_word = nameField.value() + let train_word = nameField.value(); // if there is a word currently in the box then add data with that label - if (train_word.length > 0){ + if (train_word.length > 0) { // add data to the model - let target = 
{label:train_word} + let target = { label: train_word }; model.addData(sequence, target); - trainingWordsUpdate() + trainingWordsUpdate(); - // if there is no word in the box then classify instead + // if there is no word in the box then classify instead } else { // classify the data model.classify(sequence, gotResults); @@ -88,26 +83,26 @@ function draw() { // reset the sequence sequence = []; - recording_finished = true; + recordingFinished = true; } - // can only record again when hand is out of frame + // can only record again when hand is out of frame } else { - if (hands.length == 0){ - recording_finished = false; + if (hands.length == 0) { + recordingFinished = false; } } } -function drawPoints(){ - let handpoints = [] +function drawPoints() { + let handpoints = []; // iterate through both hands for (let i = 0; i < hands.length; i++) { let hand = hands[i]; for (let j = 0; j < hand.keypoints.length; j++) { // access the keypoints in the hand let keypoint = hand.keypoints[j]; - handpoints.push(keypoint.x,keypoint.y) + handpoints.push(keypoint.x, keypoint.y); fill(0, 255, 0); noStroke(); @@ -115,9 +110,9 @@ function drawPoints(){ } } // assign to a different variable before clearing - const output = handpoints; - handpoints = []; - + let output = handpoints; + handpoints = []; + return output; } @@ -127,30 +122,13 @@ function gotHands(results) { hands = results; } -function keyPressed(){ - if (key == 's'){ - model.save('hello'); - } - if (key == 'z'){ - model.saveData(); - } - - if (key == 't'){ - model.normalizeData(); - let options = { - epochs: 100 - } - model.train(options,whileTraining,finishedTraining); - } -} - -function trainModelAndSave(){ +function trainModelAndSave() { model.normalizeData(); let options = { - epochs: 100 - } - model.train(options,whileTraining,finishedTraining); - nameField.value('') + epochs: 100, + }; + model.train(options, whileTraining, finishedTraining); + nameField.value(""); } function whileTraining(epoch) { @@ -158,60 +136,57 
@@ function whileTraining(epoch) { } function finishedTraining() { - console.log('finished training.'); - model.save('model'); + console.log("finished training."); + model.save("model"); } -function gotResults(results){ - predicted_word = results[0].label - console.log(predicted_word) - text(predicted_word, 200,200) +function gotResults(results) { + predictedWord = results[0].label; + console.log(predictedWord); + text(predictedWord, 200, 200); } -function UI(){ - nameField = createInput('') - nameField.attribute('placeholder', 'Type the word to train') - nameField.position(110, 500) - nameField.size(250) +function UI() { + nameField = createInput(""); + nameField.attribute("placeholder", "Type the word to train"); + nameField.position(110, 500); + nameField.size(250); instructionP = createP( 'I want to train:

1.) Type any word you want to pair with a gesture, e.g. "HELLO"
2.) Do the gesture associated to the word, make sure to do it until the points disappear.
3.) Move your hand out of the frame and repeat the gesture, do this multiple times
4.) Do the same for other words e.g. "BYE"
5.) Once all data is collected, press Train and Save

Tip: have at least 5 datasets for each word' ); instructionP.style("width", "640px"); - dataCountsP = createP( - "-> After the gesture a tally will appear here <-" - ); + dataCountsP = createP("-> After the gesture a tally will appear here <-"); - train_but = createButton('Train and Save'); + train_but = createButton("Train and Save"); train_but.mouseClicked(trainModelAndSave); train_but.style("font-family", "Georgia"); train_but.style("font-size", "20px"); - train_but.position(500, 490) + train_but.position(500, 490); } -function drawPredictedWord(){ - textSize(100) - fill(255) - text(predicted_word, 100, height/2) +function drawPredictedWord() { + textSize(100); + fill(255); + text(predictedWord, 100, height / 2); } -function trainingWordsUpdate(){ - let temp_word = nameField.value(); - console.log(Object.keys(training_words)); - if (!(temp_word in training_words)){ - training_words[temp_word] = 1; - console.log('here') +function trainingWordsUpdate() { + let tempWord = nameField.value(); + console.log(Object.keys(trainingWords)); + if (!(tempWord in trainingWords)) { + trainingWords[tempWord] = 1; } else { - console.log(training_words[temp_word]) - training_words[temp_word]++; + trainingWords[tempWord]++; } - let counts = '' - let keys = Object.keys(training_words) - keys.forEach(element => { - counts += element + ' : ' + training_words[element] + "
" - }); - dataCountsP.html( - counts - ); -} \ No newline at end of file + let counts = ""; + let keys = Object.keys(trainingWords); + console.log("keys", keys); + + for (let k of keys) { + counts += k + " : " + trainingWords[k] + "
"; + } + + dataCountsP.html(counts); +} diff --git a/examples/timeSeries-load-model-hand-gestures/index.html b/examples/timeSeries-load-model-hand-gestures/index.html index b9aa65eb..92363d69 100644 --- a/examples/timeSeries-load-model-hand-gestures/index.html +++ b/examples/timeSeries-load-model-hand-gestures/index.html @@ -11,12 +11,35 @@ - ml5.js Sign Language Neural Network load model + ml5.js Time Series Hand Gesture load model +
+

+ This example loads a model that is trained with ASL hand gestures for + Hello and Goodbye.
+
+ + Instructions:
+ 1.) Use one hand to do a gesture in front of the camera
+ 2.) Wait for the points to disappear or the prediction appears on + screen
+ 3.) To predict again, remove your hands in the frame and do the gesture + again

+ + How to do gestures for Hello and Goodbye in ASL:
+ Hello: + https://babysignlanguage.com/dictionary/hello/
+ Goodbye: + https://babysignlanguage.com/dictionary/goodbye/
+

diff --git a/examples/timeSeries-load-model-hand-gestures/sketch.js b/examples/timeSeries-load-model-hand-gestures/sketch.js index 61963a9e..946ca76d 100644 --- a/examples/timeSeries-load-model-hand-gestures/sketch.js +++ b/examples/timeSeries-load-model-hand-gestures/sketch.js @@ -3,46 +3,51 @@ * Learn more about the ml5.js project: https://ml5js.org/ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md * - * This example demonstrates loading a Sign Language classifier through ml5.TimeSeries. + * This example demonstrates loading a Hand Gesture classifier through ml5.TimeSeries. + * This example is trained with the ASL gestures for Hello and Goodbye + * + * Reference to sign hello and goodbye in ASL: + * Hello: https://babysignlanguage.com/dictionary/hello/ + * Goodbye: https://babysignlanguage.com/dictionary/goodbye/ */ // change this to make the recording longer -const seqlength = 50; - +let seqLength = 50; let handPose; let video; let hands = []; let sequence = []; -let recording_finished = false; -let predicted_word = '' +let recordingFinished = false; +let predictedWord = ""; function preload() { // Load the handPose model handPose = ml5.handPose(); + + // setup the timeseries neural network + let options = { + task: "classification", + dataModality: "spatial", + spatialData: "true", + }; + + model = ml5.timeSeries(options); } function setup() { - createCanvas(640, 480); + let canvas = createCanvas(640, 480); + canvas.parent("canvasDiv"); // create video capture video = createCapture(VIDEO); video.size(640, 480); video.hide(); - ml5.setBackend('webgl') - handPose.detectStart(video, gotHands); - - let options = { - task: 'classification', - dataModality: 'spatial', - }; - - model = ml5.timeSeries(options); // setup the model files to load - const modelDetails = { + let modelDetails = { model: "model/model.json", metadata: "model/model_meta.json", weights: "model/model.weights.bin", @@ -52,45 +57,45 @@ function 
setup() { model.load(modelDetails, modelLoaded); } // call back for load model -function modelLoaded(){ - console.log('model loaded!') +function modelLoaded() { + console.log("model loaded!"); } function draw() { // draw video on the canvas image(video, 0, 0, width, height); - + // put the text on screen after a prediction - placePredictedText() + placePredictedText(); // if hands are found then start recording - if(hands.length>0 && recording_finished == false){ - if (sequence.length <= seqlength){ + if (hands.length > 0 && recordingFinished == false) { + if (sequence.length <= seqLength) { // get coordinates from hands (21 points) handpoints = drawPoints(); sequence.push(handpoints); - // once sequence reaches the seqlength, add sequence as just one X value - } else if (sequence.length>0){ + // once sequence reaches the seqLength, add sequence as just one X value + } else if (sequence.length > 0) { // classify based on the collected data model.classify(sequence, gotResults); - + // reset the sequence sequence = []; - recording_finished = true; + recordingFinished = true; } - // can only record again when hand is out of frame + // can only record again when hand is out of frame } else { - if (hands.length == 0){ - recording_finished = false; + if (hands.length == 0) { + recordingFinished = false; } } } // draw the points on the hands -function drawPoints(){ - let handpoints = [] +function drawPoints() { + let handpoints = []; for (let i = 0; i < hands.length; i++) { let hand = hands[i]; for (let j = 0; j < hand.keypoints.length; j++) { @@ -98,11 +103,12 @@ function drawPoints(){ fill(0, 255, 0); noStroke(); circle(keypoint.x, keypoint.y, 5); - handpoints.push(keypoint.x,keypoint.y) + handpoints.push(keypoint.x, keypoint.y); } } - const output = handpoints; - handpoints = []; return output; + let output = handpoints; + handpoints = []; + return output; } // Callback function for when handPose outputs data @@ -112,15 +118,15 @@ function gotHands(results) { } // call 
back for accessing the results -function gotResults(results){ - predicted_word = results[0].label - console.log(predicted_word) - text(predicted_word, 100,100) +function gotResults(results) { + predictedWord = results[0].label; + console.log(predictedWord); + text(predictedWord, 100, 100); } // for drawing text on screen -function placePredictedText(){ - textSize(100) - fill(255) - text(predicted_word, 100, height/2) -} \ No newline at end of file +function placePredictedText() { + textSize(100); + fill(255); + text(predictedWord, 100, height / 2); +} diff --git a/examples/timeSeries-stock-prediction/sketch.js b/examples/timeSeries-stock-prediction/sketch.js deleted file mode 100644 index 43776191..00000000 --- a/examples/timeSeries-stock-prediction/sketch.js +++ /dev/null @@ -1,223 +0,0 @@ -/* - * 👋 Hello! This is an ml5.js example made and shared with ❤️. - * Learn more about the ml5.js project: https://ml5js.org/ - * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md - * - * This example demonstrates Training a Stock Price Predictor through ml5.TimeSeries. 
- */ - -let classifer; -let data; -let data_index; -let seqlength = 8; -let seq = []; -let x=0; -let y= 0; -let train = false; -let training_done = false; - -// load JSON data with same formatting from the internet, this means -// loadData() cannot yet be used as it is formatted differently -function preload(){ - json_data = loadJSON('stock_data.json'); -} - - -function setup() { - // just get the data property from json - data = json_data.data; - - // p5 and UI elements - UI(); - createCanvas(640, 480); - background(220); - - // set framerate to a constant value - frameRate(30); - - // set the backend to either webgl or cpu - ml5.setBackend("webgl"); - - // set the options to initialize timeSeries Neural Network - let options = { - task: 'regression', - dataModality: "sequential", - debug: 'true', - learningRate: 0.01, - output:['label'] - }; - model = ml5.timeSeries(options); - - // iterate through data using simple sliding window algorithm - data_index = seqlength - 1; - while(data_index < data.length-1){ - // get the values [seqlength] steps before current index, collect and add - for (let x = seqlength -1; x >= 0; x--){ - let curr = data[data_index - x]; - // choose from the raw data what you want to to feed to the model - let inputs = { - Open: curr.Open, - High: curr.High, - Low: curr.Low, - Close: curr.Close, - Volume: curr.Volume - } - - // once collected all data into an array to make it into a sequence - // the format of the sequence is like this [{},{},...,{}] - // this is the X value - seq.push(inputs) - } - - // the Y value to train is the value that comes after the sequence - let target = data[data_index + 1]; - - // select the outputs you want to get, multiple outputs are possible - let output = { - Open: target.Open, - High: target.High, - Low: target.Low, - Close: target.Close, - Volume: target.Volume - } - - // feed data into the model - model.addData(seq, output) - - // reset the sequence so new values can be added - seq = [] - - // iterate 
through the whole dataset moving the sliding window in each iteration - data_index++; - } - // normalize the data after adding everything - model.normalizeData() -} - -function trainData(){ - model.normalizeData() - let options = { - epochs: 60, - } - model.train(options, finishedTraining); -} - -function finishedTraining(){ - console.log("Training Done!") - training_done = true; -} - -function predictData(){ - seq = []; - - // choose the most recent sequences - let latest = data.slice(-seqlength) - for (let x = 0; x < seqlength ; x++){ - let curr = latest[x]; - // select the same properties for inputs - let inputs = { - Open: curr.Open, - High: curr.High, - Low: curr.Low, - Close: curr.Close, - Volume: curr.Volume - } - // add them to one array to make them a sequence - seq.push(inputs) - } - - // use the sequence to predict - model.predict(seq, gotResults); -} - -// put the new data in the dataset so this will be considered for any new predictions -function gotResults(results) { - console.log(results); - addNewData(results); -} - -// code for adding new data to the dataset to be used for future prediction -function addNewData(results){ - let date_old = data[data.length-1].Date - let date = new Date(date_old); - date.setDate(date.getDate() + 1); - let nextDateStr = date.toISOString().split('T')[0]; - new_values = { - "Date": nextDateStr, - "Open": parseFloat(results[0].value), - "High": parseFloat(results[1].value), - "Low": parseFloat(results[2].value), - "Close": parseFloat(results[3].value), - "Volume": parseFloat(results[4].value), - }, - data.push(new_values) -} - -function draw() { - // draw some helpful visualizations - updatedUI() -} - -// create custom line graph for stock close prices -let py = 300; -let px = 80; -function updatedUI(){ - if (y < data.length && train){ - push() - fill(220) - noStroke() - rect(100,300,300,70) - rect(50,350,600,70) - pop() - - text(" Date: " + data[y].Date + " Close value: " + data[y].Close.toFixed(1),150,350) - text('Open: 
' + data[y].Open.toFixed(1),80,400) - text('High: ' + data[y].High.toFixed(1),180,400) - text('Low: ' + data[y].Low.toFixed(1),280,400) - text('Close: ' + data[y].Close.toFixed(1),380,400) - text('Volume: ' + data[y].Volume.toFixed(1),480,400) - - point = data[y].Close; - cy = map(point, 90, 120, 250, 70) - cx = x+80 - push() - if(training_done){ - fill(144,238,144); - } else { - fill(0) - } - ellipse(cx,cy,5,5) - line(px,py,cx,cy); - line(x+80,300,x+80,cy) - pop() - - px = cx - py = cy - y +=1; - x += 8; - } - - // if you havent trained yet, you cannot predict - if (train){ - predict.removeAttribute('disabled'); - } else { - predict.attribute('disabled','true'); - } -} - -// Buttons on screen -function UI(){ - - rec_circle = createButton('Open Data and Train'); - rec_circle.mouseClicked(() => {train = true; trainData()}); - rec_circle.style("font-family", "Georgia"); - rec_circle.style("font-size", "15px"); - rec_circle.position(20,20) - - predict = createButton('Predict Close for Next Day'); - predict.mouseClicked(predictData); - predict.style("font-family", "Georgia"); - predict.style("font-size", "15px"); - predict.position(200,440) - -} \ No newline at end of file diff --git a/examples/timeSeries-stock-prediction/stock_data.json b/examples/timeSeries-stock-prediction/stock_data.json deleted file mode 100644 index 7a928e11..00000000 --- a/examples/timeSeries-stock-prediction/stock_data.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "data": [ - {"Date": "2024-01-01", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.0, "Volume": 1500000}, - {"Date": "2024-01-02", "Open": 102.0, "High": 108.0, "Low": 97.0, "Close": 104.5, "Volume": 1550000}, - {"Date": "2024-01-03", "Open": 104.5, "High": 110.0, "Low": 99.0, "Close": 106.0, "Volume": 1600000}, - {"Date": "2024-01-04", "Open": 106.0, "High": 112.0, "Low": 100.0, "Close": 103.0, "Volume": 1650000}, - {"Date": "2024-01-05", "Open": 103.0, "High": 109.0, "Low": 98.0, "Close": 100.0, "Volume": 1700000}, - {"Date": 
"2024-01-06", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 99.0, "Volume": 1750000}, - {"Date": "2024-01-07", "Open": 99.0, "High": 104.0, "Low": 94.0, "Close": 101.5, "Volume": 1800000}, - {"Date": "2024-01-08", "Open": 101.5, "High": 107.0, "Low": 96.0, "Close": 104.0, "Volume": 1850000}, - {"Date": "2024-01-09", "Open": 104.0, "High": 109.0, "Low": 99.0, "Close": 107.0, "Volume": 1900000}, - {"Date": "2024-01-10", "Open": 107.0, "High": 113.0, "Low": 102.0, "Close": 105.0, "Volume": 1950000}, - {"Date": "2024-01-11", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 103.5, "Volume": 2000000}, - {"Date": "2024-01-12", "Open": 103.5, "High": 108.0, "Low": 98.0, "Close": 100.0, "Volume": 2050000}, - {"Date": "2024-01-13", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.5, "Volume": 2100000}, - {"Date": "2024-01-14", "Open": 102.5, "High": 108.0, "Low": 98.0, "Close": 105.0, "Volume": 2150000}, - {"Date": "2024-01-15", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 107.5, "Volume": 2200000}, - {"Date": "2024-01-16", "Open": 107.5, "High": 113.0, "Low": 102.0, "Close": 104.0, "Volume": 2250000}, - {"Date": "2024-01-17", "Open": 104.0, "High": 109.0, "Low": 99.0, "Close": 101.0, "Volume": 2300000}, - {"Date": "2024-01-18", "Open": 101.0, "High": 106.0, "Low": 94.0, "Close": 100.0, "Volume": 2350000}, - {"Date": "2024-01-19", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.0, "Volume": 2400000}, - {"Date": "2024-01-20", "Open": 102.0, "High": 107.0, "Low": 97.0, "Close": 105.0, "Volume": 2450000}, - {"Date": "2024-01-21", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 107.5, "Volume": 2500000}, - {"Date": "2024-01-22", "Open": 107.5, "High": 113.0, "Low": 102.0, "Close": 104.0, "Volume": 2550000}, - {"Date": "2024-01-23", "Open": 104.0, "High": 109.0, "Low": 99.0, "Close": 102.0, "Volume": 2600000}, - {"Date": "2024-01-24", "Open": 102.0, "High": 108.0, "Low": 97.0, "Close": 104.5, "Volume": 2650000}, - {"Date": "2024-01-25", 
"Open": 104.5, "High": 110.0, "Low": 99.0, "Close": 107.0, "Volume": 2700000}, - {"Date": "2024-01-26", "Open": 107.0, "High": 113.0, "Low": 102.0, "Close": 105.0, "Volume": 2750000}, - {"Date": "2024-01-27", "Open": 105.0, "High": 110.0, "Low": 100.0, "Close": 103.0, "Volume": 2800000}, - {"Date": "2024-01-28", "Open": 103.0, "High": 108.0, "Low": 98.0, "Close": 101.5, "Volume": 2850000}, - {"Date": "2024-01-29", "Open": 101.5, "High": 106.0, "Low": 96.0, "Close": 100.0, "Volume": 2900000}, - {"Date": "2024-01-30", "Open": 100.0, "High": 105.0, "Low": 95.0, "Close": 102.0, "Volume": 2950000}, - {"Date": "2024-01-31", "Open": 102.0, "High": 108.0, "Low": 97.0, "Close": 104.0, "Volume": 3000000} - ] - } - \ No newline at end of file diff --git a/examples/timeSeries-train-mouse-gesture/index.html b/examples/timeSeries-train-mouse-gesture/index.html new file mode 100644 index 00000000..acdfde38 --- /dev/null +++ b/examples/timeSeries-train-mouse-gesture/index.html @@ -0,0 +1,43 @@ + + + + + + + + ml5.js Time Series Train Mouse Gesture classifier Example + + + + + + +
+ + + + +

+ Instructions:
+ 1.) Press the "Record Circle" or "Record Square" and start drawing until + the ink runs out
+ 2.) Draw multiple times for each shape
2.) Press "Train" and wait for + training to finish
+ 3.) Draw again to predict drawn shape

+ Tip: Collect at least 5 drawings for each: +

+ + + + diff --git a/examples/timeSeries-train-mouse-gesture/sketch.js b/examples/timeSeries-train-mouse-gesture/sketch.js new file mode 100644 index 00000000..0af2dac0 --- /dev/null +++ b/examples/timeSeries-train-mouse-gesture/sketch.js @@ -0,0 +1,203 @@ +/* + * 👋 Hello! This is an ml5.js example made and shared with ❤️. + * Learn more about the ml5.js project: https://ml5js.org/ + * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md + * + * This example demonstrates How to train your own mouse gesture classifier through ml5.TimeSeries. + */ + +let model; +let counts = { + circleDataCount: 0, + squareDataCount: 0, +}; +let currShape = "circle"; +let state = "collection"; + +let pressedOnce = true; +let frameCount = 0; +let datapoints; +let sequence = []; +let recCircle, recSquare, trainBut; + +// Training Data lenghts +let ink_multiplier = 3; +let num_seq = 20; + +function preload() { + let options = { + inputs: ["x", "y"], + outputs: ["label"], + task: "classification", + spatialData: "true", + debug: "true", + learningRate: 0.005, + }; + + model = ml5.timeSeries(options); +} + +function setup() { + // p5 js elements + let canvas = createCanvas(600, 400); + canvas.parent("canvasDiv"); + background(220); + UI(); + + // set framerate to constant rate for constant data collection + frameRate(60); +} + +function draw() { + // record data when the mouse is pressed inside the canvas + if (mouseIsPressed && pressedOnce && mouseY < 400 && mouseX < 600) { + // draw lines through coordinates + line(pmouseX, pmouseY, mouseX, mouseY); + + frameCount++; + + let inputs = { x: mouseX, y: mouseY }; + sequence.push(inputs); + + if (sequence.length == num_seq * ink_multiplier) { + pressedOnce = false; + frameCount = 0; + + // if state is collection, add whole sequence as X, and shape as Y + if (state == "collection") { + let target = { label: currShape }; + model.addData(sequence, target); + + // add to the count for each + 
counts[currShape + "DataCount"] += 1; + console.log(counts); + updateDataCountUI(); + + // reset the screen + background(220); + textSize(20); + fill(0); + text("Recording: " + currShape, 50, 50); + // if prediction, classify using the whole sequence + } else if (state == "prediction") { + model.classify(sequence, gotResults); + + background(220); + } + + // reset the sequence + sequence = []; + } + } + inkBar(); +} + +function trainModel() { + // normalize Data first before Training + model.normalizeData(); + + // set the number of epochs for training + let options = { + epochs: 40, + }; + model.train(options, whileTraining, finishedTraining); + + background(220); + state = "training"; + text("Training...", 50, 50); + recCircle.style("background-color", ""); + recSquare.style("background-color", ""); + trainBut.style("background-color", "#f0f0f0"); +} + +function whileTraining(epoch, loss) { + console.log(epoch); +} + +function finishedTraining() { + background(220); + text("Training Finished, Draw again to predict", 50, 50); + state = "prediction"; +} + +function gotResults(results) { + let label = results[0].label; + + fill(0); + text("Prediction: " + label, 50, 50); +} + +// code to signify drawing can be done again +function mouseReleased() { + pressedOnce = true; +} + +////////////// UI Elements //////////// + +// code to visualize how much ink left +function inkBar() { + datapoints = map(frameCount, 0, ink_multiplier * num_seq, 0, num_seq); + + bar_height = 250; + height_miltiplier = bar_height / num_seq; + push(); + fill(0); + textSize(15); + text("Ink:", 550, 90); + rect(550, 100, 25, num_seq * height_miltiplier); + fill(255); + rect(550, 100, 25, datapoints * height_miltiplier); + pop(); +} + +// code for UI elements such as buttons +function UI() { + textSize(20); + + recCircle = select("#recCircle"); + recSquare = select("#recSquare"); + trainBut = select("#trainBut"); + + recCircle.mouseClicked(recordCircle); + recCircle.style("background-color", 
"#f0f0f0"); + recSquare.mouseClicked(recordSquare); + trainBut.mouseClicked(trainModel); + + function recordCircle() { + state = "collection"; + currShape = "circle"; + + background(220); + text("Recording: circle", 50, 50); + recCircle.style("background-color", "#f0f0f0"); + recSquare.style("background-color", ""); + trainBut.style("background-color", ""); + } + + function recordSquare() { + state = "collection"; + currShape = "square"; + + background(220); + text("Recording: square", 50, 50); + recCircle.style("background-color", ""); + recSquare.style("background-color", "#f0f0f0"); + trainBut.style("background-color", ""); + } + dataCountsP = createP( + "circle data: " + + counts.circleDataCount + + "
square data: " + + counts.squareDataCount + ); +} + +// Update the HTML UI with the current data counts +function updateDataCountUI() { + dataCountsP.html( + "circle data: " + + counts.circleDataCount + + "
square data: " + + counts.squareDataCount + ); +} diff --git a/examples/timeSeries-train-quickdraw/index.html b/examples/timeSeries-train-quickdraw/index.html deleted file mode 100644 index dd31406d..00000000 --- a/examples/timeSeries-train-quickdraw/index.html +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - - - ml5.js Time Series Train Quickdraw Example - - - - - - - - diff --git a/examples/timeSeries-train-quickdraw/sketch.js b/examples/timeSeries-train-quickdraw/sketch.js deleted file mode 100644 index aa241eed..00000000 --- a/examples/timeSeries-train-quickdraw/sketch.js +++ /dev/null @@ -1,221 +0,0 @@ -/* - * 👋 Hello! This is an ml5.js example made and shared with ❤️. - * Learn more about the ml5.js project: https://ml5js.org/ - * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md - * - * This example demonstrates How to train your own quickdraw classifier through ml5.TimeSeries. - */ - -let model; -let counts = { - Circle_datacount:0, - Square_datacount:0, -} -let curr_shape = 'Circle' -let state = 'collection'; -let pressedOnce = true; -let frame_count = 0; -let datapoints; -let sequence = []; - -// Training Data lenghts -let ink_multiplier = 3; -let num_seq = 20; - -function setup() { - // p5 js elements - createCanvas(600, 400); - background(220); - UI(); - - // set framerate to constant rate for constant data collection - frameRate(60); - - // set the backend to either webgl or cpu - ml5.setBackend('webgl'); - - // set the options to initialize timeSeries Neural Network - let options = { - inputs: ['x','y'], - outputs: ['label'], - task: 'classification', - dataModality: 'spatial', - debug: 'true', - learningRate: 0.005 - }; - - model = ml5.timeSeries(options); -} - -function draw() { - // record data when the mouse is pressed inside the canvas - if (mouseIsPressed && pressedOnce && mouseY<400 && mouseX<600){ - - // draw lines through coordinates - line(pmouseX, pmouseY, mouseX,mouseY); - - frame_count++; - - let 
inputs = {x: mouseX,y: mouseY}; - sequence.push(inputs); - - if (sequence.length == num_seq*ink_multiplier){ - pressedOnce = false; - frame_count = 0 - - // if state is collection, add whole sequence as X, and shape as Y - if (state == 'collection'){ - let target = {label: curr_shape}; - model.addData(sequence, target); - - // add to the count for each - counts[`${curr_shape}_datacount`] += 1; - updateDataCountUI() - - // reset the screen - background(220); - textSize(20) - fill(0); - text("Recording: " + curr_shape, 50,50); - // if prediction, classify using the whole sequence - } else if (state == 'prediction'){ - model.classify(sequence, gotResults) - - background(220); - } - - // reset the sequence - sequence = []; - } - } - inkBar(); -} - -function trainModel(){ - // normalize Data first before Training - model.normalizeData(); - - // set the number of epochs for training - let options = { - epochs: 40, - } - model.train(options,whileTraining,finishedTraining); - - background(220); - state = 'training'; - text("Training...", 50,50); - rec_circle.style('background-color', ''); - rec_square.style("background-color",''); - train_but.style('background-color', '#f0f0f0'); -} - -function whileTraining(epoch, loss) { - console.log(epoch); -} - -function finishedTraining() { - background(220) - text("Training Finished, Draw again to predict", 50,50); - state = 'prediction'; -} - -function gotResults(results) { - const label = results[0].label; - - fill(0); - text("Prediction: " + label, 50,50); -} - -// code to signify drawing can be done again -function mouseReleased(){ - pressedOnce = true; -} - -////////////// UI Elements //////////// - -// code to visualize how much ink left -function inkBar(){ - datapoints = map(frame_count,0,ink_multiplier*num_seq, 0,num_seq) - - bar_height = 250 - height_miltiplier = bar_height/num_seq - push() - fill(0) - textSize(15) - text('Ink:', 550,90) - rect(550,100,25,num_seq*height_miltiplier) - fill(255) - 
rect(550,100,25,datapoints*height_miltiplier) - pop() -} - -// code for UI elements such as buttons -function UI(){ - textSize(20) - - rec_circle = createButton('Record Circle'); - rec_circle.mouseClicked(recordCircle); - rec_circle.style("font-family", "Georgia"); - rec_circle.style("font-size", "20px"); - rec_circle.style("background-color",'#f0f0f0'); - - rec_square = createButton('Record Square'); - rec_square.mouseClicked(recordSquare); - rec_square.style("font-family", "Georgia"); - rec_square.style("font-size", "20px"); - - train_but = createButton('Train and Predict'); - train_but.mouseClicked(trainModel); - train_but.style("font-family", "Georgia"); - train_but.style("font-size", "20px"); - - function recordCircle(){ - state = 'collection'; - curr_shape = 'Circle'; - - background(220); - text("Recording: Circle", 50,50); - rec_circle.style("background-color",'#f0f0f0'); - rec_square.style('background-color', ''); - train_but.style('background-color', ''); - } - - function recordSquare(){ - state = 'collection'; - curr_shape = 'Square'; - - background(220); - text("Recording: Square", 50,50); - rec_circle.style('background-color', ''); - rec_square.style("background-color",'#f0f0f0'); - train_but.style('background-color', ''); - } - - instructionP = createP( - 'Instructions:
1.) Press the "Record Circle" or "Record Square" and start drawing until the ink runs out
2.) Draw multiple times for each shape
2.) Press "Train" and wait for training to finish
3.) Draw again to predict drawn shape

Tip: Collect at least 5 drawings for each:' - ); - instructionP.style("width", "640px"); - dataCountsP = createP( - "circle data: " + - counts.Circle_datacount + - "
square data: " + - counts.Square_datacount - ); -} - -// Update the HTML UI with the current data counts -function updateDataCountUI() { - dataCountsP.html( - "circle data: " + - counts.Circle_datacount + - "
square data: " + - counts.Square_datacount - ); -} - -function keyPressed(){ - if (key == 's'){ - - model.save('hello'); - } -} diff --git a/examples/timeSeries-stock-prediction/index.html b/examples/timeSeries-weather-prediction/index.html similarity index 91% rename from examples/timeSeries-stock-prediction/index.html rename to examples/timeSeries-weather-prediction/index.html index d27d162b..50c4e895 100644 --- a/examples/timeSeries-stock-prediction/index.html +++ b/examples/timeSeries-weather-prediction/index.html @@ -11,7 +11,7 @@ - ml5.js Time Series Stock Prediction Example + ml5.js Time Series Weather Prediction Example diff --git a/examples/timeSeries-weather-prediction/weather_data.json b/examples/timeSeries-weather-prediction/weather_data.json new file mode 100644 index 00000000..a45429a3 --- /dev/null +++ b/examples/timeSeries-weather-prediction/weather_data.json @@ -0,0 +1,196 @@ +{ + "data": [ + { + "date": "2024-08-01T00:00:00Z", + "temperature": 28.0, + "humidity": 50, + "wind_speed": 3.0, + "pressure": 1015, + "precipitation": 0.0 + }, + { + "date": "2024-08-01T01:00:00Z", + "temperature": 27.5, + "humidity": 52, + "wind_speed": 4.0, + "pressure": 1014, + "precipitation": 0.0 + }, + { + "date": "2024-08-01T02:00:00Z", + "temperature": 27.0, + "humidity": 55, + "wind_speed": 5.0, + "pressure": 1013, + "precipitation": 0.0 + }, + { + "date": "2024-08-01T03:00:00Z", + "temperature": 26.5, + "humidity": 60, + "wind_speed": 6.0, + "pressure": 1012, + "precipitation": 2.0 + }, + { + "date": "2024-08-01T04:00:00Z", + "temperature": 26.0, + "humidity": 65, + "wind_speed": 8.0, + "pressure": 1010, + "precipitation": 5.0 + }, + { + "date": "2024-08-01T05:00:00Z", + "temperature": 25.5, + "humidity": 70, + "wind_speed": 10.0, + "pressure": 1008, + "precipitation": 10.0 + }, + { + "date": "2024-08-01T06:00:00Z", + "temperature": 25.0, + "humidity": 75, + "wind_speed": 12.0, + "pressure": 1006, + "precipitation": 15.0 + }, + { + "date": "2024-08-01T07:00:00Z", 
+ "temperature": 24.5, + "humidity": 80, + "wind_speed": 14.0, + "pressure": 1004, + "precipitation": 20.0 + }, + { + "date": "2024-08-01T08:00:00Z", + "temperature": 24.0, + "humidity": 85, + "wind_speed": 15.0, + "pressure": 1002, + "precipitation": 25.0 + }, + { + "date": "2024-08-01T09:00:00Z", + "temperature": 23.5, + "humidity": 90, + "wind_speed": 17.0, + "pressure": 1000, + "precipitation": 30.0 + }, + { + "date": "2024-08-01T10:00:00Z", + "temperature": 23.0, + "humidity": 95, + "wind_speed": 20.0, + "pressure": 998, + "precipitation": 35.0 + }, + { + "date": "2024-08-01T11:00:00Z", + "temperature": 24.0, + "humidity": 85, + "wind_speed": 10.0, + "pressure": 1005, + "precipitation": 10.0 + }, + { + "date": "2024-08-01T12:00:00Z", + "temperature": 25.0, + "humidity": 75, + "wind_speed": 7.0, + "pressure": 1010, + "precipitation": 5.0 + }, + { + "date": "2024-08-01T13:00:00Z", + "temperature": 26.0, + "humidity": 65, + "wind_speed": 5.0, + "pressure": 1013, + "precipitation": 0.0 + }, + { + "date": "2024-08-01T14:00:00Z", + "temperature": 27.0, + "humidity": 60, + "wind_speed": 4.0, + "pressure": 1015, + "precipitation": 0.0 + }, + { + "date": "2024-08-01T15:00:00Z", + "temperature": 28.0, + "humidity": 50, + "wind_speed": 3.0, + "pressure": 1018, + "precipitation": 0.0 + }, + { + "date": "2024-08-01T16:00:00Z", + "temperature": 27.0, + "humidity": 55, + "wind_speed": 4.0, + "pressure": 1015, + "precipitation": 0.0 + }, + { + "date": "2024-08-01T17:00:00Z", + "temperature": 26.0, + "humidity": 60, + "wind_speed": 5.0, + "pressure": 1012, + "precipitation": 1.0 + }, + { + "date": "2024-08-01T18:00:00Z", + "temperature": 25.0, + "humidity": 70, + "wind_speed": 7.0, + "pressure": 1009, + "precipitation": 5.0 + }, + { + "date": "2024-08-01T19:00:00Z", + "temperature": 24.0, + "humidity": 80, + "wind_speed": 10.0, + "pressure": 1005, + "precipitation": 10.0 + }, + { + "date": "2024-08-01T20:00:00Z", + "temperature": 23.0, + "humidity": 90, + "wind_speed": 12.0, + 
"pressure": 1002, + "precipitation": 15.0 + }, + { + "date": "2024-08-01T21:00:00Z", + "temperature": 22.0, + "humidity": 95, + "wind_speed": 15.0, + "pressure": 999, + "precipitation": 20.0 + }, + { + "date": "2024-08-01T22:00:00Z", + "temperature": 21.0, + "humidity": 98, + "wind_speed": 18.0, + "pressure": 995, + "precipitation": 25.0 + }, + { + "date": "2024-08-01T23:00:00Z", + "temperature": 20.0, + "humidity": 100, + "wind_speed": 20.0, + "pressure": 992, + "precipitation": 30.0 + } + ] +} diff --git a/src/TimeSeries/index.js b/src/TimeSeries/index.js index 170f7931..f9472f0c 100644 --- a/src/TimeSeries/index.js +++ b/src/TimeSeries/index.js @@ -15,7 +15,7 @@ const DEFAULTS = { modelUrl: null, layers: [], task: null, - dataModality: null, + spatialData: false, debug: false, learningRate: 0.2, hiddenUnits: 16, @@ -56,6 +56,7 @@ class timeSeries { } async init() { + await tf.ready(); if (this.options.dataUrl) { await this.loadDataFromUrl(this.options.dataUrl); } else if (this.options.modelUrl) { @@ -63,8 +64,6 @@ class timeSeries { } return this; } - - /** * //////////////////////////////////////////////////////////// * Add and Format Data @@ -85,21 +84,20 @@ class timeSeries { - at the end of the adding data, the data is formatted to a sequence of objects similar to 1 of xinputs - - new parameter dataModality, either spatial or sequential, spatial uses cnn1d and sequential uses lstm - */ + - changed data Modality into spatialData so its a boolean, true if coordinate data and false if normal lstm + */ - addData(xInputs, yInputs, options = null){ + addData(xInputs, yInputs, options = null) { // 1. verify format between the three possible types of xinputs - const xs = tsUtils.verifyAndFormatInputs(xInputs,options,this.options); + const xs = tsUtils.verifyAndFormatInputs(xInputs, options, this.options); // 2. 
format the yInput - same logic as NN class - const ys = tsUtils.verifyAndFormatOutputs(yInputs,options,this.options); - + const ys = tsUtils.verifyAndFormatOutputs(yInputs, options, this.options); + // 3. add data to raw - this.neuralNetworkData.addData(xs,ys); + this.neuralNetworkData.addData(xs, ys); } - /** * //////////////////////////////////////////////////////////// * Train Data @@ -112,21 +110,24 @@ class timeSeries { let finishedTrainingCb; if (typeof optionsOrCallback === "object") { - options = optionsOrCallback; - if (typeof optionsOrWhileTraining === "function") { - whileTrainingCb = null; - finishedTrainingCb = callback || optionsOrWhileTraining; - } else { - finishedTrainingCb = optionsOrWhileTraining; - } - } else if (typeof optionsOrCallback === "function") { - whileTrainingCb = optionsOrCallback; + options = optionsOrCallback; + if (typeof optionsOrWhileTraining === "function") { + whileTrainingCb = null; + finishedTrainingCb = callback || optionsOrWhileTraining; + } else { finishedTrainingCb = optionsOrWhileTraining; + } + } else if (typeof optionsOrCallback === "function") { + whileTrainingCb = optionsOrCallback; + finishedTrainingCb = optionsOrWhileTraining; } else { - finishedTrainingCb = optionsOrCallback; + finishedTrainingCb = optionsOrCallback; } - return callCallback(this.trainInternal(options, whileTrainingCb), finishedTrainingCb); + return callCallback( + this.trainInternal(options, whileTrainingCb), + finishedTrainingCb + ); } async trainInternal(_options, whileTrainingCb) { @@ -187,9 +188,7 @@ class timeSeries { // then use those to create your architecture if (!this.neuralNetwork.isLayered) { // TODO: don't update this.options.layers - Linda - this.options.layers = this.createNetworkLayers( - this.options.layers - ); + this.options.layers = this.createNetworkLayers(this.options.layers); } // if the model does not have any layers defined yet @@ -212,12 +211,12 @@ class timeSeries { // this method does not get shape for images but 
instead for timesteps const { inputs } = this.options; - let inputShape; - if (typeof inputs === 'number'){ + let inputShape; + if (typeof inputs === "number") { inputShape = inputs; - } else if (Array.isArray(inputs) && inputs.length > 0){ + } else if (Array.isArray(inputs) && inputs.length > 0) { inputShape = inputs.length; //will be fed into the tensors later - } + } this.neuralNetworkData.createMetadata(inputShape); } @@ -228,7 +227,7 @@ class timeSeries { } convertTrainingDataToTensors() { - return this.neuralNetworkData.convertRawToTensors(this.data.training); + return this.neuralNetworkData.convertRawToTensors(this.data.training); } createNetworkLayers(layerJsonArray) { @@ -260,12 +259,11 @@ class timeSeries { addDefaultLayers() { let layers; const task = this.options.task; - const dataModality = this.options.dataModality; - let taskConditions = `${task}_${dataModality}`; - console.log(this.neuralNetworkData.meta.seriesShape) + const ifSpatialData = this.options.spatialData; + let taskConditions = `${task}_${ifSpatialData}`; switch (taskConditions.toLowerCase()) { // if the task is classification and spatial modality - case "classification_spatial": + case "classification_true": layers = [ { type: "conv1d", @@ -305,7 +303,7 @@ class timeSeries { return this.createNetworkLayers(layers); // if the task is classification and sequential modality - case "classification_sequential": + case "classification_false": layers = [ { type: "lstm", @@ -334,7 +332,7 @@ class timeSeries { return this.createNetworkLayers(layers); // if the task is regression - case "regression_spatial": + case "regression_true": layers = [ { type: "conv1d", @@ -374,7 +372,7 @@ class timeSeries { return this.createNetworkLayers(layers); - case "regression_sequential": + case "regression_false": layers = [ { type: "lstm", @@ -400,7 +398,7 @@ class timeSeries { ]; return this.createNetworkLayers(layers); - + default: console.log("no inputUnits or outputUnits defined"); layers = [ @@ -433,7 
+431,7 @@ class timeSeries { this.neuralNetwork.addLayer(layer); } - compile(){ + compile() { const LEARNING_RATE = this.options.learningRate; let options = {}; @@ -475,9 +473,11 @@ class timeSeries { } } - normalizeData() { - if (!this.neuralNetworkData.data.raw.length > 0){ - throw new Error('Empty Data Error: You Cannot Normalize/Train without adding any data! Please add data first') + async normalizeData() { + if (!this.neuralNetworkData.data.raw.length > 0) { + throw new Error( + "Empty Data Error: You Cannot Normalize/Train without adding any data! Please add data first" + ); } if (!this.neuralNetworkData.isMetadataReady) { this.createMetaData(); @@ -494,7 +494,6 @@ class timeSeries { // set isNormalized to true this.neuralNetworkData.meta.isNormalized = true; - } // //////// @@ -570,8 +569,15 @@ class timeSeries { const { meta } = this.neuralNetworkData; const inputHeaders = Object.keys(meta.inputs); - const formatted_inputs = tsUtils.verifyAndFormatInputs(_input,null,this.options); - const normalized_inputs = this.neuralNetworkData.normalizePredictData(formatted_inputs, meta.inputs); + const formatted_inputs = tsUtils.verifyAndFormatInputs( + _input, + null, + this.options + ); + const normalized_inputs = this.neuralNetworkData.normalizePredictData( + formatted_inputs, + meta.inputs + ); const output = tf.tensor(normalized_inputs); return output; @@ -635,11 +641,6 @@ class timeSeries { return unformattedResults; } - - - - - /** * //////////////////////////////////////////////////////////// * Save / Load Data @@ -648,7 +649,10 @@ class timeSeries { saveData(name, callback) { const args = handleArguments(name, callback); - return callCallback(this.neuralNetworkData.saveData(args.name), args.callback); + return callCallback( + this.neuralNetworkData.saveData(args.name), + args.callback + ); } async loadData(filesOrPath, callback) { @@ -657,12 +661,20 @@ class timeSeries { async loadDataFromUrl(dataUrl, inputs, outputs) { let json; - let dataFromUrl + let 
dataFromUrl; try { if (dataUrl.endsWith(".csv")) { - dataFromUrl = await this.neuralNetworkData.loadCSV(dataUrl, inputs, outputs); + dataFromUrl = await this.neuralNetworkData.loadCSV( + dataUrl, + inputs, + outputs + ); } else if (dataUrl.endsWith(".json")) { - dataFromUrl = await this.neuralNetworkData.loadJSON(dataUrl, inputs, outputs); + dataFromUrl = await this.neuralNetworkData.loadJSON( + dataUrl, + inputs, + outputs + ); } else if (dataUrl.includes("blob")) { dataFromUrl = await this.loadBlob(dataUrl, inputs, outputs); } else { @@ -674,12 +686,12 @@ class timeSeries { } dataFromUrl.map((item) => { - this.addData(item.xs, item.ys) - }) + this.addData(item.xs, item.ys); + }); this.createMetaData(); - this.prepareForTraining(); + this.prepareForTraining(); } /** @@ -690,12 +702,15 @@ class timeSeries { async save(name, callback) { const args = handleArguments(name, callback); - const modelName = args.string || 'model'; + const modelName = args.string || "model"; // save the model - return callCallback(Promise.all([ - this.neuralNetwork.save(modelName), - this.neuralNetworkData.saveMeta(modelName) - ]), args.callback); + return callCallback( + Promise.all([ + this.neuralNetwork.save(modelName), + this.neuralNetworkData.saveMeta(modelName), + ]), + args.callback + ); } /** @@ -707,10 +722,13 @@ class timeSeries { * @return {Promise} */ async load(filesOrPath, callback) { - return callCallback(Promise.all([ - this.neuralNetwork.load(filesOrPath), - this.neuralNetworkData.loadMeta(filesOrPath) - ]), callback); + return callCallback( + Promise.all([ + this.neuralNetwork.load(filesOrPath), + this.neuralNetworkData.loadMeta(filesOrPath), + ]), + callback + ); } /** diff --git a/src/TimeSeries/timeSeries.js b/src/TimeSeries/timeSeries.js index 9d7e8b29..fc560ecf 100644 --- a/src/TimeSeries/timeSeries.js +++ b/src/TimeSeries/timeSeries.js @@ -2,8 +2,6 @@ import * as tf from "@tensorflow/tfjs"; import { saveBlob } from "../utils/io"; import { randomGaussian } from 
"../utils/random"; - - /* Things changed from neural network class: @@ -106,7 +104,7 @@ class NeuralNetwork { const xs = TRAINING_OPTIONS.inputs; const ys = TRAINING_OPTIONS.outputs; - console.log('train',xs,ys); + console.log("train", xs, ys); const { batchSize, epochs, shuffle, validationSplit, whileTraining } = TRAINING_OPTIONS; @@ -193,13 +191,13 @@ class NeuralNetwork { }, ], }; - console.log('data.weightData',data.weightData); + console.log("data.weightData", data.weightData); await saveBlob( data.weightData, `${modelName}.weights.bin`, "application/octet-stream" ); - console.log('this.weightsManifest',this.weightsManifest) + console.log("this.weightsManifest", this.weightsManifest); await saveBlob( JSON.stringify(this.weightsManifest), `${modelName}.json`, @@ -217,7 +215,9 @@ class NeuralNetwork { if (filesOrPath instanceof FileList) { const files = Array.from(filesOrPath); // find the correct files - const model = files.find((file) => file.name.includes(".json") && !file.name.includes("_meta")); + const model = files.find( + (file) => file.name.includes(".json") && !file.name.includes("_meta") + ); const weights = files.find((file) => file.name.includes(".bin")); // load the model this.model = await tf.loadLayersModel( @@ -229,7 +229,7 @@ class NeuralNetwork { // Override the weights path from the JSON weightsManifest weightUrlConverter: (weightFileName) => { return filesOrPath.weights || weightFileName; - } + }, }) ); } else { diff --git a/src/TimeSeries/timeSeriesData.js b/src/TimeSeries/timeSeriesData.js index 3d7638c0..ab5f10a8 100644 --- a/src/TimeSeries/timeSeriesData.js +++ b/src/TimeSeries/timeSeriesData.js @@ -1,7 +1,7 @@ import * as tf from "@tensorflow/tfjs"; import axios from "axios"; import { saveBlob } from "../utils/io"; -import modelLoader from '../utils/modelLoader'; +import modelLoader from "../utils/modelLoader"; import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; import tsUtils from "./timeSeriesUtils"; @@ -25,7 +25,7 @@ class 
NeuralNetworkData { }; } - /** + /** * //////////////////////////////////////////////////////// * Add Data * //////////////////////////////////////////////////////// @@ -44,8 +44,6 @@ class NeuralNetworkData { }); } - - /** * //////////////////////////////////////////////////////// * Summarize Data @@ -136,10 +134,12 @@ class NeuralNetworkData { inputMeta[k].max = 1; } else if (inputMeta[k].dtype === "number") { let dataAsArray; - if (xsOrYs === 'ys'){ + if (xsOrYs === "ys") { dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]); - } else if (xsOrYs === 'xs'){ - dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); + } else if (xsOrYs === "xs") { + dataAsArray = this.data.raw.flatMap((item) => + item[xsOrYs].map((obj) => obj[k]) + ); } inputMeta[k].min = nnUtils.getMin(dataAsArray); inputMeta[k].max = nnUtils.getMax(dataAsArray); @@ -165,36 +165,36 @@ class NeuralNetworkData { this.meta.outputs = this.getInputMetaOneHot(this.meta.outputs, "ys"); } - /** + /** * getOneHotMeta * @param {Object} _inputsMeta * @param {"xs" | "ys"} xsOrYs * @return {Object} */ getInputMetaOneHot(_inputsMeta, xsOrYs) { - const inputsMeta = Object.assign({}, _inputsMeta); - - Object.entries(inputsMeta).forEach((arr) => { - // the key - const key = arr[0]; - // the value - const { dtype } = arr[1]; - - if (dtype === "string") { - const uniqueVals = [ - ...new Set(this.data.raw.map((obj) => obj[xsOrYs][key])), - ]; - const oneHotMeta = this.createOneHotEncodings(uniqueVals); - inputsMeta[key] = { - ...inputsMeta[key], - ...oneHotMeta, - }; - } - }); - return inputsMeta; + const inputsMeta = Object.assign({}, _inputsMeta); + + Object.entries(inputsMeta).forEach((arr) => { + // the key + const key = arr[0]; + // the value + const { dtype } = arr[1]; + + if (dtype === "string") { + const uniqueVals = [ + ...new Set(this.data.raw.map((obj) => obj[xsOrYs][key])), + ]; + const oneHotMeta = this.createOneHotEncodings(uniqueVals); + inputsMeta[key] = { + 
...inputsMeta[key], + ...oneHotMeta, + }; + } + }); + return inputsMeta; } - /** + /** * get the data units, inputshape and output units * @private * @param {Array} arrayShape @@ -239,7 +239,7 @@ class NeuralNetworkData { return units; } - /** + /** * Returns a legend mapping the * data values to oneHot encoded values * @private @@ -274,9 +274,6 @@ class NeuralNetworkData { }); } - - - /** * //////////////////////////////////////////////////////// * Tensor handling @@ -299,8 +296,6 @@ class NeuralNetworkData { const inputArr = []; const outputArr = []; - - dataRaw.forEach((row) => { // get xs // const xs = Object.keys(meta.inputs) @@ -310,7 +305,7 @@ class NeuralNetworkData { // .flat(); // inputArr.push(xs); - + const xs = row.xs; inputArr.push(xs); @@ -323,22 +318,18 @@ class NeuralNetworkData { outputArr.push(ys); }); - // const inputs = tf.tensor(inputArr.flat(), [ // dataLength, // ...meta.inputUnits, // ]); const inputs = tf.tensor(inputArr); - const outputs = tf.tensor(outputArr.flat(), [ dataLength, meta.outputUnits, ]); - - return { inputs, outputs, @@ -357,11 +348,10 @@ class NeuralNetworkData { * @return {Array} */ normalizeDataRaw() { - const normXs = this.normalizeInputData(this.meta.inputs, "xs"); const normYs = this.normalizeInputData(this.meta.outputs, "ys"); const normalizedData = tsUtils.zipArraySequence(normXs, normYs); - + return normalizedData; } @@ -372,7 +362,7 @@ class NeuralNetworkData { */ normalizeInputData(inputOrOutputMeta, xsOrYs) { const dataRaw = this.data.raw; - + // the data length const dataLength = dataRaw.length; @@ -395,10 +385,12 @@ class NeuralNetworkData { normalized[k] = this.normalizeArray(dataAsArray, options); } else if (inputMeta[k].dtype === "number") { let dataAsArray; - if (xsOrYs === 'ys'){ + if (xsOrYs === "ys") { dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]); - } else if (xsOrYs === 'xs'){ - dataAsArray = this.data.raw.flatMap((item) => item[xsOrYs].map((obj) => obj[k])); + } else if (xsOrYs === "xs") 
{ + dataAsArray = this.data.raw.flatMap((item) => + item[xsOrYs].map((obj) => obj[k]) + ); } normalized[k] = this.normalizeArray(dataAsArray, options); } else if (inputMeta[k].dtype === "array") { @@ -407,48 +399,50 @@ class NeuralNetworkData { this.normalizeArray(item, options) ); } - }); - let output; - if (xsOrYs == "ys"){ + if (xsOrYs == "ys") { output = [...new Array(dataLength).fill(null)].map((item, idx) => { const row = { [xsOrYs]: {}, }; - + Object.keys(inputMeta).forEach((k) => { row[xsOrYs][k] = normalized[k][idx]; }); - + return row; }); - } else if ((xsOrYs == "xs")){ + } else if (xsOrYs == "xs") { // reshape array - already ready for tensorconversion const features = Object.keys(inputMeta); const feature_length = features.length; - - const seriesStep = dataRaw[0]['xs'].length; - + + const seriesStep = dataRaw[0]["xs"].length; + const batch = normalized[features[0]].length / seriesStep; - this.meta.seriesShape = [seriesStep,feature_length]; - console.log('series shape',this.meta.seriesShape) + this.meta.seriesShape = [seriesStep, feature_length]; + console.log("series shape", this.meta.seriesShape); let zipped = []; // zip arrays before reshaping - for (let idx =0; idx < seriesStep*feature_length*batch; idx++){ + for (let idx = 0; idx < seriesStep * feature_length * batch; idx++) { features.forEach((k) => { - zipped.push(normalized[k][idx]) - }) + zipped.push(normalized[k][idx]); + }); } // reshaping - output = tsUtils.reshapeTo3DArray(zipped,[batch,seriesStep,feature_length]) + output = tsUtils.reshapeTo3DArray(zipped, [ + batch, + seriesStep, + feature_length, + ]); } - - console.log('thismeta',this.meta) + + console.log("thismeta", this.meta); return output; } @@ -484,9 +478,9 @@ class NeuralNetworkData { throw new Error("error in inputArray of normalizeArray() function"); } - normalizePredictData(dataRaw, inputOrOutputMeta){ + normalizePredictData(dataRaw, inputOrOutputMeta) { const inputMeta = Object.assign({}, inputOrOutputMeta); - const xsOrYs = 
"xs" + const xsOrYs = "xs"; const predict_normalized = {}; Object.keys(inputMeta).forEach((k) => { // get the min and max values @@ -499,30 +493,37 @@ class NeuralNetworkData { options.legend = inputMeta[k].legend; predict_normalized[k] = this.normalizeArray(dataAsArray, options); } else if (inputMeta[k].dtype === "number") { - const dataAsArray = Array(dataRaw).flatMap((item) => item.map((obj) => (obj[k]))); + const dataAsArray = Array(dataRaw).flatMap((item) => + item.map((obj) => obj[k]) + ); console.log(dataAsArray); predict_normalized[k] = this.normalizeArray(dataAsArray, options); } - }); - console.log('done', predict_normalized); + console.log("done", predict_normalized); const features = Object.keys(inputMeta); const feature_length = features.length; - + const seriesStep = dataRaw.length; - + const batch = 1; let zipped = []; // zip arrays before reshaping - for (let idx =0; idx < seriesStep*feature_length*batch; idx++){ - features.forEach((k) => {zipped.push(predict_normalized[k][idx])}) + for (let idx = 0; idx < seriesStep * feature_length * batch; idx++) { + features.forEach((k) => { + zipped.push(predict_normalized[k][idx]); + }); } - // reshaping - const output = tsUtils.reshapeTo3DArray(zipped,[batch,seriesStep,feature_length]) - return output + // reshaping + const output = tsUtils.reshapeTo3DArray(zipped, [ + batch, + seriesStep, + feature_length, + ]); + return output; } /** @@ -581,8 +582,6 @@ class NeuralNetworkData { const meta = Object.assign({}, this.meta); const output = this.data.raw.map((row) => { - - const xs = { ...row.xs, }; @@ -603,7 +602,6 @@ class NeuralNetworkData { } }); - return { xs, ys, @@ -612,7 +610,6 @@ class NeuralNetworkData { return output; } - /** * //////////////////////////////////////////////// * saving / loading data @@ -628,7 +625,6 @@ class NeuralNetworkData { */ async loadDataFromUrl(dataUrl, inputs, outputs) { try { - if (dataUrl.endsWith(".csv")) { await this.loadCSV(dataUrl, inputs, outputs); } else if 
(dataUrl.endsWith(".json")) { @@ -665,7 +661,6 @@ class NeuralNetworkData { // format the data.raw array // this.formatRawData(json, inputLabels, outputLabels); return this.findEntries(json); - } catch (err) { console.error("error loading json"); throw new Error(err); @@ -689,7 +684,6 @@ class NeuralNetworkData { // format the data.raw array // this.formatRawData(json, inputLabels, outputLabels); return this.findEntries(json); - } catch (err) { console.error("error loading csv", err); throw new Error(err); @@ -818,7 +812,7 @@ class NeuralNetworkData { file.name.includes("_meta.json") ); if (!file) { - console.warn('no model_meta.json file found in FileList'); + console.warn("no model_meta.json file found in FileList"); return; } const text = await file.text(); diff --git a/src/TimeSeries/timeSeriesUtils.js b/src/TimeSeries/timeSeriesUtils.js index 92e1f79d..28193989 100644 --- a/src/TimeSeries/timeSeriesUtils.js +++ b/src/TimeSeries/timeSeriesUtils.js @@ -14,51 +14,61 @@ class TimeSeriesUtils { [[],[],[],[]] 3. Sequence of values (shape should be provided by user) [[,,,,,]] e.g. 
shape = {steps: 4, values: 2} will become [{x: , y: },{x: , y: },{x: , y: },{x: , y: }] - */ + */ - verifyAndFormatInputs(xInputs, options = null,classOptions){ + verifyAndFormatInputs(xInputs, options = null, classOptions) { const dataFormat = this.checkInputStructure(xInputs, options); - return this.formatInputsToObjects(xInputs,options,classOptions,dataFormat); + return this.formatInputsToObjects( + xInputs, + options, + classOptions, + dataFormat + ); } - checkInputStructure(xInputs, options=null){ - if(!Array.isArray(xInputs)){ - throw new error('Syntax Error: Data Should be in an Array') - } + checkInputStructure(xInputs, options = null) { + if (!Array.isArray(xInputs)) { + throw new error("Syntax Error: Data Should be in an Array"); + } let isObjects = true; let isArrays = true; let isValues = true; - - for (let i = 0; i < xInputs.length ; i++){ - if (nnUtils.getDataType(xInputs[i]) === 'object'){ - console.log('here') + + for (let i = 0; i < xInputs.length; i++) { + if (nnUtils.getDataType(xInputs[i]) === "object") { + console.log("here"); isArrays = false; isValues = false; - if ( i > 0 ) { - if (Object.keys(xInputs[i-1]).length !== Object.keys(xInputs[i]).length || nnUtils.getDataType(xInputs[i-1]) === 'object'){ - throw new error('Data format is inconsistent') + if (i > 0) { + if ( + Object.keys(xInputs[i - 1]).length !== + Object.keys(xInputs[i]).length || + nnUtils.getDataType(xInputs[i - 1]) === "object" + ) { + throw new error("Data format is inconsistent"); } } - } else if (Array.isArray(xInputs[i])){ - console.log('here2') + } else if (Array.isArray(xInputs[i])) { + console.log("here2"); isObjects = false; isValues = false; - if ( i > 0 ) { - if (xInputs[i-1].length !== xInputs[i].length || !Array.isArray(xInputs[i-1])){ - throw new error('Data format is inconsistent') + if (i > 0) { + if ( + xInputs[i - 1].length !== xInputs[i].length || + !Array.isArray(xInputs[i - 1]) + ) { + throw new error("Data format is inconsistent"); } } } else { - if 
(options.inputLabels){ - + if (options.inputLabels) { isObjects = false; isArrays = false; - } else { - throw new error('inputLabels is needed for 1D array inputs') + throw new error("inputLabels is needed for 1D array inputs"); } } - + if (isObjects) { return "ObjectSequence"; } else if (isArrays) { @@ -66,87 +76,90 @@ class TimeSeriesUtils { } else if (isValues) { return "ValueSequence"; } else { - throw new error('Syntax Error: Input Structure is unknown') + throw new error("Syntax Error: Input Structure is unknown"); } } - } + } - formatInputsToObjects(xInputs, options=null,classOptions, dataFormat){ - switch(dataFormat){ - case 'ObjectSequence': + formatInputsToObjects(xInputs, options = null, classOptions, dataFormat) { + switch (dataFormat) { + case "ObjectSequence": return xInputs; - case 'ArraySequence': + case "ArraySequence": return this.convertArraySequence(xInputs, options, classOptions); - case 'ValueSequence': - return this.convertValueSequence(xInputs,options); + case "ValueSequence": + return this.convertValueSequence(xInputs, options); default: - throw new error('Input Data Structure is unknown'); + throw new error("Input Data Structure is unknown"); } } - convertArraySequence(xInputs, options=null, classOptions){ - let label = '' + convertArraySequence(xInputs, options = null, classOptions) { + let label = ""; - if (options !== null){ - if (options.inputLabels){ - label = options.inputLabels - console.log('here1') + if (options !== null) { + if (options.inputLabels) { + label = options.inputLabels; + console.log("here1"); } - } else if (classOptions !== null){ - if (classOptions.inputs){ + } else if (classOptions !== null) { + if (classOptions.inputs) { label = classOptions.inputs; } - } - - if ((typeof label === 'string' && label === '') || - (Array.isArray(label) && label.length === 0)) { + } + + if ( + (typeof label === "string" && label === "") || + (Array.isArray(label) && label.length === 0) + ) { label = 
this.getLabelFromNestedArray(xInputs); } - return xInputs.map((input)=>{ + return xInputs.map((input) => { const obj = {}; - input.forEach((value,ind) => { + input.forEach((value, ind) => { obj[label[ind]] = value; }); return obj; - }) + }); } - convertValueSequence(xInputs, options=null){ - const {inputLabels} = options; - if (xInputs.length % inputLabels.length !== 0){ - throw new error ("Invalid Input: Number of Labels don't match amount of values") - } - return xInputs.reduce((acc, _, index, array) => { - if (index % inputLabels.length === 0) { - // Create a new object for the current set of values - const obj = {}; - for (let i = 0; i < inputLabels.length; i++) { - obj[inputLabels[i]] = array[index + i]; - } - acc.push(obj); + convertValueSequence(xInputs, options = null) { + const { inputLabels } = options; + if (xInputs.length % inputLabels.length !== 0) { + throw new error( + "Invalid Input: Number of Labels don't match amount of values" + ); + } + return xInputs.reduce((acc, _, index, array) => { + if (index % inputLabels.length === 0) { + // Create a new object for the current set of values + const obj = {}; + for (let i = 0; i < inputLabels.length; i++) { + obj[inputLabels[i]] = array[index + i]; } - return acc; - }, []); + acc.push(obj); + } + return acc; + }, []); } - verifyAndFormatOutputs(yInputs, options=null,classOptions){ - const {outputs} = classOptions; + verifyAndFormatOutputs(yInputs, options = null, classOptions) { + const { outputs } = classOptions; let outputLabels; - if (options !== null) { - if (options.outputLabels){ + if (options.outputLabels) { outputLabels = options.outputLabels; } - } - + } + if (outputs.length > 0) { if (outputs.every((item) => typeof item === "string")) { outputLabels = outputs; } - } else if ( typeof yInputs === "object") { + } else if (typeof yInputs === "object") { outputLabels = Object.keys(yInputs); } else { outputLabels = nnUtils.createLabelsFromArrayValues(yInputs, "output"); @@ -160,16 +173,16 @@ class 
TimeSeriesUtils { return nnUtils.formatDataAsObject(yInputs, outputLabels); } - prepareLabels(xInputs, yInputs, options = null,classOptions){ - const {inputs, outputs} = this.options; - + prepareLabels(xInputs, yInputs, options = null, classOptions) { + const { inputs, outputs } = this.options; + let inputLabels; let outputLabels; - + // options-based values to assign if (options !== null) { - ({inputLabels, outputLabels} = options) - } else if (inputs.length > 0 && outputs.length > 0) { + ({ inputLabels, outputLabels } = options); + } else if (inputs.length > 0 && outputs.length > 0) { if (inputs.every((item) => typeof item === "string")) { inputLabels = inputs; } @@ -177,7 +190,7 @@ class TimeSeriesUtils { outputLabels = outputs; } - // input-based values to assign + // input-based values to assign } else { inputLabels = this.getLabelFromNestedArray(xInputs); if (typeof yInputs === "object") { @@ -186,8 +199,7 @@ class TimeSeriesUtils { inputLabels = this.getLabelFromNestedArray(yInputs); } } - - + // Make sure that the inputLabels and outputLabels are arrays if (!(inputLabels instanceof Array)) { throw new Error("inputLabels must be an array"); @@ -195,15 +207,17 @@ class TimeSeriesUtils { if (!(outputLabels instanceof Array)) { throw new Error("outputLabels must be an array"); } - - return inputLabels, outputLabels - + + return inputLabels, outputLabels; } - getLabelFromNestedArray(xInputs,prefix = 'label'){ + getLabelFromNestedArray(xInputs, prefix = "label") { // Recursive function to find the deepest level of the array function traverseArray(array) { - if (array.length > 0 && (typeof array[0] === 'string' || typeof array[0] === 'number')) { + if ( + array.length > 0 && + (typeof array[0] === "string" || typeof array[0] === "number") + ) { return array.map((_, index) => `${prefix}_${index}`); } else { for (const item of array) { @@ -215,60 +229,14 @@ class TimeSeriesUtils { } return null; } - + if (Array.isArray(xInputs)) { return traverseArray(xInputs); } 
else { - throw new Error('Input data must be an array.'); - } + throw new Error("Input data must be an array."); + } } - // labelsFromNestedArray(data){ - // function processData(data, prefix = 'label') { - // // Recursive function to find the deepest level of the data and return the result - // function traverse(value) { - // if (Array.isArray(value)) { - // if (value.length > 0 && typeof value[0] === 'string') { - // // If the deepest unit is an array with strings - // return { type: 'array', data: value }; - // } else if (value.length > 0 && typeof value[0] === 'number') { - // // If the deepest unit is an array with numbers - // return { type: 'array', data: value }; - // } else { - // for (const item of value) { - // const result = traverse(item); - // if (result) return result; - // } - // } - // } else if (value !== null && typeof value === 'object') { - // return { type: 'object', data: value }; // If the deepest unit is an object - // } - // return null; - // } - - // const result = traverse(data); - - // if (result) { - // if (result.type === 'object') { - // // If the deepest level is an object, get the unique keys - // return Object.keys(result.data); - // } else if (result.type === 'array') { - // // If the deepest level is an array with strings or numbers, get the labels - // return result.data.map((_, index) => `${prefix}_${index}`); - // } - // } else { - // // No recognizable structure found - // throw new Error('Data does not match expected structure for objects or arrays.'); - // } - // } - - // output = processData(data, "label"); - - // console.log('labeling',output); - // return processData(data, "label"); - // } - - // normalize utilities reshapeTo3DArray(data, shape) { const [batch, timeStep, feature] = shape; @@ -276,18 +244,17 @@ class TimeSeriesUtils { let index = 0; for (let i = 0; i < batch; i++) { - let batchArray = []; - for (let j = 0; j < timeStep; j++) { - let timeStepArray = []; - for (let k = 0; k < feature; k++) { - 
timeStepArray.push(data[index]); - index++; - } - batchArray.push(timeStepArray); + let batchArray = []; + for (let j = 0; j < timeStep; j++) { + let timeStepArray = []; + for (let k = 0; k < feature; k++) { + timeStepArray.push(data[index]); + index++; } - result.push(batchArray); + batchArray.push(timeStepArray); + } + result.push(batchArray); } - return result; } @@ -297,21 +264,119 @@ class TimeSeriesUtils { console.error("arrays do not have the same length"); return []; } - + return arr1.map((xs, idx) => { const ys = arr2[idx].ys; // Extract the inner `ys` object return { xs: xs, - ys: ys + ys: ys, }; }); } + + // point simplification utilities - Ramer-Douglas-Peucker (RDP) algorithm + simplifyPoints(allPoints, targetPointCount, maxEpsilon = 50) { + const rdpPoints = []; + + const epsilon = findEpsilonForPointCount( + allPoints, + targetPointCount, + maxEpsilon + ); + + const total = allPoints.length; + const start = allPoints[0]; + const end = allPoints[total - 1]; + rdpPoints.push(start); + rdp(0, total - 1, allPoints, rdpPoints, epsilon); + rdpPoints.push(end); + + return rdpPoints; + } + + findEpsilonForPointCount(points, targetCount, maxEpsilon) { + let low = 0; + let high = maxEpsilon; + let mid; + let simplifiedPointsCount = 0; + + while (high - low > 0.001) { + // Tolerance for approximation + mid = (low + high) / 2; + simplifiedPointsCount = getSimplifiedPointCount(points, mid); + if (simplifiedPointsCount > targetCount) { + low = mid; + } else { + high = mid; + } + } + + return mid; + } + + getSimplifiedPointCount(points, epsilon) { + const rdpPoints = []; + const total = points.length; + const start = points[0]; + const end = points[total - 1]; + rdpPoints.push(start); + rdp(0, total - 1, points, rdpPoints, epsilon); + rdpPoints.push(end); + return rdpPoints.length; + } + + rdp(startIndex, endIndex, allPoints, rdpPoints, epsilon) { + const nextIndex = findFurthest(allPoints, startIndex, endIndex, epsilon); + if (nextIndex > 0) { + if (startIndex != 
nextIndex) { + rdp(startIndex, nextIndex, allPoints, rdpPoints, epsilon); + } + rdpPoints.push(allPoints[nextIndex]); + if (endIndex != nextIndex) { + rdp(nextIndex, endIndex, allPoints, rdpPoints, epsilon); + } + } + } + + findFurthest(points, a, b, epsilon) { + let recordDistance = -1; + const start = points[a]; + const end = points[b]; + let furthestIndex = -1; + for (let i = a + 1; i < b; i++) { + const currentPoint = points[i]; + const d = lineDist(currentPoint, start, end); + if (d > recordDistance) { + recordDistance = d; + furthestIndex = i; + } + } + if (recordDistance > epsilon) { + return furthestIndex; + } else { + return -1; + } + } + + lineDist(c, a, b) { + const norm = scalarProjection(c, a, b); + return dist(c.x, c.y, norm.x, norm.y); + } + + scalarProjection(p, a, b) { + const ap = { x: p.x - a.x, y: p.y - a.y }; + const ab = { x: b.x - a.x, y: b.y - a.y }; + const abMag = Math.sqrt(ab.x * ab.x + ab.y * ab.y); + ab.x /= abMag; + ab.y /= abMag; + const dot = ap.x * ab.x + ap.y * ab.y; + return { x: a.x + ab.x * dot, y: a.y + ab.y * dot }; + } } - + const timeSeriesUtils = () => { const instance = new TimeSeriesUtils(); return instance; }; - + export default timeSeriesUtils(); - \ No newline at end of file diff --git a/src/index.js b/src/index.js index 97c15a55..ddf7375c 100644 --- a/src/index.js +++ b/src/index.js @@ -23,7 +23,7 @@ const withPreload = { neuralNetwork, sentiment, soundClassifier, - + timeSeries, }; const ml5 = Object.assign({ p5Utils }, withPreload, { @@ -32,7 +32,6 @@ const ml5 = Object.assign({ p5Utils }, withPreload, { setBackend, version: packageInfo.version, setP5: p5Utils.setP5.bind(p5Utils), - timeSeries }); p5Utils.shouldPreload(ml5, Object.keys(withPreload)); diff --git a/webpack.config.js b/webpack.config.js index ff90e03d..e9bab17b 100644 --- a/webpack.config.js +++ b/webpack.config.js @@ -48,9 +48,9 @@ const developmentConfig = { resolve: { fallback: { fs: false, - util: false + util: false, }, - } + }, }; const 
productionConfig = { @@ -77,9 +77,9 @@ const productionConfig = { resolve: { fallback: { fs: false, - util: false + util: false, }, - } + }, }; module.exports = function (env, args) { From 0747f513766044d804631d47915ac91c8ed1517f Mon Sep 17 00:00:00 2001 From: mop9047 Date: Wed, 21 Aug 2024 06:37:54 +0800 Subject: [PATCH 10/13] weather prediction example --- .../timeSeries-weather-prediction/sketch.js | 191 ++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100644 examples/timeSeries-weather-prediction/sketch.js diff --git a/examples/timeSeries-weather-prediction/sketch.js b/examples/timeSeries-weather-prediction/sketch.js new file mode 100644 index 00000000..6fea4863 --- /dev/null +++ b/examples/timeSeries-weather-prediction/sketch.js @@ -0,0 +1,191 @@ +/* + * 👋 Hello! This is an ml5.js example made and shared with ❤️. + * Learn more about the ml5.js project: https://ml5js.org/ + * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md + * + * This example demonstrates Training a Stock Price Predictor through ml5.TimeSeries. 
+ */ + +let model; +let data; +let data_index; + +let seq = []; +let targetLength = 5; + +// load JSON data with same formatting from the internet, this means +// loadData() cannot yet be used as it is formatted differently +function preload() { + json_data = loadJSON("weather_data.json"); + + // set the options to initialize timeSeries Neural Network + let options = { + task: "regression", + debug: "true", + learningRate: 0.01, + output: ["label"], + }; + model = ml5.timeSeries(options); +} + +function setup() { + data = json_data.data; + createCanvas(640, 400); + background(220); + + // iterate through data using simple sliding window algorithm + data_index = targetLength - 1; + while (data_index < data.length - 1) { + // get the values [targetLength] steps before current index, collect and add + for (let x = targetLength - 1; x >= 0; x--) { + let curr = data[data_index - x]; + // choose from the raw data what you want to to feed to the model + let inputs = { + temperature: curr.temperature, + humidity: curr.humidity, + windSpeed: curr.wind_speed, + pressure: curr.pressure, + precipitation: curr.precipitation, + }; + + // once collected all data into an array to make it into a sequence + // the format of the sequence is like this [{},{},...,{}] + // this is the X value + seq.push(inputs); + } + + // the Y value to train is the value that comes after the sequence + let target = data[data_index + 1]; + + // select the outputs you want to get, multiple outputs are possible, we want to predict all values + let output = { + temperature: target.temperature, + humidity: target.humidity, + windSpeed: target.wind_speed, + pressure: target.pressure, + precipitation: target.precipitation, + }; + + // feed data into the model + model.addData(seq, output); + + // reset the sequence so new values can be added + seq = []; + + // iterate through the whole dataset moving the sliding window in each iteration + data_index++; + } + // normalize the data after adding everything + 
model.normalizeData(); + + // put a button to train and predict + trainAndPredictButtons(); +} + +// train data +function trainData() { + model.normalizeData(); + let options = { + epochs: 100, + }; + model.train(options, finishedTraining); +} + +function finishedTraining() { + console.log("Training Done!"); +} + +// predict data +function predictData() { + // set the seq to empty + seq = []; + + // choose the most recent sequences + let latest = data.slice(-targetLength); + for (let x = 0; x < targetLength; x++) { + let curr = latest[x]; + // select the same properties for inputs + let inputs = { + temperature: curr.temperature, + humidity: curr.humidity, + windSpeed: curr.wind_speed, + pressure: curr.pressure, + precipitation: curr.precipitation, + }; + // add them to one array to make them a sequence + seq.push(inputs); + } + + // use the sequence to predict + model.predict(seq, gotResults); +} + +// put the new data in the dataset so this will be considered for any new predictions +function gotResults(results) { + console.log(results); + addNewData(results); +} + +// code for adding new data to the dataset to be used for future prediction +function addNewData(results) { + (new_values = { + date: " for the next hour", + temperature: parseFloat(results[0].value.toFixed(2)), // get string convert to float and round to 2 decimal points + humidity: parseFloat(results[1].value.toFixed(2)), + wind_speed: parseFloat(results[2].value.toFixed(2)), + pressure: parseFloat(results[3].value.toFixed(2)), + precipitation: parseFloat(results[4].value.toFixed(2)), + }), + data.push(new_values); +} + +function draw() { + background(220); + textAlign(CENTER, CENTER); + textSize(16); + + // Draw the table headers + let headers = [ + "Date", + "Temperature", + "Humidity", + "Wind Speed", + "Pressure", + "Precipitation", + ]; + let xOffset = 70; + let yOffset = 100; + for (let i = 0; i < headers.length; i++) { + text(headers[i], xOffset + i * 100, yOffset); + } + + // Display the 
last 5 entries from the dataset + let latest = data.slice(-targetLength); + for (let i = 0; i < latest.length; i++) { + let entry = latest[i]; + text(entry.date.slice(5), xOffset, yOffset + (i + 1) * 30); + text(entry.temperature, xOffset + 100, yOffset + (i + 1) * 30); + text(entry.humidity, xOffset + 200, yOffset + (i + 1) * 30); + text(entry.wind_speed, xOffset + 300, yOffset + (i + 1) * 30); + text(entry.pressure, xOffset + 400, yOffset + (i + 1) * 30); + text(entry.precipitation, xOffset + 500, yOffset + (i + 1) * 30); + } +} + +function trainAndPredictButtons() { + train_but = createButton("Train Model"); + train_but.mouseClicked(trainData); + train_but.style("font-family", "Georgia"); + train_but.style("font-size", "20px"); + train_but.position(100, 350); + + pred_but = createButton("Predict Next Hour"); + pred_but.mouseClicked(predictData); + pred_but.style("font-family", "Georgia"); + pred_but.style("font-size", "20px"); + pred_but.position(350, 350); + + instructionP = createP( + "Instructions:

1.) Press the 'Train Model' Button and wait for training to finish.
2.) Press 'Predict Next Hour' to see the weather in the next hour!" + ); +} From 6dc456a5f70c32a82cbacc4c0af130f8de84f964 Mon Sep 17 00:00:00 2001 From: mop9047 Date: Tue, 27 Aug 2024 19:19:57 +0800 Subject: [PATCH 11/13] RDP algorithm Util now available, bug fixes for compatibility for different browsers --- .../index.html | 37 +++++ .../sketch.js | 138 ++++++++++++++++++ .../timeSeries-train-mouse-gesture/sketch.js | 1 + src/TimeSeries/index.js | 35 ++--- src/TimeSeries/timeSeriesUtils.js | 36 +++-- 5 files changed, 211 insertions(+), 36 deletions(-) create mode 100644 examples/timeSeries-train-mouse-gesture RDP/index.html create mode 100644 examples/timeSeries-train-mouse-gesture RDP/sketch.js diff --git a/examples/timeSeries-train-mouse-gesture RDP/index.html b/examples/timeSeries-train-mouse-gesture RDP/index.html new file mode 100644 index 00000000..6407c4eb --- /dev/null +++ b/examples/timeSeries-train-mouse-gesture RDP/index.html @@ -0,0 +1,37 @@ + + + + + + + + ml5.js Time Series Train Mouse Gesture classifier Example + + + + + +
+ +
+ +
+ + + +
+ + + + diff --git a/examples/timeSeries-train-mouse-gesture RDP/sketch.js b/examples/timeSeries-train-mouse-gesture RDP/sketch.js new file mode 100644 index 00000000..ca08c416 --- /dev/null +++ b/examples/timeSeries-train-mouse-gesture RDP/sketch.js @@ -0,0 +1,138 @@ +/* + * 👋 Hello! This is an ml5.js example made and shared with ❤️. + * Learn more about the ml5.js project: https://ml5js.org/ + * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md + * + * This example demonstrates How to train your own mouse gesture classifier through ml5.TimeSeries. + */ + +let model; + +let currShape = "circle"; +let state = "collection"; + +let pressedOnce = true; +let datapoints; +let sequence = []; +let recCircle, recSquare, trainBut; + +function preload() { + let options = { + inputs: ["x", "y"], + outputs: ["label"], + task: "classification", + spatialData: "true", + debug: "true", + learningRate: 0.005, + }; + + model = ml5.timeSeries(options); +} + +function setup() { + // p5 js elements + let canvas = createCanvas(600, 400); + canvas.parent("canvasDiv"); + background(220); + UI(); +} + +function draw() { + // record data when the mouse is pressed inside the canvas + if (mouseIsPressed && mouseY < height && mouseX < width) { + // draw lines through coordinates + line(pmouseX, pmouseY, mouseX, mouseY); + let inputs = { x: mouseX, y: mouseY }; + sequence.push(inputs); + } +} + +// code to signify drawing can be done again +function mouseReleased() { + if (mouseY < height && mouseX < width) { + // if state is collection, add whole sequence as X, and shape as Y + if (state == "collection") { + let target = { label: currShape }; + let paddedCoordinates = model.padCoordinates(sequence, 100); + model.addData(paddedCoordinates, target); + clearScreen(); + } else if (state == "prediction") { + let paddedCoordinates = model.padCoordinates(sequence, 100); + model.classify(paddedCoordinates, gotResults); + clearScreen(); + } + } + // reset 
the sequence + sequence = []; +} + +// cleanup screen and removed drawn elements, add helpful text +function clearScreen() { + background(220); + textSize(20); + fill(0); + text(state + " : " + currShape, 50, 50); +} + +function trainModel() { + // normalize Data first before Training + model.normalizeData(); + + // set the number of epochs for training + let options = { + epochs: 40, + }; + model.train(options, whileTraining, finishedTraining); + + background(220); + state = "training"; + text("Training...", 50, 50); + + recCircle.attribute("disabled", true); + recSquare.attribute("disabled", true); + trainBut.attribute("disabled", true); +} + +function whileTraining(epoch, loss) { + console.log(epoch); +} + +function finishedTraining() { + background(220); + text("Training Finished, Draw again to predict", 50, 50); + state = "prediction"; +} + +function gotResults(results) { + // console.log("results", results); + let label = results[0].label; + + currShape = label; +} + +////////////// UI Elements //////////// +function UI() { + textSize(20); + + recCircle = select("#recCircle"); + recSquare = select("#recSquare"); + trainBut = select("#trainBut"); + + recCircle.mouseClicked(recordCircle); + recSquare.mouseClicked(recordSquare); + trainBut.mouseClicked(trainModel); + + function recordCircle() { + state = "collection"; + currShape = "circle"; + + background(220); + } + + function recordSquare() { + state = "collection"; + currShape = "square"; + + background(220); + } +} diff --git a/examples/timeSeries-train-mouse-gesture/sketch.js b/examples/timeSeries-train-mouse-gesture/sketch.js index 0af2dac0..c0139c65 100644 --- a/examples/timeSeries-train-mouse-gesture/sketch.js +++ b/examples/timeSeries-train-mouse-gesture/sketch.js @@ -57,6 +57,7 @@ function draw() { frameCount++; let inputs = { x: mouseX, y: mouseY }; + sequence.push(inputs); if (sequence.length == num_seq * ink_multiplier) { diff --git a/src/TimeSeries/index.js b/src/TimeSeries/index.js index 
f9472f0c..cc419ada 100644 --- a/src/TimeSeries/index.js +++ b/src/TimeSeries/index.js @@ -6,6 +6,8 @@ import NeuralNetworkData from "./timeSeriesData"; import nnUtils from "../NeuralNetwork/NeuralNetworkUtils"; import NeuralNetworkVis from "../NeuralNetwork/NeuralNetworkVis"; +import setBackend from "../utils/setBackend"; + import tsUtils from "./timeSeriesUtils"; const DEFAULTS = { @@ -56,6 +58,7 @@ class timeSeries { } async init() { + setBackend("webgl"); await tf.ready(); if (this.options.dataUrl) { await this.loadDataFromUrl(this.options.dataUrl); @@ -508,32 +511,7 @@ class timeSeries { let inputData; - if (this.options.task === "imageClassification") { - // get the inputData for classification - // if it is a image type format it and - // flatten it - inputData = this.searchAndFormat(_input); - if (Array.isArray(inputData)) { - inputData = inputData.flat(); - } else { - inputData = inputData[headers[0]]; - } - - if (meta.isNormalized) { - // TODO: check to make sure this property is not static!!!! 
- const { min, max } = meta.inputs[headers[0]]; - inputData = this.neuralNetworkData.normalizeArray( - Array.from(inputData), - { min, max } - ); - } else { - inputData = Array.from(inputData); - } - - inputData = tf.tensor([inputData], [1, ...meta.inputUnits]); - } else { - inputData = this.formatInputsForPredictionAll(_input); - } + inputData = this.formatInputsForPredictionAll(_input); const unformattedResults = await this.neuralNetwork.classify(inputData); inputData.dispose(); @@ -737,6 +715,11 @@ class timeSeries { dispose() { this.neuralNetwork.dispose(); } + + padCoordinates(coordinates, targetPointCount) { + const maxEpsilon = int(coordinates.length / 2); + return tsUtils.padCoordinates(coordinates, targetPointCount, maxEpsilon); + } } const TimeSeries = (inputsOrOptions, outputsOrCallback, callback) => { diff --git a/src/TimeSeries/timeSeriesUtils.js b/src/TimeSeries/timeSeriesUtils.js index 28193989..5c980e58 100644 --- a/src/TimeSeries/timeSeriesUtils.js +++ b/src/TimeSeries/timeSeriesUtils.js @@ -275,10 +275,10 @@ class TimeSeriesUtils { } // point simplification utilities - Ramer-Douglas-Peucker (RDP) algorithm - simplifyPoints(allPoints, targetPointCount, maxEpsilon = 50) { + padCoordinates(allPoints, targetPointCount, maxEpsilon = 50) { const rdpPoints = []; - const epsilon = findEpsilonForPointCount( + const epsilon = this.findEpsilonForPointCount( allPoints, targetPointCount, maxEpsilon @@ -288,9 +288,20 @@ class TimeSeriesUtils { const start = allPoints[0]; const end = allPoints[total - 1]; rdpPoints.push(start); - rdp(0, total - 1, allPoints, rdpPoints, epsilon); + this.rdp(0, total - 1, allPoints, rdpPoints, epsilon); rdpPoints.push(end); + if (rdpPoints.length > targetPointCount) { + return rdpPoints.slice(0, targetPointCount); + } else if (rdpPoints.length < targetPointCount) { + const filler = new Array(targetPointCount - rdpPoints.length).fill( + rdpPoints[rdpPoints.length - 1] + ); + + rdpPoints.push(...filler); + return rdpPoints; + } + 
return rdpPoints; } @@ -303,7 +314,7 @@ class TimeSeriesUtils { while (high - low > 0.001) { // Tolerance for approximation mid = (low + high) / 2; - simplifiedPointsCount = getSimplifiedPointCount(points, mid); + simplifiedPointsCount = this.getSimplifiedPointCount(points, mid); if (simplifiedPointsCount > targetCount) { low = mid; } else { @@ -320,20 +331,25 @@ class TimeSeriesUtils { const start = points[0]; const end = points[total - 1]; rdpPoints.push(start); - rdp(0, total - 1, points, rdpPoints, epsilon); + this.rdp(0, total - 1, points, rdpPoints, epsilon); rdpPoints.push(end); return rdpPoints.length; } rdp(startIndex, endIndex, allPoints, rdpPoints, epsilon) { - const nextIndex = findFurthest(allPoints, startIndex, endIndex, epsilon); + const nextIndex = this.findFurthest( + allPoints, + startIndex, + endIndex, + epsilon + ); if (nextIndex > 0) { if (startIndex != nextIndex) { - rdp(startIndex, nextIndex, allPoints, rdpPoints, epsilon); + this.rdp(startIndex, nextIndex, allPoints, rdpPoints, epsilon); } rdpPoints.push(allPoints[nextIndex]); if (endIndex != nextIndex) { - rdp(nextIndex, endIndex, allPoints, rdpPoints, epsilon); + this.rdp(nextIndex, endIndex, allPoints, rdpPoints, epsilon); } } } @@ -345,7 +361,7 @@ class TimeSeriesUtils { let furthestIndex = -1; for (let i = a + 1; i < b; i++) { const currentPoint = points[i]; - const d = lineDist(currentPoint, start, end); + const d = this.lineDist(currentPoint, start, end); if (d > recordDistance) { recordDistance = d; furthestIndex = i; @@ -359,7 +375,7 @@ class TimeSeriesUtils { } lineDist(c, a, b) { - const norm = scalarProjection(c, a, b); + const norm = this.scalarProjection(c, a, b); return dist(c.x, c.y, norm.x, norm.y); } From a51ec76a197fc9c65fd5230b4b4453788612eb15 Mon Sep 17 00:00:00 2001 From: mop9047 Date: Sat, 31 Aug 2024 14:30:24 +0400 Subject: [PATCH 12/13] changed spatialData to Data Mode --- examples/timeSeries-hand-gestures/sketch.js | 2 +- .../sketch.js | 2 +- .../sketch.js | 10 
++++--- .../timeSeries-weather-prediction/sketch.js | 1 + src/TimeSeries/index.js | 27 +++++-------------- 5 files changed, 16 insertions(+), 26 deletions(-) diff --git a/examples/timeSeries-hand-gestures/sketch.js b/examples/timeSeries-hand-gestures/sketch.js index 8f1346e1..c95392ff 100644 --- a/examples/timeSeries-hand-gestures/sketch.js +++ b/examples/timeSeries-hand-gestures/sketch.js @@ -28,7 +28,7 @@ function preload() { let options = { outputs: ["label"], task: "classification", - spatialData: "true", + dataMode: "spatial", debug: "true", learningRate: 0.001, }; diff --git a/examples/timeSeries-load-model-hand-gestures/sketch.js b/examples/timeSeries-load-model-hand-gestures/sketch.js index 946ca76d..e8e45085 100644 --- a/examples/timeSeries-load-model-hand-gestures/sketch.js +++ b/examples/timeSeries-load-model-hand-gestures/sketch.js @@ -28,7 +28,7 @@ function preload() { // setup the timeseries neural network let options = { task: "classification", - dataModality: "spatial", + dataMode: "spatial", spatialData: "true", }; diff --git a/examples/timeSeries-train-mouse-gesture RDP/sketch.js b/examples/timeSeries-train-mouse-gesture RDP/sketch.js index ca08c416..31b54d27 100644 --- a/examples/timeSeries-train-mouse-gesture RDP/sketch.js +++ b/examples/timeSeries-train-mouse-gesture RDP/sketch.js @@ -11,9 +11,9 @@ let model; let currShape = "circle"; let state = "collection"; -let pressedOnce = true; let datapoints; let sequence = []; +let targetSequence = 30; let recCircle, recSquare, trainBut; function preload() { @@ -21,7 +21,7 @@ function preload() { inputs: ["x", "y"], outputs: ["label"], task: "classification", - spatialData: "true", + dataMode: "spatial", debug: "true", learningRate: 0.005, }; @@ -53,11 +53,11 @@ function mouseReleased() { // if state is collection, add whole sequence as X, and shape as Y if (state == "collection") { let target = { label: currShape }; - let paddedCoordinates = model.padCoordinates(sequence, 100); + let paddedCoordinates 
= model.padCoordinates(sequence, targetSequence); model.addData(paddedCoordinates, target); clearScreen(); } else if (state == "prediction") { - let paddedCoordinates = model.padCoordinates(sequence, 100); + let paddedCoordinates = model.padCoordinates(sequence, targetSequence); model.classify(paddedCoordinates, gotResults); clearScreen(); } @@ -127,6 +127,7 @@ function UI() { currShape = "circle"; background(220); + text(state + " : " + currShape, 50, 50); } function recordSquare() { @@ -134,5 +135,6 @@ function UI() { currShape = "square"; background(220); + text(state + " : " + currShape, 50, 50); } } diff --git a/examples/timeSeries-weather-prediction/sketch.js b/examples/timeSeries-weather-prediction/sketch.js index 6fea4863..f43be1f0 100644 --- a/examples/timeSeries-weather-prediction/sketch.js +++ b/examples/timeSeries-weather-prediction/sketch.js @@ -21,6 +21,7 @@ function preload() { // set the options to initialize timeSeries Neural Network let options = { task: "regression", + dataMode: "linear", debug: "true", learningRate: 0.01, output: ["label"], diff --git a/src/TimeSeries/index.js b/src/TimeSeries/index.js index cc419ada..e6df1941 100644 --- a/src/TimeSeries/index.js +++ b/src/TimeSeries/index.js @@ -17,24 +17,11 @@ const DEFAULTS = { modelUrl: null, layers: [], task: null, - spatialData: false, + dataMode: "linear", debug: false, learningRate: 0.2, hiddenUnits: 16, }; -/* -as far as the p5 sketch is concerned, it will directly call only a few functions in the class, -these are the following: - -model.addData - Done -model.saveData, model etc -model.train -model.classify/predict etc - - -No image classification -No neural evolution -*/ class timeSeries { constructor(options, callback) { @@ -262,11 +249,11 @@ class timeSeries { addDefaultLayers() { let layers; const task = this.options.task; - const ifSpatialData = this.options.spatialData; - let taskConditions = `${task}_${ifSpatialData}`; + const dataMode = this.options.dataMode; + let 
taskConditions = `${task}_${dataMode}`; switch (taskConditions.toLowerCase()) { // if the task is classification and spatial modality - case "classification_true": + case "classification_spatial": layers = [ { type: "conv1d", @@ -306,7 +293,7 @@ class timeSeries { return this.createNetworkLayers(layers); // if the task is classification and sequential modality - case "classification_false": + case "classification_linear": layers = [ { type: "lstm", @@ -335,7 +322,7 @@ class timeSeries { return this.createNetworkLayers(layers); // if the task is regression - case "regression_true": + case "regression_spatial": layers = [ { type: "conv1d", @@ -375,7 +362,7 @@ class timeSeries { return this.createNetworkLayers(layers); - case "regression_false": + case "regression_linear": layers = [ { type: "lstm", From 261a8e82ed863da007cac91cdcfec5a194fbeb2d Mon Sep 17 00:00:00 2001 From: mop9047 Date: Sat, 31 Aug 2024 15:21:04 +0400 Subject: [PATCH 13/13] Bug fixes and example UI changes --- .../timeSeries-weather-prediction/index.html | 12 ++++++++++++ .../timeSeries-weather-prediction/sketch.js | 17 ++++------------- package.json | 12 +++++++++--- src/TimeSeries/index.js | 2 ++ 4 files changed, 27 insertions(+), 16 deletions(-) diff --git a/examples/timeSeries-weather-prediction/index.html b/examples/timeSeries-weather-prediction/index.html index 50c4e895..72a3ffdd 100644 --- a/examples/timeSeries-weather-prediction/index.html +++ b/examples/timeSeries-weather-prediction/index.html @@ -18,5 +18,17 @@ + +
+ + +
+ + diff --git a/examples/timeSeries-weather-prediction/sketch.js b/examples/timeSeries-weather-prediction/sketch.js index f43be1f0..a21e8754 100644 --- a/examples/timeSeries-weather-prediction/sketch.js +++ b/examples/timeSeries-weather-prediction/sketch.js @@ -124,7 +124,7 @@ function predictData() { // put the new data in the dataset so this will be considered for any new predictions function gotResults(results) { console.log(results); - addNewData(results); + addNewData(results); //optional but will be helpful in using new prediction as part of dataset } // code for adding new data to the dataset to be used for future prediction @@ -173,20 +173,11 @@ function draw() { } } +// get buttons and assign functions (UI) function trainAndPredictButtons() { - train_but = createButton("Train Model"); + train_but = select("#train_but"); train_but.mouseClicked(trainData); - train_but.style("font-family", "Georgia"); - train_but.style("font-size", "20px"); - train_but.position(100, 350); - pred_but = createButton("Predict Next Hour"); + pred_but = select("#pred_but"); pred_but.mouseClicked(predictData); - pred_but.style("font-family", "Georgia"); - pred_but.style("font-size", "20px"); - pred_but.position(350, 350); - - instructionP = createP( - "Instructions:

1.) Press the 'Train Model' Button and wait for training to finish.
2.) Press 'Predict Next Hour' to see the weather in the next hour!" - ); } diff --git a/package.json b/package.json index 3aa05bba..cd90354e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "ml5", - "version": "1.0.1", + "version": "1.0.2", "description": "A friendly machine learning library for the web.", "main": "dist/ml5.min.js", "scripts": { @@ -11,7 +11,8 @@ "postinstall": "patch-package", "test": "jest --config tests/jest.config.js", "upload-examples": "node scripts/uploadExamples.js", - "update-p5-version": "node scripts/updateP5Version.js" + "update-p5-version": "node scripts/updateP5Version.js", + "update-readme": "node scripts/updateReadme.js" }, "files": [ "dist" @@ -72,5 +73,10 @@ ] }, "prettier": {}, - "packageManager": "yarn@1.22.22+sha512.a6b2f7906b721bba3d67d4aff083df04dad64c399707841b7acf00f6b133b7ac24255f2652fa22ae3534329dc6180534e98d17432037ff6fd140556e2bb3137e" + "packageManager": "yarn@4.3.1", + "engines": { + "node": "^20.15.1", + "yarn": "^4.3.1", + "npm": "please-use-yarn" + } } diff --git a/src/TimeSeries/index.js b/src/TimeSeries/index.js index e6df1941..62af28fe 100644 --- a/src/TimeSeries/index.js +++ b/src/TimeSeries/index.js @@ -45,7 +45,9 @@ class timeSeries { } async init() { + // workaround for Error setBackend("webgl"); + await tf.ready(); if (this.options.dataUrl) { await this.loadDataFromUrl(this.options.dataUrl);