diff --git a/examples/timeSeries-hand-gestures/index.html b/examples/timeSeries-hand-gestures/index.html
new file mode 100644
index 00000000..90d238c2
--- /dev/null
+++ b/examples/timeSeries-hand-gestures/index.html
@@ -0,0 +1,22 @@
+<!--
+  👋 Hello! This is an ml5.js example made and shared with ❤️.
+  Learn more about the ml5.js project: https://ml5js.org/
+  ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ 
+  This example demonstrates training a hand gesture classifier through ml5.TimeSeries.
+-->
+
+<html>
+  <head>
+    <meta charset="UTF-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>ml5.js Time Series Hand Gesture Train and Save</title>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.min.js"></script>
+    <script src="../../dist/ml5.js"></script>
+  </head>
+
+  <body>
+    <script src="sketch.js"></script>
+  </body>
+</html>
diff --git a/examples/timeSeries-hand-gestures/sketch.js b/examples/timeSeries-hand-gestures/sketch.js
new file mode 100644
index 00000000..c95392ff
--- /dev/null
+++ b/examples/timeSeries-hand-gestures/sketch.js
@@ -0,0 +1,192 @@
+/*
+ * 👋 Hello! This is an ml5.js example made and shared with ❤️.
+ * Learn more about the ml5.js project: https://ml5js.org/
+ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ *
+ * This example demonstrates training a Hand Gesture classifier through ml5.TimeSeries.
+ */
+
+let seqLength = 50;
+
+let handPose;
+let video;
+
+let hands = [];
+let sequence = [];
+
+let recordingFinished = false;
+let predictedWord = "";
+
+// UI variables
+let trainingWords = {};
+
+function preload() {
+  // Load the handPose model
+  handPose = ml5.handPose();
+
+  // setup the timeseries neural network
+  let options = {
+    outputs: ["label"],
+    task: "classification",
+    dataMode: "spatial",
+    debug: "true",
+    learningRate: 0.001,
+  };
+  model = ml5.timeSeries(options);
+}
+
+function setup() {
+  createCanvas(640, 480);
+
+  // setup video capture
+  video = createCapture(VIDEO);
+  video.size(640, 480);
+  video.hide();
+
+  // place UI elements
+  UI();
+
+  // use handpose model on video
+  handPose.detectStart(video, gotHands);
+}
+
+function draw() {
+  // draw video on frame
+  image(video, 0, 0, width, height);
+
+  drawPredictedWord();
+
+  // if hands are found then start recording
+  if (hands.length > 0 && recordingFinished == false) {
+    if (sequence.length <= seqLength) {
+      // get coordinates from hands (21 points)
+      handpoints = drawPoints();
+      sequence.push(handpoints);
+
+      // once sequence reaches the seqLength, add sequence as just one X value
+    } else if (sequence.length > 0) {
+      // get the training word from the input box
+      let train_word = nameField.value();
+
+      // if there is a word currently in the box then add data with that label
+      if (train_word.length > 0) {
+        // add data to the model
+        let target = { label: train_word };
+        model.addData(sequence, target);
+        trainingWordsUpdate();
+
+        // if there is no word in the box then classify instead
+      } else {
+        // classify the data
+        model.classify(sequence, gotResults);
+      }
+
+      // reset the sequence
+      sequence = [];
+      recordingFinished = true;
+    }
+
+    // can only record again when hand is out of frame
+  } else {
+    if (hands.length == 0) {
+      recordingFinished = false;
+    }
+  }
+}
+
+function drawPoints() {
+  let handpoints = [];
+  // iterate through both hands
+  for (let i = 0; i < hands.length; i++) {
+    let hand = hands[i];
+    for (let j = 0; j < hand.keypoints.length; j++) {
+      // access the keypoints in the hand
+      let keypoint = hand.keypoints[j];
+      handpoints.push(keypoint.x, keypoint.y);
+
+      fill(0, 255, 0);
+      noStroke();
+      circle(keypoint.x, keypoint.y, 5);
+    }
+  }
+  // assign to a different variable before clearing
+  let output = handpoints;
+  handpoints = [];
+
+  return output;
+}
+
+// Callback function for when handPose outputs data
+function gotHands(results) {
+  // save the output to the hands variable
+  hands = results;
+}
+
+function trainModelAndSave() {
+  model.normalizeData();
+  let options = {
+    epochs: 100,
+  };
+  model.train(options, whileTraining, finishedTraining);
+  nameField.value("");
+}
+
+function whileTraining(epoch) {
+  console.log(epoch);
+}
+
+function finishedTraining() {
+  console.log("finished training.");
+  model.save("model");
+}
+
+function gotResults(results) {
+  predictedWord = results[0].label;
+  console.log(predictedWord);
+  text(predictedWord, 200, 200);
+}
+
+function UI() {
+  nameField = createInput("");
+  nameField.attribute("placeholder", "Type the word to train");
+  nameField.position(110, 500);
+  nameField.size(250);
+
+  instructionP = createP(
+    'I want to train: <br><br> 1.) Type any word you want to pair with a gesture, e.g. "HELLO" <br> 2.) Do the gesture associated to the word, make sure to do it until the points disappear. <br> 3.) Move your hand out of the frame and repeat the gesture, do this multiple times <br> 4.) Do the same for other words e.g. "BYE" <br> 5.) Once all data is collected, press Train and Save<br><br> Tip: have at least 5 datasets for each word'
+  );
+  instructionP.style("width", "640px");
+  dataCountsP = createP("-> After the gesture a tally will appear here <-");
+
+  train_but = createButton("Train and Save");
+  train_but.mouseClicked(trainModelAndSave);
+  train_but.style("font-family", "Georgia");
+  train_but.style("font-size", "20px");
+  train_but.position(500, 490);
+}
+
+function drawPredictedWord() {
+  textSize(100);
+  fill(255);
+  text(predictedWord, 100, height / 2);
+}
+
+function trainingWordsUpdate() {
+  let tempWord = nameField.value();
+  console.log(Object.keys(trainingWords));
+  if (!(tempWord in trainingWords)) {
+    trainingWords[tempWord] = 1;
+  } else {
+    trainingWords[tempWord]++;
+  }
+
+  let counts = "";
+  let keys = Object.keys(trainingWords);
+  console.log("keys", keys);
+
+  for (let k of keys) {
+    counts += k + " : " + trainingWords[k] + "<br>";
+  }
+
+  dataCountsP.html(counts);
+}
diff --git a/examples/timeSeries-load-model-hand-gestures/index.html b/examples/timeSeries-load-model-hand-gestures/index.html
new file mode 100644
index 00000000..92363d69
--- /dev/null
+++ b/examples/timeSeries-load-model-hand-gestures/index.html
@@ -0,0 +1,45 @@
+<!--
+  👋 Hello! This is an ml5.js example made and shared with ❤️.
+  Learn more about the ml5.js project: https://ml5js.org/
+  ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ 
+  This example demonstrates loading a hand gesture classifier through ml5.TimeSeries.
+-->
+
+<html>
+  <head>
+    <meta charset="UTF-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>ml5.js Time Series Hand Gesture load model</title>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.min.js"></script>
+    <script src="../../dist/ml5.js"></script>
+  </head>
+
+  <body>
+    <script src="sketch.js"></script>
+    <div id="canvasDiv"></div>
+    <p>
+      This example loads a model that is trained with ASL hand gestures for
+      Hello and Goodbye. <br />
+      <br />
+
+      Instructions: <br />
+      1.) Use one hand to do a gesture in front of the camera <br />
+      2.) Wait for the points to disappear or the prediction to appear on
+      screen<br />
+      3.) To predict again, remove your hands from the frame and do the gesture
+      again<br /><br />
+
+      How to do gestures for Hello and Goodbye in ASL: <br />
+      Hello:
+      <a href="https://babysignlanguage.com/dictionary/hello/"
+        >https://babysignlanguage.com/dictionary/hello/ </a
+      ><br />
+      Goodbye:
+      <a href="https://babysignlanguage.com/dictionary/goodbye/"
+        >https://babysignlanguage.com/dictionary/goodbye/ </a
+      ><br />
+    </p>
+  </body>
+</html>
diff --git a/examples/timeSeries-load-model-hand-gestures/model/model.json b/examples/timeSeries-load-model-hand-gestures/model/model.json
new file mode 100644
index 00000000..ad7c44f6
--- /dev/null
+++ b/examples/timeSeries-load-model-hand-gestures/model/model.json
@@ -0,0 +1 @@
+{"modelTopology":{"class_name":"Sequential","config":{"name":"sequential_1","layers":[{"class_name":"Conv1D","config":{"filters":8,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[3],"strides":[1],"padding":"valid","dilation_rate":[1],"activation":"relu","use_bias":true,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"conv1d_Conv1D1","trainable":true,"batch_input_shape":[null,51,42],"dtype":"float32"}},{"class_name":"MaxPooling1D","config":{"pool_size":[2],"padding":"valid","strides":[2],"name":"max_pooling1d_MaxPooling1D1","trainable":true}},{"class_name":"Conv1D","config":{"filters":16,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"kernel_regularizer":null,"kernel_constraint":null,"kernel_size":[3],"strides":[1],"padding":"valid","dilation_rate":[1],"activation":"relu","use_bias":true,"bias_initializer":{"class_name":"Zeros","config":{}},"bias_regularizer":null,"activity_regularizer":null,"bias_constraint":null,"name":"conv1d_Conv1D2","trainable":true,"batch_input_shape":[null,51,42],"dtype":"float32"}},{"class_name":"MaxPooling1D","config":{"pool_size":[2],"padding":"valid","strides":[2],"name":"max_pooling1d_MaxPooling1D2","trainable":true}},{"class_name":"Flatten","config":{"name":"flatten_Flatten1","trainable":true}},{"class_name":"Dense","config":{"units":16,"activation":"relu","use_bias":true,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_regularizer":null,"bias_regularizer":null,"activity_regularizer":null,"kernel_constraint":null,"bias_constraint":null,"name":"dense_Dense1","trainable":true}},{"class_name"
:"Dense","config":{"units":2,"activation":"softmax","use_bias":true,"kernel_initializer":{"class_name":"VarianceScaling","config":{"scale":1,"mode":"fan_avg","distribution":"normal","seed":null}},"bias_initializer":{"class_name":"Zeros","config":{}},"kernel_regularizer":null,"bias_regularizer":null,"activity_regularizer":null,"kernel_constraint":null,"bias_constraint":null,"name":"dense_Dense2","trainable":true}}]},"keras_version":"tfjs-layers 4.8.0","backend":"tensor_flow.js"},"weightsManifest":[{"paths":["./hello.weights.bin"],"weights":[{"name":"conv1d_Conv1D1/kernel","shape":[3,42,8],"dtype":"float32"},{"name":"conv1d_Conv1D1/bias","shape":[8],"dtype":"float32"},{"name":"conv1d_Conv1D2/kernel","shape":[3,8,16],"dtype":"float32"},{"name":"conv1d_Conv1D2/bias","shape":[16],"dtype":"float32"},{"name":"dense_Dense1/kernel","shape":[176,16],"dtype":"float32"},{"name":"dense_Dense1/bias","shape":[16],"dtype":"float32"},{"name":"dense_Dense2/kernel","shape":[16,2],"dtype":"float32"},{"name":"dense_Dense2/bias","shape":[2],"dtype":"float32"}]}]}
\ No newline at end of file
diff --git a/examples/timeSeries-load-model-hand-gestures/model/model.weights.bin b/examples/timeSeries-load-model-hand-gestures/model/model.weights.bin
new file mode 100644
index 00000000..e57f1816
Binary files /dev/null and b/examples/timeSeries-load-model-hand-gestures/model/model.weights.bin differ
diff --git a/examples/timeSeries-load-model-hand-gestures/model/model_meta.json b/examples/timeSeries-load-model-hand-gestures/model/model_meta.json
new file mode 100644
index 00000000..1c0165c7
--- /dev/null
+++ b/examples/timeSeries-load-model-hand-gestures/model/model_meta.json
@@ -0,0 +1 @@
+{"inputUnits":[42],"outputUnits":2,"inputs":{"label_0":{"dtype":"number","min":4.151249399907168,"max":586.4725394909854},"label_1":{"dtype":"number","min":186.47223882383636,"max":496.34918695509003},"label_2":{"dtype":"number","min":12.818880217505907,"max":564.7860747522525},"label_3":{"dtype":"number","min":160.9460986889124,"max":478.89482602620234},"label_4":{"dtype":"number","min":20.681431005110262,"max":557.1173870582799},"label_5":{"dtype":"number","min":135.1274696802808,"max":454.0862355189599},"label_6":{"dtype":"number","min":29.375938053231934,"max":562.4826339023859},"label_7":{"dtype":"number","min":113.22511415628927,"max":455.15365538508894},"label_8":{"dtype":"number","min":37.27265551578051,"max":573.3838980891996},"label_9":{"dtype":"number","min":98.00531862273047,"max":473.4382341601794},"label_10":{"dtype":"number","min":2.706973037101564,"max":599.2858408346702},"label_11":{"dtype":"number","min":117.7350326456234,"max":453.76022921684716},"label_12":{"dtype":"number","min":11.635752695869659,"max":612.8243751678727},"label_13":{"dtype":"number","min":91.05094143918305,"max":481.6467136241304},"label_14":{"dtype":"number","min":22.9353041163117,"max":621.0127886598051},"label_15":{"dtype":"number","min":61.619264849841635,"max":499.63536096409143},"label_16":{"dtype":"number","min":33.53953084457643,"max":626.4181148091915},"label_17":{"dtype":"number","min":28.455718477478662,"max":512.7953875856006},"label_18":{"dtype":"number","min":-2.8065139589559984,"max":617.7828981986556},"label_19":{"dtype":"number","min":117.6886729722432,"max":459.5357193516273},"label_20":{"dtype":"number","min":3.7782929928570064,"max":633.7038985044576},"label_21":{"dtype":"number","min":86.77279076496669,"max":486.0751342925063},"label_22":{"dtype":"number","min":16.177018651157255,"max":642.8366376068107},"label_23":{"dtype":"number","min":51.687144639081325,"max":502.64037741142846},"label_24":{"dtype":"number","min":28.1461509145229,"max":650.241953637057
7},"label_25":{"dtype":"number","min":15.922382743702723,"max":516.9301399988833},"label_26":{"dtype":"number","min":-6.382516546058305,"max":630.7077663350849},"label_27":{"dtype":"number","min":120.16376158664924,"max":461.0881814514869},"label_28":{"dtype":"number","min":-1.4074379536407533,"max":647.5041251714117},"label_29":{"dtype":"number","min":90.58035685591811,"max":485.04491883378125},"label_30":{"dtype":"number","min":10.174906800459325,"max":658.4893875478738},"label_31":{"dtype":"number","min":71.76407331703523,"max":500.55112323964187},"label_32":{"dtype":"number","min":21.11718120932074,"max":668.566957655395},"label_33":{"dtype":"number","min":39.557348432978586,"max":514.4287318106208},"label_34":{"dtype":"number","min":-7.9534800405596595,"max":641.3232619371444},"label_35":{"dtype":"number","min":126.31599791044414,"max":465.6320514399833},"label_36":{"dtype":"number","min":-3.8369034650104927,"max":658.2044139172733},"label_37":{"dtype":"number","min":103.73604938021917,"max":481.03793223993495},"label_38":{"dtype":"number","min":3.7075645592075435,"max":668.8017566330357},"label_39":{"dtype":"number","min":88.76136006394765,"max":494.63688258092407},"label_40":{"dtype":"number","min":6.9609311353376135,"max":676.9525074586147},"label_41":{"dtype":"number","min":75.97401514052241,"max":506.7948506427954}},"outputs":{"label":{"dtype":"string","min":0,"max":1,"uniqueValues":["hello","bye"],"legend":{"hello":[1,0],"bye":[0,1]}}},"isNormalized":true,"seriesShape":[51,42]}
\ No newline at end of file
diff --git a/examples/timeSeries-load-model-hand-gestures/sketch.js b/examples/timeSeries-load-model-hand-gestures/sketch.js
new file mode 100644
index 00000000..e8e45085
--- /dev/null
+++ b/examples/timeSeries-load-model-hand-gestures/sketch.js
@@ -0,0 +1,132 @@
+/*
+ * 👋 Hello! This is an ml5.js example made and shared with ❤️.
+ * Learn more about the ml5.js project: https://ml5js.org/
+ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ *
+ * This example demonstrates loading a Hand Gesture classifier through ml5.TimeSeries.
+ * This example is trained with the ASL gestures for Hello and Goodbye
+ *
+ * Reference to sign hello and goodbye in ASL:
+ * Hello: https://babysignlanguage.com/dictionary/hello/
+ * Goodbye: https://babysignlanguage.com/dictionary/goodbye/
+ */
+
+// change this to make the recording longer
+let seqLength = 50;
+
+let handPose;
+let video;
+let hands = [];
+let sequence = [];
+let recordingFinished = false;
+let predictedWord = "";
+
+function preload() {
+  // Load the handPose model
+  handPose = ml5.handPose();
+
+  // setup the timeseries neural network
+  let options = {
+    task: "classification",
+    dataMode: "spatial",
+    spatialData: "true",
+  };
+
+  model = ml5.timeSeries(options);
+}
+
+function setup() {
+  let canvas = createCanvas(640, 480);
+  canvas.parent("canvasDiv");
+
+  // create video capture
+  video = createCapture(VIDEO);
+  video.size(640, 480);
+  video.hide();
+
+  handPose.detectStart(video, gotHands);
+
+  // setup the model files to load
+  let modelDetails = {
+    model: "model/model.json",
+    metadata: "model/model_meta.json",
+    weights: "model/model.weights.bin",
+  };
+
+  // load the model and call modelLoaded once finished
+  model.load(modelDetails, modelLoaded);
+}
+// call back for load model
+function modelLoaded() {
+  console.log("model loaded!");
+}
+
+function draw() {
+  // draw video on the canvas
+  image(video, 0, 0, width, height);
+
+  // put the text on screen after a prediction
+  placePredictedText();
+
+  // if hands are found then start recording
+  if (hands.length > 0 && recordingFinished == false) {
+    if (sequence.length <= seqLength) {
+      // get coordinates from hands (21 points)
+      handpoints = drawPoints();
+      sequence.push(handpoints);
+
+      // once sequence reaches the seqLength, add sequence as just one X value
+    } else if (sequence.length > 0) {
+      // classify based on the collected data
+      model.classify(sequence, gotResults);
+
+      // reset the sequence
+      sequence = [];
+      recordingFinished = true;
+    }
+
+    // can only record again when hand is out of frame
+  } else {
+    if (hands.length == 0) {
+      recordingFinished = false;
+    }
+  }
+}
+
+// draw the points on the hands
+function drawPoints() {
+  let handpoints = [];
+  for (let i = 0; i < hands.length; i++) {
+    let hand = hands[i];
+    for (let j = 0; j < hand.keypoints.length; j++) {
+      let keypoint = hand.keypoints[j];
+      fill(0, 255, 0);
+      noStroke();
+      circle(keypoint.x, keypoint.y, 5);
+      handpoints.push(keypoint.x, keypoint.y);
+    }
+  }
+  let output = handpoints;
+  handpoints = [];
+  return output;
+}
+
+// Callback function for when handPose outputs data
+function gotHands(results) {
+  // save the output to the hands variable
+  hands = results;
+}
+
+// call back for accessing the results
+function gotResults(results) {
+  predictedWord = results[0].label;
+  console.log(predictedWord);
+  text(predictedWord, 100, 100);
+}
+
+// for drawing text on screen
+function placePredictedText() {
+  textSize(100);
+  fill(255);
+  text(predictedWord, 100, height / 2);
+}
diff --git a/examples/timeSeries-train-mouse-gesture RDP/index.html b/examples/timeSeries-train-mouse-gesture RDP/index.html
new file mode 100644
index 00000000..6407c4eb
--- /dev/null
+++ b/examples/timeSeries-train-mouse-gesture RDP/index.html	
@@ -0,0 +1,37 @@
+<!--
+  👋 Hello! This is an ml5.js example made and shared with ❤️.
+  Learn more about the ml5.js project: https://ml5js.org/
+  ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ 
+  This example demonstrates how to train your own mouse gesture classifier through ml5.TimeSeries.
+-->
+
+<html>
+  <head>
+    <meta charset="UTF-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>ml5.js Time Series Train Mouse Gesture classifier Example</title>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.min.js"></script>
+    <script src="../../dist/ml5.js"></script>
+  </head>
+
+  <body>
+    <div id="canvasDiv">
+      <script src="sketch.js"></script>
+    </div>
+
+    <div>
+      <button id="recCircle">Record Circle</button>
+      <button id="recSquare">Record Square</button>
+      <button id="trainBut">Train & Predict</button>
+    </div>
+  </body>
+
+  <style>
+    button {
+      font-family: Georgia, "Times New Roman", Times, serif;
+      font-size: 20px;
+    }
+  </style>
+</html>
diff --git a/examples/timeSeries-train-mouse-gesture RDP/sketch.js b/examples/timeSeries-train-mouse-gesture RDP/sketch.js
new file mode 100644
index 00000000..31b54d27
--- /dev/null
+++ b/examples/timeSeries-train-mouse-gesture RDP/sketch.js	
@@ -0,0 +1,140 @@
+/*
+ * 👋 Hello! This is an ml5.js example made and shared with ❤️.
+ * Learn more about the ml5.js project: https://ml5js.org/
+ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ *
+ * This example demonstrates How to train your own mouse gesture classifier through ml5.TimeSeries.
+ */
+
+let model;
+
+let currShape = "circle";
+let state = "collection";
+
+let datapoints;
+let sequence = [];
+let targetSequence = 30;
+let recCircle, recSquare, trainBut;
+
+function preload() {
+  let options = {
+    inputs: ["x", "y"],
+    outputs: ["label"],
+    task: "classification",
+    dataMode: "spatial",
+    debug: "true",
+    learningRate: 0.005,
+  };
+
+  model = ml5.timeSeries(options);
+}
+
+function setup() {
+  // p5 js elements
+  let canvas = createCanvas(600, 400);
+  canvas.parent("canvasDiv");
+  background(220);
+  UI();
+}
+
+function draw() {
+  // record data when the mouse is pressed inside the canvas
+  if (mouseIsPressed && mouseY < height && mouseX < width) {
+    // draw lines through coordinates
+    line(pmouseX, pmouseY, mouseX, mouseY);
+    let inputs = { x: mouseX, y: mouseY };
+    sequence.push(inputs);
+  }
+}
+
+// code to signify drawing can be done again
+function mouseReleased() {
+  if (mouseY < height && mouseX < width) {
+    // if state is collection, add whole sequence as X, and shape as Y
+    if (state == "collection") {
+      let target = { label: currShape };
+      let paddedCoordinates = model.padCoordinates(sequence, targetSequence);
+      model.addData(paddedCoordinates, target);
+      clearScreen();
+    } else if (state == "prediction") {
+      let paddedCoordinates = model.padCoordinates(sequence, targetSequence);
+      model.classify(paddedCoordinates, gotResults);
+      clearScreen();
+    }
+  }
+  // reset the sequence
+  sequence = [];
+}
+
+// cleanup screen and removed drawn elements, add helpful text
+function clearScreen() {
+  background(220);
+  textSize(20);
+  fill(0);
+  text(state + " : " + currShape, 50, 50);
+}
+
+function trainModel() {
+  // normalize Data first before Training
+  model.normalizeData();
+
+  // set the number of epochs for training
+  let options = {
+    epochs: 40,
+  };
+  model.train(options, whileTraining, finishedTraining);
+
+  background(220);
+  state = "training";
+  text("Training...", 50, 50);
+
+  recCircle.attribute("disabled", true);
+  recSquare.attribute("disabled", true);
+  trainBut.attribute("disabled", true);
+}
+
+function whileTraining(epoch, loss) {
+  console.log(epoch);
+}
+
+function finishedTraining() {
+  background(220);
+  text("Training Finished, Draw again to predict", 50, 50);
+  state = "prediction";
+}
+
+function gotResults(results) {
+  // console.log("results", results);
+  let label = results[0].label;
+
+  currShape = label;
+}
+
+////////////// UI Elements ////////////
+function UI() {
+  textSize(20);
+
+  recCircle = select("#recCircle");
+  recSquare = select("#recSquare");
+  trainBut = select("#trainBut");
+
+  recCircle.mouseClicked(recordCircle);
+  recSquare.mouseClicked(recordSquare);
+  trainBut.mouseClicked(trainModel);
+
+  function recordCircle() {
+    state = "collection";
+    currShape = "circle";
+
+    background(220);
+    text(state + " : " + currShape, 50, 50);
+  }
+
+  function recordSquare() {
+    state = "collection";
+    currShape = "square";
+
+    background(220);
+    text(state + " : " + currShape, 50, 50);
+  }
+}
diff --git a/examples/timeSeries-train-mouse-gesture/index.html b/examples/timeSeries-train-mouse-gesture/index.html
new file mode 100644
index 00000000..acdfde38
--- /dev/null
+++ b/examples/timeSeries-train-mouse-gesture/index.html
@@ -0,0 +1,43 @@
+<!--
+  👋 Hello! This is an ml5.js example made and shared with ❤️.
+  Learn more about the ml5.js project: https://ml5js.org/
+  ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ 
+  This example demonstrates how to train your own mouse gesture classifier through ml5.TimeSeries.
+-->
+
+<html>
+  <head>
+    <meta charset="UTF-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>ml5.js Time Series Train Mouse Gesture classifier Example</title>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.min.js"></script>
+    <script src="../../dist/ml5.js"></script>
+  </head>
+
+  <body>
+    <script src="sketch.js"></script>
+    <div id="canvasDiv"></div>
+    <button id="recCircle">Record Circle</button>
+    <button id="recSquare">Record Square</button>
+    <button id="trainBut">Train & Predict</button>
+
+    <p>
+      Instructions: <br />
+      1.) Press the "Record Circle" or "Record Square" and start drawing until
+      the ink runs out <br />
+      2.) Draw multiple times for each shape<br />3.) Press "Train" and wait for
+      training to finish <br />
+      4.) Draw again to predict drawn shape <br /><br />
+      Tip: Collect at least 5 drawings for each shape.
+    </p>
+  </body>
+
+  <style>
+    button {
+      font-family: Georgia, "Times New Roman", Times, serif;
+      font-size: 20px;
+    }
+  </style>
+</html>
diff --git a/examples/timeSeries-train-mouse-gesture/sketch.js b/examples/timeSeries-train-mouse-gesture/sketch.js
new file mode 100644
index 00000000..c0139c65
--- /dev/null
+++ b/examples/timeSeries-train-mouse-gesture/sketch.js
@@ -0,0 +1,204 @@
+/*
+ * 👋 Hello! This is an ml5.js example made and shared with ❤️.
+ * Learn more about the ml5.js project: https://ml5js.org/
+ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ *
+ * This example demonstrates How to train your own mouse gesture classifier through ml5.TimeSeries.
+ */
+
+let model;
+let counts = {
+  circleDataCount: 0,
+  squareDataCount: 0,
+};
+let currShape = "circle";
+let state = "collection";
+
+let pressedOnce = true;
+let frameCount = 0;
+let datapoints;
+let sequence = [];
+let recCircle, recSquare, trainBut;
+
+// Training data lengths
+let ink_multiplier = 3;
+let num_seq = 20;
+
+function preload() {
+  let options = {
+    inputs: ["x", "y"],
+    outputs: ["label"],
+    task: "classification",
+    spatialData: "true",
+    debug: "true",
+    learningRate: 0.005,
+  };
+
+  model = ml5.timeSeries(options);
+}
+
+function setup() {
+  // p5 js elements
+  let canvas = createCanvas(600, 400);
+  canvas.parent("canvasDiv");
+  background(220);
+  UI();
+
+  // set framerate to constant rate for constant data collection
+  frameRate(60);
+}
+
+function draw() {
+  // record data when the mouse is pressed inside the canvas
+  if (mouseIsPressed && pressedOnce && mouseY < 400 && mouseX < 600) {
+    // draw lines through coordinates
+    line(pmouseX, pmouseY, mouseX, mouseY);
+
+    frameCount++;
+
+    let inputs = { x: mouseX, y: mouseY };
+
+    sequence.push(inputs);
+
+    if (sequence.length == num_seq * ink_multiplier) {
+      pressedOnce = false;
+      frameCount = 0;
+
+      // if state is collection, add whole sequence as X, and shape as Y
+      if (state == "collection") {
+        let target = { label: currShape };
+        model.addData(sequence, target);
+
+        // add to the count for each
+        counts[currShape + "DataCount"] += 1;
+        console.log(counts);
+        updateDataCountUI();
+
+        // reset the screen
+        background(220);
+        textSize(20);
+        fill(0);
+        text("Recording: " + currShape, 50, 50);
+        // if prediction, classify using the whole sequence
+      } else if (state == "prediction") {
+        model.classify(sequence, gotResults);
+
+        background(220);
+      }
+
+      // reset the sequence
+      sequence = [];
+    }
+  }
+  inkBar();
+}
+
+function trainModel() {
+  // normalize Data first before Training
+  model.normalizeData();
+
+  // set the number of epochs for training
+  let options = {
+    epochs: 40,
+  };
+  model.train(options, whileTraining, finishedTraining);
+
+  background(220);
+  state = "training";
+  text("Training...", 50, 50);
+  recCircle.style("background-color", "");
+  recSquare.style("background-color", "");
+  trainBut.style("background-color", "#f0f0f0");
+}
+
+function whileTraining(epoch, loss) {
+  console.log(epoch);
+}
+
+function finishedTraining() {
+  background(220);
+  text("Training Finished, Draw again to predict", 50, 50);
+  state = "prediction";
+}
+
+function gotResults(results) {
+  let label = results[0].label;
+
+  fill(0);
+  text("Prediction: " + label, 50, 50);
+}
+
+// code to signify drawing can be done again
+function mouseReleased() {
+  pressedOnce = true;
+}
+
+////////////// UI Elements ////////////
+
+// code to visualize how much ink left
+function inkBar() {
+  datapoints = map(frameCount, 0, ink_multiplier * num_seq, 0, num_seq);
+
+  bar_height = 250;
+  height_miltiplier = bar_height / num_seq;
+  push();
+  fill(0);
+  textSize(15);
+  text("Ink:", 550, 90);
+  rect(550, 100, 25, num_seq * height_miltiplier);
+  fill(255);
+  rect(550, 100, 25, datapoints * height_miltiplier);
+  pop();
+}
+
+// code for UI elements such as buttons
+function UI() {
+  textSize(20);
+
+  recCircle = select("#recCircle");
+  recSquare = select("#recSquare");
+  trainBut = select("#trainBut");
+
+  recCircle.mouseClicked(recordCircle);
+  recCircle.style("background-color", "#f0f0f0");
+  recSquare.mouseClicked(recordSquare);
+  trainBut.mouseClicked(trainModel);
+
+  function recordCircle() {
+    state = "collection";
+    currShape = "circle";
+
+    background(220);
+    text("Recording: circle", 50, 50);
+    recCircle.style("background-color", "#f0f0f0");
+    recSquare.style("background-color", "");
+    trainBut.style("background-color", "");
+  }
+
+  function recordSquare() {
+    state = "collection";
+    currShape = "square";
+
+    background(220);
+    text("Recording: square", 50, 50);
+    recCircle.style("background-color", "");
+    recSquare.style("background-color", "#f0f0f0");
+    trainBut.style("background-color", "");
+  }
+  dataCountsP = createP(
+    "circle data: " +
+      counts.circleDataCount +
+      "<br>square data: " +
+      counts.squareDataCount
+  );
+}
+
+// Update the HTML UI with the current data counts
+function updateDataCountUI() {
+  dataCountsP.html(
+    "circle data: " +
+      counts.circleDataCount +
+      "<br>square data: " +
+      counts.squareDataCount
+  );
+}
diff --git a/examples/timeSeries-weather-prediction/index.html b/examples/timeSeries-weather-prediction/index.html
new file mode 100644
index 00000000..72a3ffdd
--- /dev/null
+++ b/examples/timeSeries-weather-prediction/index.html
@@ -0,0 +1,34 @@
+<!--
+  👋 Hello! This is an ml5.js example made and shared with ❤️.
+  Learn more about the ml5.js project: https://ml5js.org/
+  ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ 
+  This example demonstrates training a Weather Predictor through ml5.TimeSeries.
+-->
+
+<html>
+  <head>
+    <meta charset="UTF-8" />
+    <meta http-equiv="X-UA-Compatible" content="IE=edge" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>ml5.js Time Series Weather Prediction Example</title>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.9.4/p5.min.js"></script>
+    <script src="../../dist/ml5.js"></script>
+  </head>
+
+  <body>
+    <script src="sketch.js"></script>
+
+    <div style="position: absolute; margin: 350px 0px 0px 100px">
+      <button id="train_but">Train Model</button>
+      <button id="pred_but">Predict Next Hour</button>
+    </div>
+  </body>
+
+  <style>
+    button {
+      font-family: Georgia, "Times New Roman", Times, serif;
+      font-size: 20px;
+    }
+  </style>
+</html>
diff --git a/examples/timeSeries-weather-prediction/sketch.js b/examples/timeSeries-weather-prediction/sketch.js
new file mode 100644
index 00000000..a21e8754
--- /dev/null
+++ b/examples/timeSeries-weather-prediction/sketch.js
@@ -0,0 +1,183 @@
+/*
+ * 👋 Hello! This is an ml5.js example made and shared with ❤️.
+ * Learn more about the ml5.js project: https://ml5js.org/
+ * ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
+ *
+ * This example demonstrates training a Weather Predictor through ml5.TimeSeries.
+ */
+
+let model;
+let data;
+let data_index;
+
+let seq = [];
+let targetLength = 5;
+
+// Load JSON data that uses a custom format. Because the format differs from
+// the one ml5's loadData() expects, the file is loaded manually with loadJSON().
+function preload() {
+  json_data = loadJSON("weather_data.json");
+
+  // set the options to initialize timeSeries Neural Network
+  let options = {
+    task: "regression",
+    dataMode: "linear",
+    debug: "true",
+    learningRate: 0.01,
+    output: ["label"],
+  };
+  model = ml5.timeSeries(options);
+}
+
+// Build training sequences with a sliding window over the hourly weather
+// data and feed each (sequence, next-hour target) pair to the model.
+function setup() {
+  data = json_data.data;
+  createCanvas(640, 400);
+  background(220);
+
+  // iterate through data using simple sliding window algorithm
+  data_index = targetLength - 1;
+  while (data_index < data.length - 1) {
+    // get the values [targetLength] steps before current index, collect and add
+    for (let x = targetLength - 1; x >= 0; x--) {
+      let curr = data[data_index - x];
+      // choose from the raw data what you want to feed to the model
+      let inputs = {
+        temperature: curr.temperature,
+        humidity: curr.humidity,
+        windSpeed: curr.wind_speed,
+        pressure: curr.pressure,
+        precipitation: curr.precipitation,
+      };
+
+      // once collected all data into an array to make it into a sequence
+      // the format of the sequence is like this [{},{},...,{}]
+      // this is the X value
+      seq.push(inputs);
+    }
+
+    // the Y value to train is the value that comes after the sequence
+    let target = data[data_index + 1];
+
+    // select the outputs you want to get, multiple outputs are possible, we want to predict all values
+    let output = {
+      temperature: target.temperature,
+      humidity: target.humidity,
+      windSpeed: target.wind_speed,
+      pressure: target.pressure,
+      precipitation: target.precipitation,
+    };
+
+    // feed data into the model
+    model.addData(seq, output);
+
+    // reset the sequence so new values can be added
+    seq = [];
+
+    // iterate through the whole dataset moving the sliding window in each iteration
+    data_index++;
+  }
+  // normalize the data after adding everything
+  model.normalizeData();
+
+  // put a button to train and predict
+  trainAndPredictButtons();
+}
+
+// Train the model on the collected dataset (triggered by the Train button)
+function trainData() {
+  // NOTE(review): data was already normalized in setup(); confirm that
+  // calling normalizeData() a second time is idempotent
+  model.normalizeData();
+  let options = {
+    epochs: 100,
+  };
+  model.train(options, finishedTraining);
+}
+
+// Callback fired once model.train() completes
+function finishedTraining() {
+  console.log("Training Done!");
+}
+
+// predict data
+function predictData() {
+  // set the seq to empty
+  seq = [];
+
+  // choose the most recent sequences
+  let latest = data.slice(-targetLength);
+  for (let x = 0; x < targetLength; x++) {
+    let curr = latest[x];
+    // select the same properties for inputs
+    let inputs = {
+      temperature: curr.temperature,
+      humidity: curr.humidity,
+      windSpeed: curr.wind_speed,
+      pressure: curr.pressure,
+      precipitation: curr.precipitation,
+    };
+    // add them to one array to make them a sequence
+    seq.push(inputs);
+  }
+
+  // use the sequence to predict
+  model.predict(seq, gotResults);
+}
+
+// Receives the prediction results: logs them and folds the prediction back
+// into the dataset so it is considered in subsequent predictions
+function gotResults(results) {
+  console.log(results);
+  addNewData(results); //optional but will be helpful in using new prediction as part of dataset
+}
+
+// code for adding new data to the dataset to be used for future prediction
+function addNewData(results) {
+  (new_values = {
+    date: "  for the next hour",
+    temperature: parseFloat(results[0].value.toFixed(2)), // get string convert to float and round to 2 decimal points
+    humidity: parseFloat(results[1].value.toFixed(2)),
+    wind_speed: parseFloat(results[2].value.toFixed(2)),
+    pressure: parseFloat(results[3].value.toFixed(2)),
+    precipitation: parseFloat(results[4].value.toFixed(2)),
+  }),
+    data.push(new_values);
+}
+
+function draw() {
+  background(220);
+  textAlign(CENTER, CENTER);
+  textSize(16);
+
+  // Draw the table headers
+  let headers = [
+    "Date",
+    "Temperature",
+    "Humidity",
+    "Wind Speed",
+    "Pressure",
+    "Precipitation",
+  ];
+  let xOffset = 70;
+  let yOffset = 100;
+  for (let i = 0; i < headers.length; i++) {
+    text(headers[i], xOffset + i * 100, yOffset);
+  }
+
+  // Display the last 5 entries from the dataset
+  let latest = data.slice(-targetLength);
+  for (let i = 0; i < latest.length; i++) {
+    let entry = latest[i];
+    text(entry.date.slice(5), xOffset, yOffset + (i + 1) * 30);
+    text(entry.temperature, xOffset + 100, yOffset + (i + 1) * 30);
+    text(entry.humidity, xOffset + 200, yOffset + (i + 1) * 30);
+    text(entry.wind_speed, xOffset + 300, yOffset + (i + 1) * 30);
+    text(entry.pressure, xOffset + 400, yOffset + (i + 1) * 30);
+    text(entry.precipitation, xOffset + 500, yOffset + (i + 1) * 30);
+  }
+}
+
+// get buttons and assign functions (UI)
+function trainAndPredictButtons() {
+  // NOTE(review): train_but / pred_but are implicit globals
+  train_but = select("#train_but");
+  train_but.mouseClicked(trainData);
+
+  pred_but = select("#pred_but");
+  pred_but.mouseClicked(predictData);
+}
diff --git a/examples/timeSeries-weather-prediction/weather_data.json b/examples/timeSeries-weather-prediction/weather_data.json
new file mode 100644
index 00000000..a45429a3
--- /dev/null
+++ b/examples/timeSeries-weather-prediction/weather_data.json
@@ -0,0 +1,196 @@
+{
+  "data": [
+    {
+      "date": "2024-08-01T00:00:00Z",
+      "temperature": 28.0,
+      "humidity": 50,
+      "wind_speed": 3.0,
+      "pressure": 1015,
+      "precipitation": 0.0
+    },
+    {
+      "date": "2024-08-01T01:00:00Z",
+      "temperature": 27.5,
+      "humidity": 52,
+      "wind_speed": 4.0,
+      "pressure": 1014,
+      "precipitation": 0.0
+    },
+    {
+      "date": "2024-08-01T02:00:00Z",
+      "temperature": 27.0,
+      "humidity": 55,
+      "wind_speed": 5.0,
+      "pressure": 1013,
+      "precipitation": 0.0
+    },
+    {
+      "date": "2024-08-01T03:00:00Z",
+      "temperature": 26.5,
+      "humidity": 60,
+      "wind_speed": 6.0,
+      "pressure": 1012,
+      "precipitation": 2.0
+    },
+    {
+      "date": "2024-08-01T04:00:00Z",
+      "temperature": 26.0,
+      "humidity": 65,
+      "wind_speed": 8.0,
+      "pressure": 1010,
+      "precipitation": 5.0
+    },
+    {
+      "date": "2024-08-01T05:00:00Z",
+      "temperature": 25.5,
+      "humidity": 70,
+      "wind_speed": 10.0,
+      "pressure": 1008,
+      "precipitation": 10.0
+    },
+    {
+      "date": "2024-08-01T06:00:00Z",
+      "temperature": 25.0,
+      "humidity": 75,
+      "wind_speed": 12.0,
+      "pressure": 1006,
+      "precipitation": 15.0
+    },
+    {
+      "date": "2024-08-01T07:00:00Z",
+      "temperature": 24.5,
+      "humidity": 80,
+      "wind_speed": 14.0,
+      "pressure": 1004,
+      "precipitation": 20.0
+    },
+    {
+      "date": "2024-08-01T08:00:00Z",
+      "temperature": 24.0,
+      "humidity": 85,
+      "wind_speed": 15.0,
+      "pressure": 1002,
+      "precipitation": 25.0
+    },
+    {
+      "date": "2024-08-01T09:00:00Z",
+      "temperature": 23.5,
+      "humidity": 90,
+      "wind_speed": 17.0,
+      "pressure": 1000,
+      "precipitation": 30.0
+    },
+    {
+      "date": "2024-08-01T10:00:00Z",
+      "temperature": 23.0,
+      "humidity": 95,
+      "wind_speed": 20.0,
+      "pressure": 998,
+      "precipitation": 35.0
+    },
+    {
+      "date": "2024-08-01T11:00:00Z",
+      "temperature": 24.0,
+      "humidity": 85,
+      "wind_speed": 10.0,
+      "pressure": 1005,
+      "precipitation": 10.0
+    },
+    {
+      "date": "2024-08-01T12:00:00Z",
+      "temperature": 25.0,
+      "humidity": 75,
+      "wind_speed": 7.0,
+      "pressure": 1010,
+      "precipitation": 5.0
+    },
+    {
+      "date": "2024-08-01T13:00:00Z",
+      "temperature": 26.0,
+      "humidity": 65,
+      "wind_speed": 5.0,
+      "pressure": 1013,
+      "precipitation": 0.0
+    },
+    {
+      "date": "2024-08-01T14:00:00Z",
+      "temperature": 27.0,
+      "humidity": 60,
+      "wind_speed": 4.0,
+      "pressure": 1015,
+      "precipitation": 0.0
+    },
+    {
+      "date": "2024-08-01T15:00:00Z",
+      "temperature": 28.0,
+      "humidity": 50,
+      "wind_speed": 3.0,
+      "pressure": 1018,
+      "precipitation": 0.0
+    },
+    {
+      "date": "2024-08-01T16:00:00Z",
+      "temperature": 27.0,
+      "humidity": 55,
+      "wind_speed": 4.0,
+      "pressure": 1015,
+      "precipitation": 0.0
+    },
+    {
+      "date": "2024-08-01T17:00:00Z",
+      "temperature": 26.0,
+      "humidity": 60,
+      "wind_speed": 5.0,
+      "pressure": 1012,
+      "precipitation": 1.0
+    },
+    {
+      "date": "2024-08-01T18:00:00Z",
+      "temperature": 25.0,
+      "humidity": 70,
+      "wind_speed": 7.0,
+      "pressure": 1009,
+      "precipitation": 5.0
+    },
+    {
+      "date": "2024-08-01T19:00:00Z",
+      "temperature": 24.0,
+      "humidity": 80,
+      "wind_speed": 10.0,
+      "pressure": 1005,
+      "precipitation": 10.0
+    },
+    {
+      "date": "2024-08-01T20:00:00Z",
+      "temperature": 23.0,
+      "humidity": 90,
+      "wind_speed": 12.0,
+      "pressure": 1002,
+      "precipitation": 15.0
+    },
+    {
+      "date": "2024-08-01T21:00:00Z",
+      "temperature": 22.0,
+      "humidity": 95,
+      "wind_speed": 15.0,
+      "pressure": 999,
+      "precipitation": 20.0
+    },
+    {
+      "date": "2024-08-01T22:00:00Z",
+      "temperature": 21.0,
+      "humidity": 98,
+      "wind_speed": 18.0,
+      "pressure": 995,
+      "precipitation": 25.0
+    },
+    {
+      "date": "2024-08-01T23:00:00Z",
+      "temperature": 20.0,
+      "humidity": 100,
+      "wind_speed": 20.0,
+      "pressure": 992,
+      "precipitation": 30.0
+    }
+  ]
+}
diff --git a/package.json b/package.json
index 63b88db7..cd90354e 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "ml5",
-  "version": "1.0.1",
+  "version": "1.0.2",
   "description": "A friendly machine learning library for the web.",
   "main": "dist/ml5.min.js",
   "scripts": {
@@ -11,7 +11,8 @@
     "postinstall": "patch-package",
     "test": "jest --config tests/jest.config.js",
     "upload-examples": "node scripts/uploadExamples.js",
-    "update-p5-version": "node scripts/updateP5Version.js"
+    "update-p5-version": "node scripts/updateP5Version.js",
+    "update-readme": "node scripts/updateReadme.js"
   },
   "files": [
     "dist"
@@ -71,5 +72,11 @@
       "@babel/preset-env"
     ]
   },
-  "prettier": {}
+  "prettier": {},
+  "packageManager": "yarn@4.3.1",
+  "engines": {
+    "node": "^20.15.1",
+    "yarn": "^4.3.1",
+    "npm": "please-use-yarn"
+  }
 }
diff --git a/src/TimeSeries/index.js b/src/TimeSeries/index.js
new file mode 100644
index 00000000..62af28fe
--- /dev/null
+++ b/src/TimeSeries/index.js
@@ -0,0 +1,733 @@
+import * as tf from "@tensorflow/tfjs";
+import callCallback from "../utils/callcallback";
+import handleArguments from "../utils/handleArguments";
+import NeuralNetwork from "./timeSeries";
+import NeuralNetworkData from "./timeSeriesData";
+import nnUtils from "../NeuralNetwork/NeuralNetworkUtils";
+import NeuralNetworkVis from "../NeuralNetwork/NeuralNetworkVis";
+
+import setBackend from "../utils/setBackend";
+
+import tsUtils from "./timeSeriesUtils";
+
+const DEFAULTS = {
+  inputs: [],
+  outputs: [],
+  dataUrl: null,
+  modelUrl: null,
+  layers: [],
+  task: null,
+  dataMode: "linear",
+  debug: false,
+  learningRate: 0.2,
+  hiddenUnits: 16,
+};
+
+class timeSeries {
+  constructor(options, callback) {
+    this.options =
+      {
+        ...DEFAULTS,
+        ...options,
+      } || DEFAULTS;
+
+    this.neuralNetwork = new NeuralNetwork();
+    this.neuralNetworkData = new NeuralNetworkData();
+    this.neuralNetworkVis = new NeuralNetworkVis();
+
+    this.data = {
+      training: [],
+    };
+
+    this.init = this.init.bind(this);
+
+    this.ready = callCallback(this.init(), callback);
+  }
+
+  async init() {
+    // workaround for Error
+    setBackend("webgl");
+
+    await tf.ready();
+    if (this.options.dataUrl) {
+      await this.loadDataFromUrl(this.options.dataUrl);
+    } else if (this.options.modelUrl) {
+      await this.load(this.options.modelUrl);
+    }
+    return this;
+  }
+  /**
+   * ////////////////////////////////////////////////////////////
+   *                   Add and Format Data
+   * ////////////////////////////////////////////////////////////
+   */
+
+  /* adding data: can only accept the following formats:
+     - for xInputs:
+      1. Sequence of objects (array of objects) 
+        [{x: , y: },{x: , y: },{x: , y: },{x: , y: }]
+      2. Sequence of arrays (array of array, order matters)
+        [[],[],[],[]]
+      3. Sequence of values (input labels should be provided by the user)
+        [[,,,,,]] e.g. shape = {inputLabels: ['x','y']} will become [{x: , y: },{x: , y: },{x: , y: },{x: , y: }]
+  
+    - for yInputs:
+      1. similar to neural network, so use same logic
+
+    - at the end of the adding data, the data is formatted to a sequence of objects similar to 1 of xinputs
+ 
+    - changed data modality into spatialData so it's a boolean: true for coordinate (spatial) data, false for a normal LSTM (linear) sequence
+  */
+
+  /**
+   * Add one sequence (xInputs) and its target (yInputs) to the raw dataset.
+   * Accepted xInputs formats are documented in the comment block above.
+   * @param {*} xInputs - a sequence of objects, arrays, or flat values
+   * @param {*} yInputs - the target output (same formats as ml5 NeuralNetwork)
+   * @param {Object} [options] - optional labels used while formatting
+   */
+  addData(xInputs, yInputs, options = null) {
+    // 1. verify format between the three possible types of xinputs
+    const xs = tsUtils.verifyAndFormatInputs(xInputs, options, this.options);
+
+    // 2. format the yInput - same logic as NN class
+    const ys = tsUtils.verifyAndFormatOutputs(yInputs, options, this.options);
+
+    // 3. add data to raw
+    this.neuralNetworkData.addData(xs, ys);
+  }
+
+  /**
+   * ////////////////////////////////////////////////////////////
+   *                   Train Data
+   * ////////////////////////////////////////////////////////////
+   */
+
+  async train(optionsOrCallback, optionsOrWhileTraining, callback) {
+    let options = {};
+    let whileTrainingCb = null;
+    let finishedTrainingCb;
+
+    if (typeof optionsOrCallback === "object") {
+      options = optionsOrCallback;
+      if (typeof optionsOrWhileTraining === "function") {
+        whileTrainingCb = null;
+        finishedTrainingCb = callback || optionsOrWhileTraining;
+      } else {
+        finishedTrainingCb = optionsOrWhileTraining;
+      }
+    } else if (typeof optionsOrCallback === "function") {
+      whileTrainingCb = optionsOrCallback;
+      finishedTrainingCb = optionsOrWhileTraining;
+    } else {
+      finishedTrainingCb = optionsOrCallback;
+    }
+
+    return callCallback(
+      this.trainInternal(options, whileTrainingCb),
+      finishedTrainingCb
+    );
+  }
+
+  async trainInternal(_options, whileTrainingCb) {
+    const options = {
+      epochs: 10,
+      batchSize: 32,
+      validationSplit: 0.1,
+      whileTraining: null,
+      ..._options,
+    };
+
+    // if debug mode is true, then use tf vis
+    if (this.options.debug === true || this.options.debug === "true") {
+      options.whileTraining = [
+        this.neuralNetworkVis.trainingVis(),
+        {
+          onEpochEnd: whileTrainingCb,
+        },
+      ];
+    } else {
+      // if not use the default training
+      // options.whileTraining = whileTrainingCb === null ? [{
+      //     onEpochEnd: (epoch, loss) => {
+      //       console.log(epoch, loss.loss)
+      //     }
+      //   }] :
+      //   [{
+      //     onEpochEnd: whileTrainingCb
+      //   }];
+      options.whileTraining = [
+        {
+          onEpochEnd: whileTrainingCb,
+        },
+      ];
+    }
+
+    // if metadata needs to be generated about the data
+    if (!this.neuralNetworkData.isMetadataReady) {
+      // if the inputs are defined as an array of [img_width, img_height, channels]
+      this.createMetaData();
+    }
+
+    // if the data still need to be summarized, onehotencoded, etc
+    if (!this.neuralNetworkData.isWarmedUp) {
+      this.prepareForTraining();
+    }
+
+    // if inputs and outputs are not specified
+    // in the options, then create the tensors
+    // from the this.neuralNetworkData.data.raws
+    if (!options.inputs && !options.outputs) {
+      const { inputs, outputs } = this.convertTrainingDataToTensors();
+      options.inputs = inputs;
+      options.outputs = outputs;
+    }
+
+    // check to see if layers are passed into the constructor
+    // then use those to create your architecture
+    if (!this.neuralNetwork.isLayered) {
+      // TODO: don't update this.options.layers - Linda
+      this.options.layers = this.createNetworkLayers(this.options.layers);
+    }
+
+    // if the model does not have any layers defined yet
+    // then use the default structure
+    if (!this.neuralNetwork.isLayered) {
+      // TODO: don't update this.options.layers - Linda
+      this.options.layers = this.addDefaultLayers();
+    }
+
+    if (!this.neuralNetwork.isCompiled) {
+      // compile the model with defaults
+      this.compile();
+    }
+
+    // train once the model is compiled
+    await this.neuralNetwork.train(options);
+  }
+
+  createMetaData() {
+    // this method does not get shape for images but instead for timesteps
+    const { inputs } = this.options;
+
+    let inputShape;
+    if (typeof inputs === "number") {
+      inputShape = inputs;
+    } else if (Array.isArray(inputs) && inputs.length > 0) {
+      inputShape = inputs.length; //will be fed into the tensors later
+    }
+
+    this.neuralNetworkData.createMetadata(inputShape);
+  }
+
+  prepareForTraining() {
+    // this.data.training = this.neuralNetworkData.applyOneHotEncodingsToDataRaw();
+    this.neuralNetworkData.isWarmedUp = true;
+  }
+
+  convertTrainingDataToTensors() {
+    return this.neuralNetworkData.convertRawToTensors(this.data.training);
+  }
+
+  createNetworkLayers(layerJsonArray) {
+    const layers = [...layerJsonArray];
+
+    const { inputUnits, outputUnits } = this.neuralNetworkData.meta;
+    const layersLength = layers.length;
+
+    if (!(layers.length >= 2)) {
+      return false;
+    }
+
+    // set the inputShape
+    layers[0].inputShape = layers[0].inputShape
+      ? layers[0].inputShape
+      : inputUnits;
+    // set the output units
+    const lastIndex = layersLength - 1;
+    const lastLayer = layers[lastIndex];
+    lastLayer.units = lastLayer.units ? lastLayer.units : outputUnits;
+
+    layers.forEach((layer) => {
+      this.addLayer(tf.layers[layer.type](layer));
+    });
+
+    return layers;
+  }
+
+  addDefaultLayers() {
+    let layers;
+    const task = this.options.task;
+    const dataMode = this.options.dataMode;
+    let taskConditions = `${task}_${dataMode}`;
+    switch (taskConditions.toLowerCase()) {
+      // if the task is classification and spatial modality
+      case "classification_spatial":
+        layers = [
+          {
+            type: "conv1d",
+            filters: 8,
+            kernelSize: 3,
+            activation: "relu",
+            inputShape: this.neuralNetworkData.meta.seriesShape,
+          },
+          {
+            type: "maxPooling1d",
+            poolSize: 2,
+          },
+          {
+            type: "conv1d",
+            filters: 16,
+            kernelSize: 3,
+            activation: "relu",
+            inputShape: this.neuralNetworkData.meta.seriesShape,
+          },
+          {
+            type: "maxPooling1d",
+            poolSize: 2,
+          },
+          {
+            type: "flatten",
+          },
+          {
+            type: "dense",
+            units: this.options.hiddenUnits,
+            activation: "relu",
+          },
+          {
+            type: "dense",
+            activation: "softmax",
+          },
+        ];
+
+        return this.createNetworkLayers(layers);
+      // if the task is classification and sequential modality
+      case "classification_linear":
+        layers = [
+          {
+            type: "lstm",
+            units: 16,
+            activation: "relu",
+            inputShape: this.neuralNetworkData.meta.seriesShape,
+            returnSequences: true,
+          },
+          {
+            type: "lstm",
+            units: 8,
+            activation: "relu",
+            returnSequences: false,
+          },
+          {
+            type: "dense",
+            units: this.options.hiddenUnits,
+            activation: "relu",
+          },
+          {
+            type: "dense",
+            activation: "softmax",
+          },
+        ];
+
+        return this.createNetworkLayers(layers);
+
+      // if the task is regression
+      case "regression_spatial":
+        layers = [
+          {
+            type: "conv1d",
+            filters: 8,
+            kernelSize: 3,
+            activation: "relu",
+            inputShape: this.neuralNetworkData.meta.seriesShape,
+          },
+          {
+            type: "maxPooling1d",
+            poolSize: 2,
+          },
+          {
+            type: "conv1d",
+            filters: 16,
+            kernelSize: 3,
+            activation: "relu",
+            inputShape: this.neuralNetworkData.meta.seriesShape,
+          },
+          {
+            type: "maxPooling1d",
+            poolSize: 2,
+          },
+          {
+            type: "flatten",
+          },
+          {
+            type: "dense",
+            units: this.options.hiddenUnits,
+            activation: "relu",
+          },
+          {
+            type: "dense",
+            activation: "sigmoid",
+          },
+        ];
+
+        return this.createNetworkLayers(layers);
+
+      case "regression_linear":
+        layers = [
+          {
+            type: "lstm",
+            units: 16,
+            activation: "relu",
+            inputShape: this.neuralNetworkData.meta.seriesShape,
+            returnSequences: true,
+          },
+          {
+            type: "lstm",
+            units: 8,
+            activation: "relu",
+          },
+          {
+            type: "dense",
+            units: this.options.hiddenUnits,
+            activation: "relu",
+          },
+          {
+            type: "dense",
+            activation: "sigmoid",
+          },
+        ];
+
+        return this.createNetworkLayers(layers);
+
+      default:
+        console.log("no inputUnits or outputUnits defined");
+        layers = [
+          {
+            type: "lstm",
+            units: 16,
+            activation: "relu",
+            inputShape: this.neuralNetworkData.meta.seriesShape,
+          },
+          {
+            type: "lstm",
+            units: 8,
+            activation: "relu",
+          },
+          {
+            type: "dense",
+            units: this.options.hiddenUnits,
+            activation: "relu",
+          },
+          {
+            type: "dense",
+            activation: "sigmoid",
+          },
+        ];
+        return this.createNetworkLayers(layers);
+    }
+  }
+
+  addLayer(layer) {
+    this.neuralNetwork.addLayer(layer);
+  }
+
+  compile() {
+    const LEARNING_RATE = this.options.learningRate;
+
+    let options = {};
+
+    if (
+      this.options.task === "classification" ||
+      this.options.task === "imageClassification"
+    ) {
+      options = {
+        loss: "categoricalCrossentropy",
+        optimizer: tf.train.adam,
+        metrics: ["accuracy"],
+      };
+    } else if (this.options.task === "regression") {
+      options = {
+        loss: "meanSquaredError",
+        optimizer: tf.train.adam,
+        metrics: ["accuracy"],
+      };
+    }
+
+    options.optimizer = options.optimizer
+      ? this.neuralNetwork.setOptimizerFunction(
+          LEARNING_RATE,
+          options.optimizer
+        )
+      : this.neuralNetwork.setOptimizerFunction(LEARNING_RATE, tf.train.sgd);
+
+    this.neuralNetwork.compile(options);
+
+    // if debug mode is true, then show the model summary
+    if (this.options.debug) {
+      this.neuralNetworkVis.modelSummary(
+        {
+          name: "Model Summary",
+        },
+        this.neuralNetwork.model
+      );
+    }
+  }
+
+  async normalizeData() {
+    if (!this.neuralNetworkData.data.raw.length > 0) {
+      throw new Error(
+        "Empty Data Error: You Cannot Normalize/Train without adding any data! Please add data first"
+      );
+    }
+    if (!this.neuralNetworkData.isMetadataReady) {
+      this.createMetaData();
+    }
+
+    if (!this.neuralNetworkData.isWarmedUp) {
+      this.prepareForTraining();
+    }
+
+    const trainingData = this.neuralNetworkData.normalizeDataRaw();
+
+    // set this equal to the training data
+    this.data.training = trainingData;
+
+    // set isNormalized to true
+    this.neuralNetworkData.meta.isNormalized = true;
+  }
+
+  // ////////
+
+  classify(_input, _cb) {
+    return callCallback(this.classifyInternal(_input), _cb);
+  }
+
+  async classifyInternal(_input) {
+    const { meta } = this.neuralNetworkData;
+    const headers = Object.keys(meta.inputs);
+
+    let inputData;
+
+    inputData = this.formatInputsForPredictionAll(_input);
+
+    const unformattedResults = await this.neuralNetwork.classify(inputData);
+    inputData.dispose();
+
+    if (meta !== null) {
+      const label = Object.keys(meta.outputs)[0];
+      const vals = Object.entries(meta.outputs[label].legend);
+
+      const formattedResults = unformattedResults.map((unformattedResult) => {
+        return vals
+          .map((item, idx) => {
+            return {
+              [item[0]]: unformattedResult[idx],
+              label: item[0],
+              confidence: unformattedResult[idx],
+            };
+          })
+          .sort((a, b) => b.confidence - a.confidence);
+      });
+
+      // return single array if the length is less than 2,
+      // otherwise return array of arrays
+      if (formattedResults.length < 2) {
+        return formattedResults[0];
+      }
+      return formattedResults;
+    }
+
+    return unformattedResults;
+  }
+
+  formatInputsForPredictionAll(_input) {
+    const { meta } = this.neuralNetworkData;
+    const inputHeaders = Object.keys(meta.inputs);
+
+    const formatted_inputs = tsUtils.verifyAndFormatInputs(
+      _input,
+      null,
+      this.options
+    );
+    const normalized_inputs = this.neuralNetworkData.normalizePredictData(
+      formatted_inputs,
+      meta.inputs
+    );
+    const output = tf.tensor(normalized_inputs);
+
+    return output;
+  }
+
+  predict(_input, _cb) {
+    return callCallback(this.predictInternal(_input), _cb);
+  }
+
+  async predictInternal(_input) {
+    const { meta } = this.neuralNetworkData;
+
+    const inputData = this.formatInputsForPredictionAll(_input);
+
+    const unformattedResults = await this.neuralNetwork.predict(inputData);
+    inputData.dispose();
+
+    if (meta !== null) {
+      const labels = Object.keys(meta.outputs);
+
+      const formattedResults = unformattedResults.map((unformattedResult) => {
+        return labels.map((item, idx) => {
+          // check to see if the data were normalized
+          // if not, then send back the values, otherwise
+          // unnormalize then return
+          let val;
+          let unNormalized;
+          if (meta.isNormalized) {
+            const { min, max } = meta.outputs[item];
+            val = nnUtils.unnormalizeValue(unformattedResult[idx], min, max);
+            unNormalized = unformattedResult[idx];
+          } else {
+            val = unformattedResult[idx];
+          }
+
+          const d = {
+            [labels[idx]]: val,
+            label: item,
+            value: val,
+          };
+
+          // if unNormalized is not undefined, then
+          // add that to the output
+          if (unNormalized) {
+            d.unNormalizedValue = unNormalized;
+          }
+
+          return d;
+        });
+      });
+
+      // return single array if the length is less than 2,
+      // otherwise return array of arrays
+      if (formattedResults.length < 2) {
+        return formattedResults[0];
+      }
+      return formattedResults;
+    }
+
+    // if no meta exists, then return unformatted results;
+    return unformattedResults;
+  }
+
+  /**
+   * ////////////////////////////////////////////////////////////
+   * Save / Load Data
+   * ////////////////////////////////////////////////////////////
+   */
+
+  saveData(name, callback) {
+    const args = handleArguments(name, callback);
+    return callCallback(
+      this.neuralNetworkData.saveData(args.name),
+      args.callback
+    );
+  }
+
+  async loadData(filesOrPath, callback) {
+    return callCallback(this.neuralNetworkData.loadData(filesOrPath), callback);
+  }
+
+  async loadDataFromUrl(dataUrl, inputs, outputs) {
+    let json;
+    let dataFromUrl;
+    try {
+      if (dataUrl.endsWith(".csv")) {
+        dataFromUrl = await this.neuralNetworkData.loadCSV(
+          dataUrl,
+          inputs,
+          outputs
+        );
+      } else if (dataUrl.endsWith(".json")) {
+        dataFromUrl = await this.neuralNetworkData.loadJSON(
+          dataUrl,
+          inputs,
+          outputs
+        );
+      } else if (dataUrl.includes("blob")) {
+        dataFromUrl = await this.loadBlob(dataUrl, inputs, outputs);
+      } else {
+        throw new Error("Not a valid data format. Must be csv or json");
+      }
+    } catch (error) {
+      console.error(error);
+      throw new Error(error);
+    }
+
+    dataFromUrl.map((item) => {
+      this.addData(item.xs, item.ys);
+    });
+
+    this.createMetaData();
+
+    this.prepareForTraining();
+  }
+
+  /**
+   * ////////////////////////////////////////////////////////////
+   * Save / Load Model
+   * ////////////////////////////////////////////////////////////
+   */
+
+  async save(name, callback) {
+    const args = handleArguments(name, callback);
+    const modelName = args.string || "model";
+    // save the model
+    return callCallback(
+      Promise.all([
+        this.neuralNetwork.save(modelName),
+        this.neuralNetworkData.saveMeta(modelName),
+      ]),
+      args.callback
+    );
+  }
+
+  /**
+   * @public - also called internally by init() when there is a modelUrl in the options
+   * load a model and metadata
+   * @param {string | FileList | Object} filesOrPath - The URL of the file to load,
+   *  or a FileList object (.files) from an HTML element <input type="file">.
+   * @param {ML5Callback<void[]>} [callback] Optional - A function to call when the loading is complete.
+   * @return {Promise<void[]>}
+   */
+  async load(filesOrPath, callback) {
+    return callCallback(
+      Promise.all([
+        this.neuralNetwork.load(filesOrPath),
+        this.neuralNetworkData.loadMeta(filesOrPath),
+      ]),
+      callback
+    );
+  }
+
+  /**
+   * dispose and release memory for a model
+   */
+  dispose() {
+    this.neuralNetwork.dispose();
+  }
+
+  padCoordinates(coordinates, targetPointCount) {
+    const maxEpsilon = int(coordinates.length / 2);
+    return tsUtils.padCoordinates(coordinates, targetPointCount, maxEpsilon);
+  }
+}
+
+const TimeSeries = (inputsOrOptions, outputsOrCallback, callback) => {
+  let options;
+  let cb;
+
+  if (inputsOrOptions instanceof Object) {
+    options = inputsOrOptions;
+    cb = outputsOrCallback;
+  } else {
+    options = {
+      inputs: inputsOrOptions,
+      outputs: outputsOrCallback,
+    };
+    cb = callback;
+  }
+
+  const instance = new timeSeries(options, cb);
+  return instance;
+};
+
+export default TimeSeries;
diff --git a/src/TimeSeries/timeSeries.js b/src/TimeSeries/timeSeries.js
new file mode 100644
index 00000000..fc560ecf
--- /dev/null
+++ b/src/TimeSeries/timeSeries.js
@@ -0,0 +1,251 @@
+import * as tf from "@tensorflow/tfjs";
+import { saveBlob } from "../utils/io";
+import { randomGaussian } from "../utils/random";
+
+/*
+
+Things changed from neural network class:
+
+1. No neuro evolution
+
+*/
+
class NeuralNetwork {
  /**
   * Thin wrapper around a tf.Sequential model used by the TimeSeries code.
   * (Differs from the main NeuralNetwork class: no neuro evolution.)
   */
  constructor() {
    // lifecycle flags
    this.isTrained = false;
    this.isCompiled = false;
    this.isLayered = false;
    /**
     * @type {tf.Sequential | null} - the TensorFlow model
     */
    this.model = null;

    // bind methods so they can safely be handed around as callbacks
    this.init = this.init.bind(this);
    this.createModel = this.createModel.bind(this);
    this.addLayer = this.addLayer.bind(this);
    this.compile = this.compile.bind(this);
    this.setOptimizerFunction = this.setOptimizerFunction.bind(this);
    this.train = this.train.bind(this);
    this.predict = this.predict.bind(this);
    this.classify = this.classify.bind(this);
    this.save = this.save.bind(this);
    this.load = this.load.bind(this);

    // initialize
    this.init();
  }

  /**
   * initialize with create model
   */
  init() {
    this.createModel();
  }

  /**
   * creates a sequential model
   * uses switch/case for potential future where different formats are supported
   * @param {*} _type
   * @return {tf.Sequential}
   */
  createModel(_type = "sequential") {
    switch (_type.toLowerCase()) {
      // only "sequential" is supported today; any other value falls back to it
      // (the original had two identical case bodies — merged here)
      case "sequential":
      default:
        this.model = tf.sequential();
        return this.model;
    }
  }

  /**
   * add layer to the model
   * if the model has 2 or more layers switch the isLayered flag
   * @param {tf.Layer} layer
   * @void
   */
  addLayer(layer) {
    this.model.add(layer);

    // check if it has at least an input and output layer
    if (this.model.layers.length >= 2) {
      this.isLayered = true;
    }
  }

  /**
   * Compile the model
   * if the model is compiled, set the isCompiled flag to true
   * @param {*} _modelOptions - passed straight through to tf model.compile
   */
  compile(_modelOptions) {
    this.model.compile(_modelOptions);
    this.isCompiled = true;
  }

  /**
   * Set the optimizer function given the learning rate
   * as a parameter
   * @param {*} learningRate
   * @param {*} optimizer - factory invoked with the learning rate
   */
  setOptimizerFunction(learningRate, optimizer) {
    return optimizer.call(this, learningRate);
  }

  /**
   * Train the model.
   * Disposes the input/output tensors once fitting completes.
   * @param {Object} _options - { inputs, outputs, batchSize, epochs,
   *   shuffle, validationSplit, whileTraining }
   * @return {Promise<void>}
   */
  async train(_options) {
    const TRAINING_OPTIONS = _options;

    const xs = TRAINING_OPTIONS.inputs;
    const ys = TRAINING_OPTIONS.outputs;
    // (debug console.log of the tensors removed)
    const { batchSize, epochs, shuffle, validationSplit, whileTraining } =
      TRAINING_OPTIONS;

    await this.model.fit(xs, ys, {
      batchSize,
      epochs,
      shuffle,
      validationSplit,
      callbacks: whileTraining,
    });

    // this call takes ownership of the tensors; free them when done
    xs.dispose();
    ys.dispose();

    this.isTrained = true;
  }

  /**
   * returns the prediction as an array synchronously
   * Disposes both the output tensor and the given input tensor.
   * @param {*} _inputs - input tensor
   */
  predictSync(_inputs) {
    const output = tf.tidy(() => {
      return this.model.predict(_inputs);
    });
    const result = output.arraySync();

    output.dispose();
    _inputs.dispose();

    return result;
  }

  /**
   * returns the prediction as an array
   * Disposes both the output tensor and the given input tensor.
   * @param {*} _inputs - input tensor
   */
  async predict(_inputs) {
    const output = tf.tidy(() => {
      return this.model.predict(_inputs);
    });
    const result = await output.array();

    output.dispose();
    _inputs.dispose();

    return result;
  }

  /**
   * classify is the same as .predict()
   * @param {*} _inputs
   */
  async classify(_inputs) {
    return this.predict(_inputs);
  }

  /**
   * classifySync is the same as .predictSync()
   * @param {*} _inputs
   */
  classifySync(_inputs) {
    return this.predictSync(_inputs);
  }

  // predictMultiple
  // classifyMultiple
  // are the same as .predict()

  /**
   * save the model.json and the weights.bin files
   * @param {string} modelName
   * @return {Promise<void>}
   */
  async save(modelName = "model") {
    await this.model.save(
      tf.io.withSaveHandler(async (data) => {
        // build a manifest that points at the sibling weights file
        this.weightsManifest = {
          modelTopology: data.modelTopology,
          weightsManifest: [
            {
              paths: [`./${modelName}.weights.bin`],
              weights: data.weightSpecs,
            },
          ],
        };
        // (two leftover debug console.log statements removed)
        await saveBlob(
          data.weightData,
          `${modelName}.weights.bin`,
          "application/octet-stream"
        );
        await saveBlob(
          JSON.stringify(this.weightsManifest),
          `${modelName}.json`,
          "text/plain"
        );
      })
    );
  }

  /**
   * loads the model and weights
   * @param {string | FileList | Object} filesOrPath - a URL string, a FileList
   *   from an <input type="file">, or a {model, weights} URL object
   */
  async load(filesOrPath) {
    if (filesOrPath instanceof FileList) {
      const files = Array.from(filesOrPath);
      // find the correct files
      const model = files.find(
        (file) => file.name.includes(".json") && !file.name.includes("_meta")
      );
      const weights = files.find((file) => file.name.includes(".bin"));
      // load the model
      this.model = await tf.loadLayersModel(
        tf.io.browserFiles([model, weights])
      );
    } else if (filesOrPath instanceof Object) {
      this.model = await tf.loadLayersModel(
        tf.io.http(filesOrPath.model, {
          // Override the weights path from the JSON weightsManifest
          weightUrlConverter: (weightFileName) => {
            return filesOrPath.weights || weightFileName;
          },
        })
      );
    } else {
      this.model = await tf.loadLayersModel(filesOrPath);
    }

    // a loaded model is ready to use as-is
    this.isCompiled = true;
    this.isLayered = true;
    this.isTrained = true;
  }

  /**
   * dispose and release the memory for the model
   */
  dispose() {
    this.model.dispose();
  }
}
+export default NeuralNetwork;
diff --git a/src/TimeSeries/timeSeriesData.js b/src/TimeSeries/timeSeriesData.js
new file mode 100644
index 00000000..ab5f10a8
--- /dev/null
+++ b/src/TimeSeries/timeSeriesData.js
@@ -0,0 +1,943 @@
+import * as tf from "@tensorflow/tfjs";
+import axios from "axios";
+import { saveBlob } from "../utils/io";
+import modelLoader from "../utils/modelLoader";
+import nnUtils from "../NeuralNetwork/NeuralNetworkUtils";
+
+import tsUtils from "./timeSeriesUtils";
+
+class NeuralNetworkData {
  constructor() {
    // metadata describing the dataset; persisted via saveMeta()/loadMeta()
    this.meta = {
      inputUnits: null, // Number
      outputUnits: null, // Number
      // objects describing input/output data by property name
      inputs: {}, // { name1: {dtype}, name2: {dtype}  }
      outputs: {}, // { name1: {dtype} }
      isNormalized: false, // Boolean - keep this in meta for model saving/loading
    };

    // true once createMetadata() or loadMeta() has populated this.meta
    this.isMetadataReady = false;
    this.isWarmedUp = false;

    this.data = {
      raw: [], // array of {xs:[{},{}], ys:{}}
    };
  }
+
+  /**
+   * ////////////////////////////////////////////////////////
+   * Add Data
+   * ////////////////////////////////////////////////////////
+   */
+
  /**
   * Add Data
   * @param {object} xInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array
   * @param {*} yInputObj, {key: value}, key must be the name of the property value must be a String, Number, or Array
   * @void - updates this.data
   */
  addData(xInputObj, yInputObj) {
    // each record pairs one input sequence (xs) with one output object (ys)
    this.data.raw.push({
      xs: xInputObj,
      ys: yInputObj,
    });
  }
+
+  /**
+   * ////////////////////////////////////////////////////////
+   * Summarize Data
+   * ////////////////////////////////////////////////////////
+   */
+
  /**
   * create the metadata from the data
   * this covers:
   *  1. getting the datatype from the data
   *  2. getting the min and max from the data
   *  3. getting the oneHot encoded values
   *  4. getting the inputShape and outputUnits from the data
   * NOTE: the steps run in this order on purpose — dtypes feed the stats,
   * and the oneHot legends feed the unit counts.
   * @param {Array<number>} [inputShape]
   * @void
   */
  createMetadata(inputShape = null) {
    // get the data type for each property
    this.getDTypesFromSeriesData();
    // get the stats - min, max
    this.getDataStats();
    // onehot encode
    this.getDataOneHot();
    // calculate the input units from the data
    this.getDataUnits(inputShape);
    // get the shape of batch

    this.isMetadataReady = true;
  }
+
+  /**
+   * getDTypesFromData
+   * gets the data types of the data we're using
+   * important for handling oneHot
+   * @private
+   * @void - updates this.meta
+   */
+  getDTypesFromSeriesData() {
+    const meta = {
+      ...this.meta,
+      inputs: {},
+      outputs: {},
+    };
+
+    const sample = this.data.raw[0];
+
+    //consistent dTypes have already been checked at add data
+    const xs = Object.keys(sample.xs[0]); //since time series data is in form of array
+    const ys = Object.keys(sample.ys);
+    xs.forEach((prop) => {
+      meta.inputs[prop] = {
+        dtype: nnUtils.getDataType(sample.xs[0][prop]),
+      };
+    });
+
+    ys.forEach((prop) => {
+      meta.outputs[prop] = {
+        dtype: nnUtils.getDataType(sample.ys[prop]),
+      };
+    });
+
+    this.meta = meta;
+  }
+
  /**
   * get stats about the data
   * Computes min/max for every input ("xs") and output ("ys") property.
   * @private
   * @void
   */
  getDataStats() {
    this.meta.inputs = this.getInputMetaStats(this.meta.inputs, "xs");
    this.meta.outputs = this.getInputMetaStats(this.meta.outputs, "ys");
  }
+
+  /**
+   * get back the min and max of each label
+   * @private
+   * @param {Object} inputOrOutputMeta
+   * @param {"xs" | "ys"} xsOrYs
+   * @return {Object}
+   */
+  getInputMetaStats(inputOrOutputMeta, xsOrYs) {
+    const inputMeta = Object.assign({}, inputOrOutputMeta);
+
+    Object.keys(inputMeta).forEach((k) => {
+      if (inputMeta[k].dtype === "string") {
+        inputMeta[k].min = 0;
+        inputMeta[k].max = 1;
+      } else if (inputMeta[k].dtype === "number") {
+        let dataAsArray;
+        if (xsOrYs === "ys") {
+          dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]);
+        } else if (xsOrYs === "xs") {
+          dataAsArray = this.data.raw.flatMap((item) =>
+            item[xsOrYs].map((obj) => obj[k])
+          );
+        }
+        inputMeta[k].min = nnUtils.getMin(dataAsArray);
+        inputMeta[k].max = nnUtils.getMax(dataAsArray);
+      } else if (inputMeta[k].dtype === "array") {
+        const dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]).flat();
+        inputMeta[k].min = nnUtils.getMin(dataAsArray);
+        inputMeta[k].max = nnUtils.getMax(dataAsArray);
+      }
+    });
+
+    return inputMeta;
+  }
+
  /**
   * getDataOneHot
   * creates onehot encodings for the input and outputs
   * and adds them to the meta info
   * @private
   * @void
   */
  getDataOneHot() {
    this.meta.inputs = this.getInputMetaOneHot(this.meta.inputs, "xs");
    this.meta.outputs = this.getInputMetaOneHot(this.meta.outputs, "ys");
  }
+
+  /**
+   * getOneHotMeta
+   * @param {Object} _inputsMeta
+   * @param {"xs" | "ys"} xsOrYs
+   * @return {Object}
+   */
+  getInputMetaOneHot(_inputsMeta, xsOrYs) {
+    const inputsMeta = Object.assign({}, _inputsMeta);
+
+    Object.entries(inputsMeta).forEach((arr) => {
+      // the key
+      const key = arr[0];
+      // the value
+      const { dtype } = arr[1];
+
+      if (dtype === "string") {
+        const uniqueVals = [
+          ...new Set(this.data.raw.map((obj) => obj[xsOrYs][key])),
+        ];
+        const oneHotMeta = this.createOneHotEncodings(uniqueVals);
+        inputsMeta[key] = {
+          ...inputsMeta[key],
+          ...oneHotMeta,
+        };
+      }
+    });
+    return inputsMeta;
+  }
+
+  /**
+   * get the data units, inputshape and output units
+   * @private
+   * @param {Array<number>} arrayShape
+   * @void
+   */
+  getDataUnits(arrayShape = null) {
+    // if the data has a shape pass it in
+    if (arrayShape) {
+      this.meta.inputUnits = arrayShape;
+    } else {
+      this.meta.inputUnits = [this.getInputMetaUnits(this.meta.inputs)].flat();
+    }
+
+    this.meta.outputUnits = this.getInputMetaUnits(this.meta.outputs);
+  }
+
+  /**
+   * @private
+   * @param {Object} inputsMeta
+   * @return {number | Array<number>}
+   */
+  // eslint-disable-next-line class-methods-use-this
+  getInputMetaUnits(inputsMeta) {
+    let units = 0;
+
+    Object.entries(inputsMeta).forEach((arr) => {
+      const { dtype } = arr[1];
+      if (dtype === "number") {
+        units += 1;
+      } else if (dtype === "string") {
+        const { uniqueValues } = arr[1];
+
+        const uniqueCount = uniqueValues.length;
+        units += uniqueCount;
+      } else if (dtype === "array") {
+        // TODO: User must input the shape of the
+        // image size correctly.
+        units = [];
+      }
+    });
+
+    return units;
+  }
+
+  /**
+   * Returns a legend mapping the
+   * data values to oneHot encoded values
+   * @private
+   * @param {Array<string>} _uniqueValuesArray
+   * @return {Object}
+   */
+  // eslint-disable-next-line class-methods-use-this, no-unused-vars
+  createOneHotEncodings(_uniqueValuesArray) {
+    return tf.tidy(() => {
+      const output = {
+        uniqueValues: _uniqueValuesArray,
+        legend: {},
+      };
+
+      const uniqueVals = _uniqueValuesArray; // [...new Set(this.data.raw.map(obj => obj.xs[prop]))]
+      // get back values from 0 to the length of the uniqueVals array
+      const onehotValues = uniqueVals.map((item, idx) => idx);
+      // oneHot encode the values in the 1d tensor
+      const oneHotEncodedValues = tf.oneHot(
+        tf.tensor1d(onehotValues, "int32"),
+        uniqueVals.length
+      );
+      // convert them from tensors back out to an array
+      const oneHotEncodedValuesArray = oneHotEncodedValues.arraySync();
+
+      // populate the legend with the key/values
+      uniqueVals.forEach((uVal, uIdx) => {
+        output.legend[uVal] = oneHotEncodedValuesArray[uIdx];
+      });
+
+      return output;
+    });
+  }
+
+  /**
+   * ////////////////////////////////////////////////////////
+   * Tensor handling
+   * ////////////////////////////////////////////////////////
+   */
+
+  /**
+   * convertRawToTensors
+   * converts array of {xs, ys} to tensors
+   * @param {*} dataRaw
+   *
+   * @return {{ inputs: tf.Tensor, outputs: tf.Tensor }}
+   */
+  // eslint-disable-next-line class-methods-use-this, no-unused-vars
+  convertRawToTensors(dataRaw) {
+    const meta = Object.assign({}, this.meta);
+    const dataLength = dataRaw.length;
+
+    return tf.tidy(() => {
+      const inputArr = [];
+      const outputArr = [];
+
+      dataRaw.forEach((row) => {
+        // get xs
+        // const xs = Object.keys(meta.inputs)
+        //   .map((k) => {
+        //     return row.xs[k];
+        //   })
+        //   .flat();
+
+        // inputArr.push(xs);
+
+        const xs = row.xs;
+        inputArr.push(xs);
+
+        // get ys
+        const ys = Object.keys(meta.outputs)
+          .map((k) => {
+            return row.ys[k];
+          })
+          .flat();
+
+        outputArr.push(ys);
+      });
+
+      // const inputs = tf.tensor(inputArr.flat(), [
+      //   dataLength,
+      //   ...meta.inputUnits,
+      // ]);
+      const inputs = tf.tensor(inputArr);
+
+      const outputs = tf.tensor(outputArr.flat(), [
+        dataLength,
+        meta.outputUnits,
+      ]);
+
+      return {
+        inputs,
+        outputs,
+      };
+    });
+  }
+
+  /**
+   * ////////////////////////////////////////////////////////
+   * data normalization / unnormalization
+   * ////////////////////////////////////////////////////////
+   */
+
  /**
   * normalize the dataRaw input
   * Normalizes xs and ys separately, then zips them back into an array of
   * {xs, ys} records ready for tensor conversion.
   * @return {Array<object>}
   */
  normalizeDataRaw() {
    const normXs = this.normalizeInputData(this.meta.inputs, "xs");
    const normYs = this.normalizeInputData(this.meta.outputs, "ys");
    const normalizedData = tsUtils.zipArraySequence(normXs, normYs);

    return normalizedData;
  }
+
+  /**
+   * @param {Object} inputOrOutputMeta
+   * @param {"xs" | "ys"} xsOrYs
+   * @return {Array<object>}
+   */
+  normalizeInputData(inputOrOutputMeta, xsOrYs) {
+    const dataRaw = this.data.raw;
+
+    // the data length
+    const dataLength = dataRaw.length;
+
+    // the copy of the inputs.meta[inputOrOutput]
+    const inputMeta = Object.assign({}, inputOrOutputMeta);
+
+    // normalized output object
+    const normalized = {};
+    Object.keys(inputMeta).forEach((k) => {
+      // get the min and max values
+      const options = {
+        min: inputMeta[k].min,
+        max: inputMeta[k].max,
+      };
+
+      // depending on the input type, normalize accordingly
+      if (inputMeta[k].dtype === "string") {
+        const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]);
+        options.legend = inputMeta[k].legend;
+        normalized[k] = this.normalizeArray(dataAsArray, options);
+      } else if (inputMeta[k].dtype === "number") {
+        let dataAsArray;
+        if (xsOrYs === "ys") {
+          dataAsArray = this.data.raw.map((item) => item[xsOrYs][k]);
+        } else if (xsOrYs === "xs") {
+          dataAsArray = this.data.raw.flatMap((item) =>
+            item[xsOrYs].map((obj) => obj[k])
+          );
+        }
+        normalized[k] = this.normalizeArray(dataAsArray, options);
+      } else if (inputMeta[k].dtype === "array") {
+        const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]);
+        normalized[k] = dataAsArray.map((item) =>
+          this.normalizeArray(item, options)
+        );
+      }
+    });
+
+    let output;
+    if (xsOrYs == "ys") {
+      output = [...new Array(dataLength).fill(null)].map((item, idx) => {
+        const row = {
+          [xsOrYs]: {},
+        };
+
+        Object.keys(inputMeta).forEach((k) => {
+          row[xsOrYs][k] = normalized[k][idx];
+        });
+
+        return row;
+      });
+    } else if (xsOrYs == "xs") {
+      // reshape array - already ready for tensorconversion
+      const features = Object.keys(inputMeta);
+      const feature_length = features.length;
+
+      const seriesStep = dataRaw[0]["xs"].length;
+
+      const batch = normalized[features[0]].length / seriesStep;
+
+      this.meta.seriesShape = [seriesStep, feature_length];
+      console.log("series shape", this.meta.seriesShape);
+      let zipped = [];
+
+      // zip arrays before reshaping
+      for (let idx = 0; idx < seriesStep * feature_length * batch; idx++) {
+        features.forEach((k) => {
+          zipped.push(normalized[k][idx]);
+        });
+      }
+
+      // reshaping
+      output = tsUtils.reshapeTo3DArray(zipped, [
+        batch,
+        seriesStep,
+        feature_length,
+      ]);
+    }
+
+    console.log("thismeta", this.meta);
+    return output;
+  }
+
+  /**
+   * normalizeArray
+   * @param {*} _input
+   * @param {*} _options
+   */
+  // eslint-disable-next-line no-unused-vars, class-methods-use-this
+  normalizeArray(inputArray, options) {
+    const { min, max } = options;
+
+    // if the data are onehot encoded, replace the string
+    // value with the onehot array
+    // if none exists, return the given value
+    if (options.legend) {
+      const normalized = inputArray.map((v) => {
+        return options.legend[v] ? options.legend[v] : v;
+      });
+      return normalized;
+    }
+
+    // if the dtype is a number
+    if (inputArray.every((v) => typeof v === "number")) {
+      const normalized = inputArray.map((v) =>
+        nnUtils.normalizeValue(v, min, max)
+      );
+      return normalized;
+    }
+
+    // otherwise return the input array
+    // return inputArray;
+    throw new Error("error in inputArray of normalizeArray() function");
+  }
+
+  normalizePredictData(dataRaw, inputOrOutputMeta) {
+    const inputMeta = Object.assign({}, inputOrOutputMeta);
+    const xsOrYs = "xs";
+    const predict_normalized = {};
+    Object.keys(inputMeta).forEach((k) => {
+      // get the min and max values
+      const options = {
+        min: inputMeta[k].min,
+        max: inputMeta[k].max,
+      };
+      if (inputMeta[k].dtype === "string") {
+        const dataAsArray = dataRaw.map((item) => item[xsOrYs][k]);
+        options.legend = inputMeta[k].legend;
+        predict_normalized[k] = this.normalizeArray(dataAsArray, options);
+      } else if (inputMeta[k].dtype === "number") {
+        const dataAsArray = Array(dataRaw).flatMap((item) =>
+          item.map((obj) => obj[k])
+        );
+        console.log(dataAsArray);
+        predict_normalized[k] = this.normalizeArray(dataAsArray, options);
+      }
+    });
+
+    console.log("done", predict_normalized);
+
+    const features = Object.keys(inputMeta);
+    const feature_length = features.length;
+
+    const seriesStep = dataRaw.length;
+
+    const batch = 1;
+    let zipped = [];
+
+    // zip arrays before reshaping
+    for (let idx = 0; idx < seriesStep * feature_length * batch; idx++) {
+      features.forEach((k) => {
+        zipped.push(predict_normalized[k][idx]);
+      });
+    }
+    // reshaping
+    const output = tsUtils.reshapeTo3DArray(zipped, [
+      batch,
+      seriesStep,
+      feature_length,
+    ]);
+    return output;
+  }
+
+  /**
+   * unNormalizeArray
+   * @param {*} _input
+   * @param {*} _options
+   */
+  // eslint-disable-next-line no-unused-vars, class-methods-use-this
+  // unnormalizeArray(inputArray, options) {
+  //   const { min, max } = options;
+
+  //   // if the data is onehot encoded then remap the
+  //   // values from those oneHot arrays
+  //   if (options.legend) {
+  //     const unnormalized = inputArray.map((v) => {
+  //       let res;
+  //       Object.entries(options.legend).forEach((item) => {
+  //         const key = item[0];
+  //         const val = item[1];
+  //         const matches = v
+  //           .map((num, idx) => num === val[idx])
+  //           .every((truthy) => truthy === true);
+  //         if (matches) res = key;
+  //       });
+  //       return res;
+  //     });
+
+  //     return unnormalized;
+  //   }
+
+  //   // if the dtype is a number
+  //   if (inputArray.every((v) => typeof v === "number")) {
+  //     const unnormalized = inputArray.map((v) =>
+  //       nnUtils.unnormalizeValue(v, min, max)
+  //     );
+  //     return unnormalized;
+  //   }
+
+  //   // otherwise return the input array
+  //   // return inputArray;
+  //   throw new Error("error in inputArray of normalizeArray() function");
+  // }
+
+  /*
+   * ////////////////////////////////////////////////
+   * One hot encoding handling
+   * ////////////////////////////////////////////////
+   */
+
+  /**
+   * applyOneHotEncodingsToDataRaw
+   * does not set this.data.raws
+   * but rather returns them
+   */
+  applyOneHotEncodingsToDataRaw() {
+    const meta = Object.assign({}, this.meta);
+
+    const output = this.data.raw.map((row) => {
+      const xs = {
+        ...row.xs,
+      };
+      const ys = {
+        ...row.ys,
+      };
+
+      // get xs
+      Object.keys(meta.inputs).forEach((k) => {
+        if (meta.inputs[k].legend) {
+          xs[k] = meta.inputs[k].legend[row.xs[k]];
+        }
+      });
+
+      Object.keys(meta.outputs).forEach((k) => {
+        if (meta.outputs[k].legend) {
+          ys[k] = meta.outputs[k].legend[row.ys[k]];
+        }
+      });
+
+      return {
+        xs,
+        ys,
+      };
+    });
+    return output;
+  }
+
+  /**
+   * ////////////////////////////////////////////////
+   * saving / loading data
+   * ////////////////////////////////////////////////
+   */
+
+  /**
+   * Loads data from a URL using the appropriate function
+   * @param {*} dataUrl
+   * @param {*} inputs
+   * @param {*} outputs
+   * @void
+   */
+  async loadDataFromUrl(dataUrl, inputs, outputs) {
+    try {
+      if (dataUrl.endsWith(".csv")) {
+        await this.loadCSV(dataUrl, inputs, outputs);
+      } else if (dataUrl.endsWith(".json")) {
+        await this.loadJSON(dataUrl, inputs, outputs);
+      } else if (dataUrl.includes("blob")) {
+        await this.loadBlob(dataUrl, inputs, outputs);
+      } else {
+        throw new Error("Not a valid data format. Must be csv or json");
+      }
+    } catch (error) {
+      console.error(error);
+      throw new Error(error);
+    }
+  }
+
+  /**
+   * loadJSON
+   * @param {*} dataUrlOrJson
+   * @param {*} inputLabels
+   * @param {*} outputLabels
+   * @void
+   */
+  async loadJSON(dataUrlOrJson, inputLabels, outputLabels) {
+    try {
+      let json;
+      // handle loading parsedJson
+      if (dataUrlOrJson instanceof Object) {
+        json = Object.assign({}, dataUrlOrJson);
+      } else {
+        const { data } = await axios.get(dataUrlOrJson);
+        json = data;
+      }
+
+      // format the data.raw array
+      // this.formatRawData(json, inputLabels, outputLabels);
+      return this.findEntries(json);
+    } catch (err) {
+      console.error("error loading json");
+      throw new Error(err);
+    }
+  }
+
+  /**
+   * loadCSV
+   * @param {*} dataUrl
+   * @param {*} inputLabels
+   * @param {*} outputLabels
+   * @void
+   */
+  async loadCSV(dataUrl, inputLabels, outputLabels) {
+    try {
+      const myCsv = tf.data.csv(dataUrl);
+      const loadedData = await myCsv.toArray();
+      const json = {
+        entries: loadedData,
+      };
+      // format the data.raw array
+      // this.formatRawData(json, inputLabels, outputLabels);
+      return this.findEntries(json);
+    } catch (err) {
+      console.error("error loading csv", err);
+      throw new Error(err);
+    }
+  }
+
+  /**
+   * loadBlob
+   * @param {*} dataUrlOrJson
+   * @param {*} inputLabels
+   * @param {*} outputLabels
+   * @void
+   */
+  async loadBlob(dataUrlOrJson, inputLabels, outputLabels) {
+    try {
+      const { data } = await axios.get(dataUrlOrJson);
+      const text = data; // await data.text();
+
+      if (nnUtils.isJsonOrString(text)) {
+        const json = JSON.parse(text);
+        await this.loadJSON(json, inputLabels, outputLabels);
+      } else {
+        const json = this.csvToJSON(text);
+        await this.loadJSON(json, inputLabels, outputLabels);
+      }
+    } catch (err) {
+      console.log("mmm might be passing in a string or something!", err);
+      throw new Error(err);
+    }
+  }
+
+  /**
+   * loadData from fileinput or path
+   * @param {string | FileList | Object} filesOrPath
+   * @return {Promise<void>}
+   */
+  async loadData(filesOrPath) {
+    try {
+      let loadedData;
+
+      if (typeof filesOrPath !== "string") {
+        const file = filesOrPath[0];
+        const fr = new FileReader();
+        fr.readAsText(file);
+        if (file.name.includes(".json")) {
+          const temp = await file.text();
+          loadedData = JSON.parse(temp);
+        } else {
+          console.log(
+            'data must be a json object containing an array called "data" or "entries'
+          );
+        }
+      } else {
+        loadedData = await axios.get(filesOrPath, { responseType: "text" });
+        const text = JSON.stringify(loadedData.data);
+        if (nnUtils.isJsonOrString(text)) {
+          loadedData = JSON.parse(text);
+          console.log(loadedData);
+        } else {
+          console.log(
+            "Whoops! something went wrong. Either this kind of data is not supported yet or there is an issue with .loadData"
+          );
+        }
+      }
+
+      this.data.raw = this.findEntries(loadedData);
+
+      // check if a data or entries property exists
+      if (!this.data.raw.length > 0) {
+        console.log(
+          'data must be a json object containing an array called "data" '
+        );
+      }
+    } catch (error) {
+      throw new Error(error);
+    }
+  }
+
+  /**
+   * saveData
+   * @param {string} [name]
+   * @return {Promise<void>}
+   */
+  async saveData(name) {
+    const today = new Date();
+    const date = `${String(today.getFullYear())}-${String(
+      today.getMonth() + 1
+    )}-${String(today.getDate())}`;
+    const time = `${String(today.getHours())}-${String(
+      today.getMinutes()
+    )}-${String(today.getSeconds())}`;
+    const datetime = `${date}_${time}`;
+
+    let dataName = datetime;
+    if (name) dataName = name;
+
+    const output = {
+      data: this.data.raw,
+    };
+
+    await saveBlob(JSON.stringify(output), `${dataName}.json`, "text/plain");
+  }
+
+  /**
+   * Saves metadata of the data
+   * @param {string} modelName
+   * @return {Promise<void>}
+   */
+  async saveMeta(modelName = "model") {
+    console.log("meta saved");
+    await saveBlob(
+      JSON.stringify(this.meta),
+      `${modelName}_meta.json`,
+      "text/plain"
+    );
+  }
+
  /**
   * load a model and metadata
   * Accepts a FileList (from <input type="file">), an object of URLs
   * ({model, metadata, weights}), or a path string resolved via modelLoader.
   * @param {string | FileList | Object} filesOrPath
   * @return {Promise<void>}
   */
  async loadMeta(filesOrPath) {
    if (filesOrPath instanceof FileList) {
      // pick the metadata file out of the selected files
      const file = Array.from(filesOrPath).find((file) =>
        file.name.includes("_meta.json")
      );
      if (!file) {
        console.warn("no model_meta.json file found in FileList");
        return;
      }
      const text = await file.text();
      this.meta = JSON.parse(text);
    } else if (filesOrPath instanceof Object) {
      // filesOrPath = {model: URL, metadata: URL, weights: URL}
      const metadataResult = await axios.get(filesOrPath.metadata);
      this.meta = metadataResult.data;
    } else {
      const loader = modelLoader(filesOrPath);
      // TODO: it is not always "model_meta.json", it is "{model_name}_meta.json"
      const metaPath = loader.getPath("model_meta.json");
      // TODO: figure out how to use loader.loadMetadataJson() which has wrapped error messages
      const metadataResult = await axios.get(metaPath);
      this.meta = metadataResult.data;
    }

    // loaded metadata means the data pipeline can be treated as ready
    this.isMetadataReady = true;
    this.isWarmedUp = true;
  }
+
+  /*
+   * ////////////////////////////////////////////////
+   * data loading helpers
+   * ////////////////////////////////////////////////
+   */
+
+  /**
+   * // TODO: convert ys into strings, if the task is classification
+    // if (this.config.architecture.task === "classification" && typeof output.ys[prop] !== "string") {
+    //   output.ys[prop] += "";
+    // }
+   * formatRawData
+   * takes a json and set the this.data.raw
+   * @param {*} json 
+   * @param {Array<string>} inputLabels
+   * @param {Array<string>} outputLabels
+   * @void
+   */
+  // formatRawData(json, inputLabels, outputLabels) {
+
+  //   // Recurse through the json object to find
+  //   // an array containing `entries` or `data`
+  //   const dataArray = this.findEntries(json);
+
+  //   if (!dataArray.length > 0) {
+  //     console.log(`your data must be contained in an array in \n
+  //       a property called 'entries' or 'data' of your json object`);
+  //   }
+
+  //   ////////////
+
+  //   // set this.data.raw
+  //   this.data.raw = result;
+  // }
+
+  /**
+   * csvToJSON
+   * Creates a csv from a string
+   * @param {*} csv
+   */
+  // via: http://techslides.com/convert-csv-to-json-in-javascript
+  // eslint-disable-next-line class-methods-use-this
+  csvToJSON(csv) {
+    // split the string by linebreak
+    const lines = csv.split("\n");
+    const result = [];
+    // get the header row as an array
+    const headers = lines[0].split(",");
+
+    // iterate through every row
+    for (let i = 1; i < lines.length; i += 1) {
+      // create a json object for each row
+      const row = {};
+      // split the current line into an array
+      const currentline = lines[i].split(",");
+
+      // for each header, create a key/value pair
+      headers.forEach((k, idx) => {
+        row[k] = currentline[idx];
+      });
+      // add this to the result array
+      result.push(row);
+    }
+
+    return {
+      entries: result,
+    };
+  }
+
+  /**
+   * findEntries
+   * recursively attempt to find the entries
+   * or data array for the given json object
+   * @param {*} _data
+   */
+  findEntries(_data) {
+    const parentCopy = Object.assign({}, _data);
+
+    if (parentCopy.entries && parentCopy.entries instanceof Array) {
+      return parentCopy.entries;
+    } else if (parentCopy.data && parentCopy.data instanceof Array) {
+      return parentCopy.data;
+    }
+
+    const keys = Object.keys(parentCopy);
+    // eslint-disable-next-line consistent-return
+    keys.forEach((k) => {
+      if (typeof parentCopy[k] === "object") {
+        return this.findEntries(parentCopy[k]);
+      }
+    });
+
+    return parentCopy;
+  }
+
  /**
   * getData
   * return data object's raw array
   * to make getting raw data easier
   * @return {Array} the raw {xs, ys} records added so far
   */
  getData() {
    const rawArray = this.data.raw;
    return rawArray;
  }
+}
+
+export default NeuralNetworkData;
diff --git a/src/TimeSeries/timeSeriesUtils.js b/src/TimeSeries/timeSeriesUtils.js
new file mode 100644
index 00000000..5c980e58
--- /dev/null
+++ b/src/TimeSeries/timeSeriesUtils.js
@@ -0,0 +1,398 @@
+import { data, input } from "@tensorflow/tfjs";
+import nnUtils from "../NeuralNetwork/NeuralNetworkUtils";
+
+class TimeSeriesUtils {
+  constructor(options) {
+    this.options = options || {};
+  }
+
+  /* adding data: can only accept the following formats:
+     - for xInputs:
+      1. Sequence of objects (array of objects) 
+        [{x: , y: },{x: , y: },{x: , y: },{x: , y: }]
+      2. Sequence of arrays (array of array, order matters)
+        [[],[],[],[]]
+      3. Sequence of values (shape should be provided by user)
+        [[,,,,,]] e.g. shape = {steps: 4, values: 2} will become [{x: , y: },{x: , y: },{x: , y: },{x: , y: }]
+  */
+
+  verifyAndFormatInputs(xInputs, options = null, classOptions) {
+    const dataFormat = this.checkInputStructure(xInputs, options);
+    return this.formatInputsToObjects(
+      xInputs,
+      options,
+      classOptions,
+      dataFormat
+    );
+  }
+
+  checkInputStructure(xInputs, options = null) {
+    if (!Array.isArray(xInputs)) {
+      throw new error("Syntax Error: Data Should be in an Array");
+    }
+    let isObjects = true;
+    let isArrays = true;
+    let isValues = true;
+
+    for (let i = 0; i < xInputs.length; i++) {
+      if (nnUtils.getDataType(xInputs[i]) === "object") {
+        console.log("here");
+        isArrays = false;
+        isValues = false;
+        if (i > 0) {
+          if (
+            Object.keys(xInputs[i - 1]).length !==
+              Object.keys(xInputs[i]).length ||
+            nnUtils.getDataType(xInputs[i - 1]) === "object"
+          ) {
+            throw new error("Data format is inconsistent");
+          }
+        }
+      } else if (Array.isArray(xInputs[i])) {
+        console.log("here2");
+        isObjects = false;
+        isValues = false;
+        if (i > 0) {
+          if (
+            xInputs[i - 1].length !== xInputs[i].length ||
+            !Array.isArray(xInputs[i - 1])
+          ) {
+            throw new error("Data format is inconsistent");
+          }
+        }
+      } else {
+        if (options.inputLabels) {
+          isObjects = false;
+          isArrays = false;
+        } else {
+          throw new error("inputLabels is needed for 1D array inputs");
+        }
+      }
+
+      if (isObjects) {
+        return "ObjectSequence";
+      } else if (isArrays) {
+        return "ArraySequence";
+      } else if (isValues) {
+        return "ValueSequence";
+      } else {
+        throw new error("Syntax Error: Input Structure is unknown");
+      }
+    }
+  }
+
+  formatInputsToObjects(xInputs, options = null, classOptions, dataFormat) {
+    switch (dataFormat) {
+      case "ObjectSequence":
+        return xInputs;
+      case "ArraySequence":
+        return this.convertArraySequence(xInputs, options, classOptions);
+      case "ValueSequence":
+        return this.convertValueSequence(xInputs, options);
+      default:
+        throw new error("Input Data Structure is unknown");
+    }
+  }
+
+  convertArraySequence(xInputs, options = null, classOptions) {
+    let label = "";
+
+    if (options !== null) {
+      if (options.inputLabels) {
+        label = options.inputLabels;
+        console.log("here1");
+      }
+    } else if (classOptions !== null) {
+      if (classOptions.inputs) {
+        label = classOptions.inputs;
+      }
+    }
+
+    if (
+      (typeof label === "string" && label === "") ||
+      (Array.isArray(label) && label.length === 0)
+    ) {
+      label = this.getLabelFromNestedArray(xInputs);
+    }
+
+    return xInputs.map((input) => {
+      const obj = {};
+      input.forEach((value, ind) => {
+        obj[label[ind]] = value;
+      });
+      return obj;
+    });
+  }
+
+  convertValueSequence(xInputs, options = null) {
+    const { inputLabels } = options;
+    if (xInputs.length % inputLabels.length !== 0) {
+      throw new error(
+        "Invalid Input: Number of Labels don't match amount of values"
+      );
+    }
+    return xInputs.reduce((acc, _, index, array) => {
+      if (index % inputLabels.length === 0) {
+        // Create a new object for the current set of values
+        const obj = {};
+        for (let i = 0; i < inputLabels.length; i++) {
+          obj[inputLabels[i]] = array[index + i];
+        }
+        acc.push(obj);
+      }
+      return acc;
+    }, []);
+  }
+
+  verifyAndFormatOutputs(yInputs, options = null, classOptions) {
+    const { outputs } = classOptions;
+
+    let outputLabels;
+
+    if (options !== null) {
+      if (options.outputLabels) {
+        outputLabels = options.outputLabels;
+      }
+    }
+
+    if (outputs.length > 0) {
+      if (outputs.every((item) => typeof item === "string")) {
+        outputLabels = outputs;
+      }
+    } else if (typeof yInputs === "object") {
+      outputLabels = Object.keys(yInputs);
+    } else {
+      outputLabels = nnUtils.createLabelsFromArrayValues(yInputs, "output");
+    }
+
+    // Make sure that the inputLabels and outputLabels are arrays
+    if (!(outputLabels instanceof Array)) {
+      throw new Error("outputLabels must be an array");
+    }
+
+    return nnUtils.formatDataAsObject(yInputs, outputLabels);
+  }
+
+  prepareLabels(xInputs, yInputs, options = null, classOptions) {
+    const { inputs, outputs } = this.options;
+
+    let inputLabels;
+    let outputLabels;
+
+    // options-based values to assign
+    if (options !== null) {
+      ({ inputLabels, outputLabels } = options);
+    } else if (inputs.length > 0 && outputs.length > 0) {
+      if (inputs.every((item) => typeof item === "string")) {
+        inputLabels = inputs;
+      }
+      if (outputs.every((item) => typeof item === "string")) {
+        outputLabels = outputs;
+      }
+
+      // input-based values to assign
+    } else {
+      inputLabels = this.getLabelFromNestedArray(xInputs);
+      if (typeof yInputs === "object") {
+        outputLabels = Object.keys(yInputs);
+      } else {
+        inputLabels = this.getLabelFromNestedArray(yInputs);
+      }
+    }
+
+    // Make sure that the inputLabels and outputLabels are arrays
+    if (!(inputLabels instanceof Array)) {
+      throw new Error("inputLabels must be an array");
+    }
+    if (!(outputLabels instanceof Array)) {
+      throw new Error("outputLabels must be an array");
+    }
+
+    return inputLabels, outputLabels;
+  }
+
+  getLabelFromNestedArray(xInputs, prefix = "label") {
+    // Recursive function to find the deepest level of the array
+    function traverseArray(array) {
+      if (
+        array.length > 0 &&
+        (typeof array[0] === "string" || typeof array[0] === "number")
+      ) {
+        return array.map((_, index) => `${prefix}_${index}`);
+      } else {
+        for (const item of array) {
+          if (Array.isArray(item)) {
+            const result = traverseArray(item);
+            if (result) return result;
+          }
+        }
+      }
+      return null;
+    }
+
+    if (Array.isArray(xInputs)) {
+      return traverseArray(xInputs);
+    } else {
+      throw new Error("Input data must be an array.");
+    }
+  }
+
+  // normalize utilities
+  reshapeTo3DArray(data, shape) {
+    const [batch, timeStep, feature] = shape;
+    let result = [];
+    let index = 0;
+
+    for (let i = 0; i < batch; i++) {
+      let batchArray = [];
+      for (let j = 0; j < timeStep; j++) {
+        let timeStepArray = [];
+        for (let k = 0; k < feature; k++) {
+          timeStepArray.push(data[index]);
+          index++;
+        }
+        batchArray.push(timeStepArray);
+      }
+      result.push(batchArray);
+    }
+
+    return result;
+  }
+
+  zipArraySequence(arr1, arr2) {
+    if (arr1.length !== arr2.length) {
+      console.error("arrays do not have the same length");
+      return [];
+    }
+
+    return arr1.map((xs, idx) => {
+      const ys = arr2[idx].ys; // Extract the inner `ys` object
+      return {
+        xs: xs,
+        ys: ys,
+      };
+    });
+  }
+
+  // point simplification utilities - Ramer-Douglas-Peucker (RDP) algorithm
+  padCoordinates(allPoints, targetPointCount, maxEpsilon = 50) {
+    const rdpPoints = [];
+
+    const epsilon = this.findEpsilonForPointCount(
+      allPoints,
+      targetPointCount,
+      maxEpsilon
+    );
+
+    const total = allPoints.length;
+    const start = allPoints[0];
+    const end = allPoints[total - 1];
+    rdpPoints.push(start);
+    this.rdp(0, total - 1, allPoints, rdpPoints, epsilon);
+    rdpPoints.push(end);
+
+    if (rdpPoints.length > targetPointCount) {
+      return rdpPoints.slice(0, targetPointCount);
+    } else if (rdpPoints.length < targetPointCount) {
+      const filler = new Array(targetPointCount - rdpPoints.length).fill(
+        rdpPoints[rdpPoints.length - 1]
+      );
+
+      rdpPoints.push(...filler);
+      return rdpPoints;
+    }
+
+    return rdpPoints;
+  }
+
+  findEpsilonForPointCount(points, targetCount, maxEpsilon) {
+    let low = 0;
+    let high = maxEpsilon;
+    let mid;
+    let simplifiedPointsCount = 0;
+
+    while (high - low > 0.001) {
+      // Tolerance for approximation
+      mid = (low + high) / 2;
+      simplifiedPointsCount = this.getSimplifiedPointCount(points, mid);
+      if (simplifiedPointsCount > targetCount) {
+        low = mid;
+      } else {
+        high = mid;
+      }
+    }
+
+    return mid;
+  }
+
+  getSimplifiedPointCount(points, epsilon) {
+    const rdpPoints = [];
+    const total = points.length;
+    const start = points[0];
+    const end = points[total - 1];
+    rdpPoints.push(start);
+    this.rdp(0, total - 1, points, rdpPoints, epsilon);
+    rdpPoints.push(end);
+    return rdpPoints.length;
+  }
+
+  rdp(startIndex, endIndex, allPoints, rdpPoints, epsilon) {
+    const nextIndex = this.findFurthest(
+      allPoints,
+      startIndex,
+      endIndex,
+      epsilon
+    );
+    if (nextIndex > 0) {
+      if (startIndex != nextIndex) {
+        this.rdp(startIndex, nextIndex, allPoints, rdpPoints, epsilon);
+      }
+      rdpPoints.push(allPoints[nextIndex]);
+      if (endIndex != nextIndex) {
+        this.rdp(nextIndex, endIndex, allPoints, rdpPoints, epsilon);
+      }
+    }
+  }
+
+  findFurthest(points, a, b, epsilon) {
+    let recordDistance = -1;
+    const start = points[a];
+    const end = points[b];
+    let furthestIndex = -1;
+    for (let i = a + 1; i < b; i++) {
+      const currentPoint = points[i];
+      const d = this.lineDist(currentPoint, start, end);
+      if (d > recordDistance) {
+        recordDistance = d;
+        furthestIndex = i;
+      }
+    }
+    if (recordDistance > epsilon) {
+      return furthestIndex;
+    } else {
+      return -1;
+    }
+  }
+
+  lineDist(c, a, b) {
+    const norm = this.scalarProjection(c, a, b);
+    return dist(c.x, c.y, norm.x, norm.y);
+  }
+
+  scalarProjection(p, a, b) {
+    const ap = { x: p.x - a.x, y: p.y - a.y };
+    const ab = { x: b.x - a.x, y: b.y - a.y };
+    const abMag = Math.sqrt(ab.x * ab.x + ab.y * ab.y);
+    ab.x /= abMag;
+    ab.y /= abMag;
+    const dot = ap.x * ab.x + ap.y * ab.y;
+    return { x: a.x + ab.x * dot, y: a.y + ab.y * dot };
+  }
+}
+
+const timeSeriesUtils = () => {
+  const instance = new TimeSeriesUtils();
+  return instance;
+};
+
+export default timeSeriesUtils();
diff --git a/src/index.js b/src/index.js
index 09416c1c..ddf7375c 100644
--- a/src/index.js
+++ b/src/index.js
@@ -7,6 +7,7 @@ import imageClassifier from "./ImageClassifier";
 import soundClassifier from "./SoundClassifier";
 import setBackend from "./utils/setBackend";
 import bodySegmentation from "./BodySegmentation";
+import timeSeries from "./TimeSeries";
 import communityStatement from "./utils/communityStatement";
 import * as tf from "@tensorflow/tfjs";
 import * as tfvis from "@tensorflow/tfjs-vis";
@@ -22,6 +23,7 @@ const withPreload = {
   neuralNetwork,
   sentiment,
   soundClassifier,
+  timeSeries,
 };
 
 const ml5 = Object.assign({ p5Utils }, withPreload, {
diff --git a/webpack.config.js b/webpack.config.js
index ff90e03d..e9bab17b 100644
--- a/webpack.config.js
+++ b/webpack.config.js
@@ -48,9 +48,9 @@ const developmentConfig = {
   resolve: {
     fallback: {
       fs: false,
-      util: false
+      util: false,
     },
-  }
+  },
 };
 
 const productionConfig = {
@@ -77,9 +77,9 @@ const productionConfig = {
   resolve: {
     fallback: {
       fs: false,
-      util: false
+      util: false,
     },
-  }
+  },
 };
 
 module.exports = function (env, args) {