From 0634c4f25d830649d3ef24ac5c30322f82e5278d Mon Sep 17 00:00:00 2001 From: Angel Antonio Avalos Cisneros Date: Mon, 10 Jun 2024 14:32:21 -0700 Subject: [PATCH] Project import generated by Copybara. (#104) GitOrigin-RevId: a6c37eeb0694bccabb7e846f9ae08441d3fd437f Co-authored-by: Snowflake Authors --- CHANGELOG.md | 12 + ci/conda_recipe/meta.yaml | 2 +- codegen/sklearn_wrapper_template.py_template | 9 +- snowflake/cortex/_sentiment.py | 11 +- snowflake/cortex/sentiment_test.py | 21 +- .../ml/_internal/utils/temp_file_utils.py | 7 +- .../Dataset_Basic_Demo.ipynb | 195 +- snowflake/ml/feature_store/access_manager.py | 64 +- snowflake/ml/feature_store/feature_store.py | 2 +- snowflake/ml/feature_store/feature_view.py | 23 +- .../Basic_Feature_Store_Demo.ipynb | 658 ------- .../Time_Series_Feature_Demo.ipynb | 1070 ----------- .../internal_demo/winequality-red.csv | 1600 ----------------- snowflake/ml/fileset/BUILD.bazel | 1 - snowflake/ml/fileset/snowfs.py | 33 +- snowflake/ml/model/_client/ops/model_ops.py | 43 + .../ml/model/_client/ops/model_ops_test.py | 56 + .../ml/model/_client/sql/model_version.py | 54 +- .../model/_client/sql/model_version_test.py | 75 +- .../model/_model_composer/model_composer.py | 8 +- .../model_manifest/BUILD.bazel | 1 - .../model_manifest/model_manifest_test.py | 347 ++-- .../model/_packager/model_meta/model_meta.py | 4 +- .../model/_packager/model_runtime/BUILD.bazel | 1 - .../_packager/model_runtime/model_runtime.py | 30 +- .../model_runtime/model_runtime_test.py | 332 ++-- .../snowpark_implementations/BUILD.bazel | 6 + .../distributed_hpo_trainer.py | 197 +- .../distributed_search_udf_file.py | 159 ++ .../ml/modeling/impute/simple_imputer.py | 23 +- snowflake/ml/modeling/parameters/BUILD.bazel | 1 + snowflake/ml/registry/_manager/BUILD.bazel | 1 - .../ml/registry/_manager/model_manager.py | 60 +- .../registry/_manager/model_manager_test.py | 24 + snowflake/ml/registry/registry.py | 11 +- snowflake/ml/registry/registry_test.py | 25 + snowflake/ml/test_utils/mock_data_frame.py | 25 + .../ml/test_utils/mock_data_frame_test.py | 20 + snowflake/ml/utils/BUILD.bazel | 1 + snowflake/ml/version.bzl | 2 +- .../pipeline_with_ohe_and_xgbr_test.py | 51 + .../feature_store_access_test.py | 26 +- .../feature_store_object_test.py | 5 +- .../ml/feature_store/feature_store_test.py | 28 + .../ml/modeling/impute/simple_imputer_test.py | 225 ++- .../snowflake/ml/registry/model/BUILD.bazel | 13 +- .../model/fully_qualified_name_test.py | 2 +- .../ml/registry/model/multiple_model_test.py | 2 +- .../model/registry_catboost_model_test.py | 18 +- .../model/registry_custom_model_test.py | 106 +- ...egistry_huggingface_pipeline_model_test.py | 80 +- .../registry/model/registry_in_sproc_test.py | 111 ++ .../model/registry_lightgbm_model_test.py | 34 +- .../model/registry_mlflow_model_test.py | 14 +- .../model/registry_model_test_base.py | 88 +- .../model/registry_modeling_model_test.py | 112 +- .../model/registry_pytorch_model_test.py | 42 +- ...gistry_sentence_transformers_model_test.py | 16 +- .../model/registry_sklearn_model_test.py | 20 +- .../model/registry_tensorflow_model_test.py | 42 +- .../model/registry_xgboost_model_test.py | 30 +- .../ml/test_utils/common_test_base.py | 46 +- 62 files changed, 2012 insertions(+), 4313 deletions(-) rename snowflake/ml/dataset/notebooks/{internal_demo => }/Dataset_Basic_Demo.ipynb (74%) delete mode 100644 snowflake/ml/feature_store/notebooks/internal_demo/Basic_Feature_Store_Demo.ipynb delete mode 100644 
snowflake/ml/feature_store/notebooks/internal_demo/Time_Series_Feature_Demo.ipynb
 delete mode 100644 snowflake/ml/feature_store/notebooks/internal_demo/winequality-red.csv
 create mode 100644 snowflake/ml/modeling/_internal/snowpark_implementations/distributed_search_udf_file.py
 create mode 100644 tests/integ/snowflake/ml/registry/model/registry_in_sproc_test.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e58ed2b1..bb29f0f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,17 @@
 # Release History

+## 1.5.2
+
+### Bug Fixes
+
+- Registry: Fix an issue that made it impossible to log a model in a stored procedure.
+- Modeling: Fix an issue where `snowflake.ml.modeling.parameters.enable_anonymous_sproc` could not be imported due to a
+  package dependency error.
+
+### Behavior Changes
+
+### New Features
+
 ## 1.5.1

 ### Bug Fixes
diff --git a/ci/conda_recipe/meta.yaml b/ci/conda_recipe/meta.yaml
index 4e928a5a..bd2ce910 100644
--- a/ci/conda_recipe/meta.yaml
+++ b/ci/conda_recipe/meta.yaml
@@ -17,7 +17,7 @@ build:
   noarch: python
 package:
   name: snowflake-ml-python
-  version: 1.5.1
+  version: 1.5.2
 requirements:
   build:
     - python
diff --git a/codegen/sklearn_wrapper_template.py_template b/codegen/sklearn_wrapper_template.py_template
index f541baa0..f48ae884 100644
--- a/codegen/sklearn_wrapper_template.py_template
+++ b/codegen/sklearn_wrapper_template.py_template
@@ -442,7 +442,14 @@ class {transform.original_class_name}(BaseTransformer):
     ) -> List[str]:
         # in case the inferred output column names dimension is different
         # we use one line of snowpark dataframe and put it into sklearn estimator using pandas
-        output_df_pd = getattr(self, method)(dataset.limit(1).to_pandas(), output_cols_prefix)
+        sample_pd_df = dataset.select(self.input_cols).limit(1).to_pandas()
+
+        # Rename the pandas df columns to Snowflake identifiers and reorder them to match the order
+        # seen during fit.
+        snowpark_column_names = dataset.select(self.input_cols).columns
+        sample_pd_df.columns = snowpark_column_names
+
+        output_df_pd = getattr(self, method)(sample_pd_df, output_cols_prefix)
         output_df_columns = list(output_df_pd.columns)
         output_df_columns_set: Set[str] = set(output_df_columns) - set(dataset.columns)
         if self.sample_weight_col:
diff --git a/snowflake/cortex/_sentiment.py b/snowflake/cortex/_sentiment.py
index e75e6609..4f2451ff 100644
--- a/snowflake/cortex/_sentiment.py
+++ b/snowflake/cortex/_sentiment.py
@@ -11,7 +11,7 @@
 )
 def Sentiment(
     text: Union[str, snowpark.Column], session: Optional[snowpark.Session] = None
-) -> Union[str, snowpark.Column]:
+) -> Union[float, snowpark.Column]:
     """Sentiment calls into the LLM inference service to perform sentiment analysis on the input text.

     Args:
@@ -21,11 +21,14 @@ def Sentiment(
     Returns:
         A column of floats. 1 represents positive sentiment, -1 represents negative sentiment.
     """
-    return _sentiment_impl("snowflake.cortex.sentiment", text, session=session)


 def _sentiment_impl(
     function: str, text: Union[str, snowpark.Column], session: Optional[snowpark.Session] = None
-) -> Union[str, snowpark.Column]:
-    return call_sql_function(function, session, text)
+) -> Union[float, snowpark.Column]:
+
+    output = call_sql_function(function, session, text)
+    if isinstance(output, snowpark.Column):
+        return output
+    return float(output)
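With this change, `Sentiment` returns a plain Python `float` when given a string, while `snowpark.Column` inputs still produce a lazily evaluated column. A minimal usage sketch (assuming an already-configured Snowpark `session`; the sample text and score are illustrative):

```python
from snowflake.cortex import Sentiment
from snowflake.snowpark import functions as F

# String input: the SQL function runs eagerly and the result is now a float.
score = Sentiment("The product arrived early and works great!", session=session)
print(score)  # e.g. 0.87, somewhere in the range [-1, 1]

# Column input: still returns a snowpark.Column, evaluated when the query runs.
df = session.create_dataframe([["love it"], ["hate it"]], schema=["review"])
df.select(Sentiment(F.col("review")).alias("score")).show()
```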
""" - return _sentiment_impl("snowflake.cortex.sentiment", text, session=session) def _sentiment_impl( function: str, text: Union[str, snowpark.Column], session: Optional[snowpark.Session] = None -) -> Union[str, snowpark.Column]: - return call_sql_function(function, session, text) +) -> Union[float, snowpark.Column]: + + output = call_sql_function(function, session, text) + if isinstance(output, snowpark.Column): + return output + return float(output) diff --git a/snowflake/cortex/sentiment_test.py b/snowflake/cortex/sentiment_test.py index 3b2964db..f906262d 100644 --- a/snowflake/cortex/sentiment_test.py +++ b/snowflake/cortex/sentiment_test.py @@ -7,35 +7,36 @@ class SentimentTest(absltest.TestCase): - prompt = "|prompt|" + sentiment = "0.53" @staticmethod - def sentiment_for_test(prompt: str) -> str: - return f"result: {prompt}" + def sentiment_for_test(sentiment: str) -> float: + return float(sentiment) def setUp(self) -> None: self._session = _test_util.create_test_session() functions.udf( self.sentiment_for_test, name="sentiment", - return_type=types.StringType(), - input_types=[types.StringType()], + session=self._session, + return_type=types.FloatType(), + input_types=[types.FloatType()], is_permanent=False, ) def tearDown(self) -> None: - self._session.sql("drop function sentiment(string)").collect() + self._session.sql("drop function sentiment(float)").collect() self._session.close() def test_sentiment_str(self) -> None: - res = _sentiment._sentiment_impl("sentiment", self.prompt) - self.assertEqual(self.sentiment_for_test(self.prompt), res) + res = _sentiment._sentiment_impl("sentiment", self.sentiment, session=self._session) + self.assertEqual(self.sentiment_for_test(self.sentiment), res) def test_sentiment_column(self) -> None: - df_in = self._session.create_dataframe([snowpark.Row(prompt=self.prompt)]) + df_in = self._session.create_dataframe([snowpark.Row(prompt=self.sentiment)]) df_out = df_in.select(_sentiment._sentiment_impl("sentiment", functions.col("prompt"))) res = df_out.collect()[0][0] - self.assertEqual(self.sentiment_for_test(self.prompt), res) + self.assertEqual(self.sentiment_for_test(self.sentiment), res) if __name__ == "__main__": diff --git a/snowflake/ml/_internal/utils/temp_file_utils.py b/snowflake/ml/_internal/utils/temp_file_utils.py index 536bd782..0d639387 100644 --- a/snowflake/ml/_internal/utils/temp_file_utils.py +++ b/snowflake/ml/_internal/utils/temp_file_utils.py @@ -8,14 +8,17 @@ logger = logging.getLogger(__name__) -def get_temp_file_path() -> str: +def get_temp_file_path(prefix: str = "") -> str: """Returns a new random temp file path. + Args: + prefix: A prefix to the temp file path, this can help add stored file information. Defaults to None. + Returns: A new temp file path. """ # TODO(snandamuri): Use in-memory filesystem for temp files. 
diff --git a/snowflake/ml/dataset/notebooks/internal_demo/Dataset_Basic_Demo.ipynb b/snowflake/ml/dataset/notebooks/Dataset_Basic_Demo.ipynb
similarity index 74%
rename from snowflake/ml/dataset/notebooks/internal_demo/Dataset_Basic_Demo.ipynb
rename to snowflake/ml/dataset/notebooks/Dataset_Basic_Demo.ipynb
index 89e60e5f..e4eb40ac 100644
--- a/snowflake/ml/dataset/notebooks/internal_demo/Dataset_Basic_Demo.ipynb
+++ b/snowflake/ml/dataset/notebooks/Dataset_Basic_Demo.ipynb
@@ -1,11 +1,22 @@
 {
  "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "229bc948",
+   "metadata": {},
+   "source": [
+    "# Basic Dataset Demo\n",
+    "\n",
+    "- snowflake-ml-python version: 1.5.0\n",
+    "- Last updated: 5/10/2024"
+   ]
+  },
   {
    "cell_type": "markdown",
    "id": "e645315e-9a73-4cb0-b72e-a1ecb32abf1d",
    "metadata": {},
    "source": [
-    "# Setup Environment"
+    "## Setup Environment"
    ]
   },
@@ -13,7 +24,7 @@
    "id": "f5652801-1259-439e-8b70-df7d1995916b",
    "metadata": {},
    "source": [
-    "## Import Dependencies and Create Session"
+    "### Import Dependencies and Create Session"
    ]
   },
@@ -53,7 +64,7 @@
    "id": "dc7cdc84-5f2f-491d-97c6-9a0d22f294bc",
    "metadata": {},
    "source": [
-    "# Prepare test data\n",
+    "## Prepare test data\n",
     "\n",
     "We will use the [diamond price dataset](https://ggplot2.tidyverse.org/reference/diamonds.html) for this demo. The data can be downloaded from https://raw.githubusercontent.com/tidyverse/ggplot2/main/data-raw/diamonds.csv"
    ]
   },
@@ -70,15 +81,16 @@
    "\n",
    "data_url = \"https://raw.githubusercontent.com/tidyverse/ggplot2/main/data-raw/diamonds.csv\"\n",
    "data_pd = pd.read_csv(data_url)\n",
+   "data_pd.columns = [c.upper() for c in data_pd.columns]  # Snowflake works best with uppercase columns\n",
    "\n",
    "# Encode categorical variables: cut, color, clarity\n",
    "label_encoder = LabelEncoder()\n",
-   "data_pd['cut'] = label_encoder.fit_transform(data_pd['cut'])\n",
-   "data_pd['color'] = label_encoder.fit_transform(data_pd['color'])\n",
-   "data_pd['clarity'] = label_encoder.fit_transform(data_pd['clarity'])\n",
+   "data_pd['CUT'] = label_encoder.fit_transform(data_pd['CUT'])\n",
+   "data_pd['COLOR'] = label_encoder.fit_transform(data_pd['COLOR'])\n",
+   "data_pd['CLARITY'] = label_encoder.fit_transform(data_pd['CLARITY'])\n",
    "\n",
    "# Scale numerical features: carat, x, y, z, depth, table\n",
-   "numerical_features = ['carat', 'x', 'y', 'z', 'depth', 'table']\n",
+   "numerical_features = ['CARAT', 'X', 'Y', 'Z', 'DEPTH', 'TABLE']\n",
    "scaler = StandardScaler()\n",
    "data_pd[numerical_features] = scaler.fit_transform(data_pd[numerical_features])\n",
    "\n",
@@ -101,7 +113,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "ds_name = f\"{TEST_DATASET_DB}.{TEST_DATASET_SCHEMA}.wine_data\"\n",
+    "ds_name = f\"{TEST_DATASET_DB}.{TEST_DATASET_SCHEMA}.diamond_data\"\n",
     "ds_version = \"v1\"\n",
     "\n",
     "session.sql(f\"DROP DATASET IF EXISTS {ds_name}\").collect()\n",
@@ -110,7 +122,7 @@
     " name=ds_name,\n",
     " version=ds_version,\n",
     " input_dataframe=df,\n",
-    " label_cols=[\"price\"],\n",
+    " label_cols=[\"PRICE\"],\n",
     ")\n",
     "\n",
     "print(f\"Dataset: {ds.fully_qualified_name}\")\n",
@@ -139,16 +151,6 @@
     "print(ds.read.to_pandas().shape)"
    ]
   },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "919f9e7b-6428-41cc-88d3-70fd08b0d1f6",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "type(ds.read)"
-   ]
-  },
   {
"cell_type": "markdown", "id": "2a11261b-c7ea-4bb9-b0b0-7a711dd63cec", @@ -164,25 +166,22 @@ "metadata": {}, "outputs": [], "source": [ - "test_ratio = 0.2\n", - "uniform_min, uniform_max = 1, 10\n", - "pivot = (uniform_max - uniform_min + 1) * test_ratio\n", - "df_aug = df.with_column(\"_UNIFORM\", F.uniform(uniform_min, uniform_max, F.random()))\n", - "ds.create_version(\n", + "# Versions are not normally intended to manage splits, but we'll do so here\n", + "# just to demonstrate using different Dataset Versions.\n", + "train_df, test_df = df.random_split([0.8, 0.2])\n", + "train_ds = ds.create_version(\n", " version=\"train\",\n", - " input_dataframe=df_aug.where(df_aug.col(\"_UNIFORM\") > pivot).drop(df_aug.col(\"_UNIFORM\")),\n", + " input_dataframe=train_df,\n", " label_cols=[\"price\"],\n", ")\n", - "ds.create_version(\n", + "test_ds = ds.create_version(\n", " version=\"test\",\n", - " input_dataframe=df_aug.where(df_aug.col(\"_UNIFORM\") <= pivot).drop(df_aug.col(\"_UNIFORM\")),\n", + " input_dataframe=test_df,\n", " label_cols=[\"price\"],\n", ")\n", "\n", "print(ds.list_versions())\n", - "\n", - "train_ds = ds.select_version(\"train\")\n", - "test_ds = ds.select_version(\"test\")\n", + "print(f\"{ds.selected_version.name=}\\n{train_ds.selected_version.name=}\\n{test_ds.selected_version.name=}\")\n", "\n", "print(\"train rows:\", train_ds.read.to_snowpark_dataframe().count())\n", "print(\"test rows:\", test_ds.read.to_snowpark_dataframe().count())" @@ -193,9 +192,9 @@ "id": "6ca2ea4d-d8c1-4ef9-a7c5-78c8b3aa8779", "metadata": {}, "source": [ - "# Model Training\n", + "## Model Training\n", "\n", - "Let's train and evaluate a basic PyTorch model using our newly created Snowflake Datasets" + "Let's train and evaluate some basic models using our new Dataset" ] }, { @@ -209,15 +208,15 @@ "from sklearn.metrics import mean_squared_error\n", "\n", "train_pd = train_ds.read.to_pandas()\n", - "X_train = train_pd.drop(columns=[\"price\"])\n", - "y_train = train_pd[\"price\"]\n", + "X_train = train_pd.drop(columns=[\"PRICE\"])\n", + "y_train = train_pd[\"PRICE\"]\n", "rf_regressor = RandomForestRegressor(n_estimators=100, random_state=42)\n", "rf_regressor.fit(X_train, y_train)\n", "\n", "# Evaluate the Model\n", "test_pd = test_ds.read.to_pandas()\n", - "X_test = test_pd.drop(columns=[\"price\"])\n", - "y_test = test_pd[\"price\"]\n", + "X_test = test_pd.drop(columns=[\"PRICE\"])\n", + "y_test = test_pd[\"PRICE\"]\n", "y_pred = rf_regressor.predict(X_test)\n", "\n", "# Calculate the Mean Squared Error\n", @@ -240,42 +239,65 @@ "metadata": {}, "outputs": [], "source": [ - "local_code_imports = [\n", - " (os.path.join(snowml_path, 'snowflake', 'ml', '_internal'), 'snowflake.ml._internal'),\n", - " (os.path.join(snowml_path, 'snowflake', 'ml', 'fileset'), 'snowflake.ml.fileset'),\n", - " (os.path.join(snowml_path, 'snowflake', 'ml', 'dataset'), 'snowflake.ml.dataset'),\n", - "]\n", - "for t in local_code_imports:\n", - " session.add_import(*t, whole_file_hash=True)\n", - " \n", - "deps = [\n", - " \"snowflake-snowpark-python\",\n", - " \"snowflake-ml-python\",\n", - " \"cryptography\",\n", - "]\n", - "\n", - "@F.sproc(session=session, packages=deps)\n", + "@F.sproc(session=session, packages=[\"snowflake-snowpark-python\", \"snowflake-ml-python>=1.15\", \"cryptography\"])\n", "def ds_sproc(session: Session) -> float:\n", " train_ds = dataset.load_dataset(session, ds_name, \"train\")\n", " test_ds = dataset.load_dataset(session, ds_name, \"test\")\n", "\n", " train_pd = 
-    " X_train = train_pd.drop(columns=[\"price\"])\n",
-    " y_train = train_pd[\"price\"]\n",
+    " X_train = train_pd.drop(columns=[\"PRICE\"])\n",
+    " y_train = train_pd[\"PRICE\"]\n",
     " rf_regressor = RandomForestRegressor(n_estimators=100, random_state=42)\n",
     " rf_regressor.fit(X_train, y_train)\n",
     "\n",
     " # Evaluate the Model\n",
     " test_pd = test_ds.read.to_pandas()\n",
-    " X_test = test_pd.drop(columns=[\"price\"])\n",
-    " y_test = test_pd[\"price\"]\n",
+    " X_test = test_pd.drop(columns=[\"PRICE\"])\n",
+    " y_test = test_pd[\"PRICE\"]\n",
     " y_pred = rf_regressor.predict(X_test)\n",
     "\n",
     " # Calculate the Mean Squared Error\n",
     " return mean_squared_error(y_test, y_pred)\n",
     "\n",
-    "print(\"Mean Squared Error:\", ds_sproc(session))\n",
-    "session.clear_imports()"
+    "print(\"Mean Squared Error:\", ds_sproc(session))"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "id": "d1009685-2c71-4db8-97a0-947f12d693d7",
+   "metadata": {},
+   "source": [
+    "We can also pass the Datasets into SnowML modeling APIs using either Snowpark DataFrame or Pandas DataFrame"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "6ff1a3bb-2747-43e4-80b1-4d847ebff347",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from snowflake.ml.modeling.ensemble import random_forest_regressor as snowml_rfr\n",
+    "from snowflake.ml.modeling.metrics.regression import mean_squared_error as snowml_mse\n",
+    "\n",
+    "ALL_COLS = train_ds.read.to_snowpark_dataframe().columns\n",
+    "LABEL_COLS = [\"PRICE\"]\n",
+    "FEATURE_COLS = [c for c in ALL_COLS if c not in LABEL_COLS]\n",
+    "\n",
+    "# Train a SnowML random forest model on Snowflake.\n",
+    "rf_model = snowml_rfr.RandomForestRegressor(\n",
+    " n_estimators=100,\n",
+    " random_state=42,\n",
+    " input_cols=FEATURE_COLS,\n",
+    " label_cols=LABEL_COLS,\n",
+    ")\n",
+    "\n",
+    "# Convert Dataset to Snowpark DataFrame for training\n",
+    "rf_model.fit(train_ds.read.to_snowpark_dataframe())\n",
+    "\n",
+    "predictions = rf_model.predict(test_df)  # Use test_df instead of test_ds for prediction\n",
+    "rf_mse = snowml_mse(df=predictions, y_true_col_names=\"PRICE\", y_pred_col_names=\"OUTPUT_PRICE\")\n",
+    "print(rf_mse)"
+   ]
+  },
@@ -305,12 +327,11 @@
    " self.fc3 = nn.Linear(32, 1)\n",
    " self.relu = nn.ReLU()\n",
    " \n",
-   " def forward(self, carat, cut, color, clarity, depth, table, x, y, z):\n",
-   "  X = torch.cat((carat, cut, color, clarity, depth, table, x, y, z), axis=1)\n",
-   "  X = self.relu(self.fc1(X))\n",
-   "  X = self.relu(self.fc2(X))\n",
-   "  X = self.fc3(X)\n",
-   "  return X\n",
+   " def forward(self, x):\n",
+   "  x = self.relu(self.fc1(x))\n",
+   "  x = self.relu(self.fc2(x))\n",
+   "  x = self.fc3(x)\n",
+   "  return x\n",
    "\n",
    "\n",
    "def train_model(model: nn.Module, ds: dataset.Dataset, batch_size: int = 32, num_epochs: int = 10, learning_rate: float = 1e-3):\n",
@@ -323,13 +344,13 @@
    " # Training loop\n",
    " for epoch in range(num_epochs):\n",
    "  for batch in ds.read.to_torch_datapipe(batch_size=batch_size):\n",
-   "   targets = torch.from_numpy(batch.pop(\"price\")).unsqueeze(1).to(torch.float32)\n",
-   "   inputs = {k:torch.from_numpy(v).unsqueeze(1) for k,v in batch.items()}\n",
-   "   \n",
+   "   targets = torch.from_numpy(batch.pop(\"PRICE\")).unsqueeze(1).to(torch.float32)\n",
+   "   inputs = torch.cat(tuple(torch.from_numpy(v).unsqueeze(1) for v in batch.values()), axis=1)\n",
+   "\n",
    "   # Forward pass\n",
-   "   outputs = model(**inputs)\n",
+   "   outputs = model(inputs)\n",
    "   loss = criterion(outputs, targets)\n",
-   "   \n",
+   "\n",
    "   # Backward and optimize\n",
    "   optimizer.zero_grad()\n",
    "   loss.backward()\n",
@@ -342,10 +363,10 @@
    " mse = 0.0\n",
    " with torch.no_grad():\n",
    "  for batch in ds.read.to_torch_datapipe(batch_size=batch_size):\n",
-   "   targets = torch.from_numpy(batch.pop(\"price\")).unsqueeze(1).to(torch.float32)\n",
-   "   inputs = {k:torch.from_numpy(v).unsqueeze(1) for k,v in batch.items()}\n",
+   "   targets = torch.from_numpy(batch.pop(\"PRICE\")).unsqueeze(1).to(torch.float32)\n",
+   "   inputs = torch.cat(tuple(torch.from_numpy(v).unsqueeze(1) for v in batch.values()), axis=1)\n",
    "\n",
-   "   outputs = model(**inputs)\n",
+   "   outputs = model(inputs)\n",
    "   mse += nn.functional.mse_loss(outputs, targets).item()\n",
    " return mse\n",
    "\n",
@@ -354,38 +375,6 @@
    "eval_model(model, test_ds)"
   ]
  },
- {
-  "cell_type": "markdown",
-  "id": "d1009685-2c71-4db8-97a0-947f12d693d7",
-  "metadata": {},
-  "source": [
-   "(WIP) We can pass the Datasets into SnowML modeling APIs using either Snowpark DataFrame or Pandas DataFrame"
-  ]
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "id": "6ff1a3bb-2747-43e4-80b1-4d847ebff347",
-  "metadata": {},
-  "outputs": [],
-  "source": [
-   "from snowflake.ml.modeling.xgboost import XGBRegressor\n",
-   "\n",
-   "FEATURE_COLS = [\"carat\", \"cut\", \"color\", \"clarity\", \"depth\", \"table\", \"x\", \"y\", \"z\"]\n",
-   "LABEL_COLS = [\"price\"]\n",
-   "\n",
-   "# Train an XGBoost model on snowflake.\n",
-   "xgboost_model = XGBRegressor(\n",
-   " input_cols=FEATURE_COLS,\n",
-   " label_cols=LABEL_COLS,\n",
-   ")\n",
-   "\n",
-   "xgboost_model.fit(train_ds.read.to_snowpark_dataframe())\n",
-   "\n",
-   "# Use the model to make predictions.\n",
-   "predictions = xgboost_model.predict(test_ds.read.to_snowpark_dataframe())"
-  ]
- },
 {
  "cell_type": "markdown",
  "id": "bcfad573-9e8b-429e-af38-c4d3316cbb5a",
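Beyond iterating `to_torch_datapipe` directly as the notebook does, the datapipe can be wrapped in a standard PyTorch `DataLoader`. A hedged sketch, assuming `train_ds` is the Dataset version created above and that the reader already batches on its own (hence `batch_size=None` to avoid double batching):

```python
import torch
from torch.utils.data import DataLoader

# to_torch_datapipe yields dicts of numpy arrays, already batched to 32 rows.
pipe = train_ds.read.to_torch_datapipe(batch_size=32)

for batch in DataLoader(pipe, batch_size=None):
    features = torch.cat(
        tuple(torch.from_numpy(v).unsqueeze(1) for k, v in batch.items() if k != "PRICE"),
        axis=1,
    )
    targets = torch.from_numpy(batch["PRICE"]).unsqueeze(1).to(torch.float32)
    print(features.shape, targets.shape)
    break
```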
diff --git a/snowflake/ml/feature_store/access_manager.py b/snowflake/ml/feature_store/access_manager.py
index d9c2ee39..f11751b2 100644
--- a/snowflake/ml/feature_store/access_manager.py
+++ b/snowflake/ml/feature_store/access_manager.py
@@ -42,6 +42,8 @@ class _SessionInfo:
 # Lists of permissions as tuples of (OBJECT_TYPE, [PRIVILEGES, ...])
 _PRE_INIT_PRIVILEGES: Dict[_FeatureStoreRole, List[_Privilege]] = {
     _FeatureStoreRole.PRODUCER: [
+        _Privilege("DATABASE", "{database}", ["USAGE"]),
+        _Privilege("SCHEMA", "{database}.{schema}", ["USAGE"]),
         _Privilege(
             "SCHEMA",
             "{database}.{schema}",
@@ -69,8 +71,7 @@ class _SessionInfo:
         _Privilege("DYNAMIC TABLE", _ALL_OBJECTS, ["SELECT", "MONITOR"], "SCHEMA {database}.{schema}"),
         _Privilege("VIEW", _ALL_OBJECTS, ["SELECT", "REFERENCES"], "SCHEMA {database}.{schema}"),
         _Privilege("TABLE", _ALL_OBJECTS, ["SELECT", "REFERENCES"], "SCHEMA {database}.{schema}"),
-        # FIXME(dhung): FUTURE DATASETS not supported until 8.19
-        # _Privilege("DATASET", _ALL_OBJECTS, ["USAGE"], "SCHEMA {database}.{schema}"),
+        _Privilege("DATASET", _ALL_OBJECTS, ["USAGE"], "SCHEMA {database}.{schema}"),
         # User should decide whether they want to grant warehouse usage to CONSUMER
         # _Privilege("WAREHOUSE", "{warehouse}", ["USAGE"]),
     ],
@@ -128,8 +129,7 @@ def _grant_privileges(
 def _configure_pre_init_privileges(
     session: Session,
     session_info: _SessionInfo,
-    producer_role: str = "SNOWML_FEATURE_STORE_PRODUCER_RL",
-    consumer_role: str = "SNOWML_FEATURE_STORE_CONSUMER_RL",
+    roles_to_create: Dict[_FeatureStoreRole, str],
 ) -> None:
     """
     Configure Feature Store role privileges. Must be run with ACCOUNTADMIN
@@ -141,8 +141,7 @@ def _configure_pre_init_privileges(
     Args:
         session: Snowpark Session to interact with Snowflake backend.
         session_info: Session info like database and schema for the FeatureStore instance.
-        producer_role: Name of producer role to be configured.
-        consumer_role: Name of consumer role to be configured.
+        roles_to_create: Producer and optional consumer roles to create.
     """

     # Create schema if not already exists
@@ -159,29 +158,30 @@

     # Pass schema ownership from admin to PRODUCER
     if schema_created:
+        # TODO: we are missing a test case for this code path
         session.sql(
-            f"GRANT OWNERSHIP ON SCHEMA {session_info.database}.{session_info.schema} TO ROLE {producer_role}"
+            f"GRANT OWNERSHIP ON SCHEMA {session_info.database}.{session_info.schema} "
+            f"TO ROLE {roles_to_create[_FeatureStoreRole.PRODUCER]}"
         ).collect()

     # Grant privileges to roles
-    _grant_privileges(session, producer_role, _PRE_INIT_PRIVILEGES[_FeatureStoreRole.PRODUCER], session_info)
-    _grant_privileges(session, consumer_role, _PRE_INIT_PRIVILEGES[_FeatureStoreRole.CONSUMER], session_info)
+    for role_type, role in roles_to_create.items():
+        _grant_privileges(session, role, _PRE_INIT_PRIVILEGES[role_type], session_info)


 def _configure_post_init_privileges(
     session: Session,
     session_info: _SessionInfo,
-    producer_role: str = "FS_PRODUCER",
-    consumer_role: str = "FS_CONSUMER",
+    roles_to_create: Dict[_FeatureStoreRole, str],
 ) -> None:
-    _grant_privileges(session, producer_role, _POST_INIT_PRIVILEGES[_FeatureStoreRole.PRODUCER], session_info)
-    _grant_privileges(session, consumer_role, _POST_INIT_PRIVILEGES[_FeatureStoreRole.CONSUMER], session_info)
+    for role_type, role in roles_to_create.items():
+        _grant_privileges(session, role, _POST_INIT_PRIVILEGES[role_type], session_info)


 def _configure_role_hierarchy(
     session: Session,
     producer_role: str,
-    consumer_role: str,
+    consumer_role: Optional[str],
 ) -> None:
     """
     Create Feature Store roles and configure the role hierarchy. Must be run with
@@ -195,18 +195,17 @@ def _configure_role_hierarchy(
         producer_role: Name of producer role to be configured.
         consumer_role: Name of consumer role to be configured.
     """
+    # Create the necessary roles and build role hierarchy
     producer_role = SqlIdentifier(producer_role)
-    consumer_role = SqlIdentifier(consumer_role)
-
-    # Create the necessary roles
     session.sql(f"CREATE ROLE IF NOT EXISTS {producer_role}").collect()
-    session.sql(f"CREATE ROLE IF NOT EXISTS {consumer_role}").collect()
-
-    # Build role hierarchy
-    session.sql(f"GRANT ROLE {consumer_role} TO ROLE {producer_role}").collect()
     session.sql(f"GRANT ROLE {producer_role} TO ROLE SYSADMIN").collect()
     session.sql(f"GRANT ROLE {producer_role} TO ROLE {session.get_current_role()}").collect()

+    if consumer_role is not None:
+        consumer_role = SqlIdentifier(consumer_role)
+        session.sql(f"CREATE ROLE IF NOT EXISTS {consumer_role}").collect()
+        session.sql(f"GRANT ROLE {consumer_role} TO ROLE {producer_role}").collect()
+

 @telemetry.send_api_usage_telemetry(project=_PROJECT)
 def setup_feature_store(
@@ -215,7 +214,7 @@
     schema: str,
     warehouse: str,
     producer_role: str = "FS_PRODUCER",
-    consumer_role: str = "FS_CONSUMER",
+    consumer_role: Optional[str] = None,
 ) -> FeatureStore:
     """
     Sets up a new Feature Store including role/privilege setup. Must be run with ACCOUNTADMIN
@@ -230,7 +229,7 @@
         schema: Schema to create the FeatureStore instance.
         warehouse: Default warehouse for Feature Store compute.
         producer_role: Name of producer role to be configured.
-        consumer_role: Name of consumer role to be configured.
+        consumer_role: Name of consumer role to be configured. If not specified, consumer role won't be created.

     Returns:
         Feature Store instance.
@@ -249,20 +248,25 @@
     )

     try:
+        roles_to_create = {_FeatureStoreRole.PRODUCER: producer_role}
+        if consumer_role is not None:
+            roles_to_create.update({_FeatureStoreRole.CONSUMER: consumer_role})
         _configure_role_hierarchy(session, producer_role=producer_role, consumer_role=consumer_role)
     except exceptions.SnowparkSQLException:
         # Error can be safely ignored if roles already exist and hierarchy is already built
-        for role in (producer_role, consumer_role):
+        for _, role in roles_to_create.items():
             # Ensure roles already exist
             if session.sql(f"SHOW ROLES LIKE '{role}' STARTS WITH '{role}'").count() == 0:
                 raise
-        # Ensure hierarchy already configured
-        consumer_grants = session.sql(f"SHOW GRANTS ON ROLE {consumer_role}").collect()
-        if not any(r["granted_to"] == "ROLE" and r["grantee_name"] == producer_role for r in consumer_grants):
-            raise
+
+        if consumer_role is not None:
+            # Ensure hierarchy already configured
+            consumer_grants = session.sql(f"SHOW GRANTS ON ROLE {consumer_role}").collect()
+            if not any(r["granted_to"] == "ROLE" and r["grantee_name"] == producer_role for r in consumer_grants):
+                raise

     # Do any pre-FeatureStore.__init__() privilege setup
-    _configure_pre_init_privileges(session, session_info, producer_role, consumer_role)
+    _configure_pre_init_privileges(session, session_info, roles_to_create)

     # Use PRODUCER role to create and operate new Feature Store
     current_role = session.get_current_role()
@@ -274,6 +278,6 @@
     session.use_role(current_role)

     # Do any post-FeatureStore.__init__() privilege setup
-    _configure_post_init_privileges(session, session_info, producer_role, consumer_role)
+    _configure_post_init_privileges(session, session_info, roles_to_create)

     return fs
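With `consumer_role` now optional, a producer-only setup becomes the default. A minimal sketch of the new calling convention (role and object names are illustrative; per the docstring above, the session must be running as ACCOUNTADMIN):

```python
from snowflake.ml.feature_store.access_manager import setup_feature_store

# Producer-only setup: no consumer role is created or granted.
fs = setup_feature_store(
    session=session,
    database="FS_DEMO_DB",
    schema="AWESOME_FS",
    warehouse="PUBLIC",
    producer_role="FS_PRODUCER",
)

# Passing consumer_role restores the old behavior: both roles are created,
# and the consumer role is granted to the producer role.
fs = setup_feature_store(
    session=session,
    database="FS_DEMO_DB",
    schema="AWESOME_FS",
    warehouse="PUBLIC",
    producer_role="FS_PRODUCER",
    consumer_role="FS_CONSUMER",
)
```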
diff --git a/snowflake/ml/feature_store/feature_store.py b/snowflake/ml/feature_store/feature_store.py
index f6b68013..8096cc24 100644
--- a/snowflake/ml/feature_store/feature_store.py
+++ b/snowflake/ml/feature_store/feature_store.py
@@ -1761,7 +1761,7 @@ def _tag_ref_internal_enabled(self) -> bool:
         self._session.sql(
             f"""
             SELECT * FROM TABLE(
-                INFORMATION_SCHEMA.TAG_REFERENCES_INTERNAL(
+                {self._config.database}.INFORMATION_SCHEMA.TAG_REFERENCES_INTERNAL(
                     TAG_NAME => '{_FEATURE_STORE_OBJECT_TAG}'
                 )
             ) LIMIT 1;
diff --git a/snowflake/ml/feature_store/feature_view.py b/snowflake/ml/feature_store/feature_view.py
index 387bfbc5..61254344 100644
--- a/snowflake/ml/feature_store/feature_view.py
+++ b/snowflake/ml/feature_store/feature_view.py
@@ -7,10 +7,6 @@
 from enum import Enum
 from typing import Any, Dict, List, Optional

-from snowflake.ml._internal.exceptions import (
-    error_codes,
-    exceptions as snowml_exceptions,
-)
 from snowflake.ml._internal.utils.identifier import concat_names
 from snowflake.ml._internal.utils.sql_identifier import (
     SqlIdentifier,
@@ -34,6 +30,11 @@
 _FEATURE_VIEW_VERSION_RE = re.compile(r"^[a-zA-Z0-9][a-zA-Z0-9_.\-]*$")
 _FEATURE_VIEW_VERSION_MAX_LENGTH = 128

+_RESULT_SCAN_QUERY_PATTERN = re.compile(
+    r".*FROM\s*TABLE\s*\(\s*RESULT_SCAN\s*\(.*",
+    flags=re.DOTALL | re.IGNORECASE | re.X,
+)
+

 @dataclass(frozen=True)
 class _FeatureViewMetadata:
@@ -54,13 +55,10 @@ def from_json(cls, json_str: str) -> _FeatureViewMetadata:

 class 
FeatureViewVersion(str): def __new__(cls, version: str) -> FeatureViewVersion: if not _FEATURE_VIEW_VERSION_RE.match(version) or len(version) > _FEATURE_VIEW_VERSION_MAX_LENGTH: - raise snowml_exceptions.SnowflakeMLException( - error_code=error_codes.INVALID_ARGUMENT, - original_exception=ValueError( - f"`{version}` is not a valid feature view version. " - "It must start with letter or digit, and followed by letter, digit, '_', '-' or '.'. " - f"The length limit is {_FEATURE_VIEW_VERSION_MAX_LENGTH}." - ), + raise ValueError( + f"`{version}` is not a valid feature view version. " + "It must start with letter or digit, and followed by letter, digit, '_', '-' or '.'. " + f"The length limit is {_FEATURE_VIEW_VERSION_MAX_LENGTH}." ) return super().__new__(cls, version) @@ -352,6 +350,9 @@ def _validate(self) -> None: if not isinstance(col_type, (DateType, TimeType, TimestampType, _NumericType)): raise ValueError(f"Invalid data type for timestamp_col {ts_col}: {col_type}.") + if re.match(_RESULT_SCAN_QUERY_PATTERN, self._query) is not None: + raise ValueError(f"feature_df should not be reading from RESULT_SCAN. Invalid query: {self._query}") + def _get_feature_names(self) -> List[SqlIdentifier]: join_keys = [k for e in self._entities for k in e.join_keys] ts_col = [self._timestamp_col] if self._timestamp_col is not None else [] diff --git a/snowflake/ml/feature_store/notebooks/internal_demo/Basic_Feature_Store_Demo.ipynb b/snowflake/ml/feature_store/notebooks/internal_demo/Basic_Feature_Store_Demo.ipynb deleted file mode 100644 index 5b883e20..00000000 --- a/snowflake/ml/feature_store/notebooks/internal_demo/Basic_Feature_Store_Demo.ipynb +++ /dev/null @@ -1,658 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "6ebc1823", - "metadata": {}, - "source": [ - "Steps to run notebook:\n", - "1. Create a conda env with python3.8 (Empty conda env)\n", - "```\n", - "conda create --name snowml python=3.8\n", - "```\n", - "2. Activate conda env\n", - "```\n", - "conda activate snowml\n", - "```\n", - "3. Install conda pkg\n", - "```\n", - "conda install snowflake-ml-python \n", - "# or local build if there are changes in SnowML lib you need: bazel build //snowflake/ml:wheel\n", - "# then do pip install {built pkg}\n", - "```\n", - "4. Install jupyter notebook\n", - "```\n", - "conda install jupyter\n", - "```\n", - "5. 
Start notebook\n", - "```\n", - "jupyter notebook\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "aeae3429", - "metadata": {}, - "source": [ - "## Basic Feature Store Usage Example\n", - "This notebook demonstrates feature store usage for static features.\n", - "The reference example by Databricks is here: https://docs.databricks.com/en/_extras/notebooks/source/machine-learning/feature-store-with-uc-basic-example.html" - ] - }, - { - "cell_type": "markdown", - "id": "fbcd0549", - "metadata": {}, - "source": [ - "## Setup UI and Auto Import" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3ada0a5b", - "metadata": {}, - "outputs": [], - "source": [ - "# Scale cell width with the browser window to accommodate .show() commands for wider tables.\n", - "from IPython.display import display, HTML\n", - "display(HTML(\"\"))\n", - "\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "markdown", - "id": "09e0f7e5", - "metadata": {}, - "source": [ - "#### [Optional 1] Import from local code repository" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "id": "776268d6", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "import sys\n", - "import os\n", - "\n", - "# Simplify reading from the local repository\n", - "cwd=os.getcwd()\n", - "REPO_PREFIX=\"snowflake/ml\"\n", - "LOCAL_REPO_PATH=cwd[:cwd.find(REPO_PREFIX)].rstrip('/')\n", - "\n", - "if LOCAL_REPO_PATH not in sys.path:\n", - " print(f\"Adding {LOCAL_REPO_PATH} to system path\")\n", - " sys.path.append(LOCAL_REPO_PATH)" - ] - }, - { - "cell_type": "markdown", - "id": "65029121", - "metadata": {}, - "source": [ - "#### [Optional 2] Import from installed snowflake-ml-python wheel" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "id": "db7fa435", - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "\n", - "sys.path.insert(0, '/tmp/snowml')" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "id": "714787e6", - "metadata": {}, - "outputs": [], - "source": [ - "import importlib\n", - "from snowflake.snowpark import Session\n", - "from snowflake.snowpark import functions as F\n", - "from snowflake.ml.feature_store.feature_view import FeatureView\n", - "from snowflake.ml.feature_store.entity import Entity\n", - "from snowflake.ml.feature_store.feature_store import FeatureStore, CreationMode\n", - "from snowflake.ml.utils.connection_params import SnowflakeLoginOptions" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "id": "005f6291", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "SnowflakeLoginOptions() is in private preview since 0.2.0. Do not use it in production. 
\n" - ] - } - ], - "source": [ - "session = Session.builder.configs(SnowflakeLoginOptions()).create()" - ] - }, - { - "cell_type": "markdown", - "id": "b79ba9be", - "metadata": {}, - "source": [ - "## Prepare demo data\n", - "\n", - "We will use wine quality dataset to demonstrate feature store usage" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e60407c6", - "metadata": {}, - "outputs": [], - "source": [ - "session.file.put(\"file://winequality-red.csv\", session.get_session_stage())\n", - "\n", - "SOURCE_DB = session.get_current_database()\n", - "SOURCE_SCHEMA = session.get_current_schema()\n", - "\n", - "from snowflake.snowpark.types import StructType, StructField, IntegerType, StringType, FloatType\n", - "input_schema = StructType(\n", - " [\n", - " StructField(\"fixed_acidity\", FloatType()), \n", - " StructField(\"volatile_acidity\", FloatType()), \n", - " StructField(\"citric_acid\", FloatType()), \n", - " StructField(\"residual_sugar\", FloatType()), \n", - " StructField(\"chlorides\", FloatType()), \n", - " StructField(\"free_sulfur_dioxide\", IntegerType()),\n", - " StructField(\"total_sulfur_dioxide\", IntegerType()), \n", - " StructField(\"density\", FloatType()), \n", - " StructField(\"pH\", FloatType()), \n", - " StructField(\"sulphates\", FloatType()),\n", - " StructField(\"alcohol\", FloatType()), \n", - " StructField(\"quality\", IntegerType())\n", - " ]\n", - ")\n", - "df = session.read.options({\"field_delimiter\": \";\", \"skip_header\": 1}).schema(input_schema).csv(f\"{session.get_session_stage()}/winequality-red.csv\")\n", - "df.write.mode(\"overwrite\").save_as_table(\"wine_data\")" - ] - }, - { - "cell_type": "markdown", - "id": "4e4fda65", - "metadata": {}, - "source": [ - "## Generate new synthetic data [Optional]\n", - "Run the cell below to generate new synthetic data for the wine dataset if needed.\n", - "NOTE: the synthetic data will be randomized based on the original data's statistics, so it may affect training quality." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "836c8c31", - "metadata": {}, - "outputs": [], - "source": [ - "from snowflake.ml.feature_store._internal.synthetic_data_generator import (\n", - " SyntheticDataGenerator,\n", - ")\n", - "session2 = Session.builder.configs(SnowflakeLoginOptions()).create()\n", - "generator = SyntheticDataGenerator(session2, SOURCE_DB, SOURCE_SCHEMA, \"wine_data\")\n", - "generator.trigger(batch_size=10, num_batches=30, freq=10)" - ] - }, - { - "cell_type": "markdown", - "id": "4ece7a2b", - "metadata": {}, - "source": [ - "## Create FeatureStore Client\n", - "\n", - "Let's first create a feature store client.\n", - "\n", - "We can pass in an existing database name, or a new database will be created upon the feature store initialization." 
- ] - }, - { - "cell_type": "code", - "execution_count": 40, - "id": "fe850ccd", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Dumped 10 rows to table wine_data.\n" - ] - } - ], - "source": [ - "DEMO_DB = \"FS_DEMO_DB\"\n", - "session.sql(f\"DROP DATABASE IF EXISTS {DEMO_DB}\").collect() # start from scratch\n", - "session.sql(f\"CREATE DATABASE IF NOT EXISTS {DEMO_DB}\").collect()\n", - "session.sql(f\"CREATE OR REPLACE WAREHOUSE PUBLIC WITH WAREHOUSE_SIZE='XSMALL'\").collect()\n", - "\n", - "fs = FeatureStore(\n", - " session=session, \n", - " database=DEMO_DB, \n", - " name=\"AWESOME_FS\", \n", - " default_warehouse=\"PUBLIC\",\n", - " creation_mode=CreationMode.CREATE_IF_NOT_EXIST,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "b50b7ad1", - "metadata": {}, - "source": [ - "## Create and register a new Entity\n", - "\n", - "We will create an Entity called *wine* and register it with the feature store.\n", - "\n", - "You can retrieve the active Entities in the feature store with list_entities() API." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9c8e9605", - "metadata": {}, - "outputs": [], - "source": [ - "entity = Entity(name=\"wine\", join_keys=[\"wine_id\"])\n", - "fs.register_entity(entity)\n", - "fs.list_entities().to_pandas()" - ] - }, - { - "cell_type": "markdown", - "id": "57a529d7", - "metadata": {}, - "source": [ - "## Load source data and do some simple feature engineering\n", - "\n", - "Then we will load from the source table and conduct some simple feature engineerings.\n", - "\n", - "Here we are just doing two simple data manipulation (but more complex ones are carried out the same way):\n", - "1. Assign a wine_id column to the source\n", - "2. Derive a new column by multipying two existing feature columns" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "b6037ab2", - "metadata": {}, - "outputs": [], - "source": [ - "source_df = session.table(f\"{SOURCE_DB}.{SOURCE_SCHEMA}.wine_data\")\n", - "source_df.to_pandas()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d75e06fe", - "metadata": {}, - "outputs": [], - "source": [ - "def addIdColumn(df, id_column_name):\n", - " # Add id column to dataframe\n", - " columns = df.columns\n", - " new_df = df.withColumn(id_column_name, F.monotonically_increasing_id())\n", - " return new_df[[id_column_name] + columns]\n", - "\n", - "def generate_new_feature(df):\n", - " # Derive a new feature column\n", - " return df.withColumn(\"my_new_feature\", df[\"FIXED_ACIDITY\"] * df[\"CITRIC_ACID\"])\n", - "\n", - "df = addIdColumn(source_df, \"wine_id\")\n", - "feature_df = generate_new_feature(df)\n", - "feature_df = feature_df.select(\n", - " [\n", - " 'WINE_ID',\n", - " 'FIXED_ACIDITY',\n", - " 'VOLATILE_ACIDITY',\n", - " 'CITRIC_ACID',\n", - " 'RESIDUAL_SUGAR',\n", - " 'CHLORIDES',\n", - " 'FREE_SULFUR_DIOXIDE',\n", - " 'TOTAL_SULFUR_DIOXIDE',\n", - " 'DENSITY',\n", - " 'PH',\n", - " 'my_new_feature',\n", - " ]\n", - ")\n", - "feature_df.to_pandas()" - ] - }, - { - "cell_type": "markdown", - "id": "bd4be7da", - "metadata": {}, - "source": [ - "## Create a new FeatureView and materialize the feature pipeline\n", - "\n", - "Once the FeatureView construction is done, we can materialize the FeatureView to the Snowflake backend and incremental maintenance will start." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "f58b30e2", - "metadata": {}, - "outputs": [], - "source": [ - "fv = FeatureView(name=\"wine_features\", entities=[entity], feature_df=feature_df, desc=\"wine features\")\n", - "fs.register_feature_view(feature_view=fv, version=\"v1\", refresh_freq=\"1 minute\", block=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4812ae3d", - "metadata": {}, - "outputs": [], - "source": [ - "# Examine the FeatureView content\n", - "fs.read_feature_view(fv).to_pandas()" - ] - }, - { - "cell_type": "markdown", - "id": "6f6fc02a", - "metadata": {}, - "source": [ - "## Explore additional features\n", - "\n", - "Now I have my FeatureView created with a collection of features, but what if I want to explore additional features on top?\n", - "\n", - "Since a materialized FeatureView is immutable (due to singe DDL for the backend dynamic table), we will need to create a new FeatureView for the additional features and then merge them." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ec83565f", - "metadata": {}, - "outputs": [], - "source": [ - "extra_feature_df = df.select(\n", - " [\n", - " 'WINE_ID',\n", - " 'SULPHATES',\n", - " 'ALCOHOL',\n", - " ]\n", - ")\n", - "\n", - "new_fv = FeatureView(name=\"extra_wine_features\", entities=[entity], feature_df=extra_feature_df, desc=\"extra wine features\")\n", - "fs.register_feature_view(feature_view=new_fv, version=\"v1\", refresh_freq=\"1 minute\", block=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9fd134b1", - "metadata": {}, - "outputs": [], - "source": [ - "# We can easily retrieve all FeatureViews for a given Entity.\n", - "fs.list_feature_views(entity_name=\"wine\").to_pandas()" - ] - }, - { - "cell_type": "markdown", - "id": "018cec24", - "metadata": {}, - "source": [ - "## Create new feature view with combined feature results [Optional]\n", - "\n", - "Now we have two FeatureViews ready, we can choose to create a new one by merging the two (it's just like a join and we provide a handy function for that). The new FeatureView won't incur the cost of feature pipelines but only the table join cost.\n", - "\n", - "Obviously we can also just work with two separate FeatureViews (most of our APIs support multiple FeatureViews), the capability of merging is just to make the features better organized and easier to share." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "901e20f6", - "metadata": {}, - "outputs": [], - "source": [ - "full_fv = fs.merge_features(features=[fv, new_fv], name=\"full_wine_features\")\n", - "fs.register_feature_view(feature_view=full_fv, version=\"v1\")" - ] - }, - { - "cell_type": "markdown", - "id": "4dc1a7dc", - "metadata": {}, - "source": [ - "## Generate Training Data\n", - "\n", - "After our feature pipelines are fully setup, we can start using them to generate training data and later do model prediction.\n", - "\n", - "Generate training data is easy since materialized FeatureViews already carry most of the metadata like join keys, timestamp for point-in-time lookup, etc. We just need to provide the spine data (it's called spine because we are essentially enriching the data by joining features with it)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0824f759", - "metadata": {}, - "outputs": [], - "source": [ - "spine_df = session.table(f\"{SOURCE_DB}.{SOURCE_SCHEMA}.wine_data\")\n", - "spine_df = addIdColumn(source_df, \"wine_id\")\n", - "spine_df = spine_df.select(\"wine_id\", \"quality\")\n", - "spine_df.to_pandas()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "574a810b", - "metadata": {}, - "outputs": [], - "source": [ - "session.sql(f\"DROP TABLE IF EXISTS FS_DEMO_DB.AWESOME_FS.wine_training_data_table\").collect()\n", - "training_data = fs.generate_dataset(\n", - " spine_df=spine_df, \n", - " features=[full_fv], \n", - " materialized_table=\"wine_training_data_table\", \n", - " spine_timestamp_col=None, \n", - " spine_label_cols=[\"quality\"],\n", - " save_mode=\"merge\",\n", - ")\n", - "\n", - "training_pd = training_data.df.to_pandas()\n", - "training_pd" - ] - }, - { - "cell_type": "markdown", - "id": "ddca7543", - "metadata": {}, - "source": [ - "## Train a model\n", - "\n", - "Now let's training a simple random forest model and evaluate the prediction accuracy." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29747582", - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.ensemble import RandomForestRegressor\n", - "from sklearn.metrics import mean_squared_error, r2_score\n", - "\n", - "X = training_pd.drop(\"QUALITY\", axis=1)\n", - "y = training_pd[\"QUALITY\"]\n", - "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n", - "X_train.head()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "127da5d7", - "metadata": {}, - "outputs": [], - "source": [ - "def train_model(X_train, X_test, y_train, y_test):\n", - " ## fit and log model \n", - " rf = RandomForestRegressor(max_depth=3, n_estimators=20, random_state=42)\n", - " rf.fit(X_train, y_train)\n", - " y_pred = rf.predict(X_test)\n", - " mse = mean_squared_error(y_test, y_pred)\n", - " print(f\"MSE: {mse}, Accuracy: {round(100*(1-np.mean(np.abs((y_test - y_pred) / np.abs(y_test)))))}\")\n", - " return rf\n", - " \n", - "rf = train_model(X_train, X_test, y_train, y_test)\n", - "print(rf)" - ] - }, - { - "cell_type": "markdown", - "id": "21b81639", - "metadata": {}, - "source": [ - "## Log model with Model Registry\n", - "\n", - "We can log the model along with its training dataset metadata with model registry." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "e0a29768", - "metadata": {}, - "outputs": [], - "source": [ - "from snowflake.ml.registry import model_registry\n", - "from tests.integ.snowflake.ml.test_utils import (\n", - " test_env_utils,\n", - ")\n", - "\n", - "registry = model_registry.ModelRegistry(session=session, database_name=\"my_cool_registry\", create_if_not_exists=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "653b58e7", - "metadata": {}, - "outputs": [], - "source": [ - "model_ref = registry.log_model(\n", - " model_name=\"my_random_forest_regressor\",\n", - " model_version=\"v1\",\n", - " model=rf,\n", - " tags={\"author\": \"my_rf_with_training_data\"},\n", - " conda_dependencies=[\n", - " test_env_utils.get_latest_package_versions_in_server(session, \"snowflake-snowpark-python\")\n", - " ],\n", - " dataset=training_data,\n", - " options={\"embed_local_ml_library\": True},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "3ccf2743", - "metadata": {}, - "source": [ - "## Restore model and predict with latest features\n", - "\n", - "We retrieve the training dataset from registry then construct dataframe of latest feature values. Then we restore the model from registry. At last, we can predict with latest feature values." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d1fcbcb4", - "metadata": {}, - "outputs": [], - "source": [ - "registered_training_data = registry.get_dataset(\n", - " model_name=\"my_random_forest_regressor\", \n", - " model_version=\"v1\",\n", - ")\n", - "\n", - "test_pdf = training_pd.sample(3, random_state=996)[['WINE_ID']]\n", - "test_df = session.create_dataframe(test_pdf)\n", - "\n", - "latest_features = fs.retrieve_feature_values(test_df, registered_training_data.load_features())\n", - "latest_features_pdf = latest_features.to_pandas()\n", - "print(latest_features_pdf)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2d7fd017", - "metadata": {}, - "outputs": [], - "source": [ - "model_ref = model_registry.ModelReference(registry=registry, model_name=\"my_random_forest_regressor\", model_version=\"v1\")\n", - "restored_model = model_ref.load_model() # type: ignore[attr-defined]\n", - "restored_prediction = restored_model.predict(latest_features_pdf)\n", - "\n", - "print(restored_prediction)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.17" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/snowflake/ml/feature_store/notebooks/internal_demo/Time_Series_Feature_Demo.ipynb b/snowflake/ml/feature_store/notebooks/internal_demo/Time_Series_Feature_Demo.ipynb deleted file mode 100644 index 8e9fc82f..00000000 --- a/snowflake/ml/feature_store/notebooks/internal_demo/Time_Series_Feature_Demo.ipynb +++ /dev/null @@ -1,1070 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "02ce01b3", - "metadata": {}, - "source": [ - "Steps to run notebook:\n", - "1. Create a conda env with python3.8 (Empty conda env)\n", - "```\n", - "conda create --name snowml python=3.8\n", - "```\n", - "2. Activate conda env\n", - "```\n", - "conda activate snowml\n", - "```\n", - "3. 
Install conda pkg\n", - "```\n", - "conda install snowflake-ml-python \n", - "# or local build if there are changes in SnowML lib you need: bazel build //snowflake/ml:wheel\n", - "# then do pip install {built pkg}\n", - "```\n", - "4. Install jupyter notebook\n", - "```\n", - "conda install jupyter\n", - "```\n", - "5. Start notebook\n", - "```\n", - "jupyter notebook\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "3160eb3e", - "metadata": {}, - "source": [ - "## Feature Store Example with Time Series Features\n", - "This notebook demonstrates advanced feature store usage with time series features. \n", - "It will compute features from NY taxi trip data and demonstrate connections between training and prediction.\n", - "The reference example by Databricks is here: https://docs.databricks.com/en/_extras/notebooks/source/machine-learning/feature-store-with-uc-taxi-example.html#feature-store/feature-store" - ] - }, - { - "cell_type": "markdown", - "id": "a37f4de1", - "metadata": {}, - "source": [ - "## Setup UI and Auto Import" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "id": "da1a922d", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "The autoreload extension is already loaded. To reload it, use:\n", - " %reload_ext autoreload\n" - ] - } - ], - "source": [ - "# Scale cell width with the browser window to accommodate .show() commands for wider tables.\n", - "from IPython.display import display, HTML\n", - "display(HTML(\"\"))\n", - "\n", - "%load_ext autoreload\n", - "%autoreload 2" - ] - }, - { - "cell_type": "markdown", - "id": "c3b3d0e0", - "metadata": {}, - "source": [ - "#### [Optional 1] Import from local code repository" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "id": "11935b50", - "metadata": {}, - "outputs": [], - "source": [ - "import sys\n", - "import os\n", - "\n", - "# Simplify reading from the local repository\n", - "cwd=os.getcwd()\n", - "REPO_PREFIX=\"snowflake/ml\"\n", - "LOCAL_REPO_PATH=cwd[:cwd.find(REPO_PREFIX)].rstrip('/')\n", - "\n", - "if LOCAL_REPO_PATH not in sys.path:\n", - " print(f\"Adding {LOCAL_REPO_PATH} to system path\")\n", - " sys.path.append(LOCAL_REPO_PATH)" - ] - }, - { - "cell_type": "markdown", - "id": "38dd3a7d", - "metadata": {}, - "source": [ - "#### [Optional 2] Import from installed snowflake-ml-python wheel" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "671378ae", - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "conda_env = os.environ['CONDA_DEFAULT_ENV']\n", - "import sys\n", - "sys.path.append(f'/opt/homebrew/anaconda3/envs/{conda_env}/lib/python3.8/site-packages')" - ] - }, - { - "cell_type": "markdown", - "id": "54f5ac3a", - "metadata": {}, - "source": [ - "## Prepare demo data" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "id": "f39a3f77", - "metadata": {}, - "outputs": [], - "source": [ - "import importlib\n", - "from snowflake.snowpark import Session\n", - "from snowflake.snowpark import functions as F, types as T\n", - "from snowflake.ml.feature_store import FeatureStore, FeatureView, Entity, CreationMode\n", - "from snowflake.ml.utils.connection_params import SnowflakeLoginOptions\n", - "from snowflake.snowpark.types import DateType, TimeType, _NumericType, TimestampType\n", - "import datetime\n" - ] - }, - { - "cell_type": "code", - 
"execution_count": 34, - "id": "e665bd41", - "metadata": {}, - "outputs": [], - "source": [ - "session = Session.builder.configs(SnowflakeLoginOptions()).create()" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "id": "75bfcfd1", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-------------------------------------------------------------------------------------------------------------------------------------\n", - "|\"TRIP_DISTANCE\" |\"FARE_AMOUNT\" |\"PASSENGER_COUNT\" |\"PULOCATIONID\" |\"DOLOCATIONID\" |\"PICKUP_TS\" |\"DROPOFF_TS\" |\n", - "-------------------------------------------------------------------------------------------------------------------------------------\n", - "|3.2 |14.0 |1 |48 |262 |2016-01-01 00:12:22 |2016-01-01 00:29:14 |\n", - "|1.0 |9.5 |2 |162 |48 |2016-01-01 00:41:31 |2016-01-01 00:55:10 |\n", - "|0.9 |6.0 |1 |246 |90 |2016-01-01 00:53:37 |2016-01-01 00:59:57 |\n", - "|0.8 |5.0 |1 |170 |162 |2016-01-01 00:13:28 |2016-01-01 00:18:07 |\n", - "|1.8 |11.0 |1 |161 |140 |2016-01-01 00:33:04 |2016-01-01 00:47:14 |\n", - "|2.3 |11.0 |1 |141 |137 |2016-01-01 00:49:47 |2016-01-01 01:04:44 |\n", - "|13.8 |43.0 |1 |100 |53 |2016-01-01 00:41:58 |2016-01-01 01:22:06 |\n", - "|3.46 |20.0 |5 |48 |79 |2016-01-01 00:25:28 |2016-01-01 00:55:46 |\n", - "|0.83 |5.5 |4 |79 |107 |2016-01-01 00:56:57 |2016-01-01 01:02:24 |\n", - "|0.87 |7.0 |1 |164 |164 |2016-01-01 00:10:08 |2016-01-01 00:23:05 |\n", - "-------------------------------------------------------------------------------------------------------------------------------------\n", - "\n" - ] - } - ], - "source": [ - "source_df = session.table(\"SNOWML_FEATURE_STORE_TEST_DB.TEST_DATASET.yellow_tripdata_2016_01\")\n", - "\n", - "source_df = source_df.select(\n", - " [\n", - " \"TRIP_DISTANCE\", \n", - " \"FARE_AMOUNT\",\n", - " \"PASSENGER_COUNT\",\n", - " \"PULOCATIONID\",\n", - " \"DOLOCATIONID\",\n", - " F.cast(source_df.TPEP_PICKUP_DATETIME / 1000000, TimestampType()).alias(\"PICKUP_TS\"),\n", - " F.cast(source_df.TPEP_DROPOFF_DATETIME / 1000000, TimestampType()).alias(\"DROPOFF_TS\"),\n", - " ]).filter(\"DROPOFF_TS >= '2016-01-01 00:00:00' AND DROPOFF_TS < '2016-01-03 00:00:00'\")\n", - "source_df.show()" - ] - }, - { - "cell_type": "markdown", - "id": "52162799", - "metadata": {}, - "source": [ - "## Create FeatureStore Client\n", - "\n", - "Let's first create a feature store client.\n", - "\n", - "We can pass in an existing database name, or a new database will be created upon the feature store initialization." - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "id": "6c37a635", - "metadata": {}, - "outputs": [], - "source": [ - "DEMO_DB = \"FS_TIME_SERIES_EXAMPLE\"\n", - "session.sql(f\"DROP DATABASE IF EXISTS {DEMO_DB}\").collect() # start from scratch\n", - "session.sql(f\"CREATE DATABASE IF NOT EXISTS {DEMO_DB}\").collect()\n", - "session.sql(f\"CREATE OR REPLACE WAREHOUSE PUBLIC WITH WAREHOUSE_SIZE='XSMALL'\").collect()\n", - "\n", - "fs = FeatureStore(\n", - " session=session, \n", - " database=DEMO_DB, \n", - " name=\"AWESOME_FS\", \n", - " default_warehouse=\"PUBLIC\",\n", - " creation_mode=CreationMode.CREATE_IF_NOT_EXIST,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "3d052074", - "metadata": {}, - "source": [ - "## Create and register new Entities" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "id": "70609920", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "           NAME     JOIN_KEYS DESC\n", - "0  TRIP_DROPOFF  DOLOCATIONID     \n", - "1   TRIP_PICKUP  PULOCATIONID     " - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "trip_pickup = Entity(name=\"trip_pickup\", join_keys=[\"PULOCATIONID\"])\n", - "trip_dropoff = Entity(name=\"trip_dropoff\", join_keys=[\"DOLOCATIONID\"])\n", - "fs.register_entity(trip_pickup)\n", - "fs.register_entity(trip_dropoff)\n", - "fs.list_entities().to_pandas()" - ] - },
- { - "cell_type": "markdown", - "id": "3f9c4393", - "metadata": {}, - "source": [ - "## Define feature pipeline\n", - "We will compute a few time series features in the pipeline here.\n", - "Until *__value based range between__* is available in SQL, we will use a workaround to mimic the calculation (NOTE: the workaround won't be fully accurate for the time series values because gap filling is not available, but it is good enough for demo purposes).\n", - "\n", - "We will define two feature groups:\n", - "1. pickup features\n", - "   - Mean fare amount over the past 2 and 5 hours\n", - "2. dropoff features\n", - "   - Count of trips over the past 2 and 5 hours" - ] - },
- { - "cell_type": "markdown", - "id": "71729be3", - "metadata": {}, - "source": [ - "### This is a UDF computing the time window end\n", - "We will later turn these into built-in functions for the feature store" - ] - },
- { - "cell_type": "code", - "execution_count": 38, - "id": "995b4bcd", - "metadata": {}, - "outputs": [], - "source": [ - "@F.pandas_udf(\n", - "    name=\"vec_window_end\",\n", - "    is_permanent=True,\n", - "    stage_location=session.get_session_stage(),\n", - "    packages=[\"numpy\", \"pandas\", \"pytimeparse\"],\n", - "    replace=True,\n", - "    session=session,\n", - "    immutable=True,\n", - ")\n", - "def vec_window_end_compute(\n", - "    x: T.PandasSeries[datetime.datetime],\n", - "    interval: T.PandasSeries[str],\n", - ") -> T.PandasSeries[datetime.datetime]:\n", - "    import numpy as np\n", - "    import pandas as pd\n", - "    from pytimeparse.timeparse import timeparse\n", - "\n", - "    time_slice = timeparse(interval[0])\n", - "    if time_slice is None:\n", - "        raise ValueError(f\"Cannot parse interval {interval[0]}\")\n", - "    time_slot = (x - np.datetime64('1970-01-01T00:00:00')) // np.timedelta64(1, 's') // time_slice * time_slice + time_slice\n", - "    return pd.to_datetime(time_slot, unit='s')" - ] - },
- { - "cell_type": "markdown", - "id": "73742b89", - "metadata": {}, - "source": [ - "### Define the feature pipeline logic" - ] - },
- { - "cell_type": "code", - "execution_count": 39, - "id": "7d0c4339", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "----------------------------------------------------------------------------------\n", - "|\"PULOCATIONID\"  |\"TS\"                 |\"MEAN_FARE_2_HR\"    |\"MEAN_FARE_5_HR\"    |\n", - "----------------------------------------------------------------------------------\n", - "|98              |2016-01-01 04:45:00  |26.0                |26.0                |\n", - "|98              |2016-01-01 14:00:00  |19.75               |19.75               |\n", - "|98              |2016-01-02 22:30:00  |156.5               |156.5               |\n", - "|225             |2016-01-01 00:30:00  |9.6                 |9.6                 |\n", - "|225             |2016-01-01 00:45:00  |11.833333333333334  |11.833333333333334  |\n", - "|225             |2016-01-01 01:00:00  |15.045454545454545  |15.045454545454545  |\n", - "|225             |2016-01-01 01:15:00  |13.928571428571429  |13.928571428571429  |\n", - "|225             |2016-01-01 01:30:00  |12.717948717948717  |12.717948717948717  |\n", - "|225             |2016-01-01 01:45:00  |13.169811320754716  |13.169811320754716  |\n", - "|225             |2016-01-01 02:00:00  |12.607142857142858  |12.607142857142858  |\n", - "----------------------------------------------------------------------------------\n", - "\n", - "--------------------------------------------------------------------------------\n", - "|\"DOLOCATIONID\"  |\"TS\"                 |\"COUNT_TRIP_2_HR\"  |\"COUNT_TRIP_5_HR\"  |\n", - "--------------------------------------------------------------------------------\n", - "|227             |2016-01-01 00:30:00  |2                  |2                  |\n", - "|227             |2016-01-01 00:45:00  |5                  |5                  |\n", - "|227             |2016-01-01 01:00:00  |12                 |12                 |\n", - "|227             |2016-01-01 01:15:00  |16                 |16                 |\n", - "|227             |2016-01-01 01:30:00  |21                 |21                 |\n", - "|227             |2016-01-01 01:45:00  |25                 |25                 |\n", - "|227             |2016-01-01 02:00:00  |33                 |33                 |\n", - "|227             |2016-01-01 02:15:00  |43                 |43                 |\n", - "|227             |2016-01-01 02:30:00  |48                 |50                 |\n", - "|227             |2016-01-01 02:45:00  |53                 |58                 |\n", - "--------------------------------------------------------------------------------\n", - "\n" - ] - } - ], - "source": [ - "from snowflake.snowpark import Window\n", - "from snowflake.snowpark.functions import col\n", - "\n", - "# NOTE: these time window calculations are approximate and do not handle time gaps\n", - "\n", - "def pre_aggregate_fn(df, ts_col, group_by_cols):\n", - "    df = df.with_column(\"WINDOW_END\", F.call_udf(\"vec_window_end\", F.col(ts_col), \"15m\"))\n", - "    df = df.group_by(group_by_cols + [\"WINDOW_END\"]).agg(\n", - "        F.sum(\"FARE_AMOUNT\").alias(\"FARE_SUM_1_hr\"),\n", - "        F.count(\"*\").alias(\"TRIP_COUNT_1_hr\")\n", - "    )\n", - "    return df\n", - "\n", - "def pickup_features_fn(df):\n", - "    df = pre_aggregate_fn(df, \"PICKUP_TS\", [\"PULOCATIONID\"])\n", - "    \n", - "    window1 = Window.partition_by(\"PULOCATIONID\").order_by(col(\"WINDOW_END\").desc()).rows_between(Window.CURRENT_ROW, 7)\n", - "    window2 = Window.partition_by(\"PULOCATIONID\").order_by(col(\"WINDOW_END\").desc()).rows_between(Window.CURRENT_ROW, 19)\n", - "\n", - "    df = df.with_columns(\n", - "        [\n", - "            \"SUM_FARE_2_hr\",\n", - "            \"COUNT_TRIP_2hr\",\n", - "            \"SUM_FARE_5_hr\",\n", - "            \"COUNT_TRIP_5hr\",\n", - "        ],\n", - "        [\n", - "            F.sum(\"FARE_SUM_1_hr\").over(window1),\n", - "            F.sum(\"TRIP_COUNT_1_hr\").over(window1),\n", - "            F.sum(\"FARE_SUM_1_hr\").over(window2),\n", - "            F.sum(\"TRIP_COUNT_1_hr\").over(window2),\n", - "        ]\n", - "    ).select(\n", - "        [\n", - "            col(\"PULOCATIONID\"),\n", - "            col(\"WINDOW_END\").alias(\"TS\"),\n", - "            (col(\"SUM_FARE_2_hr\") / col(\"COUNT_TRIP_2hr\")).alias(\"MEAN_FARE_2_hr\"),\n", - "            (col(\"SUM_FARE_5_hr\") / col(\"COUNT_TRIP_5hr\")).alias(\"MEAN_FARE_5_hr\"),\n", - "        ]\n", - "    )\n", - "    return df\n", - "\n", - "def dropoff_features_fn(df):\n", - "    df = pre_aggregate_fn(df, \"DROPOFF_TS\", [\"DOLOCATIONID\"])\n", - "    window1 = Window.partition_by(\"DOLOCATIONID\").order_by(col(\"WINDOW_END\").desc()).rows_between(Window.CURRENT_ROW, 7)\n", - "    window2 = Window.partition_by(\"DOLOCATIONID\").order_by(col(\"WINDOW_END\").desc()).rows_between(Window.CURRENT_ROW, 19)\n", - "\n", - "    df = df.select(\n", - "        [\n", - "            col(\"DOLOCATIONID\"),\n", - "            col(\"WINDOW_END\").alias(\"TS\"),\n", - "            F.sum(\"TRIP_COUNT_1_hr\").over(window1).alias(\"COUNT_TRIP_2_hr\"),\n", - "            F.sum(\"TRIP_COUNT_1_hr\").over(window2).alias(\"COUNT_TRIP_5_hr\"),\n", - "        ]\n", - "    )\n", - "    return df\n", - "\n", - "pickup_df = pickup_features_fn(source_df)\n", - "pickup_df.show()\n", - "\n", - "dropoff_df = dropoff_features_fn(source_df)\n", - "dropoff_df.show()" - ] - },
- { - "cell_type": "markdown", - "id": "bd46fa4f", - "metadata": {}, - "source": [ - "## Create FeatureViews and materialize\n", - "\n", - "Once the FeatureView is constructed, we can materialize it to the Snowflake backend, and incremental maintenance will start." - ] - },
- { - "cell_type": "code", - "execution_count": 40, - "id": "f0cd2075", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/tbao/Desktop/Snowflake/snowml/snowflake/ml/feature_store/feature_store.py:334: UserWarning: Your pipeline won't be incrementally refreshed due to: \"Query contains the function 'VEC_WINDOW_END', but change tracking is not supported on queries with non-IMMUTABLE user-defined functions.\". It will likely incurr higher cost.\n", - "  self._create_dynamic_table(\n" - ] - } - ], - "source": [ - "pickup_fv = FeatureView(\n", - "    name=\"trip_pickup_features\", \n", - "    entities=[trip_pickup], \n", - "    feature_df=pickup_df, \n", - "    timestamp_col=\"ts\",\n", - "    refresh_freq=\"1 minute\",\n", - ").attach_feature_desc({\"MEAN_FARE_2_HR\": \"avg fare over past 2hr\"})\n", - "pickup_fv = fs.register_feature_view(feature_view=pickup_fv, version=\"v1\", block=True)" - ] - },
- { - "cell_type": "code", - "execution_count": 42, - "id": "d8960b0e", - "metadata": {}, - "outputs": [], - "source": [ - "dropoff_fv = FeatureView(\n", - "    name=\"trip_dropoff_features\", \n", - "    entities=[trip_dropoff], \n", - "    feature_df=dropoff_df, \n", - "    timestamp_col=\"ts\",\n", - "    refresh_freq=\"1 minute\",\n", - ").attach_feature_desc({\"COUNT_TRIP_2_HR\": \"trip count over past 2hr\"})\n", - "dropoff_fv = fs.register_feature_view(feature_view=dropoff_fv, version=\"v1\", block=True)" - ] - },
- { - "cell_type": "markdown", - "id": "02009c81", - "metadata": {}, - "source": [ - "## Explore FeatureViews\n", - "We can easily discover the materialized FeatureViews and their corresponding features with *__fs.list_feature_views()__*. \n", - "\n", - "We can also apply filters based on Entity name or FeatureView names."
- ] - }, - { - "cell_type": "code", - "execution_count": 43, - "id": "bc93de79", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "---------------------------------------------------------------------------------------------------------------------\n", - "|\"NAME\" |\"VERSION\" |\"ENTITIES\" |\"FEATURE_DESC\" |\n", - "---------------------------------------------------------------------------------------------------------------------\n", - "|TRIP_DROPOFF_FEATURES |V1 |[ |{ |\n", - "| | | { | \"COUNT_TRIP_2_HR\": \"trip count over past 2hr\", |\n", - "| | | \"desc\": \"\", | \"COUNT_TRIP_5_HR\": \"\" |\n", - "| | | \"join_keys\": [ |} |\n", - "| | | \"DOLOCATIONID\" | |\n", - "| | | ], | |\n", - "| | | \"name\": \"TRIP_DROPOFF\" | |\n", - "| | | } | |\n", - "| | |] | |\n", - "|TRIP_PICKUP_FEATURES |V1 |[ |{ |\n", - "| | | { | \"MEAN_FARE_2_HR\": \"\", |\n", - "| | | \"desc\": \"\", | \"MEAN_FARE_5_HR\": \"\" |\n", - "| | | \"join_keys\": [ |} |\n", - "| | | \"PULOCATIONID\" | |\n", - "| | | ], | |\n", - "| | | \"name\": \"TRIP_PICKUP\" | |\n", - "| | | } | |\n", - "| | |] | |\n", - "---------------------------------------------------------------------------------------------------------------------\n", - "\n" - ] - } - ], - "source": [ - "fs.list_feature_views().select([\"NAME\", \"VERSION\", \"ENTITIES\", \"FEATURE_DESC\"]).show()" - ] - }, - { - "cell_type": "markdown", - "id": "9302cf23", - "metadata": {}, - "source": [ - "## Generate training data and train a model\n", - "The training data generation will lookup __point-in-time correct__ feature values and join with the spine dataframe." - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "id": "a4e3376c", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "-----------------------------------------------------------------------------------------------------------------------------------------------------------\n", - "|\"DOLOCATIONID\" |\"PICKUP_TS\" |\"PULOCATIONID\" |\"FARE_AMOUNT\" |\"MEAN_FARE_2_HR\" |\"MEAN_FARE_5_HR\" |\"COUNT_TRIP_2_HR\" |\"COUNT_TRIP_5_HR\" |\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------------------\n", - "|262 |2016-01-01 00:12:22 |48 |14.0 |NULL |NULL |NULL |NULL |\n", - "|48 |2016-01-01 00:41:31 |162 |9.5 |11.451428571428572 |11.451428571428572 |137 |137 |\n", - "|90 |2016-01-01 00:53:37 |246 |6.0 |13.765232974910393 |13.765232974910393 |214 |214 |\n", - "|162 |2016-01-01 00:13:28 |170 |5.0 |NULL |NULL |NULL |NULL |\n", - "|140 |2016-01-01 00:33:04 |161 |11.0 |13.203869047619047 |13.203869047619047 |83 |83 |\n", - "|137 |2016-01-01 00:49:47 |141 |11.0 |10.352534562211982 |10.352534562211982 |244 |244 |\n", - "|53 |2016-01-01 00:41:58 |100 |43.0 |15.816091954022989 |15.816091954022989 |NULL |NULL |\n", - "|79 |2016-01-01 00:25:28 |48 |20.0 |15.685714285714285 |15.685714285714285 |43 |43 |\n", - "|79 |2016-01-01 00:25:28 |48 |20.0 |15.685714285714285 |15.685714285714285 |43 |43 |\n", - "|79 |2016-01-01 00:25:28 |48 |20.0 |15.685714285714285 |15.685714285714285 |43 |43 |\n", - "-----------------------------------------------------------------------------------------------------------------------------------------------------------\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "{'queries': ['SELECT * FROM 
FS_TIME_SERIES_EXAMPLE.AWESOME_FS.yellow_tripdata_2016_01_training_data_2023_12_12_14_10_32'],\n", - " 'post_actions': []}" - ] - }, - "execution_count": 44, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "spine_df = source_df.select([\"PULOCATIONID\", \"DOLOCATIONID\", \"PICKUP_TS\", \"FARE_AMOUNT\"])\n", - "training_data = fs.generate_dataset(\n", - " spine_df=spine_df,\n", - " features=[pickup_fv, dropoff_fv],\n", - " materialized_table=\"yellow_tripdata_2016_01_training_data\",\n", - " spine_timestamp_col=\"PICKUP_TS\",\n", - " spine_label_cols = [\"FARE_AMOUNT\"]\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 45, - "id": "6bced5e5", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ], - "text/plain": [ - "        DOLOCATIONID  PULOCATIONID  MEAN_FARE_2_HR  MEAN_FARE_5_HR  \\\n", - "359595            90           249        8.806985        9.258179   \n", - "360562           170            79       10.242111       10.517555   \n", - "681540            50           107        9.416096        9.226157   \n", - "951079            48           151       10.308511       10.278940   \n", - "477164            79           249        9.562016        9.554124   \n", - "\n", - "        COUNT_TRIP_2_HR  COUNT_TRIP_5_HR  \n", - "359595            404.0            995.0  \n", - "360562            821.0           2251.0  \n", - "681540            394.0            956.0  \n", - "951079           1289.0           3380.0  \n", - "477164           1124.0           2827.0  " - ] - }, - "execution_count": 45, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import numpy as np\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.linear_model import LinearRegression\n", - "\n", - "training_pd = training_data.df.to_pandas()\n", - "X = training_pd.drop([\"FARE_AMOUNT\", \"PICKUP_TS\"], axis=1)\n", - "y = training_pd[\"FARE_AMOUNT\"]\n", - "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n", - "X_train.head()" - ] - },
- { - "cell_type": "code", - "execution_count": 46, - "id": "8f0e6902", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "31.1498254012058 %\n", - "Mean squared error: 91.42\n" - ] - } - ], - "source": [ - "from sklearn.impute import SimpleImputer\n", - "from sklearn.pipeline import make_pipeline\n", - "from sklearn.metrics import mean_squared_error\n", - "\n", - "imp = SimpleImputer(missing_values=np.nan, strategy='mean')\n", - "estimator = make_pipeline(imp, LinearRegression())\n", - "\n", - "reg = estimator.fit(X_train, y_train)\n", - "r2_score = reg.score(X_test, y_test)\n", - "print(r2_score * 100,'%')\n", - "\n", - "y_pred = reg.predict(X_test)\n", - "print(\"Mean squared error: %.2f\" % mean_squared_error(y_test, y_pred))" - ] - },
- { - "cell_type": "markdown", - "id": "0142c25c", - "metadata": {}, - "source": [ - "## Log model with Model Registry\n", - "We can log the model, along with its training dataset metadata, to the model registry." - ] - },
- { - "cell_type": "code", - "execution_count": 47, - "id": "c57a81e2", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:absl:The database \"my_cool_registry\" already exists. Skipping creation.\n", - "WARNING:absl:The schema \"my_cool_registry\"._SYSTEM_MODEL_REGISTRY_SCHEMA already exists. Skipping creation.\n" - ] - } - ], - "source": [ - "from snowflake.ml.registry import model_registry, artifact\n", - "import time\n", - "\n", - "registry = model_registry.ModelRegistry(session=session, database_name=\"my_cool_registry\", create_if_not_exists=True)" - ] - },
- { - "cell_type": "code", - "execution_count": 48, - "id": "4caab287", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:snowflake.snowpark:ModelRegistry.log_artifact() is in private preview since 1.0.10. Do not use it in production.
\n" - ] - } - ], - "source": [ - "DATASET_NAME = \"MY_DATASET\"\n", - "DATASET_VERSION = f\"V1_{time.time()}\"\n", - "\n", - "my_dataset = registry.log_artifact(\n", - " artifact=training_data,\n", - " name=DATASET_NAME,\n", - " version=DATASET_VERSION,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 50, - "id": "a935926a", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/opt/homebrew/anaconda3/envs/feature_store_demo/lib/python3.8/site-packages/snowflake/ml/model/model_signature.py:55: UserWarning: The sample input has 959457 rows, thus a truncation happened before inferring signature. This might cause inaccurate signature inference. If that happens, consider specifying signature manually.\n", - " warnings.warn(\n" - ] - } - ], - "source": [ - "model_name = \"MY_MODEL\"\n", - "model_version = f\"V1_{time.time()}\"\n", - "\n", - "model_ref = registry.log_model(\n", - " model_name=model_name,\n", - " model_version=model_version,\n", - " model=estimator,\n", - " artifacts=[my_dataset],\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "e6e0581d", - "metadata": {}, - "source": [ - "## Restore model and predict with latest features\n", - "We retrieve the training dataset from registry then construct dataframe of latest feature values. Then we restore the model from registry. At last, we can predict with latest feature values." - ] - }, - { - "cell_type": "code", - "execution_count": 51, - "id": "0a18a5ea", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING:snowflake.snowpark:FeatureStore.retrieve_feature_values() is in private preview since 1.0.8. Do not use it in production. \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ 9.71003863 13.95909809 13.95909809 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 
11.70994268\n", - " 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268 11.70994268\n", - " 9.7267722 10.61725586 10.61725586 12.77915638 12.77915638 12.77915638\n", - " 15.89874567 13.24272348 13.24272348 13.24272348 13.24272348]\n" - ] - } - ], - "source": [ - "pred_df = training_data.df.sample(0.01).select(\n", - " ['PULOCATIONID', 'DOLOCATIONID', 'PICKUP_TS'])\n", - "\n", - "enriched_df = fs.retrieve_feature_values(\n", - " spine_df=pred_df, \n", - " features=training_data.load_features(), \n", - " spine_timestamp_col='PICKUP_TS'\n", - ").drop(['PICKUP_TS']).to_pandas()\n", - "\n", - "pred = estimator.predict(enriched_df)\n", - "print(pred)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3bd545ee", - "metadata": {}, - "outputs": [], - "source": [ - "model_ref = model_registry.ModelReference(\n", - " registry=registry, \n", - " model_name=model_name, \n", - " model_version=model_version,\n", - ").load_model()\n", - "\n", - "pred = model_ref.predict(enriched_df)\n", - "\n", - "print(pred)" - ] - }, - { - "cell_type": "markdown", - "id": "06ad5af0", - "metadata": {}, - "source": [ - "## DO NOT READ\n", - "Below is a simple test for the window_end function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d45ba589", - "metadata": {}, - "outputs": [], - "source": [ - "from snowflake.snowpark import Session\n", - "from snowflake.ml.utils.connection_params import SnowflakeLoginOptions\n", - "from snowflake.snowpark import functions as F, types as T\n", - "import datetime\n", - "\n", - "session = Session.builder.configs(SnowflakeLoginOptions()).create()\n", - "\n", - "udf_name = \"window_end\"\n", - " \n", - "@F.pandas_udf(\n", - " name=udf_name,\n", - " replace=True,\n", - " packages=[\"numpy\", \"pandas\", \"pytimeparse\"],\n", - " session=session,\n", - ")\n", - "def vec_window_end_compute(\n", - " x: T.PandasSeries[datetime.datetime],\n", - " interval: T.PandasSeries[str],\n", - ") -> T.PandasSeries[datetime.datetime]:\n", - " import numpy as np\n", - " import pandas as pd\n", - " from pytimeparse.timeparse import timeparse\n", - "\n", - " time_slice = timeparse(interval[0])\n", - " if time_slice is None:\n", - " raise ValueError(f\"Cannot parse interval {interval[0]}\")\n", - " time_slot = (x - np.datetime64('1970-01-01T00:00:00')) // np.timedelta64(1, 's') // time_slice * time_slice + time_slice\n", - " return pd.to_datetime(time_slot, unit='s')\n", - "\n", - "df = session.create_dataframe(\n", - " [\n", - " '2023-01-31 01:02:03.004',\n", - " '2023-01-31 01:14:59.999',\n", - " '2023-01-31 01:15:00.000',\n", - " '2023-01-31 01:15:00.004',\n", - " '2023-01-31 01:17:10.007',\n", - " ], \n", - " schema=['a']\n", - ")\n", - "df = df.select([F.to_timestamp(\"a\").alias(\"ts\")])\n", - "\n", - "df = df.select([\"TS\", F.call_udf(udf_name, F.col(\"TS\"), \"15m\").alias(\"window_end\")])\n", - "df.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "67a5a484", - "metadata": {}, - "outputs": [], - "source": [ - "session.sql(\"select window_end(ts, '15m') from foobar\").collect()" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python [conda env:feature_store_demo]", - "language": "python", - "name": "conda-env-feature_store_demo-py" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.18" - } - }, - 
"nbformat": 4, - "nbformat_minor": 5 -} diff --git a/snowflake/ml/feature_store/notebooks/internal_demo/winequality-red.csv b/snowflake/ml/feature_store/notebooks/internal_demo/winequality-red.csv deleted file mode 100644 index 9bb4e3cd..00000000 --- a/snowflake/ml/feature_store/notebooks/internal_demo/winequality-red.csv +++ /dev/null @@ -1,1600 +0,0 @@ -"fixed acidity";"volatile acidity";"citric acid";"residual sugar";"chlorides";"free sulfur dioxide";"total sulfur dioxide";"density";"pH";"sulphates";"alcohol";"quality" -7.4;0.7;0;1.9;0.076;11;34;0.9978;3.51;0.56;9.4;5 -7.8;0.88;0;2.6;0.098;25;67;0.9968;3.2;0.68;9.8;5 -7.8;0.76;0.04;2.3;0.092;15;54;0.997;3.26;0.65;9.8;5 -11.2;0.28;0.56;1.9;0.075;17;60;0.998;3.16;0.58;9.8;6 -7.4;0.7;0;1.9;0.076;11;34;0.9978;3.51;0.56;9.4;5 -7.4;0.66;0;1.8;0.075;13;40;0.9978;3.51;0.56;9.4;5 -7.9;0.6;0.06;1.6;0.069;15;59;0.9964;3.3;0.46;9.4;5 -7.3;0.65;0;1.2;0.065;15;21;0.9946;3.39;0.47;10;7 -7.8;0.58;0.02;2;0.073;9;18;0.9968;3.36;0.57;9.5;7 -7.5;0.5;0.36;6.1;0.071;17;102;0.9978;3.35;0.8;10.5;5 -6.7;0.58;0.08;1.8;0.097;15;65;0.9959;3.28;0.54;9.2;5 -7.5;0.5;0.36;6.1;0.071;17;102;0.9978;3.35;0.8;10.5;5 -5.6;0.615;0;1.6;0.089;16;59;0.9943;3.58;0.52;9.9;5 -7.8;0.61;0.29;1.6;0.114;9;29;0.9974;3.26;1.56;9.1;5 -8.9;0.62;0.18;3.8;0.176;52;145;0.9986;3.16;0.88;9.2;5 -8.9;0.62;0.19;3.9;0.17;51;148;0.9986;3.17;0.93;9.2;5 -8.5;0.28;0.56;1.8;0.092;35;103;0.9969;3.3;0.75;10.5;7 -8.1;0.56;0.28;1.7;0.368;16;56;0.9968;3.11;1.28;9.3;5 -7.4;0.59;0.08;4.4;0.086;6;29;0.9974;3.38;0.5;9;4 -7.9;0.32;0.51;1.8;0.341;17;56;0.9969;3.04;1.08;9.2;6 -8.9;0.22;0.48;1.8;0.077;29;60;0.9968;3.39;0.53;9.4;6 -7.6;0.39;0.31;2.3;0.082;23;71;0.9982;3.52;0.65;9.7;5 -7.9;0.43;0.21;1.6;0.106;10;37;0.9966;3.17;0.91;9.5;5 -8.5;0.49;0.11;2.3;0.084;9;67;0.9968;3.17;0.53;9.4;5 -6.9;0.4;0.14;2.4;0.085;21;40;0.9968;3.43;0.63;9.7;6 -6.3;0.39;0.16;1.4;0.08;11;23;0.9955;3.34;0.56;9.3;5 -7.6;0.41;0.24;1.8;0.08;4;11;0.9962;3.28;0.59;9.5;5 -7.9;0.43;0.21;1.6;0.106;10;37;0.9966;3.17;0.91;9.5;5 -7.1;0.71;0;1.9;0.08;14;35;0.9972;3.47;0.55;9.4;5 -7.8;0.645;0;2;0.082;8;16;0.9964;3.38;0.59;9.8;6 -6.7;0.675;0.07;2.4;0.089;17;82;0.9958;3.35;0.54;10.1;5 -6.9;0.685;0;2.5;0.105;22;37;0.9966;3.46;0.57;10.6;6 -8.3;0.655;0.12;2.3;0.083;15;113;0.9966;3.17;0.66;9.8;5 -6.9;0.605;0.12;10.7;0.073;40;83;0.9993;3.45;0.52;9.4;6 -5.2;0.32;0.25;1.8;0.103;13;50;0.9957;3.38;0.55;9.2;5 -7.8;0.645;0;5.5;0.086;5;18;0.9986;3.4;0.55;9.6;6 -7.8;0.6;0.14;2.4;0.086;3;15;0.9975;3.42;0.6;10.8;6 -8.1;0.38;0.28;2.1;0.066;13;30;0.9968;3.23;0.73;9.7;7 -5.7;1.13;0.09;1.5;0.172;7;19;0.994;3.5;0.48;9.8;4 -7.3;0.45;0.36;5.9;0.074;12;87;0.9978;3.33;0.83;10.5;5 -7.3;0.45;0.36;5.9;0.074;12;87;0.9978;3.33;0.83;10.5;5 -8.8;0.61;0.3;2.8;0.088;17;46;0.9976;3.26;0.51;9.3;4 -7.5;0.49;0.2;2.6;0.332;8;14;0.9968;3.21;0.9;10.5;6 -8.1;0.66;0.22;2.2;0.069;9;23;0.9968;3.3;1.2;10.3;5 -6.8;0.67;0.02;1.8;0.05;5;11;0.9962;3.48;0.52;9.5;5 -4.6;0.52;0.15;2.1;0.054;8;65;0.9934;3.9;0.56;13.1;4 -7.7;0.935;0.43;2.2;0.114;22;114;0.997;3.25;0.73;9.2;5 -8.7;0.29;0.52;1.6;0.113;12;37;0.9969;3.25;0.58;9.5;5 -6.4;0.4;0.23;1.6;0.066;5;12;0.9958;3.34;0.56;9.2;5 -5.6;0.31;0.37;1.4;0.074;12;96;0.9954;3.32;0.58;9.2;5 -8.8;0.66;0.26;1.7;0.074;4;23;0.9971;3.15;0.74;9.2;5 -6.6;0.52;0.04;2.2;0.069;8;15;0.9956;3.4;0.63;9.4;6 -6.6;0.5;0.04;2.1;0.068;6;14;0.9955;3.39;0.64;9.4;6 -8.6;0.38;0.36;3;0.081;30;119;0.997;3.2;0.56;9.4;5 -7.6;0.51;0.15;2.8;0.11;33;73;0.9955;3.17;0.63;10.2;6 -7.7;0.62;0.04;3.8;0.084;25;45;0.9978;3.34;0.53;9.5;5 -10.2;0.42;0.57;3.4;0.07;4;10;0.9971;3.04;0.63;9.6;5 
-7.5;0.63;0.12;5.1;0.111;50;110;0.9983;3.26;0.77;9.4;5 -7.8;0.59;0.18;2.3;0.076;17;54;0.9975;3.43;0.59;10;5 -7.3;0.39;0.31;2.4;0.074;9;46;0.9962;3.41;0.54;9.4;6 -8.8;0.4;0.4;2.2;0.079;19;52;0.998;3.44;0.64;9.2;5 -7.7;0.69;0.49;1.8;0.115;20;112;0.9968;3.21;0.71;9.3;5 -7.5;0.52;0.16;1.9;0.085;12;35;0.9968;3.38;0.62;9.5;7 -7;0.735;0.05;2;0.081;13;54;0.9966;3.39;0.57;9.8;5 -7.2;0.725;0.05;4.65;0.086;4;11;0.9962;3.41;0.39;10.9;5 -7.2;0.725;0.05;4.65;0.086;4;11;0.9962;3.41;0.39;10.9;5 -7.5;0.52;0.11;1.5;0.079;11;39;0.9968;3.42;0.58;9.6;5 -6.6;0.705;0.07;1.6;0.076;6;15;0.9962;3.44;0.58;10.7;5 -9.3;0.32;0.57;2;0.074;27;65;0.9969;3.28;0.79;10.7;5 -8;0.705;0.05;1.9;0.074;8;19;0.9962;3.34;0.95;10.5;6 -7.7;0.63;0.08;1.9;0.076;15;27;0.9967;3.32;0.54;9.5;6 -7.7;0.67;0.23;2.1;0.088;17;96;0.9962;3.32;0.48;9.5;5 -7.7;0.69;0.22;1.9;0.084;18;94;0.9961;3.31;0.48;9.5;5 -8.3;0.675;0.26;2.1;0.084;11;43;0.9976;3.31;0.53;9.2;4 -9.7;0.32;0.54;2.5;0.094;28;83;0.9984;3.28;0.82;9.6;5 -8.8;0.41;0.64;2.2;0.093;9;42;0.9986;3.54;0.66;10.5;5 -8.8;0.41;0.64;2.2;0.093;9;42;0.9986;3.54;0.66;10.5;5 -6.8;0.785;0;2.4;0.104;14;30;0.9966;3.52;0.55;10.7;6 -6.7;0.75;0.12;2;0.086;12;80;0.9958;3.38;0.52;10.1;5 -8.3;0.625;0.2;1.5;0.08;27;119;0.9972;3.16;1.12;9.1;4 -6.2;0.45;0.2;1.6;0.069;3;15;0.9958;3.41;0.56;9.2;5 -7.8;0.43;0.7;1.9;0.464;22;67;0.9974;3.13;1.28;9.4;5 -7.4;0.5;0.47;2;0.086;21;73;0.997;3.36;0.57;9.1;5 -7.3;0.67;0.26;1.8;0.401;16;51;0.9969;3.16;1.14;9.4;5 -6.3;0.3;0.48;1.8;0.069;18;61;0.9959;3.44;0.78;10.3;6 -6.9;0.55;0.15;2.2;0.076;19;40;0.9961;3.41;0.59;10.1;5 -8.6;0.49;0.28;1.9;0.11;20;136;0.9972;2.93;1.95;9.9;6 -7.7;0.49;0.26;1.9;0.062;9;31;0.9966;3.39;0.64;9.6;5 -9.3;0.39;0.44;2.1;0.107;34;125;0.9978;3.14;1.22;9.5;5 -7;0.62;0.08;1.8;0.076;8;24;0.9978;3.48;0.53;9;5 -7.9;0.52;0.26;1.9;0.079;42;140;0.9964;3.23;0.54;9.5;5 -8.6;0.49;0.28;1.9;0.11;20;136;0.9972;2.93;1.95;9.9;6 -8.6;0.49;0.29;2;0.11;19;133;0.9972;2.93;1.98;9.8;5 -7.7;0.49;0.26;1.9;0.062;9;31;0.9966;3.39;0.64;9.6;5 -5;1.02;0.04;1.4;0.045;41;85;0.9938;3.75;0.48;10.5;4 -4.7;0.6;0.17;2.3;0.058;17;106;0.9932;3.85;0.6;12.9;6 -6.8;0.775;0;3;0.102;8;23;0.9965;3.45;0.56;10.7;5 -7;0.5;0.25;2;0.07;3;22;0.9963;3.25;0.63;9.2;5 -7.6;0.9;0.06;2.5;0.079;5;10;0.9967;3.39;0.56;9.8;5 -8.1;0.545;0.18;1.9;0.08;13;35;0.9972;3.3;0.59;9;6 -8.3;0.61;0.3;2.1;0.084;11;50;0.9972;3.4;0.61;10.2;6 -7.8;0.5;0.3;1.9;0.075;8;22;0.9959;3.31;0.56;10.4;6 -8.1;0.545;0.18;1.9;0.08;13;35;0.9972;3.3;0.59;9;6 -8.1;0.575;0.22;2.1;0.077;12;65;0.9967;3.29;0.51;9.2;5 -7.2;0.49;0.24;2.2;0.07;5;36;0.996;3.33;0.48;9.4;5 -8.1;0.575;0.22;2.1;0.077;12;65;0.9967;3.29;0.51;9.2;5 -7.8;0.41;0.68;1.7;0.467;18;69;0.9973;3.08;1.31;9.3;5 -6.2;0.63;0.31;1.7;0.088;15;64;0.9969;3.46;0.79;9.3;5 -8;0.33;0.53;2.5;0.091;18;80;0.9976;3.37;0.8;9.6;6 -8.1;0.785;0.52;2;0.122;37;153;0.9969;3.21;0.69;9.3;5 -7.8;0.56;0.19;1.8;0.104;12;47;0.9964;3.19;0.93;9.5;5 -8.4;0.62;0.09;2.2;0.084;11;108;0.9964;3.15;0.66;9.8;5 -8.4;0.6;0.1;2.2;0.085;14;111;0.9964;3.15;0.66;9.8;5 -10.1;0.31;0.44;2.3;0.08;22;46;0.9988;3.32;0.67;9.7;6 -7.8;0.56;0.19;1.8;0.104;12;47;0.9964;3.19;0.93;9.5;5 -9.4;0.4;0.31;2.2;0.09;13;62;0.9966;3.07;0.63;10.5;6 -8.3;0.54;0.28;1.9;0.077;11;40;0.9978;3.39;0.61;10;6 -7.8;0.56;0.12;2;0.082;7;28;0.997;3.37;0.5;9.4;6 -8.8;0.55;0.04;2.2;0.119;14;56;0.9962;3.21;0.6;10.9;6 -7;0.69;0.08;1.8;0.097;22;89;0.9959;3.34;0.54;9.2;6 -7.3;1.07;0.09;1.7;0.178;10;89;0.9962;3.3;0.57;9;5 -8.8;0.55;0.04;2.2;0.119;14;56;0.9962;3.21;0.6;10.9;6 -7.3;0.695;0;2.5;0.075;3;13;0.998;3.49;0.52;9.2;5 -8;0.71;0;2.6;0.08;11;34;0.9976;3.44;0.53;9.5;5 
-7.8;0.5;0.17;1.6;0.082;21;102;0.996;3.39;0.48;9.5;5 -9;0.62;0.04;1.9;0.146;27;90;0.9984;3.16;0.7;9.4;5 -8.2;1.33;0;1.7;0.081;3;12;0.9964;3.53;0.49;10.9;5 -8.1;1.33;0;1.8;0.082;3;12;0.9964;3.54;0.48;10.9;5 -8;0.59;0.16;1.8;0.065;3;16;0.9962;3.42;0.92;10.5;7 -6.1;0.38;0.15;1.8;0.072;6;19;0.9955;3.42;0.57;9.4;5 -8;0.745;0.56;2;0.118;30;134;0.9968;3.24;0.66;9.4;5 -5.6;0.5;0.09;2.3;0.049;17;99;0.9937;3.63;0.63;13;5 -5.6;0.5;0.09;2.3;0.049;17;99;0.9937;3.63;0.63;13;5 -6.6;0.5;0.01;1.5;0.06;17;26;0.9952;3.4;0.58;9.8;6 -7.9;1.04;0.05;2.2;0.084;13;29;0.9959;3.22;0.55;9.9;6 -8.4;0.745;0.11;1.9;0.09;16;63;0.9965;3.19;0.82;9.6;5 -8.3;0.715;0.15;1.8;0.089;10;52;0.9968;3.23;0.77;9.5;5 -7.2;0.415;0.36;2;0.081;13;45;0.9972;3.48;0.64;9.2;5 -7.8;0.56;0.19;2.1;0.081;15;105;0.9962;3.33;0.54;9.5;5 -7.8;0.56;0.19;2;0.081;17;108;0.9962;3.32;0.54;9.5;5 -8.4;0.745;0.11;1.9;0.09;16;63;0.9965;3.19;0.82;9.6;5 -8.3;0.715;0.15;1.8;0.089;10;52;0.9968;3.23;0.77;9.5;5 -5.2;0.34;0;1.8;0.05;27;63;0.9916;3.68;0.79;14;6 -6.3;0.39;0.08;1.7;0.066;3;20;0.9954;3.34;0.58;9.4;5 -5.2;0.34;0;1.8;0.05;27;63;0.9916;3.68;0.79;14;6 -8.1;0.67;0.55;1.8;0.117;32;141;0.9968;3.17;0.62;9.4;5 -5.8;0.68;0.02;1.8;0.087;21;94;0.9944;3.54;0.52;10;5 -7.6;0.49;0.26;1.6;0.236;10;88;0.9968;3.11;0.8;9.3;5 -6.9;0.49;0.1;2.3;0.074;12;30;0.9959;3.42;0.58;10.2;6 -8.2;0.4;0.44;2.8;0.089;11;43;0.9975;3.53;0.61;10.5;6 -7.3;0.33;0.47;2.1;0.077;5;11;0.9958;3.33;0.53;10.3;6 -9.2;0.52;1;3.4;0.61;32;69;0.9996;2.74;2.0;9.4;4 -7.5;0.6;0.03;1.8;0.095;25;99;0.995;3.35;0.54;10.1;5 -7.5;0.6;0.03;1.8;0.095;25;99;0.995;3.35;0.54;10.1;5 -7.1;0.43;0.42;5.5;0.07;29;129;0.9973;3.42;0.72;10.5;5 -7.1;0.43;0.42;5.5;0.071;28;128;0.9973;3.42;0.71;10.5;5 -7.1;0.43;0.42;5.5;0.07;29;129;0.9973;3.42;0.72;10.5;5 -7.1;0.43;0.42;5.5;0.071;28;128;0.9973;3.42;0.71;10.5;5 -7.1;0.68;0;2.2;0.073;12;22;0.9969;3.48;0.5;9.3;5 -6.8;0.6;0.18;1.9;0.079;18;86;0.9968;3.59;0.57;9.3;6 -7.6;0.95;0.03;2;0.09;7;20;0.9959;3.2;0.56;9.6;5 -7.6;0.68;0.02;1.3;0.072;9;20;0.9965;3.17;1.08;9.2;4 -7.8;0.53;0.04;1.7;0.076;17;31;0.9964;3.33;0.56;10;6 -7.4;0.6;0.26;7.3;0.07;36;121;0.9982;3.37;0.49;9.4;5 -7.3;0.59;0.26;7.2;0.07;35;121;0.9981;3.37;0.49;9.4;5 -7.8;0.63;0.48;1.7;0.1;14;96;0.9961;3.19;0.62;9.5;5 -6.8;0.64;0.1;2.1;0.085;18;101;0.9956;3.34;0.52;10.2;5 -7.3;0.55;0.03;1.6;0.072;17;42;0.9956;3.37;0.48;9;4 -6.8;0.63;0.07;2.1;0.089;11;44;0.9953;3.47;0.55;10.4;6 -7.5;0.705;0.24;1.8;0.36;15;63;0.9964;3;1.59;9.5;5 -7.9;0.885;0.03;1.8;0.058;4;8;0.9972;3.36;0.33;9.1;4 -8;0.42;0.17;2;0.073;6;18;0.9972;3.29;0.61;9.2;6 -8;0.42;0.17;2;0.073;6;18;0.9972;3.29;0.61;9.2;6 -7.4;0.62;0.05;1.9;0.068;24;42;0.9961;3.42;0.57;11.5;6 -7.3;0.38;0.21;2;0.08;7;35;0.9961;3.33;0.47;9.5;5 -6.9;0.5;0.04;1.5;0.085;19;49;0.9958;3.35;0.78;9.5;5 -7.3;0.38;0.21;2;0.08;7;35;0.9961;3.33;0.47;9.5;5 -7.5;0.52;0.42;2.3;0.087;8;38;0.9972;3.58;0.61;10.5;6 -7;0.805;0;2.5;0.068;7;20;0.9969;3.48;0.56;9.6;5 -8.8;0.61;0.14;2.4;0.067;10;42;0.9969;3.19;0.59;9.5;5 -8.8;0.61;0.14;2.4;0.067;10;42;0.9969;3.19;0.59;9.5;5 -8.9;0.61;0.49;2;0.27;23;110;0.9972;3.12;1.02;9.3;5 -7.2;0.73;0.02;2.5;0.076;16;42;0.9972;3.44;0.52;9.3;5 -6.8;0.61;0.2;1.8;0.077;11;65;0.9971;3.54;0.58;9.3;5 -6.7;0.62;0.21;1.9;0.079;8;62;0.997;3.52;0.58;9.3;6 -8.9;0.31;0.57;2;0.111;26;85;0.9971;3.26;0.53;9.7;5 -7.4;0.39;0.48;2;0.082;14;67;0.9972;3.34;0.55;9.2;5 -7.7;0.705;0.1;2.6;0.084;9;26;0.9976;3.39;0.49;9.7;5 -7.9;0.5;0.33;2;0.084;15;143;0.9968;3.2;0.55;9.5;5 -7.9;0.49;0.32;1.9;0.082;17;144;0.9968;3.2;0.55;9.5;5 -8.2;0.5;0.35;2.9;0.077;21;127;0.9976;3.23;0.62;9.4;5 
-6.4;0.37;0.25;1.9;0.074;21;49;0.9974;3.57;0.62;9.8;6 -6.8;0.63;0.12;3.8;0.099;16;126;0.9969;3.28;0.61;9.5;5 -7.6;0.55;0.21;2.2;0.071;7;28;0.9964;3.28;0.55;9.7;5 -7.6;0.55;0.21;2.2;0.071;7;28;0.9964;3.28;0.55;9.7;5 -7.8;0.59;0.33;2;0.074;24;120;0.9968;3.25;0.54;9.4;5 -7.3;0.58;0.3;2.4;0.074;15;55;0.9968;3.46;0.59;10.2;5 -11.5;0.3;0.6;2;0.067;12;27;0.9981;3.11;0.97;10.1;6 -5.4;0.835;0.08;1.2;0.046;13;93;0.9924;3.57;0.85;13;7 -6.9;1.09;0.06;2.1;0.061;12;31;0.9948;3.51;0.43;11.4;4 -9.6;0.32;0.47;1.4;0.056;9;24;0.99695;3.22;0.82;10.3;7 -8.8;0.37;0.48;2.1;0.097;39;145;0.9975;3.04;1.03;9.3;5 -6.8;0.5;0.11;1.5;0.075;16;49;0.99545;3.36;0.79;9.5;5 -7;0.42;0.35;1.6;0.088;16;39;0.9961;3.34;0.55;9.2;5 -7;0.43;0.36;1.6;0.089;14;37;0.99615;3.34;0.56;9.2;6 -12.8;0.3;0.74;2.6;0.095;9;28;0.9994;3.2;0.77;10.8;7 -12.8;0.3;0.74;2.6;0.095;9;28;0.9994;3.2;0.77;10.8;7 -7.8;0.57;0.31;1.8;0.069;26;120;0.99625;3.29;0.53;9.3;5 -7.8;0.44;0.28;2.7;0.1;18;95;0.9966;3.22;0.67;9.4;5 -11;0.3;0.58;2.1;0.054;7;19;0.998;3.31;0.88;10.5;7 -9.7;0.53;0.6;2;0.039;5;19;0.99585;3.3;0.86;12.4;6 -8;0.725;0.24;2.8;0.083;10;62;0.99685;3.35;0.56;10;6 -11.6;0.44;0.64;2.1;0.059;5;15;0.998;3.21;0.67;10.2;6 -8.2;0.57;0.26;2.2;0.06;28;65;0.9959;3.3;0.43;10.1;5 -7.8;0.735;0.08;2.4;0.092;10;41;0.9974;3.24;0.71;9.8;6 -7;0.49;0.49;5.6;0.06;26;121;0.9974;3.34;0.76;10.5;5 -8.7;0.625;0.16;2;0.101;13;49;0.9962;3.14;0.57;11;5 -8.1;0.725;0.22;2.2;0.072;11;41;0.9967;3.36;0.55;9.1;5 -7.5;0.49;0.19;1.9;0.076;10;44;0.9957;3.39;0.54;9.7;5 -7.8;0.53;0.33;2.4;0.08;24;144;0.99655;3.3;0.6;9.5;5 -7.8;0.34;0.37;2;0.082;24;58;0.9964;3.34;0.59;9.4;6 -7.4;0.53;0.26;2;0.101;16;72;0.9957;3.15;0.57;9.4;5 -6.8;0.61;0.04;1.5;0.057;5;10;0.99525;3.42;0.6;9.5;5 -8.6;0.645;0.25;2;0.083;8;28;0.99815;3.28;0.6;10;6 -8.4;0.635;0.36;2;0.089;15;55;0.99745;3.31;0.57;10.4;4 -7.7;0.43;0.25;2.6;0.073;29;63;0.99615;3.37;0.58;10.5;6 -8.9;0.59;0.5;2;0.337;27;81;0.9964;3.04;1.61;9.5;6 -9;0.82;0.14;2.6;0.089;9;23;0.9984;3.39;0.63;9.8;5 -7.7;0.43;0.25;2.6;0.073;29;63;0.99615;3.37;0.58;10.5;6 -6.9;0.52;0.25;2.6;0.081;10;37;0.99685;3.46;0.5;11;5 -5.2;0.48;0.04;1.6;0.054;19;106;0.9927;3.54;0.62;12.2;7 -8;0.38;0.06;1.8;0.078;12;49;0.99625;3.37;0.52;9.9;6 -8.5;0.37;0.2;2.8;0.09;18;58;0.998;3.34;0.7;9.6;6 -6.9;0.52;0.25;2.6;0.081;10;37;0.99685;3.46;0.5;11;5 -8.2;1;0.09;2.3;0.065;7;37;0.99685;3.32;0.55;9;6 -7.2;0.63;0;1.9;0.097;14;38;0.99675;3.37;0.58;9;6 -7.2;0.63;0;1.9;0.097;14;38;0.99675;3.37;0.58;9;6 -7.2;0.645;0;1.9;0.097;15;39;0.99675;3.37;0.58;9.2;6 -7.2;0.63;0;1.9;0.097;14;38;0.99675;3.37;0.58;9;6 -8.2;1;0.09;2.3;0.065;7;37;0.99685;3.32;0.55;9;6 -8.9;0.635;0.37;1.7;0.263;5;62;0.9971;3;1.09;9.3;5 -12;0.38;0.56;2.1;0.093;6;24;0.99925;3.14;0.71;10.9;6 -7.7;0.58;0.1;1.8;0.102;28;109;0.99565;3.08;0.49;9.8;6 -15;0.21;0.44;2.2;0.075;10;24;1.00005;3.07;0.84;9.2;7 -15;0.21;0.44;2.2;0.075;10;24;1.00005;3.07;0.84;9.2;7 -7.3;0.66;0;2;0.084;6;23;0.9983;3.61;0.96;9.9;6 -7.1;0.68;0.07;1.9;0.075;16;51;0.99685;3.38;0.52;9.5;5 -8.2;0.6;0.17;2.3;0.072;11;73;0.9963;3.2;0.45;9.3;5 -7.7;0.53;0.06;1.7;0.074;9;39;0.99615;3.35;0.48;9.8;6 -7.3;0.66;0;2;0.084;6;23;0.9983;3.61;0.96;9.9;6 -10.8;0.32;0.44;1.6;0.063;16;37;0.9985;3.22;0.78;10;6 -7.1;0.6;0;1.8;0.074;16;34;0.9972;3.47;0.7;9.9;6 -11.1;0.35;0.48;3.1;0.09;5;21;0.9986;3.17;0.53;10.5;5 -7.7;0.775;0.42;1.9;0.092;8;86;0.9959;3.23;0.59;9.5;5 -7.1;0.6;0;1.8;0.074;16;34;0.9972;3.47;0.7;9.9;6 -8;0.57;0.23;3.2;0.073;17;119;0.99675;3.26;0.57;9.3;5 -9.4;0.34;0.37;2.2;0.075;5;13;0.998;3.22;0.62;9.2;5 -6.6;0.695;0;2.1;0.075;12;56;0.9968;3.49;0.67;9.2;5 
-7.7;0.41;0.76;1.8;0.611;8;45;0.9968;3.06;1.26;9.4;5 -10;0.31;0.47;2.6;0.085;14;33;0.99965;3.36;0.8;10.5;7 -7.9;0.33;0.23;1.7;0.077;18;45;0.99625;3.29;0.65;9.3;5 -7;0.975;0.04;2;0.087;12;67;0.99565;3.35;0.6;9.4;4 -8;0.52;0.03;1.7;0.07;10;35;0.99575;3.34;0.57;10;5 -7.9;0.37;0.23;1.8;0.077;23;49;0.9963;3.28;0.67;9.3;5 -12.5;0.56;0.49;2.4;0.064;5;27;0.9999;3.08;0.87;10.9;5 -11.8;0.26;0.52;1.8;0.071;6;10;0.9968;3.2;0.72;10.2;7 -8.1;0.87;0;3.3;0.096;26;61;1.00025;3.6;0.72;9.8;4 -7.9;0.35;0.46;3.6;0.078;15;37;0.9973;3.35;0.86;12.8;8 -6.9;0.54;0.04;3;0.077;7;27;0.9987;3.69;0.91;9.4;6 -11.5;0.18;0.51;4;0.104;4;23;0.9996;3.28;0.97;10.1;6 -7.9;0.545;0.06;4;0.087;27;61;0.9965;3.36;0.67;10.7;6 -11.5;0.18;0.51;4;0.104;4;23;0.9996;3.28;0.97;10.1;6 -10.9;0.37;0.58;4;0.071;17;65;0.99935;3.22;0.78;10.1;5 -8.4;0.715;0.2;2.4;0.076;10;38;0.99735;3.31;0.64;9.4;5 -7.5;0.65;0.18;7;0.088;27;94;0.99915;3.38;0.77;9.4;5 -7.9;0.545;0.06;4;0.087;27;61;0.9965;3.36;0.67;10.7;6 -6.9;0.54;0.04;3;0.077;7;27;0.9987;3.69;0.91;9.4;6 -11.5;0.18;0.51;4;0.104;4;23;0.9996;3.28;0.97;10.1;6 -10.3;0.32;0.45;6.4;0.073;5;13;0.9976;3.23;0.82;12.6;8 -8.9;0.4;0.32;5.6;0.087;10;47;0.9991;3.38;0.77;10.5;7 -11.4;0.26;0.44;3.6;0.071;6;19;0.9986;3.12;0.82;9.3;6 -7.7;0.27;0.68;3.5;0.358;5;10;0.9972;3.25;1.08;9.9;7 -7.6;0.52;0.12;3;0.067;12;53;0.9971;3.36;0.57;9.1;5 -8.9;0.4;0.32;5.6;0.087;10;47;0.9991;3.38;0.77;10.5;7 -9.9;0.59;0.07;3.4;0.102;32;71;1.00015;3.31;0.71;9.8;5 -9.9;0.59;0.07;3.4;0.102;32;71;1.00015;3.31;0.71;9.8;5 -12;0.45;0.55;2;0.073;25;49;0.9997;3.1;0.76;10.3;6 -7.5;0.4;0.12;3;0.092;29;53;0.9967;3.37;0.7;10.3;6 -8.7;0.52;0.09;2.5;0.091;20;49;0.9976;3.34;0.86;10.6;7 -11.6;0.42;0.53;3.3;0.105;33;98;1.001;3.2;0.95;9.2;5 -8.7;0.52;0.09;2.5;0.091;20;49;0.9976;3.34;0.86;10.6;7 -11;0.2;0.48;2;0.343;6;18;0.9979;3.3;0.71;10.5;5 -10.4;0.55;0.23;2.7;0.091;18;48;0.9994;3.22;0.64;10.3;6 -6.9;0.36;0.25;2.4;0.098;5;16;0.9964;3.41;0.6;10.1;6 -13.3;0.34;0.52;3.2;0.094;17;53;1.0014;3.05;0.81;9.5;6 -10.8;0.5;0.46;2.5;0.073;5;27;1.0001;3.05;0.64;9.5;5 -10.6;0.83;0.37;2.6;0.086;26;70;0.9981;3.16;0.52;9.9;5 -7.1;0.63;0.06;2;0.083;8;29;0.99855;3.67;0.73;9.6;5 -7.2;0.65;0.02;2.3;0.094;5;31;0.9993;3.67;0.8;9.7;5 -6.9;0.67;0.06;2.1;0.08;8;33;0.99845;3.68;0.71;9.6;5 -7.5;0.53;0.06;2.6;0.086;20;44;0.9965;3.38;0.59;10.7;6 -11.1;0.18;0.48;1.5;0.068;7;15;0.9973;3.22;0.64;10.1;6 -8.3;0.705;0.12;2.6;0.092;12;28;0.9994;3.51;0.72;10;5 -7.4;0.67;0.12;1.6;0.186;5;21;0.996;3.39;0.54;9.5;5 -8.4;0.65;0.6;2.1;0.112;12;90;0.9973;3.2;0.52;9.2;5 -10.3;0.53;0.48;2.5;0.063;6;25;0.9998;3.12;0.59;9.3;6 -7.6;0.62;0.32;2.2;0.082;7;54;0.9966;3.36;0.52;9.4;5 -10.3;0.41;0.42;2.4;0.213;6;14;0.9994;3.19;0.62;9.5;6 -10.3;0.43;0.44;2.4;0.214;5;12;0.9994;3.19;0.63;9.5;6 -7.4;0.29;0.38;1.7;0.062;9;30;0.9968;3.41;0.53;9.5;6 -10.3;0.53;0.48;2.5;0.063;6;25;0.9998;3.12;0.59;9.3;6 -7.9;0.53;0.24;2;0.072;15;105;0.996;3.27;0.54;9.4;6 -9;0.46;0.31;2.8;0.093;19;98;0.99815;3.32;0.63;9.5;6 -8.6;0.47;0.3;3;0.076;30;135;0.9976;3.3;0.53;9.4;5 -7.4;0.36;0.29;2.6;0.087;26;72;0.99645;3.39;0.68;11;5 -7.1;0.35;0.29;2.5;0.096;20;53;0.9962;3.42;0.65;11;6 -9.6;0.56;0.23;3.4;0.102;37;92;0.9996;3.3;0.65;10.1;5 -9.6;0.77;0.12;2.9;0.082;30;74;0.99865;3.3;0.64;10.4;6 -9.8;0.66;0.39;3.2;0.083;21;59;0.9989;3.37;0.71;11.5;7 -9.6;0.77;0.12;2.9;0.082;30;74;0.99865;3.3;0.64;10.4;6 -9.8;0.66;0.39;3.2;0.083;21;59;0.9989;3.37;0.71;11.5;7 -9.3;0.61;0.26;3.4;0.09;25;87;0.99975;3.24;0.62;9.7;5 -7.8;0.62;0.05;2.3;0.079;6;18;0.99735;3.29;0.63;9.3;5 -10.3;0.59;0.42;2.8;0.09;35;73;0.999;3.28;0.7;9.5;6 
-10;0.49;0.2;11;0.071;13;50;1.0015;3.16;0.69;9.2;6 -10;0.49;0.2;11;0.071;13;50;1.0015;3.16;0.69;9.2;6 -11.6;0.53;0.66;3.65;0.121;6;14;0.9978;3.05;0.74;11.5;7 -10.3;0.44;0.5;4.5;0.107;5;13;0.998;3.28;0.83;11.5;5 -13.4;0.27;0.62;2.6;0.082;6;21;1.0002;3.16;0.67;9.7;6 -10.7;0.46;0.39;2;0.061;7;15;0.9981;3.18;0.62;9.5;5 -10.2;0.36;0.64;2.9;0.122;10;41;0.998;3.23;0.66;12.5;6 -10.2;0.36;0.64;2.9;0.122;10;41;0.998;3.23;0.66;12.5;6 -8;0.58;0.28;3.2;0.066;21;114;0.9973;3.22;0.54;9.4;6 -8.4;0.56;0.08;2.1;0.105;16;44;0.9958;3.13;0.52;11;5 -7.9;0.65;0.01;2.5;0.078;17;38;0.9963;3.34;0.74;11.7;7 -11.9;0.695;0.53;3.4;0.128;7;21;0.9992;3.17;0.84;12.2;7 -8.9;0.43;0.45;1.9;0.052;6;16;0.9948;3.35;0.7;12.5;6 -7.8;0.43;0.32;2.8;0.08;29;58;0.9974;3.31;0.64;10.3;5 -12.4;0.49;0.58;3;0.103;28;99;1.0008;3.16;1;11.5;6 -12.5;0.28;0.54;2.3;0.082;12;29;0.9997;3.11;1.36;9.8;7 -12.2;0.34;0.5;2.4;0.066;10;21;1;3.12;1.18;9.2;6 -10.6;0.42;0.48;2.7;0.065;5;18;0.9972;3.21;0.87;11.3;6 -10.9;0.39;0.47;1.8;0.118;6;14;0.9982;3.3;0.75;9.8;6 -10.9;0.39;0.47;1.8;0.118;6;14;0.9982;3.3;0.75;9.8;6 -11.9;0.57;0.5;2.6;0.082;6;32;1.0006;3.12;0.78;10.7;6 -7;0.685;0;1.9;0.067;40;63;0.9979;3.6;0.81;9.9;5 -6.6;0.815;0.02;2.7;0.072;17;34;0.9955;3.58;0.89;12.3;7 -13.8;0.49;0.67;3;0.093;6;15;0.9986;3.02;0.93;12;6 -9.6;0.56;0.31;2.8;0.089;15;46;0.9979;3.11;0.92;10;6 -9.1;0.785;0;2.6;0.093;11;28;0.9994;3.36;0.86;9.4;6 -10.7;0.67;0.22;2.7;0.107;17;34;1.0004;3.28;0.98;9.9;6 -9.1;0.795;0;2.6;0.096;11;26;0.9994;3.35;0.83;9.4;6 -7.7;0.665;0;2.4;0.09;8;19;0.9974;3.27;0.73;9.3;5 -13.5;0.53;0.79;4.8;0.12;23;77;1.0018;3.18;0.77;13;5 -6.1;0.21;0.4;1.4;0.066;40.5;165;0.9912;3.25;0.59;11.9;6 -6.7;0.75;0.01;2.4;0.078;17;32;0.9955;3.55;0.61;12.8;6 -11.5;0.41;0.52;3;0.08;29;55;1.0001;3.26;0.88;11;5 -10.5;0.42;0.66;2.95;0.116;12;29;0.997;3.24;0.75;11.7;7 -11.9;0.43;0.66;3.1;0.109;10;23;1;3.15;0.85;10.4;7 -12.6;0.38;0.66;2.6;0.088;10;41;1.001;3.17;0.68;9.8;6 -8.2;0.7;0.23;2;0.099;14;81;0.9973;3.19;0.7;9.4;5 -8.6;0.45;0.31;2.6;0.086;21;50;0.9982;3.37;0.91;9.9;6 -11.9;0.58;0.66;2.5;0.072;6;37;0.9992;3.05;0.56;10;5 -12.5;0.46;0.63;2;0.071;6;15;0.9988;2.99;0.87;10.2;5 -12.8;0.615;0.66;5.8;0.083;7;42;1.0022;3.07;0.73;10;7 -10;0.42;0.5;3.4;0.107;7;21;0.9979;3.26;0.93;11.8;6 -12.8;0.615;0.66;5.8;0.083;7;42;1.0022;3.07;0.73;10;7 -10.4;0.575;0.61;2.6;0.076;11;24;1;3.16;0.69;9;5 -10.3;0.34;0.52;2.8;0.159;15;75;0.9998;3.18;0.64;9.4;5 -9.4;0.27;0.53;2.4;0.074;6;18;0.9962;3.2;1.13;12;7 -6.9;0.765;0.02;2.3;0.063;35;63;0.9975;3.57;0.78;9.9;5 -7.9;0.24;0.4;1.6;0.056;11;25;0.9967;3.32;0.87;8.7;6 -9.1;0.28;0.48;1.8;0.067;26;46;0.9967;3.32;1.04;10.6;6 -7.4;0.55;0.22;2.2;0.106;12;72;0.9959;3.05;0.63;9.2;5 -14;0.41;0.63;3.8;0.089;6;47;1.0014;3.01;0.81;10.8;6 -11.5;0.54;0.71;4.4;0.124;6;15;0.9984;3.01;0.83;11.8;7 -11.5;0.45;0.5;3;0.078;19;47;1.0003;3.26;1.11;11;6 -9.4;0.27;0.53;2.4;0.074;6;18;0.9962;3.2;1.13;12;7 -11.4;0.625;0.66;6.2;0.088;6;24;0.9988;3.11;0.99;13.3;6 -8.3;0.42;0.38;2.5;0.094;24;60;0.9979;3.31;0.7;10.8;6 -8.3;0.26;0.42;2;0.08;11;27;0.9974;3.21;0.8;9.4;6 -13.7;0.415;0.68;2.9;0.085;17;43;1.0014;3.06;0.8;10;6 -8.3;0.26;0.42;2;0.08;11;27;0.9974;3.21;0.8;9.4;6 -8.3;0.26;0.42;2;0.08;11;27;0.9974;3.21;0.8;9.4;6 -7.7;0.51;0.28;2.1;0.087;23;54;0.998;3.42;0.74;9.2;5 -7.4;0.63;0.07;2.4;0.09;11;37;0.9979;3.43;0.76;9.7;6 -7.8;0.54;0.26;2;0.088;23;48;0.9981;3.41;0.74;9.2;6 -8.3;0.66;0.15;1.9;0.079;17;42;0.9972;3.31;0.54;9.6;6 -7.8;0.46;0.26;1.9;0.088;23;53;0.9981;3.43;0.74;9.2;6 -9.6;0.38;0.31;2.5;0.096;16;49;0.9982;3.19;0.7;10;7 -5.6;0.85;0.05;1.4;0.045;12;88;0.9924;3.56;0.82;12.9;8 
-13.7;0.415;0.68;2.9;0.085;17;43;1.0014;3.06;0.8;10;6 -9.5;0.37;0.52;2;0.082;6;26;0.998;3.18;0.51;9.5;5 -8.4;0.665;0.61;2;0.112;13;95;0.997;3.16;0.54;9.1;5 -12.7;0.6;0.65;2.3;0.063;6;25;0.9997;3.03;0.57;9.9;5 -12;0.37;0.76;4.2;0.066;7;38;1.0004;3.22;0.6;13;7 -6.6;0.735;0.02;7.9;0.122;68;124;0.9994;3.47;0.53;9.9;5 -11.5;0.59;0.59;2.6;0.087;13;49;0.9988;3.18;0.65;11;6 -11.5;0.59;0.59;2.6;0.087;13;49;0.9988;3.18;0.65;11;6 -8.7;0.765;0.22;2.3;0.064;9;42;0.9963;3.1;0.55;9.4;5 -6.6;0.735;0.02;7.9;0.122;68;124;0.9994;3.47;0.53;9.9;5 -7.7;0.26;0.3;1.7;0.059;20;38;0.9949;3.29;0.47;10.8;6 -12.2;0.48;0.54;2.6;0.085;19;64;1;3.1;0.61;10.5;6 -11.4;0.6;0.49;2.7;0.085;10;41;0.9994;3.15;0.63;10.5;6 -7.7;0.69;0.05;2.7;0.075;15;27;0.9974;3.26;0.61;9.1;5 -8.7;0.31;0.46;1.4;0.059;11;25;0.9966;3.36;0.76;10.1;6 -9.8;0.44;0.47;2.5;0.063;9;28;0.9981;3.24;0.65;10.8;6 -12;0.39;0.66;3;0.093;12;30;0.9996;3.18;0.63;10.8;7 -10.4;0.34;0.58;3.7;0.174;6;16;0.997;3.19;0.7;11.3;6 -12.5;0.46;0.49;4.5;0.07;26;49;0.9981;3.05;0.57;9.6;4 -9;0.43;0.34;2.5;0.08;26;86;0.9987;3.38;0.62;9.5;6 -9.1;0.45;0.35;2.4;0.08;23;78;0.9987;3.38;0.62;9.5;5 -7.1;0.735;0.16;1.9;0.1;15;77;0.9966;3.27;0.64;9.3;5 -9.9;0.4;0.53;6.7;0.097;6;19;0.9986;3.27;0.82;11.7;7 -8.8;0.52;0.34;2.7;0.087;24;122;0.9982;3.26;0.61;9.5;5 -8.6;0.725;0.24;6.6;0.117;31;134;1.0014;3.32;1.07;9.3;5 -10.6;0.48;0.64;2.2;0.111;6;20;0.997;3.26;0.66;11.7;6 -7;0.58;0.12;1.9;0.091;34;124;0.9956;3.44;0.48;10.5;5 -11.9;0.38;0.51;2;0.121;7;20;0.9996;3.24;0.76;10.4;6 -6.8;0.77;0;1.8;0.066;34;52;0.9976;3.62;0.68;9.9;5 -9.5;0.56;0.33;2.4;0.089;35;67;0.9972;3.28;0.73;11.8;7 -6.6;0.84;0.03;2.3;0.059;32;48;0.9952;3.52;0.56;12.3;7 -7.7;0.96;0.2;2;0.047;15;60;0.9955;3.36;0.44;10.9;5 -10.5;0.24;0.47;2.1;0.066;6;24;0.9978;3.15;0.9;11;7 -7.7;0.96;0.2;2;0.047;15;60;0.9955;3.36;0.44;10.9;5 -6.6;0.84;0.03;2.3;0.059;32;48;0.9952;3.52;0.56;12.3;7 -6.4;0.67;0.08;2.1;0.045;19;48;0.9949;3.49;0.49;11.4;6 -9.5;0.78;0.22;1.9;0.077;6;32;0.9988;3.26;0.56;10.6;6 -9.1;0.52;0.33;1.3;0.07;9;30;0.9978;3.24;0.6;9.3;5 -12.8;0.84;0.63;2.4;0.088;13;35;0.9997;3.1;0.6;10.4;6 -10.5;0.24;0.47;2.1;0.066;6;24;0.9978;3.15;0.9;11;7 -7.8;0.55;0.35;2.2;0.074;21;66;0.9974;3.25;0.56;9.2;5 -11.9;0.37;0.69;2.3;0.078;12;24;0.9958;3;0.65;12.8;6 -12.3;0.39;0.63;2.3;0.091;6;18;1.0004;3.16;0.49;9.5;5 -10.4;0.41;0.55;3.2;0.076;22;54;0.9996;3.15;0.89;9.9;6 -12.3;0.39;0.63;2.3;0.091;6;18;1.0004;3.16;0.49;9.5;5 -8;0.67;0.3;2;0.06;38;62;0.9958;3.26;0.56;10.2;6 -11.1;0.45;0.73;3.2;0.066;6;22;0.9986;3.17;0.66;11.2;6 -10.4;0.41;0.55;3.2;0.076;22;54;0.9996;3.15;0.89;9.9;6 -7;0.62;0.18;1.5;0.062;7;50;0.9951;3.08;0.6;9.3;5 -12.6;0.31;0.72;2.2;0.072;6;29;0.9987;2.88;0.82;9.8;8 -11.9;0.4;0.65;2.15;0.068;7;27;0.9988;3.06;0.68;11.3;6 -15.6;0.685;0.76;3.7;0.1;6;43;1.0032;2.95;0.68;11.2;7 -10;0.44;0.49;2.7;0.077;11;19;0.9963;3.23;0.63;11.6;7 -5.3;0.57;0.01;1.7;0.054;5;27;0.9934;3.57;0.84;12.5;7 -9.5;0.735;0.1;2.1;0.079;6;31;0.9986;3.23;0.56;10.1;6 -12.5;0.38;0.6;2.6;0.081;31;72;0.9996;3.1;0.73;10.5;5 -9.3;0.48;0.29;2.1;0.127;6;16;0.9968;3.22;0.72;11.2;5 -8.6;0.53;0.22;2;0.1;7;27;0.9967;3.2;0.56;10.2;6 -11.9;0.39;0.69;2.8;0.095;17;35;0.9994;3.1;0.61;10.8;6 -11.9;0.39;0.69;2.8;0.095;17;35;0.9994;3.1;0.61;10.8;6 -8.4;0.37;0.53;1.8;0.413;9;26;0.9979;3.06;1.06;9.1;6 -6.8;0.56;0.03;1.7;0.084;18;35;0.9968;3.44;0.63;10;6 -10.4;0.33;0.63;2.8;0.084;5;22;0.9998;3.26;0.74;11.2;7 -7;0.23;0.4;1.6;0.063;21;67;0.9952;3.5;0.63;11.1;5 -11.3;0.62;0.67;5.2;0.086;6;19;0.9988;3.22;0.69;13.4;8 -8.9;0.59;0.39;2.3;0.095;5;22;0.9986;3.37;0.58;10.3;5 
-9.2;0.63;0.21;2.7;0.097;29;65;0.9988;3.28;0.58;9.6;5 -10.4;0.33;0.63;2.8;0.084;5;22;0.9998;3.26;0.74;11.2;7 -11.6;0.58;0.66;2.2;0.074;10;47;1.0008;3.25;0.57;9;3 -9.2;0.43;0.52;2.3;0.083;14;23;0.9976;3.35;0.61;11.3;6 -8.3;0.615;0.22;2.6;0.087;6;19;0.9982;3.26;0.61;9.3;5 -11;0.26;0.68;2.55;0.085;10;25;0.997;3.18;0.61;11.8;5 -8.1;0.66;0.7;2.2;0.098;25;129;0.9972;3.08;0.53;9;5 -11.5;0.315;0.54;2.1;0.084;5;15;0.9987;2.98;0.7;9.2;6 -10;0.29;0.4;2.9;0.098;10;26;1.0006;3.48;0.91;9.7;5 -10.3;0.5;0.42;2;0.069;21;51;0.9982;3.16;0.72;11.5;6 -8.8;0.46;0.45;2.6;0.065;7;18;0.9947;3.32;0.79;14;6 -11.4;0.36;0.69;2.1;0.09;6;21;1;3.17;0.62;9.2;6 -8.7;0.82;0.02;1.2;0.07;36;48;0.9952;3.2;0.58;9.8;5 -13;0.32;0.65;2.6;0.093;15;47;0.9996;3.05;0.61;10.6;5 -9.6;0.54;0.42;2.4;0.081;25;52;0.997;3.2;0.71;11.4;6 -12.5;0.37;0.55;2.6;0.083;25;68;0.9995;3.15;0.82;10.4;6 -9.9;0.35;0.55;2.1;0.062;5;14;0.9971;3.26;0.79;10.6;5 -10.5;0.28;0.51;1.7;0.08;10;24;0.9982;3.2;0.89;9.4;6 -9.6;0.68;0.24;2.2;0.087;5;28;0.9988;3.14;0.6;10.2;5 -9.3;0.27;0.41;2;0.091;6;16;0.998;3.28;0.7;9.7;5 -10.4;0.24;0.49;1.8;0.075;6;20;0.9977;3.18;1.06;11;6 -9.6;0.68;0.24;2.2;0.087;5;28;0.9988;3.14;0.6;10.2;5 -9.4;0.685;0.11;2.7;0.077;6;31;0.9984;3.19;0.7;10.1;6 -10.6;0.28;0.39;15.5;0.069;6;23;1.0026;3.12;0.66;9.2;5 -9.4;0.3;0.56;2.8;0.08;6;17;0.9964;3.15;0.92;11.7;8 -10.6;0.36;0.59;2.2;0.152;6;18;0.9986;3.04;1.05;9.4;5 -10.6;0.36;0.6;2.2;0.152;7;18;0.9986;3.04;1.06;9.4;5 -10.6;0.44;0.68;4.1;0.114;6;24;0.997;3.06;0.66;13.4;6 -10.2;0.67;0.39;1.9;0.054;6;17;0.9976;3.17;0.47;10;5 -10.2;0.67;0.39;1.9;0.054;6;17;0.9976;3.17;0.47;10;5 -10.2;0.645;0.36;1.8;0.053;5;14;0.9982;3.17;0.42;10;6 -11.6;0.32;0.55;2.8;0.081;35;67;1.0002;3.32;0.92;10.8;7 -9.3;0.39;0.4;2.6;0.073;10;26;0.9984;3.34;0.75;10.2;6 -9.3;0.775;0.27;2.8;0.078;24;56;0.9984;3.31;0.67;10.6;6 -9.2;0.41;0.5;2.5;0.055;12;25;0.9952;3.34;0.79;13.3;7 -8.9;0.4;0.51;2.6;0.052;13;27;0.995;3.32;0.9;13.4;7 -8.7;0.69;0.31;3;0.086;23;81;1.0002;3.48;0.74;11.6;6 -6.5;0.39;0.23;8.3;0.051;28;91;0.9952;3.44;0.55;12.1;6 -10.7;0.35;0.53;2.6;0.07;5;16;0.9972;3.15;0.65;11;8 -7.8;0.52;0.25;1.9;0.081;14;38;0.9984;3.43;0.65;9;6 -7.2;0.34;0.32;2.5;0.09;43;113;0.9966;3.32;0.79;11.1;5 -10.7;0.35;0.53;2.6;0.07;5;16;0.9972;3.15;0.65;11;8 -8.7;0.69;0.31;3;0.086;23;81;1.0002;3.48;0.74;11.6;6 -7.8;0.52;0.25;1.9;0.081;14;38;0.9984;3.43;0.65;9;6 -10.4;0.44;0.73;6.55;0.074;38;76;0.999;3.17;0.85;12;7 -10.4;0.44;0.73;6.55;0.074;38;76;0.999;3.17;0.85;12;7 -10.5;0.26;0.47;1.9;0.078;6;24;0.9976;3.18;1.04;10.9;7 -10.5;0.24;0.42;1.8;0.077;6;22;0.9976;3.21;1.05;10.8;7 -10.2;0.49;0.63;2.9;0.072;10;26;0.9968;3.16;0.78;12.5;7 -10.4;0.24;0.46;1.8;0.075;6;21;0.9976;3.25;1.02;10.8;7 -11.2;0.67;0.55;2.3;0.084;6;13;1;3.17;0.71;9.5;6 -10;0.59;0.31;2.2;0.09;26;62;0.9994;3.18;0.63;10.2;6 -13.3;0.29;0.75;2.8;0.084;23;43;0.9986;3.04;0.68;11.4;7 -12.4;0.42;0.49;4.6;0.073;19;43;0.9978;3.02;0.61;9.5;5 -10;0.59;0.31;2.2;0.09;26;62;0.9994;3.18;0.63;10.2;6 -10.7;0.4;0.48;2.1;0.125;15;49;0.998;3.03;0.81;9.7;6 -10.5;0.51;0.64;2.4;0.107;6;15;0.9973;3.09;0.66;11.8;7 -10.5;0.51;0.64;2.4;0.107;6;15;0.9973;3.09;0.66;11.8;7 -8.5;0.655;0.49;6.1;0.122;34;151;1.001;3.31;1.14;9.3;5 -12.5;0.6;0.49;4.3;0.1;5;14;1.001;3.25;0.74;11.9;6 -10.4;0.61;0.49;2.1;0.2;5;16;0.9994;3.16;0.63;8.4;3 -10.9;0.21;0.49;2.8;0.088;11;32;0.9972;3.22;0.68;11.7;6 -7.3;0.365;0.49;2.5;0.088;39;106;0.9966;3.36;0.78;11;5 -9.8;0.25;0.49;2.7;0.088;15;33;0.9982;3.42;0.9;10;6 -7.6;0.41;0.49;2;0.088;16;43;0.998;3.48;0.64;9.1;5 -8.2;0.39;0.49;2.3;0.099;47;133;0.9979;3.38;0.99;9.8;5 
[... remaining deleted rows of snowflake/ml/feature_store/notebooks/internal_demo/winequality-red.csv elided: semicolon-delimited wine-quality measurements; the entire 1600-line data file is removed by this patch ...]
-6.4;0.38;0.14;2.2;0.038;15;25;0.99514;3.44;0.65;11.1;6 -7.3;0.69;0.32;2.2;0.069;35;104;0.99632;3.33;0.51;9.5;5 -6;0.58;0.2;2.4;0.075;15;50;0.99467;3.58;0.67;12.5;6 -5.6;0.31;0.78;13.9;0.074;23;92;0.99677;3.39;0.48;10.5;6 -7.5;0.52;0.4;2.2;0.06;12;20;0.99474;3.26;0.64;11.8;6 -8;0.3;0.63;1.6;0.081;16;29;0.99588;3.3;0.78;10.8;6 -6.2;0.7;0.15;5.1;0.076;13;27;0.99622;3.54;0.6;11.9;6 -6.8;0.67;0.15;1.8;0.118;13;20;0.9954;3.42;0.67;11.3;6 -6.2;0.56;0.09;1.7;0.053;24;32;0.99402;3.54;0.6;11.3;5 -7.4;0.35;0.33;2.4;0.068;9;26;0.9947;3.36;0.6;11.9;6 -6.2;0.56;0.09;1.7;0.053;24;32;0.99402;3.54;0.6;11.3;5 -6.1;0.715;0.1;2.6;0.053;13;27;0.99362;3.57;0.5;11.9;5 -6.2;0.46;0.29;2.1;0.074;32;98;0.99578;3.33;0.62;9.8;5 -6.7;0.32;0.44;2.4;0.061;24;34;0.99484;3.29;0.8;11.6;7 -7.2;0.39;0.44;2.6;0.066;22;48;0.99494;3.3;0.84;11.5;6 -7.5;0.31;0.41;2.4;0.065;34;60;0.99492;3.34;0.85;11.4;6 -5.8;0.61;0.11;1.8;0.066;18;28;0.99483;3.55;0.66;10.9;6 -7.2;0.66;0.33;2.5;0.068;34;102;0.99414;3.27;0.78;12.8;6 -6.6;0.725;0.2;7.8;0.073;29;79;0.9977;3.29;0.54;9.2;5 -6.3;0.55;0.15;1.8;0.077;26;35;0.99314;3.32;0.82;11.6;6 -5.4;0.74;0.09;1.7;0.089;16;26;0.99402;3.67;0.56;11.6;6 -6.3;0.51;0.13;2.3;0.076;29;40;0.99574;3.42;0.75;11;6 -6.8;0.62;0.08;1.9;0.068;28;38;0.99651;3.42;0.82;9.5;6 -6.2;0.6;0.08;2;0.09;32;44;0.9949;3.45;0.58;10.5;5 -5.9;0.55;0.1;2.2;0.062;39;51;0.99512;3.52;0.76;11.2;6 -6.3;0.51;0.13;2.3;0.076;29;40;0.99574;3.42;0.75;11;6 -5.9;0.645;0.12;2;0.075;32;44;0.99547;3.57;0.71;10.2;5 -6;0.31;0.47;3.6;0.067;18;42;0.99549;3.39;0.66;11;6 diff --git a/snowflake/ml/fileset/BUILD.bazel b/snowflake/ml/fileset/BUILD.bazel index f7615db6..1f3df0c5 100644 --- a/snowflake/ml/fileset/BUILD.bazel +++ b/snowflake/ml/fileset/BUILD.bazel @@ -67,7 +67,6 @@ py_library( deps = [ ":embedded_stage_fs", ":sfcfs", - "//snowflake/ml/_internal/utils:snowflake_env", # FIXME(dhung) temporary workaround for SnowURL bug in GS 8.17 ], ) diff --git a/snowflake/ml/fileset/snowfs.py b/snowflake/ml/fileset/snowfs.py index 892b77c0..116dbc92 100644 --- a/snowflake/ml/fileset/snowfs.py +++ b/snowflake/ml/fileset/snowfs.py @@ -1,10 +1,9 @@ import collections import logging import re -from typing import Any, Dict, Optional +from typing import Any, Optional import fsspec -import packaging.version as pkg_version from snowflake import snowpark from snowflake.connector import connection @@ -12,7 +11,7 @@ error_codes, exceptions as snowml_exceptions, ) -from snowflake.ml._internal.utils import identifier, snowflake_env +from snowflake.ml._internal.utils import identifier from snowflake.ml.fileset import embedded_stage_fs, sfcfs PROTOCOL_NAME = "snow" @@ -28,10 +27,6 @@ r"(?Pversions/(?:(?P[^/]+)(?:/(?P.*))?)?)" ) -# FIXME(dhung): Temporary fix for bug in GS version 8.17 -_BUG_VERSION_MIN = pkg_version.Version("8.17") # Inclusive minimum version with bugged behavior -_BUG_VERSION_MAX = pkg_version.Version("8.18") # Exclusive maximum version with bugged behavior - class SnowFileSystem(sfcfs.SFFileSystem): """A filesystem that allows user to access Snowflake embedded stage files with valid Snowflake locations. 
@@ -54,21 +49,6 @@ def __init__( ) -> None: super().__init__(sf_connection=sf_connection, snowpark_session=snowpark_session, **kwargs) - # FIXME(dhung): Temporary fix for bug in GS version 8.17 - if SnowFileSystem._IS_BUGGED_VERSION is None: - try: - sf_version = snowflake_env.get_current_snowflake_version(self._session) - SnowFileSystem._IS_BUGGED_VERSION = _BUG_VERSION_MIN <= sf_version < _BUG_VERSION_MAX - except Exception: - SnowFileSystem._IS_BUGGED_VERSION = False - - def info(self, path: str, **kwargs: Any) -> Dict[str, Any]: - # FIXME(dhung): Temporary fix for bug in GS version 8.17 - res: Dict[str, Any] = super().info(path, **kwargs) - if res.get("type") == "directory" and not res["name"].endswith("/"): - res["name"] += "/" - return res - def _get_stage_fs( self, sf_file_path: _SFFileEntityPath # type: ignore[override] ) -> embedded_stage_fs.SFEmbeddedStageFileSystem: @@ -100,12 +80,6 @@ def _stage_path_to_absolute_path(self, stage_fs: embedded_stage_fs.SFEmbeddedSta if stage_name.startswith(protocol): stage_name = stage_name[len(protocol) :] abs_path = stage_name + "/" + path - # FIXME(dhung): Temporary fix for bug in GS version 8.17 - if self._IS_BUGGED_VERSION: - match = _SNOWURL_PATTERN.fullmatch(abs_path) - assert match is not None - if match.group("relpath"): - abs_path = abs_path.replace(match.group("relpath"), match.group("relpath").lstrip("/")) return abs_path @classmethod @@ -144,9 +118,6 @@ def _parse_file_path(cls, path: str) -> _SFFileEntityPath: # type: ignore[overr version = snowurl_match.group("version") relative_path = snowurl_match.group("relpath") or "" logging.debug(f"Parsed snow URL: {snowurl_match.groups()}") - # FIXME(dhung): Temporary fix for bug in GS version 8.17 - if cls._IS_BUGGED_VERSION: - filepath = f"versions/{version}//{relative_path}" return _SFFileEntityPath( domain=domain, name=name, version=version, relative_path=relative_path, filepath=filepath ) diff --git a/snowflake/ml/model/_client/ops/model_ops.py b/snowflake/ml/model/_client/ops/model_ops.py index 66e2af77..43fe1447 100644 --- a/snowflake/ml/model/_client/ops/model_ops.py +++ b/snowflake/ml/model/_client/ops/model_ops.py @@ -140,6 +140,49 @@ def create_from_stage( statement_params=statement_params, ) + def create_from_model_version( + self, + *, + source_database_name: Optional[sql_identifier.SqlIdentifier], + source_schema_name: Optional[sql_identifier.SqlIdentifier], + source_model_name: sql_identifier.SqlIdentifier, + source_version_name: sql_identifier.SqlIdentifier, + database_name: Optional[sql_identifier.SqlIdentifier], + schema_name: Optional[sql_identifier.SqlIdentifier], + model_name: sql_identifier.SqlIdentifier, + version_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> None: + if self.validate_existence( + database_name=database_name, + schema_name=schema_name, + model_name=model_name, + statement_params=statement_params, + ): + return self._model_version_client.add_version_from_model_version( + source_database_name=source_database_name, + source_schema_name=source_schema_name, + source_model_name=source_model_name, + source_version_name=source_version_name, + database_name=database_name, + schema_name=schema_name, + model_name=model_name, + version_name=version_name, + statement_params=statement_params, + ) + else: + return self._model_version_client.create_from_model_version( + source_database_name=source_database_name, + source_schema_name=source_schema_name, + source_model_name=source_model_name, + 
source_version_name=source_version_name, + database_name=database_name, + schema_name=schema_name, + model_name=model_name, + version_name=version_name, + statement_params=statement_params, + ) + def show_models_or_versions( self, *, diff --git a/snowflake/ml/model/_client/ops/model_ops_test.py b/snowflake/ml/model/_client/ops/model_ops_test.py index 0b980e45..ccd35e51 100644 --- a/snowflake/ml/model/_client/ops/model_ops_test.py +++ b/snowflake/ml/model/_client/ops/model_ops_test.py @@ -565,6 +565,62 @@ def test_create_from_stage_3(self) -> None: mock_create_from_stage.assert_not_called() mock_add_version_from_stagel.assert_not_called() + def test_create_from_model_version_create(self) -> None: + with mock.patch.object( + self.m_ops._model_version_client, "create_from_model_version" + ) as mock_create_from_model_version, mock.patch.object(self.m_ops, "validate_existence", return_value=False): + self.m_ops.create_from_model_version( + source_database_name=sql_identifier.SqlIdentifier("TEMP"), + source_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + source_model_name=sql_identifier.SqlIdentifier("SOURCE_MODEL"), + source_version_name=sql_identifier.SqlIdentifier("SOURCE_VERSION"), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=self.m_statement_params, + ) + mock_create_from_model_version.assert_called_once_with( + source_database_name=sql_identifier.SqlIdentifier("TEMP"), + source_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + source_model_name=sql_identifier.SqlIdentifier("SOURCE_MODEL"), + source_version_name=sql_identifier.SqlIdentifier("SOURCE_VERSION"), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=self.m_statement_params, + ) + + def test_create_from_model_version_add(self) -> None: + with mock.patch.object( + self.m_ops._model_version_client, "add_version_from_model_version" + ) as mock_add_version_from_model_version, mock.patch.object( + self.m_ops, "validate_existence", return_value=True + ): + self.m_ops.create_from_model_version( + source_database_name=sql_identifier.SqlIdentifier("TEMP"), + source_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + source_model_name=sql_identifier.SqlIdentifier("SOURCE_MODEL"), + source_version_name=sql_identifier.SqlIdentifier("SOURCE_VERSION"), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=self.m_statement_params, + ) + mock_add_version_from_model_version.assert_called_once_with( + source_database_name=sql_identifier.SqlIdentifier("TEMP"), + source_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + source_model_name=sql_identifier.SqlIdentifier("SOURCE_MODEL"), + source_version_name=sql_identifier.SqlIdentifier("SOURCE_VERSION"), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=self.m_statement_params, + ) + def test_invoke_method_1(self) -> None: pd_df = pd.DataFrame([["1.0"]], columns=["input"], dtype=np.float32) m_sig = _DUMMY_SIG["predict"] diff --git a/snowflake/ml/model/_client/sql/model_version.py b/snowflake/ml/model/_client/sql/model_version.py index 73353fc5..49a38558 100644 --- 
a/snowflake/ml/model/_client/sql/model_version.py +++ b/snowflake/ml/model/_client/sql/model_version.py @@ -44,6 +44,32 @@ def create_from_stage( statement_params=statement_params, ).has_dimensions(expected_rows=1, expected_cols=1).validate() + def create_from_model_version( + self, + *, + source_database_name: Optional[sql_identifier.SqlIdentifier], + source_schema_name: Optional[sql_identifier.SqlIdentifier], + source_model_name: sql_identifier.SqlIdentifier, + source_version_name: sql_identifier.SqlIdentifier, + database_name: Optional[sql_identifier.SqlIdentifier], + schema_name: Optional[sql_identifier.SqlIdentifier], + model_name: sql_identifier.SqlIdentifier, + version_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> None: + fq_source_model_name = self.fully_qualified_object_name( + source_database_name, source_schema_name, source_model_name + ) + fq_model_name = self.fully_qualified_object_name(database_name, schema_name, model_name) + query_result_checker.SqlResultValidator( + self._session, + ( + f"CREATE MODEL {fq_model_name} WITH VERSION {version_name} FROM MODEL {fq_source_model_name}" + f" VERSION {source_version_name}" + ), + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() + # TODO(SNOW-987381): Merge with above when we have `create or alter module m [with] version v1 ...` def add_version_from_stage( self, @@ -64,6 +90,32 @@ def add_version_from_stage( statement_params=statement_params, ).has_dimensions(expected_rows=1, expected_cols=1).validate() + def add_version_from_model_version( + self, + *, + source_database_name: Optional[sql_identifier.SqlIdentifier], + source_schema_name: Optional[sql_identifier.SqlIdentifier], + source_model_name: sql_identifier.SqlIdentifier, + source_version_name: sql_identifier.SqlIdentifier, + database_name: Optional[sql_identifier.SqlIdentifier], + schema_name: Optional[sql_identifier.SqlIdentifier], + model_name: sql_identifier.SqlIdentifier, + version_name: sql_identifier.SqlIdentifier, + statement_params: Optional[Dict[str, Any]] = None, + ) -> None: + fq_source_model_name = self.fully_qualified_object_name( + source_database_name, source_schema_name, source_model_name + ) + fq_model_name = self.fully_qualified_object_name(database_name, schema_name, model_name) + query_result_checker.SqlResultValidator( + self._session, + ( + f"ALTER MODEL {fq_model_name} ADD VERSION {version_name} FROM MODEL {fq_source_model_name}" + f" VERSION {source_version_name}" + ), + statement_params=statement_params, + ).has_dimensions(expected_rows=1, expected_cols=1).validate() + def set_default_version( self, *, @@ -145,7 +197,7 @@ def get_file( if snowpark_utils.is_in_stored_procedure(): # type: ignore[no-untyped-call] options = {"parallel": 10} cursor = self._session._conn._cursor - cursor._download(stage_location_url, str(target_path), options) # type: ignore[attr-defined] + cursor._download(stage_location_url, str(target_path), options) # type: ignore[union-attr] cursor.fetchall() else: query_result_checker.SqlResultValidator( diff --git a/snowflake/ml/model/_client/sql/model_version_test.py b/snowflake/ml/model/_client/sql/model_version_test.py index f6d96a6d..78e4dc8a 100644 --- a/snowflake/ml/model/_client/sql/model_version_test.py +++ b/snowflake/ml/model/_client/sql/model_version_test.py @@ -57,6 +57,64 @@ def test_create_from_stage(self) -> None: statement_params=m_statement_params, ) + def test_create_from_model_version(self) -> None: + 
m_statement_params = {"test": "1"} + m_df = mock_data_frame.MockDataFrame( + collect_result=[Row("Model MODEL successfully created.")], collect_statement_params=m_statement_params + ) + self.m_session.add_mock_sql( + ( + 'CREATE MODEL TEMP."test".MODEL WITH VERSION V1' + ' FROM MODEL SOURCE_TEMP."source_test".SOURCE_MODEL VERSION SOURCE_VERSION' + ), + m_df, + ) + c_session = cast(Session, self.m_session) + model_version_sql.ModelVersionSQLClient( + c_session, + database_name=sql_identifier.SqlIdentifier("TEMP"), + schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + ).create_from_model_version( + source_database_name=sql_identifier.SqlIdentifier("SOURCE_TEMP"), + source_schema_name=sql_identifier.SqlIdentifier("source_test", case_sensitive=True), + source_model_name=sql_identifier.SqlIdentifier("SOURCE_MODEL"), + source_version_name=sql_identifier.SqlIdentifier("SOURCE_VERSION"), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=m_statement_params, + ) + + def test_add_version_from_model_version(self) -> None: + m_statement_params = {"test": "1"} + m_df = mock_data_frame.MockDataFrame( + collect_result=[Row("Model MODEL successfully created.")], collect_statement_params=m_statement_params + ) + self.m_session.add_mock_sql( + ( + 'ALTER MODEL TEMP."test".MODEL ADD VERSION V1' + ' FROM MODEL SOURCE_TEMP."source_test".SOURCE_MODEL VERSION SOURCE_VERSION' + ), + m_df, + ) + c_session = cast(Session, self.m_session) + model_version_sql.ModelVersionSQLClient( + c_session, + database_name=sql_identifier.SqlIdentifier("TEMP"), + schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + ).add_version_from_model_version( + source_database_name=sql_identifier.SqlIdentifier("SOURCE_TEMP"), + source_schema_name=sql_identifier.SqlIdentifier("source_test", case_sensitive=True), + source_model_name=sql_identifier.SqlIdentifier("SOURCE_MODEL"), + source_version_name=sql_identifier.SqlIdentifier("SOURCE_VERSION"), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=m_statement_params, + ) + def test_add_version_from_stage(self) -> None: m_statement_params = {"test": "1"} m_df = mock_data_frame.MockDataFrame( @@ -293,7 +351,8 @@ def test_invoke_function_method(self) -> None: c_session = cast(Session, self.m_session) mock_writer = mock.MagicMock() m_df.__setattr__("write", mock_writer) - m_df.__setattr__("queries", {"queries": ["query_1", "query_2"], "post_actions": []}) + m_df.add_query("queries", "query_1") + m_df.add_query("queries", "query_2") with mock.patch.object(mock_writer, "save_as_table") as mock_save_as_table, mock.patch.object( snowpark_utils, "random_name_for_temp_object", return_value="SNOWPARK_TEMP_TABLE_ABCDEF0123" ) as mock_random_name_for_temp_object: @@ -334,7 +393,8 @@ def test_invoke_function_method_fully_qualified(self) -> None: c_session = cast(Session, self.m_session) mock_writer = mock.MagicMock() m_df.__setattr__("write", mock_writer) - m_df.__setattr__("queries", {"queries": ["query_1", "query_2"], "post_actions": []}) + m_df.add_query("queries", "query_1") + m_df.add_query("queries", "query_2") with mock.patch.object(mock_writer, "save_as_table") as mock_save_as_table, mock.patch.object( snowpark_utils, "random_name_for_temp_object", return_value="SNOWPARK_TEMP_TABLE_ABCDEF0123" ) as mock_random_name_for_temp_object: @@ 
-375,7 +435,8 @@ def test_invoke_function_method_1(self) -> None: c_session = cast(Session, self.m_session) mock_writer = mock.MagicMock() m_df.__setattr__("write", mock_writer) - m_df.__setattr__("queries", {"queries": ["query_1"], "post_actions": ["query_2"]}) + m_df.add_query("queries", "query_1") + m_df.add_query("queries", "query_2") with mock.patch.object(mock_writer, "save_as_table") as mock_save_as_table, mock.patch.object( snowpark_utils, "random_name_for_temp_object", return_value="SNOWPARK_TEMP_TABLE_ABCDEF0123" ) as mock_random_name_for_temp_object: @@ -415,7 +476,7 @@ def test_invoke_function_method_2(self) -> None: ) m_df.add_mock_with_columns(["OUTPUT_1"], [F.col("OUTPUT_1")]).add_mock_drop("TMP_RESULT") c_session = cast(Session, self.m_session) - m_df.__setattr__("queries", {"queries": ["query_1"], "post_actions": []}) + m_df.add_query("queries", "query_1") model_version_sql.ModelVersionSQLClient( c_session, database_name=sql_identifier.SqlIdentifier("TEMP"), @@ -448,7 +509,8 @@ def test_invoke_table_function_method_partition_col(self) -> None: c_session = cast(Session, self.m_session) mock_writer = mock.MagicMock() m_df.__setattr__("write", mock_writer) - m_df.__setattr__("queries", {"queries": ["query_1", "query_2"], "post_actions": []}) + m_df.add_query("queries", "query_1") + m_df.add_query("queries", "query_2") with mock.patch.object(mock_writer, "save_as_table") as mock_save_as_table, mock.patch.object( snowpark_utils, "random_name_for_temp_object", return_value="SNOWPARK_TEMP_TABLE_ABCDEF0123" ) as mock_random_name_for_temp_object: @@ -492,7 +554,8 @@ def test_invoke_table_function_method_partition_col_fully_qualified(self) -> Non c_session = cast(Session, self.m_session) mock_writer = mock.MagicMock() m_df.__setattr__("write", mock_writer) - m_df.__setattr__("queries", {"queries": ["query_1", "query_2"], "post_actions": []}) + m_df.add_query("queries", "query_1") + m_df.add_query("queries", "query_2") with mock.patch.object(mock_writer, "save_as_table") as mock_save_as_table, mock.patch.object( snowpark_utils, "random_name_for_temp_object", return_value="SNOWPARK_TEMP_TABLE_ABCDEF0123" ) as mock_random_name_for_temp_object: diff --git a/snowflake/ml/model/_model_composer/model_composer.py b/snowflake/ml/model/_model_composer/model_composer.py index e41ba69a..afd9ed7a 100644 --- a/snowflake/ml/model/_model_composer/model_composer.py +++ b/snowflake/ml/model/_model_composer/model_composer.py @@ -136,7 +136,7 @@ def save( model_meta=self.packager.meta, model_file_rel_path=pathlib.PurePosixPath(self.model_file_rel_path), options=options, - data_sources=self._get_data_sources(model), + data_sources=self._get_data_sources(model, sample_input_data), ) file_utils.upload_directory_to_stage( @@ -179,8 +179,12 @@ def load( mp.load(meta_only=meta_only, options=options) return mp - def _get_data_sources(self, model: model_types.SupportedModelType) -> Optional[List[data_source.DataSource]]: + def _get_data_sources( + self, model: model_types.SupportedModelType, sample_input_data: Optional[model_types.SupportedDataType] = None + ) -> Optional[List[data_source.DataSource]]: data_sources = getattr(model, lineage_utils.DATA_SOURCES_ATTR, None) + if not data_sources and sample_input_data is not None: + data_sources = getattr(sample_input_data, lineage_utils.DATA_SOURCES_ATTR, None) if isinstance(data_sources, list) and all(isinstance(item, data_source.DataSource) for item in data_sources): return data_sources return None diff --git 
a/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel b/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel index e064f71a..f3f415e6 100644 --- a/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel +++ b/snowflake/ml/model/_model_composer/model_manifest/BUILD.bazel @@ -41,7 +41,6 @@ py_test( ], deps = [ ":model_manifest", - "//snowflake/ml/_internal:env_utils", "//snowflake/ml/model:model_signature", "//snowflake/ml/model:type_hints", "//snowflake/ml/model/_packager/model_meta", diff --git a/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py b/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py index 8c1557de..38769cbc 100644 --- a/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py +++ b/snowflake/ml/model/_model_composer/model_manifest/model_manifest_test.py @@ -7,7 +7,6 @@ import yaml from absl.testing import absltest -from snowflake.ml._internal import env_utils from snowflake.ml.model import model_signature, type_hints from snowflake.ml.model._model_composer.model_manifest import model_manifest from snowflake.ml.model._packager.model_meta import model_blob_meta, model_meta @@ -38,211 +37,199 @@ class ModelManifestTest(absltest.TestCase): def setUp(self) -> None: self.m_session = mock.MagicMock() - self.mock_to_use_released_snowml = mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, - ) - self.mock_to_use_local_snowml = mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ) def test_model_manifest_1(self) -> None: with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: mm = model_manifest.ModelManifest(pathlib.Path(workspace)) - with self.mock_to_use_released_snowml: - with model_meta.create_model_metadata( - model_dir_path=tmpdir, - name="model1", - model_type="custom", - signatures=_DUMMY_SIG, - python_version="3.8", - ) as meta: - meta.models["model1"] = _DUMMY_BLOB - - mm.save(self.m_session, meta, pathlib.PurePosixPath("model.zip")) - with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") - .joinpath("fixtures") - .joinpath("MANIFEST_1.yml") - .read_text() - ), - f.read(), - ) - with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_method") - .joinpath("fixtures") - .joinpath("function_1.py") - .read_text() - ), - f.read(), - ) + with model_meta.create_model_metadata( + model_dir_path=tmpdir, + name="model1", + model_type="custom", + signatures=_DUMMY_SIG, + python_version="3.8", + embed_local_ml_library=False, + ) as meta: + meta.models["model1"] = _DUMMY_BLOB + + mm.save(self.m_session, meta, pathlib.PurePosixPath("model.zip")) + with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + .joinpath("fixtures") + .joinpath("MANIFEST_1.yml") + .read_text() + ), + f.read(), + ) + with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_method") + 
.joinpath("fixtures") + .joinpath("function_1.py") + .read_text() + ), + f.read(), + ) def test_model_manifest_2(self) -> None: with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: mm = model_manifest.ModelManifest(pathlib.Path(workspace)) - with self.mock_to_use_local_snowml: - with model_meta.create_model_metadata( - model_dir_path=tmpdir, - name="model1", - model_type="custom", - signatures={"__call__": _DUMMY_SIG["predict"]}, - python_version="3.8", - ) as meta: - meta.models["model1"] = _DUMMY_BLOB - - mm.save( - self.m_session, - meta, - pathlib.PurePosixPath("model.zip"), - options=type_hints.BaseModelSaveOption( - method_options={"__call__": type_hints.ModelMethodSaveOptions(max_batch_size=10)} + with model_meta.create_model_metadata( + model_dir_path=tmpdir, + name="model1", + model_type="custom", + signatures={"__call__": _DUMMY_SIG["predict"]}, + python_version="3.8", + embed_local_ml_library=True, + ) as meta: + meta.models["model1"] = _DUMMY_BLOB + + mm.save( + self.m_session, + meta, + pathlib.PurePosixPath("model.zip"), + options=type_hints.BaseModelSaveOption( + method_options={"__call__": type_hints.ModelMethodSaveOptions(max_batch_size=10)} + ), + ) + with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + .joinpath("fixtures") + .joinpath("MANIFEST_2.yml") + .read_text() + ), + f.read(), + ) + with open(pathlib.Path(workspace, "functions", "__call__.py"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_method") + .joinpath("fixtures") + .joinpath("function_2.py") + .read_text() ), + f.read(), ) - with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") - .joinpath("fixtures") - .joinpath("MANIFEST_2.yml") - .read_text() - ), - f.read(), - ) - with open(pathlib.Path(workspace, "functions", "__call__.py"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_method") - .joinpath("fixtures") - .joinpath("function_2.py") - .read_text() - ), - f.read(), - ) def test_model_manifest_mix(self) -> None: with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: mm = model_manifest.ModelManifest(pathlib.Path(workspace)) - with self.mock_to_use_local_snowml: - with model_meta.create_model_metadata( - model_dir_path=tmpdir, - name="model1", - model_type="custom", - signatures={"predict": _DUMMY_SIG["predict"], "__call__": _DUMMY_SIG["predict"]}, - python_version="3.8", - ) as meta: - meta.models["model1"] = _DUMMY_BLOB - - mm.save( - self.m_session, - meta, - pathlib.PurePosixPath("model.zip"), - options=type_hints.BaseModelSaveOption( - method_options={ - "predict": type_hints.ModelMethodSaveOptions(case_sensitive=True), - "__call__": type_hints.ModelMethodSaveOptions(max_batch_size=10), - } + with model_meta.create_model_metadata( + model_dir_path=tmpdir, + name="model1", + model_type="custom", + signatures={"predict": _DUMMY_SIG["predict"], "__call__": _DUMMY_SIG["predict"]}, + python_version="3.8", + embed_local_ml_library=True, + ) as meta: + meta.models["model1"] = _DUMMY_BLOB + + mm.save( + self.m_session, + meta, + pathlib.PurePosixPath("model.zip"), + options=type_hints.BaseModelSaveOption( + method_options={ + "predict": 
type_hints.ModelMethodSaveOptions(case_sensitive=True), + "__call__": type_hints.ModelMethodSaveOptions(max_batch_size=10), + } + ), + ) + with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + .joinpath("fixtures") + .joinpath("MANIFEST_3.yml") + .read_text() ), + f.read(), + ) + with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_method") + .joinpath("fixtures") + .joinpath("function_1.py") + .read_text() + ), + f.read(), + ) + with open(pathlib.Path(workspace, "functions", "__call__.py"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_method") + .joinpath("fixtures") + .joinpath("function_2.py") + .read_text() + ), + f.read(), ) - with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") - .joinpath("fixtures") - .joinpath("MANIFEST_3.yml") - .read_text() - ), - f.read(), - ) - with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_method") - .joinpath("fixtures") - .joinpath("function_1.py") - .read_text() - ), - f.read(), - ) - with open(pathlib.Path(workspace, "functions", "__call__.py"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_method") - .joinpath("fixtures") - .joinpath("function_2.py") - .read_text() - ), - f.read(), - ) def test_model_manifest_bad(self) -> None: with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: mm = model_manifest.ModelManifest(pathlib.Path(workspace)) - with self.mock_to_use_local_snowml: - with model_meta.create_model_metadata( - model_dir_path=tmpdir, - name="model1", - model_type="custom", - signatures={"predict": _DUMMY_SIG["predict"], "PREDICT": _DUMMY_SIG["predict"]}, - ) as meta: - meta.models["model1"] = _DUMMY_BLOB - - with self.assertRaisesRegex( - ValueError, "Found duplicate method named resolved as PREDICT in the model." 
- ): - mm.save( - self.m_session, - meta, - pathlib.PurePosixPath("model.zip"), - ) - - def test_model_manifest_table_function(self) -> None: - with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: - mm = model_manifest.ModelManifest(pathlib.Path(workspace)) - with self.mock_to_use_local_snowml: - with model_meta.create_model_metadata( - model_dir_path=tmpdir, - name="model1", - model_type="custom", - signatures={"predict": _DUMMY_SIG["predict"]}, - python_version="3.8", - ) as meta: - meta.models["model1"] = _DUMMY_BLOB - + with model_meta.create_model_metadata( + model_dir_path=tmpdir, + name="model1", + model_type="custom", + signatures={"predict": _DUMMY_SIG["predict"], "PREDICT": _DUMMY_SIG["predict"]}, + embed_local_ml_library=True, + ) as meta: + meta.models["model1"] = _DUMMY_BLOB + + with self.assertRaisesRegex(ValueError, "Found duplicate method named resolved as PREDICT in the model."): mm.save( self.m_session, meta, pathlib.PurePosixPath("model.zip"), - options=type_hints.BaseModelSaveOption( - method_options={"predict": type_hints.ModelMethodSaveOptions(function_type="TABLE_FUNCTION")} + ) + + def test_model_manifest_table_function(self) -> None: + with tempfile.TemporaryDirectory() as workspace, tempfile.TemporaryDirectory() as tmpdir: + mm = model_manifest.ModelManifest(pathlib.Path(workspace)) + with model_meta.create_model_metadata( + model_dir_path=tmpdir, + name="model1", + model_type="custom", + signatures={"predict": _DUMMY_SIG["predict"]}, + python_version="3.8", + embed_local_ml_library=True, + ) as meta: + meta.models["model1"] = _DUMMY_BLOB + + mm.save( + self.m_session, + meta, + pathlib.PurePosixPath("model.zip"), + options=type_hints.BaseModelSaveOption( + method_options={"predict": type_hints.ModelMethodSaveOptions(function_type="TABLE_FUNCTION")} + ), + ) + with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") + .joinpath("fixtures") + .joinpath("MANIFEST_4.yml") + .read_text() + ), + f.read(), + ) + with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: + self.assertEqual( + ( + importlib_resources.files("snowflake.ml.model._model_composer.model_method") + .joinpath("fixtures") + .joinpath("function_3.py") + .read_text() ), + f.read(), ) - with open(os.path.join(workspace, "MANIFEST.yml"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_manifest") - .joinpath("fixtures") - .joinpath("MANIFEST_4.yml") - .read_text() - ), - f.read(), - ) - with open(pathlib.Path(workspace, "functions", "predict.py"), encoding="utf-8") as f: - self.assertEqual( - ( - importlib_resources.files("snowflake.ml.model._model_composer.model_method") - .joinpath("fixtures") - .joinpath("function_3.py") - .read_text() - ), - f.read(), - ) def test_load(self) -> None: with tempfile.TemporaryDirectory() as tmpdir: diff --git a/snowflake/ml/model/_packager/model_meta/model_meta.py b/snowflake/ml/model/_packager/model_meta/model_meta.py index 1ab04be3..92a36e29 100644 --- a/snowflake/ml/model/_packager/model_meta/model_meta.py +++ b/snowflake/ml/model/_packager/model_meta/model_meta.py @@ -281,9 +281,7 @@ def runtimes(self) -> Dict[str, model_runtime.ModelRuntime]: "cpu": model_runtime.ModelRuntime("cpu", self.env), } if self.env.cuda_version: - runtimes.update( - {"gpu": model_runtime.ModelRuntime("gpu", self.env, is_gpu=True, 
server_availability_source="conda")} - ) + runtimes.update({"gpu": model_runtime.ModelRuntime("gpu", self.env, is_gpu=True)}) return runtimes def save(self, model_dir_path: str) -> None: diff --git a/snowflake/ml/model/_packager/model_runtime/BUILD.bazel b/snowflake/ml/model/_packager/model_runtime/BUILD.bazel index 54e884ce..a6192a9b 100644 --- a/snowflake/ml/model/_packager/model_runtime/BUILD.bazel +++ b/snowflake/ml/model/_packager/model_runtime/BUILD.bazel @@ -25,7 +25,6 @@ py_library( srcs = ["model_runtime.py"], deps = [ ":_snowml_inference_alternative_requirements", - "//snowflake/ml/_internal:env", "//snowflake/ml/_internal:env_utils", "//snowflake/ml/_internal:file_utils", "//snowflake/ml/model/_model_composer/model_manifest:model_manifest_schema", diff --git a/snowflake/ml/model/_packager/model_runtime/model_runtime.py b/snowflake/ml/model/_packager/model_runtime/model_runtime.py index e8b12489..1333cd1d 100644 --- a/snowflake/ml/model/_packager/model_runtime/model_runtime.py +++ b/snowflake/ml/model/_packager/model_runtime/model_runtime.py @@ -1,11 +1,11 @@ import copy import pathlib import warnings -from typing import List, Literal, Optional +from typing import List, Optional from packaging import requirements -from snowflake.ml._internal import env as snowml_env, env_utils, file_utils +from snowflake.ml._internal import env_utils, file_utils from snowflake.ml.model._packager.model_env import model_env from snowflake.ml.model._packager.model_meta import model_meta_schema from snowflake.ml.model._packager.model_runtime import ( @@ -37,7 +37,6 @@ def __init__( env: model_env.ModelEnv, imports: Optional[List[pathlib.PurePosixPath]] = None, is_gpu: bool = False, - server_availability_source: Literal["snowflake", "conda"] = "snowflake", loading_from_file: bool = False, ) -> None: self.name = name @@ -48,30 +47,7 @@ def __init__( return snowml_pkg_spec = f"{env_utils.SNOWPARK_ML_PKG_NAME}=={self.runtime_env.snowpark_ml_version}" - if self.runtime_env._snowpark_ml_version.local: - self.embed_local_ml_library = True - else: - if server_availability_source == "snowflake": - snowml_server_availability = ( - len( - env_utils.get_matched_package_versions_in_information_schema_with_active_session( - reqs=[requirements.Requirement(snowml_pkg_spec)], - python_version=snowml_env.PYTHON_VERSION, - ).get(env_utils.SNOWPARK_ML_PKG_NAME, []) - ) - >= 1 - ) - else: - snowml_server_availability = ( - len( - env_utils.get_matched_package_versions_in_snowflake_conda_channel( - req=requirements.Requirement(snowml_pkg_spec), - python_version=snowml_env.PYTHON_VERSION, - ) - ) - >= 1 - ) - self.embed_local_ml_library = not snowml_server_availability + self.embed_local_ml_library = self.runtime_env._snowpark_ml_version.local additional_package = ( _SNOWML_INFERENCE_ALTERNATIVE_DEPENDENCIES if self.embed_local_ml_library else [snowml_pkg_spec] diff --git a/snowflake/ml/model/_packager/model_runtime/model_runtime_test.py b/snowflake/ml/model/_packager/model_runtime/model_runtime_test.py index da7a80c2..ed3f6807 100644 --- a/snowflake/ml/model/_packager/model_runtime/model_runtime_test.py +++ b/snowflake/ml/model/_packager/model_runtime/model_runtime_test.py @@ -1,8 +1,6 @@ -import copy import os import pathlib import tempfile -from unittest import mock import yaml from absl.testing import absltest @@ -21,91 +19,79 @@ ) ) -_BASIC_DEPENDENCIES_TARGET_WITH_SNOWML_RELAXED = [ - str(env_utils.get_local_installed_version_of_pip_package(requirements.Requirement(env_utils.SNOWPARK_ML_PKG_NAME))) -] - class 
ModelRuntimeTest(absltest.TestCase): - def setUp(self) -> None: - self.m_env = model_env.ModelEnv() - self.mock_to_use_released_snowml = mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, - ) - self.mock_to_use_local_snowml = mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ) - def test_model_runtime(self) -> None: with tempfile.TemporaryDirectory() as workspace: - with self.mock_to_use_released_snowml: - mr = model_runtime.ModelRuntime("cpu", self.m_env, []) - returned_dict = mr.save(pathlib.Path(workspace)) - - self.assertDictEqual( - returned_dict, - { - "imports": [], - "dependencies": { - "conda": "runtimes/cpu/env/conda.yml", - "pip": "runtimes/cpu/env/requirements.txt", - }, + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" + + mr = model_runtime.ModelRuntime("cpu", m_env, []) + returned_dict = mr.save(pathlib.Path(workspace)) + + self.assertDictEqual( + returned_dict, + { + "imports": [], + "dependencies": { + "conda": "runtimes/cpu/env/conda.yml", + "pip": "runtimes/cpu/env/requirements.txt", }, - ) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + }, + ) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(_BASIC_DEPENDENCIES_TARGET_WITH_SNOWML_RELAXED, dependencies["dependencies"]) + self.assertContainsSubset(["snowflake-ml-python==1.0.0"], dependencies["dependencies"]) def test_model_runtime_with_import(self) -> None: with tempfile.TemporaryDirectory() as workspace: + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" - with self.mock_to_use_released_snowml: - mr = model_runtime.ModelRuntime("cpu", self.m_env, [pathlib.PurePosixPath("model.zip")]) - returned_dict = mr.save(pathlib.Path(workspace)) - - self.assertDictEqual( - returned_dict, - { - "imports": ["model.zip"], - "dependencies": { - "conda": "runtimes/cpu/env/conda.yml", - "pip": "runtimes/cpu/env/requirements.txt", - }, + mr = model_runtime.ModelRuntime("cpu", m_env, [pathlib.PurePosixPath("model.zip")]) + returned_dict = mr.save(pathlib.Path(workspace)) + + self.assertDictEqual( + returned_dict, + { + "imports": ["model.zip"], + "dependencies": { + "conda": "runtimes/cpu/env/conda.yml", + "pip": "runtimes/cpu/env/requirements.txt", }, - ) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + }, + ) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(_BASIC_DEPENDENCIES_TARGET_WITH_SNOWML_RELAXED, dependencies["dependencies"]) + self.assertContainsSubset(["snowflake-ml-python==1.0.0"], dependencies["dependencies"]) def test_model_runtime_local_snowml(self) -> None: with tempfile.TemporaryDirectory() as workspace: - with self.mock_to_use_local_snowml: - mr = model_runtime.ModelRuntime( - "cpu", - self.m_env, - ) - returned_dict = mr.save(pathlib.Path(workspace)) - - self.assertDictEqual( - returned_dict, - { - "imports": ["runtimes/cpu/snowflake-ml-python.zip"], - "dependencies": { - "conda": "runtimes/cpu/env/conda.yml", - "pip": "runtimes/cpu/env/requirements.txt", - }, + m_env = model_env.ModelEnv() + 
m_env.snowpark_ml_version = "1.0.0+abcdef" + + mr = model_runtime.ModelRuntime( + "cpu", + m_env, + ) + returned_dict = mr.save(pathlib.Path(workspace)) + + self.assertDictEqual( + returned_dict, + { + "imports": ["runtimes/cpu/snowflake-ml-python.zip"], + "dependencies": { + "conda": "runtimes/cpu/env/conda.yml", + "pip": "runtimes/cpu/env/requirements.txt", }, - ) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + }, + ) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(_BASIC_DEPENDENCIES_TARGET_RELAXED, dependencies["dependencies"]) + self.assertContainsSubset(_BASIC_DEPENDENCIES_TARGET_RELAXED, dependencies["dependencies"]) def test_model_runtime_dup_basic_dep(self) -> None: with tempfile.TemporaryDirectory() as workspace: @@ -114,21 +100,17 @@ def test_model_runtime_dup_basic_dep(self) -> None: dep_target.append("packaging") dep_target.sort() - m_env = copy.deepcopy(self.m_env) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" m_env.conda_dependencies = dep_target - with mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ): - mr = model_runtime.ModelRuntime("cpu", m_env) - _ = mr.save(pathlib.Path(workspace)) + mr = model_runtime.ModelRuntime("cpu", m_env) + _ = mr.save(pathlib.Path(workspace)) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(dep_target, dependencies["dependencies"]) + self.assertContainsSubset(dep_target, dependencies["dependencies"]) def test_model_runtime_dup_basic_dep_other_channel(self) -> None: with tempfile.TemporaryDirectory() as workspace: @@ -137,192 +119,122 @@ def test_model_runtime_dup_basic_dep_other_channel(self) -> None: dep_target.remove(next(filter(lambda x: x.startswith("packaging"), dep_target))) dep_target.append("conda-forge::packaging") dep_target.sort() - m_env = copy.deepcopy(self.m_env) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" m_env.conda_dependencies = dep_target - with mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ): - mr = model_runtime.ModelRuntime("cpu", m_env) - _ = mr.save(pathlib.Path(workspace)) + mr = model_runtime.ModelRuntime("cpu", m_env) + _ = mr.save(pathlib.Path(workspace)) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(dep_target, dependencies["dependencies"]) + self.assertContainsSubset(dep_target, dependencies["dependencies"]) def test_model_runtime_dup_basic_dep_pip(self) -> None: with tempfile.TemporaryDirectory() as workspace: dep_target = _BASIC_DEPENDENCIES_TARGET_RELAXED[:] dep_target.remove(next(filter(lambda x: x.startswith("packaging"), dep_target))) dep_target.sort() - m_env = copy.deepcopy(self.m_env) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" m_env.conda_dependencies = dep_target - 
with mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ): - mr = model_runtime.ModelRuntime("cpu", m_env) - _ = mr.save(pathlib.Path(workspace)) + mr = model_runtime.ModelRuntime("cpu", m_env) + _ = mr.save(pathlib.Path(workspace)) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(dep_target, dependencies["dependencies"]) + self.assertContainsSubset(dep_target, dependencies["dependencies"]) def test_model_runtime_additional_conda_dep(self) -> None: with tempfile.TemporaryDirectory() as workspace: dep_target = _BASIC_DEPENDENCIES_TARGET_RELAXED[:] dep_target.append("pytorch") dep_target.sort() - m_env = copy.deepcopy(self.m_env) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" m_env.conda_dependencies = dep_target - with mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ): - mr = model_runtime.ModelRuntime("cpu", m_env) - _ = mr.save(pathlib.Path(workspace)) + mr = model_runtime.ModelRuntime("cpu", m_env) + _ = mr.save(pathlib.Path(workspace)) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(dep_target, dependencies["dependencies"]) + self.assertContainsSubset(dep_target, dependencies["dependencies"]) def test_model_runtime_additional_pip_dep(self) -> None: with tempfile.TemporaryDirectory() as workspace: dep_target = _BASIC_DEPENDENCIES_TARGET_RELAXED[:] dep_target.sort() - m_env = copy.deepcopy(self.m_env) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" m_env.conda_dependencies = dep_target - with mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ): - mr = model_runtime.ModelRuntime("cpu", m_env) - _ = mr.save(pathlib.Path(workspace)) + mr = model_runtime.ModelRuntime("cpu", m_env) + _ = mr.save(pathlib.Path(workspace)) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(dep_target, dependencies["dependencies"]) + self.assertContainsSubset(dep_target, dependencies["dependencies"]) def test_model_runtime_additional_dep_both(self) -> None: with tempfile.TemporaryDirectory() as workspace: dep_target = _BASIC_DEPENDENCIES_TARGET_RELAXED[:] dep_target.append("pytorch") dep_target.sort() - m_env = copy.deepcopy(self.m_env) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" m_env.conda_dependencies = dep_target - with mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: []}, - ): - mr = model_runtime.ModelRuntime("cpu", m_env) - _ = mr.save(pathlib.Path(workspace)) + mr = model_runtime.ModelRuntime("cpu", m_env) + _ = 
mr.save(pathlib.Path(workspace)) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(dep_target, dependencies["dependencies"]) + self.assertContainsSubset(dep_target, dependencies["dependencies"]) def test_model_runtime_gpu(self) -> None: with tempfile.TemporaryDirectory() as workspace: - m_env = copy.deepcopy(self.m_env) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" m_env.conda_dependencies = ["pytorch"] m_env.cuda_version = "11.7" - with mock.patch.object( - env_utils, - "get_matched_package_versions_in_information_schema_with_active_session", - return_value={env_utils.SNOWPARK_ML_PKG_NAME: [""]}, - ): - mr = model_runtime.ModelRuntime("gpu", m_env, is_gpu=True) - returned_dict = mr.save(pathlib.Path(workspace)) - - self.assertDictEqual( - returned_dict, - { - "imports": [], - "dependencies": { - "conda": "runtimes/gpu/env/conda.yml", - "pip": "runtimes/gpu/env/requirements.txt", - }, - }, - ) - with open(os.path.join(workspace, "runtimes/gpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) - - self.assertContainsSubset( - ["nvidia::cuda==11.7.*", "pytorch::pytorch", "pytorch::pytorch-cuda==11.7.*"], - dependencies["dependencies"], - ) - - def test_model_runtime_check_conda(self) -> None: - with tempfile.TemporaryDirectory() as workspace: - with mock.patch.object( - env_utils, - "get_matched_package_versions_in_snowflake_conda_channel", - return_value=[""], - ): - mr = model_runtime.ModelRuntime( - "cpu", - self.m_env, - server_availability_source="conda", - ) - returned_dict = mr.save(pathlib.Path(workspace)) - - self.assertDictEqual( - returned_dict, - { - "imports": [], - "dependencies": { - "conda": "runtimes/cpu/env/conda.yml", - "pip": "runtimes/cpu/env/requirements.txt", - }, - }, - ) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) - self.assertContainsSubset(_BASIC_DEPENDENCIES_TARGET_WITH_SNOWML_RELAXED, dependencies["dependencies"]) + mr = model_runtime.ModelRuntime("gpu", m_env, is_gpu=True) + returned_dict = mr.save(pathlib.Path(workspace)) - def test_model_runtime_local_snowml_check_conda(self) -> None: - with tempfile.TemporaryDirectory() as workspace: - with self.mock_to_use_local_snowml: - mr = model_runtime.ModelRuntime("cpu", self.m_env, server_availability_source="conda") - returned_dict = mr.save(pathlib.Path(workspace)) - - self.assertDictEqual( - returned_dict, - { - "imports": ["runtimes/cpu/snowflake-ml-python.zip"], - "dependencies": { - "conda": "runtimes/cpu/env/conda.yml", - "pip": "runtimes/cpu/env/requirements.txt", - }, + self.assertDictEqual( + returned_dict, + { + "imports": [], + "dependencies": { + "conda": "runtimes/gpu/env/conda.yml", + "pip": "runtimes/gpu/env/requirements.txt", }, - ) - with open(os.path.join(workspace, "runtimes/cpu/env/conda.yml"), encoding="utf-8") as f: - dependencies = yaml.safe_load(f) + }, + ) + with open(os.path.join(workspace, "runtimes/gpu/env/conda.yml"), encoding="utf-8") as f: + dependencies = yaml.safe_load(f) - self.assertContainsSubset(_BASIC_DEPENDENCIES_TARGET_RELAXED, dependencies["dependencies"]) + self.assertContainsSubset( + ["nvidia::cuda==11.7.*", "pytorch::pytorch", "pytorch::pytorch-cuda==11.7.*"], + dependencies["dependencies"], + ) def 
test_model_runtime_load_from_file(self) -> None: with tempfile.TemporaryDirectory() as workspace: - mr = model_runtime.ModelRuntime("cpu", self.m_env, [pathlib.PurePosixPath("model.zip")]) + m_env = model_env.ModelEnv() + m_env.snowpark_ml_version = "1.0.0" + + mr = model_runtime.ModelRuntime("cpu", m_env, [pathlib.PurePosixPath("model.zip")]) returned_dict = mr.save(pathlib.Path(workspace)) - loaded_mr = model_runtime.ModelRuntime.load(pathlib.Path(workspace), "cpu", self.m_env, returned_dict) + loaded_mr = model_runtime.ModelRuntime.load(pathlib.Path(workspace), "cpu", m_env, returned_dict) self.assertDictEqual(loaded_mr.save(pathlib.Path(workspace)), returned_dict) diff --git a/snowflake/ml/modeling/_internal/snowpark_implementations/BUILD.bazel b/snowflake/ml/modeling/_internal/snowpark_implementations/BUILD.bazel index 6bbb3940..1b672934 100644 --- a/snowflake/ml/modeling/_internal/snowpark_implementations/BUILD.bazel +++ b/snowflake/ml/modeling/_internal/snowpark_implementations/BUILD.bazel @@ -37,10 +37,16 @@ py_library( ], ) +py_library( + name = "distributed_search_udf_file", + srcs = ["distributed_search_udf_file.py"], +) + py_library( name = "distributed_hpo_trainer", srcs = ["distributed_hpo_trainer.py"], deps = [ + ":distributed_search_udf_file", ":snowpark_trainer", "//snowflake/ml/_internal:env_utils", "//snowflake/ml/_internal:telemetry", diff --git a/snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py b/snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py index eef8eb51..0e35fa4c 100644 --- a/snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py +++ b/snowflake/ml/modeling/_internal/snowpark_implementations/distributed_hpo_trainer.py @@ -4,11 +4,10 @@ import os import posixpath import sys -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple, Union import cloudpickle as cp import numpy as np -import numpy.typing as npt from sklearn import model_selection from sklearn.model_selection import GridSearchCV, RandomizedSearchCV @@ -36,6 +35,7 @@ from snowflake.snowpark.functions import sproc, udtf from snowflake.snowpark.row import Row from snowflake.snowpark.types import IntegerType, StringType, StructField, StructType +from snowflake.snowpark.udtf import UDTFRegistration cp.register_pickle_by_value(inspect.getmodule(get_temp_file_path)) cp.register_pickle_by_value(inspect.getmodule(identifier.get_inferred_name)) @@ -698,7 +698,6 @@ def fit_search_snowpark_enable_efficient_memory_usage( ) -> Union[model_selection.GridSearchCV, model_selection.RandomizedSearchCV]: from itertools import product - import cachetools from sklearn.base import clone, is_classifier from sklearn.calibration import check_cv @@ -719,9 +718,11 @@ def fit_search_snowpark_enable_efficient_memory_usage( # Create a temp file and dump the estimator to that file. 
estimator_file_name = get_temp_file_path() params_to_evaluate = list(param_grid) - n_candidates = len(params_to_evaluate) - _N_JOBS = estimator.n_jobs - _PRE_DISPATCH = estimator.pre_dispatch + CONSTANTS: Dict[str, Any] = dict() + CONSTANTS["dataset_snowpark_cols"] = dataset.columns + CONSTANTS["n_candidates"] = len(params_to_evaluate) + CONSTANTS["_N_JOBS"] = estimator.n_jobs + CONSTANTS["_PRE_DISPATCH"] = estimator.pre_dispatch with open(estimator_file_name, mode="w+b") as local_estimator_file_obj: cp.dump(dict(estimator=estimator, param_grid=params_to_evaluate), local_estimator_file_obj) @@ -743,6 +744,9 @@ def fit_search_snowpark_enable_efficient_memory_usage( api_calls=[udtf], custom_tags=dict([("hpo_memory_efficient", True)]), ) + from snowflake.ml.modeling._internal.snowpark_implementations.distributed_search_udf_file import ( + execute_template, + ) # Put locally serialized estimator on stage. session.file.put( @@ -753,6 +757,7 @@ def fit_search_snowpark_enable_efficient_memory_usage( ) estimator_location = os.path.basename(estimator_file_name) imports.append(f"@{temp_stage_name}/{estimator_location}") + CONSTANTS["estimator_location"] = estimator_location search_sproc_name = random_name_for_temp_object(TempObjectType.PROCEDURE) random_udtf_name = random_name_for_temp_object(TempObjectType.FUNCTION) @@ -783,7 +788,6 @@ def _distributed_search( ) -> str: import os import time - from typing import Iterator import cloudpickle as cp import pandas as pd @@ -905,145 +909,60 @@ def _distributed_search( fit_and_score_kwargs_location = os.path.basename(local_fit_and_score_kwargs_file_name) imports.append(f"@{temp_stage_name}/{fit_and_score_kwargs_location}") - cross_validator_indices_length = int(len(cross_validator_indices)) - parameter_grid_length = len(param_grid) + CONSTANTS["input_cols"] = input_cols + CONSTANTS["label_cols"] = label_cols + CONSTANTS["DATA_LENGTH"] = DATA_LENGTH + CONSTANTS["n_splits"] = n_splits + CONSTANTS["indices_location"] = indices_location + CONSTANTS["base_estimator_location"] = base_estimator_location + CONSTANTS["fit_and_score_kwargs_location"] = fit_and_score_kwargs_location - assert estimator is not None - - @cachetools.cached(cache={}) - def _load_data_into_udf() -> Tuple[ - npt.NDArray[Any], - npt.NDArray[Any], - List[List[int]], - List[Dict[str, Any]], - object, - Dict[str, Any], - ]: - import pyarrow.parquet as pq + # (6) store the constants + local_constant_file_name = get_temp_file_path(prefix="constant") + with open(local_constant_file_name, mode="w+b") as local_indices_file_obj: + cp.dump(CONSTANTS, local_indices_file_obj) - data_files = [ - filename - for filename in os.listdir(sys._xoptions["snowflake_import_directory"]) - if filename.startswith(dataset_file_name) - ] - partial_df = [ - pq.read_table(os.path.join(sys._xoptions["snowflake_import_directory"], file_name)).to_pandas() - for file_name in data_files - ] - df = pd.concat(partial_df, ignore_index=True) - df.columns = [identifier.get_inferred_name(col_) for col_ in df.columns] - - # load parameter grid - local_estimator_file_path = os.path.join( - sys._xoptions["snowflake_import_directory"], f"{estimator_location}" - ) - with open(local_estimator_file_path, mode="rb") as local_estimator_file_obj: - estimator_objects = cp.load(local_estimator_file_obj) - params_to_evaluate = estimator_objects["param_grid"] + # Put locally serialized indices on stage. 
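+        # (The file staged here is the serialized CONSTANTS dict created above; the UDTF locates
+        # it at runtime by its "constant" filename prefix. See _load_data_into_udf in the template.)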
+ session.file.put( + local_constant_file_name, + temp_stage_name, + auto_compress=False, + overwrite=True, + ) + constant_location = os.path.basename(local_constant_file_name) + imports.append(f"@{temp_stage_name}/{constant_location}") - # load indices - local_indices_file_path = os.path.join( - sys._xoptions["snowflake_import_directory"], f"{indices_location}" - ) - with open(local_indices_file_path, mode="rb") as local_indices_file_obj: - indices = cp.load(local_indices_file_obj) + cross_validator_indices_length = int(len(cross_validator_indices)) + parameter_grid_length = len(param_grid) - # load base estimator - local_base_estimator_file_path = os.path.join( - sys._xoptions["snowflake_import_directory"], f"{base_estimator_location}" - ) - with open(local_base_estimator_file_path, mode="rb") as local_base_estimator_file_obj: - base_estimator = cp.load(local_base_estimator_file_obj) + assert estimator is not None - # load fit_and_score_kwargs - local_fit_and_score_kwargs_file_path = os.path.join( - sys._xoptions["snowflake_import_directory"], f"{fit_and_score_kwargs_location}" - ) - with open(local_fit_and_score_kwargs_file_path, mode="rb") as local_fit_and_score_kwargs_file_obj: - fit_and_score_kwargs = cp.load(local_fit_and_score_kwargs_file_obj) - - # convert dataframe to numpy would save memory consumption - return ( - df[input_cols].to_numpy(), - df[label_cols].squeeze().to_numpy(), - indices, - params_to_evaluate, - base_estimator, - fit_and_score_kwargs, + # Instantiate UDTFRegistration with the session object + udtf_registration = UDTFRegistration(session) + + import tempfile + + with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False, encoding="utf-8") as f: + udf_code = execute_template + f.file.write(udf_code) + f.file.flush() + + # Register the UDTF function from the file + udtf_registration.register_from_file( + file_path=f.name, + handler_name="SearchCV", + name=random_udtf_name, + output_schema=StructType( + [StructField("FIRST_IDX", IntegerType()), StructField("EACH_CV_RESULTS", StringType())] + ), + input_types=[IntegerType(), IntegerType(), IntegerType()], + replace=True, + imports=imports, # type: ignore[arg-type] + is_permanent=False, + packages=required_deps, # type: ignore[arg-type] + statement_params=udtf_statement_params, ) - # Note Table functions (UDTFs) have a limit of 500 input arguments and 500 output columns. 
-        class SearchCV:
-            def __init__(self) -> None:
-                X, y, indices, params_to_evaluate, base_estimator, fit_and_score_kwargs = _load_data_into_udf()
-                self.X = X
-                self.y = y
-                self.test_indices = indices
-                self.params_to_evaluate = params_to_evaluate
-                self.base_estimator = base_estimator
-                self.fit_and_score_kwargs = fit_and_score_kwargs
-                self.fit_score_params: List[Any] = []
-                self.cv_indices_set: Set[int] = set()
-
-            def process(self, idx: int, params_idx: int, cv_idx: int) -> None:
-                self.fit_score_params.extend([[idx, params_idx, cv_idx]])
-                self.cv_indices_set.add(cv_idx)
-
-            def end_partition(self) -> Iterator[Tuple[int, str]]:
-                from sklearn.base import clone
-                from sklearn.model_selection._validation import _fit_and_score
-                from sklearn.utils.parallel import Parallel, delayed
-
-                cached_train_test_indices = {}
-                # Calculate the full index here to avoid duplicate calculation (which consumes a lot of memory)
-                full_index = np.arange(DATA_LENGTH)
-                for i in self.cv_indices_set:
-                    cached_train_test_indices[i] = [
-                        np.setdiff1d(full_index, self.test_indices[i]),
-                        self.test_indices[i],
-                    ]
-
-                parallel = Parallel(n_jobs=_N_JOBS, pre_dispatch=_PRE_DISPATCH)
-
-                out = parallel(
-                    delayed(_fit_and_score)(
-                        clone(self.base_estimator),
-                        self.X,
-                        self.y,
-                        train=cached_train_test_indices[split_idx][0],
-                        test=cached_train_test_indices[split_idx][1],
-                        parameters=self.params_to_evaluate[cand_idx],
-                        split_progress=(split_idx, n_splits),
-                        candidate_progress=(cand_idx, n_candidates),
-                        **self.fit_and_score_kwargs,  # load sample weight here
-                    )
-                    for _, cand_idx, split_idx in self.fit_score_params
-                )
-
-                binary_cv_results = None
-                with io.BytesIO() as f:
-                    cp.dump(out, f)
-                    f.seek(0)
-                    binary_cv_results = f.getvalue().hex()
-                yield (
-                    self.fit_score_params[0][0],
-                    binary_cv_results,
-                )
-
-        session.udtf.register(
-            SearchCV,
-            output_schema=StructType(
-                [StructField("FIRST_IDX", IntegerType()), StructField("EACH_CV_RESULTS", StringType())]
-            ),
-            input_types=[IntegerType(), IntegerType(), IntegerType()],
-            name=random_udtf_name,
-            packages=required_deps,  # type: ignore[arg-type]
-            replace=True,
-            is_permanent=False,
-            imports=imports,  # type: ignore[arg-type]
-            statement_params=udtf_statement_params,
-        )
-
         HP_TUNING = F.table_function(random_udtf_name)

         # param_indices is for the index for each parameter grid;
diff --git a/snowflake/ml/modeling/_internal/snowpark_implementations/distributed_search_udf_file.py b/snowflake/ml/modeling/_internal/snowpark_implementations/distributed_search_udf_file.py
new file mode 100644
index 00000000..573493ed
--- /dev/null
+++ b/snowflake/ml/modeling/_internal/snowpark_implementations/distributed_search_udf_file.py
@@ -0,0 +1,159 @@
+"""
+Description:
+    Helper file for distributed_hpo_trainer.py; it holds the UDTF source that is registered via `register_from_file`.
+Performance Benefits:
+    The performance benefits come from two aspects:
+    1. register_from_file avoids redundant data loading by loading the data only once on each node.
+    2. register_from_file lets the handler load data into a global variable, which an inline Python UDTF cannot do.
+Developer Tips:
+    Because this script is stored as a string, there is no type hinting, linting, etc. It is highly recommended
+    to develop it as a regular Python script, check the type hints, and then convert it into a string.
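+Example:
+    A minimal sketch of how this template is consumed, mirroring the registration code in
+    distributed_hpo_trainer.py (`session`, `imports`, `required_deps`, `random_udtf_name`, and
+    `udtf_statement_params` are supplied by the caller there):
+
+        with tempfile.NamedTemporaryFile(mode="w", suffix=".py", delete=False, encoding="utf-8") as f:
+            f.write(execute_template)
+            f.flush()
+        UDTFRegistration(session).register_from_file(
+            file_path=f.name,
+            handler_name="SearchCV",
+            name=random_udtf_name,
+            output_schema=StructType(
+                [StructField("FIRST_IDX", IntegerType()), StructField("EACH_CV_RESULTS", StringType())]
+            ),
+            input_types=[IntegerType(), IntegerType(), IntegerType()],
+            replace=True,
+            imports=imports,
+            is_permanent=False,
+            packages=required_deps,
+            statement_params=udtf_statement_params,
+        )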
+""" + +execute_template = """ +from typing import Tuple, Any, List, Dict, Set, Iterator +import os +import sys +import pandas as pd +import numpy as np +import numpy.typing as npt +import cloudpickle as cp +import io + + +def _load_data_into_udf() -> Tuple[ + npt.NDArray[Any], + npt.NDArray[Any], + List[List[int]], + List[Dict[str, Any]], + object, + Dict[str, Any], + Dict[str, Any], +]: + import pyarrow.parquet as pq + + data_files = [ + filename + for filename in os.listdir(sys._xoptions["snowflake_import_directory"]) + if filename.startswith("dataset") + ] + partial_df = [ + pq.read_table(os.path.join(sys._xoptions["snowflake_import_directory"], file_name)).to_pandas() + for file_name in data_files + ] + df = pd.concat(partial_df, ignore_index=True) + constant_file_path = None + for filename in os.listdir(sys._xoptions["snowflake_import_directory"]): + if filename.startswith("constant"): + constant_file_path = os.path.join(sys._xoptions["snowflake_import_directory"], f"{filename}") + if constant_file_path is None: + raise ValueError("UDTF cannot find the constant location, abort!") + with open(constant_file_path, mode="rb") as constant_file_obj: + CONSTANTS = cp.load(constant_file_obj) + df.columns = CONSTANTS['dataset_snowpark_cols'] + + # load parameter grid + local_estimator_file_path = os.path.join( + sys._xoptions["snowflake_import_directory"], + f"{CONSTANTS['estimator_location']}" + ) + with open(local_estimator_file_path, mode="rb") as local_estimator_file_obj: + estimator_objects = cp.load(local_estimator_file_obj) + params_to_evaluate = estimator_objects["param_grid"] + + # load indices + local_indices_file_path = os.path.join( + sys._xoptions["snowflake_import_directory"], + f"{CONSTANTS['indices_location']}" + ) + with open(local_indices_file_path, mode="rb") as local_indices_file_obj: + indices = cp.load(local_indices_file_obj) + + # load base estimator + local_base_estimator_file_path = os.path.join( + sys._xoptions["snowflake_import_directory"], f"{CONSTANTS['base_estimator_location']}" + ) + with open(local_base_estimator_file_path, mode="rb") as local_base_estimator_file_obj: + base_estimator = cp.load(local_base_estimator_file_obj) + + # load fit_and_score_kwargs + local_fit_and_score_kwargs_file_path = os.path.join( + sys._xoptions["snowflake_import_directory"], f"{CONSTANTS['fit_and_score_kwargs_location']}" + ) + with open(local_fit_and_score_kwargs_file_path, mode="rb") as local_fit_and_score_kwargs_file_obj: + fit_and_score_kwargs = cp.load(local_fit_and_score_kwargs_file_obj) + + # convert dataframe to numpy would save memory consumption + return ( + df[CONSTANTS['input_cols']].to_numpy(), + df[CONSTANTS['label_cols']].squeeze().to_numpy(), + indices, + params_to_evaluate, + base_estimator, + fit_and_score_kwargs, + CONSTANTS + ) + + +global_load_data = _load_data_into_udf() + + +# Note Table functions (UDTFs) have a limit of 500 input arguments and 500 output columns. 
+class SearchCV: + def __init__(self) -> None: + X, y, indices, params_to_evaluate, base_estimator, fit_and_score_kwargs, CONSTANTS = global_load_data + self.X = X + self.y = y + self.test_indices = indices + self.params_to_evaluate = params_to_evaluate + self.base_estimator = base_estimator + self.fit_and_score_kwargs = fit_and_score_kwargs + self.fit_score_params: List[Any] = [] + self.CONSTANTS = CONSTANTS + self.cv_indices_set: Set[int] = set() + + def process(self, idx: int, params_idx: int, cv_idx: int) -> None: + self.fit_score_params.extend([[idx, params_idx, cv_idx]]) + self.cv_indices_set.add(cv_idx) + + def end_partition(self) -> Iterator[Tuple[int, str]]: + from sklearn.base import clone + from sklearn.model_selection._validation import _fit_and_score + from sklearn.utils.parallel import Parallel, delayed + + cached_train_test_indices = {} + # Calculate the full index here to avoid duplicate calculation (which consumes a lot of memory) + full_index = np.arange(self.CONSTANTS['DATA_LENGTH']) + for i in self.cv_indices_set: + cached_train_test_indices[i] = [ + np.setdiff1d(full_index, self.test_indices[i]), + self.test_indices[i], + ] + + parallel = Parallel(n_jobs=self.CONSTANTS['_N_JOBS'], pre_dispatch=self.CONSTANTS['_PRE_DISPATCH']) + + out = parallel( + delayed(_fit_and_score)( + clone(self.base_estimator), + self.X, + self.y, + train=cached_train_test_indices[split_idx][0], + test=cached_train_test_indices[split_idx][1], + parameters=self.params_to_evaluate[cand_idx], + split_progress=(split_idx, self.CONSTANTS['n_splits']), + candidate_progress=(cand_idx, self.CONSTANTS['n_candidates']), + **self.fit_and_score_kwargs, # load sample weight here + ) + for _, cand_idx, split_idx in self.fit_score_params + ) + + binary_cv_results = None + with io.BytesIO() as f: + cp.dump(out, f) + f.seek(0) + binary_cv_results = f.getvalue().hex() + yield ( + self.fit_score_params[0][0], + binary_cv_results, + ) +""" diff --git a/snowflake/ml/modeling/impute/simple_imputer.py b/snowflake/ml/modeling/impute/simple_imputer.py index 5e0d04af..bb3d7858 100644 --- a/snowflake/ml/modeling/impute/simple_imputer.py +++ b/snowflake/ml/modeling/impute/simple_imputer.py @@ -158,6 +158,7 @@ def __init__( self.fill_value = fill_value self.missing_values = missing_values + self.statistics_: Dict[str, Any] = {} # TODO(hayu): [SNOW-752265] Support SimpleImputer keep_empty_features. # Add back when `keep_empty_features` is supported. 
# self.keep_empty_features = keep_empty_features @@ -229,8 +230,27 @@ def check_type_consistency(col_types: Dict[str, T.DataType]) -> None: return input_col_datatypes + def fit(self, dataset: Union[snowpark.DataFrame, pd.DataFrame]) -> "SimpleImputer": + if isinstance(dataset, snowpark.DataFrame): + return self._fit_snowpark(dataset) + else: + return self._fit_sklearn(dataset) + + def _fit_sklearn(self, dataset: pd.DataFrame) -> "SimpleImputer": + dataset = self._use_input_cols_only(dataset) + sklearn_simple_imputer = self._create_sklearn_object() + sklearn_simple_imputer = sklearn_simple_imputer.fit(dataset) + self._sklearn_object = sklearn_simple_imputer + for input_col, fill_value in zip(self.input_cols, sklearn_simple_imputer.statistics_.tolist()): + self.statistics_[input_col] = fill_value + self._sklearn_fit_dtype = sklearn_simple_imputer._fit_dtype + self.n_features_in_ = len(self.input_cols) + self.feature_names_in_ = self.input_cols + self._is_fitted = True + return self + @telemetry.send_api_usage_telemetry(project=base.PROJECT, subproject=_SUBPROJECT) - def fit(self, dataset: snowpark.DataFrame) -> "SimpleImputer": + def _fit_snowpark(self, dataset: snowpark.DataFrame) -> "SimpleImputer": """ Compute values to impute for the dataset according to the strategy. @@ -245,7 +265,6 @@ def fit(self, dataset: snowpark.DataFrame) -> "SimpleImputer": # In order to fit, the input columns should have the same type. input_col_datatypes = self._get_dataset_input_col_datatypes(dataset) - self.statistics_: Dict[str, Any] = {} statement_params = telemetry.get_statement_params(base.PROJECT, _SUBPROJECT, self.__class__.__name__) if self.strategy == "constant": diff --git a/snowflake/ml/modeling/parameters/BUILD.bazel b/snowflake/ml/modeling/parameters/BUILD.bazel index eda88b5f..99fe5d38 100644 --- a/snowflake/ml/modeling/parameters/BUILD.bazel +++ b/snowflake/ml/modeling/parameters/BUILD.bazel @@ -52,5 +52,6 @@ py_package( packages = ["snowflake.ml"], deps = [ ":disable_distributed_hpo", + ":enable_anonymous_sproc", ], ) diff --git a/snowflake/ml/registry/_manager/BUILD.bazel b/snowflake/ml/registry/_manager/BUILD.bazel index 40fe9677..5e2f75c5 100644 --- a/snowflake/ml/registry/_manager/BUILD.bazel +++ b/snowflake/ml/registry/_manager/BUILD.bazel @@ -12,7 +12,6 @@ py_library( ], deps = [ "//snowflake/ml/_internal/human_readable_id:hrid_generator", - "//snowflake/ml/_internal/utils:identifier", "//snowflake/ml/_internal/utils:sql_identifier", "//snowflake/ml/model", "//snowflake/ml/model:model_signature", diff --git a/snowflake/ml/registry/_manager/model_manager.py b/snowflake/ml/registry/_manager/model_manager.py index 0b97c695..bf3a1f0b 100644 --- a/snowflake/ml/registry/_manager/model_manager.py +++ b/snowflake/ml/registry/_manager/model_manager.py @@ -1,5 +1,5 @@ from types import ModuleType -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union import pandas as pd from absl.logging import logging @@ -31,6 +31,64 @@ def __init__( self._hrid_generator = hrid_generator.HRID16() def log_model( + self, + *, + model: Union[model_types.SupportedModelType, model_version_impl.ModelVersion], + model_name: str, + version_name: Optional[str] = None, + comment: Optional[str] = None, + metrics: Optional[Dict[str, Any]] = None, + conda_dependencies: Optional[List[str]] = None, + pip_requirements: Optional[List[str]] = None, + python_version: Optional[str] = None, + signatures: Optional[Dict[str, model_signature.ModelSignature]] = None, + sample_input_data: 
Optional[model_types.SupportedDataType] = None, + code_paths: Optional[List[str]] = None, + ext_modules: Optional[List[ModuleType]] = None, + options: Optional[model_types.ModelSaveOption] = None, + statement_params: Optional[Dict[str, Any]] = None, + ) -> model_version_impl.ModelVersion: + if not version_name: + version_name = self._hrid_generator.generate()[1] + + if isinstance(model, model_version_impl.ModelVersion): + ( + source_database_name_id, + source_schema_name_id, + source_model_name_id, + ) = sql_identifier.parse_fully_qualified_name(model.fully_qualified_model_name) + + self._model_ops.create_from_model_version( + source_database_name=source_database_name_id, + source_schema_name=source_schema_name_id, + source_model_name=source_model_name_id, + source_version_name=sql_identifier.SqlIdentifier(model.version_name), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier(model_name), + version_name=sql_identifier.SqlIdentifier(version_name), + statement_params=statement_params, + ) + return self.get_model(model_name=model_name, statement_params=statement_params).version(version_name) + + return self._log_model( + model=model, + model_name=model_name, + version_name=version_name, + comment=comment, + metrics=metrics, + conda_dependencies=conda_dependencies, + pip_requirements=pip_requirements, + python_version=python_version, + signatures=signatures, + sample_input_data=sample_input_data, + code_paths=code_paths, + ext_modules=ext_modules, + options=options, + statement_params=statement_params, + ) + + def _log_model( self, model: model_types.SupportedModelType, *, diff --git a/snowflake/ml/registry/_manager/model_manager_test.py b/snowflake/ml/registry/_manager/model_manager_test.py index eaaaa699..242163ba 100644 --- a/snowflake/ml/registry/_manager/model_manager_test.py +++ b/snowflake/ml/registry/_manager/model_manager_test.py @@ -525,6 +525,30 @@ def test_log_model_fully_qualified(self) -> None: statement_params=mock.ANY, ) + def test_log_model_from_model_version(self) -> None: + m_model_version = mock.MagicMock(spec=model_version_impl.ModelVersion) + m_model_version.fully_qualified_model_name = 'TEMP."test".SOURCE_MODEL' + m_model_version.version_name = "SOURCE_VERSION" + with mock.patch.object( + self.m_r._model_ops, "create_from_model_version" + ) as mock_create_from_model_version, mock.patch.object(self.m_r, "get_model") as mock_get_model: + self.m_r.log_model(model=m_model_version, model_name="MODEL", version_name="V1") + mock_create_from_model_version.assert_called_once_with( + source_database_name=sql_identifier.SqlIdentifier("TEMP"), + source_schema_name=sql_identifier.SqlIdentifier("test", case_sensitive=True), + source_model_name=sql_identifier.SqlIdentifier("SOURCE_MODEL"), + source_version_name=sql_identifier.SqlIdentifier("SOURCE_VERSION"), + database_name=None, + schema_name=None, + model_name=sql_identifier.SqlIdentifier("MODEL"), + version_name=sql_identifier.SqlIdentifier("V1"), + statement_params=mock.ANY, + ) + mock_get_model.assert_called_once_with( + model_name=sql_identifier.SqlIdentifier("MODEL"), + statement_params=mock.ANY, + ) + def test_delete_model(self) -> None: with mock.patch.object(self.m_r._model_ops, "delete_model_or_version") as mock_delete_model_or_version: self.m_r.delete_model( diff --git a/snowflake/ml/registry/registry.py b/snowflake/ml/registry/registry.py index 8aa4ac4e..802fa2be 100644 --- a/snowflake/ml/registry/registry.py +++ b/snowflake/ml/registry/registry.py @@ -71,6 +71,16 @@ def location(self) -> 
str:
     @telemetry.send_api_usage_telemetry(
         project=_TELEMETRY_PROJECT,
         subproject=_MODEL_TELEMETRY_SUBPROJECT,
+        func_params_to_log=[
+            "model_name",
+            "version_name",
+            "comment",
+            "metrics",
+            "conda_dependencies",
+            "pip_requirements",
+            "python_version",
+            "signatures",
+        ],
     )
     def log_model(
         self,
@@ -142,7 +152,6 @@ def log_model(
         Returns:
             ModelVersion: ModelVersion object corresponding to the model just logged.
         """
-
         statement_params = telemetry.get_statement_params(
             project=_TELEMETRY_PROJECT,
             subproject=_MODEL_TELEMETRY_SUBPROJECT,
diff --git a/snowflake/ml/registry/registry_test.py b/snowflake/ml/registry/registry_test.py
index 886bfc05..cd049481 100644
--- a/snowflake/ml/registry/registry_test.py
+++ b/snowflake/ml/registry/registry_test.py
@@ -153,6 +153,31 @@ def test_log_model(self) -> None:
             statement_params=mock.ANY,
         )
 
+    # def test_log_model_from_model_version(self) -> None:
+    #     m_model_version = mock.MagicMock()
+    #     with mock.patch.object(self.m_r._model_manager, "log_model") as mock_log_model:
+    #         self.m_r.log_model(
+    #             model=m_model_version,
+    #             model_name="MODEL",
+    #             version_name="v1",
+    #         )
+    #         mock_log_model.assert_called_once_with(
+    #             model=m_model_version,
+    #             model_name="MODEL",
+    #             version_name="v1",
+    #             comment=None,
+    #             metrics=None,
+    #             conda_dependencies=None,
+    #             pip_requirements=None,
+    #             python_version=None,
+    #             signatures=None,
+    #             sample_input_data=None,
+    #             code_paths=None,
+    #             ext_modules=None,
+    #             options=None,
+    #             statement_params=mock.ANY,
+    #         )
+
     def test_delete_model(self) -> None:
         with mock.patch.object(self.m_r._model_manager, "delete_model") as mock_delete_model:
             self.m_r.delete_model(
diff --git a/snowflake/ml/test_utils/mock_data_frame.py b/snowflake/ml/test_utils/mock_data_frame.py
index 35f3e2a7..65c8e8d5 100644
--- a/snowflake/ml/test_utils/mock_data_frame.py
+++ b/snowflake/ml/test_utils/mock_data_frame.py
@@ -70,6 +70,11 @@ def __init__(
         if count_result is not None:
             self.add_count_result(count_result, count_statement_params)
 
+        # Queries are tracked on the dataframe but are not currently checked; they are only initialized here.
+        self._queries: Dict[str, List[str]] = dict()
+        self._queries["queries"] = []
+        self._queries["post_actions"] = []
+
     @property  # type: ignore[misc]
     def __class__(self) -> Type[DataFrame]:  # type: ignore[override]
         return DataFrame
@@ -201,3 +206,23 @@ def select(self, *args: Any, **kwargs: Any) -> Any:
         # Result should be int if block=True and AsyncJob if block=False.
         mdfo = self._check_operation("select", args, kwargs)
         return mdfo.result
+
+    @property
+    def queries(self) -> Dict[str, List[str]]:
+        return self._queries
+
+    def add_query(self, query_type: str, query_string: str) -> MockDataFrame:
+        """Add a query to the dataframe that can be retrieved by callers through the .queries property.
+
+        Args:
+            query_type: One of "queries" or "post_actions".
+            query_string: SQL of the query as a string.
+ + Returns: + self + + Original implementation: + https://github.com/snowflakedb/snowpark-python/blob/81afcd6512e4c52a695365482f1374a7dacb7eb9/src/snowflake/snowpark/dataframe.py#L4023 + """ + self._queries[query_type].append(query_string) + return self diff --git a/snowflake/ml/test_utils/mock_data_frame_test.py b/snowflake/ml/test_utils/mock_data_frame_test.py index 1fc0cfcc..319ab3d7 100644 --- a/snowflake/ml/test_utils/mock_data_frame_test.py +++ b/snowflake/ml/test_utils/mock_data_frame_test.py @@ -181,6 +181,26 @@ def test_statement_params_missing(self) -> None: with self.assertRaises(AssertionError): self.assertEqual(mock_df.select("NAME").count(), 5) + def test_queries(self) -> None: + """Test that adding and accessing queries in the dataframe works.""" + mock_df = mock_data_frame.MockDataFrame() + test_data = [ + {"type": "queries", "index": 0, "value": "SELECT query_1 FROM TABLE;"}, + {"type": "queries", "index": 1, "value": "SELECT query_2 FROM TABLE;"}, + {"type": "post_actions", "index": 0, "value": "post_action_1"}, + {"type": "post_actions", "index": 1, "value": "post_action_2"}, + ] + for t in test_data: + mock_df.add_query(str(t["type"]), str(t["value"])) + + self.assertEqual( + mock_df.queries, + { + "queries": [test_data[0]["value"], test_data[1]["value"]], + "post_actions": [test_data[2]["value"], test_data[3]["value"]], + }, + ) + if __name__ == "__main__": main() diff --git a/snowflake/ml/utils/BUILD.bazel b/snowflake/ml/utils/BUILD.bazel index 55800e64..8027be05 100644 --- a/snowflake/ml/utils/BUILD.bazel +++ b/snowflake/ml/utils/BUILD.bazel @@ -34,5 +34,6 @@ py_package( deps = [ ":connection_params", ":sparse", + "//snowflake/ml/_internal/utils:snowflake_env", # Mitigate BuildSnowML failure ], ) diff --git a/snowflake/ml/version.bzl b/snowflake/ml/version.bzl index 1c52e7e4..c78f51ef 100644 --- a/snowflake/ml/version.bzl +++ b/snowflake/ml/version.bzl @@ -1,2 +1,2 @@ # This is parsed by regex in conda reciper meta file. Make sure not to break it. 
-VERSION = "1.5.1" +VERSION = "1.5.2" diff --git a/tests/integ/snowflake/ml/extra_tests/pipeline_with_ohe_and_xgbr_test.py b/tests/integ/snowflake/ml/extra_tests/pipeline_with_ohe_and_xgbr_test.py index dd8d9d7a..7a3a9244 100644 --- a/tests/integ/snowflake/ml/extra_tests/pipeline_with_ohe_and_xgbr_test.py +++ b/tests/integ/snowflake/ml/extra_tests/pipeline_with_ohe_and_xgbr_test.py @@ -110,6 +110,57 @@ def test_fit_and_compare_results(self) -> None: np.testing.assert_allclose(results.flatten(), sk_results.flatten(), rtol=1.0e-1, atol=1.0e-2) + def test_fit_predict_proba_and_compare_results(self) -> None: + pd_data = self._test_data + pd_data["ROW_INDEX"] = pd_data.reset_index().index + raw_data = self._session.create_dataframe(pd_data) + + pipeline = Pipeline( + steps=[ + ( + "OHE", + OneHotEncoder( + input_cols=categorical_columns, output_cols=categorical_columns, drop_input_cols=True + ), + ), + ( + "MMS", + MinMaxScaler( + clip=True, + input_cols=numerical_columns, + output_cols=numerical_columns, + ), + ), + ("KNNImputer", KNNImputer(input_cols=numerical_columns, output_cols=numerical_columns)), + ("regression", XGBClassifier(label_cols=label_column, passthrough_cols="ROW_INDEX")), + ] + ) + + pipeline.fit(raw_data) + results = pipeline.predict_proba(raw_data).to_pandas().sort_values(by=["ROW_INDEX"]) + proba_cols = [c for c in results.columns if c.startswith("PREDICT_PROBA_")] + proba_results = results[proba_cols].to_numpy() + + sk_pipeline = SkPipeline( + steps=[ + ( + "Preprocessing", + SkColumnTransformer( + [ + ("cat_transformer", SkOneHotEncoder(), categorical_columns), + ("num_transforms", SkMinMaxScaler(), numerical_columns), + ("num_imputer", SkKNNImputer(), numerical_columns), + ] + ), + ), + ("Training", XGB_XGBClassifier()), + ] + ) + sk_pipeline.fit(pd_data[numerical_columns + categorical_columns], pd_data[label_column]) + sk_proba_results = sk_pipeline.predict_proba(pd_data[numerical_columns + categorical_columns]) + + np.testing.assert_allclose(proba_results.flatten(), sk_proba_results.flatten(), rtol=1.0e-1, atol=1.0e-2) + def test_fit_and_compare_results_pandas_dataframe(self) -> None: raw_data_pandas = self._test_data diff --git a/tests/integ/snowflake/ml/feature_store/feature_store_access_test.py b/tests/integ/snowflake/ml/feature_store/feature_store_access_test.py index 319f7811..0e46ce31 100644 --- a/tests/integ/snowflake/ml/feature_store/feature_store_access_test.py +++ b/tests/integ/snowflake/ml/feature_store/feature_store_access_test.py @@ -187,8 +187,10 @@ def test_init( _configure_pre_init_privileges( self._session, _SessionInfo(self._test_database, schema, self._test_warehouse), - producer_role=self._test_roles[Role.PRODUCER], - consumer_role=self._test_roles[Role.CONSUMER], + roles_to_create={ + Role.PRODUCER: self._test_roles[Role.PRODUCER], + Role.CONSUMER: self._test_roles[Role.CONSUMER], + }, ) def unit_under_test() -> FeatureStore: @@ -347,17 +349,15 @@ def test_generate_dataset(self, required_access: Role, test_access: Role, output required_access=[Role.CONSUMER], test_access=list(Role), output_type=["dataset", "table"] ) def test_access_dataset(self, required_access: Role, test_access: Role, output_type: str) -> None: - if output_type == "dataset": - # FIXME(dhung) SNOW-1346923 - self.skipTest("Dataset RBAC won't be ready until 8.19 release") spine_df = self._session.sql(f"SELECT id FROM {self._mock_table}") fv1 = self._feature_store.get_feature_view("fv1", "v1") fv2 = self._feature_store.get_feature_view("fv2", "v1") dataset_name = 
f"FS_TEST_DATASET_{uuid4().hex.upper()}" dataset = self._feature_store.generate_dataset(dataset_name, spine_df, [fv1, fv2], output_type=output_type) + dataframe = dataset.read.to_snowpark_dataframe() if output_type == "dataset" else dataset self._test_access( - lambda: dataset.to_pandas(), + dataframe.collect, required_access, test_access, expected_result=lambda _pd: self.assertNotEmpty(_pd), @@ -452,6 +452,20 @@ def test_retrieve_feature_values(self, required_access: Role, test_access: Role) access_exception_dict={Role.NONE: snowpark_exceptions.SnowparkSQLException}, ) + def test_producer_setup(self) -> None: + schema = create_random_schema( + self._session, "FS_TEST", database=self._test_database, additional_options="WITH MANAGED ACCESS" + ) + + fs = setup_feature_store( + self._session, + self._test_database, + schema, + self._test_warehouse, + producer_role=self._test_roles[Role.PRODUCER], + ) + self.assertTrue(fs is not None) + if __name__ == "__main__": absltest.main() diff --git a/tests/integ/snowflake/ml/feature_store/feature_store_object_test.py b/tests/integ/snowflake/ml/feature_store/feature_store_object_test.py index 07fa4732..0e9675a1 100644 --- a/tests/integ/snowflake/ml/feature_store/feature_store_object_test.py +++ b/tests/integ/snowflake/ml/feature_store/feature_store_object_test.py @@ -2,7 +2,6 @@ from absl.testing import absltest -from snowflake.ml._internal.exceptions import exceptions as snowml_exceptions from snowflake.ml.feature_store import ( # type: ignore[attr-defined] Entity, FeatureView, @@ -147,9 +146,7 @@ def test_feature_view_versions(self) -> None: FeatureViewVersion(version) for version in invalid_dataset_versions: - with self.assertRaisesRegex( - snowml_exceptions.SnowflakeMLException, ".* is not a valid feature view version.*" - ): + with self.assertRaisesRegex(ValueError, ".* is not a valid feature view version.*"): FeatureViewVersion(version) diff --git a/tests/integ/snowflake/ml/feature_store/feature_store_test.py b/tests/integ/snowflake/ml/feature_store/feature_store_test.py index 1b39c4e6..09a8c9b3 100644 --- a/tests/integ/snowflake/ml/feature_store/feature_store_test.py +++ b/tests/integ/snowflake/ml/feature_store/feature_store_test.py @@ -2079,6 +2079,34 @@ def create_point_in_time_test_tables(full_schema_path: str) -> Tuple[str, str, s sort_cols=["ID1", "TS"], ) + @parameterized.parameters( + [ + "SELECT * FROM TABLE(RESULT_SCAN())", + "SELECT * FROM TABLE(RESULT_SCAN(LAST_QUERY_ID()))", + """SELECT * FROM TABLE( + RESULT_SCAN( + LAST_QUERY_ID() + ) + )""", + """SELECT * FROM TABLE ( + RESULT_SCAN( + + + LAST_QUERY_ID( ) + ) + )""", + ] + ) # type: ignore[misc] + def test_invalid_result_scan_query(self, query: str) -> None: + fs = self._create_feature_store(use_optimized_tag_ref=True) + self._session.sql(f"create table {fs._config.full_schema_path}.a(a int)").collect() + self._session.sql(f"select * from {fs._config.full_schema_path}.a").collect() + + e = Entity(name="e", join_keys=["a"]) + + with self.assertRaisesRegex(ValueError, ".*reading from RESULT_SCAN.*"): + FeatureView(name="foo", entities=[e], feature_df=self._session.sql(query)) + if __name__ == "__main__": absltest.main() diff --git a/tests/integ/snowflake/ml/modeling/impute/simple_imputer_test.py b/tests/integ/snowflake/ml/modeling/impute/simple_imputer_test.py index 76892f74..f01567dc 100644 --- a/tests/integ/snowflake/ml/modeling/impute/simple_imputer_test.py +++ b/tests/integ/snowflake/ml/modeling/impute/simple_imputer_test.py @@ -3,12 +3,12 @@ import pickle import sys import 
tempfile -from typing import List -from unittest import TestCase +from typing import Any, Dict, List import cloudpickle import joblib import numpy as np +from absl.testing import parameterized from absl.testing.absltest import main from sklearn.impute import SimpleImputer as SklearnSimpleImputer @@ -29,13 +29,14 @@ ) -class SimpleImputerTest(TestCase): +class SimpleImputerTest(parameterized.TestCase): """Test SimpleImputer.""" def setUp(self) -> None: """Creates Snowpark and Snowflake environments for testing.""" self._session = Session.builder.configs(SnowflakeLoginOptions()).create() self._to_be_deleted_files: List[str] = [] + self._retrieve_dataset() def tearDown(self) -> None: self._session.close() @@ -61,7 +62,15 @@ def test_inconsistent_input_col_type(self) -> None: with self.assertRaisesRegex(TypeError, "Inconsistent input column types."): simple_imputer.fit(df) - def test_fit(self) -> None: + def _retrieve_dataset(self) -> None: + df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) + self._dataset = {"pandas_dataframe": df_pandas, "snowpark_dataframe": df} + + def _convert_statistics_numpy_array(self, arr: Dict[str, Any]) -> np.typing.NDArray: + return np.array(list(arr.values())) + + @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"]) # type: ignore[misc] + def test_fit(self, dataset_type) -> None: """ Verify fitted categories. @@ -72,19 +81,19 @@ def test_fit(self) -> None: """ input_cols = NUMERIC_COLS output_cols = OUTPUT_COLS - df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols) - simple_imputer.fit(df) + simple_imputer.fit(self._dataset[dataset_type]) simple_imputer_sklearn = SklearnSimpleImputer() - simple_imputer_sklearn.fit(df_pandas[input_cols]) + simple_imputer_sklearn.fit(self._dataset["pandas_dataframe"][input_cols]) - statistics_numpy = np.array(list(simple_imputer.statistics_.values())) + statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_) np.testing.assert_equal(statistics_numpy, simple_imputer_sklearn.statistics_) - def test_fit_constant(self) -> None: + @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"]) # type: ignore[misc] + def test_fit_constant(self, dataset_type) -> None: """ Verify constant fit statistics. @@ -96,21 +105,21 @@ def test_fit_constant(self) -> None: input_cols = NUMERIC_COLS output_cols = OUTPUT_COLS fill_value = 2 - df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) simple_imputer = SimpleImputer( input_cols=input_cols, output_cols=output_cols, strategy="constant", fill_value=fill_value ) - simple_imputer.fit(df) + simple_imputer.fit(self._dataset[dataset_type]) simple_imputer_sklearn = SklearnSimpleImputer(strategy="constant", fill_value=fill_value) - simple_imputer_sklearn.fit(df_pandas[input_cols]) + simple_imputer_sklearn.fit(self._dataset["pandas_dataframe"][input_cols]) - statistics_numpy = np.array(list(simple_imputer.statistics_.values())) + statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_) np.testing.assert_equal(statistics_numpy, simple_imputer_sklearn.statistics_) - def test_fit_constant_no_fill_numeric(self) -> None: + @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"]) # type: ignore[misc] + def test_fit_constant_no_fill_numeric(self, dataset_type) -> None: """ Verify constant fit statistics with no fill value specified. 
@@ -121,19 +130,42 @@ def test_fit_constant_no_fill_numeric(self) -> None:
         """
         input_cols = NUMERIC_COLS
         output_cols = OUTPUT_COLS
-        df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA)
 
         simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols, strategy="constant")
-        simple_imputer.fit(df)
+        simple_imputer.fit(self._dataset[dataset_type])
 
         simple_imputer_sklearn = SklearnSimpleImputer(strategy="constant")
-        simple_imputer_sklearn.fit(df_pandas[input_cols])
+        simple_imputer_sklearn.fit(self._dataset["pandas_dataframe"][input_cols])
+
+        statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_)
+
+        np.testing.assert_equal(statistics_numpy, simple_imputer_sklearn.statistics_)
+
+    @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"])  # type: ignore[misc]
+    def test_fit_constant_no_fill_numeric_pd_dataframe(self, dataset_type) -> None:
+        """
+        Verify constant fit statistics with no fill value specified.
+
+        Raises
+        ------
+        AssertionError
+            If the fit result differs from the one generated by Sklearn.
+        """
+        input_cols = NUMERIC_COLS
+        output_cols = OUTPUT_COLS
+
+        simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols, strategy="constant")
+        simple_imputer.fit(self._dataset[dataset_type])
+
+        simple_imputer_sklearn = SklearnSimpleImputer(strategy="constant")
+        simple_imputer_sklearn.fit(self._dataset["pandas_dataframe"][input_cols])
 
-        statistics_numpy = np.array(list(simple_imputer.statistics_.values()))
+        statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_)
 
         np.testing.assert_equal(statistics_numpy, simple_imputer_sklearn.statistics_)
 
-    def test_fit_all_missing(self) -> None:
+    @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"])  # type: ignore[misc]
+    def test_fit_all_missing(self, dataset_type) -> None:
         """
         Verify fit statistics when the data is missing.
@@ -144,19 +176,19 @@
         """
         input_cols = NUMERIC_COLS
         output_cols = OUTPUT_COLS
         df_pandas, df = framework_utils.get_df(self._session, DATA_ALL_NONE, SCHEMA)
+        _dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas}
 
         simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols)
-        simple_imputer.fit(df)
+        simple_imputer.fit(_dataset[dataset_type])
 
         simple_imputer_sklearn = SklearnSimpleImputer()
         simple_imputer_sklearn.fit(df_pandas[input_cols])
 
-        statistics_numpy = np.array(list(simple_imputer.statistics_.values()))
+        statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_)
 
         np.testing.assert_allclose(statistics_numpy, simple_imputer_sklearn.statistics_, equal_nan=True)
 
-    def test_fit_all_missing_constant(self) -> None:
+    @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"])  # type: ignore[misc]
+    def test_fit_all_missing_constant(self, dataset_type) -> None:
         """
         Verify constant fill value when data is missing.
@@ -168,20 +200,22 @@ def test_fit_all_missing_constant(self) -> None: input_cols = NUMERIC_COLS output_cols = OUTPUT_COLS df_pandas, df = framework_utils.get_df(self._session, DATA_ALL_NONE, SCHEMA) + _dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} fill_value = "foo" simple_imputer = SimpleImputer( input_cols=input_cols, output_cols=output_cols, strategy="constant", fill_value=fill_value ) - simple_imputer.fit(df) + simple_imputer.fit(_dataset[dataset_type]) simple_imputer_sklearn = SklearnSimpleImputer(strategy="constant", fill_value=fill_value) simple_imputer_sklearn.fit(df_pandas[input_cols]) - statistics_numpy = np.array(list(simple_imputer.statistics_.values())) + statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_) np.testing.assert_equal(statistics_numpy, simple_imputer_sklearn.statistics_) - def test_fit_all_missing_categorial_keep_empty_features_false(self) -> None: + @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"]) # type: ignore[misc] + def test_fit_all_missing_categorial_keep_empty_features_false(self, dataset_type) -> None: """ Verify fit statistics when the data is missing. @@ -193,20 +227,22 @@ def test_fit_all_missing_categorial_keep_empty_features_false(self) -> None: input_cols = CATEGORICAL_COLS output_cols = OUTPUT_COLS df_pandas, df = framework_utils.get_df(self._session, DATA_ALL_NONE, SCHEMA) + _dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} # TODO(hayu): [SNOW-752265] Support SimpleImputer keep_empty_features. # Add back `keep_empty_features=False` when supported. simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols) - simple_imputer.fit(df) + simple_imputer.fit(_dataset[dataset_type]) simple_imputer_sklearn = SklearnSimpleImputer() simple_imputer_sklearn.fit(df_pandas[input_cols]) - statistics_numpy = np.array(list(simple_imputer.statistics_.values())) + statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_) np.testing.assert_allclose(statistics_numpy, simple_imputer_sklearn.statistics_, equal_nan=True) - def test_fit_all_missing_keep_missing_false(self) -> None: + @parameterized.product(dataset_type=["pandas_dataframe", "snowpark_dataframe"]) # type: ignore[misc] + def test_fit_all_missing_keep_missing_false(self, dataset_type) -> None: """ Verify fit statistics when the data is missing. @@ -218,16 +254,17 @@ def test_fit_all_missing_keep_missing_false(self) -> None: input_cols = NUMERIC_COLS output_cols = OUTPUT_COLS df_pandas, df = framework_utils.get_df(self._session, DATA_ALL_NONE, SCHEMA) + _dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} # TODO(hayu): [SNOW-752265] Support SimpleImputer keep_empty_features. # Add back `keep_empty_features=False` when supported. 
simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols) - simple_imputer.fit(df) + simple_imputer.fit(_dataset[dataset_type]) simple_imputer_sklearn = SklearnSimpleImputer() simple_imputer_sklearn.fit(df_pandas[input_cols]) - statistics_numpy = np.array(list(simple_imputer.statistics_.values())) + statistics_numpy = self._convert_statistics_numpy_array(simple_imputer.statistics_) np.testing.assert_allclose(statistics_numpy, simple_imputer_sklearn.statistics_, equal_nan=True) @@ -251,7 +288,11 @@ def test_reset(self) -> None: with self.assertRaises(AttributeError): simple_imputer.statistics_ - def test_transform_snowpark(self) -> None: + @parameterized.product( + fit_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + predict_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + ) # type: ignore[misc] + def test_transform_snowpark(self, fit_dataset_type, predict_dataset_type) -> None: """ Verify the transformed results for an imputation of mean values in numeric columns. @@ -263,20 +304,33 @@ def test_transform_snowpark(self) -> None: input_cols = NUMERIC_COLS output_cols = OUTPUT_COLS df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) + _fit_dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols) - simple_imputer.fit(df) + simple_imputer.fit(_fit_dataset[fit_dataset_type]) df_none_nan_pandas, df_none_nan = framework_utils.get_df(self._session, DATA_NONE_NAN, SCHEMA) - transformed_df = simple_imputer.transform(df_none_nan) + _predict_dataset = {"snowpark_dataframe": df_none_nan, "pandas_dataframe": df_none_nan_pandas} + transformed_df = simple_imputer.transform(_predict_dataset[predict_dataset_type]) sklearn_simple_imputer = SklearnSimpleImputer() sklearn_simple_imputer.fit(df_pandas[input_cols]) sklearn_transformed_dataset = sklearn_simple_imputer.transform(df_none_nan_pandas[input_cols]) - np.testing.assert_allclose(transformed_df[output_cols].to_pandas().to_numpy(), sklearn_transformed_dataset) + transformed_dataset = ( + transformed_df[output_cols].to_pandas().to_numpy() + if predict_dataset_type == "snowpark_dataframe" + else transformed_df[output_cols] + ) + np.testing.assert_allclose(transformed_dataset, sklearn_transformed_dataset) - def test_transform_snowpark_output_columns_same_as_input_columns(self) -> None: + @parameterized.product( + fit_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + predict_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + ) # type: ignore[misc] + def test_transform_snowpark_output_columns_same_as_input_columns( + self, fit_dataset_type, predict_dataset_type + ) -> None: """ Verify the transformed results for an imputation of mean values in numeric columns. 
@@ -288,20 +342,34 @@ def test_transform_snowpark_output_columns_same_as_input_columns(self) -> None: input_cols = NUMERIC_COLS output_cols = input_cols df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) + _fit_dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols) - simple_imputer.fit(df) + simple_imputer.fit(_fit_dataset[fit_dataset_type]) df_none_nan_pandas, df_none_nan = framework_utils.get_df(self._session, DATA_NONE_NAN, SCHEMA) - transformed_df = simple_imputer.transform(df_none_nan) + _predict_dataset = {"snowpark_dataframe": df_none_nan, "pandas_dataframe": df_none_nan_pandas} + transformed_df = simple_imputer.transform(_predict_dataset[predict_dataset_type]) sklearn_simple_imputer = SklearnSimpleImputer() sklearn_simple_imputer.fit(df_pandas[input_cols]) sklearn_transformed_dataset = sklearn_simple_imputer.transform(df_none_nan_pandas[input_cols]) - np.testing.assert_allclose(transformed_df[output_cols].to_pandas().to_numpy(), sklearn_transformed_dataset) + transformed_dataset = ( + transformed_df[output_cols].to_pandas().to_numpy() + if predict_dataset_type == "snowpark_dataframe" + else transformed_df[output_cols] + ) + + np.testing.assert_allclose(transformed_dataset, sklearn_transformed_dataset) - def test_transform_snowpark_output_columns_one_equal_to_input_column(self) -> None: + @parameterized.product( + fit_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + predict_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + ) # type: ignore[misc] + def test_transform_snowpark_output_columns_one_equal_to_input_column( + self, fit_dataset_type, predict_dataset_type + ) -> None: """ Verify the transformed results for an imputation of mean values in numeric columns. 
@@ -313,20 +381,32 @@ def test_transform_snowpark_output_columns_one_equal_to_input_column(self) -> No input_cols = NUMERIC_COLS output_cols = [NUMERIC_COLS[0], OUTPUT_COLS[0]] df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) + _fit_dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols) - simple_imputer.fit(df) + simple_imputer.fit(_fit_dataset[fit_dataset_type]) df_none_nan_pandas, df_none_nan = framework_utils.get_df(self._session, DATA_NONE_NAN, SCHEMA) - transformed_df = simple_imputer.transform(df_none_nan) + _predict_dataset = {"snowpark_dataframe": df_none_nan, "pandas_dataframe": df_none_nan_pandas} + transformed_df = simple_imputer.transform(_predict_dataset[predict_dataset_type]) sklearn_simple_imputer = SklearnSimpleImputer() sklearn_simple_imputer.fit(df_pandas[input_cols]) sklearn_transformed_dataset = sklearn_simple_imputer.transform(df_none_nan_pandas[input_cols]) - np.testing.assert_allclose(transformed_df[output_cols].to_pandas().to_numpy(), sklearn_transformed_dataset) + transformed_dataset = ( + transformed_df[output_cols].to_pandas().to_numpy() + if predict_dataset_type == "snowpark_dataframe" + else transformed_df[output_cols] + ) + + np.testing.assert_allclose(transformed_dataset, sklearn_transformed_dataset) - def test_transform_snowpark_missing_values_not_nan(self) -> None: + @parameterized.product( + fit_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + predict_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + ) # type: ignore[misc] + def test_transform_snowpark_missing_values_not_nan(self, fit_dataset_type, predict_dataset_type) -> None: """ Verify imputed data when the missing value specified is not None or nan. @@ -338,22 +418,35 @@ def test_transform_snowpark_missing_values_not_nan(self) -> None: input_cols = NUMERIC_COLS output_cols = OUTPUT_COLS df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) + _fit_dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} for strategy in ["mean", "median", "most_frequent", "constant"]: simple_imputer = SimpleImputer( strategy=strategy, input_cols=input_cols, output_cols=output_cols, missing_values=-1.0 ) - simple_imputer.fit(df) + simple_imputer.fit(_fit_dataset[fit_dataset_type]) - transformed_df = simple_imputer.transform(df) + transformed_df = simple_imputer.transform(_fit_dataset[predict_dataset_type]) sklearn_simple_imputer = SklearnSimpleImputer(strategy=strategy, missing_values=-1.0) sklearn_simple_imputer.fit(df_pandas[input_cols]) sklearn_transformed_dataset = sklearn_simple_imputer.transform(df_pandas[input_cols]) - np.testing.assert_allclose(transformed_df[output_cols].to_pandas().to_numpy(), sklearn_transformed_dataset) + transformed_dataset = ( + transformed_df[output_cols].to_pandas().to_numpy() + if predict_dataset_type == "snowpark_dataframe" + else transformed_df[output_cols] + ) - def test_transform_snowpark_most_frequent_strategy_categorical(self) -> None: + np.testing.assert_allclose(transformed_dataset, sklearn_transformed_dataset) + + @parameterized.product( + fit_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + predict_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + ) # type: ignore[misc] + def test_transform_snowpark_most_frequent_strategy_categorical( + self, fit_dataset_type, predict_dataset_type + ) -> None: """ Verify imputed data for categorical columns. 
@@ -365,20 +458,28 @@ def test_transform_snowpark_most_frequent_strategy_categorical(self) -> None: input_cols = CATEGORICAL_COLS output_cols = OUTPUT_COLS df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) + _fit_dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} simple_imputer = SimpleImputer( input_cols=input_cols, output_cols=output_cols, missing_values=None, strategy="most_frequent" ) - simple_imputer.fit(df) + simple_imputer.fit(_fit_dataset[fit_dataset_type]) df_none_nan_pandas, df_none_nan = framework_utils.get_df(self._session, DATA_NONE_NAN, SCHEMA) - transformed_df = simple_imputer.transform(df_none_nan) + _predict_dataset = {"snowpark_dataframe": df_none_nan, "pandas_dataframe": df_none_nan_pandas} + transformed_df = simple_imputer.transform(_predict_dataset[predict_dataset_type]) sklearn_simple_imputer = SklearnSimpleImputer(missing_values=None, strategy="most_frequent") sklearn_simple_imputer.fit(df_pandas[input_cols]) sklearn_transformed_dataset = sklearn_simple_imputer.transform(df_none_nan_pandas[input_cols]) - np.testing.assert_equal(transformed_df[output_cols].to_pandas().to_numpy(), sklearn_transformed_dataset) + transformed_dataset = ( + transformed_df[output_cols].to_pandas().to_numpy() + if predict_dataset_type == "snowpark_dataframe" + else transformed_df[output_cols] + ) + + np.testing.assert_equal(transformed_dataset, sklearn_transformed_dataset) def test_transform_snowpark_most_frequent_strategy_categorical_mixed_types(self) -> None: """ @@ -405,7 +506,11 @@ def test_transform_snowpark_most_frequent_strategy_categorical_mixed_types(self) np.testing.assert_equal(transformed_df[output_cols].to_pandas().to_numpy(), sklearn_transformed_dataset) - def test_transform_snowpark_most_frequent_strategy_numeric(self) -> None: + @parameterized.product( + fit_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + predict_dataset_type=["pandas_dataframe", "snowpark_dataframe"], + ) # type: ignore[misc] + def test_transform_snowpark_most_frequent_strategy_numeric(self, fit_dataset_type, predict_dataset_type) -> None: """ Verify imputed data for "most frequent" strategy and numerical data. 
@@ -417,18 +522,26 @@ def test_transform_snowpark_most_frequent_strategy_numeric(self) -> None: input_cols = NUMERIC_COLS output_cols = OUTPUT_COLS df_pandas, df = framework_utils.get_df(self._session, DATA, SCHEMA) + _fit_dataset = {"snowpark_dataframe": df, "pandas_dataframe": df_pandas} simple_imputer = SimpleImputer(input_cols=input_cols, output_cols=output_cols, strategy="most_frequent") - simple_imputer.fit(df) + simple_imputer.fit(_fit_dataset[fit_dataset_type]) df_none_nan_pandas, df_none_nan = framework_utils.get_df(self._session, DATA_NONE_NAN, SCHEMA) - transformed_df = simple_imputer.transform(df_none_nan) + _predict_dataset = {"snowpark_dataframe": df_none_nan, "pandas_dataframe": df_none_nan_pandas} + transformed_df = simple_imputer.transform(_predict_dataset[predict_dataset_type]) simple_imputer_sklearn = SklearnSimpleImputer(strategy="most_frequent") simple_imputer_sklearn.fit(df_pandas[input_cols]) sklearn_transformed_dataset = simple_imputer_sklearn.transform(df_none_nan_pandas[input_cols]) - np.testing.assert_allclose(transformed_df[output_cols].to_pandas().to_numpy(), sklearn_transformed_dataset) + transformed_dataset = ( + transformed_df[output_cols].to_pandas().to_numpy() + if predict_dataset_type == "snowpark_dataframe" + else transformed_df[output_cols] + ) + + np.testing.assert_allclose(transformed_dataset, sklearn_transformed_dataset) def test_transform_sklearn(self) -> None: """ diff --git a/tests/integ/snowflake/ml/registry/model/BUILD.bazel b/tests/integ/snowflake/ml/registry/model/BUILD.bazel index 7aa48311..1d23d45f 100644 --- a/tests/integ/snowflake/ml/registry/model/BUILD.bazel +++ b/tests/integ/snowflake/ml/registry/model/BUILD.bazel @@ -17,6 +17,7 @@ py_library( "//snowflake/ml/model:type_hints", "//snowflake/ml/registry", "//snowflake/ml/utils:connection_params", + "//tests/integ/snowflake/ml/test_utils:common_test_base", "//tests/integ/snowflake/ml/test_utils:db_manager", "//tests/integ/snowflake/ml/test_utils:test_env_utils", ], @@ -168,7 +169,7 @@ py_test( py_test( name = "registry_sentence_transformers_model_test", srcs = ["registry_sentence_transformers_model_test.py"], - shard_count = 2, + shard_count = 4, deps = [ ":registry_model_test_base", "//snowflake/ml/_internal:env_utils", @@ -176,3 +177,13 @@ py_test( "//tests/integ/snowflake/ml/test_utils:dataframe_utils", ], ) + +py_test( + name = "registry_in_sproc_test", + srcs = ["registry_in_sproc_test.py"], + deps = [ + ":registry_model_test_base", + "//snowflake/ml/_internal/utils:identifier", + "//tests/integ/snowflake/ml/test_utils:model_factory", + ], +) diff --git a/tests/integ/snowflake/ml/registry/model/fully_qualified_name_test.py b/tests/integ/snowflake/ml/registry/model/fully_qualified_name_test.py index 61152aa0..87c4129f 100644 --- a/tests/integ/snowflake/ml/registry/model/fully_qualified_name_test.py +++ b/tests/integ/snowflake/ml/registry/model/fully_qualified_name_test.py @@ -11,7 +11,7 @@ def setUp(self) -> None: self.database_name = self._test_db self.schema_name = self._test_schema self._registry = self.registry - self.registry = registry.Registry(self._session, database_name="foo", schema_name="bar") + self.registry = registry.Registry(self.session, database_name="foo", schema_name="bar") def test_random_version_name(self) -> None: iris_X, iris_y = datasets.load_iris(return_X_y=True) diff --git a/tests/integ/snowflake/ml/registry/model/multiple_model_test.py b/tests/integ/snowflake/ml/registry/model/multiple_model_test.py index 6a6fc11d..056bc865 100644 --- 
a/tests/integ/snowflake/ml/registry/model/multiple_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/multiple_model_test.py @@ -48,7 +48,7 @@ def test_multiple_model(self) -> None: self.registry.log_model(lm_2, model_name=name_2, version_name=version, sample_input_data=pd_df) res = ( - self._session.sql(f"SELECT {name_1}!predict(1):output as A, {name_2}!predict(1):output as B") + self.session.sql(f"SELECT {name_1}!predict(1):output as A, {name_2}!predict(1):output as B") .collect()[0] .as_dict() ) diff --git a/tests/integ/snowflake/ml/registry/model/registry_catboost_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_catboost_model_test.py index 9ff61178..efbab08f 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_catboost_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_catboost_model_test.py @@ -2,7 +2,7 @@ import inflection import numpy as np import pandas as pd -from absl.testing import absltest +from absl.testing import absltest, parameterized from sklearn import datasets, model_selection from tests.integ.snowflake.ml.registry.model import registry_model_test_base @@ -10,8 +10,12 @@ class TestRegistryLightGBMModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_catboost_classifier( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -22,7 +26,7 @@ def test_catboost_classifier( classifier = catboost.CatBoostClassifier() classifier.fit(cal_X_train, cal_y_train) - self._test_registry_model( + getattr(self, registry_test_fn)( model=classifier, sample_input_data=cal_X_test, prediction_assert_fns={ @@ -39,8 +43,12 @@ def test_catboost_classifier( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_catboost_classifier_sp( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -66,9 +74,9 @@ def test_catboost_classifier_sp( axis=1, ) - cal_data_sp_df_train = self._session.create_dataframe(cal_X_train) - cal_data_sp_df_test = self._session.create_dataframe(cal_X_test) - self._test_registry_model( + cal_data_sp_df_train = self.session.create_dataframe(cal_X_train) + cal_data_sp_df_test = self.session.create_dataframe(cal_X_test) + getattr(self, registry_test_fn)( model=classifier, sample_input_data=cal_data_sp_df_train, prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/registry/model/registry_custom_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_custom_model_test.py index 2b60de72..0f64503a 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_custom_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_custom_model_test.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd -from absl.testing import absltest +from absl.testing import absltest, parameterized from snowflake.ml.model import custom_model from snowflake.snowpark._internal import utils as snowpark_utils @@ -63,8 +63,12 @@ def predict(self, input: pd.DataFrame) -> pd.DataFrame: class TestRegistryCustomModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_async_model_composition( self, + 
registry_test_fn: str, ) -> None: async def _test(self: "TestRegistryCustomModelInteg") -> None: arr = np.random.randint(100, size=(10000, 3)) @@ -77,7 +81,7 @@ async def _test(self: "TestRegistryCustomModelInteg") -> None: } ) acm = AsyncComposeModel(model_context) - self._test_registry_model( + getattr(self, registry_test_fn)( model=acm, sample_input_data=pd_df, prediction_assert_fns={ @@ -93,14 +97,18 @@ async def _test(self: "TestRegistryCustomModelInteg") -> None: asyncio.get_event_loop().run_until_complete(_test(self)) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_sp( self, + registry_test_fn: str, ) -> None: lm = DemoModel(custom_model.ModelContext()) arr = [[1, 2, 3], [4, 2, 5]] - sp_df = self._session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) + sp_df = self.session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) y_df_expected = pd.DataFrame([[1, 2, 3, 1], [4, 2, 5, 4]], columns=["c1", "c2", "c3", "output"]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=sp_df, prediction_assert_fns={ @@ -108,16 +116,20 @@ def test_custom_demo_model_sp( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_decimal( self, + registry_test_fn: str, ) -> None: import decimal lm = DemoModel(custom_model.ModelContext()) arr = [[decimal.Decimal(1.2), 2.3, 3.4], [decimal.Decimal(4.6), 2.7, 5.5]] - sp_df = self._session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) + sp_df = self.session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) y_df_expected = pd.DataFrame([[1.2, 2.3, 3.4, 1.2], [4.6, 2.7, 5.5, 4.6]], columns=["c1", "c2", "c3", "output"]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=sp_df, prediction_assert_fns={ @@ -125,19 +137,23 @@ def test_custom_demo_model_decimal( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_sp_one_query( self, + registry_test_fn: str, ) -> None: lm = DemoModel(custom_model.ModelContext()) arr = [[1, 2, 3], [4, 2, 5]] - sp_df = self._session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) + sp_df = self.session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) table_name = snowpark_utils.random_name_for_temp_object(snowpark_utils.TempObjectType.TABLE) sp_df.write.save_as_table(table_name, mode="errorifexists", table_type="temporary") - sp_df_2 = self._session.table(table_name) + sp_df_2 = self.session.table(table_name) assert len(sp_df_2.queries["queries"]) == 1, sp_df_2.queries assert len(sp_df_2.queries["post_actions"]) == 0, sp_df_2.queries y_df_expected = pd.DataFrame([[1, 2, 3, 1], [4, 2, 5, 4]], columns=["c1", "c2", "c3", "output"]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=sp_df_2, prediction_assert_fns={ @@ -145,14 +161,18 @@ def test_custom_demo_model_sp_one_query( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_sp_quote( self, + registry_test_fn: str, ) -> None: lm = DemoModelSPQuote(custom_model.ModelContext()) arr = [[1, 2, 3], [4, 2, 5]] - sp_df = self._session.create_dataframe(arr, 
schema=['"""c1"""', '"""c2"""', '"""c3"""']) + sp_df = self.session.create_dataframe(arr, schema=['"""c1"""', '"""c2"""', '"""c3"""']) pd_df = pd.DataFrame(arr, columns=['"c1"', '"c2"', '"c3"']) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=sp_df, prediction_assert_fns={ @@ -166,15 +186,19 @@ def test_custom_demo_model_sp_quote( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_sp_mix_1( self, + registry_test_fn: str, ) -> None: lm = DemoModel(custom_model.ModelContext()) arr = [[1, 2, 3], [4, 2, 5]] pd_df = pd.DataFrame(arr, columns=["c1", "c2", "c3"]) - sp_df = self._session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) + sp_df = self.session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) y_df_expected = pd.concat([pd_df, pd_df[["c1"]].rename(columns={"c1": "output"})], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=pd_df, prediction_assert_fns={ @@ -185,14 +209,18 @@ def test_custom_demo_model_sp_mix_1( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_sp_mix_2( self, + registry_test_fn: str, ) -> None: lm = DemoModel(custom_model.ModelContext()) arr = [[1, 2, 3], [4, 2, 5]] pd_df = pd.DataFrame(arr, columns=["c1", "c2", "c3"]) - sp_df = self._session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) - self._test_registry_model( + sp_df = self.session.create_dataframe(arr, schema=['"c1"', '"c2"', '"c3"']) + getattr(self, registry_test_fn)( model=lm, sample_input_data=sp_df, prediction_assert_fns={ @@ -206,13 +234,17 @@ def test_custom_demo_model_sp_mix_2( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_array( self, + registry_test_fn: str, ) -> None: lm = DemoModelArray(custom_model.ModelContext()) arr = np.array([[1, 2, 3], [4, 2, 5]]) pd_df = pd.DataFrame(arr, columns=["c1", "c2", "c3"]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=pd_df, prediction_assert_fns={ @@ -226,12 +258,16 @@ def test_custom_demo_model_array( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_str( self, + registry_test_fn: str, ) -> None: lm = DemoModel(custom_model.ModelContext()) pd_df = pd.DataFrame([["Yogiri", "Civia", "Echo"], ["Artia", "Doris", "Rosalyn"]], columns=["c1", "c2", "c3"]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=pd_df, prediction_assert_fns={ @@ -245,15 +281,19 @@ def test_custom_demo_model_str( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_array_sp( self, + registry_test_fn: str, ) -> None: lm = DemoModelArray(custom_model.ModelContext()) arr = np.array([[1, 2, 3], [4, 2, 5]]) pd_df = pd.DataFrame(arr, columns=["c1", "c2", "c3"]) - sp_df = self._session.create_dataframe(pd_df) + sp_df = self.session.create_dataframe(pd_df) y_df_expected = pd.concat([pd_df, pd.DataFrame(data={"output": [[1, 2, 3], [4, 2, 5]]})], axis=1) - self._test_registry_model( + getattr(self, 
registry_test_fn)( model=lm, sample_input_data=sp_df, prediction_assert_fns={ @@ -264,14 +304,18 @@ def test_custom_demo_model_array_sp( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_str_sp( self, + registry_test_fn: str, ) -> None: lm = DemoModel(custom_model.ModelContext()) pd_df = pd.DataFrame([["Yogiri", "Civia", "Echo"], ["Artia", "Doris", "Rosalyn"]], columns=["c1", "c2", "c3"]) - sp_df = self._session.create_dataframe(pd_df) + sp_df = self.session.create_dataframe(pd_df) y_df_expected = pd.concat([pd_df, pd.DataFrame(data={"output": ["Yogiri", "Artia"]})], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=sp_df, prediction_assert_fns={ @@ -282,12 +326,16 @@ def test_custom_demo_model_str_sp( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_demo_model_array_str( self, + registry_test_fn: str, ) -> None: lm = DemoModelArray(custom_model.ModelContext()) pd_df = pd.DataFrame([["Yogiri", "Civia", "Echo"], ["Artia", "Doris", "Rosalyn"]], columns=["c1", "c2", "c3"]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=pd_df, prediction_assert_fns={ @@ -301,8 +349,12 @@ def test_custom_demo_model_array_str( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_model_with_artifacts( self, + registry_test_fn: str, ) -> None: with tempfile.TemporaryDirectory() as tmpdir: with open(os.path.join(tmpdir, "bias"), "w", encoding="utf-8") as f: @@ -312,7 +364,7 @@ def test_custom_model_with_artifacts( ) arr = np.array([[1, 2, 3], [4, 2, 5]]) pd_df = pd.DataFrame(arr, columns=["c1", "c2", "c3"]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=pd_df, prediction_assert_fns={ @@ -326,8 +378,12 @@ def test_custom_model_with_artifacts( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_custom_model_bool_sp( self, + registry_test_fn: str, ) -> None: with tempfile.TemporaryDirectory() as tmpdir: with open(os.path.join(tmpdir, "bias"), "w", encoding="utf-8") as f: @@ -337,9 +393,9 @@ def test_custom_model_bool_sp( ) arr = np.array([[1, 2, 3], [4, 2, 5]]) pd_df = pd.DataFrame(arr, columns=["c1", "c2", "c3"]) - sp_df = self._session.create_dataframe(pd_df) + sp_df = self.session.create_dataframe(pd_df) y_df_expected = pd.concat([pd_df, pd.DataFrame([False, True], columns=["output"])], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=lm, sample_input_data=sp_df, prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/registry/model/registry_huggingface_pipeline_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_huggingface_pipeline_model_test.py index 3e5f2870..30586159 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_huggingface_pipeline_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_huggingface_pipeline_model_test.py @@ -4,7 +4,7 @@ import numpy as np import pandas as pd -from absl.testing import absltest +from absl.testing import absltest, parameterized from packaging import requirements from snowflake.ml._internal import env_utils @@ -24,8 +24,12 @@ def 
tearDownClass(self) -> None: os.environ["TRANSFORMERS_CACHE"] = self._original_cache_dir self.cache_dir.cleanup() + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_conversational_pipeline( self, + registry_test_fn: str, ) -> None: # We have to import here due to cache location issue. # Only by doing so can we make the cache dir setting effective. @@ -53,7 +57,7 @@ def check_res(res: pd.DataFrame) -> None: for resp in row: self.assertIsInstance(resp, str) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -63,8 +67,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_fill_mask_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -92,7 +100,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertIn("token_str", resp[0]) self.assertIn("sequence", resp[0]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -102,8 +110,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_ner_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -129,7 +141,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertIn("start", resp[0]) self.assertIn("end", resp[0]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -139,8 +151,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_question_answering_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -171,7 +187,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertEqual(res["end"].dtype.type, np.int64) self.assertEqual(res["answer"].dtype.type, np.object_) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -181,8 +197,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_question_answering_pipeline_multiple_output( self, + registry_test_fn: str, ) -> None: import transformers @@ -217,7 +237,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertIn("end", resp[0]) self.assertIn("answer", resp[0]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -227,8 +247,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_summarization_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -255,7 +279,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertEqual(res["summary_text"].dtype.type, np.object_) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, additional_dependencies=[ str(env_utils.get_local_installed_version_of_pip_package(requirements.Requirement("sentencepiece"))) @@ -268,8 +292,12 @@ def check_res(res: pd.DataFrame) -> 
None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_table_question_answering_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -316,7 +344,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertIsInstance(res["cells"][0], list) self.assertEqual(res["aggregator"].dtype.type, np.object_) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -326,8 +354,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_text_classification_pair_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -343,7 +375,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertEqual(res["label"].dtype.type, np.object_) self.assertEqual(res["score"].dtype.type, np.float64) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -353,8 +385,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_text_classification_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -383,7 +419,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertIn("label", resp[0]) self.assertIn("score", resp[0]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -393,8 +429,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_text_generation_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -416,7 +456,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertIsInstance(resp, list) self.assertIn("generated_text", resp[0]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -426,8 +466,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_text2text_generation_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -444,7 +488,7 @@ def check_res(res: pd.DataFrame) -> None: pd.testing.assert_index_equal(res.columns, pd.Index(["generated_text"])) self.assertEqual(res["generated_text"].dtype.type, np.object_) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -454,8 +498,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_translation_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -482,7 +530,7 @@ def check_res(res: pd.DataFrame) -> None: pd.testing.assert_index_equal(res.columns, pd.Index(["translation_text"])) self.assertEqual(res["translation_text"].dtype.type, np.object_) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( @@ -492,8 +540,12 @@ def check_res(res: pd.DataFrame) -> None: }, ) + @parameterized.product( # type: ignore[misc] + 
registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_zero_shot_classification_pipeline( self, + registry_test_fn: str, ) -> None: import transformers @@ -533,7 +585,7 @@ def check_res(res: pd.DataFrame) -> None: self.assertIsInstance(res["labels"][0], list) self.assertIsInstance(res["labels"][1], list) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, prediction_assert_fns={ "": ( diff --git a/tests/integ/snowflake/ml/registry/model/registry_in_sproc_test.py b/tests/integ/snowflake/ml/registry/model/registry_in_sproc_test.py new file mode 100644 index 00000000..56ef2edb --- /dev/null +++ b/tests/integ/snowflake/ml/registry/model/registry_in_sproc_test.py @@ -0,0 +1,111 @@ +from absl.testing import absltest + +from snowflake.ml._internal.utils import identifier +from tests.integ.snowflake.ml.registry.model import registry_model_test_base +from tests.integ.snowflake.ml.test_utils import common_test_base, model_factory + +MODEL_NAME = "TEST_MODEL" +NEW_MODEL_NAME = "NEW_MODEL" +VERSION_NAME = "V1" +ADDED_VERSION_NAME = "V2" + + +class RegistryInSprocTest(registry_model_test_base.RegistryModelTestBase): + @common_test_base.CommonTestBase.sproc_test(test_owners_rights=False) + def test_workflow(self) -> None: + model, test_features, _ = model_factory.ModelFactory.prepare_sklearn_model() + self.mv_1 = self.registry.log_model( + model=model, + model_name=MODEL_NAME, + version_name=VERSION_NAME, + sample_input_data=test_features, + ) + + self.mv_2 = self.registry.log_model( + model=model, + model_name=MODEL_NAME, + version_name=ADDED_VERSION_NAME, + sample_input_data=test_features, + ) + + self.model = self.registry.get_model(MODEL_NAME) + self.assertLen(self.registry.show_models(), 1) + self.assertEqual(self.model.versions(), [self.mv_1, self.mv_2]) + self.assertLen(self.model.show_versions(), 2) + + description = "test description" + self.mv_1.description = description + self.assertEqual(self.mv_1.description, description) + + self.mv_1.set_metric("a", 1) + expected_metrics = {"a": 2, "b": 1.0, "c": True} + for k, v in expected_metrics.items(): + self.mv_1.set_metric(k, v) + + self.assertEqual(self.mv_1.get_metric("a"), expected_metrics["a"]) + self.assertDictEqual(self.mv_1.show_metrics(), expected_metrics) + + expected_metrics.pop("b") + self.mv_1.delete_metric("b") + self.assertDictEqual(self.mv_1.show_metrics(), expected_metrics) + with self.assertRaises(KeyError): + self.mv_1.get_metric("b") + + description = "test description" + self.model.description = description + self.assertEqual(self.model.description, description) + + self.assertEqual(self.model.default.version_name, VERSION_NAME) + + self.model.default = ADDED_VERSION_NAME + self.assertEqual(self.model.default.version_name, ADDED_VERSION_NAME) + + self.model.delete_version(VERSION_NAME) + self.assertLen(self.model.show_versions(), 1) + + self._tag_name1 = "MYTAG" + self._tag_name2 = '"live_version"' + + self.session.sql(f"CREATE TAG {self._tag_name1}").collect() + self.session.sql(f"CREATE TAG {self._tag_name2}").collect() + + fq_tag_name1 = identifier.get_schema_level_object_identifier(self._test_db, self._test_schema, self._tag_name1) + fq_tag_name2 = identifier.get_schema_level_object_identifier(self._test_db, self._test_schema, self._tag_name2) + + self.assertDictEqual({}, self.model.show_tags()) + self.assertIsNone(self.model.get_tag(self._tag_name1)) + self.model.set_tag(self._tag_name1, "val1") + self.assertEqual( + "val1", + 
self.model.get_tag(fq_tag_name1), + ) + self.assertDictEqual( + {fq_tag_name1: "val1"}, + self.model.show_tags(), + ) + self.model.set_tag(fq_tag_name2, "v2") + self.assertEqual("v2", self.model.get_tag(self._tag_name2)) + self.assertDictEqual( + { + fq_tag_name1: "val1", + fq_tag_name2: "v2", + }, + self.model.show_tags(), + ) + self.model.unset_tag(fq_tag_name2) + self.assertDictEqual( + {fq_tag_name1: "val1"}, + self.model.show_tags(), + ) + self.model.unset_tag(self._tag_name1) + self.assertDictEqual({}, self.model.show_tags()) + + self.model.rename(NEW_MODEL_NAME) + self.assertEqual(self.model.name, NEW_MODEL_NAME) + self.registry.delete_model(NEW_MODEL_NAME) + + self.assertLen(self.registry.show_models(), 0) + + +if __name__ == "__main__": + absltest.main() diff --git a/tests/integ/snowflake/ml/registry/model/registry_lightgbm_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_lightgbm_model_test.py index 39445c9b..9da9b06e 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_lightgbm_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_lightgbm_model_test.py @@ -2,7 +2,7 @@ import lightgbm import numpy as np import pandas as pd -from absl.testing import absltest +from absl.testing import absltest, parameterized from sklearn import datasets, model_selection from tests.integ.snowflake.ml.registry.model import registry_model_test_base @@ -10,8 +10,12 @@ class TestRegistryLightGBMModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_lightgbm_classifier( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -22,7 +26,7 @@ def test_lightgbm_classifier( classifier = lightgbm.LGBMClassifier() classifier.fit(cal_X_train, cal_y_train) - self._test_registry_model( + getattr(self, registry_test_fn)( model=classifier, sample_input_data=cal_X_test, prediction_assert_fns={ @@ -39,8 +43,12 @@ def test_lightgbm_classifier( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_lightgbm_classifier_sp( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -66,9 +74,9 @@ def test_lightgbm_classifier_sp( axis=1, ) - cal_data_sp_df_train = self._session.create_dataframe(cal_X_train) - cal_data_sp_df_test = self._session.create_dataframe(cal_X_test) - self._test_registry_model( + cal_data_sp_df_train = self.session.create_dataframe(cal_X_train) + cal_data_sp_df_test = self.session.create_dataframe(cal_X_test) + getattr(self, registry_test_fn)( model=classifier, sample_input_data=cal_data_sp_df_train, prediction_assert_fns={ @@ -83,8 +91,12 @@ def test_lightgbm_classifier_sp( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_lightgbm_booster( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -95,7 +107,7 @@ def test_lightgbm_booster( regressor = lightgbm.train({"objective": "regression"}, lightgbm.Dataset(cal_X_train, label=cal_y_train)) y_pred = regressor.predict(cal_X_test) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regressor, sample_input_data=cal_X_test, prediction_assert_fns={ @@ 
-106,8 +118,12 @@ def test_lightgbm_booster( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_lightgbm_booster_sp( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -124,9 +140,9 @@ def test_lightgbm_booster_sp( axis=1, ) - cal_data_sp_df_train = self._session.create_dataframe(cal_X_train) - cal_data_sp_df_test = self._session.create_dataframe(cal_X_test) - self._test_registry_model( + cal_data_sp_df_train = self.session.create_dataframe(cal_X_train) + cal_data_sp_df_test = self.session.create_dataframe(cal_X_test) + getattr(self, registry_test_fn)( model=regressor, sample_input_data=cal_data_sp_df_train, prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/registry/model/registry_mlflow_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_mlflow_model_test.py index 4b97d63b..a87e51ce 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_mlflow_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_mlflow_model_test.py @@ -2,7 +2,7 @@ import mlflow import numpy as np -from absl.testing import absltest +from absl.testing import absltest, parameterized from sklearn import datasets, ensemble, model_selection from snowflake.ml._internal import env @@ -11,8 +11,12 @@ class TestRegistryMLFlowModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_mlflow_model_deploy_sklearn_df( self, + registry_test_fn: str, ) -> None: db = datasets.load_diabetes(as_frame=True) X_train, X_test, y_train, y_test = model_selection.train_test_split(db.data, db.target) @@ -49,7 +53,7 @@ def test_mlflow_model_deploy_sklearn_df( run_id = run.info.run_id - self._test_registry_model( + getattr(self, registry_test_fn)( model=mlflow.pyfunc.load_model(f"runs:/{run_id}/model"), prediction_assert_fns={ "predict": ( @@ -60,8 +64,12 @@ def test_mlflow_model_deploy_sklearn_df( options={"relax_version": False}, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_mlflow_model_deploy_sklearn( self, + registry_test_fn: str, ) -> None: db = datasets.load_diabetes() X_train, X_test, y_train, y_test = model_selection.train_test_split(db.data, db.target) @@ -100,7 +108,7 @@ def test_mlflow_model_deploy_sklearn( X_test_df = numpy_handler.SeqOfNumpyArrayHandler.convert_to_df([X_test]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=mlflow.pyfunc.load_model(f"runs:/{run_id}/model"), prediction_assert_fns={ "predict": ( diff --git a/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py b/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py index b0222a3d..5577d3a9 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py +++ b/tests/integ/snowflake/ml/registry/model/registry_model_test_base.py @@ -2,19 +2,21 @@ import uuid from typing import Any, Callable, Dict, List, Optional, Tuple -from absl.testing import parameterized - from snowflake.ml.model import type_hints as model_types from snowflake.ml.registry import registry -from snowflake.ml.utils import connection_params -from snowflake.snowpark import Session -from tests.integ.snowflake.ml.test_utils import db_manager, test_env_utils +from 
tests.integ.snowflake.ml.test_utils import ( + common_test_base, + db_manager, + test_env_utils, +) + +class RegistryModelTestBase(common_test_base.CommonTestBase): + REGISTRY_TEST_FN_LIST = ["_test_registry_model"] -class RegistryModelTestBase(parameterized.TestCase): def setUp(self) -> None: """Creates Snowpark and Snowflake environments for testing.""" - login_options = connection_params.SnowflakeLoginOptions() + super().setUp() self._run_id = uuid.uuid4().hex self._test_db = db_manager.TestObjectNameGenerator.get_snowml_test_object_name(self._run_id, "db").upper() @@ -22,22 +24,15 @@ def setUp(self) -> None: self._run_id, "schema" ).upper() - self._session = Session.builder.configs( - { - **login_options, - **{"database": self._test_db, "schema": self._test_schema}, - } - ).create() - - self._db_manager = db_manager.DBManager(self._session) + self._db_manager = db_manager.DBManager(self.session) self._db_manager.create_database(self._test_db) self._db_manager.create_schema(self._test_schema) self._db_manager.cleanup_databases(expire_hours=6) - self.registry = registry.Registry(self._session) + self.registry = registry.Registry(self.session) def tearDown(self) -> None: self._db_manager.drop_database(self._test_db) - self._session.close() + super().tearDown() def _test_registry_model( self, @@ -48,7 +43,7 @@ def _test_registry_model( options: Optional[model_types.ModelSaveOption] = None, ) -> None: conda_dependencies = [ - test_env_utils.get_latest_package_version_spec_in_server(self._session, "snowflake-snowpark-python!=1.12.0") + test_env_utils.get_latest_package_version_spec_in_server(self.session, "snowflake-snowpark-python!=1.12.0") ] if additional_dependencies: conda_dependencies.extend(additional_dependencies) @@ -75,3 +70,60 @@ def _test_registry_model( self.registry.delete_model(model_name=name) self.assertNotIn(mv.model_name, [m.name for m in self.registry.models()]) + + def _test_registry_model_from_model_version( + self, + model: model_types.SupportedModelType, + prediction_assert_fns: Dict[str, Tuple[Any, Callable[[Any], Any]]], + sample_input_data: Optional[model_types.SupportedDataType] = None, + additional_dependencies: Optional[List[str]] = None, + options: Optional[model_types.ModelSaveOption] = None, + ) -> None: + conda_dependencies = [ + test_env_utils.get_latest_package_version_spec_in_server(self.session, "snowflake-snowpark-python!=1.12.0") + ] + if additional_dependencies: + conda_dependencies.extend(additional_dependencies) + + # Get the name of the caller as the model name + source_name = f"source_model_{inspect.stack()[1].function}" + name = f"model_{inspect.stack()[1].function}" + source_version = f"source_ver_{self._run_id}" + version = f"ver_{self._run_id}" + source_mv = self.registry.log_model( + model=model, + model_name=source_name, + version_name=source_version, + sample_input_data=sample_input_data, + conda_dependencies=conda_dependencies, + options=options, + ) + + # Create a new model when the model doesn't exist + mv = self.registry.log_model( + model=source_mv, + model_name=name, + version_name=version, + ) + + for target_method, (test_input, check_func) in prediction_assert_fns.items(): + res = mv.run(test_input, function_name=target_method) + check_func(res) + + self.registry.show_models() + + # Add a version when the model exists + version2 = f"ver_{self._run_id}_2" + mv2 = self.registry.log_model( + model=source_mv, + model_name=name, + version_name=version2, + ) + + for target_method, (test_input, check_func) in prediction_assert_fns.items(): 
+ res = mv2.run(test_input, function_name=target_method) + check_func(res) + + self.registry.show_models() + self.registry.delete_model(model_name=name) + self.assertNotIn(mv2.model_name, [m.name for m in self.registry.models()]) diff --git a/tests/integ/snowflake/ml/registry/model/registry_modeling_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_modeling_model_test.py index d04ff0db..6f80627c 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_modeling_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_modeling_model_test.py @@ -3,7 +3,7 @@ import numpy as np import yaml -from absl.testing import absltest +from absl.testing import absltest, parameterized from sklearn import datasets from snowflake.ml import dataset @@ -17,8 +17,12 @@ class TestRegistryModelingModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_snowml_model_deploy_snowml_sklearn( self, + registry_test_fn: str, ) -> None: iris_X = datasets.load_iris(as_frame=True).frame iris_X.columns = [s.replace(" (CM)", "").replace(" ", "") for s in iris_X.columns.str.upper()] @@ -30,20 +34,24 @@ def test_snowml_model_deploy_snowml_sklearn( test_features = iris_X regr.fit(test_features) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regr, prediction_assert_fns={ "predict": ( test_features, - lambda res: lambda res: np.testing.assert_allclose( + lambda res: np.testing.assert_allclose( res[OUTPUT_COLUMNS].values, regr.predict(test_features)[OUTPUT_COLUMNS].values ), ), }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_snowml_model_deploy_xgboost( self, + registry_test_fn: str, ) -> None: iris_X = datasets.load_iris(as_frame=True).frame iris_X.columns = [s.replace(" (CM)", "").replace(" ", "") for s in iris_X.columns.str.upper()] @@ -55,7 +63,7 @@ def test_snowml_model_deploy_xgboost( test_features = iris_X[:10] regr.fit(test_features) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regr, prediction_assert_fns={ "predict": ( @@ -67,8 +75,12 @@ def test_snowml_model_deploy_xgboost( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_snowml_model_deploy_lightgbm( self, + registry_test_fn: str, ) -> None: iris_X = datasets.load_iris(as_frame=True).frame iris_X.columns = [s.replace(" (CM)", "").replace(" ", "") for s in iris_X.columns.str.upper()] @@ -80,7 +92,7 @@ def test_snowml_model_deploy_lightgbm( test_features = iris_X[:10] regr.fit(test_features) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regr, prediction_assert_fns={ "predict": ( @@ -92,7 +104,13 @@ def test_snowml_model_deploy_lightgbm( }, ) - def test_dataset_to_model_lineage(self) -> None: + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) + def test_dataset_to_model_lineage( + self, + registry_test_fn: str, + ) -> None: iris_X = datasets.load_iris(as_frame=True).frame iris_X.columns = [s.replace(" (CM)", "").replace(" ", "") for s in iris_X.columns.str.upper()] @@ -108,33 +126,64 @@ def test_dataset_to_model_lineage(self) -> None: T.StructField("TARGET", T.StringType()), T.StructField("PREDICTED_TARGET", T.StringType()), ] 
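# NOTE: a minimal standalone sketch of the flow exercised by
# _test_registry_model_from_model_version above -- log a source model, then pass
# the resulting ModelVersion back into log_model to register it under a new
# name. The session setup, credentials, and model/version names below are
# hypothetical placeholders; the Registry calls mirror the helper's.
from sklearn import datasets, linear_model
from snowflake.ml.registry import registry
from snowflake.snowpark import Session

connection_parameters = {"account": "<account>", "user": "<user>", "password": "<password>"}  # placeholders
session = Session.builder.configs(connection_parameters).create()
iris_X, iris_y = datasets.load_iris(return_X_y=True)
clf = linear_model.LogisticRegression().fit(iris_X, iris_y)

reg = registry.Registry(session)
source_mv = reg.log_model(
    model=clf,
    model_name="SOURCE_MODEL",
    version_name="V1",
    sample_input_data=iris_X,
)
# Logging an existing ModelVersion registers it as a new model (or, if the
# model already exists, as a new version of it).
copied_mv = reg.log_model(model=source_mv, model_name="COPIED_MODEL", version_name="V1")
copied_mv.run(iris_X, function_name="predict")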
- test_features_df = self._session.create_dataframe(iris_X, schema=schema) + test_features_df = self.session.create_dataframe(iris_X, schema=schema) test_features_dataset = dataset.create_from_dataframe( - session=self._session, + session=self.session, name="trainDataset", version="v1", input_dataframe=test_features_df, ) + # Case 1 : Capture lineage in the MANIFEST.yml file via the fit() API test_df = test_features_dataset.read.to_snowpark_dataframe() regr.fit(test_df) + self._check_lineage_in_manifest_file(regr, test_features_dataset) + + # Case 2 : Test the remaining life cycle. + getattr(self, registry_test_fn)( + model=regr, + prediction_assert_fns={ + "predict": ( + iris_X, + lambda res: np.testing.assert_allclose( + res[OUTPUT_COLUMNS].values, regr.predict(iris_X)[OUTPUT_COLUMNS].values + ), + ), + }, + additional_dependencies=["fsspec", "aiohttp", "cryptography"], + ) - # Case 1 : test generation of MANIFEST.yml file + # Case 3 : Capture lineage in the MANIFEST.yml file via the sample_input of log_model + pandas_df = test_features_dataset.read.to_pandas() + + regr.fit(pandas_df) + self._check_lineage_in_manifest_file( + regr, test_features_dataset, sample_input_data=test_features_dataset.read.to_snowpark_dataframe() + ) + # Case 4 : Don't capture lineage if it is not passed via the fit() API or sample_input + pandas_df = test_features_dataset.read.to_pandas() + + regr.fit(pandas_df) + self._check_lineage_in_manifest_file( + regr, test_features_dataset, sample_input_data=pandas_df, lineage_should_exist=False + ) + + def _check_lineage_in_manifest_file(self, model, dataset, sample_input_data=None, lineage_should_exist=True): model_name = "some_name" - tmp_stage_path = posixpath.join(self._session.get_session_stage(), f"{model_name}_{1}") + tmp_stage_path = posixpath.join(self.session.get_session_stage(), f"{model_name}_{1}") conda_dependencies = [ - test_env_utils.get_latest_package_version_spec_in_server(self._session, "snowflake-snowpark-python!=1.12.0") + test_env_utils.get_latest_package_version_spec_in_server(self.session, "snowflake-snowpark-python!=1.12.0") ] - mc = model_composer.ModelComposer(self._session, stage_path=tmp_stage_path) + mc = model_composer.ModelComposer(self.session, stage_path=tmp_stage_path) mc.save( name=model_name, - model=regr, + model=model, signatures=None, - sample_input_data=None, + sample_input_data=sample_input_data, conda_dependencies=conda_dependencies, metadata={"author": "rsureshbabu", "version": "1"}, options={"relax_version": False}, @@ -142,29 +191,18 @@ def test_dataset_to_model_lineage(self) -> None: with open(os.path.join(tmp_stage_path, mc._workspace.name, "MANIFEST.yml"), encoding="utf-8") as f: yaml_content = yaml.safe_load(f) - assert "lineage_sources" in yaml_content - assert isinstance(yaml_content["lineage_sources"], list) - assert len(yaml_content["lineage_sources"]) == 1 - - source = yaml_content["lineage_sources"][0] - assert isinstance(source, dict) - assert source.get("type") == "DATASET" - assert source.get("entity") == f"{test_features_dataset.fully_qualified_name}" - assert source.get("version") == f"{test_features_dataset._version.name}" - - # Case 2 : test remaining life cycle.
- self._test_registry_model( - model=regr, - prediction_assert_fns={ - "predict": ( - iris_X, - lambda res: lambda res: np.testing.assert_allclose( - res[OUTPUT_COLUMNS].values, regr.predict(iris_X)[OUTPUT_COLUMNS].values - ), - ), - }, - additional_dependencies=["fsspec", "aiohttp", "cryptography"], - ) + if lineage_should_exist: + assert "lineage_sources" in yaml_content + assert isinstance(yaml_content["lineage_sources"], list) + assert len(yaml_content["lineage_sources"]) == 1 + + source = yaml_content["lineage_sources"][0] + assert isinstance(source, dict) + assert source.get("type") == "DATASET" + assert source.get("entity") == f"{dataset.fully_qualified_name}" + assert source.get("version") == f"{dataset._version.name}" + else: + assert "lineage_sources" not in yaml_content if __name__ == "__main__": diff --git a/tests/integ/snowflake/ml/registry/model/registry_pytorch_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_pytorch_model_test.py index d3af8e4b..41776df4 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_pytorch_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_pytorch_model_test.py @@ -1,6 +1,6 @@ import pandas as pd import torch -from absl.testing import absltest +from absl.testing import absltest, parameterized from snowflake.ml.model._signatures import pytorch_handler, snowpark_handler from tests.integ.snowflake.ml.registry.model import registry_model_test_base @@ -8,14 +8,18 @@ class TestRegistryPytorchModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_pytorch_tensor_as_sample( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = model_factory.ModelFactory.prepare_torch_model(torch.float32) x_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([data_x], ensure_serializable=False) y_pred = model.forward(data_x).detach() - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=[data_x], prediction_assert_fns={ @@ -28,14 +32,18 @@ def test_pytorch_tensor_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_pytorch_df_as_sample( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = model_factory.ModelFactory.prepare_torch_model(torch.float64) x_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([data_x], ensure_serializable=False) y_pred = model.forward(data_x).detach() - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=x_df, prediction_assert_fns={ @@ -48,19 +56,23 @@ def test_pytorch_df_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_pytorch_sp( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = model_factory.ModelFactory.prepare_torch_model(torch.float64) x_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([data_x], ensure_serializable=False) x_df.columns = ["col_0"] y_pred = model.forward(data_x) - x_df_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df(self._session, x_df) + x_df_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df(self.session, x_df) y_pred_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([y_pred]) y_pred_df.columns = ["output_feature_0"] 
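# NOTE: a small self-contained sketch of the dispatch pattern used throughout
# this patch: parameterized.product fans each test out once per entry in
# REGISTRY_TEST_FN_LIST, and getattr resolves the named driver method at run
# time. Class and method names below are illustrative, not from the patch.
from absl.testing import absltest, parameterized


class _BaseCase(parameterized.TestCase):
    # Each entry names a driver method implemented on the base class.
    TEST_FN_LIST = ["_check_positive"]

    def _check_positive(self, value: int) -> None:
        self.assertGreater(value, 0)


class DemoTest(_BaseCase):
    @parameterized.product(test_fn=_BaseCase.TEST_FN_LIST)
    def test_value(self, test_fn: str) -> None:
        getattr(self, test_fn)(value=1)  # dispatch by method name


if __name__ == "__main__":
    absltest.main()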
y_df_expected = pd.concat([x_df, y_pred_df], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=x_df, prediction_assert_fns={ @@ -71,15 +83,19 @@ def test_pytorch_sp( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_torchscript_tensor_as_sample( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = model_factory.ModelFactory.prepare_jittable_torch_model(torch.float32) x_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([data_x], ensure_serializable=False) model_script = torch.jit.script(model) # type:ignore[attr-defined] y_pred = model_script.forward(data_x).detach() - self._test_registry_model( + getattr(self, registry_test_fn)( model=model_script, sample_input_data=[data_x], prediction_assert_fns={ @@ -92,15 +108,19 @@ def test_torchscript_tensor_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_torchscript_df_as_sample( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = model_factory.ModelFactory.prepare_jittable_torch_model(torch.float64) x_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([data_x], ensure_serializable=False) model_script = torch.jit.script(model) # type:ignore[attr-defined] y_pred = model_script.forward(data_x).detach() - self._test_registry_model( + getattr(self, registry_test_fn)( model=model_script, sample_input_data=x_df, prediction_assert_fns={ @@ -113,20 +133,24 @@ def test_torchscript_df_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_torchscript_sp( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = model_factory.ModelFactory.prepare_jittable_torch_model(torch.float64) x_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([data_x], ensure_serializable=False) x_df.columns = ["col_0"] model_script = torch.jit.script(model) # type:ignore[attr-defined] y_pred = model_script.forward(data_x) - x_df_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df(self._session, x_df) + x_df_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df(self.session, x_df) y_pred_df = pytorch_handler.SeqOfPyTorchTensorHandler.convert_to_df([y_pred]) y_pred_df.columns = ["output_feature_0"] y_df_expected = pd.concat([x_df, y_pred_df], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model_script, sample_input_data=x_df, prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/registry/model/registry_sentence_transformers_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_sentence_transformers_model_test.py index 903bab33..6f770966 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_sentence_transformers_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_sentence_transformers_model_test.py @@ -3,7 +3,7 @@ import tempfile import pandas as pd -from absl.testing import absltest +from absl.testing import absltest, parameterized from snowflake.ml.model import model_signature from snowflake.ml.model._packager.model_handlers.sentence_transformers import ( @@ -33,8 +33,12 @@ def tearDownClass(self) -> None: os.environ[SENTENCE_TRANSFORMERS_CACHE_DIR] = self._original_cache_dir self.cache_dir.cleanup() + @parameterized.product( # type: ignore[misc] + 
registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_sentence_transformers( self, + registry_test_fn: str, ) -> None: import sentence_transformers @@ -55,7 +59,7 @@ def test_sentence_transformers( sig = {"encode": model_signature.infer_signature(sentences, embeddings)} embeddings = model_signature_utils.rename_pandas_df(embeddings, sig["encode"].outputs) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=sentences, prediction_assert_fns={ @@ -66,8 +70,12 @@ def test_sentence_transformers( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_sentence_transformers_sp( self, + registry_test_fn: str, ) -> None: import sentence_transformers @@ -83,14 +91,14 @@ def test_sentence_transformers_sp( ] } ) - sentences_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df(self._session, sentences) + sentences_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df(self.session, sentences) model = sentence_transformers.SentenceTransformer(random.choice(MODEL_NAMES)) embeddings = _sentence_transformer_encode(model, sentences) sig = {"encode": model_signature.infer_signature(sentences, embeddings)} embeddings = model_signature_utils.rename_pandas_df(embeddings, sig["encode"].outputs) y_df_expected = pd.concat([sentences_sp.to_pandas(), embeddings], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=sentences_sp, prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/registry/model/registry_sklearn_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_sklearn_model_test.py index 688169b8..88eaa012 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_sklearn_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_sklearn_model_test.py @@ -2,21 +2,25 @@ import numpy as np import pandas as pd -from absl.testing import absltest +from absl.testing import absltest, parameterized from sklearn import datasets, ensemble, linear_model, multioutput from tests.integ.snowflake.ml.registry.model import registry_model_test_base class TestRegistrySKLearnModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_skl_model( self, + registry_test_fn: str, ) -> None: iris_X, iris_y = datasets.load_iris(return_X_y=True) # LogisticRegression is for classification tasks, such as iris regr = linear_model.LogisticRegression() regr.fit(iris_X, iris_y) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regr, sample_input_data=iris_X, prediction_assert_fns={ @@ -31,14 +35,18 @@ def test_skl_model( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_skl_model_case_sensitive( self, + registry_test_fn: str, ) -> None: iris_X, iris_y = datasets.load_iris(return_X_y=True) # LogisticRegression is for classification tasks, such as iris regr = linear_model.LogisticRegression() regr.fit(iris_X, iris_y) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regr, sample_input_data=iris_X, options={ @@ -57,15 +65,19 @@ def test_skl_model_case_sensitive( }, ) + @parameterized.product( # type: ignore[misc] +
registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_skl_multiple_output_model( self, + registry_test_fn: str, ) -> None: iris_X, iris_y = datasets.load_iris(return_X_y=True) target2 = np.random.randint(0, 6, size=iris_y.shape) dual_target = np.vstack([iris_y, target2]).T model = multioutput.MultiOutputClassifier(ensemble.RandomForestClassifier(random_state=42)) model.fit(iris_X[:10], dual_target[:10]) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=iris_X, prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py index 61a27b72..8263100f 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_tensorflow_model_test.py @@ -4,7 +4,7 @@ import pandas as pd import pytest import tensorflow as tf -from absl.testing import absltest +from absl.testing import absltest, parameterized from snowflake.ml.model._signatures import ( numpy_handler, @@ -47,14 +47,18 @@ def call(self, tensor: "tf.Tensor") -> "tf.Tensor": @pytest.mark.pip_incompatible class TestRegistryTensorflowModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_tf_tensor_as_sample( self, + registry_test_fn: str, ) -> None: model, data_x = model_factory.ModelFactory.prepare_tf_model() x_df = tensorflow_handler.SeqOfTensorflowTensorHandler.convert_to_df([data_x], ensure_serializable=False) y_pred = model(data_x) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=[data_x], prediction_assert_fns={ @@ -68,14 +72,18 @@ def test_tf_tensor_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_tf_df_as_sample( self, + registry_test_fn: str, ) -> None: model, data_x = model_factory.ModelFactory.prepare_tf_model() x_df = tensorflow_handler.SeqOfTensorflowTensorHandler.convert_to_df([data_x], ensure_serializable=False) y_pred = model(data_x) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=x_df, prediction_assert_fns={ @@ -89,22 +97,26 @@ def test_tf_df_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_tf_sp( self, + registry_test_fn: str, ) -> None: model, data_x = model_factory.ModelFactory.prepare_tf_model() x_df = tensorflow_handler.SeqOfTensorflowTensorHandler.convert_to_df([data_x], ensure_serializable=False) x_df.columns = ["col_0"] y_pred = model(data_x) x_df_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df( - self._session, + self.session, x_df, ) y_pred_df = tensorflow_handler.SeqOfTensorflowTensorHandler.convert_to_df([y_pred]) y_pred_df.columns = ["output_feature_0"] y_df_expected = pd.concat([x_df, y_pred_df], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=x_df, prediction_assert_fns={ @@ -115,13 +127,17 @@ def test_tf_sp( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_keras_tensor_as_sample( self, 
+ registry_test_fn: str, ) -> None: model, data_x, data_y = prepare_keras_model() x_df = tensorflow_handler.SeqOfTensorflowTensorHandler.convert_to_df([data_x], ensure_serializable=False) y_pred = model.predict(data_x) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=[data_x], prediction_assert_fns={ @@ -136,13 +152,17 @@ def test_keras_tensor_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_keras_df_as_sample( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = prepare_keras_model() x_df = tensorflow_handler.SeqOfTensorflowTensorHandler.convert_to_df([data_x], ensure_serializable=False) y_pred = model.predict(data_x) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=x_df, prediction_assert_fns={ @@ -157,22 +177,26 @@ def test_keras_df_as_sample( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_keras_sp( self, + registry_test_fn: str, ) -> None: model, data_x, data_y = prepare_keras_model() x_df = tensorflow_handler.SeqOfTensorflowTensorHandler.convert_to_df([data_x], ensure_serializable=False) x_df.columns = ["col_0"] y_pred = model.predict(data_x) x_df_sp = snowpark_handler.SnowparkDataFrameHandler.convert_from_df( - self._session, + self.session, x_df, ) y_pred_df = numpy_handler.SeqOfNumpyArrayHandler.convert_to_df([y_pred]) y_pred_df.columns = ["output_feature_0"] y_df_expected = pd.concat([x_df, y_pred_df], axis=1) - self._test_registry_model( + getattr(self, registry_test_fn)( model=model, sample_input_data=x_df, prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/registry/model/registry_xgboost_model_test.py b/tests/integ/snowflake/ml/registry/model/registry_xgboost_model_test.py index 652d96a1..41a2668d 100644 --- a/tests/integ/snowflake/ml/registry/model/registry_xgboost_model_test.py +++ b/tests/integ/snowflake/ml/registry/model/registry_xgboost_model_test.py @@ -2,7 +2,7 @@ import numpy as np import pandas as pd import xgboost -from absl.testing import absltest +from absl.testing import absltest, parameterized from sklearn import datasets, model_selection from tests.integ.snowflake.ml.registry.model import registry_model_test_base @@ -10,8 +10,12 @@ class TestRegistryXGBoostModelInteg(registry_model_test_base.RegistryModelTestBase): + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_xgb( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -20,7 +24,7 @@ def test_xgb( cal_X_train, cal_X_test, cal_y_train, cal_y_test = model_selection.train_test_split(cal_X, cal_y) regressor = xgboost.XGBRegressor(n_estimators=100, reg_lambda=1, gamma=0, max_depth=3) regressor.fit(cal_X_train, cal_y_train) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regressor, sample_input_data=cal_X_test, prediction_assert_fns={ @@ -33,12 +37,16 @@ def test_xgb( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_xgb_sp( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True).frame cal_data.columns = [inflection.parameterize(c, "_") for c in cal_data] - 
cal_data_sp_df = self._session.create_dataframe(cal_data) + cal_data_sp_df = self.session.create_dataframe(cal_data) cal_data_sp_df_train, cal_data_sp_df_test = tuple(cal_data_sp_df.random_split([0.25, 0.75], seed=2568)) regressor = xgboost.XGBRegressor(n_estimators=100, reg_lambda=1, gamma=0, max_depth=3) cal_data_pd_df_train = cal_data_sp_df_train.to_pandas() @@ -52,7 +60,7 @@ def test_xgb_sp( ], axis=1, ) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regressor, sample_input_data=cal_data_sp_df_train.drop('"target"'), prediction_assert_fns={ @@ -63,8 +71,12 @@ def test_xgb_sp( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_xgb_booster( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True) cal_X = cal_data.data @@ -74,7 +86,7 @@ def test_xgb_booster( params = dict(n_estimators=100, reg_lambda=1, gamma=0, max_depth=3, objective="binary:logistic") regressor = xgboost.train(params, xgboost.DMatrix(data=cal_X_train, label=cal_y_train)) y_pred = regressor.predict(xgboost.DMatrix(data=cal_X_test)) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regressor, sample_input_data=cal_X_test, prediction_assert_fns={ @@ -85,12 +97,16 @@ def test_xgb_booster( }, ) + @parameterized.product( # type: ignore[misc] + registry_test_fn=registry_model_test_base.RegistryModelTestBase.REGISTRY_TEST_FN_LIST, + ) def test_xgb_booster_sp( self, + registry_test_fn: str, ) -> None: cal_data = datasets.load_breast_cancer(as_frame=True).frame cal_data.columns = [inflection.parameterize(c, "_") for c in cal_data] - cal_data_sp_df = self._session.create_dataframe(cal_data) + cal_data_sp_df = self.session.create_dataframe(cal_data) cal_data_sp_df_train, cal_data_sp_df_test = tuple(cal_data_sp_df.random_split([0.25, 0.75], seed=2568)) cal_data_pd_df_train = cal_data_sp_df_train.to_pandas() params = dict(n_estimators=100, reg_lambda=1, gamma=0, max_depth=3, objective="binary:logistic") @@ -109,7 +125,7 @@ def test_xgb_booster_sp( ], axis=1, ) - self._test_registry_model( + getattr(self, registry_test_fn)( model=regressor, sample_input_data=cal_data_sp_df_train.drop('"target"'), prediction_assert_fns={ diff --git a/tests/integ/snowflake/ml/test_utils/common_test_base.py b/tests/integ/snowflake/ml/test_utils/common_test_base.py index aceec4d5..3744a125 100644 --- a/tests/integ/snowflake/ml/test_utils/common_test_base.py +++ b/tests/integ/snowflake/ml/test_utils/common_test_base.py @@ -77,6 +77,7 @@ def sproc_test( kclass: Type[_V], local: bool = True, test_callers_rights: bool = True, + test_owners_rights: bool = True, additional_packages: Optional[List[str]] = None, ) -> Callable[ [Callable[Concatenate[_V, _T_args], None]], @@ -165,7 +166,47 @@ def _in_sproc_test(execute_as: Literal["owner", "caller"] = "owner") -> None: execute_as=execute_as, ) def test_in_sproc(sess: session.Session, test_name: str) -> None: + import fcntl + import os + import sys + import threading import unittest + import zipfile + from types import TracebackType + from typing import Optional, Type + + class FileLock: + def __enter__(self) -> None: + self._lock = threading.Lock() + self._lock.acquire() + self._fd = open("/tmp/lockfile.LOCK", "w+") + fcntl.lockf(self._fd, fcntl.LOCK_EX) + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc: Optional[BaseException], + traceback: Optional[TracebackType], + ) -> None: + self._fd.close() + 
self._lock.release() + + IMPORT_DIRECTORY_NAME = "snowflake_import_directory" + import_dir = sys._xoptions[IMPORT_DIRECTORY_NAME] + + for file_name in ["snowflake-ml-python", "snowflake-ml-test"]: + zip_path = os.path.join(import_dir, file_name + ".zip") + for sys_path in sys.path: + if file_name + ".zip" in sys_path: + sys.path.remove(sys_path) + + extracted_dir_path = os.path.join("/tmp", file_name) + + with FileLock(): + if not os.path.isdir(extracted_dir_path): + with zipfile.ZipFile(zip_path, "r") as myzip: + myzip.extractall(extracted_dir_path) + sys.path.append(extracted_dir_path) loader = unittest.TestLoader() @@ -188,13 +229,16 @@ def test_in_sproc(sess: session.Session, test_name: str) -> None: _in_sproc_test(execute_as=_sproc_test_mode) - additional_cases = ["owner"] + additional_cases = [] if local: additional_cases.append("local") if test_callers_rights: additional_cases.append("caller") + if test_owners_rights: + additional_cases.append("owner") + modified_test_cases = get_modified_test_cases(test_cases, additional_cases, "_sproc_test_mode", naming_type) return parameterized._ParameterizedTestIter(