diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..156b6b26
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,3 @@
+include requirements.txt
+include stats_requirements.txt
+include notebook_requirements.txt
diff --git a/README.md b/README.md
index fe94ab36..e58d0ff3 100644
--- a/README.md
+++ b/README.md
@@ -35,3 +35,11 @@ This package contains 4 main external modules. First, `splicemachine.spark.conte
 4.2) [`splicemachine.notebooks`](https://pysplice.readthedocs.io/en/latest/splicemachine.notebook.html): houses utilities for use in Jupyter Notebooks running in the Kubernetes cloud environment
+## Docs
+The docs are managed by readthedocs and Sphinx. See the latest docs [here](https://pysplice.readthedocs.io/en/latest/).
+
+### Building the docs
+```
+cd docs
+make html
+```
diff --git a/dist/splicemachine-2.7.0.dev0.tar.gz b/dist/splicemachine-2.7.0.dev0.tar.gz
deleted file mode 100644
index 138e3804..00000000
Binary files a/dist/splicemachine-2.7.0.dev0.tar.gz and /dev/null differ
diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle
deleted file mode 100644
index dd4997b7..00000000
Binary files a/docs/_build/doctrees/environment.pickle and /dev/null differ
diff --git a/docs/_build/doctrees/getting-started.doctree b/docs/_build/doctrees/getting-started.doctree
deleted file mode 100644
index 42b8b68f..00000000
Binary files a/docs/_build/doctrees/getting-started.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/index.doctree b/docs/_build/doctrees/index.doctree
deleted file mode 100644
index 8365b3ab..00000000
Binary files a/docs/_build/doctrees/index.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/spark.doctree b/docs/_build/doctrees/spark.doctree
deleted file mode 100644
index a3b4b4f3..00000000
Binary files a/docs/_build/doctrees/spark.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/splicemachine.doctree b/docs/_build/doctrees/splicemachine.doctree
deleted file mode 100644
index e1dbf42e..00000000
Binary files a/docs/_build/doctrees/splicemachine.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/splicemachine.features.doctree b/docs/_build/doctrees/splicemachine.features.doctree
deleted file mode 100644
index 99c77275..00000000
Binary files a/docs/_build/doctrees/splicemachine.features.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/splicemachine.mlflow_support.doctree b/docs/_build/doctrees/splicemachine.mlflow_support.doctree
deleted file mode 100644
index d97be221..00000000
Binary files a/docs/_build/doctrees/splicemachine.mlflow_support.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/splicemachine.notebook.doctree b/docs/_build/doctrees/splicemachine.notebook.doctree
deleted file mode 100644
index 547e1bb4..00000000
Binary files a/docs/_build/doctrees/splicemachine.notebook.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/splicemachine.spark.doctree b/docs/_build/doctrees/splicemachine.spark.doctree
deleted file mode 100644
index 466f61f0..00000000
Binary files a/docs/_build/doctrees/splicemachine.spark.doctree and /dev/null differ
diff --git a/docs/_build/doctrees/splicemachine.stats.doctree b/docs/_build/doctrees/splicemachine.stats.doctree
deleted file mode 100644
index 5e9256f0..00000000
Binary files a/docs/_build/doctrees/splicemachine.stats.doctree and /dev/null differ
diff --git a/docs/_build/epub/.buildinfo b/docs/_build/epub/.buildinfo
deleted file mode 100644
index ff9e2d6e..00000000
--- a/docs/_build/epub/.buildinfo
+++ /dev/null
@@ -1,4 +0,0 @@
-# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 22e6ad6f3e6c1f1a4b7bbda07fbd3f3c
-tags: 490e2b0d4a1bebf665648774830bc9b4
diff --git a/docs/_build/epub/META-INF/container.xml b/docs/_build/epub/META-INF/container.xml
deleted file mode 100644
index 326cf15f..00000000
--- a/docs/_build/epub/META-INF/container.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-[XML markup stripped during extraction]
diff --git a/docs/_build/epub/SpliceMLManager.epub b/docs/_build/epub/SpliceMLManager.epub
deleted file mode 100644
index 025525f7..00000000
Binary files a/docs/_build/epub/SpliceMLManager.epub and /dev/null differ
diff --git a/docs/_build/epub/_modules/index.xhtml b/docs/_build/epub/_modules/index.xhtml
deleted file mode 100644
index c75dff15..00000000
--- a/docs/_build/epub/_modules/index.xhtml
+++ /dev/null
@@ -1,35 +0,0 @@
-[XHTML markup stripped during extraction; page title: "Overview: module code"]
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/pyspark/ml/base.xhtml b/docs/_build/epub/_modules/pyspark/ml/base.xhtml
deleted file mode 100644
index 41853ec7..00000000
--- a/docs/_build/epub/_modules/pyspark/ml/base.xhtml
+++ /dev/null
@@ -1,271 +0,0 @@
-[XHTML head markup stripped during extraction; page title: "pyspark.ml.base"]
-Source code for pyspark.ml.base
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from abc import ABCMeta, abstractmethod
-
-import copy
-import threading
-
-from pyspark import since
-from pyspark.ml.param.shared import *
-from pyspark.ml.common import inherit_doc
-from pyspark.sql.functions import udf
-from pyspark.sql.types import StructField, StructType
-
-
-class _FitMultipleIterator(object):
-    """
-    Used by default implementation of Estimator.fitMultiple to produce models in a thread safe
-    iterator. This class handles the simple case of fitMultiple where each param map should be
-    fit independently.
-
-    :param fitSingleModel: Function: (int => Model) which fits an estimator to a dataset.
-        `fitSingleModel` may be called up to `numModels` times, with a unique index each time.
-        Each call to `fitSingleModel` with an index should return the Model associated with
-        that index.
-    :param numModels: Number of models this iterator should produce.
-
-    See Estimator.fitMultiple for more info.
-    """
-    def __init__(self, fitSingleModel, numModels):
-        """
-
-        """
-        self.fitSingleModel = fitSingleModel
-        self.numModel = numModels
-        self.counter = 0
-        self.lock = threading.Lock()
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        with self.lock:
-            index = self.counter
-            if index >= self.numModel:
-                raise StopIteration("No models remaining.")
-            self.counter += 1
-        return index, self.fitSingleModel(index)
-
-    def next(self):
-        """For python2 compatibility."""
-        return self.__next__()
-
-
-@inherit_doc
-class Estimator(Params):
-    """
-    Abstract class for estimators that fit models to data.
-
-    .. versionadded:: 1.3.0
-    """
-
-    __metaclass__ = ABCMeta
-
-    @abstractmethod
-    def _fit(self, dataset):
-        """
-        Fits a model to the input dataset. This is called by the default implementation of fit.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :returns: fitted model
-        """
-        raise NotImplementedError()
-
-    @since("2.3.0")
-    def fitMultiple(self, dataset, paramMaps):
-        """
-        Fits a model to the input dataset for each param map in `paramMaps`.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`.
-        :param paramMaps: A Sequence of param maps.
-        :return: A thread safe iterable which contains one model for each param map. Each
-                 call to `next(modelIterator)` will return `(index, model)` where model was fit
-                 using `paramMaps[index]`. `index` values may not be sequential.
-        """
-        estimator = self.copy()
-
-        def fitSingleModel(index):
-            return estimator.fit(dataset, paramMaps[index])
-
-        return _FitMultipleIterator(fitSingleModel, len(paramMaps))
-
-    @since("1.3.0")
-    def fit(self, dataset, params=None):
-        """
-        Fits a model to the input dataset with optional parameters.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :param params: an optional param map that overrides embedded params. If a list/tuple of
-                       param maps is given, this calls fit on each param map and returns a list of
-                       models.
-        :returns: fitted model(s)
-        """
-        if params is None:
-            params = dict()
-        if isinstance(params, (list, tuple)):
-            models = [None] * len(params)
-            for index, model in self.fitMultiple(dataset, params):
-                models[index] = model
-            return models
-        elif isinstance(params, dict):
-            if params:
-                return self.copy(params)._fit(dataset)
-            else:
-                return self._fit(dataset)
-        else:
-            raise ValueError("Params must be either a param map or a list/tuple of param maps, "
-                             "but got %s." % type(params))
-
-
-@inherit_doc
-class Transformer(Params):
-    """
-    Abstract class for transformers that transform one dataset into another.
-
-    .. versionadded:: 1.3.0
-    """
-
-    __metaclass__ = ABCMeta
-
-    @abstractmethod
-    def _transform(self, dataset):
-        """
-        Transforms the input dataset.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :returns: transformed dataset
-        """
-        raise NotImplementedError()
-
-    @since("1.3.0")
-    def transform(self, dataset, params=None):
-        """
-        Transforms the input dataset with optional parameters.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :param params: an optional param map that overrides embedded params.
-        :returns: transformed dataset
-        """
-        if params is None:
-            params = dict()
-        if isinstance(params, dict):
-            if params:
-                return self.copy(params)._transform(dataset)
-            else:
-                return self._transform(dataset)
-        else:
-            raise ValueError("Params must be a param map but got %s." % type(params))
-
-
-@inherit_doc
-class Model(Transformer):
-    """
-    Abstract class for models that are fitted by estimators.
-
-    .. versionadded:: 1.4.0
-    """
-
-    __metaclass__ = ABCMeta
-
-
-@inherit_doc
-class UnaryTransformer(HasInputCol, HasOutputCol, Transformer):
-    """
-    Abstract class for transformers that take one input column, apply transformation,
-    and output the result as a new column.
-
-    .. versionadded:: 2.3.0
-    """
-
-    def setInputCol(self, value):
-        """
-        Sets the value of :py:attr:`inputCol`.
-        """
-        return self._set(inputCol=value)
-
-    def setOutputCol(self, value):
-        """
-        Sets the value of :py:attr:`outputCol`.
-        """
-        return self._set(outputCol=value)
-
-    @abstractmethod
-    def createTransformFunc(self):
-        """
-        Creates the transform function using the given param map. The input param map already takes
-        account of the embedded param map. So the param values should be determined
-        solely by the input param map.
-        """
-        raise NotImplementedError()
-
-    @abstractmethod
-    def outputDataType(self):
-        """
-        Returns the data type of the output column.
-        """
-        raise NotImplementedError()
-
-    @abstractmethod
-    def validateInputType(self, inputType):
-        """
-        Validates the input type. Throw an exception if it is invalid.
-        """
-        raise NotImplementedError()
-
-    def transformSchema(self, schema):
-        inputType = schema[self.getInputCol()].dataType
-        self.validateInputType(inputType)
-        if self.getOutputCol() in schema.names:
-            raise ValueError("Output column %s already exists." % self.getOutputCol())
-        outputFields = copy.copy(schema.fields)
-        outputFields.append(StructField(self.getOutputCol(),
-                                        self.outputDataType(),
-                                        nullable=False))
-        return StructType(outputFields)
-
-    def _transform(self, dataset):
-        self.transformSchema(dataset.schema)
-        transformUDF = udf(self.createTransformFunc(), self.outputDataType())
-        transformedDataset = dataset.withColumn(self.getOutputCol(),
-                                                transformUDF(dataset[self.getInputCol()]))
-        return transformedDataset
-
-[XHTML footer markup stripped during extraction]
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/pyspark/ml/param.xhtml b/docs/_build/epub/_modules/pyspark/ml/param.xhtml
deleted file mode 100644
index 2179e31d..00000000
--- a/docs/_build/epub/_modules/pyspark/ml/param.xhtml
+++ /dev/null
@@ -1,552 +0,0 @@
-[XHTML head markup stripped during extraction; page title: "pyspark.ml.param"]
-Source code for pyspark.ml.param
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import array
-import sys
-if sys.version > '3':
-    basestring = str
-    xrange = range
-    unicode = str
-
-from abc import ABCMeta
-import copy
-import numpy as np
-
-from py4j.java_gateway import JavaObject
-
-from pyspark.ml.linalg import DenseVector, Vector, Matrix
-from pyspark.ml.util import Identifiable
-
-
-__all__ = ['Param', 'Params', 'TypeConverters']
-
-
-class Param(object):
-    """
-    A param with self-contained documentation.
-
-    .. versionadded:: 1.3.0
-    """
-
-    def __init__(self, parent, name, doc, typeConverter=None):
-        if not isinstance(parent, Identifiable):
-            raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
-        self.parent = parent.uid
-        self.name = str(name)
-        self.doc = str(doc)
-        self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter
-
-    def _copy_new_parent(self, parent):
-        """Copy the current param to a new parent, must be a dummy param."""
-        if self.parent == "undefined":
-            param = copy.copy(self)
-            param.parent = parent.uid
-            return param
-        else:
-            raise ValueError("Cannot copy from non-dummy parent %s." % parent)
-
-    def __str__(self):
-        return str(self.parent) + "__" + self.name
-
-    def __repr__(self):
-        return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)
-
-    def __hash__(self):
-        return hash(str(self))
-
-    def __eq__(self, other):
-        if isinstance(other, Param):
-            return self.parent == other.parent and self.name == other.name
-        else:
-            return False
-
-
-class TypeConverters(object):
-    """
-    Factory methods for common type conversion functions for `Param.typeConverter`.
-
-    .. versionadded:: 2.0.0
-    """
-
-    @staticmethod
-    def _is_numeric(value):
-        vtype = type(value)
-        return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'
-
-    @staticmethod
-    def _is_integer(value):
-        return TypeConverters._is_numeric(value) and float(value).is_integer()
-
-    @staticmethod
-    def _can_convert_to_list(value):
-        vtype = type(value)
-        return vtype in [list, np.ndarray, tuple, xrange, array.array] or isinstance(value, Vector)
-
-    @staticmethod
-    def _can_convert_to_string(value):
-        vtype = type(value)
-        return isinstance(value, basestring) or vtype in [np.unicode_, np.string_, np.str_]
-
-    @staticmethod
-    def identity(value):
-        """
-        Dummy converter that just returns value.
-        """
-        return value
-
-    @staticmethod
-    def toList(value):
-        """
-        Convert a value to a list, if possible.
-        """
-        if type(value) == list:
-            return value
-        elif type(value) in [np.ndarray, tuple, xrange, array.array]:
-            return list(value)
-        elif isinstance(value, Vector):
-            return list(value.toArray())
-        else:
-            raise TypeError("Could not convert %s to list" % value)
-
-    @staticmethod
-    def toListFloat(value):
-        """
-        Convert a value to list of floats, if possible.
-        """
-        if TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
-                return [float(v) for v in value]
-        raise TypeError("Could not convert %s to list of floats" % value)
-
-    @staticmethod
-    def toListListFloat(value):
-        """
-        Convert a value to list of list of floats, if possible.
-        """
-        if TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            return [TypeConverters.toListFloat(v) for v in value]
-        raise TypeError("Could not convert %s to list of list of floats" % value)
-
-    @staticmethod
-    def toListInt(value):
-        """
-        Convert a value to list of ints, if possible.
-        """
-        if TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._is_integer(v), value)):
-                return [int(v) for v in value]
-        raise TypeError("Could not convert %s to list of ints" % value)
-
-    @staticmethod
-    def toListString(value):
-        """
-        Convert a value to list of strings, if possible.
-        """
-        if TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
-                return [TypeConverters.toString(v) for v in value]
-        raise TypeError("Could not convert %s to list of strings" % value)
-
-    @staticmethod
-    def toVector(value):
-        """
-        Convert a value to a MLlib Vector, if possible.
-        """
-        if isinstance(value, Vector):
-            return value
-        elif TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
-                return DenseVector(value)
-        raise TypeError("Could not convert %s to vector" % value)
-
-    @staticmethod
-    def toMatrix(value):
-        """
-        Convert a value to a MLlib Matrix, if possible.
-        """
-        if isinstance(value, Matrix):
-            return value
-        raise TypeError("Could not convert %s to matrix" % value)
-
-    @staticmethod
-    def toFloat(value):
-        """
-        Convert a value to a float, if possible.
-        """
-        if TypeConverters._is_numeric(value):
-            return float(value)
-        else:
-            raise TypeError("Could not convert %s to float" % value)
-
-    @staticmethod
-    def toInt(value):
-        """
-        Convert a value to an int, if possible.
-        """
-        if TypeConverters._is_integer(value):
-            return int(value)
-        else:
-            raise TypeError("Could not convert %s to int" % value)
-
-    @staticmethod
-    def toString(value):
-        """
-        Convert a value to a string, if possible.
-        """
-        if isinstance(value, basestring):
-            return value
-        elif type(value) in [np.string_, np.str_]:
-            return str(value)
-        elif type(value) == np.unicode_:
-            return unicode(value)
-        else:
-            raise TypeError("Could not convert %s to string type" % type(value))
-
-    @staticmethod
-    def toBoolean(value):
-        """
-        Convert a value to a boolean, if possible.
-        """
-        if type(value) == bool:
-            return value
-        else:
-            raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))
-
-
-class Params(Identifiable):
-    """
-    Components that take parameters. This also provides an internal
-    param map to store parameter values attached to the instance.
-
-    .. versionadded:: 1.3.0
-    """
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self):
-        super(Params, self).__init__()
-        #: internal param map for user-supplied values param map
-        self._paramMap = {}
-
-        #: internal param map for default values
-        self._defaultParamMap = {}
-
-        #: value returned by :py:func:`params`
-        self._params = None
-
-        # Copy the params from the class to the object
-        self._copy_params()
-
-    def _copy_params(self):
-        """
-        Copy all params defined on the class to current object.
-        """
-        cls = type(self)
-        src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
-        src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
-        for name, param in src_params:
-            setattr(self, name, param._copy_new_parent(self))
-
-    @property
-    def params(self):
-        """
-        Returns all params ordered by name. The default implementation
-        uses :py:func:`dir` to get all attributes of type
-        :py:class:`Param`.
-        """
-        if self._params is None:
-            self._params = list(filter(lambda attr: isinstance(attr, Param),
-                                       [getattr(self, x) for x in dir(self) if x != "params" and
-                                        not isinstance(getattr(type(self), x, None), property)]))
-        return self._params
-
-    def explainParam(self, param):
-        """
-        Explains a single param and returns its name, doc, and optional
-        default value and user-supplied value in a string.
-        """
-        param = self._resolveParam(param)
-        values = []
-        if self.isDefined(param):
-            if param in self._defaultParamMap:
-                values.append("default: %s" % self._defaultParamMap[param])
-            if param in self._paramMap:
-                values.append("current: %s" % self._paramMap[param])
-        else:
-            values.append("undefined")
-        valueStr = "(" + ", ".join(values) + ")"
-        return "%s: %s %s" % (param.name, param.doc, valueStr)
-
-    def explainParams(self):
-        """
-        Returns the documentation of all params with their optionally
-        default values and user-supplied values.
-        """
-        return "\n".join([self.explainParam(param) for param in self.params])
-
-    def getParam(self, paramName):
-        """
-        Gets a param by its name.
-        """
-        param = getattr(self, paramName)
-        if isinstance(param, Param):
-            return param
-        else:
-            raise ValueError("Cannot find param with name %s." % paramName)
-
-    def isSet(self, param):
-        """
-        Checks whether a param is explicitly set by user.
-        """
-        param = self._resolveParam(param)
-        return param in self._paramMap
-
-    def hasDefault(self, param):
-        """
-        Checks whether a param has a default value.
-        """
-        param = self._resolveParam(param)
-        return param in self._defaultParamMap
-
-    def isDefined(self, param):
-        """
-        Checks whether a param is explicitly set by user or has
-        a default value.
-        """
-        return self.isSet(param) or self.hasDefault(param)
-
-    def hasParam(self, paramName):
-        """
-        Tests whether this instance contains a param with a given
-        (string) name.
-        """
-        if isinstance(paramName, basestring):
-            p = getattr(self, paramName, None)
-            return isinstance(p, Param)
-        else:
-            raise TypeError("hasParam(): paramName must be a string")
-
-    def getOrDefault(self, param):
-        """
-        Gets the value of a param in the user-supplied param map or its
-        default value. Raises an error if neither is set.
-        """
-        param = self._resolveParam(param)
-        if param in self._paramMap:
-            return self._paramMap[param]
-        else:
-            return self._defaultParamMap[param]
-
-    def extractParamMap(self, extra=None):
-        """
-        Extracts the embedded default param values and user-supplied
-        values, and then merges them with extra values from input into
-        a flat param map, where the latter value is used if there exist
-        conflicts, i.e., with ordering: default param values <
-        user-supplied values < extra.
-
-        :param extra: extra param values
-        :return: merged param map
-        """
-        if extra is None:
-            extra = dict()
-        paramMap = self._defaultParamMap.copy()
-        paramMap.update(self._paramMap)
-        paramMap.update(extra)
-        return paramMap
-
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with the same uid and some
-        extra params. The default implementation creates a
-        shallow copy using :py:func:`copy.copy`, and then copies the
-        embedded and extra parameters over and returns the copy.
-        Subclasses should override this method if the default approach
-        is not sufficient.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        that = copy.copy(self)
-        that._paramMap = {}
-        that._defaultParamMap = {}
-        return self._copyValues(that, extra)
-
-    def set(self, param, value):
-        """
-        Sets a parameter in the embedded param map.
-        """
-        self._shouldOwn(param)
-        try:
-            value = param.typeConverter(value)
-        except ValueError as e:
-            raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e))
-        self._paramMap[param] = value
-
-    def _shouldOwn(self, param):
-        """
-        Validates that the input param belongs to this Params instance.
-        """
-        if not (self.uid == param.parent and self.hasParam(param.name)):
-            raise ValueError("Param %r does not belong to %r." % (param, self))
-
-    def _resolveParam(self, param):
-        """
-        Resolves a param and validates the ownership.
-
-        :param param: param name or the param instance, which must
-                      belong to this Params instance
-        :return: resolved param instance
-        """
-        if isinstance(param, Param):
-            self._shouldOwn(param)
-            return param
-        elif isinstance(param, basestring):
-            return self.getParam(param)
-        else:
-            raise ValueError("Cannot resolve %r as a param." % param)
-
-    @staticmethod
-    def _dummy():
-        """
-        Returns a dummy Params instance used as a placeholder to
-        generate docs.
-        """
-        dummy = Params()
-        dummy.uid = "undefined"
-        return dummy
-
-    def _set(self, **kwargs):
-        """
-        Sets user-supplied params.
-        """
-        for param, value in kwargs.items():
-            p = getattr(self, param)
-            if value is not None:
-                try:
-                    value = p.typeConverter(value)
-                except TypeError as e:
-                    raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
-            self._paramMap[p] = value
-        return self
-
-    def clear(self, param):
-        """
-        Clears a param from the param map if it has been explicitly set.
-        """
-        if self.isSet(param):
-            del self._paramMap[param]
-
-    def _setDefault(self, **kwargs):
-        """
-        Sets default params.
-        """
-        for param, value in kwargs.items():
-            p = getattr(self, param)
-            if value is not None and not isinstance(value, JavaObject):
-                try:
-                    value = p.typeConverter(value)
-                except TypeError as e:
-                    raise TypeError('Invalid default param value given for param "%s". %s'
-                                    % (p.name, e))
-            self._defaultParamMap[p] = value
-        return self
-
-    def _copyValues(self, to, extra=None):
-        """
-        Copies param values from this instance to another instance for
-        params shared by them.
-
-        :param to: the target instance
-        :param extra: extra params to be copied
-        :return: the target instance with param values copied
-        """
-        paramMap = self._paramMap.copy()
-        if isinstance(extra, dict):
-            for param, value in extra.items():
-                if isinstance(param, Param):
-                    paramMap[param] = value
-                else:
-                    raise TypeError("Expecting a valid instance of Param, but received: {}"
-                                    .format(param))
-        elif extra is not None:
-            raise TypeError("Expecting a dict, but received an object of type {}."
-                            .format(type(extra)))
-        for param in self.params:
-            # copy default params
-            if param in self._defaultParamMap and to.hasParam(param.name):
-                to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
-            # copy explicitly set params
-            if param in paramMap and to.hasParam(param.name):
-                to._set(**{param.name: paramMap[param]})
-        return to
-
-    def _resetUid(self, newUid):
-        """
-        Changes the uid of this instance. This updates both
-        the stored uid and the parent uid of params and param maps.
-        This is used by persistence (loading).
-        :param newUid: new uid to use, which is converted to unicode
-        :return: same instance, but with the uid and Param.parent values
-                 updated, including within param maps
-        """
-        newUid = unicode(newUid)
-        self.uid = newUid
-        newDefaultParamMap = dict()
-        newParamMap = dict()
-        for param in self.params:
-            newParam = copy.copy(param)
-            newParam.parent = newUid
-            if param in self._defaultParamMap:
-                newDefaultParamMap[newParam] = self._defaultParamMap[param]
-            if param in self._paramMap:
-                newParamMap[newParam] = self._paramMap[param]
-            param.parent = newUid
-        self._defaultParamMap = newDefaultParamMap
-        self._paramMap = newParamMap
-        return self
-
-[XHTML footer markup stripped during extraction]
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/pyspark/ml/param/shared.xhtml b/docs/_build/epub/_modules/pyspark/ml/param/shared.xhtml
deleted file mode 100644
index d67eba8d..00000000
--- a/docs/_build/epub/_modules/pyspark/ml/param/shared.xhtml
+++ /dev/null
@@ -1,624 +0,0 @@
-[XHTML head markup stripped during extraction; page title: "pyspark.ml.param.shared"]
-Source code for pyspark.ml.param.shared
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.
-
-from pyspark.ml.param import *
-
-
-class HasMaxIter(Params):
-    """
-    Mixin for param maxIter: max number of iterations (>= 0).
-    """
-
-    maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasMaxIter, self).__init__()
-
-    def getMaxIter(self):
-        """
-        Gets the value of maxIter or its default value.
-        """
-        return self.getOrDefault(self.maxIter)
-
-
-class HasRegParam(Params):
-    """
-    Mixin for param regParam: regularization parameter (>= 0).
-    """
-
-    regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0).", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasRegParam, self).__init__()
-
-    def getRegParam(self):
-        """
-        Gets the value of regParam or its default value.
-        """
-        return self.getOrDefault(self.regParam)
-
-
-class HasFeaturesCol(Params):
-    """
-    Mixin for param featuresCol: features column name.
-    """
-
-    featuresCol = Param(Params._dummy(), "featuresCol", "features column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasFeaturesCol, self).__init__()
-        self._setDefault(featuresCol='features')
-
-    def getFeaturesCol(self):
-        """
-        Gets the value of featuresCol or its default value.
-        """
-        return self.getOrDefault(self.featuresCol)
-
-
-class HasLabelCol(Params):
-    """
-    Mixin for param labelCol: label column name.
-    """
-
-    labelCol = Param(Params._dummy(), "labelCol", "label column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasLabelCol, self).__init__()
-        self._setDefault(labelCol='label')
-
-    def getLabelCol(self):
-        """
-        Gets the value of labelCol or its default value.
-        """
-        return self.getOrDefault(self.labelCol)
-
-
-class HasPredictionCol(Params):
-    """
-    Mixin for param predictionCol: prediction column name.
-    """
-
-    predictionCol = Param(Params._dummy(), "predictionCol", "prediction column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasPredictionCol, self).__init__()
-        self._setDefault(predictionCol='prediction')
-
-    def getPredictionCol(self):
-        """
-        Gets the value of predictionCol or its default value.
-        """
-        return self.getOrDefault(self.predictionCol)
-
-
-class HasProbabilityCol(Params):
-    """
-    Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
-    """
-
-    probabilityCol = Param(Params._dummy(), "probabilityCol", "Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasProbabilityCol, self).__init__()
-        self._setDefault(probabilityCol='probability')
-
-    def getProbabilityCol(self):
-        """
-        Gets the value of probabilityCol or its default value.
-        """
-        return self.getOrDefault(self.probabilityCol)
-
-
-class HasRawPredictionCol(Params):
-    """
-    Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
-    """
-
-    rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasRawPredictionCol, self).__init__()
-        self._setDefault(rawPredictionCol='rawPrediction')
-
-    def getRawPredictionCol(self):
-        """
-        Gets the value of rawPredictionCol or its default value.
-        """
-        return self.getOrDefault(self.rawPredictionCol)
-
-
-class HasInputCol(Params):
-    """
-    Mixin for param inputCol: input column name.
-    """
-
-    inputCol = Param(Params._dummy(), "inputCol", "input column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasInputCol, self).__init__()
-
-    def getInputCol(self):
-        """
-        Gets the value of inputCol or its default value.
-        """
-        return self.getOrDefault(self.inputCol)
-
-
-class HasInputCols(Params):
-    """
-    Mixin for param inputCols: input column names.
-    """
-
-    inputCols = Param(Params._dummy(), "inputCols", "input column names.", typeConverter=TypeConverters.toListString)
-
-    def __init__(self):
-        super(HasInputCols, self).__init__()
-
-    def getInputCols(self):
-        """
-        Gets the value of inputCols or its default value.
-        """
-        return self.getOrDefault(self.inputCols)
-
-
-class HasOutputCol(Params):
-    """
-    Mixin for param outputCol: output column name.
-    """
-
-    outputCol = Param(Params._dummy(), "outputCol", "output column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasOutputCol, self).__init__()
-        self._setDefault(outputCol=self.uid + '__output')
-
-    def getOutputCol(self):
-        """
-        Gets the value of outputCol or its default value.
-        """
-        return self.getOrDefault(self.outputCol)
-
-
-class HasOutputCols(Params):
-    """
-    Mixin for param outputCols: output column names.
-    """
-
-    outputCols = Param(Params._dummy(), "outputCols", "output column names.", typeConverter=TypeConverters.toListString)
-
-    def __init__(self):
-        super(HasOutputCols, self).__init__()
-
-    def getOutputCols(self):
-        """
-        Gets the value of outputCols or its default value.
-        """
-        return self.getOrDefault(self.outputCols)
-
-
-class HasNumFeatures(Params):
-    """
-    Mixin for param numFeatures: Number of features. Should be greater than 0.
-    """
-
-    numFeatures = Param(Params._dummy(), "numFeatures", "Number of features. Should be greater than 0.", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasNumFeatures, self).__init__()
-        self._setDefault(numFeatures=262144)
-
-    def getNumFeatures(self):
-        """
-        Gets the value of numFeatures or its default value.
-        """
-        return self.getOrDefault(self.numFeatures)
-
-
-class HasCheckpointInterval(Params):
-    """
-    Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.
-    """
-
-    checkpointInterval = Param(Params._dummy(), "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasCheckpointInterval, self).__init__()
-
-    def getCheckpointInterval(self):
-        """
-        Gets the value of checkpointInterval or its default value.
-        """
-        return self.getOrDefault(self.checkpointInterval)
-
-
-class HasSeed(Params):
-    """
-    Mixin for param seed: random seed.
-    """
-
-    seed = Param(Params._dummy(), "seed", "random seed.", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasSeed, self).__init__()
-        self._setDefault(seed=hash(type(self).__name__))
-
-    def getSeed(self):
-        """
-        Gets the value of seed or its default value.
-        """
-        return self.getOrDefault(self.seed)
-
-
-class HasTol(Params):
-    """
-    Mixin for param tol: the convergence tolerance for iterative algorithms (>= 0).
-    """
-
-    tol = Param(Params._dummy(), "tol", "the convergence tolerance for iterative algorithms (>= 0).", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasTol, self).__init__()
-
-    def getTol(self):
-        """
-        Gets the value of tol or its default value.
-        """
-        return self.getOrDefault(self.tol)
-
-
-class HasRelativeError(Params):
-    """
-    Mixin for param relativeError: the relative target precision for the approximate quantile algorithm. Must be in the range [0, 1]
-    """
-
-    relativeError = Param(Params._dummy(), "relativeError", "the relative target precision for the approximate quantile algorithm. Must be in the range [0, 1]", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasRelativeError, self).__init__()
-        self._setDefault(relativeError=0.001)
-
-    def getRelativeError(self):
-        """
-        Gets the value of relativeError or its default value.
-        """
-        return self.getOrDefault(self.relativeError)
-
-
-class HasStepSize(Params):
-    """
-    Mixin for param stepSize: Step size to be used for each iteration of optimization (>= 0).
-    """
-
-    stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization (>= 0).", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasStepSize, self).__init__()
-
-    def getStepSize(self):
-        """
-        Gets the value of stepSize or its default value.
-        """
-        return self.getOrDefault(self.stepSize)
-
-
-class HasHandleInvalid(Params):
-    """
-    Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
-    """
-
-    handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasHandleInvalid, self).__init__()
-
-    def getHandleInvalid(self):
-        """
-        Gets the value of handleInvalid or its default value.
-        """
-        return self.getOrDefault(self.handleInvalid)
-
-
-class HasElasticNetParam(Params):
-    """
-    Mixin for param elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
-    """
-
-    elasticNetParam = Param(Params._dummy(), "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasElasticNetParam, self).__init__()
-        self._setDefault(elasticNetParam=0.0)
-
-    def getElasticNetParam(self):
-        """
-        Gets the value of elasticNetParam or its default value.
-        """
-        return self.getOrDefault(self.elasticNetParam)
-
-
-class HasFitIntercept(Params):
-    """
-    Mixin for param fitIntercept: whether to fit an intercept term.
-    """
-
-    fitIntercept = Param(Params._dummy(), "fitIntercept", "whether to fit an intercept term.", typeConverter=TypeConverters.toBoolean)
-
-    def __init__(self):
-        super(HasFitIntercept, self).__init__()
-        self._setDefault(fitIntercept=True)
-
-    def getFitIntercept(self):
-        """
-        Gets the value of fitIntercept or its default value.
-        """
-        return self.getOrDefault(self.fitIntercept)
-
-
-class HasStandardization(Params):
-    """
-    Mixin for param standardization: whether to standardize the training features before fitting the model.
-    """
-
-    standardization = Param(Params._dummy(), "standardization", "whether to standardize the training features before fitting the model.", typeConverter=TypeConverters.toBoolean)
-
-    def __init__(self):
-        super(HasStandardization, self).__init__()
-        self._setDefault(standardization=True)
-
-    def getStandardization(self):
-        """
-        Gets the value of standardization or its default value.
-        """
-        return self.getOrDefault(self.standardization)
-
-
-class HasThresholds(Params):
-    """
-    Mixin for param thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.
-    """
-
-    thresholds = Param(Params._dummy(), "thresholds", "Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.", typeConverter=TypeConverters.toListFloat)
-
-    def __init__(self):
-        super(HasThresholds, self).__init__()
-
-    def getThresholds(self):
-        """
-        Gets the value of thresholds or its default value.
-        """
-        return self.getOrDefault(self.thresholds)
-
-
-class HasThreshold(Params):
-    """
-    Mixin for param threshold: threshold in binary classification prediction, in range [0, 1]
-    """
-
-    threshold = Param(Params._dummy(), "threshold", "threshold in binary classification prediction, in range [0, 1]", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasThreshold, self).__init__()
-        self._setDefault(threshold=0.5)
-
-    def getThreshold(self):
-        """
-        Gets the value of threshold or its default value.
-        """
-        return self.getOrDefault(self.threshold)
-
-
-class HasWeightCol(Params):
-    """
-    Mixin for param weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0.
-    """
-
-    weightCol = Param(Params._dummy(), "weightCol", "weight column name. If this is not set or empty, we treat all instance weights as 1.0.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasWeightCol, self).__init__()
-
-    def getWeightCol(self):
-        """
-        Gets the value of weightCol or its default value.
-        """
-        return self.getOrDefault(self.weightCol)
-
-
-class HasSolver(Params):
-    """
-    Mixin for param solver: the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.
-    """
-
-    solver = Param(Params._dummy(), "solver", "the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasSolver, self).__init__()
-        self._setDefault(solver='auto')
-
-    def getSolver(self):
-        """
-        Gets the value of solver or its default value.
-        """
-        return self.getOrDefault(self.solver)
-
-
-class HasVarianceCol(Params):
-    """
-    Mixin for param varianceCol: column name for the biased sample variance of prediction.
-    """
-
-    varianceCol = Param(Params._dummy(), "varianceCol", "column name for the biased sample variance of prediction.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasVarianceCol, self).__init__()
-
-    def getVarianceCol(self):
-        """
-        Gets the value of varianceCol or its default value.
-        """
-        return self.getOrDefault(self.varianceCol)
-
-
-class HasAggregationDepth(Params):
-    """
-    Mixin for param aggregationDepth: suggested depth for treeAggregate (>= 2).
-    """
-
-    aggregationDepth = Param(Params._dummy(), "aggregationDepth", "suggested depth for treeAggregate (>= 2).", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasAggregationDepth, self).__init__()
-        self._setDefault(aggregationDepth=2)
-
-    def getAggregationDepth(self):
-        """
-        Gets the value of aggregationDepth or its default value.
-        """
-        return self.getOrDefault(self.aggregationDepth)
-
-
-class HasParallelism(Params):
-    """
-    Mixin for param parallelism: the number of threads to use when running parallel algorithms (>= 1).
-    """
-
-    parallelism = Param(Params._dummy(), "parallelism", "the number of threads to use when running parallel algorithms (>= 1).", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasParallelism, self).__init__()
-        self._setDefault(parallelism=1)
-
-    def getParallelism(self):
-        """
-        Gets the value of parallelism or its default value.
-        """
-        return self.getOrDefault(self.parallelism)
-
-
-class HasCollectSubModels(Params):
-    """
-    Mixin for param collectSubModels: Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.
-    """
-
-    collectSubModels = Param(Params._dummy(), "collectSubModels", "Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.", typeConverter=TypeConverters.toBoolean)
-
-    def __init__(self):
-        super(HasCollectSubModels, self).__init__()
-        self._setDefault(collectSubModels=False)
-
-    def getCollectSubModels(self):
-        """
-        Gets the value of collectSubModels or its default value.
-        """
-        return self.getOrDefault(self.collectSubModels)
-
-
-class HasLoss(Params):
-    """
-    Mixin for param loss: the loss function to be optimized.
-    """
-
-    loss = Param(Params._dummy(), "loss", "the loss function to be optimized.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasLoss, self).__init__()
-
-    def getLoss(self):
-        """
-        Gets the value of loss or its default value.
-        """
-        return self.getOrDefault(self.loss)
-
-
-class HasDistanceMeasure(Params):
-    """
-    Mixin for param distanceMeasure: the distance measure. Supported options: 'euclidean' and 'cosine'.
-    """
-
-    distanceMeasure = Param(Params._dummy(), "distanceMeasure", "the distance measure. Supported options: 'euclidean' and 'cosine'.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasDistanceMeasure, self).__init__()
-        self._setDefault(distanceMeasure='euclidean')
-
-    def getDistanceMeasure(self):
-        """
-        Gets the value of distanceMeasure or its default value.
-        """
-        return self.getOrDefault(self.distanceMeasure)
-
-
-class HasValidationIndicatorCol(Params):
-    """
-    Mixin for param validationIndicatorCol: name of the column that indicates whether each row is for training or for validation. False indicates training; true indicates validation.
-    """
-
-    validationIndicatorCol = Param(Params._dummy(), "validationIndicatorCol", "name of the column that indicates whether each row is for training or for validation. False indicates training; true indicates validation.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasValidationIndicatorCol, self).__init__()
-
-    def getValidationIndicatorCol(self):
-        """
-        Gets the value of validationIndicatorCol or its default value.
-        """
-        return self.getOrDefault(self.validationIndicatorCol)
-
-
-class HasBlockSize(Params):
-    """
-    Mixin for param blockSize: block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.
-    """
-
-    blockSize = Param(Params._dummy(), "blockSize", "block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasBlockSize, self).__init__()
-
-    def getBlockSize(self):
-        """
-        Gets the value of blockSize or its default value.
-        """
-        return self.getOrDefault(self.blockSize)
-
-[XHTML footer markup stripped during extraction]
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/pyspark/ml/tuning.xhtml b/docs/_build/epub/_modules/pyspark/ml/tuning.xhtml
deleted file mode 100644
index cc795b13..00000000
--- a/docs/_build/epub/_modules/pyspark/ml/tuning.xhtml
+++ /dev/null
@@ -1,916 +0,0 @@
-[XHTML head markup stripped during extraction; page title: "pyspark.ml.tuning"]
-Source code for pyspark.ml.tuning
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import itertools
-import sys
-from multiprocessing.pool import ThreadPool
-
-import numpy as np
-
-from pyspark import since, keyword_only
-from pyspark.ml import Estimator, Model
-from pyspark.ml.common import _py2java, _java2py
-from pyspark.ml.param import Params, Param, TypeConverters
-from pyspark.ml.param.shared import HasCollectSubModels, HasParallelism, HasSeed
-from pyspark.ml.util import *
-from pyspark.ml.wrapper import JavaParams
-from pyspark.sql.functions import rand
-
-__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel', 'TrainValidationSplit',
-           'TrainValidationSplitModel']
-
-
-def _parallelFitTasks(est, train, eva, validation, epm, collectSubModel):
-    """
-    Creates a list of callables which can be called from different threads to fit and evaluate
-    an estimator in parallel. Each callable returns an `(index, metric)` pair.
-
-    :param est: Estimator, the estimator to be fit.
-    :param train: DataFrame, training data set, used for fitting.
-    :param eva: Evaluator, used to compute `metric`
-    :param validation: DataFrame, validation data set, used for evaluation.
-    :param epm: Sequence of ParamMap, params maps to be used during fitting & evaluation.
-    :param collectSubModel: Whether to collect the fitted sub model for each task.
-    :return: (int, float, subModel), an index into `epm`, the associated metric value, and
-        the fitted sub model (None unless `collectSubModel` is set).
-    """
-    modelIter = est.fitMultiple(train, epm)
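-    # Note: fitMultiple returns an iterator of (index, model) pairs whose next() call is
-    # thread-safe, so the tasks below can safely share it across worker threads.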
-
-    def singleTask():
-        index, model = next(modelIter)
-        metric = eva.evaluate(model.transform(validation, epm[index]))
-        return index, metric, model if collectSubModel else None
-
-    return [singleTask] * len(epm)
-
-
-class ParamGridBuilder(object):
-    r"""
-    Builder for a param grid used in grid search-based model selection.
-
-    >>> from pyspark.ml.classification import LogisticRegression
-    >>> lr = LogisticRegression()
-    >>> output = ParamGridBuilder() \
-    ...     .baseOn({lr.labelCol: 'l'}) \
-    ...     .baseOn([lr.predictionCol, 'p']) \
-    ...     .addGrid(lr.regParam, [1.0, 2.0]) \
-    ...     .addGrid(lr.maxIter, [1, 5]) \
-    ...     .build()
-    >>> expected = [
-    ...     {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
-    ...     {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
-    ...     {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},
-    ...     {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
-    >>> len(output) == len(expected)
-    True
-    >>> all([m in expected for m in output])
-    True
-
-    .. versionadded:: 1.4.0
-    """
-
-    def __init__(self):
-        self._param_grid = {}
-
-    @since("1.4.0")
-    def addGrid(self, param, values):
-        """
-        Adds the given param with the list of values to search over to the grid.
-
-        param must be an instance of Param associated with an instance of Params
-        (such as Estimator or Transformer).
-        """
-        if isinstance(param, Param):
-            self._param_grid[param] = values
-        else:
-            raise TypeError("param must be an instance of Param")
-
-        return self
-
-    @since("1.4.0")
-    def baseOn(self, *args):
-        """
-        Sets the given parameters in this grid to fixed values.
-        Accepts either a parameter dictionary or a list of (parameter, value) pairs.
-        """
-        if isinstance(args[0], dict):
-            self.baseOn(*args[0].items())
-        else:
-            for (param, value) in args:
-                self.addGrid(param, [value])
-
-        return self
-
-    @since("1.4.0")
-    def build(self):
-        """
-        Builds and returns all combinations of parameters specified
-        by the param grid.
-        """
-        keys = self._param_grid.keys()
-        grid_values = self._param_grid.values()
-
-        def to_key_value_pairs(keys, values):
-            return [(key, key.typeConverter(value)) for key, value in zip(keys, values)]
-
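-        # itertools.product enumerates the Cartesian product of the value lists;
-        # each combination becomes one param map (a dict of Param -> value).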
-        return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)]
-
-
-class _ValidatorParams(HasSeed):
-    """
-    Common params for TrainValidationSplit and CrossValidator.
-    """
-
-    estimator = Param(Params._dummy(), "estimator", "estimator to be cross-validated")
-    estimatorParamMaps = Param(Params._dummy(), "estimatorParamMaps", "estimator param maps")
-    evaluator = Param(
-        Params._dummy(), "evaluator",
-        "evaluator used to select hyper-parameters that maximize the validator metric")
-
-    @since("2.0.0")
-    def getEstimator(self):
-        """
-        Gets the value of estimator or its default value.
-        """
-        return self.getOrDefault(self.estimator)
-
-    @since("2.0.0")
-    def getEstimatorParamMaps(self):
-        """
-        Gets the value of estimatorParamMaps or its default value.
-        """
-        return self.getOrDefault(self.estimatorParamMaps)
-
-    @since("2.0.0")
-    def getEvaluator(self):
-        """
-        Gets the value of evaluator or its default value.
-        """
-        return self.getOrDefault(self.evaluator)
-
-    @classmethod
-    def _from_java_impl(cls, java_stage):
-        """
-        Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.
-        """
-
-        # Load information from java_stage to the instance.
-        estimator = JavaParams._from_java(java_stage.getEstimator())
-        evaluator = JavaParams._from_java(java_stage.getEvaluator())
-        epms = [estimator._transfer_param_map_from_java(epm)
-                for epm in java_stage.getEstimatorParamMaps()]
-        return estimator, epms, evaluator
-
-    def _to_java_impl(self):
-        """
-        Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
-        """
-
-        gateway = SparkContext._gateway
-        cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
-
-        java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
-        for idx, epm in enumerate(self.getEstimatorParamMaps()):
-            java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
-
-        java_estimator = self.getEstimator()._to_java()
-        java_evaluator = self.getEvaluator()._to_java()
-        return java_estimator, java_epms, java_evaluator
-
-
-class _CrossValidatorParams(_ValidatorParams):
-    """
-    Params for :py:class:`CrossValidator` and :py:class:`CrossValidatorModel`.
-
-    .. versionadded:: 3.0.0
-    """
-
-    numFolds = Param(Params._dummy(), "numFolds", "number of folds for cross validation",
-                     typeConverter=TypeConverters.toInt)
-
-    @since("1.4.0")
-    def getNumFolds(self):
-        """
-        Gets the value of numFolds or its default value.
-        """
-        return self.getOrDefault(self.numFolds)
-
-
-class CrossValidator(Estimator, _CrossValidatorParams, HasParallelism, HasCollectSubModels,
-                     MLReadable, MLWritable):
-    """
-
-    K-fold cross validation performs model selection by splitting the dataset into a set of
-    non-overlapping, randomly partitioned folds which are used as separate training and test
-    datasets. For example, with k=3 folds, K-fold cross validation generates 3 (training, test)
-    dataset pairs, each of which uses 2/3 of the data for training and 1/3 for testing. Each
-    fold is used as the test set exactly once.
-
-
-    >>> from pyspark.ml.classification import LogisticRegression
-    >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
-    >>> from pyspark.ml.linalg import Vectors
-    >>> from pyspark.ml.tuning import CrossValidatorModel
-    >>> import tempfile
-    >>> dataset = spark.createDataFrame(
-    ...     [(Vectors.dense([0.0]), 0.0),
-    ...      (Vectors.dense([0.4]), 1.0),
-    ...      (Vectors.dense([0.5]), 0.0),
-    ...      (Vectors.dense([0.6]), 1.0),
-    ...      (Vectors.dense([1.0]), 1.0)] * 10,
-    ...     ["features", "label"])
-    >>> lr = LogisticRegression()
-    >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
-    >>> evaluator = BinaryClassificationEvaluator()
-    >>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
-    ...     parallelism=2)
-    >>> cvModel = cv.fit(dataset)
-    >>> cvModel.getNumFolds()
-    3
-    >>> cvModel.avgMetrics[0]
-    0.5
-    >>> path = tempfile.mkdtemp()
-    >>> model_path = path + "/model"
-    >>> cvModel.write().save(model_path)
-    >>> cvModelRead = CrossValidatorModel.read().load(model_path)
-    >>> cvModelRead.avgMetrics
-    [0.5, ...
-    >>> evaluator.evaluate(cvModel.transform(dataset))
-    0.8333...
-
-    .. versionadded:: 1.4.0
-    """
-
-    @keyword_only
-    def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
-                 seed=None, parallelism=1, collectSubModels=False):
-        """
-        __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
-                 seed=None, parallelism=1, collectSubModels=False)
-        """
-        super(CrossValidator, self).__init__()
-        self._setDefault(numFolds=3, parallelism=1)
-        kwargs = self._input_kwargs
-        self._set(**kwargs)
-
-    @keyword_only
-    @since("1.4.0")
-    def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
-                  seed=None, parallelism=1, collectSubModels=False):
-        """
-        setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
-                  seed=None, parallelism=1, collectSubModels=False):
-        Sets params for cross validator.
-        """
-        kwargs = self._input_kwargs
-        return self._set(**kwargs)
-
-    @since("2.0.0")
-    def setEstimator(self, value):
-        """
-        Sets the value of :py:attr:`estimator`.
-        """
-        return self._set(estimator=value)
-
-    @since("2.0.0")
-    def setEstimatorParamMaps(self, value):
-        """
-        Sets the value of :py:attr:`estimatorParamMaps`.
-        """
-        return self._set(estimatorParamMaps=value)
-
-    @since("2.0.0")
-    def setEvaluator(self, value):
-        """
-        Sets the value of :py:attr:`evaluator`.
-        """
-        return self._set(evaluator=value)
-
-    @since("1.4.0")
-    def setNumFolds(self, value):
-        """
-        Sets the value of :py:attr:`numFolds`.
-        """
-        return self._set(numFolds=value)
-
-    def setSeed(self, value):
-        """
-        Sets the value of :py:attr:`seed`.
-        """
-        return self._set(seed=value)
-
-    def setParallelism(self, value):
-        """
-        Sets the value of :py:attr:`parallelism`.
-        """
-        return self._set(parallelism=value)
-
-    def setCollectSubModels(self, value):
-        """
-        Sets the value of :py:attr:`collectSubModels`.
-        """
-        return self._set(collectSubModels=value)
-
-    def _fit(self, dataset):
-        est = self.getOrDefault(self.estimator)
-        epm = self.getOrDefault(self.estimatorParamMaps)
-        numModels = len(epm)
-        eva = self.getOrDefault(self.evaluator)
-        nFolds = self.getOrDefault(self.numFolds)
-        seed = self.getOrDefault(self.seed)
-        h = 1.0 / nFolds
-        randCol = self.uid + "_rand"
-        df = dataset.select("*", rand(seed).alias(randCol))
-        metrics = [0.0] * numModels
-
-        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
-        subModels = None
-        collectSubModelsParam = self.getCollectSubModels()
-        if collectSubModelsParam:
-            subModels = [[None for j in range(numModels)] for i in range(nFolds)]
-
-        for i in range(nFolds):
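-            # fold i's validation rows are those whose _rand value lands in [i*h, (i+1)*h);
-            # every other row is used for training, so each row validates exactly once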
-            validateLB = i * h
-            validateUB = (i + 1) * h
-            condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
-            validation = df.filter(condition).cache()
-            train = df.filter(~condition).cache()
-
-            tasks = _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam)
-            for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
-                metrics[j] += (metric / nFolds)
-                if collectSubModelsParam:
-                    subModels[i][j] = subModel
-
-            validation.unpersist()
-            train.unpersist()
-
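-        # pick the param map with the best average metric, then refit it on the full dataset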
-        if eva.isLargerBetter():
-            bestIndex = np.argmax(metrics)
-        else:
-            bestIndex = np.argmin(metrics)
-        bestModel = est.fit(dataset, epm[bestIndex])
-        return self._copyValues(CrossValidatorModel(bestModel, metrics, subModels))
-
-    @since("1.4.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copy creates a deep copy of
-        the embedded paramMap, and copies the embedded and extra parameters over.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        newCV = Params.copy(self, extra)
-        if self.isSet(self.estimator):
-            newCV.setEstimator(self.getEstimator().copy(extra))
-        # estimatorParamMaps remain the same
-        if self.isSet(self.evaluator):
-            newCV.setEvaluator(self.getEvaluator().copy(extra))
-        return newCV
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java CrossValidator, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-
-        estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
-        numFolds = java_stage.getNumFolds()
-        seed = java_stage.getSeed()
-        parallelism = java_stage.getParallelism()
-        collectSubModels = java_stage.getCollectSubModels()
-        # Create a new instance of this stage.
-        py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
-                       numFolds=numFolds, seed=seed, parallelism=parallelism,
-                       collectSubModels=collectSubModels)
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java CrossValidator. Used for ML persistence.
-
-        :return: Java object equivalent to this instance.
-        """
-
-        estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
-
-        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
-        _java_obj.setEstimatorParamMaps(epms)
-        _java_obj.setEvaluator(evaluator)
-        _java_obj.setEstimator(estimator)
-        _java_obj.setSeed(self.getSeed())
-        _java_obj.setNumFolds(self.getNumFolds())
-        _java_obj.setParallelism(self.getParallelism())
-        _java_obj.setCollectSubModels(self.getCollectSubModels())
-
-        return _java_obj
-
-
-class CrossValidatorModel(Model, _CrossValidatorParams, MLReadable, MLWritable):
-    """
-
-    CrossValidatorModel contains the model with the highest average cross-validation
-    metric across folds and uses this model to transform input data. CrossValidatorModel
-    also tracks the metrics for each param map evaluated.
-
-    .. versionadded:: 1.4.0
-    """
-
-    def __init__(self, bestModel, avgMetrics=[], subModels=None):
-        super(CrossValidatorModel, self).__init__()
-        #: best model from cross validation
-        self.bestModel = bestModel
-        #: Average cross-validation metrics for each paramMap in
-        #: CrossValidator.estimatorParamMaps, in the corresponding order.
-        self.avgMetrics = avgMetrics
-        #: sub model list from cross validation
-        self.subModels = subModels
-
-    def _transform(self, dataset):
-        return self.bestModel.transform(dataset)
-
-    @since("1.4.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copies the underlying bestModel,
-        creates a deep copy of the embedded paramMap, and
-        copies the embedded and extra parameters over.
-        It does not copy the extra Params into the subModels.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        bestModel = self.bestModel.copy(extra)
-        avgMetrics = self.avgMetrics
-        subModels = self.subModels
-        return CrossValidatorModel(bestModel, avgMetrics, subModels)
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java CrossValidatorModel, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-        sc = SparkContext._active_spark_context
-        bestModel = JavaParams._from_java(java_stage.bestModel())
-        avgMetrics = _java2py(sc, java_stage.avgMetrics())
-        estimator, epms, evaluator = super(CrossValidatorModel, cls)._from_java_impl(java_stage)
-
-        py_stage = cls(bestModel=bestModel, avgMetrics=avgMetrics)._set(estimator=estimator)
-        py_stage = py_stage._set(estimatorParamMaps=epms)._set(evaluator=evaluator)
-
-        if java_stage.hasSubModels():
-            py_stage.subModels = [[JavaParams._from_java(sub_model)
-                                   for sub_model in fold_sub_models]
-                                  for fold_sub_models in java_stage.subModels()]
-
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java CrossValidatorModel. Used for ML persistence.
-
-        :return: Java object equivalent to this instance.
-        """
-
-        sc = SparkContext._active_spark_context
-        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidatorModel",
-                                             self.uid,
-                                             self.bestModel._to_java(),
-                                             _py2java(sc, self.avgMetrics))
-        estimator, epms, evaluator = super(CrossValidatorModel, self)._to_java_impl()
-
-        _java_obj.set("evaluator", evaluator)
-        _java_obj.set("estimator", estimator)
-        _java_obj.set("estimatorParamMaps", epms)
-
-        if self.subModels is not None:
-            java_sub_models = [[sub_model._to_java() for sub_model in fold_sub_models]
-                               for fold_sub_models in self.subModels]
-            _java_obj.setSubModels(java_sub_models)
-        return _java_obj
-
-
-class _TrainValidationSplitParams(_ValidatorParams):
-    """
-    Params for :py:class:`TrainValidationSplit` and :py:class:`TrainValidationSplitModel`.
-
-    .. versionadded:: 3.0.0
-    """
-
-    trainRatio = Param(Params._dummy(), "trainRatio", "Param for ratio between train and\
-     validation data. Must be between 0 and 1.", typeConverter=TypeConverters.toFloat)
-
-    @since("2.0.0")
-    def getTrainRatio(self):
-        """
-        Gets the value of trainRatio or its default value.
-        """
-        return self.getOrDefault(self.trainRatio)
-
-
-class TrainValidationSplit(Estimator, _TrainValidationSplitParams, HasParallelism,
-                           HasCollectSubModels, MLReadable, MLWritable):
-    """
-    Validation for hyper-parameter tuning. Randomly splits the input dataset into train and
-    validation sets, and uses the evaluation metric on the validation set to select the best model.
-    Similar to :class:`CrossValidator`, but only splits the set once.
-
-    >>> from pyspark.ml.classification import LogisticRegression
-    >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
-    >>> from pyspark.ml.linalg import Vectors
-    >>> from pyspark.ml.tuning import TrainValidationSplitModel
-    >>> import tempfile
-    >>> dataset = spark.createDataFrame(
-    ...     [(Vectors.dense([0.0]), 0.0),
-    ...      (Vectors.dense([0.4]), 1.0),
-    ...      (Vectors.dense([0.5]), 0.0),
-    ...      (Vectors.dense([0.6]), 1.0),
-    ...      (Vectors.dense([1.0]), 1.0)] * 10,
-    ...     ["features", "label"]).repartition(1)
-    >>> lr = LogisticRegression()
-    >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
-    >>> evaluator = BinaryClassificationEvaluator()
-    >>> tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
-    ...     parallelism=1, seed=42)
-    >>> tvsModel = tvs.fit(dataset)
-    >>> tvsModel.getTrainRatio()
-    0.75
-    >>> tvsModel.validationMetrics
-    [0.5, ...
-    >>> path = tempfile.mkdtemp()
-    >>> model_path = path + "/model"
-    >>> tvsModel.write().save(model_path)
-    >>> tvsModelRead = TrainValidationSplitModel.read().load(model_path)
-    >>> tvsModelRead.validationMetrics
-    [0.5, ...
-    >>> evaluator.evaluate(tvsModel.transform(dataset))
-    0.833...
-
-    .. versionadded:: 2.0.0
-    """
-
-    @keyword_only
-    def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
-                 parallelism=1, collectSubModels=False, seed=None):
-        """
-        __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
-                 parallelism=1, collectSubModels=False, seed=None)
-        """
-        super(TrainValidationSplit, self).__init__()
-        self._setDefault(trainRatio=0.75, parallelism=1)
-        kwargs = self._input_kwargs
-        self._set(**kwargs)
-
-    @since("2.0.0")
-    @keyword_only
-    def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
-                  parallelism=1, collectSubModels=False, seed=None):
-        """
-        setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
-                  parallelism=1, collectSubModels=False, seed=None):
-        Sets params for the train validation split.
-        """
-        kwargs = self._input_kwargs
-        return self._set(**kwargs)
-
-    @since("2.0.0")
-    def setEstimator(self, value):
-        """
-        Sets the value of :py:attr:`estimator`.
-        """
-        return self._set(estimator=value)
-
-    @since("2.0.0")
-    def setEstimatorParamMaps(self, value):
-        """
-        Sets the value of :py:attr:`estimatorParamMaps`.
-        """
-        return self._set(estimatorParamMaps=value)
-
-    @since("2.0.0")
-    def setEvaluator(self, value):
-        """
-        Sets the value of :py:attr:`evaluator`.
-        """
-        return self._set(evaluator=value)
-
-    @since("2.0.0")
-    def setTrainRatio(self, value):
-        """
-        Sets the value of :py:attr:`trainRatio`.
-        """
-        return self._set(trainRatio=value)
-
-    def setSeed(self, value):
-        """
-        Sets the value of :py:attr:`seed`.
-        """
-        return self._set(seed=value)
-
-    def setParallelism(self, value):
-        """
-        Sets the value of :py:attr:`parallelism`.
-        """
-        return self._set(parallelism=value)
-
-    def setCollectSubModels(self, value):
-        """
-        Sets the value of :py:attr:`collectSubModels`.
-        """
-        return self._set(collectSubModels=value)
-
-    def _fit(self, dataset):
-        est = self.getOrDefault(self.estimator)
-        epm = self.getOrDefault(self.estimatorParamMaps)
-        numModels = len(epm)
-        eva = self.getOrDefault(self.evaluator)
-        tRatio = self.getOrDefault(self.trainRatio)
-        seed = self.getOrDefault(self.seed)
-        randCol = self.uid + "_rand"
-        df = dataset.select("*", rand(seed).alias(randCol))
-        condition = (df[randCol] >= tRatio)
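-        # rows whose _rand value is >= trainRatio (roughly (1 - trainRatio) of the data)
-        # become the validation set; the rest are used for training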
-        validation = df.filter(condition).cache()
-        train = df.filter(~condition).cache()
-
-        subModels = None
-        collectSubModelsParam = self.getCollectSubModels()
-        if collectSubModelsParam:
-            subModels = [None for i in range(numModels)]
-
-        tasks = _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam)
-        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
-        metrics = [None] * numModels
-        for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
-            metrics[j] = metric
-            if collectSubModelsParam:
-                subModels[j] = subModel
-
-        train.unpersist()
-        validation.unpersist()
-
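-        # choose the param map with the best validation metric and refit it on the full dataset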
-        if eva.isLargerBetter():
-            bestIndex = np.argmax(metrics)
-        else:
-            bestIndex = np.argmin(metrics)
-        bestModel = est.fit(dataset, epm[bestIndex])
-        return self._copyValues(TrainValidationSplitModel(bestModel, metrics, subModels))
-
-    @since("2.0.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copy creates a deep copy of
-        the embedded paramMap, and copies the embedded and extra parameters over.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        newTVS = Params.copy(self, extra)
-        if self.isSet(self.estimator):
-            newTVS.setEstimator(self.getEstimator().copy(extra))
-        # estimatorParamMaps remain the same
-        if self.isSet(self.evaluator):
-            newTVS.setEvaluator(self.getEvaluator().copy(extra))
-        return newTVS
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java TrainValidationSplit, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-
-        estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
-        trainRatio = java_stage.getTrainRatio()
-        seed = java_stage.getSeed()
-        parallelism = java_stage.getParallelism()
-        collectSubModels = java_stage.getCollectSubModels()
-        # Create a new instance of this stage.
-        py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
-                       trainRatio=trainRatio, seed=seed, parallelism=parallelism,
-                       collectSubModels=collectSubModels)
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java TrainValidationSplit. Used for ML persistence.
-        :return: Java object equivalent to this instance.
-        """
-
-        estimator, epms, evaluator = super(TrainValidationSplit, self)._to_java_impl()
-
-        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.TrainValidationSplit",
-                                             self.uid)
-        _java_obj.setEstimatorParamMaps(epms)
-        _java_obj.setEvaluator(evaluator)
-        _java_obj.setEstimator(estimator)
-        _java_obj.setTrainRatio(self.getTrainRatio())
-        _java_obj.setSeed(self.getSeed())
-        _java_obj.setParallelism(self.getParallelism())
-        _java_obj.setCollectSubModels(self.getCollectSubModels())
-        return _java_obj
-
-
-class TrainValidationSplitModel(Model, _TrainValidationSplitParams, MLReadable, MLWritable):
-    """
-    Model from train validation split.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def __init__(self, bestModel, validationMetrics=[], subModels=None):
-        super(TrainValidationSplitModel, self).__init__()
-        #: best model from train validation split
-        self.bestModel = bestModel
-        #: evaluated validation metrics
-        self.validationMetrics = validationMetrics
-        #: sub models from train validation split
-        self.subModels = subModels
-
-    def _transform(self, dataset):
-        return self.bestModel.transform(dataset)
-
-    @since("2.0.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copies the underlying bestModel,
-        creates a deep copy of the embedded paramMap, and
-        copies the embedded and extra parameters over.
-        It also creates a shallow copy of the validationMetrics.
-        It does not copy the extra Params into the subModels.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        bestModel = self.bestModel.copy(extra)
-        validationMetrics = list(self.validationMetrics)
-        subModels = self.subModels
-        return TrainValidationSplitModel(bestModel, validationMetrics, subModels)
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-
-        # Load information from java_stage to the instance.
-        sc = SparkContext._active_spark_context
-        bestModel = JavaParams._from_java(java_stage.bestModel())
-        validationMetrics = _java2py(sc, java_stage.validationMetrics())
-        estimator, epms, evaluator = super(TrainValidationSplitModel,
-                                           cls)._from_java_impl(java_stage)
-        # Create a new instance of this stage.
-        py_stage = cls(bestModel=bestModel,
-                       validationMetrics=validationMetrics)._set(estimator=estimator)
-        py_stage = py_stage._set(estimatorParamMaps=epms)._set(evaluator=evaluator)
-
-        if java_stage.hasSubModels():
-            py_stage.subModels = [JavaParams._from_java(sub_model)
-                                  for sub_model in java_stage.subModels()]
-
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java TrainValidationSplitModel. Used for ML persistence.
-        :return: Java object equivalent to this instance.
-        """
-
-        sc = SparkContext._active_spark_context
-        _java_obj = JavaParams._new_java_obj(
-            "org.apache.spark.ml.tuning.TrainValidationSplitModel",
-            self.uid,
-            self.bestModel._to_java(),
-            _py2java(sc, self.validationMetrics))
-        estimator, epms, evaluator = super(TrainValidationSplitModel, self)._to_java_impl()
-
-        _java_obj.set("evaluator", evaluator)
-        _java_obj.set("estimator", estimator)
-        _java_obj.set("estimatorParamMaps", epms)
-
-        if self.subModels is not None:
-            java_sub_models = [sub_model._to_java() for sub_model in self.subModels]
-            _java_obj.setSubModels(java_sub_models)
-
-        return _java_obj
-
-
-if __name__ == "__main__":
-    import doctest
-
-    from pyspark.sql import SparkSession
-    globs = globals().copy()
-
-    # The small batch size here ensures that we see multiple batches,
-    # even in these small test examples:
-    spark = SparkSession.builder\
-        .master("local[2]")\
-        .appName("ml.tuning tests")\
-        .getOrCreate()
-    sc = spark.sparkContext
-    globs['sc'] = sc
-    globs['spark'] = spark
-    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
-    spark.stop()
-    if failure_count:
-        sys.exit(-1)
-
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/pyspark/ml/util.xhtml b/docs/_build/epub/_modules/pyspark/ml/util.xhtml
deleted file mode 100644
index 3b7f0d05..00000000
--- a/docs/_build/epub/_modules/pyspark/ml/util.xhtml
+++ /dev/null
@@ -1,611 +0,0 @@
-Source code for pyspark.ml.util
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import json
-import sys
-import os
-import time
-import uuid
-import warnings
-
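-# Python 2/3 compatibility aliases for the string and integer types used in this module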
-if sys.version > '3':
-    basestring = str
-    unicode = str
-    long = int
-
-from pyspark import SparkContext, since
-from pyspark.ml.common import inherit_doc
-from pyspark.sql import SparkSession
-from pyspark.util import VersionUtils
-
-
-def _jvm():
-    """
-    Returns the JVM view associated with SparkContext. Must be called
-    after SparkContext is initialized.
-    """
-    jvm = SparkContext._jvm
-    if jvm:
-        return jvm
-    else:
-        raise AttributeError("Cannot load _jvm from SparkContext. Is SparkContext initialized?")
-
-
-class Identifiable(object):
-    """
-    Object with a unique ID.
-    """
-
-    def __init__(self):
-        #: A unique id for the object.
-        self.uid = self._randomUID()
-
-    def __repr__(self):
-        return self.uid
-
-    @classmethod
-    def _randomUID(cls):
-        """
-        Generate a unique unicode id for the object. The default implementation
-        concatenates the class name, "_", and 12 random hex chars.
-        """
-        return unicode(cls.__name__ + "_" + uuid.uuid4().hex[-12:])
-
-
-@inherit_doc
-class BaseReadWrite(object):
-    """
-    Base class for MLWriter and MLReader. Stores information about the SparkContext
-    and SparkSession.
-
-    .. versionadded:: 2.3.0
-    """
-
-    def __init__(self):
-        self._sparkSession = None
-
-    def session(self, sparkSession):
-        """
-        Sets the Spark Session to use for saving/loading.
-        """
-        self._sparkSession = sparkSession
-        return self
-
-    @property
-    def sparkSession(self):
-        """
-        Returns the user-specified Spark Session or the default.
-        """
-        if self._sparkSession is None:
-            self._sparkSession = SparkSession.builder.getOrCreate()
-        return self._sparkSession
-
-    @property
-    def sc(self):
-        """
-        Returns the underlying `SparkContext`.
-        """
-        return self.sparkSession.sparkContext
-
-
-@inherit_doc
-class MLWriter(BaseReadWrite):
-    """
-    Utility class that can save ML instances.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def __init__(self):
-        super(MLWriter, self).__init__()
-        self.shouldOverwrite = False
-
-    def _handleOverwrite(self, path):
-        from pyspark.ml.wrapper import JavaWrapper
-
-        _java_obj = JavaWrapper._new_java_obj("org.apache.spark.ml.util.FileSystemOverwrite")
-        wrapper = JavaWrapper(_java_obj)
-        wrapper._call_java("handleOverwrite", path, True, self.sparkSession._jsparkSession)
-
-    def save(self, path):
-        """Save the ML instance to the input path."""
-        if self.shouldOverwrite:
-            self._handleOverwrite(path)
-        self.saveImpl(path)
-
-    def saveImpl(self, path):
-        """
-        save() handles overwriting and then calls this method.  Subclasses should override this
-        method to implement the actual saving of the instance.
-        """
-        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
-
-    def overwrite(self):
-        """Overwrites if the output path already exists."""
-        self.shouldOverwrite = True
-        return self
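-
-    # Typical (illustrative) usage of the writer API:
-    #   model.write().overwrite().save("/some/path")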
-
-
-@inherit_doc
-class GeneralMLWriter(MLWriter):
-    """
-    Utility class that can save ML instances in different formats.
-
-    .. versionadded:: 2.4.0
-    """
-
-    def format(self, source):
-        """
-        Specifies the format of ML export (e.g. "pmml", "internal", or the fully qualified class
-        name for export).
-        """
-        self.source = source
-        return self
-
-
-@inherit_doc
-class JavaMLWriter(MLWriter):
-    """
-    (Private) Specialization of :py:class:`MLWriter` for :py:class:`JavaParams` types
-    """
-
-    def __init__(self, instance):
-        super(JavaMLWriter, self).__init__()
-        _java_obj = instance._to_java()
-        self._jwrite = _java_obj.write()
-
-    def save(self, path):
-        """Save the ML instance to the input path."""
-        if not isinstance(path, basestring):
-            raise TypeError("path should be a basestring, got type %s" % type(path))
-        self._jwrite.save(path)
-
-    def overwrite(self):
-        """Overwrites if the output path already exists."""
-        self._jwrite.overwrite()
-        return self
-
-    def option(self, key, value):
-        self._jwrite.option(key, value)
-        return self
-
-    def session(self, sparkSession):
-        """Sets the Spark Session to use for saving."""
-        self._jwrite.session(sparkSession._jsparkSession)
-        return self
-
-
-@inherit_doc
-class GeneralJavaMLWriter(JavaMLWriter):
-    """
-    (Private) Specialization of :py:class:`GeneralMLWriter` for :py:class:`JavaParams` types
-    """
-
-    def __init__(self, instance):
-        super(GeneralJavaMLWriter, self).__init__(instance)
-
-    def format(self, source):
-        """
-        Specifies the format of ML export (e.g. "pmml", "internal", or the fully qualified class
-        name for export).
-        """
-        self._jwrite.format(source)
-        return self
-
-
-@inherit_doc
-class MLWritable(object):
-    """
-    Mixin for ML instances that provide :py:class:`MLWriter`.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        raise NotImplementedError("MLWritable is not yet implemented for type: %r" % type(self))
-
-    def save(self, path):
-        """Save this ML instance to the given path, a shortcut of 'write().save(path)'."""
-        self.write().save(path)
-
-
-@inherit_doc
-class JavaMLWritable(MLWritable):
-    """
-    (Private) Mixin for ML instances that provide :py:class:`JavaMLWriter`.
-    """
-
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-
-@inherit_doc
-class GeneralJavaMLWritable(JavaMLWritable):
-    """
-    (Private) Mixin for ML instances that provide :py:class:`GeneralJavaMLWriter`.
-    """
-
-    def write(self):
-        """Returns an GeneralMLWriter instance for this ML instance."""
-        return GeneralJavaMLWriter(self)
-
-
-@inherit_doc
-class MLReader(BaseReadWrite):
-    """
-    Utility class that can load ML instances.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def __init__(self):
-        super(MLReader, self).__init__()
-
-    def load(self, path):
-        """Load the ML instance from the input path."""
-        raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
-
-
-@inherit_doc
-class JavaMLReader(MLReader):
-    """
-    (Private) Specialization of :py:class:`MLReader` for :py:class:`JavaParams` types
-    """
-
-    def __init__(self, clazz):
-        super(JavaMLReader, self).__init__()
-        self._clazz = clazz
-        self._jread = self._load_java_obj(clazz).read()
-
-    def load(self, path):
-        """Load the ML instance from the input path."""
-        if not isinstance(path, basestring):
-            raise TypeError("path should be a basestring, got type %s" % type(path))
-        java_obj = self._jread.load(path)
-        if not hasattr(self._clazz, "_from_java"):
-            raise NotImplementedError("This Java ML type cannot be loaded into Python currently: %r"
-                                      % self._clazz)
-        return self._clazz._from_java(java_obj)
-
-    def session(self, sparkSession):
-        """Sets the Spark Session to use for loading."""
-        self._jread.session(sparkSession._jsparkSession)
-        return self
-
-    @classmethod
-    def _java_loader_class(cls, clazz):
-        """
-        Returns the full class name of the Java ML instance. The default
-        implementation replaces "pyspark" by "org.apache.spark" in
-        the Python full class name.
-        """
-        java_package = clazz.__module__.replace("pyspark", "org.apache.spark")
-        if clazz.__name__ in ("Pipeline", "PipelineModel"):
-            # Remove the last package name "pipeline" for Pipeline and PipelineModel.
-            java_package = ".".join(java_package.split(".")[0:-1])
-        return java_package + "." + clazz.__name__
-
-    @classmethod
-    def _load_java_obj(cls, clazz):
-        """Load the peer Java object of the ML instance."""
-        java_class = cls._java_loader_class(clazz)
-        java_obj = _jvm()
-        for name in java_class.split("."):
-            java_obj = getattr(java_obj, name)
-        return java_obj
-
-
-@inherit_doc
-class MLReadable(object):
-    """
-    Mixin for instances that provide :py:class:`MLReader`.
-
-    .. versionadded:: 2.0.0
-    """
-
-    @classmethod
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        raise NotImplementedError("MLReadable.read() not implemented for type: %r" % cls)
-
-    @classmethod
-    def load(cls, path):
-        """Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
-        return cls.read().load(path)
-
-
-@inherit_doc
-class JavaMLReadable(MLReadable):
-    """
-    (Private) Mixin for instances that provide JavaMLReader.
-    """
-
-    @classmethod
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-
-@inherit_doc
-class DefaultParamsWritable(MLWritable):
-    """
-    Helper trait for making simple :py:class:`Params` types writable.  If a :py:class:`Params`
-    class stores all data as :py:class:`Param` values, then extending this trait will provide
-    a default implementation of writing saved instances of the class.
-    This only handles simple :py:class:`Param` types; e.g., it will not handle
-    :py:class:`Dataset`. See :py:class:`DefaultParamsReadable`, the counterpart to this trait.
-
-    .. versionadded:: 2.3.0
-    """
-
-    def write(self):
-        """Returns a DefaultParamsWriter instance for this class."""
-        from pyspark.ml.param import Params
-
-        if isinstance(self, Params):
-            return DefaultParamsWriter(self)
-        else:
-            raise TypeError("Cannot use DefaultParamsWritable with type %s because it does not "
-                            "extend Params." % type(self))
-
-
-@inherit_doc
-class DefaultParamsWriter(MLWriter):
-    """
-    Specialization of :py:class:`MLWriter` for :py:class:`Params` types
-
-    Class for writing Estimators and Transformers whose parameters are JSON-serializable.
-
-    .. versionadded:: 2.3.0
-    """
-
-    def __init__(self, instance):
-        super(DefaultParamsWriter, self).__init__()
-        self.instance = instance
-
-    def saveImpl(self, path):
-        DefaultParamsWriter.saveMetadata(self.instance, path, self.sc)
-
-    @staticmethod
-    def saveMetadata(instance, path, sc, extraMetadata=None, paramMap=None):
-        """
-        Saves metadata + Params to: path + "/metadata"
-
-        - class
-        - timestamp
-        - sparkVersion
-        - uid
-        - paramMap
-        - defaultParamMap (since 2.4.0)
-        - (optionally, extra metadata)
-
-        :param extraMetadata:  Extra metadata to be saved at same level as uid, paramMap, etc.
-        :param paramMap:  If given, this is saved in the "paramMap" field.
-        """
-        metadataPath = os.path.join(path, "metadata")
-        metadataJson = DefaultParamsWriter._get_metadata_to_save(instance,
-                                                                 sc,
-                                                                 extraMetadata,
-                                                                 paramMap)
-        sc.parallelize([metadataJson], 1).saveAsTextFile(metadataPath)
-
-    @staticmethod
-    def _get_metadata_to_save(instance, sc, extraMetadata=None, paramMap=None):
-        """
-        Helper for :py:meth:`DefaultParamsWriter.saveMetadata` which extracts the JSON to save.
-        This is useful for ensemble models which need to save metadata for many sub-models.
-
-        .. note:: See :py:meth:`DefaultParamsWriter.saveMetadata` for details on what this includes.
-        """
-        uid = instance.uid
-        cls = instance.__module__ + '.' + instance.__class__.__name__
-
-        # User-supplied param values
-        params = instance._paramMap
-        jsonParams = {}
-        if paramMap is not None:
-            jsonParams = paramMap
-        else:
-            for p in params:
-                jsonParams[p.name] = params[p]
-
-        # Default param values
-        jsonDefaultParams = {}
-        for p in instance._defaultParamMap:
-            jsonDefaultParams[p.name] = instance._defaultParamMap[p]
-
-        basicMetadata = {"class": cls, "timestamp": long(round(time.time() * 1000)),
-                         "sparkVersion": sc.version, "uid": uid, "paramMap": jsonParams,
-                         "defaultParamMap": jsonDefaultParams}
-        if extraMetadata is not None:
-            basicMetadata.update(extraMetadata)
-        return json.dumps(basicMetadata, separators=(',', ':'))
-
-
-@inherit_doc
-class DefaultParamsReadable(MLReadable):
-    """
-    Helper trait for making simple :py:class:`Params` types readable.
-    If a :py:class:`Params` class stores all data as :py:class:`Param` values,
-    then extending this trait will provide a default implementation of reading saved
-    instances of the class. This only handles simple :py:class:`Param` types;
-    e.g., it will not handle :py:class:`Dataset`. See :py:class:`DefaultParamsWritable`,
-    the counterpart to this trait.
-
-    .. versionadded:: 2.3.0
-    """
-
-    @classmethod
-    def read(cls):
-        """Returns a DefaultParamsReader instance for this class."""
-        return DefaultParamsReader(cls)
-
-
-@inherit_doc
-class DefaultParamsReader(MLReader):
-    """
-    Specialization of :py:class:`MLReader` for :py:class:`Params` types
-
-    Default :py:class:`MLReader` implementation for transformers and estimators that
-    contain basic (json-serializable) params and no data. This will not handle
-    more complex params or types with data (e.g., models with coefficients).
-
-    .. versionadded:: 2.3.0
-    """
-
-    def __init__(self, cls):
-        super(DefaultParamsReader, self).__init__()
-        self.cls = cls
-
-    @staticmethod
-    def __get_class(clazz):
-        """
-        Loads Python class from its name.
-        """
-        parts = clazz.split('.')
-        module = ".".join(parts[:-1])
-        m = __import__(module)
-        for comp in parts[1:]:
-            m = getattr(m, comp)
-        return m
-
-    def load(self, path):
-        metadata = DefaultParamsReader.loadMetadata(path, self.sc)
-        py_type = DefaultParamsReader.__get_class(metadata['class'])
-        instance = py_type()
-        instance._resetUid(metadata['uid'])
-        DefaultParamsReader.getAndSetParams(instance, metadata)
-        return instance
-
-    @staticmethod
-    def loadMetadata(path, sc, expectedClassName=""):
-        """
-        Load metadata saved using :py:meth:`DefaultParamsWriter.saveMetadata`
-
-        :param expectedClassName:  If non empty, this is checked against the loaded metadata.
-        """
-        metadataPath = os.path.join(path, "metadata")
-        metadataStr = sc.textFile(metadataPath, 1).first()
-        loadedVals = DefaultParamsReader._parseMetaData(metadataStr, expectedClassName)
-        return loadedVals
-
-    @staticmethod
-    def _parseMetaData(metadataStr, expectedClassName=""):
-        """
-        Parse metadata JSON string produced by :py:meth:`DefaultParamsWriter._get_metadata_to_save`.
-        This is a helper function for :py:meth:`DefaultParamsReader.loadMetadata`.
-
-        :param metadataStr:  JSON string of metadata
-        :param expectedClassName:  If non empty, this is checked against the loaded metadata.
-        """
-        metadata = json.loads(metadataStr)
-        className = metadata['class']
-        if len(expectedClassName) > 0:
-            assert className == expectedClassName, "Error loading metadata: Expected " + \
-                "class name {} but found class name {}".format(expectedClassName, className)
-        return metadata
-
-    @staticmethod
-    def getAndSetParams(instance, metadata):
-        """
-        Extract Params from metadata, and set them in the instance.
-        """
-        # Set user-supplied param values
-        for paramName in metadata['paramMap']:
-            param = instance.getParam(paramName)
-            paramValue = metadata['paramMap'][paramName]
-            instance.set(param, paramValue)
-
-        # Set default param values
-        majorAndMinorVersions = VersionUtils.majorMinorVersion(metadata['sparkVersion'])
-        major = majorAndMinorVersions[0]
-        minor = majorAndMinorVersions[1]
-
-        # For metadata file prior to Spark 2.4, there is no default section.
-        if major > 2 or (major == 2 and minor >= 4):
-            assert 'defaultParamMap' in metadata, "Error loading metadata: Expected " + \
-                "`defaultParamMap` section not found"
-
-            for paramName in metadata['defaultParamMap']:
-                paramValue = metadata['defaultParamMap'][paramName]
-                instance._setDefault(**{paramName: paramValue})
-
-    @staticmethod
-    def loadParamsInstance(path, sc):
-        """
-        Load a :py:class:`Params` instance from the given path, and return it.
-        This assumes the instance inherits from :py:class:`MLReadable`.
-        """
-        metadata = DefaultParamsReader.loadMetadata(path, sc)
-        pythonClassName = metadata['class'].replace("org.apache.spark", "pyspark")
-        py_type = DefaultParamsReader.__get_class(pythonClassName)
-        instance = py_type.load(path)
-        return instance
-
-
-@inherit_doc
-class HasTrainingSummary(object):
-    """
-    Base class for models that provide a training summary.
-
-    .. versionadded:: 3.0.0
-    """
-
-    @property
-    @since("2.1.0")
-    def hasSummary(self):
-        """
-        Indicates whether a training summary exists for this model
-        instance.
-        """
-        return self._call_java("hasSummary")
-
-    @property
-    @since("2.1.0")
-    def summary(self):
-        """
-        Gets summary of the model trained on the training set. An exception is thrown if
-        no summary exists.
-        """
-        return self._call_java("summary")
-
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/spark/context.xhtml b/docs/_build/epub/_modules/spark/context.xhtml
deleted file mode 100644
index f42171cf..00000000
--- a/docs/_build/epub/_modules/spark/context.xhtml
+++ /dev/null
@@ -1,664 +0,0 @@
-Source code for spark.context
-
-"""
-Copyright 2020 Splice Machine, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import print_function
-
-import os
-
-from py4j.java_gateway import java_import
-from pyspark.sql import DataFrame
-from pyspark.sql.types import _parse_datatype_json_string
-from splicemachine.spark.constants import CONVERSIONS
-
-
-
-class PySpliceContext:
-    """
-    This class implements a SpliceMachineContext object (similar to the SparkContext object)
-    """
-    _spliceSparkPackagesName = "com.splicemachine.spark.splicemachine.*"
-
-    def _splicemachineContext(self):
-        return self.jvm.com.splicemachine.spark.splicemachine.SplicemachineContext(self.jdbcurl)
-
-    def __init__(self, sparkSession, JDBC_URL=None, _unit_testing=False):
-        """
-        :param JDBC_URL: (string) The JDBC URL Connection String for your Splice Machine Cluster
-        :param sparkSession: (sparkContext) A SparkSession object for talking to Spark
-        """
-        if JDBC_URL:
-            self.jdbcurl = JDBC_URL
-        else:
-            try:
-                self.jdbcurl = os.environ['BEAKERX_SQL_DEFAULT_JDBC']
-            except KeyError as e:
-                raise KeyError(
-                    "Could not locate JDBC URL. If you are not running on the cloud service, "
-                    "please specify the JDBC_URL=<some url> keyword argument in the constructor"
-                )
-
-        self._unit_testing = _unit_testing
-
-        if not _unit_testing:  # Private Internal Argument to Override Using JVM
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = self.spark_sql_context._sc._jvm
-            java_import(self.jvm, self._spliceSparkPackagesName)
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions")
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils")
-            java_import(self.jvm, "scala.collection.JavaConverters._")
-            java_import(self.jvm, "com.splicemachine.derby.impl.*")
-            java_import(self.jvm, 'org.apache.spark.api.python.PythonUtils')
-            self.jvm.com.splicemachine.derby.impl.SpliceSpark.setContext(
-                self.spark_sql_context._jsc)
-            self.context = self._splicemachineContext()
-        else:
-            from .tests.mocked import MockedScalaContext
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = ''
-            self.context = MockedScalaContext(self.jdbcurl)
-
-    def toUpper(self, dataframe):
-        """
-        Returns a dataframe with all of the columns in uppercase
-
-        :param dataframe: (Dataframe) The dataframe to convert to uppercase
-        """
-        for s in dataframe.schema:
-            s.name = s.name.upper()
-        # You need to re-generate the dataframe for the capital letters to take effect
-        return dataframe.rdd.toDF(dataframe.schema)
- -
-    def replaceDataframeSchema(self, dataframe, schema_table_name):
-        """
-        Returns a dataframe with all column names replaced with the proper string case from the DB table
-
-        :param dataframe: (Dataframe) A dataframe with column names to convert
-        :param schema_table_name: (str) The schema.table with the correct column cases to pull from the database
-        :return: (DataFrame) A Spark DataFrame with the replaced schema
-        """
-        schema = self.getSchema(schema_table_name)
-        # Fastest way to replace the column case if changed
-        dataframe = dataframe.rdd.toDF(schema)
-        return dataframe
- -
-    def getConnection(self):
-        """
-        Return a connection to the database
-        """
-        return self.context.getConnection()
- -
-    def tableExists(self, schema_and_or_table_name, table_name=None):
-        """
-        Check whether or not a table exists
-
-        :Example:
-            .. code-block:: python
-
-                splice.tableExists('schemaName.tableName')
-                # or
-                splice.tableExists('schemaName', 'tableName')
-
-        :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param,
-            or pass schemaName.tableName in this param without passing the table_name param
-        :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name
-        :return: (bool) whether or not the table exists
-        """
-        if table_name:
-            return self.context.tableExists(schema_and_or_table_name, table_name)
-        else:
-            return self.context.tableExists(schema_and_or_table_name)
- -
-    def dropTable(self, schema_and_or_table_name, table_name=None):
-        """
-        Drop a specified table.
-
-        :Example:
-            .. code-block:: python
-
-                splice.dropTable('schemaName.tableName')
-                # or
-                splice.dropTable('schemaName', 'tableName')
-
-        :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param,
-            or pass schemaName.tableName in this param without passing the table_name param
-        :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name
-        :return: None
-        """
-        if table_name:
-            return self.context.dropTable(schema_and_or_table_name, table_name)
-        else:
-            return self.context.dropTable(schema_and_or_table_name)
- -
[docs] def df(self, sql): - """ - Return a Spark Dataframe from the results of a Splice Machine SQL Query - - :Example: - .. code-block:: python - - df = splice.df('SELECT * FROM MYSCHEMA.TABLE1 WHERE COL2 > 3') - - :param sql: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: (Dataframe) A Spark DataFrame containing the results - """ - return DataFrame(self.context.df(sql), self.spark_sql_context)
- -
[docs] def insert(self, dataframe, schema_table_name, to_upper=False):
-        """
-        Insert a dataframe into a table (schema.table).
-
-        :param dataframe: (Dataframe) The dataframe you would like to insert
-        :param schema_table_name: (str) The table in which you would like to insert the DF
-        :param to_upper: (bool) If the dataframe columns should be converted to uppercase before insertion.
-            If False, the columns will be inserted with their current case. [Default False]
-        :return: None
-        """
-        if to_upper:
-            dataframe = self.toUpper(dataframe)
-        return self.context.insert(dataframe._jdf, schema_table_name)
- -
[docs] def insertWithStatus(self, dataframe, schema_table_name, statusDirectory, badRecordsAllowed):
-        """
-        Insert a dataframe into a table (schema.table) while tracking and limiting records that fail to insert.
-        The status directory and number of badRecordsAllowed allow for duplicate primary keys to be
-        written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written
-        to the status directory.
-
-        :param dataframe: (Dataframe) The dataframe you would like to insert
-        :param schema_table_name: (str) The table in which you would like to insert the dataframe
-        :param statusDirectory: (str) The status directory where the bad records file will be created
-        :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited
-        :return: None
-        """
-        dataframe = self.replaceDataframeSchema(dataframe, schema_table_name)
-        return self.context.insert(dataframe._jdf, schema_table_name, statusDirectory, badRecordsAllowed)
- -
[docs] def insertRdd(self, rdd, schema, schema_table_name): - """ - Insert an rdd into a table (schema.table) - - :param rdd: (RDD) The RDD you would like to insert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to insert the RDD - :return: None - """ - return self.insert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def insertRddWithStatus(self, rdd, schema, schema_table_name, statusDirectory, badRecordsAllowed):
-        """
-        Insert an rdd into a table (schema.table) while tracking and limiting records that fail to insert. \
-        The status directory and number of badRecordsAllowed allow for duplicate primary keys to be \
-        written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written \
-        to the status directory.
-
-        :param rdd: (RDD) The RDD you would like to insert
-        :param schema: (StructType) The schema of the rows in the RDD
-        :param schema_table_name: (str) The table in which you would like to insert the RDD
-        :param statusDirectory: (str) The status directory where the bad records file will be created
-        :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited
-        :return: None
-        """
-        return self.insertWithStatus(
-            self.createDataFrame(rdd, schema),
-            schema_table_name,
-            statusDirectory,
-            badRecordsAllowed
-        )
- -
[docs] def upsert(self, dataframe, schema_table_name): - """ - Upsert the data from a dataframe into a table (schema.table). - - :param dataframe: (Dataframe) The dataframe you would like to upsert - :param schema_table_name: (str) The table in which you would like to upsert the RDD - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - return self.context.upsert(dataframe._jdf, schema_table_name)
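As a usage sketch (the table and columns below are hypothetical): upsert matches incoming rows against the table's primary key, updating rows that exist and inserting the rest.

```python
# Assumes a table MYSCHEMA.USERS with primary key ID already exists (hypothetical).
rows = [(1, 'alice'), (2, 'bob')]
df = spark.createDataFrame(rows, ['ID', 'NAME'])
splice.upsert(df, 'MYSCHEMA.USERS')
```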
- -
[docs] def upsertWithRdd(self, rdd, schema, schema_table_name): - """ - Upsert the data from an RDD into a table (schema.table). - - :param rdd: (RDD) The RDD you would like to upsert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to upsert the RDD - :return: None - """ - return self.upsert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def delete(self, dataframe, schema_table_name): - """ - Delete records in a dataframe based on joining by primary keys from the data frame. - Be careful with column naming and case sensitivity. - - :param dataframe: (Dataframe) The dataframe you would like to delete - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.context.delete(dataframe._jdf, schema_table_name)
- -
[docs] def deleteWithRdd(self, rdd, schema, schema_table_name): - """ - Delete records using an rdd based on joining by primary keys from the rdd. - Be careful with column naming and case sensitivity. - - :param rdd: (RDD) The RDD containing the primary keys you would like to delete from the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.delete( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def update(self, dataframe, schema_table_name): - """ - Update data from a dataframe for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param dataframe: (Dataframe) The dataframe you would like to update - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - return self.context.update(dataframe._jdf, schema_table_name)
- -
[docs] def updateWithRdd(self, rdd, schema, schema_table_name): - """ - Update data from an rdd for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param rdd: (RDD) The RDD you would like to use for updating the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.update( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def getSchema(self, schema_table_name): - """ - Return the schema via JDBC. - - :param schema_table_name: (str) Table name - :return: (StructType) PySpark StructType representation of the table - """ - return _parse_datatype_json_string(self.context.getSchema(schema_table_name).json())
- -
[docs] def execute(self, query_string): - ''' - execute a query over JDBC - - :Example: - .. code-block:: python - - splice.execute('DELETE FROM TABLE1 WHERE col2 > 3') - - :param query_string: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: None - ''' - return self.context.execute(query_string)
- -
[docs] def executeUpdate(self, query_string): - ''' - execute a dml query:(update,delete,drop,etc) - - :Example: - .. code-block:: python - - splice.executeUpdate('DROP TABLE table1') - - :param query_string: (string) SQL Query (eg. DROP TABLE table1) - :return: None - ''' - return self.context.executeUpdate(query_string)
- -
[docs] def internalDf(self, query_string):
-        '''
-        SQL to Dataframe translation (Lazy). Runs the query inside Splice Machine and sends the results to the Spark Adapter app
-
-        :param query_string: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3)
-        :return: (DataFrame) pyspark dataframe containing the result of query_string
-        '''
-        return DataFrame(self.context.internalDf(query_string), self.spark_sql_context)
- -
[docs] def rdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - - :param schema_table_name: (string) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.df('select '+colnames+' from '+schema_table_name).rdd
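For example, the column projection simply becomes a SELECT list under the hood. A sketch with hypothetical identifiers:

```python
# Equivalent to: select ID, NAME from MYSCHEMA.USERS (identifiers hypothetical)
rdd = splice.rdd('MYSCHEMA.USERS', column_projection=['ID', 'NAME'])
print(rdd.take(5))
```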
- -
[docs] def internalRdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - Runs the projection inside Splice Machine and sends the results to the Spark Adapter app as an rdd - - :param schema_table_name: (str) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.internalDf('select '+colnames+' from '+schema_table_name).rdd
- -
[docs] def truncateTable(self, schema_table_name): - """ - Truncate a table - - :param schema_table_name: (str) the full table name in the format "schema.table_name" which will be truncated - :return: None - """ - return self.context.truncateTable(schema_table_name)
- -
[docs] def analyzeSchema(self, schema_name): - """ - Analyze the schema - - :param schema_name: (str) schema name which stats info will be collected - :return: None - """ - return self.context.analyzeSchema(schema_name)
- -
[docs] def analyzeTable(self, schema_table_name, estimateStatistics=False, samplePercent=10.0):
-        """
-        Collect stats info on a table
-
-        :param schema_table_name: (str) full table name in the format of 'schema.table'
-        :param estimateStatistics: (bool) will use estimate statistics if True
-        :param samplePercent: (float) the percentage of rows to be sampled
-        :return: None
-        """
-        return self.context.analyzeTable(schema_table_name, estimateStatistics, float(samplePercent))
- -
[docs] def export(self, - dataframe, - location, - compression=False, - replicationCount=1, - fileEncoding=None, - fieldSeparator=None, - quoteCharacter=None): - """ - Export a dataFrame in CSV - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param replicationCount: (int) Replication used for HDFS write - :param fileEncoding: (str) fileEncoding or None, defaults to UTF-8 - :param fieldSeparator: (str) fieldSeparator or None, defaults to ',' - :param quoteCharacter: (str) quoteCharacter or None, defaults to '"' - :return: None - """ - return self.context.export(dataframe._jdf, location, compression, replicationCount, - fileEncoding, fieldSeparator, quoteCharacter)
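A hedged sketch of a CSV export; the destination path and separator below are placeholders:

```python
# Write query results out as CSV (path and separator hypothetical).
df = splice.df('SELECT * FROM MYSCHEMA.USERS')
splice.export(df, '/tmp/users_export', compression=True, fieldSeparator='|')
```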
- -
[docs] def exportBinary(self, dataframe, location, compression, e_format='parquet'): - """ - Export a dataFrame in binary format - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param e_format: (str) Binary format to be used, currently only 'parquet' is supported. [Default 'parquet'] - :return: None - """ - return self.context.exportBinary(dataframe._jdf, location, compression, e_format)
- -
[docs] def bulkImportHFile(self, dataframe, schema_table_name, options): - """ - Bulk Import HFile from a dataframe into a schema.table - - :param dataframe: (DataFrame) - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: None - """ - optionsMap = self.jvm.java.util.HashMap() - for k, v in options.items(): - optionsMap.put(k, v) - return self.context.bulkImportHFile(dataframe._jdf, schema_table_name, optionsMap)
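Since the docstring names `bulkImportDirectory` as the required option, a minimal options dictionary might look like the following sketch (path and table hypothetical):

```python
# bulkImportDirectory is the one required key per the docstring; path hypothetical.
options = {'bulkImportDirectory': '/tmp/bulk_import'}
splice.bulkImportHFile(df, 'MYSCHEMA.USERS', options)
```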
- -
[docs] def bulkImportHFileWithRdd(self, rdd, schema, schema_table_name, options): - """ - Bulk Import HFile from an rdd into a schema.table - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: None - """ - return self.bulkImportHFile( - self.createDataFrame(rdd, schema), - schema_table_name, - options - )
- -
[docs] def splitAndInsert(self, dataframe, schema_table_name, sample_fraction): - """ - Sample the dataframe, split the table, and insert a dataFrame into a schema.table. - This corresponds to an insert into from select statement - - :param dataframe: (DataFrame) Input data - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param sample_fraction: (float) A value between 0 and 1 that specifies the percentage of data in the dataFrame \ - that should be sampled to determine the splits. \ - For example, specify 0.005 if you want 0.5% of the data sampled. - :return: None - """ - return self.context.splitAndInsert(dataframe._jdf, schema_table_name, float(sample_fraction))
- -
[docs] def createDataFrame(self, rdd, schema): - """ - Creates a dataframe from a given rdd and schema. - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :return: (DataFrame) The Spark DataFrame - """ - return self.spark_session.createDataFrame(rdd, schema)
- - def _generateDBSchema(self, dataframe, types={}): - """ - Generate the schema for create table - """ - # convert keys and values to uppercase in the types dictionary - types = dict((key.upper(), val) for key, val in types.items()) - db_schema = [] - # convert dataframe to have all uppercase column names - dataframe = self.toUpper(dataframe) - # i contains the name and pyspark datatype of the column - for i in dataframe.schema: - if i.name.upper() in types: - print('Column {} is of type {}'.format( - i.name.upper(), i.dataType)) - dt = types[i.name.upper()] - else: - dt = CONVERSIONS[str(i.dataType)] - db_schema.append((i.name.upper(), dt)) - - return db_schema - - def _getCreateTableSchema(self, schema_table_name, new_schema=False): - """ - Parse schema for new table; if it is needed, create it - """ - # try to get schema and table, else set schema to splice - if '.' in schema_table_name: - schema, table = schema_table_name.upper().split('.') - else: - schema = self.getConnection().getCurrentSchemaName() - table = schema_table_name.upper() - # check for new schema - if new_schema: - print('Creating schema {}'.format(schema)) - self.execute('CREATE SCHEMA {}'.format(schema)) - - return schema, table - - def _dropTableIfExists(self, schema_table_name, table_name=None): - """ - Drop table if it exists - """ - if self.tableExists(schema_and_or_table_name=schema_table_name, table_name=table_name): - print('Table exists. Dropping table') - self.dropTable(schema_and_or_table_name=schema_table_name, table_name=table_name) - -
[docs] def dropTableIfExists(self, schema_table_name, table_name=None):
-        """
-        Drops a table if it exists
-
-        :Example:
-            .. code-block:: python
-
-                splice.dropTableIfExists('schemaName.tableName') \n
-                # or\n
-                splice.dropTableIfExists('schemaName', 'tableName')
-
-        :param schema_table_name: (str) Pass the schema name in this param when passing the table_name param,
-            or pass schemaName.tableName in this param without passing the table_name param
-        :param table_name: (optional) (str) Table Name, used when schema_table_name contains only the schema name
-        :return: None
-        """
-        self._dropTableIfExists(schema_table_name, table_name)
- - def _jstructtype(self, schema): - """ - Convert python StructType to java StructType - - :param schema: PySpark StructType - :return: Java Spark StructType - """ - return self.spark_session._jsparkSession.parseDataType(schema.json()) - -
[docs] def createTable(self, dataframe, schema_table_name, primary_keys=None, create_table_options=None, to_upper=False, drop_table=False): - """ - Creates a schema.table (schema_table_name) from a dataframe - - :param dataframe: The Spark DataFrame to base the table off - :param schema_table_name: str The schema.table to create - :param primary_keys: List[str] the primary keys. Default None - :param create_table_options: str The additional table-level SQL options default None - :param to_upper: bool If the dataframe columns should be converted to uppercase before table creation. \ - If False, the table will be created with lower case columns. Default False - :param drop_table: bool whether to drop the table if it exists. Default False. If False and the table exists, the function will throw an exception - :return: None - - """ - if drop_table: - self._dropTableIfExists(schema_table_name) - if to_upper: - dataframe = self.toUpper(dataframe) - primary_keys = primary_keys if primary_keys else [] - self.createTableWithSchema(schema_table_name, dataframe.schema, - keys=primary_keys, create_table_options=create_table_options)
- -
[docs] def createTableWithSchema(self, schema_table_name, schema, keys=None, create_table_options=None): - """ - Creates a schema.table from a schema - - :param schema_table_name: str The schema.table to create - :param schema: (StructType) The schema that describes the columns of the table - :param keys: (List[str]) The primary keys. Default None - :param create_table_options: (str) The additional table-level SQL options. Default None - :return: None - """ - if keys: - keys_seq = self.jvm.PythonUtils.toSeq(keys) - else: - keys_seq = self.jvm.PythonUtils.toSeq([]) - self.context.createTable( - schema_table_name, - self._jstructtype(schema), - keys_seq, - create_table_options - )
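A sketch of creating a table from an explicit PySpark schema (all identifiers hypothetical):

```python
# Build a PySpark StructType and create the table with ID as primary key.
from pyspark.sql.types import StructType, StructField, IntegerType, StringType

schema = StructType([
    StructField('ID', IntegerType(), False),
    StructField('NAME', StringType(), True),
])
splice.createTableWithSchema('MYSCHEMA.USERS', schema, keys=['ID'])
```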
- - -
[docs]class ExtPySpliceContext(PySpliceContext):
-    """
-    This class implements a SplicemachineContext object from com.splicemachine.spark2 for use outside of the K8s Cloud Service
-    """
-    _spliceSparkPackagesName = "com.splicemachine.spark2.splicemachine.*"
-
-    def _splicemachineContext(self):
-        return self.jvm.com.splicemachine.spark2.splicemachine.SplicemachineContext(
-            self.jdbcurl, self.kafkaServers, self.kafkaPollTimeout)
-
-    def __init__(self, sparkSession, JDBC_URL=None, kafkaServers='localhost:9092', kafkaPollTimeout=20000, _unit_testing=False):
-        """
-        :param JDBC_URL: (string) The JDBC URL Connection String for your Splice Machine Cluster
-        :param sparkSession: (sparkContext) A SparkSession object for talking to Spark
-        :param kafkaServers: (string) Comma-separated list of Kafka broker addresses in the form host:port
-        :param kafkaPollTimeout: (int) Number of milliseconds to wait when polling Kafka
-        """
-        self.kafkaServers = kafkaServers
-        self.kafkaPollTimeout = kafkaPollTimeout
-        super().__init__(sparkSession, JDBC_URL, _unit_testing)
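A construction sketch for use outside the K8s cloud service; the broker addresses and JDBC URL are placeholders:

```python
# Point the external context at your own Kafka brokers (addresses hypothetical).
splice = ExtPySpliceContext(spark,
                            JDBC_URL='jdbc:splice://host:1527/splicedb;user=me;password=pw',
                            kafkaServers='kafka1:9092,kafka2:9092')
```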
- 
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/splicemachine/mlflow_support/mlflow_support.xhtml b/docs/_build/epub/_modules/splicemachine/mlflow_support/mlflow_support.xhtml
deleted file mode 100644
index 87ece33f..00000000
--- a/docs/_build/epub/_modules/splicemachine/mlflow_support/mlflow_support.xhtml
+++ /dev/null
@@ -1,826 +0,0 @@

Source code for splicemachine.mlflow_support.mlflow_support

-"""
-Copyright 2020 Splice Machine, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.\n
-
-======================================================================================================================================================================================\n
-
-All functions in this module are accessible through the mlflow object and are to be referenced without the leading underscore as \n
-.. code-block:: python
-
-    mlflow.function_name()
-
-For example, the function _current_exp_id() is accessible via\n
-.. code-block:: python
-
-    mlflow.current_exp_id()
-
-
-All functions are accessible after running the following import\n
-.. code-block:: python
-
-    from splicemachine.mlflow_support import *
-
-Importing anything directly from mlflow before running the above statement will cause problems. After running the above import, you can import additional mlflow submodules as normal\n
-.. code-block:: python
-
-    from splicemachine.mlflow_support import *
-    from mlflow.tensorflow import autolog
-
-======================================================================================================================================================================================\n
-"""
-import time
-from collections import defaultdict
-from contextlib import contextmanager
-from os import path
-from sys import version as py_version
-
-import gorilla
-import mlflow
-import requests
-from requests.auth import HTTPBasicAuth
-from mleap.pyspark import spark_support
-import pyspark
-import sklearn
-from sklearn.base import BaseEstimator as ScikitModel
-from tensorflow import __version__ as tf_version
-from tensorflow.keras import __version__ as keras_version
-from tensorflow.keras import Model as KerasModel
-
-from splicemachine.mlflow_support.constants import *
-from splicemachine.mlflow_support.utilities import *
-from splicemachine.spark.context import PySpliceContext
-from splicemachine.spark.constants import CONVERSIONS
-from pyspark.sql.dataframe import DataFrame as SparkDF
-from pandas.core.frame import DataFrame as PandasDF
-
-_TESTING = env_vars.get("TESTING", False)
-_TRACKING_URL = get_pod_uri("mlflow", "5001", _TESTING)
-
-_CLIENT = mlflow.tracking.MlflowClient(tracking_uri=_TRACKING_URL)
-mlflow.client = _CLIENT
-
-_GORILLA_SETTINGS = gorilla.Settings(allow_hit=True, store_hit=True)
-_PYTHON_VERSION = py_version.split('|')[0].strip()
-
-
[docs]def _mlflow_patch(name): - """ - Create a MLFlow Patch that applies the default gorilla settings - - :param name: destination name under mlflow package - :return: decorator for patched function - """ - return gorilla.patch(mlflow, name, settings=_GORILLA_SETTINGS)
- - -
[docs]def _get_current_run_data():
-    """
-    Get the data associated with the current run.
-    As of MLFlow 1.6, run data cannot be retrieved from the mlflow.active_run object, so it
-    must be retrieved via the tracking client.
-
-    :return: active run data object
-    """
-    return _CLIENT.get_run(mlflow.active_run().info.run_id).data
- - -
[docs]@_mlflow_patch('get_run_ids_by_name')
-def _get_run_ids_by_name(run_name, experiment_id=None):
-    """
-    Gets a run id from the run name. If there are multiple runs with the same name, all run IDs are returned
-
-    :param run_name: (str) The name of the run
-    :param experiment_id: (int) The experiment to search in. If None, all experiments are searched. [Default None]
-    :return: (List[str]) List of run ids
-    """
-    # Resolve the experiment object so exp.experiment_id works in both branches
-    exps = [_CLIENT.get_experiment(experiment_id)] if experiment_id else _CLIENT.list_experiments()
-    run_ids = []
-    for exp in exps:
-        for run in _CLIENT.search_runs(exp.experiment_id):
-            if run_name == run.data.tags['mlflow.runName']:
-                run_ids.append(run.data.tags['Run ID'])
-    return run_ids
- - -
[docs]@_mlflow_patch('register_splice_context') -def _register_splice_context(splice_context): - """ - Register a Splice Context for Spark/Database operations (artifact storage, for example) - - :param splice_context: (PySpliceContext) splice context to input - :return: None - """ - assert isinstance(splice_context, PySpliceContext), "You must pass in a PySpliceContext to this method" - mlflow._splice_context = splice_context
-
-
-def _check_for_splice_ctx():
-    """
-    Check to make sure that the user has registered
-    a PySpliceContext with the mlflow object before allowing
-    spark operations to take place
-    """
-
-    if not hasattr(mlflow, '_splice_context'):
-        raise SpliceMachineException(
-            "You must run `mlflow.register_splice_context(pysplice_context)` before "
-            "you can run this mlflow operation!"
-        )
-
-
[docs]@_mlflow_patch('current_run_id') -def _current_run_id(): - """ - Retrieve the current run id - - :return: (str) the current run id - """ - return mlflow.active_run().info.run_uuid
- - -
[docs]@_mlflow_patch('current_exp_id') -def _current_exp_id(): - """ - Retrieve the current exp id - - :return: (int) the current experiment id - """ - return mlflow.active_run().info.experiment_id
- - -
[docs]@_mlflow_patch('lp')
-def _lp(key, value):
-    """
-    Add a shortcut for logging parameters in MLFlow.
-
-    :param key: (str) key for the parameter
-    :param value: (str) value for the parameter
-    :return: None
-    """
-    if len(str(value)) > 250 or len(str(key)) > 250:
-        raise SpliceMachineException(f'It seems your parameter input is too long. The max length is 250 characters. '
-                                     f'Your key is length {len(str(key))} and your value is length {len(str(value))}.')
-    mlflow.log_param(key, value)
- - -
[docs]@_mlflow_patch('lm')
-def _lm(key, value, step=None):
-    """
-    Add a shortcut for logging metrics in MLFlow.
-
-    :param key: (str) key for the metric
-    :param value: (str or int) value for the metric
-    :param step: (int) A single integer step at which to log the specified Metrics. If unspecified, each metric is logged at step zero.
-    """
-    if len(str(key)) > 250:
-        raise SpliceMachineException(f'It seems your metric key is too long. The max length is 250 characters, '
-                                     f'but yours is {len(str(key))}')
-    mlflow.log_metric(key, value, step=step)
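A sketch of the two shortcuts together; the run name and values are illustrative only:

```python
# lp/lm mirror mlflow.log_param / mlflow.log_metric after the patches are applied.
with mlflow.start_run(run_name='demo'):
    mlflow.lp('model_type', 'RandomForest')  # parameter shortcut
    mlflow.lm('f1_score', 0.87, step=1)      # metric shortcut
```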
- - -
[docs]@_mlflow_patch('log_model')
-def _log_model(model, name='model'):
-    """
-    Log a trained machine learning model
-
-    :param model: (Model) is the trained Spark/SKlearn/H2O/Keras model
-        with the current run
-    :param name: (str) the run relative name to store the model under. [Default 'model']
-    """
-    _check_for_splice_ctx()
-    if _get_current_run_data().tags.get('splice.model_name'):  # this function has already run
-        raise SpliceMachineException("Only one model is permitted per run.")
-
-    model_class = str(model.__class__)
-    mlflow.set_tag('splice.model_type', model_class)
-    mlflow.set_tag('splice.model_py_version', _PYTHON_VERSION)
-
-    run_id = mlflow.active_run().info.run_uuid
-    if isinstance(model, H2OModel):
-        mlflow.set_tag('splice.h2o_version', h2o.__version__)
-        H2OUtils.log_h2o_model(mlflow._splice_context, model, name, run_id)
-
-    elif isinstance(model, SparkModel):
-        mlflow.set_tag('splice.spark_version', pyspark.__version__)
-        SparkUtils.log_spark_model(mlflow._splice_context, model, name, run_id)
-
-    elif isinstance(model, ScikitModel):
-        mlflow.set_tag('splice.sklearn_version', sklearn.__version__)
-        SKUtils.log_sklearn_model(mlflow._splice_context, model, name, run_id)
-
-    elif isinstance(model, KerasModel):  # We can't handle keras models with a different backend
-        mlflow.set_tag('splice.keras_version', keras_version)
-        mlflow.set_tag('splice.tf_version', tf_version)
-        KerasUtils.log_keras_model(mlflow._splice_context, model, name, run_id)
-
-    else:
-        raise SpliceMachineException('Model type not supported for logging. '
-                                     'Currently we support logging Spark, H2O, SKLearn and Keras (TF backend) models. '
-                                     'You can save your model to disk, zip it and run mlflow.log_artifact to save.')
-
-    mlflow.set_tag('splice.model_name', name)  # read in backend for deployment
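A minimal logging sketch; `splice` is a previously constructed PySpliceContext and `model` a fitted model of one of the supported flavors (both assumed here, not defined in this excerpt):

```python
# A splice context must be registered before any artifact-storing operation.
mlflow.register_splice_context(splice)
with mlflow.start_run(run_name='training'):
    mlflow.log_model(model, name='my_model')
```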
- -
[docs]@_mlflow_patch('start_run') -def _start_run(run_id=None, tags=None, experiment_id=None, run_name=None, nested=False): - """ - Start a new run - - :Example: - .. code-block:: python - - mlflow.start_run(run_name='my_run')\n - # or\n - with mlflow.start_run(run_name='my_run'): - ... - - - :param tags: a dictionary containing metadata about the current run. \ - For example: \ - { \ - 'team': 'pd', \ - 'purpose': 'r&d' \ - } - :param run_name: (str) an optional name for the run to show up in the MLFlow UI. [Default None] - :param run_id: (str) if you want to reincarnate an existing run, pass in the run id [Default None] - :param experiment_id: (int) if you would like to create an experiment/use one for this run [Default None] - :param nested: (bool) Controls whether run is nested in parent run. True creates a nest run [Default False] - :return: (ActiveRun) the mlflow active run object - """ - # Get the current running transaction ID for time travel/data governance - _check_for_splice_ctx() - db_connection = mlflow._splice_context.getConnection() - prepared_statement = db_connection.prepareStatement('CALL SYSCS_UTIL.SYSCS_GET_CURRENT_TRANSACTION()') - x = prepared_statement.executeQuery() - x.next() - timestamp = x.getLong(1) - prepared_statement.close() - - tags = tags if tags else {} - tags['mlflow.user'] = get_user() - tags['DB Transaction ID'] = timestamp - - orig = gorilla.get_original_attribute(mlflow, "start_run") - active_run = orig(run_id=run_id, experiment_id=experiment_id, run_name=run_name, nested=nested) - - for key in tags: - mlflow.set_tag(key, tags[key]) - if not run_id: - mlflow.set_tag('Run ID', mlflow.active_run().info.run_uuid) - if run_name: - mlflow.set_tag('mlflow.runName', run_name) - - return active_run
- - -
[docs]@_mlflow_patch('log_pipeline_stages')
-def _log_pipeline_stages(pipeline):
-    """
-    Log the pipeline stages of a Spark Pipeline as params for the run
-
-    :param pipeline: (PipelineModel) fitted or unfitted pipeline
-    :return: None
-    """
-    for stage_number, pipeline_stage in enumerate(SparkUtils.get_stages(pipeline)):
-        readable_stage_name = SparkUtils.readable_pipeline_stage(pipeline_stage)
-        mlflow.log_param('Stage' + str(stage_number), readable_stage_name)
- - -
[docs]@_mlflow_patch('log_feature_transformations') -def _log_feature_transformations(unfit_pipeline): - """ - Log feature transformations for an unfit spark pipeline - Logs --> feature movement through the pipeline - - :param unfit_pipeline: (PipelineModel) unfit spark pipeline to log - :return: None - """ - transformations = defaultdict(lambda: [[], None]) # transformations, outputColumn - - for stage in SparkUtils.get_stages(unfit_pipeline): - input_cols, output_col = SparkUtils.get_cols(stage, get_input=True), SparkUtils.get_cols(stage, get_input=False) - if input_cols and output_col: # make sure it could parse transformer - for column in input_cols: - first_column_found = find_inputs_by_output(transformations, column) - if first_column_found: # column is not original - for f in first_column_found: - transformations[f][1] = output_col - transformations[f][0].append( - SparkUtils.readable_pipeline_stage(stage)) - else: - transformations[column][1] = output_col - transformations[column][0].append(SparkUtils.readable_pipeline_stage(stage)) - - for column in transformations: - param_value = ' -> '.join([column] + transformations[column][0] + - [transformations[column][1]]) - mlflow.log_param('Column- ' + column, param_value)
- - -
[docs]@_mlflow_patch('log_model_params') -def _log_model_params(pipeline_or_model): - """ - Log the parameters of a fitted spark model or a model stage of a fitted spark pipeline - - :param pipeline_or_model: fitted spark pipeline/fitted spark model - """ - model = SparkUtils.get_model_stage(pipeline_or_model) - - mlflow.log_param('model', SparkUtils.readable_pipeline_stage(model)) - if hasattr(model, '_java_obj'): - verbose_parameters = SparkUtils.parse_string_parameters(model._java_obj.extractParamMap()) - elif hasattr(model, 'getClassifier'): - verbose_parameters = SparkUtils.parse_string_parameters( - model.getClassifier()._java_obj.extractParamMap()) - else: - raise Exception("Could not parse model type: " + str(model)) - for param in verbose_parameters: - try: - value = float(verbose_parameters[param]) - mlflow.log_param(param.split('-')[0], value) - except: - mlflow.log_param(param.split('-')[0], verbose_parameters[param])
- - -
[docs]@_mlflow_patch('timer')
-@contextmanager
-def _timer(timer_name, param=True):
-    """
-    Context manager that times a block of code and logs the elapsed time
-
-    :Example:
-        .. code-block:: python
-
-            with mlflow.timer('my_timer'): \n
-                ...
-
-    :param timer_name: (str) the name of the timer
-    :param param: (bool) whether or not to log the timer as a param (default=True). If False, logs it as a metric.
-    :return: None
-    """
-    try:
-        print(f'Starting Code Block {timer_name}...', end=' ')
-        t0 = time.time()
-        yield
-    finally:
-        t1 = time.time() - t0
-        # Syntactic Sugar
-        (mlflow.log_param if param else mlflow.log_metric)(timer_name, t1)
-        print('Done.')
-        print(
-            f"Code Block {timer_name}:\nRan in {round(t1, 3)} secs\nRan in {round(t1 / 60, 3)} mins"
-        )
- - -
[docs]@_mlflow_patch('download_artifact') -def _download_artifact(name, local_path, run_id=None): - """ - Download the artifact at the given run id (active default) + name to the local path - - :param name: (str) artifact name to load (with respect to the run) - :param local_path: (str) local path to download the model to. This path MUST include the file extension - :param run_id: (str) the run id to download the artifact from. Defaults to active run - :return: None - """ - _check_for_splice_ctx() - file_ext = path.splitext(local_path)[1] - - run_id = run_id or mlflow.active_run().info.run_uuid - blob_data, f_ext = SparkUtils.retrieve_artifact_stream(mlflow._splice_context, run_id, name) - - if not file_ext: # If the user didn't provide the file (ie entered . as the local_path), fill it in for them - local_path += f'/{name}.{f_ext}' - - with open(local_path, 'wb') as artifact_file: - artifact_file.write(blob_data)
- -
[docs]@_mlflow_patch('get_model_name') -def _get_model_name(run_id): - """ - Gets the model name associated with a run or None - - :param run_id: (str) the run_id that the model is stored under - :return: (str or None) The model name if it exists - """ - return _CLIENT.get_run(run_id).data.tags.get('splice.model_name')
- -
[docs]@_mlflow_patch('load_model') -def _load_model(run_id=None, name=None): - """ - Download and deserialize a serialized model - - :param run_id: the id of the run to get a model from - (the run must have an associated model with it named spark_model) - :param name: the name of the model in the database - """ - _check_for_splice_ctx() - run_id = run_id or mlflow.active_run().info.run_uuid - name = name or _get_model_name(run_id) - if not name: - raise SpliceMachineException(f"Uh Oh! Looks like there isn't a model logged with this run ({run_id})!" - "If there is, pass in the name= parameter to this function") - model_blob, file_ext = SparkUtils.retrieve_artifact_stream(mlflow._splice_context, run_id, name) - - if file_ext == FileExtensions.spark: - model = SparkUtils.load_spark_model(mlflow._splice_context, model_blob) - elif file_ext == FileExtensions.h2o: - model = H2OUtils.load_h2o_model(model_blob) - elif file_ext == FileExtensions.sklearn: - model = SKUtils.load_sklearn_model(model_blob) - elif file_ext == FileExtensions.keras: - model = KerasUtils.load_keras_model(model_blob) - else: - raise SpliceMachineException(f'Model extension {file_ext} was not a supported model type. ' - f'Supported model extensions are {FileExtensions.get_valid()}') - - return model
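A reload sketch; the run id and artifact name below are placeholders for values produced by an earlier `log_model` call:

```python
# Fetch and deserialize the model logged under a previous run.
model = mlflow.load_model(run_id='a1b2c3d4e5', name='my_model')
```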
- - -
[docs]@_mlflow_patch('log_artifact') -def _log_artifact(file_name, name=None, run_uuid=None): - """ - Log an artifact for the active run - - :Example: - .. code-block:: python - - with mlflow.start_run():\n - mlflow.log_artifact('my_image.png') - - :param file_name: (str) the name of the file name to log - :param name: (str) the name of the run relative name to store the model under - :param run_uuid: (str) the run uuid of a previous run, if none, defaults to current run - :return: None - - :NOTE: - We do not currently support logging directories. If you would like to log a directory, please zip it first and log the zip file - """ - _check_for_splice_ctx() - file_ext = path.splitext(file_name)[1].lstrip('.') - - with open(file_name, 'rb') as artifact: - byte_stream = bytearray(bytes(artifact.read())) - - run_id = run_uuid or mlflow.active_run().info.run_uuid - name = name or file_name - insert_artifact(mlflow._splice_context, name, byte_stream, run_id, file_ext=file_ext)
- - -
[docs]@_mlflow_patch('login_director') -def _login_director(username, password): - """ - Authenticate into the MLManager Director - - :param username: (str) database username - :param password: (str) database password - """ - mlflow._basic_auth = HTTPBasicAuth(username, password)
- - -
[docs]def _initiate_job(payload, endpoint):
-    """
-    Send a job to the initiation endpoint
-
-    :param payload: (dict) JSON payload for POST request
-    :param endpoint: (str) REST endpoint to target
-    :return: (str) Response text from request
-    """
-    if not hasattr(mlflow, '_basic_auth'):
-        raise Exception(
-            "You have not logged into MLManager director."
-            " Please run mlflow.login_director(username, password)"
-        )
-    request = requests.post(
-        get_pod_uri('mlflow', 5003, _testing=_TESTING) + endpoint,
-        auth=mlflow._basic_auth,
-        json=payload,
-    )
-
-    if request.ok:
-        print("Your Job has been submitted. View its status on port 5003 (Job Dashboard)")
-        print(request.json())  # .json is a method on the Response object, so it must be called
-        return request.json()
-    else:
-        print("Error! An error occurred while submitting your job")
-        print(request.text)
-        return request.text
- - -
[docs]@_mlflow_patch('deploy_aws')
-def _deploy_aws(app_name, region='us-east-2', instance_type='ml.m5.xlarge',
-                run_id=None, instance_count=1, deployment_mode='replace'):
-    """
-    Queue Job to deploy a run to sagemaker with the
-    given run id (found in MLFlow UI or through search API)
-
-    :param run_id: the id of the run to deploy. Will default to the current
-        run id.
-    :param app_name: the name of the app in sagemaker once deployed
-    :param region: the sagemaker region to deploy to (us-east-2,
-        us-west-1, us-west-2, eu-central-1 supported)
-    :param instance_type: the EC2 Sagemaker instance type to deploy on
-        (ml.m5.xlarge supported)
-    :param instance_count: the number of instances to load balance predictions
-        on
-    :param deployment_mode: the method to deploy; create=application will fail
-        if an app with the name specified already exists; replace=application
-        in sagemaker will be replaced with this one if app already exists;
-        add=add the specified model to a preexisting application (not recommended)
-    """
-    # get run from mlflow
-    print("Processing...")
-    time.sleep(3)  # give the mlflow server time to register the artifact, if necessary
-
-    supported_aws_regions = ['us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1']
-    supported_instance_types = ['ml.m5.xlarge']
-    supported_deployment_modes = ['replace', 'add']
-
-    # data validation
-    if region not in supported_aws_regions:
-        raise Exception("Region must be in list: " + str(supported_aws_regions))
-    if instance_type not in supported_instance_types:
-        raise Exception("Instance type must be in list: " + str(supported_instance_types))
-    if deployment_mode not in supported_deployment_modes:
-        raise Exception("Deployment mode must be in list: " + str(supported_deployment_modes))
-
-    request_payload = {
-        'handler_name': 'DEPLOY_AWS', 'run_id': run_id if run_id else mlflow.active_run().info.run_uuid,
-        'region': region, 'user': get_user(),
-        'instance_type': instance_type, 'instance_count': instance_count,
-        'deployment_mode': deployment_mode, 'app_name': app_name
-    }
-
-    return _initiate_job(request_payload, '/api/rest/initiate')
- - -
[docs]@_mlflow_patch('deploy_azure') -def _deploy_azure(endpoint_name, resource_group, workspace, run_id=None, region='East US', - cpu_cores=0.1, allocated_ram=0.5, model_name=None): - """ - Deploy a given run to AzureML. - - :param endpoint_name: (str) the name of the endpoint in AzureML when deployed to - Azure Container Services. Must be unique. - :param resource_group: (str) Azure Resource Group for model. Automatically created if - it doesn't exist. - :param workspace: (str) the AzureML workspace to deploy the model under. - Will be created if it doesn't exist - :param run_id: (str) if specified, will deploy a previous run ( - must have an spark model logged). Otherwise, will default to the active run - :param region: (str) AzureML Region to deploy to: Can be East US, East US 2, Central US, - West US 2, North Europe, West Europe or Japan East - :param cpu_cores: (float) Number of CPU Cores to allocate to the instance. - Can be fractional. Default=0.1 - :param allocated_ram: (float) amount of RAM, in GB, allocated to the container. - Default=0.5 - :param model_name: (str) If specified, this will be the name of the model in AzureML. - Otherwise, the model name will be randomly generated. - """ - supported_regions = ['East US', 'East US 2', 'Central US', - 'West US 2', 'North Europe', 'West Europe', 'Japan East'] - - if region not in supported_regions: - raise Exception("Region must be in list: " + str(supported_regions)) - if cpu_cores <= 0: - raise Exception("Invalid CPU Count") - if allocated_ram <= 0: - raise Exception("Invalid Allocated RAM") - - request_payload = { - 'handler_name': 'DEPLOY_AZURE', - 'endpoint_name': endpoint_name, - 'resource_group': resource_group, - 'workspace': workspace, - 'run_id': run_id if run_id else mlflow.active_run().info.run_uuid, - 'cpu_cores': cpu_cores, - 'allocated_ram': allocated_ram, - 'model_name': model_name - } - return _initiate_job(request_payload, '/api/rest/initiate')
- -
[docs]@_mlflow_patch('deploy_database') -def _deploy_db(db_schema_name, - db_table_name, - run_id, - primary_key=None, - df = None, - create_model_table = False, - model_cols = None, - classes=None, - sklearn_args={}, - verbose=False, - pred_threshold = None, - replace=False) -> None: - """ - Deploy a trained (currently Spark, Sklearn, Keras or H2O) model to the Database. - This either creates a new table or alters an existing table in the database (depending on parameters passed) - - :param db_schema_name: (str) the schema name to deploy to. - :param db_table_name: (str) the table name to deploy to. - :param run_id: (str) The run_id to deploy the model on. The model associated with this run will be deployed - :param primary_key: (List[Tuple[str, str]]) List of column + SQL datatype to use for the primary/composite key. \n - * If you are deploying to a table that already exists, it must already have a primary key, and this parameter will be ignored. \n - * If you are creating the table in this function, you MUST pass in a primary key - :param df: (Spark or Pandas DF) The dataframe used to train the model \n - | NOTE: The columns in this df are the ones that will be used to create the table unless specified by model_cols - :param create_model_table: Whether or not to create the table from the dataframe. Default false. This - Will ONLY be used if the table does not exist and a dataframe is passed in - :param model_cols: (List[str]) The columns from the table to use for the model. If None, all columns in the table - will be passed to the model. If specified, the columns will be passed to the model - IN THAT ORDER. The columns passed here must exist in the table. - :param classes: (List[str]) The classes (prediction labels) for the model being deployed.\n - NOTE: If not supplied, the table will have default column names for each class - :param sklearn_args: (dict{str: str}) Prediction options for sklearn models: \n - * Available key value options: \n - * 'predict_call': 'predict', 'predict_proba', or 'transform' \n - * Determines the function call for the model \n - * If blank, predict will be used (or transform if model doesn't have predict) \n - * 'predict_args': 'return_std' or 'return_cov' - For Bayesian and Gaussian models \n - * Only one can be specified \n - * If the model does not have the option specified, it will be ignored. - :param verbose: (bool) Whether or not to print out the queries being created. Helpful for debugging - :param pred_threshold: (double) A prediction threshold for *Keras* binary classification models \n - * If the model type isn't Keras, this parameter will be ignored \n - NOTE: If the model type is Keras, the output layer has 1 node, and pred_threshold is None, \ - you will NOT receive a class prediction, only the output of the final layer (like model.predict()). \ - If you want a class prediction \ - for your binary classification problem, you MUST pass in a threshold. - :param replace: (bool) whether or not to replace a currently existing model. This param does not yet work - :return: None\n - - This function creates the following IF you are creating a table from the dataframe \n - * The model table where run_id is the run_id passed in. This table will have a column for each feature in the feature vector. It will also contain:\n - * USER which is the current user who made the request - * EVAL_TIME which is the CURRENT_TIMESTAMP - * the PRIMARY KEY column(s) passed in - * PREDICTION. The prediction of the model. 
If the :classes: param is not filled in, these will be default values for classification models
-        * A column for each class of the predictor with the value being the probability/confidence of the model if applicable\n
-        IF you are deploying to an existing table, the table will be altered to include the columns above. \n
-    :NOTE:
-        .. code-block:: text
-
-            The columns listed above are default value columns.\n
-            This means that on a SQL insert into the table, \n
-            you do not need to reference or insert values into them.\n
-            They are automatically taken care of.\n
-            Set verbose=True in the function call for more information
-
-    The following will also be created for all deployments: \n
-    * A trigger that runs on (after) insertion to the data table that runs an INSERT into the prediction table, \
-        calling the PREDICT function, passing in the row of data as well as the schema of the dataset, and the run_id of the model to run \n
-    * A trigger that runs on (after) insertion to the prediction table that calls an UPDATE to the row inserted, \
-        parsing the prediction probabilities and filling in proper column values
-    """
-    _check_for_splice_ctx()
-
-    # Get the model
-    run_id = run_id if run_id else mlflow.active_run().info.run_uuid
-    fitted_model = _load_model(run_id)
-
-    # Param checking. Can't create model table without a dataframe
-    if create_model_table and df is None:  # Need to compare to None, truth value of df is ambiguous
-        raise SpliceMachineException("If you'd like to create the model table as part of this deployment, you must pass in a dataframe")
-    # Make sure primary_key is valid format
-    if create_model_table and not primary_key:
-        raise SpliceMachineException("If you'd like to create the model table as part of this deployment, you must provide the primary key(s)")
-
-    # FIXME: We need to use the dbConnection so we can set a savepoint and rollback on failure
-    classes = classes if classes else []
-
-    schema_table_name = f'{db_schema_name}.{db_table_name}'
-
-    feature_columns, schema_types = get_feature_columns_and_types(mlflow._splice_context, df, create_model_table,
-                                                                  model_cols, schema_table_name)
-
-    # Validate primary key is correct, or that provided table has primary keys
-    primary_key = validate_primary_key(mlflow._splice_context, primary_key, db_schema_name, db_table_name) or primary_key
-
-    library = get_model_library(fitted_model)
-    if library == DBLibraries.MLeap:
-        # Mleap needs a dataframe in order to serialize the model
-        df = get_df_for_mleap(mlflow._splice_context, schema_table_name, df)
-
-    model_type, classes, model_already_exists = ModelUtils[library].prep_model_for_deployment(mlflow._splice_context,
-                                                                                              fitted_model, classes, run_id,
-                                                                                              df, pred_threshold, sklearn_args)
-
-    print(f'Deploying model {run_id} to table {schema_table_name}')
-
-    # Create the schema of the table (we use this a few times)
-    schema_str = ''
-    for i in feature_columns:
-        schema_str += f'\t{i} {CONVERSIONS[schema_types[str(i)]]},'
-
-    try:
-        # Create/Alter table 1: DATA
-        if create_model_table:
-            print('Creating model table ...', end=' ')
-            create_model_deployment_table(mlflow._splice_context, run_id, schema_table_name, schema_str, classes, primary_key, model_type, verbose)
-            print('Done.')
-        else:
-            print('Altering provided table for deployment')
-            alter_model_table(mlflow._splice_context, run_id, schema_table_name, classes, model_type, verbose)
-
-        # Create Trigger 1: model prediction
-        print('Creating model prediction trigger ...', end=' ')
-        if model_type in (H2OModelType.KEY_VALUE,
SklearnModelType.KEY_VALUE, KerasModelType.KEY_VALUE): - create_vti_prediction_trigger(mlflow._splice_context, schema_table_name, run_id, feature_columns, schema_types, - schema_str, primary_key, classes, model_type, sklearn_args, pred_threshold, verbose) - else: - create_prediction_trigger(mlflow._splice_context, schema_table_name, run_id, feature_columns, schema_types, - schema_str, primary_key, model_type, verbose) - print('Done.') - - if model_type in (SparkModelType.CLASSIFICATION, SparkModelType.CLUSTERING_WITH_PROB, - H2OModelType.CLASSIFICATION): - # Create Trigger 2: model parsing - print('Creating parsing trigger ...', end=' ') - create_parsing_trigger(mlflow._splice_context, schema_table_name, primary_key, run_id, classes, model_type, verbose) - print('Done.') - - add_model_to_metadata(mlflow._splice_context, run_id, schema_table_name) - - - except Exception as e: - import traceback - exc = 'Model deployment failed. Rolling back transactions.\n' - print(exc) - drop_tables_on_failure(mlflow._splice_context, schema_table_name, run_id, model_already_exists) - if not verbose: - exc += 'For more insight into the SQL statement that generated this error, rerun with verbose=True' - traceback.print_exc() - raise SpliceMachineException(exc) - - print('Model Deployed.')
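A deployment sketch under stated assumptions: all identifiers are hypothetical, `train_df` is the training DataFrame, and creating the serving table as part of deployment requires both a primary key and that DataFrame.

```python
# deploy_database is the patched name for _deploy_db (per the decorator above).
mlflow.deploy_database('MYSCHEMA', 'MODEL_SERVING', run_id='a1b2c3d4e5',
                       primary_key=[('ID', 'INT')], df=train_df,
                       create_model_table=True)
```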
- -
[docs]@_mlflow_patch('get_deployed_models') -def _get_deployed_models() -> PandasDF: - """ - Get the currently deployed models in the database - :return: Pandas df - """ - - return mlflow._splice_context.df( - """ - SELECT * FROM MLMANAGER.LIVE_MODEL_STATUS - """ - ).toPandas()
-
-
-def apply_patches():
-    """
-    Apply all the Gorilla patches; \
-    all Gorilla patches MUST be prefixed with '_' before their destination in MLflow
-    """
-    targets = [_register_splice_context, _lp, _lm, _timer, _log_artifact, _log_feature_transformations,
-               _log_model_params, _log_pipeline_stages, _log_model, _load_model, _download_artifact,
-               _start_run, _current_run_id, _current_exp_id, _deploy_aws, _deploy_azure, _deploy_db, _login_director,
-               _get_run_ids_by_name, _get_deployed_models]
-
-    for target in targets:
-        gorilla.apply(gorilla.Patch(mlflow, target.__name__.lstrip('_'), target, settings=_GORILLA_SETTINGS))
-
-
-def main():
-    mlflow.set_tracking_uri(_TRACKING_URL)
-    apply_patches()
-
-
-main()
-
- 
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/splicemachine/notebook.xhtml b/docs/_build/epub/_modules/splicemachine/notebook.xhtml
deleted file mode 100644
index 3ec2cb80..00000000
--- a/docs/_build/epub/_modules/splicemachine/notebook.xhtml
+++ /dev/null
@@ -1,93 +0,0 @@

Source code for splicemachine.notebook

-import random
-from IPython.display import IFrame, HTML, display
-from pyspark import SparkContext
-from os import environ as env_vars
-
-
[docs]def hide_toggle(toggle_next=False):
-    """
-    Function to add a toggle at the bottom of Jupyter Notebook cells to allow the entire cell to be collapsed.
-
-    :param toggle_next: (bool) determines whether the toggle affects the current cell or the next cell
-
-    Usage: from splicemachine.notebook import hide_toggle
-           hide_toggle()
-    """
-    this_cell = """$('div.cell.code_cell.rendered.selected')"""
-    next_cell = this_cell + '.next()'
-
-    toggle_text = 'Toggle show/hide'  # text shown on toggle link
-    target_cell = this_cell  # target cell to control with toggle
-    js_hide_current = ''  # bit of JS to permanently hide code in current cell (only when toggling next cell)
-
-    if toggle_next:
-        target_cell = next_cell
-        toggle_text += ' next cell'
-        js_hide_current = this_cell + '.find("div.input").hide();'
-
-    js_f_name = 'code_toggle_{}'.format(str(random.randint(1, 2 ** 64)))
-
-    html = """
-        <script>
-            function {f_name}() {{
-                {cell_selector}.find('div.input').toggle();
-            }}
-            {js_hide_current}
-        </script>
-        <a href="javascript:{f_name}()"><button style='color:black'>{toggle_text}</button></a>
-    """.format(
-        f_name=js_f_name,
-        cell_selector=target_cell,
-        js_hide_current=js_hide_current,
-        toggle_text=toggle_text
-    )
-
-    return HTML(html)
- -
[docs]def get_mlflow_ui(): - """Display the MLflow UI as an IFrame""" - display(HTML('<font size=\"+1\"><a target=\"_blank\" href=/mlflow>MLFlow UI</a></font>')) - return IFrame(src='/mlflow', width='100%', height='500px')
- -
[docs]def get_spark_ui(port=None, spark_session=None): - """ - Display the Spark Jobs UI as an IFrame at a specific port - :param port: (int or str) The port of the desired spark session - :param spark_session: (SparkSession) Optionally the Spark Session associated with the desired UI - :return: - """ - if port: - pass - elif spark_session: - port = spark_session.sparkContext.uiWebUrl.split(':')[-1] - elif SparkContext._active_spark_context: - port = SparkContext._active_spark_context.uiWebUrl.split(':')[-1] - else: - raise Exception('No parameters passed and no active Spark Session found.\n' - 'Either pass in the active Spark Session into the "spark_session" parameter or the port of that session into the "port" parameter.\n'\ - 'You can find the port by running spark.sparkContext.uiWebUrl and taking the number after the \':\'') - user = env_vars.get('JUPYTERHUB_USER','user') - display(HTML(f'<font size=\"+1\"><a target=\"_blank\" href=/splicejupyter/user/{user}/sparkmonitor/{port}>Spark UI</a></font>')) - return IFrame(src=f'/splicejupyter/user/{user}/sparkmonitor/{port}', width='100%', height='500px')
- 
\ No newline at end of file
diff --git a/docs/_build/epub/_modules/splicemachine/spark/context.xhtml b/docs/_build/epub/_modules/splicemachine/spark/context.xhtml
deleted file mode 100644
index 8e405408..00000000
--- a/docs/_build/epub/_modules/splicemachine/spark/context.xhtml
+++ /dev/null
@@ -1,664 +0,0 @@

Source code for splicemachine.spark.context

-"""
-Copyright 2020 Splice Machine, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import print_function
-
-import os
-
-from py4j.java_gateway import java_import
-from pyspark.sql import DataFrame
-from pyspark.sql.types import _parse_datatype_json_string
-from splicemachine.spark.constants import CONVERSIONS
-
-
-
[docs]class PySpliceContext:
-    """
-    This class implements a SpliceMachineContext object (similar to the SparkContext object)
-    """
-    _spliceSparkPackagesName = "com.splicemachine.spark.splicemachine.*"
-
-    def _splicemachineContext(self):
-        return self.jvm.com.splicemachine.spark.splicemachine.SplicemachineContext(self.jdbcurl)
-
-    def __init__(self, sparkSession, JDBC_URL=None, _unit_testing=False):
-        """
-        :param JDBC_URL: (string) The JDBC URL Connection String for your Splice Machine Cluster
-        :param sparkSession: (sparkContext) A SparkSession object for talking to Spark
-        """
-
-        if JDBC_URL:
-            self.jdbcurl = JDBC_URL
-        else:
-            try:
-                self.jdbcurl = os.environ['BEAKERX_SQL_DEFAULT_JDBC']
-            except KeyError:
-                raise KeyError(
-                    "Could not locate JDBC URL. If you are not running on the cloud service, "
-                    "please specify the JDBC_URL=<some url> keyword argument in the constructor"
-                )
-
-        self._unit_testing = _unit_testing
-
-        if not _unit_testing:  # Private Internal Argument to Override Using JVM
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = self.spark_sql_context._sc._jvm
-            java_import(self.jvm, self._spliceSparkPackagesName)
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions")
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils")
-            java_import(self.jvm, "scala.collection.JavaConverters._")
-            java_import(self.jvm, "com.splicemachine.derby.impl.*")
-            java_import(self.jvm, 'org.apache.spark.api.python.PythonUtils')
-            self.jvm.com.splicemachine.derby.impl.SpliceSpark.setContext(
-                self.spark_sql_context._jsc)
-            self.context = self._splicemachineContext()
-
-        else:
-            from .tests.mocked import MockedScalaContext
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = ''
-            self.context = MockedScalaContext(self.jdbcurl)
-
[docs] def toUpper(self, dataframe): - """ - Returns a dataframe with all of the columns in uppercase - - :param dataframe: (Dataframe) The dataframe to convert to uppercase - """ - for s in dataframe.schema: - s.name = s.name.upper() - # You need to re-generate the dataframe for the capital letters to take effect - return dataframe.rdd.toDF(dataframe.schema)
- -
[docs] def replaceDataframeSchema(self, dataframe, schema_table_name): - """ - Returns a dataframe with all column names replaced with the proper string case from the DB table - - :param dataframe: (Dataframe) A dataframe with column names to convert - :param schema_table_name: (str) The schema.table with the correct column cases to pull from the database - :return: (DataFrame) A Spark DataFrame with the replaced schema - """ - schema = self.getSchema(schema_table_name) - # Fastest way to replace the column case if changed - dataframe = dataframe.rdd.toDF(schema) - return dataframe
- -
[docs] def getConnection(self): - """ - Return a connection to the database - """ - return self.context.getConnection()
- -
[docs] def tableExists(self, schema_and_or_table_name, table_name=None): - """ - Check whether or not a table exists - - :Example: - .. code-block:: python - - splice.tableExists('schemaName.tableName')\n - # or\n - splice.tableExists('schemaName', 'tableName') - - :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name - :return: (bool) whether or not the table exists - """ - if table_name: - return self.context.tableExists(schema_and_or_table_name, table_name) - else: - return self.context.tableExists(schema_and_or_table_name)
- -
[docs] def dropTable(self, schema_and_or_table_name, table_name=None): - """ - Drop a specified table. - - :Example: - .. code-block:: python - - splice.dropTable('schemaName.tableName') \n - # or\n - splice.dropTable('schemaName', 'tableName') - - :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name - :return: None - """ - if table_name: - return self.context.dropTable(schema_and_or_table_name, table_name) - else: - return self.context.dropTable(schema_and_or_table_name)
- -
[docs] def df(self, sql): - """ - Return a Spark Dataframe from the results of a Splice Machine SQL Query - - :Example: - .. code-block:: python - - df = splice.df('SELECT * FROM MYSCHEMA.TABLE1 WHERE COL2 > 3') - - :param sql: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: (Dataframe) A Spark DataFrame containing the results - """ - return DataFrame(self.context.df(sql), self.spark_sql_context)
- -
[docs] def insert(self, dataframe, schema_table_name, to_upper=False): - """ - Insert a dataframe into a table (schema.table). - - :param dataframe: (Dataframe) The dataframe you would like to insert - :param schema_table_name: (str) The table in which you would like to insert the DF - :param to_upper: (bool) If the dataframe columns should be converted to uppercase before insertion - If False, the columns will be inserted with their current case. [Default False] - :return: None - """ - if to_upper: - dataframe = self.toUpper(dataframe) - return self.context.insert(dataframe._jdf, schema_table_name)
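A short insert sketch, assuming the `splice` context from above; the schema and table names are illustrative, and the dataframe's columns must match the target table's:

```
# Pull some rows and insert them into another table (names are placeholders)
df = splice.df('SELECT * FROM MYSCHEMA.SOURCE_TABLE')
splice.insert(df, 'MYSCHEMA.MYTABLE', to_upper=True)
```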
- -
[docs] def insertWithStatus(self, dataframe, schema_table_name, statusDirectory, badRecordsAllowed): - """ - Insert a dataframe into a table (schema.table) while tracking and limiting records that fail to insert. - The status directory and number of badRecordsAllowed allow for duplicate primary keys to be - written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written - to the status directory. - - :param dataframe: (Dataframe) The dataframe you would like to insert - :param schema_table_name: (str) The table in which you would like to insert the dataframe - :param statusDirectory: (str) The status directory where the bad records file will be created - :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited - :return: None - """ - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - return self.context.insert(dataframe._jdf, schema_table_name, statusDirectory, badRecordsAllowed)
- -
[docs] def insertRdd(self, rdd, schema, schema_table_name): - """ - Insert an rdd into a table (schema.table) - - :param rdd: (RDD) The RDD you would like to insert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to insert the RDD - :return: None - """ - return self.insert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def insertRddWithStatus(self, rdd, schema, schema_table_name, statusDirectory, badRecordsAllowed): - """ - Insert an rdd into a table (schema.table) while tracking and limiting records that fail to insert. \ - The status directory and number of badRecordsAllowed allow for duplicate primary keys to be \ - written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written \ - to the status directory. - - :param rdd: (RDD) The RDD you would like to insert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to insert the dataframe - :param statusDirectory: (str) The status directory where the bad records file will be created - :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited - :return: None - """ - return self.insertWithStatus( - self.createDataFrame(rdd, schema), - schema_table_name, - statusDirectory, - badRecordsAllowed - )
- -
[docs] def upsert(self, dataframe, schema_table_name): - """ - Upsert the data from a dataframe into a table (schema.table). - - :param dataframe: (Dataframe) The dataframe you would like to upsert - :param schema_table_name: (str) The table in which you would like to upsert the dataframe - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - return self.context.upsert(dataframe._jdf, schema_table_name)
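Upsert follows the same shape; a sketch assuming the hypothetical MYSCHEMA.MYTABLE has primary keys defined, since rows are matched on them:

```
# Rows whose primary keys already exist are updated; new keys are inserted
splice.upsert(df, 'MYSCHEMA.MYTABLE')
```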
- -
[docs] def upsertWithRdd(self, rdd, schema, schema_table_name): - """ - Upsert the data from an RDD into a table (schema.table). - - :param rdd: (RDD) The RDD you would like to upsert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to upsert the RDD - :return: None - """ - return self.upsert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def delete(self, dataframe, schema_table_name): - """ - Delete records from a table by joining on the primary keys present in the dataframe. - Be careful with column naming and case sensitivity. - - :param dataframe: (Dataframe) The dataframe containing the rows you would like to delete - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.context.delete(dataframe._jdf, schema_table_name)
- -
[docs] def deleteWithRdd(self, rdd, schema, schema_table_name): - """ - Delete records using an rdd based on joining by primary keys from the rdd. - Be careful with column naming and case sensitivity. - - :param rdd: (RDD) The RDD containing the primary keys you would like to delete from the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.delete( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def update(self, dataframe, schema_table_name): - """ - Update data from a dataframe for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param dataframe: (Dataframe) The dataframe you would like to update - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - return self.context.update(dataframe._jdf, schema_table_name)
- -
[docs] def updateWithRdd(self, rdd, schema, schema_table_name): - """ - Update data from an rdd for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param rdd: (RDD) The RDD you would like to use for updating the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.update( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def getSchema(self, schema_table_name): - """ - Return the schema via JDBC. - - :param schema_table_name: (str) Table name - :return: (StructType) PySpark StructType representation of the table - """ - return _parse_datatype_json_string(self.context.getSchema(schema_table_name).json())
- -
[docs] def execute(self, query_string): - ''' - execute a query over JDBC - - :Example: - .. code-block:: python - - splice.execute('DELETE FROM TABLE1 WHERE col2 > 3') - - :param query_string: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: None - ''' - return self.context.execute(query_string)
- -
[docs] def executeUpdate(self, query_string): - ''' - execute a DML or DDL query (UPDATE, DELETE, DROP, etc.) - - :Example: - .. code-block:: python - - splice.executeUpdate('DROP TABLE table1') - - :param query_string: (string) SQL Query (eg. DROP TABLE table1) - :return: None - ''' - return self.context.executeUpdate(query_string)
- -
[docs] def internalDf(self, query_string): - ''' - SQL to Dataframe translation (Lazy). Runs the query inside Splice Machine and sends the results to the Spark Adapter app - - :param query_string: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: (DataFrame) pyspark dataframe containing the result of query_string - ''' - return DataFrame(self.context.internalDf(query_string), self.spark_sql_context)
- -
[docs] def rdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - - :param schema_table_name: (string) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.df('select '+colnames+' from '+schema_table_name).rdd
- -
[docs] def internalRdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - Runs the projection inside Splice Machine and sends the results to the Spark Adapter app as an rdd - - :param schema_table_name: (str) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.internalDf('select '+colnames+' from '+schema_table_name).rdd
- -
[docs] def truncateTable(self, schema_table_name): - """ - Truncate a table - - :param schema_table_name: (str) the full table name in the format "schema.table_name" which will be truncated - :return: None - """ - return self.context.truncateTable(schema_table_name)
- -
[docs] def analyzeSchema(self, schema_name): - """ - Analyze the schema - - :param schema_name: (str) The schema name for which stats info will be collected - :return: None - """ - return self.context.analyzeSchema(schema_name)
- -
[docs] def analyzeTable(self, schema_table_name, estimateStatistics=False, samplePercent=10.0): - """ - Collect stats info on a table - - :param schema_table_name: full table name in the format of 'schema.table' - :param estimateStatistics: whether to use estimate statistics - :param samplePercent: the percentage of rows to be sampled. - :return: None - """ - return self.context.analyzeTable(schema_table_name, estimateStatistics, float(samplePercent))
- -
[docs] def export(self, - dataframe, - location, - compression=False, - replicationCount=1, - fileEncoding=None, - fieldSeparator=None, - quoteCharacter=None): - """ - Export a dataFrame in CSV - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param replicationCount: (int) Replication used for HDFS write - :param fileEncoding: (str) fileEncoding or None, defaults to UTF-8 - :param fieldSeparator: (str) fieldSeparator or None, defaults to ',' - :param quoteCharacter: (str) quoteCharacter or None, defaults to '"' - :return: None - """ - return self.context.export(dataframe._jdf, location, compression, replicationCount, - fileEncoding, fieldSeparator, quoteCharacter)
- -
[docs] def exportBinary(self, dataframe, location, compression, e_format='parquet'): - """ - Export a dataFrame in binary format - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param e_format: (str) Binary format to be used, currently only 'parquet' is supported. [Default 'parquet'] - :return: None - """ - return self.context.exportBinary(dataframe._jdf, location, compression, e_format)
- -
[docs] def bulkImportHFile(self, dataframe, schema_table_name, options): - """ - Bulk Import HFile from a dataframe into a schema.table - - :param dataframe: (DataFrame) - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: None - """ - optionsMap = self.jvm.java.util.HashMap() - for k, v in options.items(): - optionsMap.put(k, v) - return self.context.bulkImportHFile(dataframe._jdf, schema_table_name, optionsMap)
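A sketch of the options dictionary (the staging directory path is a placeholder; per the docstring, bulkImportDirectory is the one required key):

```
options = {'bulkImportDirectory': '/tmp/bulk_import'}  # placeholder staging directory
splice.bulkImportHFile(df, 'MYSCHEMA.MYTABLE', options)
```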
- -
[docs] def bulkImportHFileWithRdd(self, rdd, schema, schema_table_name, options): - """ - Bulk Import HFile from an rdd into a schema.table - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: None - """ - return self.bulkImportHFile( - self.createDataFrame(rdd, schema), - schema_table_name, - options - )
- -
[docs] def splitAndInsert(self, dataframe, schema_table_name, sample_fraction): - """ - Sample the dataframe, split the table, and insert a dataFrame into a schema.table. - This corresponds to an INSERT INTO ... SELECT statement - - :param dataframe: (DataFrame) Input data - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param sample_fraction: (float) A value between 0 and 1 that specifies the percentage of data in the dataFrame \ - that should be sampled to determine the splits. \ - For example, specify 0.005 if you want 0.5% of the data sampled. - :return: None - """ - return self.context.splitAndInsert(dataframe._jdf, schema_table_name, float(sample_fraction))
- -
[docs] def createDataFrame(self, rdd, schema): - """ - Creates a dataframe from a given rdd and schema. - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :return: (DataFrame) The Spark DataFrame - """ - return self.spark_session.createDataFrame(rdd, schema)
- - def _generateDBSchema(self, dataframe, types={}): - """ - Generate the schema for create table - """ - # convert keys and values to uppercase in the types dictionary - types = dict((key.upper(), val) for key, val in types.items()) - db_schema = [] - # convert dataframe to have all uppercase column names - dataframe = self.toUpper(dataframe) - # i contains the name and pyspark datatype of the column - for i in dataframe.schema: - if i.name.upper() in types: - print('Column {} is of type {}'.format( - i.name.upper(), i.dataType)) - dt = types[i.name.upper()] - else: - dt = CONVERSIONS[str(i.dataType)] - db_schema.append((i.name.upper(), dt)) - - return db_schema - - def _getCreateTableSchema(self, schema_table_name, new_schema=False): - """ - Parse schema for new table; if it is needed, create it - """ - # try to get schema and table, else set schema to splice - if '.' in schema_table_name: - schema, table = schema_table_name.upper().split('.') - else: - schema = self.getConnection().getCurrentSchemaName() - table = schema_table_name.upper() - # check for new schema - if new_schema: - print('Creating schema {}'.format(schema)) - self.execute('CREATE SCHEMA {}'.format(schema)) - - return schema, table - - def _dropTableIfExists(self, schema_table_name, table_name=None): - """ - Drop table if it exists - """ - if self.tableExists(schema_and_or_table_name=schema_table_name, table_name=table_name): - print('Table exists. Dropping table') - self.dropTable(schema_and_or_table_name=schema_table_name, table_name=table_name) - -
[docs] def dropTableIfExists(self, schema_table_name, table_name=None): - """ - Drops a table if it exists - - :Example: - .. code-block:: python - - splice.dropTableIfExists('schemaName.tableName') \n - # or\n - splice.dropTableIfExists('schemaName', 'tableName') - - :param schema_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_table_name contains only the schema name - :return: None - """ - self._dropTableIfExists(schema_table_name, table_name)
- - def _jstructtype(self, schema): - """ - Convert python StructType to java StructType - - :param schema: PySpark StructType - :return: Java Spark StructType - """ - return self.spark_session._jsparkSession.parseDataType(schema.json()) - -
[docs] def createTable(self, dataframe, schema_table_name, primary_keys=None, create_table_options=None, to_upper=False, drop_table=False): - """ - Creates a schema.table (schema_table_name) from a dataframe - - :param dataframe: (DataFrame) The Spark DataFrame to base the table off - :param schema_table_name: (str) The schema.table to create - :param primary_keys: (List[str]) The primary keys. Default None - :param create_table_options: (str) The additional table-level SQL options. Default None - :param to_upper: (bool) If the dataframe columns should be converted to uppercase before table creation. \ - If False, the table will be created with lower case columns. Default False - :param drop_table: (bool) Whether to drop the table if it exists. Default False. If False and the table exists, the function will throw an exception - :return: None - - """ - if drop_table: - self._dropTableIfExists(schema_table_name) - if to_upper: - dataframe = self.toUpper(dataframe) - primary_keys = primary_keys if primary_keys else [] - self.createTableWithSchema(schema_table_name, dataframe.schema, - keys=primary_keys, create_table_options=create_table_options)
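A createTable sketch under the same assumptions (the 'ID' key column is hypothetical):

```
splice.createTable(
    df,
    'MYSCHEMA.NEWTABLE',
    primary_keys=['ID'],   # hypothetical key column present in df
    to_upper=True,
    drop_table=True,       # drop first instead of raising if it already exists
)
```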
- -
[docs] def createTableWithSchema(self, schema_table_name, schema, keys=None, create_table_options=None): - """ - Creates a schema.table from a schema - - :param schema_table_name: str The schema.table to create - :param schema: (StructType) The schema that describes the columns of the table - :param keys: (List[str]) The primary keys. Default None - :param create_table_options: (str) The additional table-level SQL options. Default None - :return: None - """ - if keys: - keys_seq = self.jvm.PythonUtils.toSeq(keys) - else: - keys_seq = self.jvm.PythonUtils.toSeq([]) - self.context.createTable( - schema_table_name, - self._jstructtype(schema), - keys_seq, - create_table_options - )
- - -
[docs]class ExtPySpliceContext(PySpliceContext): - """ - This class implements a SplicemachineContext object from com.splicemachine.spark2 for use outside of the K8s Cloud Service - """ - _spliceSparkPackagesName = "com.splicemachine.spark2.splicemachine.*" - - def _splicemachineContext(self): - return self.jvm.com.splicemachine.spark2.splicemachine.SplicemachineContext( - self.jdbcurl, self.kafkaServers, self.kafkaPollTimeout) - - def __init__(self, sparkSession, JDBC_URL=None, kafkaServers='localhost:9092', kafkaPollTimeout=20000, _unit_testing=False): - """ - :param JDBC_URL: (string) The JDBC URL Connection String for your Splice Machine Cluster - :param sparkSession: (SparkSession) A SparkSession object for talking to Spark - :param kafkaServers: (string) Comma-separated list of Kafka broker addresses in the form host:port - :param kafkaPollTimeout: (int) Number of milliseconds to wait when polling Kafka - """ - self.kafkaServers = kafkaServers - self.kafkaPollTimeout = kafkaPollTimeout - super().__init__(sparkSession, JDBC_URL, _unit_testing)
-
- -
-
-
-
-
- - \ No newline at end of file diff --git a/docs/_build/epub/_modules/splicemachine/stats.xhtml b/docs/_build/epub/_modules/splicemachine/stats.xhtml deleted file mode 100644 index 1687f552..00000000 --- a/docs/_build/epub/_modules/splicemachine/stats.xhtml +++ /dev/null @@ -1,1309 +0,0 @@ - - - - - - - splicemachine.stats - - - - -
-
-
- -

Source code for splicemachine.stats

-import warnings
-from multiprocessing.pool import ThreadPool
-import random
-from collections import defaultdict, OrderedDict
-
-import numpy as np
-import pandas as pd
-import scipy.stats as st
-import graphviz
-from numpy.linalg import eigh
-from tqdm import tqdm
-from IPython.display import HTML
-import pyspark_dist_explore as dist_explore
-from pyspark.sql import functions as F, Row
-from pyspark.sql.types import DoubleType, ArrayType, IntegerType, StringType
-from pyspark.ml.param.shared import HasInputCol, HasOutputCol, Param
-from pyspark.ml import Pipeline, Transformer
-from pyspark.ml.classification import LogisticRegressionModel
-from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
-from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler, StandardScaler, Bucketizer, PCA
-from pyspark.ml.evaluation import RegressionEvaluator, MulticlassClassificationEvaluator, BinaryClassificationEvaluator
-from pyspark import keyword_only
-from pyspark.ml.tuning import CrossValidator, ParamGridBuilder, CrossValidatorModel
-
-
-
[docs]def get_confusion_matrix(spark, TP, TN, FP, FN): - """ - Creates a confusion matrix as a Spark DataFrame from the given counts, - which is helpful when evaluating how well your model performs - :param spark: The Spark Session - :param TP: True Positives - :param TN: True Negatives - :param FP: False Positives - :param FN: False Negatives - """ - - row = Row('', 'True', 'False') - confusion_matrix = spark._wrapped.createDataFrame([row('True', TP, FN), - row('False', FP, TN)]) - return confusion_matrix
- - -
[docs]class SpliceBaseEvaluator(object): - """ - Base ModelEvaluator - """ - - def __init__(self, spark, evaluator, supported_metrics, predictionCol="prediction", - labelCol="label"): - """ - Constructor for SpliceBaseEvaluator - :param spark: The Spark Session - :param evaluator: evaluator class from spark - :param supported_metrics: supported metrics list - :param predictionCol: prediction column - :param labelCol: label column - """ - self.spark = spark - self.ev = evaluator - self.prediction_col = predictionCol - self.label = labelCol - self.supported_metrics = supported_metrics - self.avgs = defaultdict(list)
[docs] def input(self, predictions_dataframe): - """ - Input a dataframe of predictions and accumulate each supported metric - :param predictions_dataframe: input df - :return: None - """ - for metric in self.supported_metrics: - evaluator = self.ev( - labelCol=self.label, predictionCol=self.prediction_col, metricName=metric) - self.avgs[metric].append(evaluator.evaluate(predictions_dataframe)) - print("Current {metric}: {metric_val}".format(metric=metric, - metric_val=self.avgs - [metric][-1]))
- -
[docs] def get_results(self, as_dict=False): - """ - Get Results - :param as_dict: whether to return the results as a dict or a Spark DataFrame - :return: dict of averaged metrics if as_dict is True, otherwise a Spark DataFrame - """ - computed_avgs = {} - for key in self.avgs: - computed_avgs[key] = np.mean(self.avgs[key]) - - if as_dict: - return computed_avgs - - metrics_row = Row(*self.supported_metrics) - computed_row = metrics_row(*[float(computed_avgs[i]) for i in self.supported_metrics]) - return self.spark._wrapped.createDataFrame([computed_row])
- - -
[docs]class SpliceBinaryClassificationEvaluator(SpliceBaseEvaluator): - def __init__(self, spark, predictionCol="prediction", labelCol="label", confusion_matrix=True): - self.avg_tp = [] - self.avg_tn = [] - self.avg_fn = [] - self.avg_fp = [] - self.confusion_matrix = confusion_matrix - - supported = ["areaUnderROC", "areaUnderPR", 'TPR', 'SPC', 'PPV', 'NPV', 'FPR', 'FDR', 'FNR', 'ACC', 'F1', 'MCC'] - SpliceBaseEvaluator.__init__(self, spark, BinaryClassificationEvaluator, supported, predictionCol=predictionCol, - labelCol=labelCol) - -
[docs] def input(self, predictions_dataframe): - """ - Evaluate actual vs Predicted in a dataframe - :param predictions_dataframe: the dataframe containing the label and the prediction - """ - for metric in self.supported_metrics: - if metric in ['areaUnderROC', 'areaUnderPR']: - evaluator = self.ev(labelCol=self.label, rawPredictionCol=self.prediction_col, metricName=metric) - - self.avgs[metric].append(evaluator.evaluate(predictions_dataframe)) - print("Current {metric}: {metric_val}".format(metric=metric, - metric_val=self.avgs - [metric][-1])) - - pred_v_lab = predictions_dataframe.select(self.label, - self.prediction_col) # Select the actual and the predicted labels - - # Add confusion stats - self.avg_tp.append(pred_v_lab[(pred_v_lab[self.label] == 1) - & (pred_v_lab[self.prediction_col] == 1)].count()) - self.avg_tn.append(pred_v_lab[(pred_v_lab[self.label] == 0) - & (pred_v_lab[self.prediction_col] == 0)].count()) - # False positives: predicted positive, actually negative - self.avg_fp.append(pred_v_lab[(pred_v_lab[self.label] == 0) - & (pred_v_lab[self.prediction_col] == 1)].count()) - # False negatives: predicted negative, actually positive - self.avg_fn.append(pred_v_lab[(pred_v_lab[self.label] == 1) - & (pred_v_lab[self.prediction_col] == 0)].count()) - - TP = np.mean(self.avg_tp) - TN = np.mean(self.avg_tn) - FP = np.mean(self.avg_fp) - FN = np.mean(self.avg_fn) - - self.avgs['TPR'].append(float(TP) / (TP + FN)) - self.avgs['SPC'].append(float(TN) / (TN + FP)) - self.avgs['TNR'].append(float(TN) / (TN + FP)) - self.avgs['PPV'].append(float(TP) / (TP + FP)) - self.avgs['NPV'].append(float(TN) / (TN + FN)) - self.avgs['FNR'].append(float(FN) / (FN + TP)) - self.avgs['FPR'].append(float(FP) / (FP + TN)) - self.avgs['FDR'].append(float(FP) / (FP + TP)) - self.avgs['FOR'].append(float(FN) / (FN + TN)) - self.avgs['ACC'].append(float(TP + TN) / (TP + FN + FP + TN)) - self.avgs['F1'].append(float(2 * TP) / (2 * TP + FP + FN)) - self.avgs['MCC'].append(float(TP * TN - FP * FN) / np.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))) - - if self.confusion_matrix: - get_confusion_matrix( - self.spark, - float(TP), - float(TN), - float(FP), - float(FN) - ).show()
- -
[docs] def plotROC(self, fittedEstimator, ax): - """ - Plots the receiver operating characteristic curve for the trained classifier - :param fittedEstimator: fitted logistic regression model - :param ax: matplotlib axis object - :return: axis with ROC plot - """ - if fittedEstimator.__class__ == LogisticRegressionModel: - trainingSummary = fittedEstimator.summary - roc = trainingSummary.roc.toPandas() - ax.plot(roc['FPR'], roc['TPR'], label='Training set areaUnderROC: \n' + str(trainingSummary.areaUnderROC)) - ax.set_xlabel('False Positive Rate') - ax.set_ylabel('True Positive Rate') - ax.set_title('ROC Curve') - ax.legend() - return ax - else: - raise NotImplementedError("Only supported for Logistic Regression Models")
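A usage sketch for the binary evaluator, assuming `spark` and a `predictions` DataFrame with 'label' and 'prediction' columns (e.g. the output of model.transform(test_df)), and that the class is importable from `splicemachine.stats` as rendered in these docs:

```
from splicemachine.stats import SpliceBinaryClassificationEvaluator

evaluator = SpliceBinaryClassificationEvaluator(spark)
evaluator.input(predictions)  # prints each metric and shows the confusion matrix
results = evaluator.get_results(as_dict=True)
```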
- - -
[docs]class SpliceRegressionEvaluator(SpliceBaseEvaluator): - """ - Splice Regression Evaluator - """ - - def __init__(self, spark, predictionCol="prediction", labelCol="label"): - supported = ['rmse', 'mse', 'r2', 'mae'] - SpliceBaseEvaluator.__init__(self, spark, RegressionEvaluator, supported, predictionCol=predictionCol, - labelCol=labelCol)
- - -
[docs]class SpliceMultiClassificationEvaluator(SpliceBaseEvaluator): - def __init__(self, spark, predictionCol="prediction", labelCol="label"): - supported = ["f1", "weightedPrecision", "weightedRecall", "accuracy"] - SpliceBaseEvaluator.__init__(self, spark, MulticlassClassificationEvaluator, supported, - predictionCol=predictionCol, labelCol=labelCol)
- - -
[docs]class DecisionTreeVisualizer(object): - """ - Visualize a decision tree, either in code like format, or graphviz - """ - -
[docs] @staticmethod - def feature_importance(spark, model, dataset, featuresCol="features"): - """ - Return a dataframe containing the relative importance of each feature - :param spark: The Spark Session - :param model: The fitted model exposing featureImportances - :param dataset: The DataFrame whose features column carries the ml_attr metadata - :param featuresCol: The name of the features column - :return: dataframe containing importance - """ - featureImp = model.featureImportances - list_extract = [] - for i in dataset.schema[featuresCol].metadata["ml_attr"]["attrs"]: - list_extract = list_extract + dataset.schema[featuresCol].metadata["ml_attr"]["attrs"][ - i] - varlist = pd.DataFrame(list_extract) - varlist['score'] = varlist['idx'].apply(lambda x: featureImp[x]) - return spark._wrapped.createDataFrame((varlist.sort_values('score', ascending=False)))
- -
[docs] @staticmethod - def visualize( - model, - feature_column_names, - label_names, - size=None, - horizontal=False, - tree_name='tree', - visual=False, - ): - """ - Visualize a decision tree, either in a code like format, or graphviz - :param model: the fitted decision tree classifier - :param feature_column_names: (List[str]) column names for features - You can access these feature names by using your VectorAssembler (in PySpark) and calling its .getInputCols() function - :param label_names: (List[str]) labels vector (below avg, above avg) - :param size: tuple(int,int) The size of the graph. If unspecified, graphviz will automatically assign a size - :param horizontal: (Bool) if the tree should be rendered horizontally - :param tree_name: the name you would like to call the tree - :param visual: (bool) True if you want a rendered graphviz PDF of the tree - :return dot: The graphviz object - """ - - tree_to_json = DecisionTreeVisualizer.replacer(model.toDebugString, - ['feature ' + str(i) for i in - range(len(feature_column_names) - 1, -1, -1)], - reversed(feature_column_names)) - - tree_to_json = DecisionTreeVisualizer.replacer(tree_to_json, - [f'Predict: {str(i)}.0' for i in - range(len(label_names) - 1, -1, -1)], - reversed(label_names)) - if not visual: - return tree_to_json - - dot = graphviz.Digraph(comment='Decision Tree') - if size: - dot.attr(size=size) - if horizontal: - dot.attr(rankdir="LR") - dot.node_attr.update(color='lightblue2', style='filled') - json_d = DecisionTreeVisualizer.tree_json(tree_to_json) - - DecisionTreeVisualizer.add_node(dot, '', '', json_d, - realroot=True) - dot.render(tree_name) - print(f'Generated pdf file of tree. You can view it in your Jupyter directory under {dot.filepath}.pdf\n') - dot.view() - return (dot)
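A visualize sketch (assumes a fitted decision tree `model` and the `VectorAssembler` used during training; the label names are illustrative):

```
from splicemachine.stats import DecisionTreeVisualizer

dot = DecisionTreeVisualizer.visualize(
    model,
    feature_column_names=assembler.getInputCols(),  # assembler from your pipeline
    label_names=['below avg', 'above avg'],         # illustrative label names
    tree_name='my_tree',
    visual=True,                                    # render a PDF via graphviz
)
```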
- -
[docs] @staticmethod - def replacer(string, bad, good): - """ - Replace every string in "bad" with the corresponding string in "good" - :param string: string to replace in - :param bad: array of strings to replace - :param good: array of strings to replace with - :return: - """ - - for (b, g) in zip(bad, good): - string = string.replace(b, g) - return string
- -
[docs] @staticmethod - def add_node( - dot, - parent, - node_hash, - root, - realroot=False, - ): - """ - Traverse through the .debugString json and generate a graphviz tree - :param dot: dot file object - :param parent: not used currently - :param node_hash: unique node id - :param root: the root of tree - :param realroot: whether or not it is the real root, or a recursive root - :return: - """ - - node_id = str(hash(root['name'])) + str(random.randint(0, 100)) - if root: - dot.node(node_id, root['name']) - if not realroot: - dot.edge(node_hash, node_id) - if root.get('children'): - if not root['children'][0].get('children'): - DecisionTreeVisualizer.add_node(dot, root['name'], - node_id, root['children'][0]) - else: - DecisionTreeVisualizer.add_node(dot, root['name'], - node_id, root['children'][0]) - DecisionTreeVisualizer.add_node(dot, root['name'], - node_id, root['children'][1])
- -
[docs] @staticmethod - def parse(lines): - """ - Lines in debug string - :param lines: - :return: block json - """ - - block = [] - while lines: - - if lines[0].startswith('If'): - bl = ' '.join(lines.pop(0).split()[1:]).replace('(', '' - ).replace(')', '') - block.append({'name': bl, - 'children': DecisionTreeVisualizer.parse(lines)}) - - if lines[0].startswith('Else'): - be = ' '.join(lines.pop(0).split()[1:]).replace('(' - , '').replace(')', '') - block.append({'name': be, - 'children': DecisionTreeVisualizer.parse(lines)}) - elif not lines[0].startswith(('If', 'Else')): - block2 = lines.pop(0) - block.append({'name': block2}) - else: - break - return block
- -
[docs] @staticmethod - def tree_json(tree): - """ - Generate a JSON representation of a decision tree - :param tree: tree debug string - :return: json - """ - - data = [] - for line in tree.splitlines(): - if line.strip(): - line = line.strip() - data.append(line) - else: - break - if not line: - break - res = [{'name': 'Root', - 'children': DecisionTreeVisualizer.parse(data[1:])}] - return res[0]
- - -
[docs]def inspectTable(spliceMLCtx, sql, topN=5): - """Inspect the values of the columns of the table (dataframe) returned from the sql query - :param spliceMLCtx: SpliceMLContext - :param sql: sql string to execute - :param topN: the number of most frequent elements of a column to return, defaults to 5 - """ - df = spliceMLCtx.df(sql) - df = df.repartition(50) - - for _col, _type in df.dtypes: - print("------Inspecting column {} -------- ".format(_col)) - - val_counts = df.groupby(_col).count() - val_counts.show() - val_counts.orderBy(F.desc('count')).limit(topN).show() - - if _type == 'double' or _type == 'int': - df.select(_col).describe().show()
- - - -# Custom Transformers -
[docs]class Rounder(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """Transformer to round predictions for ordinal regression - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - :param Transformer: Inherited Class - :param HasInputCol: Inherited Class - :param HasOutputCol: Inherited Class - :return: Transformed Dataframe with rounded predictionCol - Example: - -------- - >>> from pyspark.sql.session import SparkSession - >>> from splicemachine.stats import Rounder - >>> spark = SparkSession.builder.getOrCreate() - >>> dataset = spark.createDataFrame( - ... [(0.2, 0.0), - ... (1.2, 1.0), - ... (1.6, 2.0), - ... (1.1, 0.0), - ... (3.1, 0.0)], - ... ["prediction", "label"]) - >>> dataset.show() - +----------+-----+ - |prediction|label| - +----------+-----+ - | 0.2| 0.0| - | 1.2| 1.0| - | 1.6| 2.0| - | 1.1| 0.0| - | 3.1| 0.0| - +----------+-----+ - >>> rounder = Rounder(predictionCol = "prediction", labelCol = "label", clipPreds = True) - >>> rounder.transform(dataset).show() - +----------+-----+ - |prediction|label| - +----------+-----+ - | 0.0| 0.0| - | 1.0| 1.0| - | 2.0| 2.0| - | 1.0| 0.0| - | 2.0| 0.0| - +----------+-----+ - >>> rounderNoClip = Rounder(predictionCol = "prediction", labelCol = "label", clipPreds = False) - >>> rounderNoClip.transform(dataset).show() - +----------+-----+ - |prediction|label| - +----------+-----+ - | 0.0| 0.0| - | 1.0| 1.0| - | 2.0| 2.0| - | 1.0| 0.0| - | 3.0| 0.0| - +----------+-----+ - """ - - @keyword_only - def __init__(self, predictionCol="prediction", labelCol="label", clipPreds=True, maxLabel=None, minLabel=None): - """initialize self - :param predictionCol: column containing predictions, defaults to "prediction" - :param labelCol: column containing labels, defaults to "label" - :param clipPreds: clip predictions above the max label and below the min label, defaults to True - :param maxLabel: optional: the maximum value for the prediction column, otherwise uses the maximum of the labelCol, defaults to None - :param minLabel: optional: the minimum value for the prediction column, otherwise uses the minimum of the labelCol, defaults to None - """ - super(Rounder, self).__init__() - self.labelCol = labelCol - self.predictionCol = predictionCol - self.clipPreds = clipPreds - self.maxLabel = maxLabel - self.minLabel = minLabel
[docs] @keyword_only - def setParams(self, predictionCol="prediction", labelCol="label"): - kwargs = self._input_kwargs - return self._set(**kwargs)
- -
[docs] def _transform(self, dataset): - """ - Rounds the predictions to the nearest integer value, and also clips them at the max/min value observed in label - :param dataset: dataframe with predictions to be rounded - :return: DataFrame with rounded predictions - """ - labelCol = self.labelCol - predictionCol = self.predictionCol - - if self.clipPreds: - max_label = self.maxLabel if self.maxLabel else dataset.agg({labelCol: 'max'}).collect()[0][0] - min_label = self.minLabel if self.minLabel else dataset.agg({labelCol: 'min'}).collect()[0][0] - clip = F.udf(lambda x: float(max_label) if x > max_label else (float(min_label) if x < min_label else x), - DoubleType()) - - dataset = dataset.withColumn(predictionCol, F.round(clip(F.col(predictionCol)))) - else: - dataset = dataset.withColumn(predictionCol, F.round(F.col(predictionCol))) - - return dataset
- - -
[docs]class OneHotDummies(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """ - Transformer to generate dummy columns for categorical variables as a part of a preprocessing pipeline - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - :param Transformer: Inherited Classes - :param HasInputCol: Inherited Classes - :param HasOutputCol: Inherited Classes - :return: pyspark DataFrame - """ - - @keyword_only - def __init__(self, inputCol=None, outputCol=None): - """ - Assigns variables to parameters passed - :param inputCol: Sparse vector returned by OneHotEncoders, defaults to None - :param outputCol: string base to append to output column names, defaults to None - """ - super(OneHotDummies, self).__init__() - # kwargs = self._input_kwargs - # self.setParams(**kwargs) - self.inputCol = inputCol - self.outputCol = outputCol - self.outcols = []
[docs] @keyword_only - def setParams(self, inputCol=None, outputCol=None): - kwargs = self._input_kwargs - return self._set(**kwargs)
- -
[docs] def _transform(self, dataset): - - """iterates through the number of categorical values of a categorical variable and adds dummy columns for each of those categories - For a string categorical column, include this transformer in the following workflow: StringIndexer -> OneHotEncoder -> OneHotDummies -> PCA/ Learning Algorithm - :param dataset: PySpark DataFrame where inputCol is the column returned by OneHotEncoder - :return: original DataFrame with M additional columns where M = # of categories for this variable - """ - out_col_suffix = self.outputCol # this is what I want to append to the column name - col_name = self.inputCol - - out_col_base = col_name + out_col_suffix # this is the base for the n outputted columns - - # helper functions - get_num_categories = F.udf(lambda x: int(x.size), IntegerType()) - get_active_index = F.udf(lambda x: int(x.indices[0]), IntegerType()) - check_active_index = F.udf(lambda active, i: int(active == i), IntegerType()) - - num_categories = dataset.select( - get_num_categories(col_name).alias('num_categories')).distinct() # this returns a dataframe - if num_categories.count() == 1: # making sure all the sparse vectors have the same number of categories - num_categories_int = num_categories.collect()[0]['num_categories'] # now this is an int - - dataset = dataset.withColumn('active_index', get_active_index(col_name)) - column_names = [] - for i in range(num_categories_int): # Now I'm going to make a column for each category - column_name = out_col_base + '_' + str(i) - dataset = dataset.withColumn(column_name, check_active_index(F.col('active_index'), F.lit(i))) - column_names.append(column_name) - - dataset = dataset.drop('active_index') - self.outcols = column_names - return dataset
- -
[docs] def getOutCols(self): - return self.outcols
- - -
[docs]class IndReconstructer(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """Transformer to reconstruct String Index from OneHotDummy Columns. This can be used as a part of a Pipeline Object - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - :param Transformer: Inherited Class - :param HasInputCol: Inherited Class - :param HasOutputCol: Inherited Class - :return: Transformed PySpark Dataframe With Original String Indexed Variables - """ - - @keyword_only - def __init__(self, inputCol=None, outputCol=None): - super(IndReconstructer, self).__init__() - # kwargs = self._input_kwargs - # self.setParams(**kwargs) - self.inputCol = inputCol - self.outputCol = outputCol
[docs] @keyword_only - def setParams(self, inputCol=None, outputCol=None): - kwargs = self._input_kwargs - return self._set(**kwargs)
- -
[docs] def _transform(self, dataset): - """ - iterates through the oneHotDummy columns for a categorical variable and returns the index of the column that is closest to one. This corresponds to the stringIndexed value of this feature for this row. - :param dataset: dataset with OneHotDummy columns - :return: DataFrame with column corresponding to a categorical indexed column - """ - inColBase = self.inputCol - outCol = self.outputCol - - closestToOne = F.udf(lambda x: abs(x - 1), DoubleType()) - dummies = dataset.select(*[closestToOne(i).alias(i) if inColBase in i else i for i in dataset.columns if - inColBase in i or i == 'SUBJECT']) - dummies = dummies.withColumn('least_val', - F.lit(F.least(*[F.col(i) for i in dataset.columns if inColBase in i]))) - - dummies = dummies.select( - *[(F.col(i) == F.col('least_val')).alias(i + 'isind') if inColBase in i else i for i in dataset.columns if - inColBase in i or i == 'SUBJECT']) - getActive = F.udf(lambda row: [idx for idx, val in enumerate(row) if val][0], IntegerType()) - dummies = dummies.withColumn(outCol, getActive( - F.struct(*[F.col(x) for x in dummies.columns if x != 'SUBJECT']).alias('struct'))) - dataset = dataset.join(dummies.select(['SUBJECT', outCol]), 'SUBJECT') - - return dataset
- - -
[docs]class OverSampler(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """Transformer to oversample datapoints with minority labels - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - :param Transformer: Inherited Class - :param HasInputCol: Inherited Class - :param HasOutputCol: Inherited Class - :return: PySpark Dataframe with labels in approximately equal ratios - Example: - ------- - >>> from pyspark.sql import functions as F - >>> from pyspark.sql.session import SparkSession - >>> from pyspark.ml.linalg import Vectors - >>> from splicemachine.stats import OverSampler - >>> spark = SparkSession.builder.getOrCreate() - >>> df = spark.createDataFrame( - ... [(Vectors.dense([0.0]), 0.0), - ... (Vectors.dense([0.5]), 0.0), - ... (Vectors.dense([0.4]), 1.0), - ... (Vectors.dense([0.6]), 1.0), - ... (Vectors.dense([1.0]), 1.0)] * 10, - ... ["features", "Class"]) - >>> df.groupBy(F.col("Class")).count().orderBy("count").show() - +-----+-----+ - |Class|count| - +-----+-----+ - | 0.0| 20| - | 1.0| 30| - +-----+-----+ - >>> oversampler = OverSampler(labelCol = "Class", strategy = "auto") - >>> oversampler.transform(df).groupBy("Class").count().show() - +-----+-----+ - |Class|count| - +-----+-----+ - | 0.0| 29| - | 1.0| 30| - +-----+-----+ - """ - - @keyword_only - def __init__(self, labelCol=None, strategy="auto", randomState=None): - """Initialize self - :param labelCol: Label Column name, defaults to None - :param strategy: defaults to "auto", strategy to resample the dataset: - • Only "auto" is currently supported, which corresponds to random sampling with replacement - :param randomState: sets the seed of the sampling algorithm - """ - super(OverSampler, self).__init__() - self.labelCol = labelCol - self.strategy = strategy - self.withReplacement = True if strategy == "auto" else False - self.randomState = np.random.randn() if not randomState else randomState
[docs] @keyword_only - def setParams(self, labelCol=None, strategy="auto"): - kwargs = self._input_kwargs - return self._set(**kwargs)
- -
[docs] def _transform(self, dataset): - """ - Oversamples - :param dataset: dataframe to be oversampled - :return: DataFrame with the resampled data points - """ - if self.strategy == "auto": - - pd_value_counts = dataset.groupBy(F.col(self.labelCol)).count().toPandas() - - label_type = dataset.schema[self.labelCol].dataType.simpleString() - types_dic = {'int': int, "string": str, "double": float} - - maxidx = pd_value_counts['count'].idxmax() - - self.majorityLabel = types_dic[label_type](pd_value_counts[self.labelCol].loc[maxidx]) - majorityData = dataset.filter(F.col(self.labelCol) == self.majorityLabel) - - returnData = None - - if len(pd_value_counts) == 1: - raise ValueError( - f'Error! Number of labels = {len(pd_value_counts)}. Cannot Oversample with this number of classes') - elif len(pd_value_counts) == 2: - minidx = pd_value_counts['count'].idxmin() - minorityLabel = types_dic[label_type](pd_value_counts[self.labelCol].loc[minidx]) - ratio = pd_value_counts['count'].loc[maxidx] / pd_value_counts['count'].loc[minidx] * 1.0 - - returnData = majorityData.union( - dataset.filter(F.col(self.labelCol) == minorityLabel).sample(withReplacement=self.withReplacement, - fraction=ratio, seed=self.randomState)) - - else: - minority_labels = list(pd_value_counts.drop(maxidx)[self.labelCol]) - - ratios = {types_dic[label_type](minority_label): pd_value_counts['count'].loc[maxidx] / float( - pd_value_counts[pd_value_counts[self.labelCol] == minority_label]['count']) for minority_label in - minority_labels} - - for (minorityLabel, ratio) in ratios.items(): - minorityData = dataset.filter(F.col(self.labelCol) == minorityLabel).sample( - withReplacement=self.withReplacement, fraction=ratio, seed=self.randomState) - if not returnData: - returnData = majorityData.union(minorityData) - else: - returnData = returnData.union(minorityData) - - return returnData - else: - raise NotImplementedError("Only auto is currently implemented")
- - -
[docs]class OverSampleCrossValidator(CrossValidator): - """Class to perform Cross Validation model evaluation while over-sampling minority labels. - Example: - ------- - >>> from pyspark.sql.session import SparkSession - >>> from pyspark.ml.classification import LogisticRegression - >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator - >>> from pyspark.ml.linalg import Vectors - >>> from pyspark.ml.tuning import ParamGridBuilder - >>> from splicemachine.stats import OverSampleCrossValidator - >>> spark = SparkSession.builder.getOrCreate() - >>> dataset = spark.createDataFrame( - ... [(Vectors.dense([0.0]), 0.0), - ... (Vectors.dense([0.5]), 0.0), - ... (Vectors.dense([0.4]), 1.0), - ... (Vectors.dense([0.6]), 1.0), - ... (Vectors.dense([1.0]), 1.0)] * 10, - ... ["features", "label"]) - >>> lr = LogisticRegression() - >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build() - >>> PRevaluator = BinaryClassificationEvaluator(metricName = 'areaUnderPR') - >>> AUCevaluator = BinaryClassificationEvaluator(metricName = 'areaUnderROC') - >>> ACCevaluator = MulticlassClassificationEvaluator(metricName="accuracy") - >>> cv = OverSampleCrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=AUCevaluator, altEvaluators = [PRevaluator, ACCevaluator],parallelism=2,seed = 1234) - >>> cvModel = cv.fit(dataset) - >>> print(cvModel.avgMetrics) - [(0.5, [0.5888888888888888, 0.3888888888888889]), (0.806878306878307, [0.8556863149300125, 0.7055555555555556])] - >>> print(AUCevaluator.evaluate(cvModel.transform(dataset))) - 0.8333333333333333 - """ - - def __init__(self, estimator, estimatorParamMaps, evaluator, numFolds=3, seed=None, parallelism=3, - collectSubModels=False, labelCol='label', altEvaluators=None, overSample=True): - """ Initialize Self - :param estimator: Machine Learning Model, defaults to None - :param estimatorParamMaps: paramMap to search, defaults to None - :param evaluator: primary model evaluation metric, defaults to None - :param numFolds: number of folds to perform, defaults to 3 - :param seed: random state, defaults to None - :param parallelism: number of threads, defaults to 3 - :param collectSubModels: to return submodels, defaults to False - :param labelCol: target variable column label, defaults to 'label' - :param altEvaluators: additional metrics to evaluate, defaults to None - If passed, the metrics of the alternate evaluators are accessed in the CrossValidatorModel.avgMetrics attribute - :param overSample: Boolean: to perform oversampling of minority labels, defaults to True - """ - self.label = labelCol - self.altEvaluators = altEvaluators - self.toOverSample = overSample - super(OverSampleCrossValidator, self).__init__(estimator=estimator, estimatorParamMaps=estimatorParamMaps, - evaluator=evaluator, numFolds=numFolds, seed=seed, - parallelism=parallelism, collectSubModels=collectSubModels)
[docs] def getLabel(self): - return self.label
- -
[docs] def getOversample(self): - return self.toOverSample
- -
[docs] def getAltEvaluators(self): - return self.altEvaluators
- -
[docs] def _parallelFitTasks(self, est, train, eva, validation, epm, collectSubModel, altEvaluators): - """ - Creates a list of callables which can be called from different threads to fit and evaluate - an estimator in parallel. Each callable returns an `(index, metric, altmetrics, subModel)` tuple, - where altmetrics is None unless altEvaluators are supplied. - :param est: Estimator, the estimator to be fit. - :param train: DataFrame, training data set, used for fitting. - :param eva: Evaluator, used to compute `metric` - :param validation: DataFrame, validation data set, used for evaluation. - :param epm: Sequence of ParamMap, params maps to be used during fitting & evaluation. - :param collectSubModel: Whether to collect sub model. - :param altEvaluators: additional Evaluators used to compute the alternate metrics, or None. - :return: list of callables, each returning an index into `epm`, the associated metric value, the alternate metric values, and the sub model (or None). - """ - modelIter = est.fitMultiple(train, epm) - - def singleTask(): - index, model = next(modelIter) - metric = eva.evaluate(model.transform(validation, epm[index])) - altmetrics = None - if altEvaluators: - altmetrics = [altEva.evaluate(model.transform(validation, epm[index])) for altEva in altEvaluators] - return index, metric, altmetrics, model if collectSubModel else None - - return [singleTask] * len(epm)
- -
[docs] def _fit(self, dataset): - """Performs k-fold cross-validation on the oversampled dataset - :param dataset: full dataset - :return: CrossValidatorModel containing the fitted BestModel with the average of the primary and alternate metrics in a list of tuples in the format: [(paramComb1_average_primary_metric, [paramComb1_average_altmetric1,paramComb1_average_altmetric2]), (paramComb2_average_primary_metric, [paramComb2_average_altmetric1,paramComb2_average_altmetric2])] - """ - est = self.getOrDefault(self.estimator) - epm = self.getOrDefault(self.estimatorParamMaps) - numModels = len(epm) - eva = self.getOrDefault(self.evaluator) - nFolds = self.getOrDefault(self.numFolds) - seed = self.getOrDefault(self.seed) - - # Getting Label and altEvaluators - label = self.getLabel() - altEvaluators = self.getAltEvaluators() - altMetrics = [[0.0] * len(altEvaluators)] * numModels if altEvaluators else None - h = 1.0 / nFolds - randCol = self.uid + "_rand" - df = dataset.select("*", F.rand(seed).alias(randCol)) - metrics = [0.0] * numModels - - pool = ThreadPool(processes=min(self.getParallelism(), numModels)) - subModels = None - collectSubModelsParam = self.getCollectSubModels() - if collectSubModelsParam: - subModels = [[None for j in range(numModels)] for i in range(nFolds)] - - for i in range(nFolds): - # Getting the splits such that no data is reused - validateLB = i * h - validateUB = (i + 1) * h - condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB) - validation = df.filter(condition).cache() - train = df.filter(~condition).cache() - - # Oversampling the minority class(es) here - if self.toOverSample: - withReplacement = True - oversampler = OverSampler(labelCol=self.label, strategy="auto") - - # Oversampling - train = oversampler.transform(train) - # Getting the individual tasks so this can be parallelized - tasks = self._parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam, altEvaluators) - # Calling the parallel process - for j, metric, fold_alt_metrics, subModel in pool.imap_unordered(lambda f: f(), tasks): - metrics[j] += (metric / nFolds) - if fold_alt_metrics: - altMetrics[j] = [altMetrics[j][i] + fold_alt_metrics[i] / nFolds for i in range(len(altEvaluators))] - - if collectSubModelsParam: - subModels[i][j] = subModel - - validation.unpersist() - train.unpersist() - - if eva.isLargerBetter(): - bestIndex = np.argmax(metrics) - else: - bestIndex = np.argmin(metrics) - bestModel = est.fit(dataset, epm[bestIndex]) - metrics = [(metric, altMetrics[idx]) for idx, metric in enumerate(metrics)] - return self._copyValues(CrossValidatorModel(bestModel, metrics, subModels))
- - -## Pipeline Functions -
[docs]def get_string_pipeline(df, cols_to_exclude, steps=['StringIndexer', 'OneHotEncoder', 'OneHotDummies']): - """Generates a list of preprocessing stages - :param df: DataFrame including only the training data - :param cols_to_exclude: Column names we don't want to include in the preprocessing (i.e. SUBJECT/ target column) - :param steps: preprocessing steps to take - :return: (stages, Numeric_Columns) - stages: list of pipeline stages to be used in preprocessing - Numeric_Columns: list of columns that contain numeric features - """ - - String_Columns = [] - Numeric_Columns = [] - for _col, _type in df.dtypes: # This is a tuple of (<col name>, data type) - if _col in cols_to_exclude: - continue - if _type == 'string': - String_Columns.append(_col) - elif _type == 'double' or _type == 'int' or _type == 'float': - Numeric_Columns.append(_col) - else: - print("Unhandled Data type = {}".format((_col, _type))) - continue - - stages = [] - if 'StringIndexer' in steps: - # String Indexing - str_indexers = [StringIndexer(inputCol=c, outputCol=c + '_ind', handleInvalid='skip') for c in String_Columns] - indexed_string_vars = [c + '_ind' for c in String_Columns] - stages = stages + str_indexers - - if 'OneHotEncoder' in steps: - # One hot encoding - str_hot = [OneHotEncoder(inputCol=c + '_ind', outputCol=c + '_vec', dropLast=False) for c in String_Columns] - encoded_str_vars = [c + '_vec' for c in String_Columns] - stages = stages + str_hot - - if 'OneHotDummies' in steps: - # Converting the sparse vector to dummy columns - str_dumbers = [OneHotDummies(inputCol=c + '_vec', outputCol='_dummy') for c in String_Columns] - str_dumb_cols = [c for dummy in str_dumbers for c in dummy.getOutCols()] - stages = stages + str_dumbers - - if len(stages) == 0: - ERROR = """ - Parameter <steps> must include at least one of 'StringIndexer', 'OneHotEncoder', 'OneHotDummies' - """ - print(ERROR) - raise Exception(ERROR) - - return stages, Numeric_Columns
- - -
[docs]def vector_assembler_pipeline(df, columns, doPCA=False, k=10): - """After preprocessing String Columns, this function can be used to assemble a feature vector to be used for learning - creates the following stages: VectorAssembler -> StandardScaler [ -> PCA] - :param df: DataFrame containing preprocessed Columns - :param columns: list of Column names of the preprocessed columns - :param doPCA: Do you want to do PCA as part of the vector assembler? defaults to False - :param k: Number of Principal Components to use, defaults to 10 - :return: List of vector assembling stages - """ - - assembler = VectorAssembler(inputCols=columns, outputCol='featuresVec') - scaler = StandardScaler(inputCol="featuresVec", outputCol="features", withStd=True, - withMean=True) # centering and standardizing the data - - if doPCA: - pca_obj = PCA(k=k, inputCol="features", outputCol="pcaFeatures") - stages = [assembler, scaler, pca_obj] - else: - stages = [assembler, scaler] - return stages
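Putting the two helpers together in a single Pipeline; a sketch assuming a training DataFrame `train_df` with a 'label' column (for a full feature set you would also append the dummy columns produced by OneHotDummies to the assembler input, which is omitted here for brevity):

```
from pyspark.ml import Pipeline
from splicemachine.stats import get_string_pipeline, vector_assembler_pipeline

string_stages, numeric_cols = get_string_pipeline(train_df, cols_to_exclude=['label'])
assembler_stages = vector_assembler_pipeline(train_df, numeric_cols, doPCA=True, k=5)

pipeline = Pipeline(stages=string_stages + assembler_stages)
features_df = pipeline.fit(train_df).transform(train_df)
```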
- - -
[docs]def postprocessing_pipeline(df, cols_to_exclude): - """Assemble a postprocessing pipeline to reconstruct the original categorical indexed values from OneHotDummy columns - :param df: DataFrame including the original string columns - :param cols_to_exclude: list of columns to exclude - :return: (reconstructors, String_Columns) - reconstructors: list of IndReconstructer stages - String_Columns: list of columns that are being reconstructed - """ - String_Columns = [] - Numeric_Columns = [] - for _col, _type in df.dtypes: # This is a tuple of (<col name>, data type) - if _col in cols_to_exclude: - continue - if _type == 'string': - String_Columns.append(_col) - elif _type == 'double' or _type == 'int' or _type == 'float': - Numeric_Columns.append(_col) - else: - print("Unhandled data type = {}".format((_col, _type))) - continue - - # Extracting the value of the OneHotEncoded variable - reconstructors = [IndReconstructer(inputCol=c, outputCol=c + '_activeInd') for c in String_Columns] - return reconstructors, String_Columns
- - -# Distribution fitting Functions -
[docs]def make_pdf(dist, params, size=10000): - """Generate a distribution's probability density function - :param dist: scipy.stats distribution object: https://docs.scipy.org/doc/scipy/reference/stats.html - :param params: distribution parameters - :param size: how many data points to generate, defaults to 10000 - :return: pandas Series of the probability density function for this distribution - """ - # Separate parts of parameters - arg = params[:-2] - loc = params[-2] - scale = params[-1] - - # Get sane start and end points of distribution - start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale) - end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale) - - # Build PDF and turn into pandas Series - x = np.linspace(start, end, size) - y = dist.pdf(x, loc=loc, scale=scale, *arg) - pdf = pd.Series(y, x) - - return pdf
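For example, fitting a normal distribution to a 1-D sample and plotting its density over the central 98% of its mass (the `sample` variable here is synthetic):

```
import numpy as np
import scipy.stats as st

sample = np.random.normal(loc=5.0, scale=2.0, size=1000)
params = st.norm.fit(sample)        # (loc, scale) for a normal
pdf = make_pdf(st.norm, params)     # pandas Series indexed by x
pdf.plot(legend=True, label='norm')
```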
- - -
[docs]def best_fit_distribution(data, col_name, bins, ax): - """Model data by finding the best-fit distribution to the data - :param data: DataFrame with one column containing the feature whose distribution is to be investigated - :param col_name: column name for the feature - :param bins: number of bins to use in generating the histogram of this data - :param ax: axis to plot the histogram on - :return: (best_distribution.name, best_params, best_sse) - best_distribution.name: string of the best distribution name - best_params: parameters for this distribution - best_sse: sum of squared errors for this distribution against the empirical pdf - """ - # Get histogram of original data - - output = dist_explore.pandas_histogram(data, bins=bins) - output.reset_index(level=0, inplace=True) - output['index'] = output['index'].apply(lambda x: np.mean([float(i.strip()) for i in x.split('-')])) - output[col_name] = output[col_name] / np.sum(output[col_name]) / (output['index'][1] - (output['index'][0])) - - x = output['index'] - y = output[col_name] - # DISTRIBUTIONS = [ - # st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine, - # st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk, - # st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon, - # st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r, - # st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss, - # st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable, - # st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf, - # st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal, - # st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda, - # st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy - # ] - - DISTRIBUTIONS = [ - st.beta, st.expon, - st.halfnorm, - st.norm, - st.lognorm, - st.uniform - ] - - # Best holders - best_distribution = st.norm - best_params = (0.0, 1.0) - best_sse = np.inf - - # Estimate distribution parameters from data - for distribution in tqdm(DISTRIBUTIONS): - - # Try to fit the distribution - try: - # Ignore warnings from data that can't be fit - with warnings.catch_warnings(): - warnings.filterwarnings('ignore') - - # fit dist to data - params = distribution.fit(data.collect()) - - # Separate parts of parameters - arg = params[:-2] - loc = params[-2] - scale = params[-1] - - # Calculate the fitted PDF and its sum of squared errors against the empirical PDF - - pdf = distribution.pdf(x, loc=loc, scale=scale, *arg) - sse = np.sum(np.power(y.values - pdf, 2.0)) - - # if an axis was passed in, add well-fitting distributions to the plot - try: - if ax: - if sse < 0.05: - # Don't want to plot really bad ones - ax.plot(x, pdf, label=distribution.name) - ax.legend() - except Exception: - pass - - # identify if this distribution is better - if best_sse > sse > 0: - best_distribution = distribution - best_params = params - best_sse = sse - - except Exception: - pass - - return (best_distribution.name, best_params, best_sse)
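Note that this helper leans on `pyspark_dist_explore` for the histogram and expects a single-column Spark DataFrame. A hedged usage sketch (`df` and its numeric column 'AGE' are hypothetical):

```
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
name, params, sse = best_fit_distribution(df.select('AGE'), 'AGE', bins=50, ax=ax)
print(name, params, sse)   # e.g. ('lognorm', (...), 0.0012)
```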
- - -## PCA Functions - -
[docs]def estimateCovariance(df, features_col='features'): - """Compute the covariance matrix for a given dataframe. - Note: The multi-dimensional covariance array should be calculated using outer products. Don't forget to normalize the data by first subtracting the mean. - :param df: PySpark dataframe - :param features_col: name of the column with the features, defaults to 'features' - :return: np.ndarray: A multi-dimensional array where the number of rows and columns both equal the length of the arrays in the input dataframe. - """ - m = df.select(df[features_col]).rdd.map(lambda x: x[0]).mean() - - dfZeroMean = df.select(df[features_col]).rdd.map(lambda x: x[0]).map(lambda x: x - m) # subtract the mean - - return dfZeroMean.map(lambda x: np.outer(x, x)).sum() / df.count()
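Because the normalization is by `df.count()` (N, not N-1), the result matches NumPy's biased covariance. A small cross-check, assuming an active `SparkSession` named `spark`:

```
import numpy as np
from pyspark.ml.linalg import Vectors

pts = [[1.0, 2.0], [3.0, 0.0], [5.0, 4.0]]
df = spark.createDataFrame([(Vectors.dense(p),) for p in pts], ['features'])

cov = estimateCovariance(df)
assert np.allclose(cov, np.cov(np.array(pts).T, bias=True))  # bias=True -> divide by N
```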
- - -
[docs]def pca_with_scores(df, k=10): - """Computes the top `k` principal components, corresponding scores, and all eigenvalues. - Note: - All eigenvalues should be returned in sorted order (largest to smallest). `eigh` returns - each eigenvector as a column. This function also returns eigenvectors as columns. - :param df: A Spark dataframe with a 'features' column, which consists of DenseVectors. - :param k: The number of principal components to return, defaults to 10 - :return: (eigenvectors, `RDD` of scores, eigenvalues). - Eigenvectors: multi-dimensional array where the number of - rows equals the length of the arrays in the input `RDD` and the number of columns equals `k`. - `RDD` of scores: has the same number of rows as `df` and consists of arrays of length `k`. - Eigenvalues is an array of length d (the number of features). - """ - cov = estimateCovariance(df) - col = cov.shape[1] - eigVals, eigVecs = eigh(cov) - inds = np.argsort(eigVals) - eigVecs = eigVecs.T[inds[-1:-(col + 1):-1]] - components = eigVecs[0:k] - eigVals = eigVals[inds[-1:-(col + 1):-1]] # sort eigenvals - score = df.select(df['features']).rdd.map(lambda x: x[0]).map(lambda x: np.dot(x, components.T)) - # Return the `k` principal components, `k` scores, and all eigenvalues - - return components.T, score, eigVals
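Continuing the cross-check sketch above, the components come back as columns and the scores as an RDD of projected rows:

```
components, scores, eig_vals = pca_with_scores(df, k=2)
print(components.shape)   # (d, k): one eigenvector per column
print(scores.take(3))     # each row projected onto the top-k components
print(eig_vals)           # all d eigenvalues, largest first
```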
- - -
[docs]def varianceExplained(df, k=10): - """Returns the proportion of variance explained by `k` principal components. Calls the above PCA procedure - :param df: PySpark DataFrame - :param k: number of principal components, defaults to 10 - :return: (proportion, principal_components, scores, eigenvalues) - """ - components, scores, eigenvalues = pca_with_scores(df, k) - return sum(eigenvalues[0:k]) / sum(eigenvalues), components, scores, eigenvalues
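For example:

```
prop, components, scores, eigenvalues = varianceExplained(df, k=2)
print("top-2 components explain {:.1%} of the variance".format(prop))
```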
- - -# PCA reconstruction Functions - -
[docs]def reconstructPCA(sql, df, pc, mean, std, originalColumns, fits, pcaColumn='pcaFeatures'): - """Reconstruct data from the lower dimensional space after performing PCA - :param sql: SQLContext - :param df: PySpark DataFrame: inputted PySpark DataFrame - :param pc: numpy.ndarray: principal components projected onto - :param mean: numpy.ndarray: mean of the original columns - :param std: numpy.ndarray: standard deviation of the original columns - :param originalColumns: list: original column names - :param fits: fits of features returned from best_fit_distribution - :param pcaColumn: column in df that contains the PCA features, defaults to 'pcaFeatures' - :return: dataframe containing the reconstructed data - """ - - cols = df.columns - cols.remove(pcaColumn) - - pddf = df.toPandas() - first_series = pddf[pcaColumn].apply(lambda x: np.array(x.toArray())).values.reshape(-1, 1) - first_features = np.apply_along_axis(lambda x: x[0], 1, first_series) - # undoing PCA - first_reconstructed = np.dot(first_features, pc) - # undoing the scaling - first_reconstructed = np.multiply(first_reconstructed, std) + mean - first_reconstructedDF = pd.DataFrame(first_reconstructed, columns=originalColumns) - for _col in cols: - first_reconstructedDF[_col] = pddf[_col] - - # This is a PySpark DataFrame containing the reconstructed data, including the dummy columns for the string variables -- the next step is to reverse the one-hot encoding for the string columns - first_reconstructed = sql.createDataFrame(first_reconstructedDF) - - cols_to_exclude = ['DATE_OF_STUDY'] - postPipeStages, String_Columns = postprocessing_pipeline(df, cols_to_exclude) - - postPipe = Pipeline(stages=postPipeStages) - out = postPipe.fit(first_reconstructed).transform(first_reconstructed) - for _col in String_Columns: - out = out.join(df.select([_col, _col + '_ind']) \ - .withColumnRenamed(_col + '_ind', _col + '_activeInd'), _col + '_activeInd') \ - .dropDuplicates() - cols_to_drop = [_col for _col in out.columns if any([base in _col for base in String_Columns]) and '_' in _col] - - reconstructedDF = out.drop( - *cols_to_drop) # This is equivalent to the reconstructed dataframe above - clip = F.udf(lambda x: x if x > 0 else 0.0, DoubleType()) - for _key in fits.keys(): - if fits[_key]['dist'] == 'EMPIRICAL': - reconstructedDF = reconstructedDF.withColumn(_key, F.round(clip(F.col(_key)))) - else: - reconstructedDF = reconstructedDF.withColumn(_key, clip(F.col(_key))) - - return reconstructedDF
- - -
[docs]class MarkovChain(object): - def __init__(self, transition_prob): - """ - Initialize the MarkovChain instance. - Parameters - ---------- - transition_prob: dict - A dict object representing the transition - probabilities in Markov Chain. - Should be of the form: - {'state1': {'state1': 0.1, 'state2': 0.4}, - 'state2': {...}} - """ - self.transition_prob = transition_prob - self.states = list(transition_prob.keys()) # states that have transitions to the next layer - # For states in the form <stateN_M> where N is the visit (layer) and M is the cluster in the N-th Layer - self.max_num_steps = max([int(i.split('state')[1][0]) for i in self.states]) - -
[docs] def get_max_num_steps(self): - return self.max_num_steps
- -
[docs] def next_state(self, current_state): - """Returns the state of the random variable at the next time - instance. - :param current_state: The current state of the system. - :raises: Exception if the random choice fails - :return: next state - """ - - try: - - # if not current_state in self.states: - # print('We have reached node {} where we do not know where they go from here... \n try reducing the number of clusters at level {} \n otherwise we might be at the terminating layer'.format(current_state, int(current_state.split('state')[1][0]))) - # raise Exception('Unknown transition') - - next_possible_states = list(self.transition_prob[current_state].keys()) # np.random.choice needs a sequence, not a dict view - return np.random.choice( - next_possible_states, - p=[self.transition_prob[current_state][next_state] - for next_state in next_possible_states] - )[:] - except Exception as e: - raise e
- -
[docs] def generate_states(self, current_state, no=10, last=True): - """ - Generates the next states of the system. - Parameters - ---------- - current_state: str - The state of the current random variable. - no: int - The number of future states to generate. - last: bool - Whether to return only the final state, defaults to True. - """ - try: - if no > self.max_num_steps: - print('Number of steps exceeds the max number of possible next steps') - raise Exception('<no> should not exceed {}. The value of <no> was: {}'.format(self.max_num_steps, no)) - - future_states = [] - for i in range(no): - try: - next_state = self.next_state(current_state) - except Exception as e: - raise e - future_states.append(next_state) - current_state = next_state - if last: - return future_states[-1] - else: - return future_states - except Exception as e: - raise e
- -
[docs] def rep_states(self, current_state, no=10, num_reps=10): - """Runs generate_states num_reps times and returns the final state reached most often - Arguments: - current_state str -- The state of the current random variable - no int -- number of time steps in the future to run - num_reps int -- number of times to run the simulation forward - Returns: - state -- the most commonly reached state at the end of these runs - """ - if no > self.max_num_steps: - print('Number of steps exceeds the max number of possible next steps') - raise Exception('<no> should not exceed {}. The value of <no> was: {}'.format(self.max_num_steps, no)) - - endstates = [] - for _ in range(num_reps): - endstates.append(self.generate_states(current_state, no=no, last=True)) - return max(set(endstates), key=endstates.count)
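Given the `state<N>_<M>` naming convention (layer N, cluster M), a hedged usage sketch with a hypothetical two-layer transition table:

```
transition_prob = {
    'state0_0': {'state1_0': 0.7, 'state1_1': 0.3},
    'state0_1': {'state1_0': 0.2, 'state1_1': 0.8},
    'state1_0': {'state2_0': 1.0},
    'state1_1': {'state2_0': 0.5, 'state2_1': 0.5},
}

mc = MarkovChain(transition_prob)
mc.get_max_num_steps()                         # 1: transitions are keyed on layers 0 and 1
mc.next_state('state0_0')                      # e.g. 'state1_1'
mc.generate_states('state0_0', no=1)           # the state one step ahead
mc.rep_states('state0_0', no=1, num_reps=100)  # most common end state over 100 runs
```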
-
- -
-
-
-
-
- - \ No newline at end of file diff --git a/docs/_build/epub/_static/basic.css b/docs/_build/epub/_static/basic.css deleted file mode 100644 index 2e3cf323..00000000 --- a/docs/_build/epub/_static/basic.css +++ /dev/null @@ -1,855 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -div.section::after { - display: block; - content: ''; - clear: left; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px 
solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -a.brackets:before, -span.brackets > a:before{ - content: "["; -} - -a.brackets:after, -span.brackets > a:after { - content: "]"; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, .figure.align-default { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-default { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px; - background-color: #ffe; - width: 40%; - float: right; - clear: right; - overflow-x: auto; -} - -p.sidebar-title { - font-weight: bold; -} - -div.admonition, div.topic, blockquote { - clear: left; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- content of sidebars/topics/admonitions -------------------------------- */ - -div.sidebar > :last-child, -div.topic > :last-child, -div.admonition > :last-child { - margin-bottom: 0; -} - -div.sidebar::after, -div.topic::after, -div.admonition::after, -blockquote::after { - display: block; - content: ''; - clear: both; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - margin-top: 10px; - margin-bottom: 10px; - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - 
-table.align-default { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -th > :first-child, -td > :first-child { - margin-top: 0px; -} - -th > :last-child, -td > :last-child { - margin-bottom: 0px; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist { - margin: 1em 0; -} - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -:not(li) > ol > li:first-child > :first-child, -:not(li) > ul > li:first-child > :first-child { - margin-top: 0px; -} - -:not(li) > ol > li:last-child > :last-child, -:not(li) > ul > li:last-child > :last-child { - margin-bottom: 0px; -} - -ol.simple ol p, -ol.simple ul p, -ul.simple ol p, -ul.simple ul p { - margin-top: 0; -} - -ol.simple > li:not(:first-child) > p, -ul.simple > li:not(:first-child) > p { - margin-top: 0; -} - -ol.simple p, -ul.simple p { - margin-bottom: 0; -} - -dl.footnote > dt, -dl.citation > dt { - float: left; - margin-right: 0.5em; -} - -dl.footnote > dd, -dl.citation > dd { - margin-bottom: 0em; -} - -dl.footnote > dd:after, -dl.citation > dd:after { - content: ""; - clear: both; -} - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; - word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dt:after { - content: ":"; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - -dl { - margin-bottom: 15px; -} - -dd > :first-child { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dl > dd:last-child, -dl > dd:last-child > :last-child { - margin-bottom: 0; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - 
background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -.classifier:before { - font-style: normal; - margin: 0.5em; - content: ":"; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -pre, div[class|="highlight"] { - clear: both; -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -div[class^="highlight-"] { - margin: 1em 0; -} - -td.linenos pre { - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - display: block; -} - -table.highlighttable tbody { - display: block; -} - -table.highlighttable tr { - display: flex; -} - -table.highlighttable td { - margin: 0; - padding: 0; -} - -table.highlighttable td.linenos { - padding-right: 0.5em; -} - -table.highlighttable td.code { - flex: 1; - overflow: hidden; -} - -.highlight .hll { - display: block; -} - -div.highlight pre, -table.highlighttable pre { - margin: 0; -} - -div.code-block-caption + div { - margin-top: 0; -} - -div.code-block-caption { - margin-top: 1em; - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -table.highlighttable td.linenos, -div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - margin: 1em 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: absolute; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/docs/_build/epub/_static/doctools.js b/docs/_build/epub/_static/doctools.js deleted file mode 100644 index daccd209..00000000 --- a/docs/_build/epub/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. 
- * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. - */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. 
- */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? 
params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keydown(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' - && !event.altKey && !event.ctrlKey && !event.metaKey && !event.shiftKey) { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - -$(document).ready(function() { - Documentation.init(); -}); diff --git a/docs/_build/epub/_static/documentation_options.js b/docs/_build/epub/_static/documentation_options.js deleted file mode 100644 index 6fd17081..00000000 --- a/docs/_build/epub/_static/documentation_options.js +++ /dev/null @@ -1,12 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'en', - COLLAPSE_INDEX: false, - BUILDER: 'epub', - FILE_SUFFIX: '.xhtml', - LINK_SUFFIX: '.xhtml', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: false -}; \ No newline at end of file diff --git a/docs/_build/epub/_static/epub.css b/docs/_build/epub/_static/epub.css deleted file mode 100644 index 04fdcbc6..00000000 --- a/docs/_build/epub/_static/epub.css +++ /dev/null @@ -1,715 +0,0 @@ -/* - * epub.css_t - * ~~~~~~~~~~ - * - * Sphinx stylesheet -- epub theme. - * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -/* -- main layout ----------------------------------------------------------- */ - - -body{ - writing-mode: horizontal-tb; - line-break: normal; - -epub-writing-mode: horizontal-tb; - -webkit-writing-mode: horizontal-tb; - -epub-line-break: normal; - -webkit-line-break: normal; -} - - -div.clearer { - clear: both; -} - -a:link, a:visited { - color: #3333ff; - text-decoration: underline; -} - -img { - border: 0; - max-width: 100%; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-family: sans-serif; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 230px; - margin-left: -100%; - font-size: 90%; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 100%; -} - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 130%; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles --------------------------------------------------- */ - -a.headerlink { - visibility: hidden; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; 
- border: 1px solid #ddb; - padding: 7px 7px 0 7px; - background-color: #ffe; - width: 40%; - float: right; -} - -p.sidebar-title { - font-weight: bold; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px 7px 0 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 110%; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -div.admonition dl { - margin-bottom: 0; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - border: 0; - border-collapse: collapse; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -/* -- for html4 -- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -/* -- for html5 -- */ - -/* bold field name, content starts on the same line */ - -dl.field-list > dt, -dl.option-list > dt, -dl.docinfo > dt, -dl.footnote > dt, -dl.citation > dt { - font-weight: bold; - clear: left; - float: left; - margin: 0; - padding: 0; - padding-right: 0.5em; -} - -/* Offset for field content (corresponds to the --field-name-limit option) */ - -dl.field-list > dd, -dl.option-list > dd, -dl.docinfo > dd { - margin-left: 9em; /* ca. 
14 chars in the test examples */ -} - -/* start field-body on a new line after long field names */ - -dl.field-list > dd > *:first-child, -dl.option-list > dd > *:first-child -{ - display: inline-block; - width: 100%; - margin: 0; -} - -dl.field-list > dt:after, -dl.docinfo > dt:after { - content: ":"; -} - -/* -- option lists ---------------------------------------------------------- */ - -dl.option-list { - margin-left: 40px; -} - -dl.option-list > dt { - font-weight: normal; -} - -span.option { - white-space: nowrap; -} - -/* -- lists ----------------------------------------------------------------- */ - -/* -- compact and simple lists: no margin between items -- */ - -.simple li, .compact li, -.simple ul, .compact ul, -.simple ol, .compact ol, -.simple > li p, .compact > li p, -dl.simple > dd, dl.compact > dd { - margin-top: 0; - margin-bottom: 0; -} - -/* -- enumerated lists ------------------------------------------------------ */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -dt span.classifier { - font-style: italic; -} - -dt span.classifier:before { - font-style: normal; - margin: 0.5em; - content: ":"; -} - -/* -- other body styles ----------------------------------------------------- */ - -dl { - margin-bottom: 15px; -} - -dd p { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dt:target, .highlighted { - background-color: #ddd; -} - -dl.glossary dt { - font-weight: bold; - font-size: 110%; -} - -.optional { - font-size: 130%; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -/* -- footnotes and citations ----------------------------------------------- */ - -/* -- for html4 -- */ -.footnote:target { - background-color: #dddddd; -} - -/* -- for html5 -- */ - -dl.footnote.superscript > dd { - margin-left: 1em; -} - -dl.footnote.brackets > dd { - margin-left: 2em; -} - -dl > dt.label { - font-weight: normal; -} - -a.footnote-reference.brackets:before, -dt.label > span.brackets:before { - content: "["; -} - -a.footnote-reference.brackets:after, -dt.label > span.brackets:after { - content: "]"; -} - -a.footnote-reference.superscript, -dl.footnote.superscript > dt.label { - vertical-align: super; - font-size: smaller; -} - -dt.label > span.fn-backref { - margin-left: 0.2em; -} - -dt.label > span.fn-backref > a { - font-style: italic; -} - -/* -- line blocks ----------------------------------------------------------- */ - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-style: italic; -} - -.accelerator { - text-decoration: underline; -} - -.classifier { - font-style: oblique; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - font-family: monospace; - overflow: auto; - overflow-y: hidden; -} - -td.linenos pre { - padding: 5px 0px; - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - margin-left: 0.5em; -} - -table.highlighttable td { - padding: 0 0.5em 0 0.5em; -} 
- -code { - font-family: monospace; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - padding: 1em 1em 0; -} - -div.literal-block-wrapper div.highlight { - margin: 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -/* -- special divs --------------------------------------------------------- */ - -div.quotebar { - background-color: #e3eff1; - max-width: 250px; - float: right; - font-family: sans-serif; - padding: 7px 7px; - border: 1px solid #ccc; -} -div.footer { - background-color: #e3eff1; - padding: 3px 8px 3px 0; - clear: both; - font-family: sans-serif; - font-size: 80%; - text-align: right; -} - -div.footer a { - text-decoration: underline; -} - -/* -- link-target ----------------------------------------------------------- */ - -.link-target { - font-size: 80%; -} - -table .link-target { - /* Do not show links in tables, there is not enough space */ - display: none; -} - -/* -- font-face ------------------------------------------------------------- */ - -/* -@font-face { - font-family: "LiberationNarrow"; - font-style: normal; - font-weight: normal; - src: url("res:///Data/fonts/LiberationNarrow-Regular.otf") - format("opentype"); -} -@font-face { - font-family: "LiberationNarrow"; - font-style: oblique, italic; - font-weight: normal; - src: url("res:///Data/fonts/LiberationNarrow-Italic.otf") - format("opentype"); -} -@font-face { - font-family: "LiberationNarrow"; - font-style: normal; - font-weight: bold; - src: url("res:///Data/fonts/LiberationNarrow-Bold.otf") - format("opentype"); -} -@font-face { - font-family: "LiberationNarrow"; - font-style: oblique, italic; - font-weight: bold; - src: url("res:///Data/fonts/LiberationNarrow-BoldItalic.otf") - format("opentype"); -} -*/ \ No newline at end of file diff --git a/docs/_build/epub/_static/file.png b/docs/_build/epub/_static/file.png deleted file mode 100644 index a858a410..00000000 Binary files a/docs/_build/epub/_static/file.png and /dev/null differ diff --git a/docs/_build/epub/_static/jquery-3.5.1.js b/docs/_build/epub/_static/jquery-3.5.1.js deleted file mode 100644 index 50937333..00000000 --- a/docs/_build/epub/_static/jquery-3.5.1.js +++ /dev/null @@ -1,10872 +0,0 @@ -/*! - * jQuery JavaScript Library v3.5.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2020-05-04T22:49Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. 
var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. - val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? 
- class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.5.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. 
- globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.5 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2020-03-14 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
-		function( tag, context ) {
-			if ( typeof context.getElementsByTagName !== "undefined" ) {
-				return context.getElementsByTagName( tag );
-
-				// DocumentFragment nodes don't have gEBTN
-			} else if ( support.qsa ) {
-				return context.querySelectorAll( tag );
-			}
-		} :
-
-		function( tag, context ) {
-			var elem,
-				tmp = [],
-				i = 0,
-
-				// By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
-				results = context.getElementsByTagName( tag );
-
-			// Filter out possible comments
-			if ( tag === "*" ) {
-				while ( ( elem = results[ i++ ] ) ) {
-					if ( elem.nodeType === 1 ) {
-						tmp.push( elem );
-					}
-				}
-
-				return tmp;
-			}
-			return results;
-		};
-
-	// Class
-	Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
-		if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
-			return context.getElementsByClassName( className );
-		}
-	};
-
-	/* QSA/matchesSelector
-	---------------------------------------------------------------------- */
-
-	// QSA and matchesSelector support
-
-	// matchesSelector(:active) reports false when true (IE9/Opera 11.5)
-	rbuggyMatches = [];
-
-	// qSa(:focus) reports false when true (Chrome 21)
-	// We allow this because of a bug in IE8/9 that throws an error
-	// whenever `document.activeElement` is accessed on an iframe
-	// So, we allow :focus to pass through QSA all the time to avoid the IE error
-	// See https://bugs.jquery.com/ticket/13378
-	rbuggyQSA = [];
-
-	if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
-
-		// Build QSA regex
-		// Regex strategy adopted from Diego Perini
-		assert( function( el ) {
-
-			var input;
-
-			// Select is set to empty string on purpose
-			// This is to test IE's treatment of not explicitly
-			// setting a boolean content attribute,
-			// since its presence should be enough
-			// https://bugs.jquery.com/ticket/12359
-			docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
-				"<select id='" + expando + "-\r\\' msallowcapture=''>" +
-				"<option selected=''></option></select>";
-
-			// Support: IE8, Opera 11-12.16
-			// Nothing should be selected when empty strings follow ^= or $= or *=
-			// The test attribute must be unknown in Opera but "safe" for WinRT
-			// https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
-			if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
-				rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
-			}
-
-			// Support: IE8
-			// Boolean attributes and "value" are not treated correctly
-			if ( !el.querySelectorAll( "[selected]" ).length ) {
-				rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
-			}
-
-			// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
-			if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
-				rbuggyQSA.push( "~=" );
-			}
-
-			// Support: IE 11+, Edge 15 - 18+
-			// IE 11/Edge don't find elements on a `[name='']` query in some cases.
-			// Adding a temporary attribute to the document before the selection works
-			// around the issue.
-			// Interestingly, IE 10 & older don't seem to have the issue.
-			input = document.createElement( "input" );
-			input.setAttribute( "name", "" );
-			el.appendChild( input );
-			if ( !el.querySelectorAll( "[name='']" ).length ) {
-				rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
-					whitespace + "*(?:''|\"\")" );
-			}
-
-			// Webkit/Opera - :checked should return selected option elements
-			// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
-			// IE8 throws error here and will not see later tests
-			if ( !el.querySelectorAll( ":checked" ).length ) {
-				rbuggyQSA.push( ":checked" );
-			}
-
-			// Support: Safari 8+, iOS 8+
-			// https://bugs.webkit.org/show_bug.cgi?id=136851
-			// In-page `selector#id sibling-combinator selector` fails
-			if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
-				rbuggyQSA.push( ".#.+[+~]" );
-			}
-
-			// Support: Firefox <=3.6 - 5 only
-			// Old Firefox doesn't throw on a badly-escaped identifier.
-			el.querySelectorAll( "\\\f" );
-			rbuggyQSA.push( "[\\r\\n\\f]" );
-		} );
-
-		assert( function( el ) {
-			el.innerHTML = "<a href='' disabled='disabled'></a>" +
-				"<select disabled='disabled'><option/></select>";
-
-			// Support: Windows 8 Native Apps
-			// The type and name attributes are restricted during .innerHTML assignment
-			var input = document.createElement( "input" );
-			input.setAttribute( "type", "hidden" );
-			el.appendChild( input ).setAttribute( "name", "D" );
-
-			// Support: IE8
-			// Enforce case-sensitivity of name attribute
-			if ( el.querySelectorAll( "[name=d]" ).length ) {
-				rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
-			}
-
-			// FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
-			// IE8 throws error here and will not see later tests
-			if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
-				rbuggyQSA.push( ":enabled", ":disabled" );
-			}
-
-			// Support: IE9-11+
-			// IE's :disabled selector does not pick up the children of disabled fieldsets
-			docElem.appendChild( el ).disabled = true;
-			if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
-				rbuggyQSA.push( ":enabled", ":disabled" );
-			}
-
-			// Support: Opera 10 - 11 only
-			// Opera 10-11 does not throw on post-comma invalid pseudos
-			el.querySelectorAll( "*,:x" );
-			rbuggyQSA.push( ",.*:" );
-		} );
-	}
-
-	if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
-		docElem.webkitMatchesSelector ||
-		docElem.mozMatchesSelector ||
-		docElem.oMatchesSelector ||
-		docElem.msMatchesSelector ) ) ) ) {
-
-		assert( function( el ) {
-
-			// Check to see if it's possible to do matchesSelector
-			// on a disconnected node (IE 9)
-			support.disconnectedMatch = matches.call( el, "*" );
-
-			// This should fail with an exception
-			// Gecko does not error, returns false instead
-			matches.call( el, "[s!='']:x" );
-			rbuggyMatches.push( "!=", pseudos );
-		} );
-	}
-
-	rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
-	rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
-
-	/* Contains
-	---------------------------------------------------------------------- */
-	hasCompare = rnative.test( docElem.compareDocumentPosition );
-
-	// Element contains another
-	// Purposefully self-exclusive
-	// As in, an element does not contain itself
-	contains = hasCompare || rnative.test( docElem.contains ) ?
-		function( a, b ) {
-			var adown = a.nodeType === 9 ? a.documentElement : a,
-				bup = b && b.parentNode;
-			return a === bup || !!( bup && bup.nodeType === 1 && (
-				adown.contains ?
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens - .slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length;
- while ( i-- ) {
- token = tokens[ i ];
-
- // Abort if we hit a combinator
- if ( Expr.relative[ ( type = token.type ) ] ) {
- break;
- }
- if ( ( find = Expr.find[ type ] ) ) {
-
- // Search, expanding context for leading sibling combinators
- if ( ( seed = find(
- token.matches[ 0 ].replace( runescape, funescape ),
- rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
- context
- ) ) ) {
-
- // If seed is empty or no tokens remain, we can return early
- tokens.splice( i, 1 );
- selector = seed.length && toSelector( tokens );
- if ( !selector ) {
- push.apply( results, seed );
- return results;
- }
-
- break;
- }
- }
- }
- }
-
- // Compile and execute a filtering function if one is not provided
- // Provide `match` to avoid retokenization if we modified the selector above
- ( compiled || compile( selector, match ) )(
- seed,
- context,
- !documentIsHTML,
- results,
- !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
- );
- return results;
-};
-
-// One-time assignments
-
-// Sort stability
-support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert( function( el ) {
-
- // Should return 1, but returns 4 (following)
- return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
-} );
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert( function( el ) {
- el.innerHTML = "<a href='#'></a>";
- return el.firstChild.getAttribute( "href" ) === "#";
-} ) ) {
- addHandle( "type|href|height|width", function( elem, name, isXML ) {
- if ( !isXML ) {
- return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
- }
- } );
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert( function( el ) {
- el.innerHTML = "<input/>";
- el.firstChild.setAttribute( "value", "" );
- return el.firstChild.getAttribute( "value" ) === "";
-} ) ) {
- addHandle( "value", function( elem, _name, isXML ) {
- if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
- return elem.defaultValue;
- }
- } );
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert( function( el ) {
- return el.getAttribute( "disabled" ) == null;
-} ) ) {
- addHandle( booleans, function( elem, name, isXML ) {
- var val;
- if ( !isXML ) {
- return elem[ name ] === true ? name.toLowerCase() :
- ( val = elem.getAttributeNode( name ) ) && val.specified ?
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
-
 jQuery( selector ) :
- selector || [],
- false
- ).length;
- }
-} );
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
- // A simple way to check for HTML strings
- // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
- // Strict HTML recognition (#11290: must start with <)
- // Shortcut simple #id case for speed
- rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
- init = jQuery.fn.init = function( selector, context, root ) {
- var match, elem;
-
- // HANDLE: $(""), $(null), $(undefined), $(false)
- if ( !selector ) {
- return this;
- }
-
- // Method init() accepts an alternate rootjQuery
- // so migrate can support jQuery.sub (gh-2101)
- root = root || rootjQuery;
-
- // Handle HTML strings
- if ( typeof selector === "string" ) {
- if ( selector[ 0 ] === "<" &&
- selector[ selector.length - 1 ] === ">" &&
- selector.length >= 3 ) {
-
- // Assume that strings that start and end with <> are HTML and skip the regex check
- match = [ null, selector, null ];
-
- } else {
- match = rquickExpr.exec( selector );
- }
-
- // Match html or make sure no context is specified for #id
- if ( match && ( match[ 1 ] || !context ) ) {
-
- // HANDLE: $(html) -> $(array)
- if ( match[ 1 ] ) {
- context = context instanceof jQuery ? context[ 0 ] : context;
-
- // Option to run scripts is true for back-compat
- // Intentionally let the error be thrown if parseHTML is not present
- jQuery.merge( this, jQuery.parseHTML(
- match[ 1 ],
- context && context.nodeType ? context.ownerDocument || context : document,
- true
- ) );
-
- // HANDLE: $(html, props)
- if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
- for ( match in context ) {
-
- // Properties of context are called as methods if possible
- if ( isFunction( this[ match ] ) ) {
- this[ match ]( context[ match ] );
-
- // ...and otherwise set as attributes
- } else {
- this.attr( match, context[ match ] );
- }
- }
- }
-
- return this;
-
- // HANDLE: $(#id)
- } else {
- elem = document.getElementById( match[ 2 ] );
-
- if ( elem ) {
-
- // Inject the element directly into the jQuery object
- this[ 0 ] = elem;
- this.length = 1;
- }
- return this;
- }
-
- // HANDLE: $(expr, $(...))
- } else if ( !context || context.jquery ) {
- return ( context || root ).find( selector );
-
- // HANDLE: $(expr, context)
- // (which is just equivalent to: $(context).find(expr)
- } else {
- return this.constructor( context ).find( selector );
- }
-
- // HANDLE: $(DOMElement)
- } else if ( selector.nodeType ) {
- this[ 0 ] = selector;
- this.length = 1;
- return this;
-
- // HANDLE: $(function)
- // Shortcut for document ready
- } else if ( isFunction( selector ) ) {
- return root.ready !== undefined ?
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null;
- },
- parents: function( elem ) {
- return dir( elem, "parentNode" );
- },
- parentsUntil: function( elem, _i, until ) {
- return dir( elem, "parentNode", until );
- },
- next: function( elem ) {
- return sibling( elem, "nextSibling" );
- },
- prev: function( elem ) {
- return sibling( elem, "previousSibling" );
- },
- nextAll: function( elem ) {
- return dir( elem, "nextSibling" );
- },
- prevAll: function( elem ) {
- return dir( elem, "previousSibling" );
- },
- nextUntil: function( elem, _i, until ) {
- return dir( elem, "nextSibling", until );
- },
- prevUntil: function( elem, _i, until ) {
- return dir( elem, "previousSibling", until );
- },
- siblings: function( elem ) {
- return siblings( ( elem.parentNode || {} ).firstChild, elem );
- },
- children: function( elem ) {
- return siblings( elem.firstChild );
- },
- contents: function( elem ) {
- if ( elem.contentDocument != null &&
-
- // Support: IE 11+
- // <object> elements with no `data` attribute has an object
- // `contentDocument` with a `null` prototype.
- getProto( elem.contentDocument ) ) {
-
- return elem.contentDocument;
- }
-
- // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
- // Treat the template element as a regular one in browsers that
- // don't support it.
- if ( nodeName( elem, "template" ) ) {
- elem = elem.content || elem;
- }
-
- return jQuery.merge( [], elem.childNodes );
- }
-}, function( name, fn ) {
- jQuery.fn[ name ] = function( until, selector ) {
- var matched = jQuery.map( this, fn, until );
-
- if ( name.slice( -5 ) !== "Until" ) {
- selector = until;
- }
-
- if ( selector && typeof selector === "string" ) {
- matched = jQuery.filter( selector, matched );
- }
-
- if ( this.length > 1 ) {
-
- // Remove duplicates
- if ( !guaranteedUnique[ name ] ) {
- jQuery.uniqueSort( matched );
- }
-
- // Reverse order for parents* and prev-derivatives
- if ( rparentsprev.test( name ) ) {
- matched.reverse();
- }
- }
-
- return this.pushStack( matched );
- };
-} );
-var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
-// Convert String-formatted options into Object-formatted ones
-function createOptions( options ) {
- var object = {};
- jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
- object[ flag ] = true;
- } );
- return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- * options: an optional list of space-separated options that will change how
- * the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- * once: will ensure the callback list can only be fired once (like a Deferred)
- *
- * memory: will keep track of previous values and will call any callback added
- * after the list has been fired right away with the latest "memorized"
- * values (like a Deferred)
- *
- * unique: will ensure a callback can only be added once (no duplicate in the list)
- *
- * stopOnFalse: interrupt callings when a callback returns false
- *
- */
-jQuery.Callbacks = function( options ) {
-
- // Convert options from String-formatted to Object-formatted if needed
- // (we check in cache first)
- options = typeof options === "string" ?
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide(); - } - - return this.each( function() { - if ( isHiddenWithinTree( this ) ) { - jQuery( this ).show(); - } else { - jQuery( this ).hide(); - } - } ); - } -} ); -var rcheckableType = ( /^(?:checkbox|radio)$/i ); - -var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i ); - -var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i ); - - - -( function() { - var fragment = document.createDocumentFragment(), - div = fragment.appendChild( document.createElement( "div" ) ), - input = document.createElement( "input" ); - - // Support: Android 4.0 - 4.3 only - // Check state lost if the name is set (#11217) - // Support: Windows Web Apps (WWA) - // `name` and `type` must use .setAttribute for WWA (#14901) - input.setAttribute( "type", "radio" ); - input.setAttribute( "checked", "checked" ); - input.setAttribute( "name", "t" ); - - div.appendChild( input ); - - // Support: Android <=4.1 only - // Older WebKit doesn't clone checked state correctly in fragments - support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; - - // Support: IE <=11 only - // Make sure textarea (and checkbox) defaultValue is properly cloned - div.innerHTML = "<textarea>x</textarea>"; - support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; - - // Support: IE <=9 only - // IE <=9 replaces <option> tags with their contents when inserted outside of - // the select element. - div.innerHTML = "<option></option>"; - support.option = !!div.lastChild; -} )(); - - -// We have to close these tags to support XHTML (#13200) -var wrapMap = { - - // XHTML parsers do not magically insert elements in the - // same way that tag soup parsers do. So we cannot shorten - // this by omitting <tbody> or other required elements. - thead: [ 1, "<table>", "</table>" ],
- col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
- tr: [ 2, "<table><tbody>", "</tbody></table>" ],
- td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
- - _default: [ 0, "", "" ] -}; - -wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; -wrapMap.th = wrapMap.td; - -// Support: IE <=9 only -if ( !support.option ) { - wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ]; -} - - -function getAll( context, tag ) { - - // Support: IE <=9 - 11 only - // Use typeof to avoid zero-argument method invocation on host objects (#15151) - var ret; - - if ( typeof context.getElementsByTagName !== "undefined" ) { - ret = context.getElementsByTagName( tag || "*" ); - - } else if ( typeof context.querySelectorAll !== "undefined" ) { - ret = context.querySelectorAll( tag || "*" ); - - } else { - ret = []; - } - - if ( tag === undefined || tag && nodeName( context, tag ) ) { - return jQuery.merge( [ context ], ret ); - } - - return ret; -} - - -// Mark scripts as having already been evaluated -function setGlobalEval( elems, refElements ) { - var i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - dataPriv.set( - elems[ i ], - "globalEval", - !refElements || dataPriv.get( refElements[ i ], "globalEval" ) - ); - } -} - - -var rhtml = /<|&#?\w+;/; - -function buildFragment( elems, context, scripts, selection, ignored ) { - var elem, tmp, tag, wrap, attached, j, - fragment = context.createDocumentFragment(), - nodes = [], - i = 0, - l = elems.length; - - for ( ; i < l; i++ ) { - elem = elems[ i ]; - - if ( elem || elem === 0 ) { - - // Add nodes directly - if ( toType( elem ) === "object" ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); - - // Convert non-html into a text node - } else if ( !rhtml.test( elem ) ) { - nodes.push( context.createTextNode( elem ) ); - - // Convert html into DOM nodes - } else { - tmp = tmp || fragment.appendChild( context.createElement( "div" ) ); - - // Deserialize a standard representation - tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase(); - wrap = wrapMap[ tag ] || wrapMap._default; - tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ]; - - // Descend through wrappers to the right content - j = wrap[ 0 ]; - while ( j-- ) { - tmp = tmp.lastChild; - } - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( nodes, tmp.childNodes ); - - // Remember the top-level container - tmp = fragment.firstChild; - - // Ensure the created nodes are orphaned (#12392) - tmp.textContent = ""; - } - } - } - - // Remove wrapper from fragment - fragment.textContent = ""; - - i = 0; - while ( ( elem = nodes[ i++ ] ) ) { - - // Skip elements already in the context collection (trac-4087) - if ( selection && jQuery.inArray( elem, selection ) > -1 ) { - if ( ignored ) { - ignored.push( elem ); - } - continue; - } - - attached = isAttached( elem ); - - // Append to fragment - tmp = getAll( fragment.appendChild( elem ), "script" ); - - // Preserve script evaluation history - if ( attached ) { - setGlobalEval( tmp ); - } - - // Capture executables - if ( scripts ) { - j = 0; - while ( ( elem = tmp[ j++ ] ) ) { - if ( rscriptType.test( elem.type || "" ) ) { - scripts.push( elem ); - } - } - } - } - - return fragment; -} - - -var - rkeyEvent = /^key/, - rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/, - rtypenamespace = /^([^.]*)(?:\.(.+)|)/; - -function returnTrue() { - return true; -} - -function returnFalse() { - return false; -} - -// Support: IE <=9 - 11+ -// focus() and blur() are
asynchronous, except when they are no-op. -// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. 
- saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - return result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. 
-// Do the same for pointerenter/pointerleave and pointerover/pointerout -// -// Support: Safari 7 only -// Safari sends mouseenter too often; see: -// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 -// for the description of the bug (it existed in older Chrome versions as well). -jQuery.each( { - mouseenter: "mouseover", - mouseleave: "mouseout", - pointerenter: "pointerover", - pointerleave: "pointerout" -}, function( orig, fix ) { - jQuery.event.special[ orig ] = { - delegateType: fix, - bindType: fix, - - handle: function( event ) { - var ret, - target = this, - related = event.relatedTarget, - handleObj = event.handleObj; - - // For mouseenter/leave call the handler if related is outside the target. - // NB: No relatedTarget if the mouse left/entered the browser window - if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) { - event.type = handleObj.origType; - ret = handleObj.handler.apply( this, arguments ); - event.type = fix; - } - return ret; - } - }; -} ); - -jQuery.fn.extend( { - - on: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn ); - }, - one: function( types, selector, data, fn ) { - return on( this, types, selector, data, fn, 1 ); - }, - off: function( types, selector, fn ) { - var handleObj, type; - if ( types && types.preventDefault && types.handleObj ) { - - // ( event ) dispatched jQuery.Event - handleObj = types.handleObj; - jQuery( types.delegateTarget ).off( - handleObj.namespace ? - handleObj.origType + "." + handleObj.namespace : - handleObj.origType, - handleObj.selector, - handleObj.handler - ); - return this; - } - if ( typeof types === "object" ) { - - // ( types-object [, selector] ) - for ( type in types ) { - this.off( type, selector, types[ type ] ); - } - return this; - } - if ( selector === false || typeof selector === "function" ) { - - // ( types [, fn] ) - fn = selector; - selector = undefined; - } - if ( fn === false ) { - fn = returnFalse; - } - return this.each( function() { - jQuery.event.remove( this, types, fn, selector ); - } ); - } -} ); - - -var - - // Support: IE <=10 - 11, Edge 12 - 13 only - // In IE/Edge using regex groups here causes severe slowdowns. - // See https://connect.microsoft.com/IE/feedback/details/1736512/ - rnoInnerhtml = /<script|<style|<link/i, - - // checked="checked" or checked - rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, - - rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g; - -// Prefer a tbody over its parent table for containing new rows -function manipulationTarget( elem, content ) { - if ( nodeName( elem, "table" ) && - nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) { - - return jQuery( elem ).children( "tbody" )[ 0 ] || elem; - } - - return elem; -} - -// Replace/restore the type attribute of script elements for safe DOM manipulation -function disableScript( elem ) { - elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type; - return elem; -} -function restoreScript( elem ) { - if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) { - elem.type = elem.type.slice( 5 ); - } else { - elem.removeAttribute( "type" ); - } - - return elem; -} - -function cloneCopyEvent( src, dest ) { - var i, l, type, pdataOld, udataOld, udataCur, events; - - if ( dest.nodeType !== 1 ) { - return; - } - - // 1. Copy private data: events, handlers, etc.
- if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.get( src ); - events = pdataOld.events; - - if ( events ) { - dataPriv.remove( dest, "handle events" ); - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove 
- elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! 
- reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px"; - tr.style.height = "1px"; - trChild.style.height = "9px"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = parseInt( trStyle.height ) > 3; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. 
- return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
[… several thousand lines of the bundled jQuery library source elided from this deletion hunk: CSS hooks and box-model helpers, the Tween/effects engine, attribute/property/class/value manipulation, event triggering and focus-event simulation, XML parsing, form serialization, and the ajax prefilter/transport/converter internals. The file is generated docs/_build output and is deleted in its entirety along with the rest of that directory. …]
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script - if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git 
a/docs/_build/html/_modules/mlflow/entities/_mlflow_object.html b/docs/_build/html/_modules/mlflow/entities/_mlflow_object.html
deleted file mode 100644
index 014d2d51..00000000
--- a/docs/_build/html/_modules/mlflow/entities/_mlflow_object.html
+++ /dev/null
@@ -1,261 +0,0 @@
- mlflow.entities._mlflow_object — Splice MLManager documentation

Source code for mlflow.entities._mlflow_object

-from abc import abstractmethod
-import pprint
-
-
-class _MLflowObject(object):
-    def __iter__(self):
-        # Iterate through list of properties and yield as key -> value
-        for prop in self._properties():
-            yield prop, self.__getattribute__(prop)
-
-    @classmethod
-    def _get_properties_helper(cls):
-        return sorted([p for p in cls.__dict__ if isinstance(getattr(cls, p), property)])
-
-    @classmethod
-    def _properties(cls):
-        return cls._get_properties_helper()
-
-    @classmethod
-    @abstractmethod
-    def from_proto(cls, proto):
-        pass
-
-    @classmethod
-    def from_dictionary(cls, the_dict):
-        filtered_dict = {key: value for key, value in the_dict.items() if key in cls._properties()}
-        return cls(**filtered_dict)
-
-    def __repr__(self):
-        return to_string(self)
-
-
-def to_string(obj):
-    return _MLflowObjectPrinter().to_string(obj)
-
-
-def get_classname(obj):
-    return type(obj).__name__
-
-
-class _MLflowObjectPrinter(object):
-    def __init__(self):
-        super().__init__()
-        self.printer = pprint.PrettyPrinter()
-
-    def to_string(self, obj):
-        if isinstance(obj, _MLflowObject):
-            return "<%s: %s>" % (get_classname(obj), self._entity_to_string(obj))
-        return self.printer.pformat(obj)
-
-    def _entity_to_string(self, entity):
-        return ", ".join(["%s=%s" % (key, self.to_string(value)) for key, value in entity])
-
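The `_get_properties_helper`/`__iter__` pair above is what lets any `_MLflowObject` subclass be converted with plain `dict(entity)` and pretty-printed by `_MLflowObjectPrinter`. A minimal, self-contained sketch of the same pattern (plain Python; the `Point` class is illustrative, not part of mlflow):

```python
# Illustrative re-creation of the property-iteration pattern above.
class Point:
    def __init__(self, x, y):
        self._x, self._y = x, y

    @property
    def x(self):
        return self._x

    @property
    def y(self):
        return self._y

    @classmethod
    def _properties(cls):
        # Same trick as _get_properties_helper: scan the class dict for properties.
        return sorted(p for p in cls.__dict__ if isinstance(getattr(cls, p), property))

    def __iter__(self):
        for prop in self._properties():
            yield prop, getattr(self, prop)

print(dict(Point(1, 2)))  # {'x': 1, 'y': 2}
```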
\ No newline at end of file
diff --git a/docs/_build/html/_modules/mlflow/entities/run.html b/docs/_build/html/_modules/mlflow/entities/run.html
deleted file mode 100644
index f2261c32..00000000
--- a/docs/_build/html/_modules/mlflow/entities/run.html
+++ /dev/null
@@ -1,263 +0,0 @@
- mlflow.entities.run — Splice MLManager documentation

Source code for mlflow.entities.run

-from mlflow.entities._mlflow_object import _MLflowObject
-from mlflow.entities.run_data import RunData
-from mlflow.entities.run_info import RunInfo
-from mlflow.exceptions import MlflowException
-from mlflow.protos.service_pb2 import Run as ProtoRun
-
-
-class Run(_MLflowObject):
-    """
-    Run object: a pairing of run metadata (RunInfo) with run data (RunData).
-    """
-
-    def __init__(self, run_info, run_data):
-        if run_info is None:
-            raise MlflowException("run_info cannot be None")
-        self._info = run_info
-        self._data = run_data
-
-    @property
-    def info(self):
-        """
-        The run metadata, such as the run id, start time, and status.
-
-        :rtype: :py:class:`mlflow.entities.RunInfo`
-        """
-        return self._info
-
-    @property
-    def data(self):
-        """
-        The run data, including metrics, parameters, and tags.
-
-        :rtype: :py:class:`mlflow.entities.RunData`
-        """
-        return self._data
-
-    def to_proto(self):
-        run = ProtoRun()
-        run.info.MergeFrom(self.info.to_proto())
-        if self.data:
-            run.data.MergeFrom(self.data.to_proto())
-        return run
-
-    @classmethod
-    def from_proto(cls, proto):
-        return cls(RunInfo.from_proto(proto.info), RunData.from_proto(proto.data))
-
-    def to_dictionary(self):
-        run_dict = {
-            "info": dict(self.info),
-        }
-        if self.data:
-            run_dict["data"] = self.data.to_dictionary()
-        return run_dict
-
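In practice these entity classes are reached through the public mlflow client rather than constructed directly. A hedged usage sketch (assumes mlflow is installed with a default local tracking store; the param name `alpha` is illustrative):

```python
# Hedged sketch: obtaining a Run via the public API and using to_dictionary().
import mlflow

with mlflow.start_run() as active:
    mlflow.log_param("alpha", 0.5)

run = mlflow.get_run(active.info.run_id)   # an mlflow.entities.Run
print(run.to_dictionary()["info"]["run_id"])
```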
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyspark/ml/base.html b/docs/_build/html/_modules/pyspark/ml/base.html
deleted file mode 100644
index e25d8b1d..00000000
--- a/docs/_build/html/_modules/pyspark/ml/base.html
+++ /dev/null
@@ -1,442 +0,0 @@
- pyspark.ml.base — Splice MLManager documentation

Source code for pyspark.ml.base

-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from abc import ABCMeta, abstractmethod
-
-import copy
-import threading
-
-from pyspark import since
-from pyspark.ml.param.shared import *
-from pyspark.ml.common import inherit_doc
-from pyspark.sql.functions import udf
-from pyspark.sql.types import StructField, StructType
-
-
-class _FitMultipleIterator(object):
-    """
-    Used by default implementation of Estimator.fitMultiple to produce models in a thread safe
-    iterator. This class handles the simple case of fitMultiple where each param map should be
-    fit independently.
-
-    :param fitSingleModel: Function: (int => Model) which fits an estimator to a dataset.
-        `fitSingleModel` may be called up to `numModels` times, with a unique index each time.
-        Each call to `fitSingleModel` with an index should return the Model associated with
-        that index.
-    :param numModels: Number of models this iterator should produce.
-
-    See Estimator.fitMultiple for more info.
-    """
-    def __init__(self, fitSingleModel, numModels):
-        """
-
-        """
-        self.fitSingleModel = fitSingleModel
-        self.numModel = numModels
-        self.counter = 0
-        self.lock = threading.Lock()
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        with self.lock:
-            index = self.counter
-            if index >= self.numModel:
-                raise StopIteration("No models remaining.")
-            self.counter += 1
-        return index, self.fitSingleModel(index)
-
-    def next(self):
-        """For python2 compatibility."""
-        return self.__next__()
-
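The iterator above can be driven directly; the lock simply serializes index hand-out so concurrent consumers never fit the same param map twice. A toy sketch, assuming `_FitMultipleIterator` as defined above is in scope (the strings stand in for real fitted models):

```python
# Toy driver for _FitMultipleIterator; fit_single_model stands in for
# a real estimator.fit(dataset, paramMaps[index]) call.
def fit_single_model(index):
    return "model-%d" % index

for index, model in _FitMultipleIterator(fit_single_model, numModels=3):
    print(index, model)   # 0 model-0, 1 model-1, 2 model-2
```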
-
-@inherit_doc
-class Estimator(Params):
-    """
-    Abstract class for estimators that fit models to data.
-
-    .. versionadded:: 1.3.0
-    """
-
-    __metaclass__ = ABCMeta
-
-    @abstractmethod
-    def _fit(self, dataset):
-        """
-        Fits a model to the input dataset. This is called by the default implementation of fit.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :returns: fitted model
-        """
-        raise NotImplementedError()
-
-    @since("2.3.0")
-    def fitMultiple(self, dataset, paramMaps):
-        """
-        Fits a model to the input dataset for each param map in `paramMaps`.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`.
-        :param paramMaps: A Sequence of param maps.
-        :return: A thread safe iterable which contains one model for each param map. Each
-                 call to `next(modelIterator)` will return `(index, model)` where model was fit
-                 using `paramMaps[index]`. `index` values may not be sequential.
-
-        .. note:: DeveloperApi
-        .. note:: Experimental
-        """
-        estimator = self.copy()
-
-        def fitSingleModel(index):
-            return estimator.fit(dataset, paramMaps[index])
-
-        return _FitMultipleIterator(fitSingleModel, len(paramMaps))
-
-    @since("1.3.0")
-    def fit(self, dataset, params=None):
-        """
-        Fits a model to the input dataset with optional parameters.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :param params: an optional param map that overrides embedded params. If a list/tuple of
-                       param maps is given, this calls fit on each param map and returns a list of
-                       models.
-        :returns: fitted model(s)
-        """
-        if params is None:
-            params = dict()
-        if isinstance(params, (list, tuple)):
-            models = [None] * len(params)
-            for index, model in self.fitMultiple(dataset, params):
-                models[index] = model
-            return models
-        elif isinstance(params, dict):
-            if params:
-                return self.copy(params)._fit(dataset)
-            else:
-                return self._fit(dataset)
-        else:
-            raise ValueError("Params must be either a param map or a list/tuple of param maps, "
-                             "but got %s." % type(params))
-
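Because `fit` dispatches on the type of `params`, a list or tuple of param maps trains one model per map (via `fitMultiple`), while a single dict just overrides embedded params for one fit. A hedged sketch (assumes a running SparkSession and a DataFrame `train_df` with `features`/`label` columns; neither comes from the source above):

```python
# Hedged sketch of the two fit() call shapes described above.
from pyspark.ml.classification import LogisticRegression

lr = LogisticRegression(maxIter=10)

# One dict: override embedded params for a single fit.
model = lr.fit(train_df, params={lr.regParam: 0.1})

# List of param maps: one fitted model per map, order matching the input.
models = lr.fit(train_df, params=[{lr.regParam: 0.1}, {lr.regParam: 0.01}])
```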
-
-@inherit_doc
-class Transformer(Params):
-    """
-    Abstract class for transformers that transform one dataset into another.
-
-    .. versionadded:: 1.3.0
-    """
-
-    __metaclass__ = ABCMeta
-
-    @abstractmethod
-    def _transform(self, dataset):
-        """
-        Transforms the input dataset.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :returns: transformed dataset
-        """
-        raise NotImplementedError()
-
-    @since("1.3.0")
-    def transform(self, dataset, params=None):
-        """
-        Transforms the input dataset with optional parameters.
-
-        :param dataset: input dataset, which is an instance of :py:class:`pyspark.sql.DataFrame`
-        :param params: an optional param map that overrides embedded params.
-        :returns: transformed dataset
-        """
-        if params is None:
-            params = dict()
-        if isinstance(params, dict):
-            if params:
-                return self.copy(params)._transform(dataset)
-            else:
-                return self._transform(dataset)
-        else:
-            raise ValueError("Params must be a param map but got %s." % type(params))
-
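`transform` accepts the same one-off param-map override as `fit`. A short hedged sketch (assumes `model` is a fitted pyspark model exposing a `predictionCol` param and `df` is a DataFrame; both are assumptions, not part of the source):

```python
# Hedged sketch: per-call param override at transform time.
scored = model.transform(df, {model.predictionCol: "my_prediction"})
```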
-
-@inherit_doc
-class Model(Transformer):
-    """
-    Abstract class for models that are fitted by estimators.
-
-    .. versionadded:: 1.4.0
-    """
-
-    __metaclass__ = ABCMeta
-
-
-@inherit_doc
-class UnaryTransformer(HasInputCol, HasOutputCol, Transformer):
-    """
-    Abstract class for transformers that take one input column, apply transformation,
-    and output the result as a new column.
-
-    .. versionadded:: 2.3.0
-    """
-
-    @abstractmethod
-    def createTransformFunc(self):
-        """
-        Creates the transform function using the given param map. The input param map
-        already takes the embedded param map into account, so the param values should be
-        determined solely by the input param map.
-        """
-        raise NotImplementedError()
-
-    @abstractmethod
-    def outputDataType(self):
-        """
-        Returns the data type of the output column.
-        """
-        raise NotImplementedError()
-
-    @abstractmethod
-    def validateInputType(self, inputType):
-        """
-        Validates the input type. Throws an exception if it is invalid.
-        """
-        raise NotImplementedError()
-
-    def transformSchema(self, schema):
-        inputType = schema[self.getInputCol()].dataType
-        self.validateInputType(inputType)
-        if self.getOutputCol() in schema.names:
-            raise ValueError("Output column %s already exists." % self.getOutputCol())
-        outputFields = copy.copy(schema.fields)
-        outputFields.append(StructField(self.getOutputCol(),
-                                        self.outputDataType(),
-                                        nullable=False))
-        return StructType(outputFields)
-
-    def _transform(self, dataset):
-        self.transformSchema(dataset.schema)
-        transformUDF = udf(self.createTransformFunc(), self.outputDataType())
-        transformedDataset = dataset.withColumn(self.getOutputCol(),
-                                                transformUDF(dataset[self.getInputCol()]))
-        return transformedDataset
-
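A concrete `UnaryTransformer` only has to supply the three abstract methods above; `transformSchema` and `_transform` then do the schema check and UDF wiring. A hedged sketch (the `SquareTransformer` class is illustrative; assumes pyspark is installed and `df` has a `DoubleType` column `x`):

```python
# Illustrative UnaryTransformer subclass: squares a numeric column.
from pyspark.ml import UnaryTransformer
from pyspark.sql.types import DoubleType

class SquareTransformer(UnaryTransformer):
    def createTransformFunc(self):
        return lambda value: float(value) ** 2

    def outputDataType(self):
        return DoubleType()

    def validateInputType(self, inputType):
        if inputType != DoubleType():
            raise TypeError("Expected DoubleType, got %s" % inputType)

# Usage sketch: the setters come from the HasInputCol/HasOutputCol mixins.
# squared = SquareTransformer().setInputCol("x").setOutputCol("x_sq").transform(df)
```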
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyspark/ml/param.html b/docs/_build/html/_modules/pyspark/ml/param.html
deleted file mode 100644
index 94acb0c8..00000000
--- a/docs/_build/html/_modules/pyspark/ml/param.html
+++ /dev/null
@@ -1,716 +0,0 @@
- pyspark.ml.param — Splice MLManager documentation

Source code for pyspark.ml.param

-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import array
-import sys
-if sys.version > '3':
-    basestring = str
-    xrange = range
-    unicode = str
-
-from abc import ABCMeta
-import copy
-import numpy as np
-
-from py4j.java_gateway import JavaObject
-
-from pyspark.ml.linalg import DenseVector, Vector, Matrix
-from pyspark.ml.util import Identifiable
-
-
-__all__ = ['Param', 'Params', 'TypeConverters']
-
-
-class Param(object):
-    """
-    A param with self-contained documentation.
-
-    .. versionadded:: 1.3.0
-    """
-
-    def __init__(self, parent, name, doc, typeConverter=None):
-        if not isinstance(parent, Identifiable):
-            raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
-        self.parent = parent.uid
-        self.name = str(name)
-        self.doc = str(doc)
-        self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter
-
-    def _copy_new_parent(self, parent):
-        """Copy the current param to a new parent, must be a dummy param."""
-        if self.parent == "undefined":
-            param = copy.copy(self)
-            param.parent = parent.uid
-            return param
-        else:
-            raise ValueError("Cannot copy from non-dummy parent %s." % parent)
-
-    def __str__(self):
-        return str(self.parent) + "__" + self.name
-
-    def __repr__(self):
-        return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)
-
-    def __hash__(self):
-        return hash(str(self))
-
-    def __eq__(self, other):
-        if isinstance(other, Param):
-            return self.parent == other.parent and self.name == other.name
-        else:
-            return False
-
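Given the `__str__`/`__repr__` definitions above, a `Param` renders as `<parentUid>__<name>`, and equality is based on the (parent, name) pair. A quick sketch using the `_dummy()` placeholder parent defined later in this module (the param name and doc are illustrative):

```python
# Sketch: constructing a Param against the dummy placeholder parent.
p = Param(Params._dummy(), "threshold", "decision threshold in [0, 1]",
          typeConverter=TypeConverters.toFloat)
print(p)        # undefined__threshold
print(repr(p))  # Param(parent='undefined', name='threshold', doc='decision threshold in [0, 1]')
```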
-
-class TypeConverters(object):
-    """
-    .. note:: DeveloperApi
-
-    Factory methods for common type conversion functions for `Param.typeConverter`.
-
-    .. versionadded:: 2.0.0
-    """
-
-    @staticmethod
-    def _is_numeric(value):
-        vtype = type(value)
-        return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'
-
-    @staticmethod
-    def _is_integer(value):
-        return TypeConverters._is_numeric(value) and float(value).is_integer()
-
-    @staticmethod
-    def _can_convert_to_list(value):
-        vtype = type(value)
-        return vtype in [list, np.ndarray, tuple, xrange, array.array] or isinstance(value, Vector)
-
-    @staticmethod
-    def _can_convert_to_string(value):
-        vtype = type(value)
-        return isinstance(value, basestring) or vtype in [np.unicode_, np.string_, np.str_]
-
-    @staticmethod
-    def identity(value):
-        """
-        Dummy converter that just returns value.
-        """
-        return value
-
-    @staticmethod
-    def toList(value):
-        """
-        Convert a value to a list, if possible.
-        """
-        if type(value) == list:
-            return value
-        elif type(value) in [np.ndarray, tuple, xrange, array.array]:
-            return list(value)
-        elif isinstance(value, Vector):
-            return list(value.toArray())
-        else:
-            raise TypeError("Could not convert %s to list" % value)
-
-    @staticmethod
-    def toListFloat(value):
-        """
-        Convert a value to list of floats, if possible.
-        """
-        if TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
-                return [float(v) for v in value]
-        raise TypeError("Could not convert %s to list of floats" % value)
-
-    @staticmethod
-    def toListInt(value):
-        """
-        Convert a value to list of ints, if possible.
-        """
-        if TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._is_integer(v), value)):
-                return [int(v) for v in value]
-        raise TypeError("Could not convert %s to list of ints" % value)
-
-    @staticmethod
-    def toListString(value):
-        """
-        Convert a value to list of strings, if possible.
-        """
-        if TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
-                return [TypeConverters.toString(v) for v in value]
-        raise TypeError("Could not convert %s to list of strings" % value)
-
-    @staticmethod
-    def toVector(value):
-        """
-        Convert a value to a MLlib Vector, if possible.
-        """
-        if isinstance(value, Vector):
-            return value
-        elif TypeConverters._can_convert_to_list(value):
-            value = TypeConverters.toList(value)
-            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
-                return DenseVector(value)
-        raise TypeError("Could not convert %s to vector" % value)
-
-    @staticmethod
-    def toMatrix(value):
-        """
-        Convert a value to a MLlib Matrix, if possible.
-        """
-        if isinstance(value, Matrix):
-            return value
-        raise TypeError("Could not convert %s to matrix" % value)
-
-    @staticmethod
-    def toFloat(value):
-        """
-        Convert a value to a float, if possible.
-        """
-        if TypeConverters._is_numeric(value):
-            return float(value)
-        else:
-            raise TypeError("Could not convert %s to float" % value)
-
-    @staticmethod
-    def toInt(value):
-        """
-        Convert a value to an int, if possible.
-        """
-        if TypeConverters._is_integer(value):
-            return int(value)
-        else:
-            raise TypeError("Could not convert %s to int" % value)
-
-    @staticmethod
-    def toString(value):
-        """
-        Convert a value to a string, if possible.
-        """
-        if isinstance(value, basestring):
-            return value
-        elif type(value) in [np.string_, np.str_]:
-            return str(value)
-        elif type(value) == np.unicode_:
-            return unicode(value)
-        else:
-            raise TypeError("Could not convert %s to string type" % type(value))
-
-    @staticmethod
-    def toBoolean(value):
-        """
-        Convert a value to a boolean, if possible.
-        """
-        if type(value) == bool:
-            return value
-        else:
-            raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))
-
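The converters are deliberately strict: integral floats pass `toInt`, but there is no truthiness coercion in `toBoolean`. A few spot checks of the behavior above (assumes numpy is installed, as this module already requires):

```python
# Spot checks of the TypeConverters behavior defined above.
print(TypeConverters.toListFloat((1, 2, 3)))  # [1.0, 2.0, 3.0]
print(TypeConverters.toInt(7.0))              # 7 -- integral float accepted
print(TypeConverters.toVector([0.5, 1.5]))    # DenseVector([0.5, 1.5])

try:
    TypeConverters.toBoolean("yes")
except TypeError as err:
    print(err)  # only a real bool is accepted
```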
-
-class Params(Identifiable):
-    """
-    Components that take parameters. This also provides an internal
-    param map to store parameter values attached to the instance.
-
-    .. versionadded:: 1.3.0
-    """
-
-    __metaclass__ = ABCMeta
-
-    def __init__(self):
-        super(Params, self).__init__()
-        #: internal param map for user-supplied values param map
-        self._paramMap = {}
-
-        #: internal param map for default values
-        self._defaultParamMap = {}
-
-        #: value returned by :py:func:`params`
-        self._params = None
-
-        # Copy the params from the class to the object
-        self._copy_params()
-
-    def _copy_params(self):
-        """
-        Copy all params defined on the class to current object.
-        """
-        cls = type(self)
-        src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
-        src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
-        for name, param in src_params:
-            setattr(self, name, param._copy_new_parent(self))
-
-    @property
-    def params(self):
-        """
-        Returns all params ordered by name. The default implementation
-        uses :py:func:`dir` to get all attributes of type
-        :py:class:`Param`.
-        """
-        if self._params is None:
-            self._params = list(filter(lambda attr: isinstance(attr, Param),
-                                       [getattr(self, x) for x in dir(self) if x != "params" and
-                                        not isinstance(getattr(type(self), x, None), property)]))
-        return self._params
-
-    def explainParam(self, param):
-        """
-        Explains a single param and returns its name, doc, and optional
-        default value and user-supplied value in a string.
-        """
-        param = self._resolveParam(param)
-        values = []
-        if self.isDefined(param):
-            if param in self._defaultParamMap:
-                values.append("default: %s" % self._defaultParamMap[param])
-            if param in self._paramMap:
-                values.append("current: %s" % self._paramMap[param])
-        else:
-            values.append("undefined")
-        valueStr = "(" + ", ".join(values) + ")"
-        return "%s: %s %s" % (param.name, param.doc, valueStr)
-
-    def explainParams(self):
-        """
-        Returns the documentation of all params with their optionally
-        default values and user-supplied values.
-        """
-        return "\n".join([self.explainParam(param) for param in self.params])
-
-    def getParam(self, paramName):
-        """
-        Gets a param by its name.
-        """
-        param = getattr(self, paramName)
-        if isinstance(param, Param):
-            return param
-        else:
-            raise ValueError("Cannot find param with name %s." % paramName)
-
-    def isSet(self, param):
-        """
-        Checks whether a param is explicitly set by user.
-        """
-        param = self._resolveParam(param)
-        return param in self._paramMap
-
-    def hasDefault(self, param):
-        """
-        Checks whether a param has a default value.
-        """
-        param = self._resolveParam(param)
-        return param in self._defaultParamMap
-
-    def isDefined(self, param):
-        """
-        Checks whether a param is explicitly set by user or has
-        a default value.
-        """
-        return self.isSet(param) or self.hasDefault(param)
-
-    def hasParam(self, paramName):
-        """
-        Tests whether this instance contains a param with a given
-        (string) name.
-        """
-        if isinstance(paramName, basestring):
-            p = getattr(self, paramName, None)
-            return isinstance(p, Param)
-        else:
-            raise TypeError("hasParam(): paramName must be a string")
-
-    def getOrDefault(self, param):
-        """
-        Gets the value of a param in the user-supplied param map or its
-        default value. Raises an error if neither is set.
-        """
-        param = self._resolveParam(param)
-        if param in self._paramMap:
-            return self._paramMap[param]
-        else:
-            return self._defaultParamMap[param]
-
-    def extractParamMap(self, extra=None):
-        """
-        Extracts the embedded default param values and user-supplied
-        values, and then merges them with extra values from input into
-        a flat param map, where the latter value is used if there exist
-        conflicts, i.e., with ordering: default param values <
-        user-supplied values < extra.
-
-        :param extra: extra param values
-        :return: merged param map
-        """
-        if extra is None:
-            extra = dict()
-        paramMap = self._defaultParamMap.copy()
-        paramMap.update(self._paramMap)
-        paramMap.update(extra)
-        return paramMap
-
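The precedence spelled out in the docstring (defaults < user-supplied < extra) is easy to verify with one of the generated mixins from `pyspark.ml.param.shared` (shown further below). A hedged sketch:

```python
# Hedged sketch of extractParamMap() precedence, using the HasMaxIter mixin.
from pyspark.ml.param.shared import HasMaxIter

h = HasMaxIter()
h.setMaxIter(20)                                   # user-supplied value
merged = h.extractParamMap(extra={h.maxIter: 30})  # extra wins in the merge
print(merged[h.maxIter])                           # 30
print(h.getOrDefault(h.maxIter))                   # 20 -- instance unchanged
```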
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with the same uid and some
-        extra params. The default implementation creates a
-        shallow copy using :py:func:`copy.copy`, and then copies the
-        embedded and extra parameters over and returns the copy.
-        Subclasses should override this method if the default approach
-        is not sufficient.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        that = copy.copy(self)
-        that._paramMap = {}
-        that._defaultParamMap = {}
-        return self._copyValues(that, extra)
-
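`copy` keeps the same `uid` (so copied params still pass `_shouldOwn`) while `extra` overrides values on the copy only. Continuing the hedged sketch above:

```python
# Hedged sketch: copy() with an extra override, reusing `h` from the sketch above.
h2 = h.copy({h.maxIter: 5})
print(h2.uid == h.uid)              # True -- same uid by design
print(h2.getOrDefault(h2.maxIter))  # 5
print(h.getOrDefault(h.maxIter))    # 20 -- original untouched
```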
-    def set(self, param, value):
-        """
-        Sets a parameter in the embedded param map.
-        """
-        self._shouldOwn(param)
-        try:
-            value = param.typeConverter(value)
-        except ValueError as e:
-            raise ValueError('Invalid param value given for param "%s". %s' % (param.name, e))
-        self._paramMap[param] = value
-
-    def _shouldOwn(self, param):
-        """
-        Validates that the input param belongs to this Params instance.
-        """
-        if not (self.uid == param.parent and self.hasParam(param.name)):
-            raise ValueError("Param %r does not belong to %r." % (param, self))
-
-    def _resolveParam(self, param):
-        """
-        Resolves a param and validates the ownership.
-
-        :param param: param name or the param instance, which must
-                      belong to this Params instance
-        :return: resolved param instance
-        """
-        if isinstance(param, Param):
-            self._shouldOwn(param)
-            return param
-        elif isinstance(param, basestring):
-            return self.getParam(param)
-        else:
-            raise ValueError("Cannot resolve %r as a param." % param)
-
-    @staticmethod
-    def _dummy():
-        """
-        Returns a dummy Params instance used as a placeholder to
-        generate docs.
-        """
-        dummy = Params()
-        dummy.uid = "undefined"
-        return dummy
-
-    def _set(self, **kwargs):
-        """
-        Sets user-supplied params.
-        """
-        for param, value in kwargs.items():
-            p = getattr(self, param)
-            if value is not None:
-                try:
-                    value = p.typeConverter(value)
-                except TypeError as e:
-                    raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
-            self._paramMap[p] = value
-        return self
-
-    def _clear(self, param):
-        """
-        Clears a param from the param map if it has been explicitly set.
-        """
-        if self.isSet(param):
-            del self._paramMap[param]
-
-    def _setDefault(self, **kwargs):
-        """
-        Sets default params.
-        """
-        for param, value in kwargs.items():
-            p = getattr(self, param)
-            if value is not None and not isinstance(value, JavaObject):
-                try:
-                    value = p.typeConverter(value)
-                except TypeError as e:
-                    raise TypeError('Invalid default param value given for param "%s". %s'
-                                    % (p.name, e))
-            self._defaultParamMap[p] = value
-        return self
-
-    def _copyValues(self, to, extra=None):
-        """
-        Copies param values from this instance to another instance for
-        params shared by them.
-
-        :param to: the target instance
-        :param extra: extra params to be copied
-        :return: the target instance with param values copied
-        """
-        paramMap = self._paramMap.copy()
-        if extra is not None:
-            paramMap.update(extra)
-        for param in self.params:
-            # copy default params
-            if param in self._defaultParamMap and to.hasParam(param.name):
-                to._defaultParamMap[to.getParam(param.name)] = self._defaultParamMap[param]
-            # copy explicitly set params
-            if param in paramMap and to.hasParam(param.name):
-                to._set(**{param.name: paramMap[param]})
-        return to
-
-    def _resetUid(self, newUid):
-        """
-        Changes the uid of this instance. This updates both
-        the stored uid and the parent uid of params and param maps.
-        This is used by persistence (loading).
-        :param newUid: new uid to use, which is converted to unicode
-        :return: same instance, but with the uid and Param.parent values
-                 updated, including within param maps
-        """
-        newUid = unicode(newUid)
-        self.uid = newUid
-        newDefaultParamMap = dict()
-        newParamMap = dict()
-        for param in self.params:
-            newParam = copy.copy(param)
-            newParam.parent = newUid
-            if param in self._defaultParamMap:
-                newDefaultParamMap[newParam] = self._defaultParamMap[param]
-            if param in self._paramMap:
-                newParamMap[newParam] = self._paramMap[param]
-            param.parent = newUid
-        self._defaultParamMap = newDefaultParamMap
-        self._paramMap = newParamMap
-        return self
-
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyspark/ml/param/shared.html b/docs/_build/html/_modules/pyspark/ml/param/shared.html
deleted file mode 100644
index af5ca08e..00000000
--- a/docs/_build/html/_modules/pyspark/ml/param/shared.html
+++ /dev/null
@@ -1,1023 +0,0 @@
- pyspark.ml.param.shared — Splice MLManager documentation

Source code for pyspark.ml.param.shared

-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.
-
-from pyspark.ml.param import *
-
-
-class HasMaxIter(Params):
-    """
-    Mixin for param maxIter: max number of iterations (>= 0).
-    """
-
-    maxIter = Param(Params._dummy(), "maxIter", "max number of iterations (>= 0).", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasMaxIter, self).__init__()
-
-    def setMaxIter(self, value):
-        """
-        Sets the value of :py:attr:`maxIter`.
-        """
-        return self._set(maxIter=value)
-
-    def getMaxIter(self):
-        """
-        Gets the value of maxIter or its default value.
-        """
-        return self.getOrDefault(self.maxIter)
-
-
-class HasRegParam(Params):
-    """
-    Mixin for param regParam: regularization parameter (>= 0).
-    """
-
-    regParam = Param(Params._dummy(), "regParam", "regularization parameter (>= 0).", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasRegParam, self).__init__()
-
-    def setRegParam(self, value):
-        """
-        Sets the value of :py:attr:`regParam`.
-        """
-        return self._set(regParam=value)
-
-    def getRegParam(self):
-        """
-        Gets the value of regParam or its default value.
-        """
-        return self.getOrDefault(self.regParam)
-
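These generated mixins are meant to be composed: an estimator inherits one mixin per shared param and picks up the getter/setter pairs for free. A hedged sketch, assuming the mixins above are in scope (the class name is illustrative):

```python
# Illustrative composition of the shared-param mixins defined in this module.
class MyIterativeParams(HasMaxIter, HasRegParam):
    pass

mp = MyIterativeParams()
mp.setMaxIter(50)
mp.setRegParam(0.01)
print(mp.explainParams())  # one "name: doc (current: ...)" line per param
```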
-
-class HasFeaturesCol(Params):
-    """
-    Mixin for param featuresCol: features column name.
-    """
-
-    featuresCol = Param(Params._dummy(), "featuresCol", "features column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasFeaturesCol, self).__init__()
-        self._setDefault(featuresCol='features')
-
-    def setFeaturesCol(self, value):
-        """
-        Sets the value of :py:attr:`featuresCol`.
-        """
-        return self._set(featuresCol=value)
-
-    def getFeaturesCol(self):
-        """
-        Gets the value of featuresCol or its default value.
-        """
-        return self.getOrDefault(self.featuresCol)
-
-
-class HasLabelCol(Params):
-    """
-    Mixin for param labelCol: label column name.
-    """
-
-    labelCol = Param(Params._dummy(), "labelCol", "label column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasLabelCol, self).__init__()
-        self._setDefault(labelCol='label')
-
-    def setLabelCol(self, value):
-        """
-        Sets the value of :py:attr:`labelCol`.
-        """
-        return self._set(labelCol=value)
-
-    def getLabelCol(self):
-        """
-        Gets the value of labelCol or its default value.
-        """
-        return self.getOrDefault(self.labelCol)
-
-
-class HasPredictionCol(Params):
-    """
-    Mixin for param predictionCol: prediction column name.
-    """
-
-    predictionCol = Param(Params._dummy(), "predictionCol", "prediction column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasPredictionCol, self).__init__()
-        self._setDefault(predictionCol='prediction')
-
-    def setPredictionCol(self, value):
-        """
-        Sets the value of :py:attr:`predictionCol`.
-        """
-        return self._set(predictionCol=value)
-
-    def getPredictionCol(self):
-        """
-        Gets the value of predictionCol or its default value.
-        """
-        return self.getOrDefault(self.predictionCol)
-
-
-class HasProbabilityCol(Params):
-    """
-    Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
-    """
-
-    probabilityCol = Param(Params._dummy(), "probabilityCol", "Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasProbabilityCol, self).__init__()
-        self._setDefault(probabilityCol='probability')
-
-    def setProbabilityCol(self, value):
-        """
-        Sets the value of :py:attr:`probabilityCol`.
-        """
-        return self._set(probabilityCol=value)
-
-    def getProbabilityCol(self):
-        """
-        Gets the value of probabilityCol or its default value.
-        """
-        return self.getOrDefault(self.probabilityCol)
-
-
-class HasRawPredictionCol(Params):
-    """
-    Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
-    """
-
-    rawPredictionCol = Param(Params._dummy(), "rawPredictionCol", "raw prediction (a.k.a. confidence) column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasRawPredictionCol, self).__init__()
-        self._setDefault(rawPredictionCol='rawPrediction')
-
-    def setRawPredictionCol(self, value):
-        """
-        Sets the value of :py:attr:`rawPredictionCol`.
-        """
-        return self._set(rawPredictionCol=value)
-
-    def getRawPredictionCol(self):
-        """
-        Gets the value of rawPredictionCol or its default value.
-        """
-        return self.getOrDefault(self.rawPredictionCol)
-
-
-class HasInputCol(Params):
-    """
-    Mixin for param inputCol: input column name.
-    """
-
-    inputCol = Param(Params._dummy(), "inputCol", "input column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasInputCol, self).__init__()
-
-    def setInputCol(self, value):
-        """
-        Sets the value of :py:attr:`inputCol`.
-        """
-        return self._set(inputCol=value)
-
-    def getInputCol(self):
-        """
-        Gets the value of inputCol or its default value.
-        """
-        return self.getOrDefault(self.inputCol)
-
-
-class HasInputCols(Params):
-    """
-    Mixin for param inputCols: input column names.
-    """
-
-    inputCols = Param(Params._dummy(), "inputCols", "input column names.", typeConverter=TypeConverters.toListString)
-
-    def __init__(self):
-        super(HasInputCols, self).__init__()
-
-    def setInputCols(self, value):
-        """
-        Sets the value of :py:attr:`inputCols`.
-        """
-        return self._set(inputCols=value)
-
-    def getInputCols(self):
-        """
-        Gets the value of inputCols or its default value.
-        """
-        return self.getOrDefault(self.inputCols)
-
-
-class HasOutputCol(Params):
-    """
-    Mixin for param outputCol: output column name.
-    """
-
-    outputCol = Param(Params._dummy(), "outputCol", "output column name.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasOutputCol, self).__init__()
-        self._setDefault(outputCol=self.uid + '__output')
-
-    def setOutputCol(self, value):
-        """
-        Sets the value of :py:attr:`outputCol`.
-        """
-        return self._set(outputCol=value)
-
-    def getOutputCol(self):
-        """
-        Gets the value of outputCol or its default value.
-        """
-        return self.getOrDefault(self.outputCol)
-
-
-class HasOutputCols(Params):
-    """
-    Mixin for param outputCols: output column names.
-    """
-
-    outputCols = Param(Params._dummy(), "outputCols", "output column names.", typeConverter=TypeConverters.toListString)
-
-    def __init__(self):
-        super(HasOutputCols, self).__init__()
-
-    def setOutputCols(self, value):
-        """
-        Sets the value of :py:attr:`outputCols`.
-        """
-        return self._set(outputCols=value)
-
-    def getOutputCols(self):
-        """
-        Gets the value of outputCols or its default value.
-        """
-        return self.getOrDefault(self.outputCols)
-
-
-class HasNumFeatures(Params):
-    """
-    Mixin for param numFeatures: number of features.
-    """
-
-    numFeatures = Param(Params._dummy(), "numFeatures", "number of features.", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasNumFeatures, self).__init__()
-
-    def setNumFeatures(self, value):
-        """
-        Sets the value of :py:attr:`numFeatures`.
-        """
-        return self._set(numFeatures=value)
-
-    def getNumFeatures(self):
-        """
-        Gets the value of numFeatures or its default value.
-        """
-        return self.getOrDefault(self.numFeatures)
-
-
-class HasCheckpointInterval(Params):
-    """
-    Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.
-    """
-
-    checkpointInterval = Param(Params._dummy(), "checkpointInterval", "set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasCheckpointInterval, self).__init__()
-
-    def setCheckpointInterval(self, value):
-        """
-        Sets the value of :py:attr:`checkpointInterval`.
-        """
-        return self._set(checkpointInterval=value)
-
-    def getCheckpointInterval(self):
-        """
-        Gets the value of checkpointInterval or its default value.
-        """
-        return self.getOrDefault(self.checkpointInterval)
-
-
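As the param description says, `checkpointInterval` is ignored unless a checkpoint directory has been set on the SparkContext; a minimal sketch of that prerequisite (the local path is a placeholder):

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
# Without this, checkpointInterval is silently ignored.
spark.sparkContext.setCheckpointDir("/tmp/spark-checkpoints")
```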
-class HasSeed(Params):
-    """
-    Mixin for param seed: random seed.
-    """
-
-    seed = Param(Params._dummy(), "seed", "random seed.", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasSeed, self).__init__()
-        self._setDefault(seed=hash(type(self).__name__))
-
-    def setSeed(self, value):
-        """
-        Sets the value of :py:attr:`seed`.
-        """
-        return self._set(seed=value)
-
-    def getSeed(self):
-        """
-        Gets the value of seed or its default value.
-        """
-        return self.getOrDefault(self.seed)
-
-
-class HasTol(Params):
-    """
-    Mixin for param tol: the convergence tolerance for iterative algorithms (>= 0).
-    """
-
-    tol = Param(Params._dummy(), "tol", "the convergence tolerance for iterative algorithms (>= 0).", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasTol, self).__init__()
-
-    def setTol(self, value):
-        """
-        Sets the value of :py:attr:`tol`.
-        """
-        return self._set(tol=value)
-
-    def getTol(self):
-        """
-        Gets the value of tol or its default value.
-        """
-        return self.getOrDefault(self.tol)
-
-
-class HasStepSize(Params):
-    """
-    Mixin for param stepSize: Step size to be used for each iteration of optimization (>= 0).
-    """
-
-    stepSize = Param(Params._dummy(), "stepSize", "Step size to be used for each iteration of optimization (>= 0).", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasStepSize, self).__init__()
-
-    def setStepSize(self, value):
-        """
-        Sets the value of :py:attr:`stepSize`.
-        """
-        return self._set(stepSize=value)
-
-    def getStepSize(self):
-        """
-        Gets the value of stepSize or its default value.
-        """
-        return self.getOrDefault(self.stepSize)
-
-
-class HasHandleInvalid(Params):
-    """
-    Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
-    """
-
-    handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasHandleInvalid, self).__init__()
-
-    def setHandleInvalid(self, value):
-        """
-        Sets the value of :py:attr:`handleInvalid`.
-        """
-        return self._set(handleInvalid=value)
-
-    def getHandleInvalid(self):
-        """
-        Gets the value of handleInvalid or its default value.
-        """
-        return self.getOrDefault(self.handleInvalid)
-
-
-class HasElasticNetParam(Params):
-    """
-    Mixin for param elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
-    """
-
-    elasticNetParam = Param(Params._dummy(), "elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasElasticNetParam, self).__init__()
-        self._setDefault(elasticNetParam=0.0)
-
-    def setElasticNetParam(self, value):
-        """
-        Sets the value of :py:attr:`elasticNetParam`.
-        """
-        return self._set(elasticNetParam=value)
-
-    def getElasticNetParam(self):
-        """
-        Gets the value of elasticNetParam or its default value.
-        """
-        return self.getOrDefault(self.elasticNetParam)
-
-
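For orientation, this is the alpha of elastic-net regularization; a hedged sketch of setting it together with `regParam` on an estimator that mixes in both params (assumes an active SparkSession):

```python
from pyspark.ml.classification import LogisticRegression

# alpha = 0.0 -> pure L2 (ridge); alpha = 1.0 -> pure L1 (lasso);
# intermediate values blend the two penalties.
lr = LogisticRegression(regParam=0.1, elasticNetParam=0.5)
```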
-class HasFitIntercept(Params):
-    """
-    Mixin for param fitIntercept: whether to fit an intercept term.
-    """
-
-    fitIntercept = Param(Params._dummy(), "fitIntercept", "whether to fit an intercept term.", typeConverter=TypeConverters.toBoolean)
-
-    def __init__(self):
-        super(HasFitIntercept, self).__init__()
-        self._setDefault(fitIntercept=True)
-
-    def setFitIntercept(self, value):
-        """
-        Sets the value of :py:attr:`fitIntercept`.
-        """
-        return self._set(fitIntercept=value)
-
-    def getFitIntercept(self):
-        """
-        Gets the value of fitIntercept or its default value.
-        """
-        return self.getOrDefault(self.fitIntercept)
-
-
-class HasStandardization(Params):
-    """
-    Mixin for param standardization: whether to standardize the training features before fitting the model.
-    """
-
-    standardization = Param(Params._dummy(), "standardization", "whether to standardize the training features before fitting the model.", typeConverter=TypeConverters.toBoolean)
-
-    def __init__(self):
-        super(HasStandardization, self).__init__()
-        self._setDefault(standardization=True)
-
-    def setStandardization(self, value):
-        """
-        Sets the value of :py:attr:`standardization`.
-        """
-        return self._set(standardization=value)
-
-    def getStandardization(self):
-        """
-        Gets the value of standardization or its default value.
-        """
-        return self.getOrDefault(self.standardization)
-
-
-class HasThresholds(Params):
-    """
-    Mixin for param thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.
-    """
-
-    thresholds = Param(Params._dummy(), "thresholds", "Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.", typeConverter=TypeConverters.toListFloat)
-
-    def __init__(self):
-        super(HasThresholds, self).__init__()
-
-    def setThresholds(self, value):
-        """
-        Sets the value of :py:attr:`thresholds`.
-        """
-        return self._set(thresholds=value)
-
-    def getThresholds(self):
-        """
-        Gets the value of thresholds or its default value.
-        """
-        return self.getOrDefault(self.thresholds)
-
-
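The p/t rule in the `thresholds` description is easy to misread, so here is a plain-Python illustration with made-up numbers:

```python
# Predicted class = argmax over classes of p/t, where p is the predicted
# probability of a class and t is that class's threshold.
probs = [0.50, 0.30, 0.20]       # hypothetical class probabilities
thresholds = [0.60, 0.25, 0.20]  # hypothetical per-class thresholds
scores = [p / t for p, t in zip(probs, thresholds)]
predicted = max(range(len(scores)), key=scores.__getitem__)
print(scores, predicted)  # [0.833..., 1.2, 1.0] -> class 1
```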
-class HasThreshold(Params):
-    """
-    Mixin for param threshold: threshold in binary classification prediction, in range [0, 1]
-    """
-
-    threshold = Param(Params._dummy(), "threshold", "threshold in binary classification prediction, in range [0, 1]", typeConverter=TypeConverters.toFloat)
-
-    def __init__(self):
-        super(HasThreshold, self).__init__()
-        self._setDefault(threshold=0.5)
-
-    def setThreshold(self, value):
-        """
-        Sets the value of :py:attr:`threshold`.
-        """
-        return self._set(threshold=value)
-
-    def getThreshold(self):
-        """
-        Gets the value of threshold or its default value.
-        """
-        return self.getOrDefault(self.threshold)
-
-
-class HasWeightCol(Params):
-    """
-    Mixin for param weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0.
-    """
-
-    weightCol = Param(Params._dummy(), "weightCol", "weight column name. If this is not set or empty, we treat all instance weights as 1.0.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasWeightCol, self).__init__()
-
-    def setWeightCol(self, value):
-        """
-        Sets the value of :py:attr:`weightCol`.
-        """
-        return self._set(weightCol=value)
-
-    def getWeightCol(self):
-        """
-        Gets the value of weightCol or its default value.
-        """
-        return self.getOrDefault(self.weightCol)
-
-
-class HasSolver(Params):
-    """
-    Mixin for param solver: the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.
-    """
-
-    solver = Param(Params._dummy(), "solver", "the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasSolver, self).__init__()
-        self._setDefault(solver='auto')
-
-    def setSolver(self, value):
-        """
-        Sets the value of :py:attr:`solver`.
-        """
-        return self._set(solver=value)
-
-    def getSolver(self):
-        """
-        Gets the value of solver or its default value.
-        """
-        return self.getOrDefault(self.solver)
-
-
-class HasVarianceCol(Params):
-    """
-    Mixin for param varianceCol: column name for the biased sample variance of prediction.
-    """
-
-    varianceCol = Param(Params._dummy(), "varianceCol", "column name for the biased sample variance of prediction.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasVarianceCol, self).__init__()
-
-    def setVarianceCol(self, value):
-        """
-        Sets the value of :py:attr:`varianceCol`.
-        """
-        return self._set(varianceCol=value)
-
-    def getVarianceCol(self):
-        """
-        Gets the value of varianceCol or its default value.
-        """
-        return self.getOrDefault(self.varianceCol)
-
-
-class HasAggregationDepth(Params):
-    """
-    Mixin for param aggregationDepth: suggested depth for treeAggregate (>= 2).
-    """
-
-    aggregationDepth = Param(Params._dummy(), "aggregationDepth", "suggested depth for treeAggregate (>= 2).", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasAggregationDepth, self).__init__()
-        self._setDefault(aggregationDepth=2)
-
-    def setAggregationDepth(self, value):
-        """
-        Sets the value of :py:attr:`aggregationDepth`.
-        """
-        return self._set(aggregationDepth=value)
-
-    def getAggregationDepth(self):
-        """
-        Gets the value of aggregationDepth or its default value.
-        """
-        return self.getOrDefault(self.aggregationDepth)
-
-
-class HasParallelism(Params):
-    """
-    Mixin for param parallelism: the number of threads to use when running parallel algorithms (>= 1).
-    """
-
-    parallelism = Param(Params._dummy(), "parallelism", "the number of threads to use when running parallel algorithms (>= 1).", typeConverter=TypeConverters.toInt)
-
-    def __init__(self):
-        super(HasParallelism, self).__init__()
-        self._setDefault(parallelism=1)
-
-    def setParallelism(self, value):
-        """
-        Sets the value of :py:attr:`parallelism`.
-        """
-        return self._set(parallelism=value)
-
-    def getParallelism(self):
-        """
-        Gets the value of parallelism or its default value.
-        """
-        return self.getOrDefault(self.parallelism)
-
-
-class HasCollectSubModels(Params):
-    """
-    Mixin for param collectSubModels: Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.
-    """
-
-    collectSubModels = Param(Params._dummy(), "collectSubModels", "Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.", typeConverter=TypeConverters.toBoolean)
-
-    def __init__(self):
-        super(HasCollectSubModels, self).__init__()
-        self._setDefault(collectSubModels=False)
-
-    def setCollectSubModels(self, value):
-        """
-        Sets the value of :py:attr:`collectSubModels`.
-        """
-        return self._set(collectSubModels=value)
-
-    def getCollectSubModels(self):
-        """
-        Gets the value of collectSubModels or its default value.
-        """
-        return self.getOrDefault(self.collectSubModels)
-
-
-class HasLoss(Params):
-    """
-    Mixin for param loss: the loss function to be optimized.
-    """
-
-    loss = Param(Params._dummy(), "loss", "the loss function to be optimized.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasLoss, self).__init__()
-
-    def setLoss(self, value):
-        """
-        Sets the value of :py:attr:`loss`.
-        """
-        return self._set(loss=value)
-
-    def getLoss(self):
-        """
-        Gets the value of loss or its default value.
-        """
-        return self.getOrDefault(self.loss)
-
-
-class DecisionTreeParams(Params):
-    """
-    Mixin for Decision Tree parameters.
-    """
-
-    maxDepth = Param(Params._dummy(), "maxDepth", "Maximum depth of the tree. (>= 0) E.g., depth 0 means 1 leaf node; depth 1 means 1 internal node + 2 leaf nodes.", typeConverter=TypeConverters.toInt)
-    maxBins = Param(Params._dummy(), "maxBins", "Max number of bins for discretizing continuous features.  Must be >=2 and >= number of categories for any categorical feature.", typeConverter=TypeConverters.toInt)
-    minInstancesPerNode = Param(Params._dummy(), "minInstancesPerNode", "Minimum number of instances each child must have after split. If a split causes the left or right child to have fewer than minInstancesPerNode, the split will be discarded as invalid. Should be >= 1.", typeConverter=TypeConverters.toInt)
-    minInfoGain = Param(Params._dummy(), "minInfoGain", "Minimum information gain for a split to be considered at a tree node.", typeConverter=TypeConverters.toFloat)
-    maxMemoryInMB = Param(Params._dummy(), "maxMemoryInMB", "Maximum memory in MB allocated to histogram aggregation. If too small, then 1 node will be split per iteration, and its aggregates may exceed this size.", typeConverter=TypeConverters.toInt)
-    cacheNodeIds = Param(Params._dummy(), "cacheNodeIds", "If false, the algorithm will pass trees to executors to match instances with nodes. If true, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees. Users can set how often should the cache be checkpointed or disable it by setting checkpointInterval.", typeConverter=TypeConverters.toBoolean)
-
-
-    def __init__(self):
-        super(DecisionTreeParams, self).__init__()
-
-    def setMaxDepth(self, value):
-        """
-        Sets the value of :py:attr:`maxDepth`.
-        """
-        return self._set(maxDepth=value)
-
-    def getMaxDepth(self):
-        """
-        Gets the value of maxDepth or its default value.
-        """
-        return self.getOrDefault(self.maxDepth)
-
-    def setMaxBins(self, value):
-        """
-        Sets the value of :py:attr:`maxBins`.
-        """
-        return self._set(maxBins=value)
-
-    def getMaxBins(self):
-        """
-        Gets the value of maxBins or its default value.
-        """
-        return self.getOrDefault(self.maxBins)
-
-    def setMinInstancesPerNode(self, value):
-        """
-        Sets the value of :py:attr:`minInstancesPerNode`.
-        """
-        return self._set(minInstancesPerNode=value)
-
-    def getMinInstancesPerNode(self):
-        """
-        Gets the value of minInstancesPerNode or its default value.
-        """
-        return self.getOrDefault(self.minInstancesPerNode)
-
-    def setMinInfoGain(self, value):
-        """
-        Sets the value of :py:attr:`minInfoGain`.
-        """
-        return self._set(minInfoGain=value)
-
-    def getMinInfoGain(self):
-        """
-        Gets the value of minInfoGain or its default value.
-        """
-        return self.getOrDefault(self.minInfoGain)
-
-    def setMaxMemoryInMB(self, value):
-        """
-        Sets the value of :py:attr:`maxMemoryInMB`.
-        """
-        return self._set(maxMemoryInMB=value)
-
-    def getMaxMemoryInMB(self):
-        """
-        Gets the value of maxMemoryInMB or its default value.
-        """
-        return self.getOrDefault(self.maxMemoryInMB)
-
-    def setCacheNodeIds(self, value):
-        """
-        Sets the value of :py:attr:`cacheNodeIds`.
-        """
-        return self._set(cacheNodeIds=value)
-
-    def getCacheNodeIds(self):
-        """
-        Gets the value of cacheNodeIds or its default value.
-        """
-        return self.getOrDefault(self.cacheNodeIds)
-
-
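A brief sketch of these tree params in use on a concrete estimator (assumes an active SparkSession):

```python
from pyspark.ml.classification import DecisionTreeClassifier

# maxDepth/maxBins/minInstancesPerNode come from the DecisionTreeParams mixin.
dt = DecisionTreeClassifier(maxDepth=5, maxBins=32, minInstancesPerNode=2)
```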
-class HasDistanceMeasure(Params):
-    """
-    Mixin for param distanceMeasure: the distance measure. Supported options: 'euclidean' and 'cosine'.
-    """
-
-    distanceMeasure = Param(Params._dummy(), "distanceMeasure", "the distance measure. Supported options: 'euclidean' and 'cosine'.", typeConverter=TypeConverters.toString)
-
-    def __init__(self):
-        super(HasDistanceMeasure, self).__init__()
-        self._setDefault(distanceMeasure='euclidean')
-
-    def setDistanceMeasure(self, value):
-        """
-        Sets the value of :py:attr:`distanceMeasure`.
-        """
-        return self._set(distanceMeasure=value)
-
-    def getDistanceMeasure(self):
-        """
-        Gets the value of distanceMeasure or its default value.
-        """
-        return self.getOrDefault(self.distanceMeasure)
-
-
-<!-- page footer: © Copyright 2020, Splice Machine. Built with Sphinx using a theme provided by Read the Docs. -->
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyspark/ml/tuning.html b/docs/_build/html/_modules/pyspark/ml/tuning.html
deleted file mode 100644
index 068c2920..00000000
--- a/docs/_build/html/_modules/pyspark/ml/tuning.html
+++ /dev/null
@@ -1,990 +0,0 @@
-<!-- page chrome: title "pyspark.ml.tuning — Splice MLManager documentation", breadcrumbs "Module code » pyspark.ml.tuning" -->

Source code for pyspark.ml.tuning

-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import itertools
-import sys
-from multiprocessing.pool import ThreadPool
-
-import numpy as np
-
-from pyspark import since, keyword_only
-from pyspark.ml import Estimator, Model
-from pyspark.ml.common import _py2java
-from pyspark.ml.param import Params, Param, TypeConverters
-from pyspark.ml.param.shared import HasCollectSubModels, HasParallelism, HasSeed
-from pyspark.ml.util import *
-from pyspark.ml.wrapper import JavaParams
-from pyspark.sql.functions import rand
-
-__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel', 'TrainValidationSplit',
-           'TrainValidationSplitModel']
-
-
-def _parallelFitTasks(est, train, eva, validation, epm, collectSubModel):
-    """
-    Creates a list of callables which can be called from different threads to fit and evaluate
-    an estimator in parallel. Each callable returns an `(index, metric)` pair.
-
-    :param est: Estimator, the estimator to be fit.
-    :param train: DataFrame, training data set, used for fitting.
-    :param eva: Evaluator, used to compute `metric`
-    :param validation: DataFrame, validation data set, used for evaluation.
-    :param epm: Sequence of ParamMap, params maps to be used during fitting & evaluation.
-    :param collectSubModel: Whether to collect sub model.
-    :return: (int, float, subModel), an index into `epm` and the associated metric value.
-    """
-    modelIter = est.fitMultiple(train, epm)
-
-    def singleTask():
-        index, model = next(modelIter)
-        metric = eva.evaluate(model.transform(validation, epm[index]))
-        return index, metric, model if collectSubModel else None
-
-    return [singleTask] * len(epm)
-
-
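A sketch of how these callables get consumed, mirroring the `_fit` implementations further down (`est`, `train`, `eva`, `validation`, and `epm` are assumed to already be in scope):

```python
from multiprocessing.pool import ThreadPool

tasks = _parallelFitTasks(est, train, eva, validation, epm, False)
metrics = [None] * len(epm)
pool = ThreadPool(processes=2)
# imap_unordered yields (index, metric, subModel) triples as tasks finish.
for index, metric, sub_model in pool.imap_unordered(lambda f: f(), tasks):
    metrics[index] = metric
```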
-class ParamGridBuilder(object):
-    r"""
-    Builder for a param grid used in grid search-based model selection.
-
-    >>> from pyspark.ml.classification import LogisticRegression
-    >>> lr = LogisticRegression()
-    >>> output = ParamGridBuilder() \
-    ...     .baseOn({lr.labelCol: 'l'}) \
-    ...     .baseOn([lr.predictionCol, 'p']) \
-    ...     .addGrid(lr.regParam, [1.0, 2.0]) \
-    ...     .addGrid(lr.maxIter, [1, 5]) \
-    ...     .build()
-    >>> expected = [
-    ...     {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
-    ...     {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
-    ...     {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},
-    ...     {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
-    >>> len(output) == len(expected)
-    True
-    >>> all([m in expected for m in output])
-    True
-
-    .. versionadded:: 1.4.0
-    """
-
-    def __init__(self):
-        self._param_grid = {}
-
-    @since("1.4.0")
-    def addGrid(self, param, values):
-        """
-        Sets the given parameters in this grid to fixed values.
-        """
-        self._param_grid[param] = values
-
-        return self
-
-    @since("1.4.0")
-    def baseOn(self, *args):
-        """
-        Sets the given parameters in this grid to fixed values.
-        Accepts either a parameter dictionary or a list of (parameter, value) pairs.
-        """
-        if isinstance(args[0], dict):
-            self.baseOn(*args[0].items())
-        else:
-            for (param, value) in args:
-                self.addGrid(param, [value])
-
-        return self
-
-    @since("1.4.0")
-    def build(self):
-        """
-        Builds and returns all combinations of parameters specified
-        by the param grid.
-        """
-        keys = self._param_grid.keys()
-        grid_values = self._param_grid.values()
-
-        def to_key_value_pairs(keys, values):
-            return [(key, key.typeConverter(value)) for key, value in zip(keys, values)]
-
-        return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)]
-
-
-class ValidatorParams(HasSeed):
-    """
-    Common params for TrainValidationSplit and CrossValidator.
-    """
-
-    estimator = Param(Params._dummy(), "estimator", "estimator to be cross-validated")
-    estimatorParamMaps = Param(Params._dummy(), "estimatorParamMaps", "estimator param maps")
-    evaluator = Param(
-        Params._dummy(), "evaluator",
-        "evaluator used to select hyper-parameters that maximize the validator metric")
-
-    def setEstimator(self, value):
-        """
-        Sets the value of :py:attr:`estimator`.
-        """
-        return self._set(estimator=value)
-
-    def getEstimator(self):
-        """
-        Gets the value of estimator or its default value.
-        """
-        return self.getOrDefault(self.estimator)
-
-    def setEstimatorParamMaps(self, value):
-        """
-        Sets the value of :py:attr:`estimatorParamMaps`.
-        """
-        return self._set(estimatorParamMaps=value)
-
-    def getEstimatorParamMaps(self):
-        """
-        Gets the value of estimatorParamMaps or its default value.
-        """
-        return self.getOrDefault(self.estimatorParamMaps)
-
-    def setEvaluator(self, value):
-        """
-        Sets the value of :py:attr:`evaluator`.
-        """
-        return self._set(evaluator=value)
-
-    def getEvaluator(self):
-        """
-        Gets the value of evaluator or its default value.
-        """
-        return self.getOrDefault(self.evaluator)
-
-    @classmethod
-    def _from_java_impl(cls, java_stage):
-        """
-        Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.
-        """
-
-        # Load information from java_stage to the instance.
-        estimator = JavaParams._from_java(java_stage.getEstimator())
-        evaluator = JavaParams._from_java(java_stage.getEvaluator())
-        epms = [estimator._transfer_param_map_from_java(epm)
-                for epm in java_stage.getEstimatorParamMaps()]
-        return estimator, epms, evaluator
-
-    def _to_java_impl(self):
-        """
-        Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
-        """
-
-        gateway = SparkContext._gateway
-        cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
-
-        java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
-        for idx, epm in enumerate(self.getEstimatorParamMaps()):
-            java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
-
-        java_estimator = self.getEstimator()._to_java()
-        java_evaluator = self.getEvaluator()._to_java()
-        return java_estimator, java_epms, java_evaluator
-
-
-class CrossValidator(Estimator, ValidatorParams, HasParallelism, HasCollectSubModels,
-                     MLReadable, MLWritable):
-    """
-
-    K-fold cross validation performs model selection by splitting the dataset into a set of
-    non-overlapping, randomly partitioned folds, which are used as separate training and test
-    datasets. E.g., with k=3 folds, k-fold cross validation generates 3 (training, test) dataset
-    pairs, each of which uses 2/3 of the data for training and 1/3 for testing. Each fold is
-    used as the test set exactly once.
-
-
-    >>> from pyspark.ml.classification import LogisticRegression
-    >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
-    >>> from pyspark.ml.linalg import Vectors
-    >>> dataset = spark.createDataFrame(
-    ...     [(Vectors.dense([0.0]), 0.0),
-    ...      (Vectors.dense([0.4]), 1.0),
-    ...      (Vectors.dense([0.5]), 0.0),
-    ...      (Vectors.dense([0.6]), 1.0),
-    ...      (Vectors.dense([1.0]), 1.0)] * 10,
-    ...     ["features", "label"])
-    >>> lr = LogisticRegression()
-    >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
-    >>> evaluator = BinaryClassificationEvaluator()
-    >>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
-    ...     parallelism=2)
-    >>> cvModel = cv.fit(dataset)
-    >>> cvModel.avgMetrics[0]
-    0.5
-    >>> evaluator.evaluate(cvModel.transform(dataset))
-    0.8333...
-
-    .. versionadded:: 1.4.0
-    """
-
-    numFolds = Param(Params._dummy(), "numFolds", "number of folds for cross validation",
-                     typeConverter=TypeConverters.toInt)
-
-    @keyword_only
-    def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
-                 seed=None, parallelism=1, collectSubModels=False):
-        """
-        __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
-                 seed=None, parallelism=1, collectSubModels=False)
-        """
-        super(CrossValidator, self).__init__()
-        self._setDefault(numFolds=3, parallelism=1)
-        kwargs = self._input_kwargs
-        self._set(**kwargs)
-
-    @keyword_only
-    @since("1.4.0")
-    def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
-                  seed=None, parallelism=1, collectSubModels=False):
-        """
-        setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
-                  seed=None, parallelism=1, collectSubModels=False):
-        Sets params for cross validator.
-        """
-        kwargs = self._input_kwargs
-        return self._set(**kwargs)
-
-    @since("1.4.0")
-    def setNumFolds(self, value):
-        """
-        Sets the value of :py:attr:`numFolds`.
-        """
-        return self._set(numFolds=value)
-
-    @since("1.4.0")
-    def getNumFolds(self):
-        """
-        Gets the value of numFolds or its default value.
-        """
-        return self.getOrDefault(self.numFolds)
-
-    def _fit(self, dataset):
-        est = self.getOrDefault(self.estimator)
-        epm = self.getOrDefault(self.estimatorParamMaps)
-        numModels = len(epm)
-        eva = self.getOrDefault(self.evaluator)
-        nFolds = self.getOrDefault(self.numFolds)
-        seed = self.getOrDefault(self.seed)
-        h = 1.0 / nFolds
-        randCol = self.uid + "_rand"
-        df = dataset.select("*", rand(seed).alias(randCol))
-        metrics = [0.0] * numModels
-
-        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
-        subModels = None
-        collectSubModelsParam = self.getCollectSubModels()
-        if collectSubModelsParam:
-            subModels = [[None for j in range(numModels)] for i in range(nFolds)]
-
-        for i in range(nFolds):
-            validateLB = i * h
-            validateUB = (i + 1) * h
-            condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
-            validation = df.filter(condition).cache()
-            train = df.filter(~condition).cache()
-
-            tasks = _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam)
-            for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
-                metrics[j] += (metric / nFolds)
-                if collectSubModelsParam:
-                    subModels[i][j] = subModel
-
-            validation.unpersist()
-            train.unpersist()
-
-        if eva.isLargerBetter():
-            bestIndex = np.argmax(metrics)
-        else:
-            bestIndex = np.argmin(metrics)
-        bestModel = est.fit(dataset, epm[bestIndex])
-        return self._copyValues(CrossValidatorModel(bestModel, metrics, subModels))
-
-    @since("1.4.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copy creates a deep copy of
-        the embedded paramMap, and copies the embedded and extra parameters over.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        newCV = Params.copy(self, extra)
-        if self.isSet(self.estimator):
-            newCV.setEstimator(self.getEstimator().copy(extra))
-        # estimatorParamMaps remain the same
-        if self.isSet(self.evaluator):
-            newCV.setEvaluator(self.getEvaluator().copy(extra))
-        return newCV
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java CrossValidator, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-
-        estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
-        numFolds = java_stage.getNumFolds()
-        seed = java_stage.getSeed()
-        parallelism = java_stage.getParallelism()
-        collectSubModels = java_stage.getCollectSubModels()
-        # Create a new instance of this stage.
-        py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
-                       numFolds=numFolds, seed=seed, parallelism=parallelism,
-                       collectSubModels=collectSubModels)
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java CrossValidator. Used for ML persistence.
-
-        :return: Java object equivalent to this instance.
-        """
-
-        estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
-
-        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
-        _java_obj.setEstimatorParamMaps(epms)
-        _java_obj.setEvaluator(evaluator)
-        _java_obj.setEstimator(estimator)
-        _java_obj.setSeed(self.getSeed())
-        _java_obj.setNumFolds(self.getNumFolds())
-        _java_obj.setParallelism(self.getParallelism())
-        _java_obj.setCollectSubModels(self.getCollectSubModels())
-
-        return _java_obj
-
-
-class CrossValidatorModel(Model, ValidatorParams, MLReadable, MLWritable):
-    """
-
-    CrossValidatorModel contains the model with the highest average cross-validation
-    metric across folds and uses this model to transform input data. CrossValidatorModel
-    also tracks the metrics for each param map evaluated.
-
-    .. versionadded:: 1.4.0
-    """
-
-    def __init__(self, bestModel, avgMetrics=[], subModels=None):
-        super(CrossValidatorModel, self).__init__()
-        #: best model from cross validation
-        self.bestModel = bestModel
-        #: Average cross-validation metrics for each paramMap in
-        #: CrossValidator.estimatorParamMaps, in the corresponding order.
-        self.avgMetrics = avgMetrics
-        #: sub model list from cross validation
-        self.subModels = subModels
-
-    def _transform(self, dataset):
-        return self.bestModel.transform(dataset)
-
-    @since("1.4.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copies the underlying bestModel,
-        creates a deep copy of the embedded paramMap, and
-        copies the embedded and extra parameters over.
-        It does not copy the extra Params into the subModels.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        bestModel = self.bestModel.copy(extra)
-        avgMetrics = self.avgMetrics
-        subModels = self.subModels
-        return CrossValidatorModel(bestModel, avgMetrics, subModels)
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java CrossValidatorModel, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-        bestModel = JavaParams._from_java(java_stage.bestModel())
-        estimator, epms, evaluator = super(CrossValidatorModel, cls)._from_java_impl(java_stage)
-
-        py_stage = cls(bestModel=bestModel).setEstimator(estimator)
-        py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
-
-        if java_stage.hasSubModels():
-            py_stage.subModels = [[JavaParams._from_java(sub_model)
-                                   for sub_model in fold_sub_models]
-                                  for fold_sub_models in java_stage.subModels()]
-
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java CrossValidatorModel. Used for ML persistence.
-
-        :return: Java object equivalent to this instance.
-        """
-
-        sc = SparkContext._active_spark_context
-        # TODO: persist average metrics as well
-        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidatorModel",
-                                             self.uid,
-                                             self.bestModel._to_java(),
-                                             _py2java(sc, []))
-        estimator, epms, evaluator = super(CrossValidatorModel, self)._to_java_impl()
-
-        _java_obj.set("evaluator", evaluator)
-        _java_obj.set("estimator", estimator)
-        _java_obj.set("estimatorParamMaps", epms)
-
-        if self.subModels is not None:
-            java_sub_models = [[sub_model._to_java() for sub_model in fold_sub_models]
-                               for fold_sub_models in self.subModels]
-            _java_obj.setSubModels(java_sub_models)
-        return _java_obj
-
-
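A short usage sketch for the model's bookkeeping attributes, continuing the `cv`/`cvModel` doctest above:

```python
# Pair each evaluated param map with its average cross-validation metric,
# then transform with the winning fitted model.
results = list(zip(cv.getEstimatorParamMaps(), cvModel.avgMetrics))
predictions = cvModel.bestModel.transform(dataset)
```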
-class TrainValidationSplit(Estimator, ValidatorParams, HasParallelism, HasCollectSubModels,
-                           MLReadable, MLWritable):
-    """
-    .. note:: Experimental
-
-    Validation for hyper-parameter tuning. Randomly splits the input dataset into train and
-    validation sets, and uses the evaluation metric on the validation set to select the best model.
-    Similar to :class:`CrossValidator`, but only splits the set once.
-
-    >>> from pyspark.ml.classification import LogisticRegression
-    >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
-    >>> from pyspark.ml.linalg import Vectors
-    >>> dataset = spark.createDataFrame(
-    ...     [(Vectors.dense([0.0]), 0.0),
-    ...      (Vectors.dense([0.4]), 1.0),
-    ...      (Vectors.dense([0.5]), 0.0),
-    ...      (Vectors.dense([0.6]), 1.0),
-    ...      (Vectors.dense([1.0]), 1.0)] * 10,
-    ...     ["features", "label"])
-    >>> lr = LogisticRegression()
-    >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
-    >>> evaluator = BinaryClassificationEvaluator()
-    >>> tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
-    ...     parallelism=2)
-    >>> tvsModel = tvs.fit(dataset)
-    >>> evaluator.evaluate(tvsModel.transform(dataset))
-    0.8333...
-
-    .. versionadded:: 2.0.0
-    """
-
-    trainRatio = Param(Params._dummy(), "trainRatio", "Param for ratio between train and\
-     validation data. Must be between 0 and 1.", typeConverter=TypeConverters.toFloat)
-
-    @keyword_only
-    def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
-                 parallelism=1, collectSubModels=False, seed=None):
-        """
-        __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
-                 parallelism=1, collectSubModels=False, seed=None)
-        """
-        super(TrainValidationSplit, self).__init__()
-        self._setDefault(trainRatio=0.75, parallelism=1)
-        kwargs = self._input_kwargs
-        self._set(**kwargs)
-
-    @since("2.0.0")
-    @keyword_only
-    def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
-                  parallelism=1, collectSubModels=False, seed=None):
-        """
-        setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
-                  parallelism=1, collectSubModels=False, seed=None):
-        Sets params for the train validation split.
-        """
-        kwargs = self._input_kwargs
-        return self._set(**kwargs)
-
-    @since("2.0.0")
-    def setTrainRatio(self, value):
-        """
-        Sets the value of :py:attr:`trainRatio`.
-        """
-        return self._set(trainRatio=value)
-
-    @since("2.0.0")
-    def getTrainRatio(self):
-        """
-        Gets the value of trainRatio or its default value.
-        """
-        return self.getOrDefault(self.trainRatio)
-
-    def _fit(self, dataset):
-        est = self.getOrDefault(self.estimator)
-        epm = self.getOrDefault(self.estimatorParamMaps)
-        numModels = len(epm)
-        eva = self.getOrDefault(self.evaluator)
-        tRatio = self.getOrDefault(self.trainRatio)
-        seed = self.getOrDefault(self.seed)
-        randCol = self.uid + "_rand"
-        df = dataset.select("*", rand(seed).alias(randCol))
-        condition = (df[randCol] >= tRatio)
-        validation = df.filter(condition).cache()
-        train = df.filter(~condition).cache()
-
-        subModels = None
-        collectSubModelsParam = self.getCollectSubModels()
-        if collectSubModelsParam:
-            subModels = [None for i in range(numModels)]
-
-        tasks = _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam)
-        pool = ThreadPool(processes=min(self.getParallelism(), numModels))
-        metrics = [None] * numModels
-        for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
-            metrics[j] = metric
-            if collectSubModelsParam:
-                subModels[j] = subModel
-
-        train.unpersist()
-        validation.unpersist()
-
-        if eva.isLargerBetter():
-            bestIndex = np.argmax(metrics)
-        else:
-            bestIndex = np.argmin(metrics)
-        bestModel = est.fit(dataset, epm[bestIndex])
-        return self._copyValues(TrainValidationSplitModel(bestModel, metrics, subModels))
-
-    @since("2.0.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copy creates a deep copy of
-        the embedded paramMap, and copies the embedded and extra parameters over.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        newTVS = Params.copy(self, extra)
-        if self.isSet(self.estimator):
-            newTVS.setEstimator(self.getEstimator().copy(extra))
-        # estimatorParamMaps remain the same
-        if self.isSet(self.evaluator):
-            newTVS.setEvaluator(self.getEvaluator().copy(extra))
-        return newTVS
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java TrainValidationSplit, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-
-        estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
-        trainRatio = java_stage.getTrainRatio()
-        seed = java_stage.getSeed()
-        parallelism = java_stage.getParallelism()
-        collectSubModels = java_stage.getCollectSubModels()
-        # Create a new instance of this stage.
-        py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
-                       trainRatio=trainRatio, seed=seed, parallelism=parallelism,
-                       collectSubModels=collectSubModels)
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java TrainValidationSplit. Used for ML persistence.
-        :return: Java object equivalent to this instance.
-        """
-
-        estimator, epms, evaluator = super(TrainValidationSplit, self)._to_java_impl()
-
-        _java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.TrainValidationSplit",
-                                             self.uid)
-        _java_obj.setEstimatorParamMaps(epms)
-        _java_obj.setEvaluator(evaluator)
-        _java_obj.setEstimator(estimator)
-        _java_obj.setTrainRatio(self.getTrainRatio())
-        _java_obj.setSeed(self.getSeed())
-        _java_obj.setParallelism(self.getParallelism())
-        _java_obj.setCollectSubModels(self.getCollectSubModels())
-        return _java_obj
-
-
-class TrainValidationSplitModel(Model, ValidatorParams, MLReadable, MLWritable):
-    """
-    .. note:: Experimental
-
-    Model from train validation split.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def __init__(self, bestModel, validationMetrics=[], subModels=None):
-        super(TrainValidationSplitModel, self).__init__()
-        #: best model from train validation split
-        self.bestModel = bestModel
-        #: evaluated validation metrics
-        self.validationMetrics = validationMetrics
-        #: sub models from train validation split
-        self.subModels = subModels
-
-    def _transform(self, dataset):
-        return self.bestModel.transform(dataset)
-
-    @since("2.0.0")
-    def copy(self, extra=None):
-        """
-        Creates a copy of this instance with a randomly generated uid
-        and some extra params. This copies the underlying bestModel,
-        creates a deep copy of the embedded paramMap, and
-        copies the embedded and extra parameters over.
-        It also creates a shallow copy of the validationMetrics.
-        It does not copy the extra Params into the subModels.
-
-        :param extra: Extra parameters to copy to the new instance
-        :return: Copy of this instance
-        """
-        if extra is None:
-            extra = dict()
-        bestModel = self.bestModel.copy(extra)
-        validationMetrics = list(self.validationMetrics)
-        subModels = self.subModels
-        return TrainValidationSplitModel(bestModel, validationMetrics, subModels)
-
-    @since("2.3.0")
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-    @classmethod
-    @since("2.3.0")
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
-    @classmethod
-    def _from_java(cls, java_stage):
-        """
-        Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
-        Used for ML persistence.
-        """
-
-        # Load information from java_stage to the instance.
-        bestModel = JavaParams._from_java(java_stage.bestModel())
-        estimator, epms, evaluator = super(TrainValidationSplitModel,
-                                           cls)._from_java_impl(java_stage)
-        # Create a new instance of this stage.
-        py_stage = cls(bestModel=bestModel).setEstimator(estimator)
-        py_stage = py_stage.setEstimatorParamMaps(epms).setEvaluator(evaluator)
-
-        if java_stage.hasSubModels():
-            py_stage.subModels = [JavaParams._from_java(sub_model)
-                                  for sub_model in java_stage.subModels()]
-
-        py_stage._resetUid(java_stage.uid())
-        return py_stage
-
-    def _to_java(self):
-        """
-        Transfer this instance to a Java TrainValidationSplitModel. Used for ML persistence.
-        :return: Java object equivalent to this instance.
-        """
-
-        sc = SparkContext._active_spark_context
-        # TODO: persist validation metrics as well
-        _java_obj = JavaParams._new_java_obj(
-            "org.apache.spark.ml.tuning.TrainValidationSplitModel",
-            self.uid,
-            self.bestModel._to_java(),
-            _py2java(sc, []))
-        estimator, epms, evaluator = super(TrainValidationSplitModel, self)._to_java_impl()
-
-        _java_obj.set("evaluator", evaluator)
-        _java_obj.set("estimator", estimator)
-        _java_obj.set("estimatorParamMaps", epms)
-
-        if self.subModels is not None:
-            java_sub_models = [sub_model._to_java() for sub_model in self.subModels]
-            _java_obj.setSubModels(java_sub_models)
-
-        return _java_obj
-
-
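Analogously to `CrossValidatorModel.avgMetrics`, a sketch continuing the `tvs`/`tvsModel` doctest above:

```python
# One validation metric per param map, in estimatorParamMaps order.
pairs = list(zip(tvs.getEstimatorParamMaps(), tvsModel.validationMetrics))
predictions = tvsModel.bestModel.transform(dataset)
```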
-if __name__ == "__main__":
-    import doctest
-
-    from pyspark.sql import SparkSession
-    globs = globals().copy()
-
-    # The small batch size here ensures that we see multiple batches,
-    # even in these small test examples:
-    spark = SparkSession.builder\
-        .master("local[2]")\
-        .appName("ml.tuning tests")\
-        .getOrCreate()
-    sc = spark.sparkContext
-    globs['sc'] = sc
-    globs['spark'] = spark
-    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
-    spark.stop()
-    if failure_count:
-        sys.exit(-1)
-
-<!-- page footer: © Copyright 2020, Splice Machine. Built with Sphinx using a theme provided by Read the Docs. -->
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyspark/ml/util.html b/docs/_build/html/_modules/pyspark/ml/util.html
deleted file mode 100644
index e086820c..00000000
--- a/docs/_build/html/_modules/pyspark/ml/util.html
+++ /dev/null
@@ -1,818 +0,0 @@
-<!-- page chrome: title "pyspark.ml.util — Splice MLManager documentation", breadcrumbs "Module code » pyspark.ml.util" -->

Source code for pyspark.ml.util

-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-import json
-import sys
-import os
-import time
-import uuid
-import warnings
-
-if sys.version > '3':
-    basestring = str
-    unicode = str
-    long = int
-
-from pyspark import SparkContext, since
-from pyspark.ml.common import inherit_doc
-from pyspark.sql import SparkSession
-from pyspark.util import VersionUtils
-
-
-def _jvm():
-    """
-    Returns the JVM view associated with SparkContext. Must be called
-    after SparkContext is initialized.
-    """
-    jvm = SparkContext._jvm
-    if jvm:
-        return jvm
-    else:
-        raise AttributeError("Cannot load _jvm from SparkContext. Is SparkContext initialized?")
-
-
-class Identifiable(object):
-    """
-    Object with a unique ID.
-    """
-
-    def __init__(self):
-        #: A unique id for the object.
-        self.uid = self._randomUID()
-
-    def __repr__(self):
-        return self.uid
-
-    @classmethod
-    def _randomUID(cls):
-        """
-        Generate a unique unicode id for the object. The default implementation
-        concatenates the class name, "_", and 12 random hex chars.
-        """
-        return unicode(cls.__name__ + "_" + uuid.uuid4().hex[-12:])
-
-
-@inherit_doc
-class BaseReadWrite(object):
-    """
-    Base class for MLWriter and MLReader. Stores information about the SparkContext
-    and SparkSession.
-
-    .. versionadded:: 2.3.0
-    """
-
-    def __init__(self):
-        self._sparkSession = None
-
-    def context(self, sqlContext):
-        """
-        Sets the Spark SQLContext to use for saving/loading.
-
-        .. note:: Deprecated in 2.1 and will be removed in 3.0, use session instead.
-        """
-        raise NotImplementedError("Read/Write is not yet implemented for type: %s" % type(self))
-
-    def session(self, sparkSession):
-        """
-        Sets the Spark Session to use for saving/loading.
-        """
-        self._sparkSession = sparkSession
-        return self
-
-    @property
-    def sparkSession(self):
-        """
-        Returns the user-specified Spark Session or the default.
-        """
-        if self._sparkSession is None:
-            self._sparkSession = SparkSession.builder.getOrCreate()
-        return self._sparkSession
-
-    @property
-    def sc(self):
-        """
-        Returns the underlying `SparkContext`.
-        """
-        return self.sparkSession.sparkContext
-
-
-@inherit_doc
-class MLWriter(BaseReadWrite):
-    """
-    Utility class that can save ML instances.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def __init__(self):
-        super(MLWriter, self).__init__()
-        self.shouldOverwrite = False
-
-    def _handleOverwrite(self, path):
-        from pyspark.ml.wrapper import JavaWrapper
-
-        _java_obj = JavaWrapper._new_java_obj("org.apache.spark.ml.util.FileSystemOverwrite")
-        wrapper = JavaWrapper(_java_obj)
-        wrapper._call_java("handleOverwrite", path, True, self.sc._jsc.sc())
-
-    def save(self, path):
-        """Save the ML instance to the input path."""
-        if self.shouldOverwrite:
-            self._handleOverwrite(path)
-        self.saveImpl(path)
-
-    def saveImpl(self, path):
-        """
-        save() handles overwriting and then calls this method.  Subclasses should override this
-        method to implement the actual saving of the instance.
-        """
-        raise NotImplementedError("MLWriter is not yet implemented for type: %s" % type(self))
-
-    def overwrite(self):
-        """Overwrites if the output path already exists."""
-        self.shouldOverwrite = True
-        return self
-
-
-@inherit_doc
-class GeneralMLWriter(MLWriter):
-    """
-    Utility class that can save ML instances in different formats.
-
-    .. versionadded:: 2.4.0
-    """
-
-    def format(self, source):
-        """
-        Specifies the format of ML export (e.g. "pmml", "internal", or the fully qualified class
-        name for export).
-        """
-        self.source = source
-        return self
-
-
-@inherit_doc
-class JavaMLWriter(MLWriter):
-    """
-    (Private) Specialization of :py:class:`MLWriter` for :py:class:`JavaParams` types
-    """
-
-    def __init__(self, instance):
-        super(JavaMLWriter, self).__init__()
-        _java_obj = instance._to_java()
-        self._jwrite = _java_obj.write()
-
-    def save(self, path):
-        """Save the ML instance to the input path."""
-        if not isinstance(path, basestring):
-            raise TypeError("path should be a basestring, got type %s" % type(path))
-        self._jwrite.save(path)
-
-    def overwrite(self):
-        """Overwrites if the output path already exists."""
-        self._jwrite.overwrite()
-        return self
-
-    def option(self, key, value):
-        self._jwrite.option(key, value)
-        return self
-
-    def context(self, sqlContext):
-        """
-        Sets the SQL context to use for saving.
-
-        .. note:: Deprecated in 2.1 and will be removed in 3.0, use session instead.
-        """
-        warnings.warn(
-            "Deprecated in 2.1 and will be removed in 3.0, use session instead.",
-            DeprecationWarning)
-        self._jwrite.context(sqlContext._ssql_ctx)
-        return self
-
-    def session(self, sparkSession):
-        """Sets the Spark Session to use for saving."""
-        self._jwrite.session(sparkSession._jsparkSession)
-        return self
-
-
-@inherit_doc
-class GeneralJavaMLWriter(JavaMLWriter):
-    """
-    (Private) Specialization of :py:class:`GeneralMLWriter` for :py:class:`JavaParams` types
-    """
-
-    def __init__(self, instance):
-        super(GeneralJavaMLWriter, self).__init__(instance)
-
-    def format(self, source):
-        """
-        Specifies the format of ML export (e.g. "pmml", "internal", or the fully qualified class
-        name for export).
-        """
-        self._jwrite.format(source)
-        return self
-
-
-@inherit_doc
-class MLWritable(object):
-    """
-    Mixin for ML instances that provide :py:class:`MLWriter`.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        raise NotImplementedError("MLWritable is not yet implemented for type: %r" % type(self))
-
-    def save(self, path):
-        """Save this ML instance to the given path, a shortcut of 'write().save(path)'."""
-        self.write().save(path)
-
-
-@inherit_doc
-class JavaMLWritable(MLWritable):
-    """
-    (Private) Mixin for ML instances that provide :py:class:`JavaMLWriter`.
-    """
-
-    def write(self):
-        """Returns an MLWriter instance for this ML instance."""
-        return JavaMLWriter(self)
-
-
-@inherit_doc
-class GeneralJavaMLWritable(JavaMLWritable):
-    """
-    (Private) Mixin for ML instances that provide :py:class:`GeneralJavaMLWriter`.
-    """
-
-    def write(self):
-        """Returns an GeneralMLWriter instance for this ML instance."""
-        return GeneralJavaMLWriter(self)
-
-
-@inherit_doc
-class MLReader(BaseReadWrite):
-    """
-    Utility class that can load ML instances.
-
-    .. versionadded:: 2.0.0
-    """
-
-    def __init__(self):
-        super(MLReader, self).__init__()
-
-    def load(self, path):
-        """Load the ML instance from the input path."""
-        raise NotImplementedError("MLReader is not yet implemented for type: %s" % type(self))
-
-
-@inherit_doc
-class JavaMLReader(MLReader):
-    """
-    (Private) Specialization of :py:class:`MLReader` for :py:class:`JavaParams` types
-    """
-
-    def __init__(self, clazz):
-        super(JavaMLReader, self).__init__()
-        self._clazz = clazz
-        self._jread = self._load_java_obj(clazz).read()
-
-    def load(self, path):
-        """Load the ML instance from the input path."""
-        if not isinstance(path, basestring):
-            raise TypeError("path should be a basestring, got type %s" % type(path))
-        java_obj = self._jread.load(path)
-        if not hasattr(self._clazz, "_from_java"):
-            raise NotImplementedError("This Java ML type cannot be loaded into Python currently: %r"
-                                      % self._clazz)
-        return self._clazz._from_java(java_obj)
-
-    def context(self, sqlContext):
-        """
-        Sets the SQL context to use for loading.
-
-        .. note:: Deprecated in 2.1 and will be removed in 3.0, use session instead.
-        """
-        warnings.warn(
-            "Deprecated in 2.1 and will be removed in 3.0, use session instead.",
-            DeprecationWarning)
-        self._jread.context(sqlContext._ssql_ctx)
-        return self
-
-    def session(self, sparkSession):
-        """Sets the Spark Session to use for loading."""
-        self._jread.session(sparkSession._jsparkSession)
-        return self
-
-    @classmethod
-    def _java_loader_class(cls, clazz):
-        """
-        Returns the full class name of the Java ML instance. The default
-        implementation replaces "pyspark" by "org.apache.spark" in
-        the Python full class name.
-        """
-        java_package = clazz.__module__.replace("pyspark", "org.apache.spark")
-        if clazz.__name__ in ("Pipeline", "PipelineModel"):
-            # Remove the last package name "pipeline" for Pipeline and PipelineModel.
-            java_package = ".".join(java_package.split(".")[0:-1])
-        return java_package + "." + clazz.__name__
-
-    @classmethod
-    def _load_java_obj(cls, clazz):
-        """Load the peer Java object of the ML instance."""
-        java_class = cls._java_loader_class(clazz)
-        java_obj = _jvm()
-        for name in java_class.split("."):
-            java_obj = getattr(java_obj, name)
-        return java_obj
-
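To make the name mapping concrete, here is a small illustration, runnable against a plain pyspark install (nothing here is specific to Splice Machine):

```python
from pyspark.ml.pipeline import Pipeline
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.util import JavaMLReader

# "pyspark" is replaced by "org.apache.spark"; for Pipeline/PipelineModel the
# trailing "pipeline" package is dropped as well:
print(JavaMLReader._java_loader_class(LogisticRegression))
# org.apache.spark.ml.classification.LogisticRegression
print(JavaMLReader._java_loader_class(Pipeline))
# org.apache.spark.ml.Pipeline
```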
-
-@inherit_doc
-class MLReadable(object):
-    """
-    Mixin for instances that provide :py:class:`MLReader`.
-
-    .. versionadded:: 2.0.0
-    """
-
-    @classmethod
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        raise NotImplementedError("MLReadable.read() not implemented for type: %r" % cls)
-
-    @classmethod
-    def load(cls, path):
-        """Reads an ML instance from the input path, a shortcut of `read().load(path)`."""
-        return cls.read().load(path)
-
-
-@inherit_doc
-class JavaMLReadable(MLReadable):
-    """
-    (Private) Mixin for instances that provide JavaMLReader.
-    """
-
-    @classmethod
-    def read(cls):
-        """Returns an MLReader instance for this class."""
-        return JavaMLReader(cls)
-
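As a hedged orientation sketch (not part of this module; it assumes a local Spark installation and uses LogisticRegression simply as one JavaMLWritable/JavaMLReadable type), the writer and reader mixins above surface to users like this:

```python
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel

spark = SparkSession.builder.master("local[2]").appName("rw-demo").getOrCreate()
df = spark.createDataFrame(
    [(0.0, Vectors.dense(0.0)), (1.0, Vectors.dense(1.0))],
    ["label", "features"])

model = LogisticRegression(maxIter=5).fit(df)
model.write().overwrite().save("/tmp/lr_model")          # JavaMLWriter via JavaMLWritable
loaded = LogisticRegressionModel.load("/tmp/lr_model")   # JavaMLReader via JavaMLReadable
```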
-
-@inherit_doc
-class JavaPredictionModel():
-    """
-    (Private) Java Model for prediction tasks (regression and classification).
-    To be mixed in with class:`pyspark.ml.JavaModel`
-    """
-
-    @property
-    @since("2.1.0")
-    def numFeatures(self):
-        """
-        Returns the number of features the model was trained on. If unknown, returns -1.
-        """
-        return self._call_java("numFeatures")
-
-
-@inherit_doc
-class DefaultParamsWritable(MLWritable):
-    """
-    .. note:: DeveloperApi
-
-    Helper trait for making simple :py:class:`Params` types writable.  If a :py:class:`Params`
-    class stores all data as :py:class:`Param` values, then extending this trait will provide
-    a default implementation of writing saved instances of the class.
-    This only handles simple :py:class:`Param` types; e.g., it will not handle
-    :py:class:`Dataset`. See :py:class:`DefaultParamsReadable`, the counterpart to this trait.
-
-    .. versionadded:: 2.3.0
-    """
-
-    def write(self):
-        """Returns a DefaultParamsWriter instance for this class."""
-        from pyspark.ml.param import Params
-
-        if isinstance(self, Params):
-            return DefaultParamsWriter(self)
-        else:
-            raise TypeError("Cannot use DefaultParamsWritable with type %s because it does not "
-                            "extend Params." % type(self))
-
-
-@inherit_doc
-class DefaultParamsWriter(MLWriter):
-    """
-    .. note:: DeveloperApi
-
-    Specialization of :py:class:`MLWriter` for :py:class:`Params` types
-
-    Class for writing Estimators and Transformers whose parameters are JSON-serializable.
-
-    .. versionadded:: 2.3.0
-    """
-
-    def __init__(self, instance):
-        super(DefaultParamsWriter, self).__init__()
-        self.instance = instance
-
-    def saveImpl(self, path):
-        DefaultParamsWriter.saveMetadata(self.instance, path, self.sc)
-
-    @staticmethod
-    def saveMetadata(instance, path, sc, extraMetadata=None, paramMap=None):
-        """
-        Saves metadata + Params to: path + "/metadata"
-        - class
-        - timestamp
-        - sparkVersion
-        - uid
-        - paramMap
-        - defaultParamMap (since 2.4.0)
-        - (optionally, extra metadata)
-        :param extraMetadata:  Extra metadata to be saved at the same level as uid, paramMap, etc.
-        :param paramMap:  If given, this is saved in the "paramMap" field.
-        """
-        metadataPath = os.path.join(path, "metadata")
-        metadataJson = DefaultParamsWriter._get_metadata_to_save(instance,
-                                                                 sc,
-                                                                 extraMetadata,
-                                                                 paramMap)
-        sc.parallelize([metadataJson], 1).saveAsTextFile(metadataPath)
-
-    @staticmethod
-    def _get_metadata_to_save(instance, sc, extraMetadata=None, paramMap=None):
-        """
-        Helper for :py:meth:`DefaultParamsWriter.saveMetadata` which extracts the JSON to save.
-        This is useful for ensemble models which need to save metadata for many sub-models.
-
-        .. note:: See :py:meth:`DefaultParamsWriter.saveMetadata` for details on what this includes.
-        """
-        uid = instance.uid
-        cls = instance.__module__ + '.' + instance.__class__.__name__
-
-        # User-supplied param values
-        params = instance._paramMap
-        jsonParams = {}
-        if paramMap is not None:
-            jsonParams = paramMap
-        else:
-            for p in params:
-                jsonParams[p.name] = params[p]
-
-        # Default param values
-        jsonDefaultParams = {}
-        for p in instance._defaultParamMap:
-            jsonDefaultParams[p.name] = instance._defaultParamMap[p]
-
-        basicMetadata = {"class": cls, "timestamp": long(round(time.time() * 1000)),
-                         "sparkVersion": sc.version, "uid": uid, "paramMap": jsonParams,
-                         "defaultParamMap": jsonDefaultParams}
-        if extraMetadata is not None:
-            basicMetadata.update(extraMetadata)
-        return json.dumps(basicMetadata, separators=[',',  ':'])
-
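For illustration, the single JSON line written under `path + "/metadata"` is shaped roughly as below; the class name and every value here are invented:

```python
# Hypothetical contents of <path>/metadata/part-00000 (one JSON line):
{
    "class": "__main__.MyTransformer",    # module.ClassName of the saved instance
    "timestamp": 1577836800000,           # long(round(time.time() * 1000))
    "sparkVersion": "2.4.5",
    "uid": "MyTransformer_abc123def456",
    "paramMap": {"threshold": 0.75},      # user-supplied params only
    "defaultParamMap": {}                 # default params, included since Spark 2.4
}
```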
-
-@inherit_doc
-class DefaultParamsReadable(MLReadable):
-    """
-    .. note:: DeveloperApi
-
-    Helper trait for making simple :py:class:`Params` types readable.
-    If a :py:class:`Params` class stores all data as :py:class:`Param` values,
-    then extending this trait will provide a default implementation of reading saved
-    instances of the class. This only handles simple :py:class:`Param` types;
-    e.g., it will not handle :py:class:`Dataset`. See :py:class:`DefaultParamsWritable`,
-    the counterpart to this trait.
-
-    .. versionadded:: 2.3.0
-    """
-
-    @classmethod
-    def read(cls):
-        """Returns a DefaultParamsReader instance for this class."""
-        return DefaultParamsReader(cls)
-
-
-@inherit_doc
-class DefaultParamsReader(MLReader):
-    """
-    .. note:: DeveloperApi
-
-    Specialization of :py:class:`MLReader` for :py:class:`Params` types
-
-    Default :py:class:`MLReader` implementation for transformers and estimators that
-    contain basic (json-serializable) params and no data. This will not handle
-    more complex params or types with data (e.g., models with coefficients).
-
-    .. versionadded:: 2.3.0
-    """
-
-    def __init__(self, cls):
-        super(DefaultParamsReader, self).__init__()
-        self.cls = cls
-
-    @staticmethod
-    def __get_class(clazz):
-        """
-        Loads Python class from its name.
-        """
-        parts = clazz.split('.')
-        module = ".".join(parts[:-1])
-        m = __import__(module)
-        for comp in parts[1:]:
-            m = getattr(m, comp)
-        return m
-
-    def load(self, path):
-        metadata = DefaultParamsReader.loadMetadata(path, self.sc)
-        py_type = DefaultParamsReader.__get_class(metadata['class'])
-        instance = py_type()
-        instance._resetUid(metadata['uid'])
-        DefaultParamsReader.getAndSetParams(instance, metadata)
-        return instance
-
-    @staticmethod
-    def loadMetadata(path, sc, expectedClassName=""):
-        """
-        Load metadata saved using :py:meth:`DefaultParamsWriter.saveMetadata`
-
-        :param expectedClassName:  If non-empty, this is checked against the loaded metadata.
-        """
-        metadataPath = os.path.join(path, "metadata")
-        metadataStr = sc.textFile(metadataPath, 1).first()
-        loadedVals = DefaultParamsReader._parseMetaData(metadataStr, expectedClassName)
-        return loadedVals
-
-    @staticmethod
-    def _parseMetaData(metadataStr, expectedClassName=""):
-        """
-        Parse metadata JSON string produced by :py:meth:`DefaultParamsWriter._get_metadata_to_save`.
-        This is a helper function for :py:meth:`DefaultParamsReader.loadMetadata`.
-
-        :param metadataStr:  JSON string of metadata
-        :param expectedClassName:  If non-empty, this is checked against the loaded metadata.
-        """
-        metadata = json.loads(metadataStr)
-        className = metadata['class']
-        if len(expectedClassName) > 0:
-            assert className == expectedClassName, "Error loading metadata: Expected " + \
-                "class name {} but found class name {}".format(expectedClassName, className)
-        return metadata
-
-    @staticmethod
-    def getAndSetParams(instance, metadata):
-        """
-        Extract Params from metadata, and set them in the instance.
-        """
-        # Set user-supplied param values
-        for paramName in metadata['paramMap']:
-            param = instance.getParam(paramName)
-            paramValue = metadata['paramMap'][paramName]
-            instance.set(param, paramValue)
-
-        # Set default param values
-        majorAndMinorVersions = VersionUtils.majorMinorVersion(metadata['sparkVersion'])
-        major = majorAndMinorVersions[0]
-        minor = majorAndMinorVersions[1]
-
-        # For metadata file prior to Spark 2.4, there is no default section.
-        if major > 2 or (major == 2 and minor >= 4):
-            assert 'defaultParamMap' in metadata, "Error loading metadata: Expected " + \
-                "`defaultParamMap` section not found"
-
-            for paramName in metadata['defaultParamMap']:
-                paramValue = metadata['defaultParamMap'][paramName]
-                instance._setDefault(**{paramName: paramValue})
-
-    @staticmethod
-    def loadParamsInstance(path, sc):
-        """
-        Load a :py:class:`Params` instance from the given path, and return it.
-        This assumes the instance inherits from :py:class:`MLReadable`.
-        """
-        metadata = DefaultParamsReader.loadMetadata(path, sc)
-        pythonClassName = metadata['class'].replace("org.apache.spark", "pyspark")
-        py_type = DefaultParamsReader.__get_class(pythonClassName)
-        instance = py_type.load(path)
-        return instance
-
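As a hedged end-to-end sketch (the `MySettings` class is hypothetical, and an active SparkSession plus a writable /tmp are assumed), a pure-Python Params type becomes persistable just by mixing in the two classes above:

```python
from pyspark.sql import SparkSession
from pyspark.ml.param import Param, Params

class MySettings(Params, DefaultParamsWritable, DefaultParamsReadable):
    """Hypothetical Params-only type; all of its state lives in Param values."""
    threshold = Param(Params._dummy(), "threshold", "an example param")

    def __init__(self):
        super(MySettings, self).__init__()

spark = SparkSession.builder.master("local[2]").getOrCreate()
s = MySettings()
s.set(s.threshold, 0.75)
s.write().overwrite().save("/tmp/my_settings")      # DefaultParamsWriter path
loaded = MySettings.load("/tmp/my_settings")        # DefaultParamsReader path
assert loaded.getOrDefault(loaded.threshold) == 0.75
```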
\ No newline at end of file
diff --git a/docs/_build/html/_modules/spark/context.html b/docs/_build/html/_modules/spark/context.html
deleted file mode 100644
index 47fc7c22..00000000
--- a/docs/_build/html/_modules/spark/context.html
+++ /dev/null
@@ -1,844 +0,0 @@
Source code for spark.context

-"""
-Copyright 2020 Splice Machine, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import print_function
-
-import os
-
-from py4j.java_gateway import java_import
-from pyspark.sql import DataFrame
-from pyspark.sql.types import _parse_datatype_json_string
-from splicemachine.spark.constants import CONVERSIONS
-
-
-
-class PySpliceContext:
-    """
-    This class implements a SpliceMachineContext object (similar to the SparkContext object)
-    """
-    _spliceSparkPackagesName = "com.splicemachine.spark.splicemachine.*"
-
-    def _splicemachineContext(self):
-        return self.jvm.com.splicemachine.spark.splicemachine.SplicemachineContext(self.jdbcurl)
-
-    def __init__(self, sparkSession, JDBC_URL=None, _unit_testing=False):
-        """
-        :param JDBC_URL: (str) The JDBC URL connection string for your Splice Machine cluster
-        :param sparkSession: (SparkSession) A SparkSession object for talking to Spark
-        """
-        if JDBC_URL:
-            self.jdbcurl = JDBC_URL
-        else:
-            try:
-                self.jdbcurl = os.environ['BEAKERX_SQL_DEFAULT_JDBC']
-            except KeyError:
-                raise KeyError(
-                    "Could not locate JDBC URL. If you are not running on the cloud service, "
-                    "please specify the JDBC_URL=<some url> keyword argument in the constructor"
-                )
-
-        self._unit_testing = _unit_testing
-
-        if not _unit_testing:  # Private internal argument to override using the JVM
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = self.spark_sql_context._sc._jvm
-            java_import(self.jvm, self._spliceSparkPackagesName)
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions")
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils")
-            java_import(self.jvm, "scala.collection.JavaConverters._")
-            java_import(self.jvm, "com.splicemachine.derby.impl.*")
-            java_import(self.jvm, 'org.apache.spark.api.python.PythonUtils')
-            self.jvm.com.splicemachine.derby.impl.SpliceSpark.setContext(
-                self.spark_sql_context._jsc)
-            self.context = self._splicemachineContext()
-        else:
-            from .tests.mocked import MockedScalaContext
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = ''
-            self.context = MockedScalaContext(self.jdbcurl)
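A hedged usage sketch (the JDBC URL is a placeholder; inside the Splice cloud service it can be omitted and is read from the environment, as the constructor above shows):

```python
from pyspark.sql import SparkSession
from splicemachine.spark import PySpliceContext

spark = SparkSession.builder.getOrCreate()
splice = PySpliceContext(
    spark,
    JDBC_URL="jdbc:splice://<host>:1527/splicedb;user=<user>;password=<password>")

# Query straight into a Spark DataFrame (table name is illustrative)
df = splice.df("SELECT * FROM SOMESCHEMA.SOMETABLE")
```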
[docs] def toUpper(self, dataframe): - """ - Returns a dataframe with all of the columns in uppercase - - :param dataframe: (Dataframe) The dataframe to convert to uppercase - """ - for s in dataframe.schema: - s.name = s.name.upper() - # You need to re-generate the dataframe for the capital letters to take effect - return dataframe.rdd.toDF(dataframe.schema)
- -
[docs] def replaceDataframeSchema(self, dataframe, schema_table_name): - """ - Returns a dataframe with all column names replaced with the proper string case from the DB table - - :param dataframe: (Dataframe) A dataframe with column names to convert - :param schema_table_name: (str) The schema.table with the correct column cases to pull from the database - :return: (DataFrame) A Spark DataFrame with the replaced schema - """ - schema = self.getSchema(schema_table_name) - # Fastest way to replace the column case if changed - dataframe = dataframe.rdd.toDF(schema) - return dataframe
- -
[docs] def getConnection(self): - """ - Return a connection to the database - """ - return self.context.getConnection()
- -
[docs] def tableExists(self, schema_and_or_table_name, table_name=None): - """ - Check whether or not a table exists - - :Example: - .. code-block:: python - - splice.tableExists('schemaName.tableName')\n - # or\n - splice.tableExists('schemaName', 'tableName') - - :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name - :return: (bool) whether or not the table exists - """ - if table_name: - return self.context.tableExists(schema_and_or_table_name, table_name) - else: - return self.context.tableExists(schema_and_or_table_name)
- -
[docs] def dropTable(self, schema_and_or_table_name, table_name=None): - """ - Drop a specified table. - - :Example: - .. code-block:: python - - splice.dropTable('schemaName.tableName') \n - # or\n - splice.dropTable('schemaName', 'tableName') - - :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name - :return: None - """ - if table_name: - return self.context.dropTable(schema_and_or_table_name, table_name) - else: - return self.context.dropTable(schema_and_or_table_name)
- -
[docs] def df(self, sql): - """ - Return a Spark Dataframe from the results of a Splice Machine SQL Query - - :Example: - .. code-block:: python - - df = splice.df('SELECT * FROM MYSCHEMA.TABLE1 WHERE COL2 > 3') - - :param sql: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: (Dataframe) A Spark DataFrame containing the results - """ - return DataFrame(self.context.df(sql), self.spark_sql_context)
- -
[docs] def insert(self, dataframe, schema_table_name, to_upper=False): - """ - Insert a dataframe into a table (schema.table). - - :param dataframe: (Dataframe) The dataframe you would like to insert - :param schema_table_name: (str) The table in which you would like to insert the DF - :param to_upper: (bool) If the dataframe columns should be converted to uppercase before table creation - If False, the table will be created with lower case columns. [Default False] - :return: None - """ - if to_upper: - dataframe = self.toUpper(dataframe) - return self.context.insert(dataframe._jdf, schema_table_name)
- -
-    def insertWithStatus(self, dataframe, schema_table_name, statusDirectory, badRecordsAllowed):
-        """
-        Insert a dataframe into a table (schema.table) while tracking and limiting records that fail to insert.
-        The status directory and number of badRecordsAllowed allow for duplicate primary keys to be
-        written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written
-        to the status directory.
-
-        :param dataframe: (Dataframe) The dataframe you would like to insert
-        :param schema_table_name: (str) The table in which you would like to insert the dataframe
-        :param statusDirectory: (str) The status directory where the bad records file will be created
-        :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited
-        :return: None
-        """
-        dataframe = self.replaceDataframeSchema(dataframe, schema_table_name)
-        return self.context.insert(dataframe._jdf, schema_table_name, statusDirectory, badRecordsAllowed)
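Continuing the sketch above, a hedged example of tolerating a bounded number of bad records (path and table name are illustrative):

```python
# Allow up to 10 bad records before the insert fails; rejects land in the status directory
splice.insertWithStatus(df, "SOMESCHEMA.SOMETABLE",
                        statusDirectory="/tmp/bad_records",
                        badRecordsAllowed=10)
```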
- -
[docs] def insertRdd(self, rdd, schema, schema_table_name): - """ - Insert an rdd into a table (schema.table) - - :param rdd: (RDD) The RDD you would like to insert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to insert the RDD - :return: None - """ - return self.insert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
-    def insertRddWithStatus(self, rdd, schema, schema_table_name, statusDirectory, badRecordsAllowed):
-        """
-        Insert an rdd into a table (schema.table) while tracking and limiting records that fail to insert.
-        The status directory and number of badRecordsAllowed allow for duplicate primary keys to be
-        written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written
-        to the status directory.
-
-        :param rdd: (RDD) The RDD you would like to insert
-        :param schema: (StructType) The schema of the rows in the RDD
-        :param schema_table_name: (str) The table in which you would like to insert the dataframe
-        :param statusDirectory: (str) The status directory where the bad records file will be created
-        :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited
-        :return: None
-        """
-        return self.insertWithStatus(
-            self.createDataFrame(rdd, schema),
-            schema_table_name,
-            statusDirectory,
-            badRecordsAllowed
-        )
- -
[docs] def upsert(self, dataframe, schema_table_name): - """ - Upsert the data from a dataframe into a table (schema.table). - - :param dataframe: (Dataframe) The dataframe you would like to upsert - :param schema_table_name: (str) The table in which you would like to upsert the RDD - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - return self.context.upsert(dataframe._jdf, schema_table_name)
- -
[docs] def upsertWithRdd(self, rdd, schema, schema_table_name): - """ - Upsert the data from an RDD into a table (schema.table). - - :param rdd: (RDD) The RDD you would like to upsert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to upsert the RDD - :return: None - """ - return self.upsert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def delete(self, dataframe, schema_table_name): - """ - Delete records in a dataframe based on joining by primary keys from the data frame. - Be careful with column naming and case sensitivity. - - :param dataframe: (Dataframe) The dataframe you would like to delete - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.context.delete(dataframe._jdf, schema_table_name)
- -
[docs] def deleteWithRdd(self, rdd, schema, schema_table_name): - """ - Delete records using an rdd based on joining by primary keys from the rdd. - Be careful with column naming and case sensitivity. - - :param rdd: (RDD) The RDD containing the primary keys you would like to delete from the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.delete( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def update(self, dataframe, schema_table_name): - """ - Update data from a dataframe for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param dataframe: (Dataframe) The dataframe you would like to update - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - return self.context.update(dataframe._jdf, schema_table_name)
- -
[docs] def updateWithRdd(self, rdd, schema, schema_table_name): - """ - Update data from an rdd for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param rdd: (RDD) The RDD you would like to use for updating the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - return self.update( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def getSchema(self, schema_table_name): - """ - Return the schema via JDBC. - - :param schema_table_name: (str) Table name - :return: (StructType) PySpark StructType representation of the table - """ - return _parse_datatype_json_string(self.context.getSchema(schema_table_name).json())
- -
-    def execute(self, query_string):
-        '''
-        Execute a query over JDBC
-
-        :Example:
-        .. code-block:: python
-
-            splice.execute('DELETE FROM TABLE1 WHERE col2 > 3')
-
-        :param query_string: (str) SQL query (eg. DELETE FROM table1 WHERE col2 > 3)
-        :return: None
-        '''
-        return self.context.execute(query_string)
-
-    def executeUpdate(self, query_string):
-        '''
-        Execute a DML/DDL query (update, delete, drop, etc.)
-
-        :Example:
-        .. code-block:: python
-
-            splice.executeUpdate('DROP TABLE table1')
-
-        :param query_string: (str) SQL query (eg. DROP TABLE table1)
-        :return: None
-        '''
-        return self.context.executeUpdate(query_string)
- -
[docs] def internalDf(self, query_string): - ''' - SQL to Dataframe translation (Lazy). Runs the query inside Splice Machine and sends the results to the Spark Adapter app - - :param query_string: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: (DataFrame) pyspark dataframe contains the result of query_string - ''' - return DataFrame(self.context.internalDf(query_string), self.spark_sql_context)
- -
[docs] def rdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - - :param schema_table_name: (string) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.df('select '+colnames+' from '+schema_table_name).rdd
- -
[docs] def internalRdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - Runs the projection inside Splice Machine and sends the results to the Spark Adapter app as an rdd - - :param schema_table_name: (str) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.internalDf('select '+colnames+' from '+schema_table_name).rdd
- -
[docs] def truncateTable(self, schema_table_name): - """ - Truncate a table - - :param schema_table_name: (str) the full table name in the format "schema.table_name" which will be truncated - :return: None - """ - return self.context.truncateTable(schema_table_name)
- -
-    def analyzeSchema(self, schema_name):
-        """
-        Analyze the schema
-
-        :param schema_name: (str) schema name for which stats info will be collected
-        :return: None
-        """
-        return self.context.analyzeSchema(schema_name)
-
-    def analyzeTable(self, schema_table_name, estimateStatistics=False, samplePercent=10.0):
-        """
-        Collect stats info on a table
-
-        :param schema_table_name: full table name in the format of 'schema.table'
-        :param estimateStatistics: will use estimate statistics if True
-        :param samplePercent: the percentage of rows to be sampled
-        :return: None
-        """
-        return self.context.analyzeTable(schema_table_name, estimateStatistics, float(samplePercent))
- -
[docs] def export(self, - dataframe, - location, - compression=False, - replicationCount=1, - fileEncoding=None, - fieldSeparator=None, - quoteCharacter=None): - """ - Export a dataFrame in CSV - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param replicationCount: (int) Replication used for HDFS write - :param fileEncoding: (str) fileEncoding or None, defaults to UTF-8 - :param fieldSeparator: (str) fieldSeparator or None, defaults to ',' - :param quoteCharacter: (str) quoteCharacter or None, defaults to '"' - :return: None - """ - return self.context.export(dataframe._jdf, location, compression, replicationCount, - fileEncoding, fieldSeparator, quoteCharacter)
- -
[docs] def exportBinary(self, dataframe, location, compression, e_format='parquet'): - """ - Export a dataFrame in binary format - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param e_format: (str) Binary format to be used, currently only 'parquet' is supported. [Default 'parquet'] - :return: None - """ - return self.context.exportBinary(dataframe._jdf, location, compression, e_format)
- -
[docs] def bulkImportHFile(self, dataframe, schema_table_name, options): - """ - Bulk Import HFile from a dataframe into a schema.table - - :param dataframe: (DataFrame) - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: None - """ - optionsMap = self.jvm.java.util.HashMap() - for k, v in options.items(): - optionsMap.put(k, v) - return self.context.bulkImportHFile(dataframe._jdf, schema_table_name, optionsMap)
- -
[docs] def bulkImportHFileWithRdd(self, rdd, schema, schema_table_name, options): - """ - Bulk Import HFile from an rdd into a schema.table - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: None - """ - return self.bulkImportHFile( - self.createDataFrame(rdd, schema), - schema_table_name, - options - )
- -
[docs] def splitAndInsert(self, dataframe, schema_table_name, sample_fraction): - """ - Sample the dataframe, split the table, and insert a dataFrame into a schema.table. - This corresponds to an insert into from select statement - - :param dataframe: (DataFrame) Input data - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param sample_fraction: (float) A value between 0 and 1 that specifies the percentage of data in the dataFrame \ - that should be sampled to determine the splits. \ - For example, specify 0.005 if you want 0.5% of the data sampled. - :return: None - """ - return self.context.splitAndInsert(dataframe._jdf, schema_table_name, float(sample_fraction))
- -
[docs] def createDataFrame(self, rdd, schema): - """ - Creates a dataframe from a given rdd and schema. - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :return: (DataFrame) The Spark DataFrame - """ - return self.spark_session.createDataFrame(rdd, schema)
- - def _generateDBSchema(self, dataframe, types={}): - """ - Generate the schema for create table - """ - # convert keys and values to uppercase in the types dictionary - types = dict((key.upper(), val) for key, val in types.items()) - db_schema = [] - # convert dataframe to have all uppercase column names - dataframe = self.toUpper(dataframe) - # i contains the name and pyspark datatype of the column - for i in dataframe.schema: - if i.name.upper() in types: - print('Column {} is of type {}'.format( - i.name.upper(), i.dataType)) - dt = types[i.name.upper()] - else: - dt = CONVERSIONS[str(i.dataType)] - db_schema.append((i.name.upper(), dt)) - - return db_schema - - def _getCreateTableSchema(self, schema_table_name, new_schema=False): - """ - Parse schema for new table; if it is needed, create it - """ - # try to get schema and table, else set schema to splice - if '.' in schema_table_name: - schema, table = schema_table_name.upper().split('.') - else: - schema = self.getConnection().getCurrentSchemaName() - table = schema_table_name.upper() - # check for new schema - if new_schema: - print('Creating schema {}'.format(schema)) - self.execute('CREATE SCHEMA {}'.format(schema)) - - return schema, table - - def _dropTableIfExists(self, schema_table_name, table_name=None): - """ - Drop table if it exists - """ - if self.tableExists(schema_and_or_table_name=schema_table_name, table_name=table_name): - print('Table exists. Dropping table') - self.dropTable(schema_and_or_table_name=schema_table_name, table_name=table_name) - -
[docs] def dropTableIfExists(self, schema_table_name, table_name=None): - """ - Drops a table if exists - - :Example: - .. code-block:: python - - splice.dropTableIfExists('schemaName.tableName') \n - # or\n - splice.dropTableIfExists('schemaName', 'tableName') - - :param schema_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_table_name contains only the schema name - :return: None - """ - self._dropTableIfExists(schema_table_name, table_name)
- - def _jstructtype(self, schema): - """ - Convert python StructType to java StructType - - :param schema: PySpark StructType - :return: Java Spark StructType - """ - return self.spark_session._jsparkSession.parseDataType(schema.json()) - -
[docs] def createTable(self, dataframe, schema_table_name, primary_keys=None, create_table_options=None, to_upper=False, drop_table=False): - """ - Creates a schema.table (schema_table_name) from a dataframe - - :param dataframe: The Spark DataFrame to base the table off - :param schema_table_name: str The schema.table to create - :param primary_keys: List[str] the primary keys. Default None - :param create_table_options: str The additional table-level SQL options default None - :param to_upper: bool If the dataframe columns should be converted to uppercase before table creation. \ - If False, the table will be created with lower case columns. Default False - :param drop_table: bool whether to drop the table if it exists. Default False. If False and the table exists, the function will throw an exception - :return: None - - """ - if drop_table: - self._dropTableIfExists(schema_table_name) - if to_upper: - dataframe = self.toUpper(dataframe) - primary_keys = primary_keys if primary_keys else [] - self.createTableWithSchema(schema_table_name, dataframe.schema, - keys=primary_keys, create_table_options=create_table_options)
- -
[docs] def createTableWithSchema(self, schema_table_name, schema, keys=None, create_table_options=None): - """ - Creates a schema.table from a schema - - :param schema_table_name: str The schema.table to create - :param schema: (StructType) The schema that describes the columns of the table - :param keys: (List[str]) The primary keys. Default None - :param create_table_options: (str) The additional table-level SQL options. Default None - :return: None - """ - if keys: - keys_seq = self.jvm.PythonUtils.toSeq(keys) - else: - keys_seq = self.jvm.PythonUtils.toSeq([]) - self.context.createTable( - schema_table_name, - self._jstructtype(schema), - keys_seq, - create_table_options - )
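Tying the two creation paths together, a hedged sketch (names are illustrative; `splice` and `df` are from the earlier sketch): `createTable` derives the table schema from a DataFrame, while `createTableWithSchema` takes an explicit StructType:

```python
from pyspark.sql.types import StructType, StructField, IntegerType, StringType

# From a DataFrame (schema inferred from df):
splice.createTable(df, "DEMO.CUSTOMERS", primary_keys=["ID"], to_upper=True, drop_table=True)

# From an explicit schema:
schema = StructType([StructField("ID", IntegerType(), False),
                     StructField("NAME", StringType(), True)])
splice.createTableWithSchema("DEMO.CUSTOMERS2", schema, keys=["ID"])
```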
- - -
-class ExtPySpliceContext(PySpliceContext):
-    """
-    This class implements a SplicemachineContext object from com.splicemachine.spark2 for use outside of the K8s Cloud Service
-    """
-    _spliceSparkPackagesName = "com.splicemachine.spark2.splicemachine.*"
-
-    def _splicemachineContext(self):
-        return self.jvm.com.splicemachine.spark2.splicemachine.SplicemachineContext(
-            self.jdbcurl, self.kafkaServers, self.kafkaPollTimeout)
-
-    def __init__(self, sparkSession, JDBC_URL=None, kafkaServers='localhost:9092', kafkaPollTimeout=20000, _unit_testing=False):
-        """
-        :param sparkSession: (SparkSession) A SparkSession object for talking to Spark
-        :param JDBC_URL: (str) The JDBC URL connection string for your Splice Machine cluster
-        :param kafkaServers: (str) Comma-separated list of Kafka broker addresses in the form host:port
-        :param kafkaPollTimeout: (int) Number of milliseconds to wait when polling Kafka
-        """
-        self.kafkaServers = kafkaServers
-        self.kafkaPollTimeout = kafkaPollTimeout
-        super().__init__(sparkSession, JDBC_URL, _unit_testing)
-
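And a hedged sketch of the external variant (broker address and JDBC URL are placeholders; this assumes ExtPySpliceContext is exported alongside PySpliceContext):

```python
from splicemachine.spark import ExtPySpliceContext

splice_ext = ExtPySpliceContext(
    spark,
    JDBC_URL="jdbc:splice://<host>:1527/splicedb;user=<user>;password=<password>",
    kafkaServers="<kafka-host>:9092")
```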
\ No newline at end of file
diff --git a/docs/_build/html/_modules/splicemachine/features/feature.html b/docs/_build/html/_modules/splicemachine/features/feature.html
deleted file mode 100644
index e182ee36..00000000
--- a/docs/_build/html/_modules/splicemachine/features/feature.html
+++ /dev/null
@@ -1,269 +0,0 @@
Source code for splicemachine.features.feature

-from splicemachine.features.constants import FeatureType
-from .utils.feature_utils import sql_to_datatype
-
-
[docs]class Feature: - def __init__(self, *, name, description, feature_data_type, feature_type, tags, attributes, feature_set_id=None, feature_id=None, **kwargs): - self.name = name.upper() - self.description = description - self.feature_data_type = sql_to_datatype(feature_data_type) - self.feature_type = feature_type - self.feature_set_id = feature_set_id - self.feature_id = feature_id - self.tags = tags - self.attributes = attributes - args = {k.lower(): kwargs[k] for k in kwargs} - self.__dict__.update(args) - -
-    def is_categorical(self):
-        """
-        Returns whether the type of this feature is categorical
-        """
-        return self.feature_type == FeatureType.categorical
-
-    def is_continuous(self):
-        """
-        Returns whether the type of this feature is continuous
-        """
-        return self.feature_type == FeatureType.continuous
-
-    def is_ordinal(self):
-        """
-        Returns whether the type of this feature is ordinal
-        """
-        return self.feature_type == FeatureType.ordinal
- - - def __eq__(self, other): - if isinstance(other, Feature): - return self.name.lower() == other.name.lower() - if isinstance(other, str): - return self.name.lower() == other.lower() - return False - - def __repr__(self): - return self.__str__() - - def __str__(self): - return f'Feature(FeatureID={self.__dict__.get("feature_id","None")}, ' \ - f'FeatureSetID={self.__dict__.get("feature_set_id","None")}, Name={self.name}, \n' \ - f'Description={self.description}, FeatureDataType={self.feature_data_type}, ' \ - f'FeatureType={self.feature_type}, Tags={self.tags})\n' - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - if isinstance(other, str): - return self.name < other - elif isinstance(other, Feature): - return self.name < other.name - raise TypeError(f"< not supported between instances of Feature and {type(other)}")
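A small sketch of the name-based comparison semantics defined above (constructor arguments are illustrative):

```python
from splicemachine.features import Feature
from splicemachine.features.constants import FeatureType

f = Feature(name="age", description="customer age", feature_data_type="INTEGER",
            feature_type=FeatureType.continuous, tags=None, attributes=None)

assert f == "AGE"          # equality is case-insensitive on the name
assert f.is_continuous()
assert f < "WEIGHT"        # ordering compares names lexicographically
```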
\ No newline at end of file
diff --git a/docs/_build/html/_modules/splicemachine/features/feature_set.html b/docs/_build/html/_modules/splicemachine/features/feature_set.html
deleted file mode 100644
index 144cad83..00000000
--- a/docs/_build/html/_modules/splicemachine/features/feature_set.html
+++ /dev/null
@@ -1,251 +0,0 @@
Source code for splicemachine.features.feature_set

-from splicemachine.features import Feature
-from .constants import Columns
-from splicemachine.spark import PySpliceContext
-from typing import List, Dict
-
-
[docs]class FeatureSet: - def __init__(self, *, splice_ctx: PySpliceContext = None, table_name, schema_name, description, - primary_keys: Dict[str, str], feature_set_id=None, deployed: bool = False, **kwargs): - self.splice_ctx = splice_ctx - - self.table_name = table_name.upper() - self.schema_name = schema_name.upper() - self.description = description - self.primary_keys = primary_keys - self.feature_set_id = feature_set_id - self.deployed = deployed - - args = {k.lower(): kwargs[k] for k in kwargs} # Lowercase keys - args = {k: args[k].split(',') if 'columns' in k else args[k] for k in - args} # Make value a list for specific pkcolumns because Splice doesn't support Arrays - self.__dict__.update(args) - self.pk_columns = list(primary_keys.keys()) - -
[docs] def is_deployed(self): - """ - Returns whether or not this Feature Set has been deployed (the schema.table has been created in the database) - :return: (bool) True if the Feature Set is deployed - """ - return self.deployed
- - def __eq__(self, other): - if isinstance(other, FeatureSet): - return self.table_name.lower() == other.table_name.lower() and \ - self.schema_name.lower() == other.schema_name.lower() - return False - - def __repr__(self): - return self.__str__() - - def __str__(self): - return f'FeatureSet(FeatureSetID={self.__dict__.get("feature_set_id", "NA")}, SchemaName={self.schema_name}, ' \ - f'TableName={self.table_name}, Description={self.description}, PKColumns={self.pk_columns}'
\ No newline at end of file
diff --git a/docs/_build/html/_modules/splicemachine/features/feature_store.html b/docs/_build/html/_modules/splicemachine/features/feature_store.html
deleted file mode 100644
index 05e9e488..00000000
--- a/docs/_build/html/_modules/splicemachine/features/feature_store.html
+++ /dev/null
@@ -1,1495 +0,0 @@
Source code for splicemachine.features.feature_store

-from typing import List, Dict, Optional, Union
-from datetime import datetime
-import re
-
-from IPython.display import display
-import pandas as pd
-from pandas import DataFrame as PandasDF
-
-from pyspark.sql.dataframe import DataFrame as SparkDF
-from pyspark.ml import Pipeline
-from pyspark.ml.classification import RandomForestClassifier
-from pyspark.ml.regression import RandomForestRegressor
-from pyspark.ml.feature import StringIndexer, VectorAssembler
-
-from splicemachine import SpliceMachineException
-from splicemachine.features.utils.feature_utils import sql_to_datatype
-from splicemachine.features.utils.search_utils import feature_search_external, feature_search_internal
-from splicemachine.notebook import _in_splice_compatible_env
-from splicemachine.spark import PySpliceContext
-from splicemachine.features import Feature, FeatureSet
-from .training_set import TrainingSet
-from .utils.drift_utils import build_feature_drift_plot, build_model_drift_plot
-from .pipelines import FeatureAggregation
-from .utils.http_utils import RequestType, make_request, _get_feature_store_url, Endpoints, _get_credentials, _get_token
-
-from .constants import SQL, FeatureType
-from .training_view import TrainingView
-import warnings
-from requests.auth import HTTPBasicAuth
-
-
[docs]class FeatureStore: - def __init__(self, splice_ctx: PySpliceContext = None) -> None: - self.splice_ctx = splice_ctx - self.mlflow_ctx = None - self.feature_sets = [] # Cache of newly created feature sets - self._FS_URL = _get_feature_store_url() - if not self._FS_URL: warnings.warn( - "Uh Oh! FS_URL variable was not found... you should call 'fs.set_feature_store_url(<url>)' before doing anything.") - self._auth = None - self.__try_auto_login() - -
[docs] def register_splice_context(self, splice_ctx: PySpliceContext) -> None: - self.splice_ctx = splice_ctx
- -
-    def get_feature_sets(self, feature_set_names: List[str] = None) -> List[FeatureSet]:
-        """
-        Returns a list of available feature sets
-
-        :param feature_set_names: A list of feature set names in the format '{schema_name}.{table_name}'.
-            If None, will return all FeatureSets
-        :return: List[FeatureSet] the list of Feature Sets
-        """
-
-        r = make_request(self._FS_URL, Endpoints.FEATURE_SETS, RequestType.GET, self._auth,
-                         { "name": feature_set_names } if feature_set_names else None)
-        return [FeatureSet(**fs) for fs in r]
- -
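A hedged connection sketch (assumes the feature store URL is auto-discovered or set via the `fs.set_feature_store_url(<url>)` call mentioned in the warning above; `splice` is from the earlier PySpliceContext sketch):

```python
from splicemachine.features import FeatureStore

fs = FeatureStore()
# fs.set_feature_store_url("<feature-store-url>")  # if not auto-discovered
fs.register_splice_context(splice)   # optional: enables Spark-backed operations
all_sets = fs.get_feature_sets()     # every FeatureSet registered in the store
```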
[docs] def remove_training_view(self, name: str): - """ - This removes a training view if it is not being used by any currently deployed models. - NOTE: Once this training view is removed, you will not be able to deploy any models that were trained using this - view - - :param name: The view name - """ - print(f"Removing Training View {name}...", end=' ') - make_request(self._FS_URL, Endpoints.TRAINING_VIEWS, RequestType.DELETE, self._auth, { "name": name }) - print('Done.')
- -
-    def get_summary(self) -> Dict:
-        """
-        This function returns a summary of the feature store including:
-        * Number of feature sets
-        * Number of deployed feature sets
-        * Number of features
-        * Number of deployed features
-        * Number of training sets
-        * Number of training views
-        * Number of associated models - this is a count of the MLManager.RUNS table where the `splice.model_name` tag is set and the `splice.feature_store.training_set` parameter is set
-        * Number of active (deployed) models (that have used the feature store for training)
-        * Number of pending feature sets - this will require a new table `featurestore.pending_feature_set_deployments`, and it will be a count of that table
-        """
-
-        r = make_request(self._FS_URL, Endpoints.SUMMARY, RequestType.GET, self._auth)
-        return r
- -
[docs] def get_training_view(self, training_view: str) -> TrainingView: - """ - Gets a training view by name - - :param training_view: Training view name - :return: TrainingView - """ - - r = make_request(self._FS_URL, Endpoints.TRAINING_VIEWS, RequestType.GET, self._auth, { "name": training_view }) - return TrainingView(**r[0])
- -
-    def get_training_views(self, _filter: Dict[str, Union[int, str]] = None) -> List[TrainingView]:
-        """
-        Returns a list of all available training views with an optional filter
-
-        :param _filter: Dictionary containing the filter keyword (label, description etc) and the value to filter on.
-            If None, will return all TrainingViews
-        :return: List[TrainingView]
-        """
-
-        r = make_request(self._FS_URL, Endpoints.TRAINING_VIEWS, RequestType.GET, self._auth)
-        return [TrainingView(**tv) for tv in r]
- -
[docs] def get_training_view_id(self, name: str) -> int: - """ - Returns the unique view ID from a name - - :param name: The training view name - :return: The training view id - """ - # return self.splice_ctx.df(SQL.get_training_view_id.format(name=name)).collect()[0][0] - r = make_request(self._FS_URL, Endpoints.TRAINING_VIEW_ID, RequestType.GET, self._auth, { "name": name }) - return int(r)
- -
[docs] def get_features_by_name(self, names: Optional[List[str]] = None, as_list=False) -> Union[List[Feature], SparkDF]: - """ - Returns a dataframe or list of features whose names are provided - - :param names: The list of feature names - :param as_list: Whether or not to return a list of features. Default False - :return: SparkDF or List[Feature] The list of Feature objects or Spark Dataframe of features and their metadata. Note, this is not the Feature - values, simply the describing metadata about the features. To create a training dataset with Feature values, see - :py:meth:`features.FeatureStore.get_training_set` or :py:meth:`features.FeatureStore.get_feature_dataset` - """ - r = make_request(self._FS_URL, Endpoints.FEATURES, RequestType.GET, self._auth, { "name": names }) - return [Feature(**f) for f in r] if as_list else pd.DataFrame.from_dict(r)
- -
-    def training_view_exists(self, name: str) -> bool:
-        """
-        Returns whether a training view exists
-
-        :param name: The training view name
-        :return: bool True if the training view exists, False otherwise
-        """
-        r = make_request(self._FS_URL, Endpoints.TRAINING_VIEW_EXISTS, RequestType.GET, self._auth, params={ "name": name })
-        return r
-
-    def feature_exists(self, name: str) -> bool:
-        """
-        Returns whether a feature exists
-
-        :param name: The feature name
-        :return: bool True if the feature exists, False otherwise
-        """
-        r = make_request(self._FS_URL, Endpoints.FEATURE_EXISTS, RequestType.GET, self._auth, params={ "name": name })
-        return r
-
-    def feature_set_exists(self, schema: str, table: str) -> bool:
-        """
-        Returns whether a feature set exists
-
-        :param schema: The feature set schema
-        :param table: The feature set table
-        :return: bool True if the feature set exists, False otherwise
-        """
-        r = make_request(self._FS_URL, Endpoints.FEATURE_SET_EXISTS, RequestType.GET, self._auth,
-                         params={ "schema": schema, "table": table })
-        return r
- -
-    def get_feature_details(self, name: str) -> Feature:
-        """
-        Returns a Feature and its detailed information
-
-        :param name: The feature name
-        :return: Feature
-        """
-        r = make_request(self._FS_URL, Endpoints.FEATURE_DETAILS, RequestType.GET, self._auth, { "name": name })
-        return Feature(**r)
- -
[docs] def get_feature_vector(self, features: List[Union[str, Feature]], - join_key_values: Dict[str, str], return_primary_keys = True, return_sql=False) -> Union[str, PandasDF]: - """ - Gets a feature vector given a list of Features and primary key values for their corresponding Feature Sets - - :param features: List of str Feature names or Features - :param join_key_values: (dict) join key values to get the proper Feature values formatted as {join_key_column_name: join_key_value} - :param return_primary_keys: Whether to return the Feature Set primary keys in the vector. Default True - :param return_sql: Whether to return the SQL needed to get the vector or the values themselves. Default False - :return: Pandas Dataframe or str (SQL statement) - """ - features = [f if isinstance(f, str) else f.__dict__ for f in features] - r = make_request(self._FS_URL, Endpoints.FEATURE_VECTOR, RequestType.POST, self._auth, - params={ "pks": return_primary_keys, "sql": return_sql }, - body={ "features": features, "join_key_values": join_key_values }) - return r if return_sql else pd.DataFrame(r, index=[0])
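A minimal usage sketch (not from the source): it assumes `fs` is an authenticated FeatureStore instance and uses hypothetical feature and join key names.

.. code-block:: python

    # Hypothetical feature and join key names
    vector = fs.get_feature_vector(
        features=['TOTAL_SPEND', 'VISIT_COUNT'],
        join_key_values={'CUSTOMERID': '10001'},
        return_primary_keys=True,
        return_sql=False  # set True to get the SQL statement instead of the values
    )
    print(vector)  # single-row Pandas DataFrame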
- - -
[docs] def get_feature_vector_sql_from_training_view(self, training_view: str, features: List[Union[str,Feature]]) -> str: - """ - Returns the parameterized feature retrieval SQL used for online model serving. - - :param training_view: (str) The name of the registered training view - :param features: (List[str]) the list of features from the feature store to be included in the training - - :NOTE: - .. code-block:: text - - This function will error if the view SQL is missing a view key required \n - to retrieve the desired features - - :return: (str) the parameterized feature vector SQL - """ - features = [f if isinstance(f, str) else f.__dict__ for f in features] - r = make_request(self._FS_URL, Endpoints.FEATURE_VECTOR_SQL, RequestType.POST, self._auth, - { "view": training_view }, features) - return r
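For illustration, a short sketch of fetching the serving SQL, again assuming `fs` is an authenticated FeatureStore; the view and feature names are hypothetical.

.. code-block:: python

    # Returns parameterized SQL for online feature retrieval
    serving_sql = fs.get_feature_vector_sql_from_training_view(
        training_view='CUSTOMER_LTV',             # hypothetical registered view
        features=['TOTAL_SPEND', 'VISIT_COUNT']
    )
    print(serving_sql)  # substitute the join key values before executing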
- -
[docs] def get_feature_primary_keys(self, features: List[str]) -> Dict[str, List[str]]: - """ - Returns a dictionary mapping each individual feature to its primary key(s). This function is not yet implemented. - - :param features: (List[str]) The list of features to get primary keys for - :return: Dict[str, List[str]] A mapping of {feature name: [pk1, pk2, etc]} - """ - pass
- -
[docs] def get_training_view_features(self, training_view: str) -> List[Feature]: - """ - Returns the available features for a given training view name - - :param training_view: The name of the training view - :return: A list of available Feature objects - """ - - r = make_request(self._FS_URL, Endpoints.TRAINING_VIEW_FEATURES, RequestType.GET, - self._auth, { "view": training_view }) - return [Feature(**f) for f in r]
- -
[docs] def get_feature_description(self): - # TODO - raise NotImplementedError
- -
[docs] def get_training_set(self, features: Union[List[Feature], List[str]], current_values_only: bool = False, - start_time: datetime = None, end_time: datetime = None, label: str = None, return_pk_cols: bool = False, - return_ts_col: bool = False, return_sql: bool = False, save_as: str = None) -> SparkDF or str: - """ - Gets a set of feature values across feature sets that is not time dependent (i.e. for non-time-series clustering). - The dataset's metadata and the features used will be tracked in mlflow automatically, the same way a training set - from :py:meth:`features.FeatureStore.get_training_set_from_view` is tracked (see that function for more details). - - :NOTE: - .. code-block:: text - - The way point-in-time correctness is guaranteed here is by choosing one of the Feature Sets as the "anchor" dataset. - This means that the points in time that the query is based off of will be the points in time in which the anchor - Feature Set recorded changes. The anchor Feature Set is the Feature Set that contains the superset of all primary key - columns across all Feature Sets from all Features provided. If more than 1 Feature Set contains the superset of - all primary key columns, the Feature Set with the most primary keys is selected. If more than 1 Feature Set has the same - maximum number of primary keys, the Feature Set is chosen by alphabetical order (schema_name, table_name). - - :param features: List of Features or strings of feature names - - :NOTE: - .. code-block:: text - - The Feature Sets which the list of Features come from must have common join keys, - otherwise the function will fail. If there is no common join key, it is recommended to - create a Training View to specify the join conditions. - - :param current_values_only: If you only want the most recent values of the features, set this to true. Otherwise, all history will be returned. Default False - :param start_time: How far back in history you want Feature values. If not specified (and current_values_only is False), all history will be returned. - This parameter only takes effect if current_values_only is False. - :param end_time: The most recent values for each selected Feature. This will be the cutoff time, such that any Feature values that - were updated after this point in time won't be selected. If not specified (and current_values_only is False), - Feature values up to the moment in time you call the function (now) will be retrieved. This parameter - only takes effect if current_values_only is False. - :param label: An optional label to specify for the training set. If specified, the feature set of that feature - will be used as the "anchor" feature set, meaning all point in time joins will be made to the timestamps of - that feature set. This feature will also be recorded as a "label" feature for this particular training set - (but not others in the future, unless this label is again specified). - :param return_pk_cols: bool Whether or not the returned sql should include the primary key column(s) - :param return_ts_col: bool Whether or not the returned sql should include the timestamp column - :param save_as: Whether or not to save this Training Set (metadata) in the feature store for reproducibility. This - enables you to version and persist the metadata for a training set of a specific model development. 
If you are - using the Splice Machine managed MLFlow Service, this will be fully automated and managed for you upon model deployment, - however you can still use this parameter to customize the name of the training set (it will default to the run id). - If you are NOT using Splice Machine's mlflow service, this is a useful way to link specific modeling experiments - to the exact training sets used. This DOES NOT persist the training set itself, rather the metadata required - to reproduce the identical training set. - :return: Spark DF or SQL statement necessary to generate the Training Set - """ - features = [f if isinstance(f, str) else f.__dict__ for f in features] - r = make_request(self._FS_URL, Endpoints.TRAINING_SETS, RequestType.POST, self._auth, - params={ "current": current_values_only, "label": label, "pks": return_pk_cols, "ts": return_ts_col, - 'save_as':save_as }, - body={ "features": features, "start_time": start_time, "end_time": end_time}) - create_time = r['metadata']['training_set_create_ts'] - start_time = r['metadata']['training_set_start_ts'] - end_time = r['metadata']['training_set_end_ts'] - sql = r['sql'] - tvw = TrainingView(**r['training_view']) if r.get('training_view') else None - features = [Feature(**f) for f in r['features']] - - if self.mlflow_ctx and not return_sql: - # These will only exist if the user called "save_as" otherwise they will be None - training_set_id = r['metadata'].get('training_set_id') - training_set_version = r['metadata'].get('training_set_version') - self.link_training_set_to_mlflow(features, create_time, start_time, end_time, tvw, - training_set_id=training_set_id, - training_set_version=training_set_version,training_set_name=save_as) - return sql if return_sql else self.splice_ctx.df(sql)
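A usage sketch under assumptions: `fs` is an authenticated FeatureStore with a registered splice_ctx, and the feature and label names are hypothetical.

.. code-block:: python

    from datetime import datetime

    # Hypothetical feature names; CHURN_LABEL anchors the point-in-time joins
    training_df = fs.get_training_set(
        features=['TOTAL_SPEND', 'VISIT_COUNT', 'CHURN_LABEL'],
        start_time=datetime(2020, 1, 1),
        end_time=datetime(2020, 6, 30),
        label='CHURN_LABEL',
        save_as='churn_training_v1'  # persist the metadata for reproducibility
    )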
- -
[docs] def get_training_set_by_name(self, name, version: int = None, return_pk_cols: bool = False, - return_ts_col: bool = False, return_sql = False): - """ - Returns a Spark DF (or SQL) of an EXISTING Training Set (one that was saved with the save_as parameter in - :py:meth:`~fs.get_training_set` or :py:meth:`~fs.get_training_set_from_view`. This is useful if you've deployed - a model with a Training Set and want to rebuild the exact data that was used to train it. - - :param name: Training Set name - :param version: The version of this training set. If not set, it will grab the newest version - :param return_pk_cols: bool Whether or not the returned sql should include the primary key column(s) - :param return_ts_col: bool Whether or not the returned sql should include the timestamp column - :param return_sql: (Optional[bool]) Return the SQL statement (str) instead of the Spark DF. Defaults False - :return: Spark DF or SQL - """ - - r = make_request(self._FS_URL, Endpoints.TRAINING_SET_BY_NAME, RequestType.GET, self._auth, - params={ "name": name, "version": version, "pks": return_pk_cols, "ts": return_ts_col}) - sql = r["sql"] - tvw = TrainingView(**r["training_view"]) - features = [Feature(**f) for f in r["features"]] - create_time = r['metadata']['training_set_create_ts'] - start_time = r['metadata']['training_set_start_ts'] - end_time = r['metadata']['training_set_end_ts'] - # Link this to mlflow for reproducibility and model deployment - if self.mlflow_ctx and not return_sql: - # These will only exist if the user called "save_as" otherwise they will be None - training_set_id = r['metadata'].get('training_set_id') - self.link_training_set_to_mlflow(features, create_time, start_time, end_time, tvw, - training_set_id=training_set_id, - training_set_version=version, training_set_name=name) - - return sql if return_sql else self.splice_ctx.df(sql)
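For example (a sketch; the training set name is hypothetical and assumes it was previously saved with save_as):

.. code-block:: python

    # Newest version as a Spark DataFrame
    df = fs.get_training_set_by_name('churn_training_v1')
    # A specific version, returned as the generating SQL
    sql = fs.get_training_set_by_name('churn_training_v1', version=2, return_sql=True)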
- -
[docs] def get_training_set_from_view(self, training_view: str, features: Union[List[Feature], List[str]] = None, - start_time: Optional[datetime] = None, end_time: Optional[datetime] = None, - return_pk_cols: bool = False, return_ts_col: bool = False, return_sql: bool = False, - save_as: str = None) -> SparkDF or str: - """ - Returns the training set as a Spark Dataframe from a Training View. When a user calls this function (assuming they have registered - the feature store with mlflow using :py:meth:`~mlflow.register_feature_store` ) - the training dataset's metadata will be tracked in mlflow automatically. The following will be tracked: - * Training View - * Selected features - * Start time - * End time - This tracking will occur in the current run (if there is an active run) - or in the next run that is started after calling this function (if no run is currently active). - - :param training_view: (str) The name of the registered training view - :param features: (List[str] OR List[Feature]) the list of features from the feature store to be included in the training. - If a list of strings is passed in it will be converted to a list of Feature. If not provided will return all available features. - - :NOTE: - .. code-block:: text - - This function will error if the view SQL is missing a join key required to retrieve the - desired features - - :param start_time: (Optional[datetime]) The start time of the query (how far back in the data to start). Default None - - :NOTE: - .. code-block:: text - - If start_time is None, query will start from beginning of history - - :param end_time: (Optional[datetime]) The end time of the query (how far recent in the data to get). Default None - - :NOTE: - .. code-block:: text - - If end_time is None, query will get most recently available data - - :param return_pk_cols: bool Whether or not the returned sql should include the primary key column(s) - :param return_ts_col: bool Whether or not the returned sql should include the timestamp column - :param return_sql: (Optional[bool]) Return the SQL statement (str) instead of the Spark DF. Defaults False - :param save_as: Whether or not to save this Training Set (metadata) in the feature store for reproducibility. This - enables you to version and persist the metadata for a training set of a specific model development. If you are - using the Splice Machine managed MLFlow Service, this will be fully automated and managed for you upon model deployment, - however you can still use this parameter to customize the name of the training set (it will default to the run id). - If you are NOT using Splice Machine's mlflow service, this is a useful way to link specific modeling experiments - to the exact training sets used. This DOES NOT persist the training set itself, rather the metadata required - to reproduce the identical training set. 
- :return: Optional[SparkDF, str] The Spark dataframe of the training set or the SQL that is used to generate it (for debugging) - """ - - # Generate the SQL needed to create the dataset - features = [f if isinstance(f, str) else f.__dict__ for f in features] if features else None - r = make_request(self._FS_URL, Endpoints.TRAINING_SET_FROM_VIEW, RequestType.POST, self._auth, - { "view": training_view, "pks": return_pk_cols, "ts": return_ts_col, 'save_as': save_as }, - { "features": features, "start_time": start_time, "end_time": end_time }) - sql = r["sql"] - tvw = TrainingView(**r["training_view"]) - features = [Feature(**f) for f in r["features"]] - create_time = r['metadata']['training_set_create_ts'] - start_time = r['metadata']['training_set_start_ts'] - end_time = r['metadata']['training_set_end_ts'] - # Link this to mlflow for reproducibility and model deployment - if self.mlflow_ctx and not return_sql: - # These will only exist if the user called "save_as" otherwise they will be None - training_set_id = r['metadata'].get('training_set_id') - training_set_version = r['metadata'].get('training_set_version') - self.link_training_set_to_mlflow(features, create_time, start_time, end_time, tvw, - training_set_id=training_set_id, - training_set_version=training_set_version, training_set_name=save_as) - - return sql if return_sql else self.splice_ctx.df(sql)
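A usage sketch, assuming `fs` is an authenticated FeatureStore and the view and feature names below are hypothetical:

.. code-block:: python

    from datetime import datetime

    train_df = fs.get_training_set_from_view(
        training_view='CUSTOMER_CHURN',
        features=['TOTAL_SPEND', 'VISIT_COUNT'],  # omit to pull all available features
        start_time=datetime(2020, 1, 1),
        end_time=None,                            # None -> most recently available data
        return_pk_cols=True
    )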
- -
[docs] def list_training_sets(self) -> Dict[str, Optional[str]]: - """ - Returns a dictionary of the available training sets, mapping name -> description. If there is no description, - the value will be an empty string - - :return: Dict[str, Optional[str]] - """ - raise NotImplementedError("To see available training views, run fs.describe_training_views()")
- - -
[docs] def create_feature_set(self, schema_name: str, table_name: str, primary_keys: Dict[str, str], - desc: Optional[str] = None, features: Optional[List[Feature]] = None) -> FeatureSet: - """ - Creates and returns a new feature set - - :param schema_name: The schema under which to create the feature set table - :param table_name: The table name for this feature set - :param primary_keys: The primary key column(s) of this feature set - :param desc: The (optional) description - :param features: An optional list of features. If provided, the Features will be created with the Feature Set - :Example: - .. code-block:: python - - from splicemachine.features import FeatureType, Feature - f1 = Feature( - name='my_first_feature', - description='the first feature', - feature_data_type='INT', - feature_type=FeatureType.ordinal, - tags=['good_feature','a new tag', 'ordinal'], - attributes={'quality':'awesome'} - ) - f2 = Feature( - name='my_second_feature', - description='the second feature', - feature_data_type='FLOAT', - feature_type=FeatureType.continuous, - tags=['not_as_good_feature','a new tag'], - attributes={'quality':'not as awesome'} - ) - feats = [f1, f2] - feature_set = fs.create_feature_set( - schema_name='splice', - table_name='foo', - primary_keys={'MOMENT_KEY':"INT"}, - desc='test fset', - features=feats - ) - - :return: FeatureSet - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - features = [f.__dict__ for f in features] if features else None - fset_dict = { "schema_name": schema_name, - "table_name": table_name, - "primary_keys": {pk: sql_to_datatype(primary_keys[pk]) for pk in primary_keys}, - "description": desc, - "features": features} - - print(f'Registering feature set {schema_name}.{table_name} in Feature Store') - if features: - print(f'Registering {len(features)} features for {schema_name}.{table_name} in the Feature Store') - r = make_request(self._FS_URL, Endpoints.FEATURE_SETS, RequestType.POST, self._auth, body=fset_dict) - return FeatureSet(**r)
- -
[docs] def update_feature_metadata(self, name: str, desc: Optional[str] = None, tags: Optional[List[str]] = None, - attributes: Optional[Dict[str,str]] = None): - """ - Update the metadata of a feature - - :param name: The feature name - :param desc: The (optional) feature description (default None) - :param tags: (optional) List of (str) tag words (default None) - :param attributes: (optional) Dict of (str) attribute key/value pairs (default None) - :return: updated Feature - """ - f_dict = { "description": desc, 'tags': tags, "attributes": attributes } - print(f'Updating feature {name} in Feature Store') - r = make_request(self._FS_URL, Endpoints.FEATURES, RequestType.PUT, self._auth, - params={"name": name}, body=f_dict) - f = Feature(**r) - return f
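For example (a sketch with a hypothetical feature name):

.. code-block:: python

    updated = fs.update_feature_metadata(
        name='TOTAL_SPEND',
        desc='Total customer spend to date',
        tags=['monetary', 'rfm'],
        attributes={'quality': 'verified'}
    )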
- -
[docs] def create_feature(self, schema_name: str, table_name: str, name: str, feature_data_type: str, - feature_type: str, desc: str = None, tags: List[str] = None, attributes: Dict[str, str] = None): - """ - Add a feature to a feature set - - :param schema_name: The feature set schema - :param table_name: The feature set table name to add the feature to - :param name: The feature name - :param feature_data_type: The datatype of the feature. Must be a valid SQL datatype - :param feature_type: splicemachine.features.FeatureType of the feature. The available types are from the FeatureType class: FeatureType.[categorical, ordinal, continuous]. - You can see available feature types by running - - .. code-block:: python - - from splicemachine.features import FeatureType - print(FeatureType.get_valid()) - - :param desc: The (optional) feature description (default None) - :param tags: (optional) List of (str) tag words (default None) - :param attributes: (optional) Dict of (str) attribute key/value pairs (default None) - :return: Feature created - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - assert feature_type in FeatureType.get_valid(), f"The feature_type {feature_type} is not valid. Valid feature " \ - f"types include {FeatureType.get_valid()}. Use the FeatureType" \ - f" class provided by splicemachine.features" - - f_dict = { "name": name, "description": desc or '', "feature_data_type": sql_to_datatype(feature_data_type), - "feature_type": feature_type, "tags": tags, "attributes": attributes } - print(f'Registering feature {name} in Feature Store') - r = make_request(self._FS_URL, Endpoints.FEATURES, RequestType.POST, self._auth, - { "schema": schema_name, "table": table_name }, f_dict) - f = Feature(**r) - return f
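A usage sketch (the schema, table, and feature names are hypothetical):

.. code-block:: python

    from splicemachine.features import FeatureType

    f = fs.create_feature(
        schema_name='retail_fs',
        table_name='customer_features',
        name='VISIT_COUNT',
        feature_data_type='INT',
        feature_type=FeatureType.ordinal,
        desc='Number of store visits to date',
        tags=['engagement']
    )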
- # TODO: Backfill the feature - -
[docs] def create_training_view(self, name: str, sql: str, primary_keys: List[str], join_keys: List[str], - ts_col: str, label_col: Optional[str] = None, replace: Optional[bool] = False, - desc: Optional[str] = None, verbose=False) -> None: - """ - Registers a training view for use in generating training SQL - - :param name: The training view name. This must be unique among existing training views unless replace is True - :param sql: (str) a SELECT statement that includes: - * the primary key column(s) - uniquely identifying a training row/case - * the inference timestamp column - timestamp column with which to join features (temporal join timestamp) - * join key(s) - the references to the other feature tables' primary keys (ie customer_id, location_id) - * (optionally) the label expression - defining what the training set is trying to predict - :param primary_keys: (List[str]) The list of columns from the training SQL that identify the training row - :param ts_col: The timestamp column of the training SQL that identifies the inference timestamp - :param label_col: (Optional[str]) The optional label column from the training SQL. - :param replace: (Optional[bool]) Whether to replace an existing training view - :param join_keys: (List[str]) A list of join keys in the sql that are used to get the desired features in - get_training_set - :param desc: (Optional[str]) An optional description of the training view - :param verbose: Whether or not to print the SQL before execution (default False) - :return: - """ - assert name != "None", "Name of training view cannot be None!" - - tv_dict = { "name": name, "description": desc, "pk_columns": primary_keys, "ts_column": ts_col, "label_column": label_col, - "join_columns": join_keys, "sql_text": sql} - print(f'Registering Training View {name} in the Feature Store') - make_request(self._FS_URL, Endpoints.TRAINING_VIEWS, RequestType.POST, self._auth, body=tv_dict)
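A registration sketch (the source table and column names are hypothetical):

.. code-block:: python

    view_sql = """
        SELECT CUSTOMERID,         -- primary key / join key
               INVOICEDATE AS TS,  -- inference timestamp
               CHURNED AS LABEL    -- optional label expression
        FROM RETAIL.ORDERS
    """
    fs.create_training_view(
        name='CUSTOMER_CHURN',
        sql=view_sql,
        primary_keys=['CUSTOMERID'],
        join_keys=['CUSTOMERID'],
        ts_col='TS',
        label_col='LABEL',
        desc='Churn training view keyed on customer'
    )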
- - def _process_features(self, features: List[Union[Feature, str]]) -> List[Feature]: - """ - Processes a list of Features or feature names. Strings are converted to Features; Feature objects pass through unchanged - - :param features: The list of Feature names or Feature objects - :return: List[Feature] - """ - feat_str = [f for f in features if isinstance(f, str)] - str_to_feat = self.get_features_by_name(names=feat_str, as_list=True) if feat_str else [] - all_features = str_to_feat + [f for f in features if not isinstance(f, str)] - assert all( - [isinstance(i, Feature) for i in all_features]), "It seems you've passed in Features that are neither" \ - " a feature name (string) nor a Feature object" - return all_features - -
[docs] def deploy_feature_set(self, schema_name: str, table_name: str): - """ - Deploys a feature set to the database. This persists the feature set's existence. - As of now, once deployed you cannot delete the feature set or add/delete features. - The feature set must have already been created with :py:meth:`~features.FeatureStore.create_feature_set` - - :param schema_name: The schema of the created feature set - :param table_name: The table of the created feature set - """ - - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - print(f'Deploying Feature Set {schema_name}.{table_name}...',end=' ') - make_request(self._FS_URL, Endpoints.DEPLOY_FEATURE_SET, RequestType.POST, self._auth, { "schema": schema_name, "table": table_name }) - print('Done.')
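A two-step sketch of creation followed by deployment (hypothetical names):

.. code-block:: python

    fs.create_feature_set(schema_name='retail_fs', table_name='customer_features',
                          primary_keys={'CUSTOMERID': 'INT'})
    fs.deploy_feature_set('retail_fs', 'customer_features')  # persists the table in the database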
- -
[docs] def describe_feature_sets(self) -> None: - """ - Prints out a description of all feature sets, with all features in the feature sets and whether the feature - set is deployed - - :return: None - """ - r = make_request(self._FS_URL, Endpoints.FEATURE_SET_DETAILS, RequestType.GET, self._auth) - - print('Available feature sets') - for desc in r: - features = [Feature(**feature) for feature in desc.pop('features')] - fset = FeatureSet(**desc) - print('-' * 23) - self._feature_set_describe(fset, features)
- -
[docs] def describe_feature_set(self, schema_name: str, table_name: str) -> None: - """ - Prints out a description of a given feature set, with all features in the feature set and whether the feature - set is deployed - - :param schema_name: feature set schema name - :param table_name: feature set table name - :return: None - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - r = make_request(self._FS_URL, Endpoints.FEATURE_SET_DETAILS, RequestType.GET, self._auth, - params={'schema':schema_name, 'table':table_name}) - descs = r - if not descs: raise SpliceMachineException( - f"Feature Set {schema_name}.{table_name} not found. Check name and try again.") - desc = descs[0] - features = [Feature(**feature) for feature in desc.pop("features")] - fset = FeatureSet(**desc) - self._feature_set_describe(fset, features)
- - def _feature_set_describe(self, fset: FeatureSet, features: List[Feature]): - print(f'{fset.schema_name}.{fset.table_name} - {fset.description}') - print('Primary keys:', fset.primary_keys) - print('\nAvailable features:') - display(pd.DataFrame(f.__dict__ for f in features)) - -
[docs] def describe_training_views(self) -> None: - """ - Prints out a description of all training views, the ID, name, description and optional label - - :return: None - """ - r = make_request(self._FS_URL, Endpoints.TRAINING_VIEW_DETAILS, RequestType.GET, self._auth) - - print('Available training views') - for desc in r: - features = [Feature(**f) for f in desc.pop('features')] - tcx = TrainingView(**desc) - print('-' * 23) - self._training_view_describe(tcx, features)
- -
[docs] def describe_training_view(self, training_view: str) -> None: - """ - Prints out a description of a given training view, the ID, name, description and optional label - - :param training_view: The training view name - :return: None - """ - - r = make_request(self._FS_URL, Endpoints.TRAINING_VIEW_DETAILS, RequestType.GET, self._auth, {'name': training_view}) - descs = r - if not descs: raise SpliceMachineException(f"Training view {training_view} not found. Check name and try again.") - desc = descs[0] - feats = [Feature(**f) for f in desc.pop('features')] - tcx = TrainingView(**desc) - self._training_view_describe(tcx, feats)
- - def _training_view_describe(self, tcx: TrainingView, feats: List[Feature]): - print(f'ID({tcx.view_id}) {tcx.name} - {tcx.description} - LABEL: {tcx.label_column}') - print(f'Available features in {tcx.name}:') - - col_order = ['name', 'description', 'feature_data_type', 'feature_set_name', 'feature_type', 'tags', - 'last_update_ts', - 'last_update_username', 'compliance_level', 'feature_set_id', 'feature_id'] - display(pd.DataFrame(f.__dict__ for f in feats)[col_order]) - -
[docs] def set_feature_description(self): - raise NotImplementedError
- -
[docs] def get_training_set_from_deployment(self, schema_name: str, table_name: str, label: str = None, - return_pk_cols: bool = False, return_ts_col: bool = False): - """ - Reads Feature Store metadata to rebuild the original training data set used for the given deployed model. - - :param schema_name: model schema name - :param table_name: model table name - :param label: An optional label to specify for the training set. If specified, the feature set of that feature - will be used as the "anchor" feature set, meaning all point in time joins will be made to the timestamps of - that feature set. This feature will also be recorded as a "label" feature for this particular training set - (but not others in the future, unless this label is again specified). - :param return_pk_cols: bool Whether or not the returned sql should include the primary key column(s) - :param return_ts_col: bool Whether or not the returned sql should include the timestamp column - :return: SparkDF the Training Frame - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - r = make_request(self._FS_URL, Endpoints.TRAINING_SET_FROM_DEPLOYMENT, RequestType.GET, self._auth, - { "schema": schema_name, "table": table_name, "label": label, "pks": return_pk_cols, "ts": return_ts_col}) - - metadata = r['metadata'] - sql = r['sql'] - - tv_name = metadata['name'] - start_time = metadata['training_set_start_ts'] - end_time = metadata['training_set_end_ts'] - create_time = metadata['training_set_create_ts'] - - tv = TrainingView(**r['training_view']) if 'training_view' in r else None - features = [Feature(**f) for f in r['features']] - - if self.mlflow_ctx: - self.link_training_set_to_mlflow(features, create_time, start_time, end_time, tv) - return self.splice_ctx.df(sql)
- -
[docs] def remove_feature(self, name: str): - """ - Removes a feature. This will run 2 checks: - 1. That the feature exists. - 2. That the feature does not belong to a feature set that has already been deployed. - - If either check fails, this function will throw an error explaining which check has failed - - :param name: feature name - :return: - """ - print(f"Removing feature {name}...",end=' ') - make_request(self._FS_URL, Endpoints.FEATURES, RequestType.DELETE, self._auth, { "name": name }) - print('Done.')
- -
[docs] def get_deployments(self, schema_name: str = None, table_name: str = None, training_set: str = None, - feature: str = None, feature_set: str = None): - """ - Returns a list of all (or specified) available deployments - - :param schema_name: model schema name - :param table_name: model table name - :param training_set: training set name - :param feature: passing this in will return all deployments that used this feature - :param feature_set: passing this in will return all deployments that used this feature set - :return: List[Deployment] the list of Deployments as dicts - """ - return make_request(self._FS_URL, Endpoints.DEPLOYMENTS, RequestType.GET, self._auth, - { 'schema': schema_name, 'table': table_name, 'name': training_set, 'feat': feature, 'fset': feature_set})
- -
[docs] def get_training_set_features(self, training_set: str = None): - """ - Returns a list of all features from an available Training Set, as well as details about that Training Set - - :param training_set: training set name - :return: TrainingSet as dict - """ - r = make_request(self._FS_URL, Endpoints.TRAINING_SET_FEATURES, RequestType.GET, self._auth, - { 'name': training_set }) - r['features'] = [Feature(**f) for f in r['features']] - return r
- -
[docs] def remove_feature_set(self, schema_name: str, table_name: str, purge: bool = False) -> None: - """ - Deletes a feature set if appropriate. You can currently delete a feature set in two scenarios: - 1. The feature set has not been deployed - 2. The feature set has been deployed, but not linked to any training sets - - If both of these conditions are false, this will fail. - - Optionally set purge=True to force delete the feature set and all of the associated Training Sets using the - Feature Set. ONLY USE IF YOU KNOW WHAT YOU ARE DOING. This will delete Training Sets, but will still fail if - there is an active deployment with this feature set. That cannot be overwritten - - :param schema_name: The Feature Set Schema - :param table_name: The Feature Set Table - :param purge: Whether to force delete training sets that use the feature set (that are not used in deployments) - """ - if purge: - warnings.warn("You've set purge=True, I hope you know what you are doing! This will delete any dependent" - " Training Sets (except ones used in an active model deployment)") - print(f'Removing Feature Set {schema_name}.{table_name}...',end=' ') - make_request(self._FS_URL, Endpoints.FEATURE_SETS, - RequestType.DELETE, self._auth, { "schema": schema_name, "table":table_name, "purge": purge }) - print('Done.')
- -
[docs] def create_source(self, name: str, sql: str, event_ts_column: str, - update_ts_column: str, primary_keys: List[str]): - """ - Creates, validates, and stores a source in the Feature Store that can be used to create a Pipeline that - feeds a feature set - - :Example: - .. code-block:: python - - fs.create_source( - name='CUSTOMER_RFM', - sql='SELECT * FROM RETAIL_RFM.CUSTOMER_CATEGORY_ACTIVITY', - event_ts_column='INVOICEDATE', - update_ts_column='LAST_UPDATE_TS', - primary_keys=['CUSTOMERID'] - ) - - :param name: The name of the source. This must be unique across the feature store - :param sql: the SQL statement that returns the base result set to be used in future aggregation pipelines - :param event_ts_column: The column of the source query that determines the time of the event (row) being - described. This is not necessarily the time the record was recorded, but the time the event itself occurred. - - :param update_ts_column: The column that indicates the time when the record was last updated. When scheduled - pipelines run, they will filter on this column to get only the records that have not been queried before. - - :param primary_keys: The list of columns in the source SQL that uniquely identifies each row. These become - the primary keys of the feature set(s) that is/are eventually created from this source. - """ - source = { - 'name': name.upper(), - 'sql_text': sql, - 'event_ts_column': event_ts_column, - 'update_ts_column': update_ts_column, - 'pk_columns': primary_keys - - } - print(f'Registering Source {name.upper()} in the Feature Store') - make_request(self._FS_URL, Endpoints.SOURCE, method=RequestType.POST, auth=self._auth, body=source)
- -
[docs] def remove_source(self, name: str): - """ - Removes a Source by name. You cannot remove a Source that has child dependencies (Feature Sets). If there is a - Feature Set that is deployed and a Pipeline that is feeding it, you cannot delete the Source until you remove - the Feature Set (which in turn removes the Pipeline) - - :param name: The Source name - """ - print(f'Deleting Source {name}...',end=' ') - make_request(self._FS_URL, Endpoints.SOURCE, method=RequestType.DELETE, - auth=self._auth, params={'name': name}) - print('Done.')
- -
[docs] def create_aggregation_feature_set_from_source(self, source_name: str, schema_name: str, table_name: str, - start_time: datetime, schedule_interval: str, - aggregations: List[FeatureAggregation], - backfill_start_time: datetime = None, backfill_interval: str = None, - description: Optional[str] = None, run_backfill: Optional[bool] = True - ): - """ - Creates a temporal aggregation feature set by creating a pipeline linking a Source to a feature set. - Sources are created with :py:meth:`features.FeatureStore.create_source`. - Provided aggregations will generate the features for the feature set. This will create the feature set - along with aggregation calculations to create features - - :param source_name: The name of the source created via create_source - :param schema_name: The schema name of the feature set - :param table_name: The table name of the feature set - :param start_time: The start time for the pipeline to run - :param schedule_interval: The frequency with which to run the pipeline. - :param aggregations: The list of FeatureAggregations to apply to the column names of the source SQL statement - :param backfill_start_time: The datetime representing the earliest point in time to get data from when running - backfill - :param backfill_interval: The "sliding window" interval to increase each timepoint by when performing backfill - :param description: An optional description of the feature set - :param run_backfill: Whether or not to run backfill when calling this function. Default True. If this is True - backfill_start_time and backfill_interval MUST BE SET - :return: (FeatureSet) the created Feature Set - - :Example: - .. code-block:: python - - from splicemachine.features.pipelines import AggWindow, FeatureAgg, FeatureAggregation - from datetime import datetime - source_name = 'CUSTOMER_RFM' - fs.create_source( - name=source_name, - sql='SELECT * FROM RETAIL_RFM.CUSTOMER_CATEGORY_ACTIVITY', - event_ts_column='INVOICEDATE', - update_ts_column='LAST_UPDATE_TS', - primary_keys=['CUSTOMERID'] - ) - start_time = datetime.today() - schedule_interval = AggWindow.get_window(5,AggWindow.DAY) - backfill_start = datetime.strptime('2002-01-01 00:00:00', '%Y-%m-%d %H:%M:%S') - backfill_interval = schedule_interval - fs.create_aggregation_feature_set_from_source( - source_name, 'RETAIL_FS', 'AUTO_RFM', start_time=start_time, - schedule_interval=schedule_interval, backfill_start_time=backfill_start, - backfill_interval=backfill_interval, - aggregations = [ - FeatureAggregation(feature_name_prefix = 'AR_CLOTHING_QTY', column_name = 'CLOTHING_QTY', agg_functions=['sum','max'], agg_windows=['1d','2d','90d'], agg_default_value = 0.0 ), - FeatureAggregation(feature_name_prefix = 'AR_DELICATESSEN_QTY', column_name = 'DELICATESSEN_QTY', agg_functions=['avg'], agg_windows=['1d','2d', '2w'], agg_default_value = 11.5 ), - FeatureAggregation(feature_name_prefix = 'AR_GARDEN_QTY' , column_name = 'GARDEN_QTY', agg_functions=['count','avg'], agg_windows=['30d','90d', '1q'], agg_default_value = 8 ) - ] - ) - - This will create, deploy and return a FeatureSet called 'RETAIL_FS.AUTO_RFM'. 
- The Feature Set will have 15 features: - * 6 for the 'AR_CLOTHING_QTY' prefix (sum & max over provided agg windows) - * 3 for the 'AR_DELICATESSEN_QTY' prefix (avg over provided agg windows) - * 6 for the 'AR_GARDEN_QTY' prefix (count & avg over provided agg windows) - - A Pipeline is also created and scheduled in Airflow that feeds it every 5 days from the Source 'CUSTOMER_RFM' - Backfill will also occur, reading data from the source as of '2002-01-01 00:00:00' with a 5 day window - """ - schema_name, table_name, source_name = schema_name.upper(), table_name.upper(), source_name.upper() - agg_feature_set = { - 'source_name': source_name, - 'schema_name': schema_name, - 'table_name': table_name, - 'start_time': str(start_time), - 'schedule_interval': schedule_interval, - 'aggregations': [f.__dict__ for f in aggregations], - 'backfill_start_time': str(backfill_start_time), - 'backfill_interval': backfill_interval, - 'description': description - } - num_features = sum([len(f.agg_functions)*len(f.agg_windows) for f in aggregations]) - print(f'Registering aggregation feature set {schema_name}.{table_name} and {num_features} features' - f' in the Feature Store...', end=' ') - r = make_request(self._FS_URL, Endpoints.AGG_FEATURE_SET_FROM_SOURCE, RequestType.POST, self._auth, - params={'run_backfill': run_backfill}, body=agg_feature_set) - print('Done.') - return FeatureSet(**r)
- -
[docs] def get_backfill_sql(self, schema_name: str, table_name: str): - """ - Returns the necessary parameterized SQL statement to perform backfill on an Aggregate Feature Set. The Feature - Set must have been deployed using the :py:meth:`features.FeatureStore.create_aggregation_feature_set_from_source` - function, meaning there must be a Source and a Pipeline associated with it. This function will likely not be - necessary as you can perform backfill at the time of feature set creation automatically. - - This SQL will be parameterized and need a timestamp to execute. You can get those timestamps with the - :py:meth:`features.FeatureStore.get_backfill_intervals` function with the same parameters - - :param schema_name: The schema name of the feature set - :param table_name: The table name of the feature set - :return: The parameterized Backfill SQL - """ - - p = { - 'schema': schema_name, - 'table': table_name - } - return make_request(self._FS_URL, Endpoints.BACKFILL_SQL, RequestType.GET, self._auth, params=p)
- -
[docs] def get_pipeline_sql(self, schema_name: str, table_name: str): - """ - Returns the incremental pipeline SQL that feeds a feature set from a source (thus creating a pipeline). - Pipelines are managed for you by default by Splice Machine via Airflow, but if you opt out of using the - managed pipelines you can use this function to get the incremental SQL. - - This SQL will be parameterized and need a timestamp to execute. You can get those timestamps with the - :py:meth:`features.FeatureStore.get_backfill_intervals` function with the same parameters - - :param schema_name: The schema name of the feature set - :param table_name: The table name of the feature set - :return: The incremental Pipeline SQL - """ - - p = { - 'schema': schema_name, - 'table': table_name - } - return make_request(self._FS_URL, Endpoints.PIPELINE_SQL, RequestType.GET, self._auth, params=p)
- -
[docs] def get_backfill_intervals(self, schema_name: str, table_name: str) -> List[datetime]: - """ - Gets the backfill intervals necessary for the parameterized backfill SQL obtained from the - :py:meth:`features.FeatureStore.get_backfill_sql` function. This function will likely not be - necessary as you can perform backfill at the time of feature set creation automatically. - - :param schema_name: The schema name of the feature set - :param table_name: The table name of the feature set - :return: The list of datetimes necessary to parameterize the backfill SQL - """ - p = { - 'schema': schema_name, - 'table': table_name - } - return make_request(self._FS_URL, Endpoints.BACKFILL_INTERVALS, RequestType.GET, self._auth, params=p)
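A manual-backfill sketch under assumptions: `fs` is an authenticated FeatureStore, `splice` is a registered PySpliceContext, the feature set names are hypothetical, and the exact way each timestamp is substituted into the returned SQL may differ.

.. code-block:: python

    sql = fs.get_backfill_sql('retail_fs', 'auto_rfm')
    for ts in fs.get_backfill_intervals('retail_fs', 'auto_rfm'):
        # Parameterize the backfill SQL with each interval timestamp and run it
        splice.execute(sql.format(ts))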
- - - def _retrieve_model_data_sets(self, schema_name: str, table_name: str): - """ - Returns the training set dataframe and model table dataframe for a given deployed model. - - :param schema_name: model schema name - :param table_name: model table name - :return: - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - training_set_df = self.get_training_set_from_deployment(schema_name, table_name) - model_table_df = self.splice_ctx.df(f'SELECT * FROM {schema_name}.{table_name}') - return training_set_df, model_table_df - - def _retrieve_training_set_metadata_from_deployement(self, schema_name: str, table_name: str): - """ - Reads Feature Store metadata to retrieve definition of training set used to train the specified model. - :param schema_name: model schema name - :param table_name: model table name - :return: - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - sql = SQL.get_deployment_metadata.format(schema_name=schema_name, table_name=table_name) - deploy_df = self.splice_ctx.df(sql).collect() - cnt = len(deploy_df) - if cnt == 1: - return deploy_df[0] - -
[docs] def display_model_feature_drift(self, schema_name: str, table_name: str): - """ - Displays feature by feature comparison between the training set of the deployed model and the input feature - values used with the model since deployment. - - :param schema_name: name of database schema where model table is deployed - :param table_name: name of the model table - :return: None - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - metadata = make_request(self._FS_URL, Endpoints.TRAINING_SET_FROM_DEPLOYMENT, RequestType.GET, - self._auth, params={ "schema": schema_name, "table": table_name})['metadata'] - - training_set_df, model_table_df = self._retrieve_model_data_sets(schema_name, table_name) - features = metadata['features'].split(',') - build_feature_drift_plot(features, training_set_df, model_table_df)
- - -
[docs] def display_model_drift(self, schema_name: str, table_name: str, time_intervals: int, - start_time: datetime = None, end_time: datetime = None): - """ - Displays as many as 'time_intervals' plots showing the distribution of the model prediction within each time - period. Time periods are equal periods of time where predictions are present in the model table - 'schema_name'.'table_name'. Model predictions are first filtered to only those occurring after 'start_time' if - specified and before 'end_time' if specified. - - :param schema_name: schema where the model table resides - :param table_name: name of the model table - :param time_intervals: number of time intervals to plot - :param start_time: if specified, filters to only show predictions occurring after this date/time - :param end_time: if specified, filters to only show predictions occurring before this date/time - :return: None - """ - # database stores object names in upper case - schema_name = schema_name.upper() - table_name = table_name.upper() - - # set default timeframe if not specified - if not start_time: - start_time = datetime(1900, 1, 1, 0, 0, 0) - if not end_time: - end_time = datetime.now() - # retrieve predictions the model has made over time - sql = SQL.get_model_predictions.format(schema_name=schema_name, table_name=table_name, - start_time=start_time, end_time=end_time) - model_table_df = self.splice_ctx.df(sql) - build_model_drift_plot(model_table_df, time_intervals)
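A monitoring sketch with a hypothetical deployed model table:

.. code-block:: python

    from datetime import datetime

    # Feature-by-feature comparison of training data vs. inference-time inputs
    fs.display_model_feature_drift('retail_fs', 'churn_model')
    # Prediction distribution over 10 equal time periods since the given start
    fs.display_model_drift('retail_fs', 'churn_model', time_intervals=10,
                           start_time=datetime(2020, 1, 1))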
- - - - - - def __get_pipeline(self, df, features, label, model_type): - """ - Creates a Pipeline with preprocessing steps (StringIndexer, VectorAssembler) for each feature depending - on feature type, fits it, and returns the fitted pipeline for feature elimination - - :param df: Spark Dataframe - :param features: List[Feature] to train on - :param label: Label name to train on - :param model_type: (str) the model type - available options are "classification" and "regression" - :return: Fitted Spark Pipeline - """ - categorical_features = [f.name for f in features if f.is_categorical()] - numeric_features = [f.name for f in features if f.is_continuous() or f.is_ordinal()] - indexed_features = [f'{n}_index' for n in categorical_features] - - si = [StringIndexer(inputCol=n, outputCol=f'{n}_index', handleInvalid='keep') for n in categorical_features] - all_features = numeric_features + indexed_features - - v = VectorAssembler(inputCols=all_features, outputCol='features', handleInvalid='keep') - if model_type == 'classification': - si += [StringIndexer(inputCol=label, outputCol=f'{label}_index', handleInvalid='keep')] - clf = RandomForestClassifier(labelCol=f'{label}_index') - else: - clf = RandomForestRegressor(labelCol=label) - return Pipeline(stages=si + [v, clf]).fit(df) - - def __get_feature_importance(self, feature_importances, df, features_column): - """ - Gets the ordered feature importance for the feature elimination rounds - :param feature_importances: Spark model featureImportances attribute - :param df: Spark dataframe - :param features_column: Column name of the dataframe that holds the features - :return: Sorted pandas dataframe with the feature importances and feature names - """ - feature_rank = [] - for i in df.schema[features_column].metadata["ml_attr"]["attrs"]: - feature_rank += df.schema[features_column].metadata["ml_attr"]["attrs"][i] - features_df = pd.DataFrame(feature_rank) - features_df['score'] = features_df['idx'].apply(lambda x: feature_importances[x]) - return (features_df.sort_values('score', ascending=False)) - - def __log_mlflow_results(self, name, rounds, mlflow_results): - """ - Logs the results of feature elimination to mlflow - - :param name: MLflow run name - :param rounds: Number of rounds of feature elimination that were run - :param mlflow_results: The params / metrics to log - :return: - """ - try: - if self.mlflow_ctx.active_run(): - self.mlflow_ctx.start_run(run_name=name) - for r in range(rounds): - with self.mlflow_ctx.start_run(run_name=f'Round {r}', nested=True): - self.mlflow_ctx.log_metrics(mlflow_results[r]) - finally: - self.mlflow_ctx.end_run() - - - def __prune_features_for_elimination(self, features) -> List[Feature]: - """ - Removes incompatible features from the provided list if they are not compatible with SparkML modeling - - :param features: List[Feature] the provided list - :return: List[Features] the pruned list - """ - from splicemachine.spark.constants import SQL_MODELING_TYPES - invalid_features = {f for f in features if f.feature_data_type['data_type'] not in SQL_MODELING_TYPES} - valid_features = list(set(features) - invalid_features) - if invalid_features: print('The following features are invalid for modeling based on their Data Types:\n') - for f in invalid_features: - print(f.name, f.feature_data_type) - return valid_features - -
[docs] def run_feature_elimination(self, df, features: List[Union[str, Feature]], label: str = 'label', n: int = 10, - verbose: int = 0, model_type: str = 'classification', step: int = 1, - log_mlflow: bool = False, mlflow_run_name: str = None, - return_importances: bool = False): - - """ - Runs feature elimination using a Spark decision tree on the dataframe passed in. Optionally logs results to mlflow - - :param df: The dataframe with features and label - :param features: The list of feature names (or Feature objects) to run elimination on - :param label: the label column names - :param n: The number of features desired. Default 10 - :param verbose: The level of verbosity. 0 indicates no printing. 1 indicates printing remaining features after - each round. 2 indicates print features and relative importances after each round. Default 0 - :param model_type: Whether the model to test with will be a regression or classification model. Default classification - :param step: The number of features to eliminate each round. Default 1 - :param log_mlflow: Whether or not to log results to mlflow as nested runs. Default false - :param mlflow_run_name: The name of the parent run under which all subsequent runs will live. The children run - names will be {mlflow_run_name}_{num_features}_features. ie testrun_5_features, testrun_4_features etc - :param return_importances: Whether or not to also return the final feature importances. Default False - :return: - """ - - train_df = df - features = self._process_features(features) - remaining_features = self.__prune_features_for_elimination(features) - rnd = 0 - mlflow_results = [] - assert len( - remaining_features) > n, \ - "You've set the number of desired features (n) greater than the number of available features" - while len(remaining_features) > n: - rnd += 1 - num_features = max(len(remaining_features) - step, n) # Don't go less than the specified value - print(f'Building {model_type} model') - model = self.__get_pipeline(train_df, remaining_features, label, model_type) - print('Getting feature importance') - feature_importances = self.__get_feature_importance(model.stages[-1].featureImportances, - model.transform(train_df), "features").head( - num_features) - remaining_features_and_label = list(feature_importances['name'].values) + [label] - train_df = train_df.select(*remaining_features_and_label) - remaining_features = [f for f in remaining_features if f.name in feature_importances['name'].values] - print(f'{len(remaining_features)} features remaining') - - if verbose == 1: - print(f'Round {rnd} complete. Remaining Features:') - for i, f in enumerate(list(feature_importances['name'].values)): - print(f'{i}. {f}') - elif verbose == 2: - print(f'Round {rnd} complete. Remaining Features:') - display(feature_importances.reset_index(drop=True)) - - # Add results to a list for mlflow logging - round_metrics = {'Round': rnd, 'Number of features': len(remaining_features)} - for index, row in feature_importances.iterrows(): - round_metrics[row['name']] = row['score'] - mlflow_results.append(round_metrics) - - if log_mlflow and self.mlflow_ctx: - run_name = mlflow_run_name or f'feature_elimination_{label}' - self.__log_mlflow_results(run_name, rnd, mlflow_results) - - # Parenthesized so the tuple is only returned when return_importances is True - return (remaining_features, feature_importances.reset_index(drop=True)) if return_importances \ - else remaining_features
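A usage sketch, assuming `fs` has a registered splice_ctx and the view and label names are hypothetical:

.. code-block:: python

    df = fs.get_training_set_from_view('CUSTOMER_CHURN')
    feats, importances = fs.run_feature_elimination(
        df,
        features=fs.get_training_view_features('CUSTOMER_CHURN'),
        label='LABEL', n=10, model_type='classification',
        verbose=1, return_importances=True
    )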
- - - - -
[docs] def set_feature_store_url(self, url: str): - """ - Sets the Feature Store URL to use for all subsequent requests - - :param url: The Feature Store URL - """ - self._FS_URL = url
- -
[docs] def login_fs(self, username, password): - """ - Authenticates with the Feature Store via HTTP basic auth - - :param username: Splice Machine username - :param password: Splice Machine password - """ - self._auth = HTTPBasicAuth(username, password)
- -
[docs] def set_token(self, token): - """ - Authenticates with the Feature Store via a JWT token - - :param token: JWT token - """ - self._auth = token
- - def __try_auto_login(self): - """ - Tries to login the user automatically. This will only work if the user is not - using the cloud service. - - :return: None - """ - token = _get_token() - if token: - self.set_token(token) - return - - user, password = _get_credentials() - if user and password: - self.login_fs(user, password)
\ No newline at end of file
diff --git a/docs/_build/html/_modules/splicemachine/features/training_view.html b/docs/_build/html/_modules/splicemachine/features/training_view.html deleted file mode 100644 index 7dee9876..00000000 --- a/docs/_build/html/_modules/splicemachine/features/training_view.html +++ /dev/null @@ -1,235 +0,0 @@
Source code for splicemachine.features.training_view

-from typing import List
-
-
[docs]class TrainingView: - def __init__(self, *, pk_columns: List[str], ts_column, label_column, view_sql, name, description, - view_id = None, **kwargs): - self.pk_columns = pk_columns - self.ts_column = ts_column - self.label_column = label_column - self.view_sql = view_sql - self.name = name - self.description = description - self.view_id = view_id - args = {k.lower(): kwargs[k] for k in kwargs} # Make all keys lowercase - # Split comma-delimited strings into lists for '*columns' keys (e.g. pk_columns, join_columns), since Splice doesn't support arrays - args = {k: args[k].split(',') if 'columns' in k and isinstance(args[k], str) else args[k] for k in args} - self.__dict__.update(args) - - def __repr__(self): - return f'TrainingView(' \ - f'PKColumns={self.pk_columns}, ' \ - f'TSColumn={self.ts_column}, ' \ - f'LabelColumn={self.label_column}, \n' \ - f'ViewSQL={self.view_sql}, \n' \ - f'ViewID={self.view_id})' - - def __str__(self): - return self.__repr__()
\ No newline at end of file
diff --git a/docs/_build/html/_modules/splicemachine/mlflow_support/mlflow_support.html b/docs/_build/html/_modules/splicemachine/mlflow_support/mlflow_support.html deleted file mode 100644 index e892c3e8..00000000 --- a/docs/_build/html/_modules/splicemachine/mlflow_support/mlflow_support.html +++ /dev/null @@ -1,1291 +0,0 @@

Source code for splicemachine.mlflow_support.mlflow_support

-"""
-Copyright 2020 Splice Machine, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.\n
-
-======================================================================================================================================================================================\n
-
-All functions in this module are accessible through the mlflow object and are to be referenced without the leading underscore as \n
-.. code-block:: python
-
-    mlflow.function_name()
-
-For example, the function _current_exp_id() is accessible via\n
-.. code-block:: python
-
-    mlflow.current_exp_id()
-
-
-All functions are accessible after running the following import\n
-.. code-block:: python
-
-    from splicemachine.mlflow_support import *
-
-Importing anything directly from mlflow before running the above statement will cause problems. After running the above import, you can import additional mlflow submodules as normal\n
-.. code-block:: python
-
-    from splicemachine.mlflow_support import *
-    from mlflow.tensorflow import autolog
-
-======================================================================================================================================================================================\n
-"""
-import glob
-import os
-import time
-import copy
-from collections import defaultdict
-from contextlib import contextmanager
-from importlib import import_module
-from io import BytesIO
-from os import path
-from sys import version as py_version, stderr
-from tempfile import TemporaryDirectory, NamedTemporaryFile
-from zipfile import ZIP_DEFLATED, ZipFile
-from typing import Dict, Optional, List, Union
-
-import gorilla
-import h2o
-import mlflow
-import mlflow.pyfunc
-from mlflow.tracking.fluent import ActiveRun
-from mlflow.entities import RunStatus
-import pyspark
-import requests
-import sklearn
-import yaml
-import warnings
-
-from pandas.core.frame import DataFrame as PandasDF
-from pyspark.ml.base import Model as SparkModel
-from pyspark.sql import DataFrame as SparkDF
-from requests.auth import HTTPBasicAuth
-from sklearn.base import BaseEstimator as ScikitModel
-
-from splicemachine.features import FeatureStore
-from splicemachine.mlflow_support.constants import (FileExtensions, DatabaseSupportedLibs)
-from splicemachine.mlflow_support.utilities import (SparkUtils, get_pod_uri, get_user,
-                                                    insert_artifact)
-from splicemachine import SpliceMachineException
-from splicemachine.spark.context import PySpliceContext
-
-try: # PySpark/H2O 3.X
-    from h2o.model.model_base import ModelBase as H2OModel
-except: # PySpark/H2O 2.X
-    from h2o.estimators.estimator_base import ModelBase as H2OModel
-
-# For recording notebook history
-try:
-    from IPython import get_ipython
-    import nbformat as nbf
-    ipython = get_ipython()
-    mlflow._notebook_history = bool(ipython) # If running outside a notebook/ipython, this will be False
-except:
-    mlflow._notebook_history = False
-
-_TESTING = os.environ.get("TESTING", False)
-
-try:
-    _TRACKING_URL = get_pod_uri("mlflow", "5001", _TESTING)
-except:
-    print("It looks like you're running outside the Splice K8s Cloud Service. "
-          "You must run mlflow.set_mlflow_uri(<url>) and pass in the URL to the MLFlow UI", file=stderr)
-    _TRACKING_URL = ''
-
-_CLIENT = mlflow.tracking.MlflowClient(tracking_uri=_TRACKING_URL)
-mlflow.client = _CLIENT
-
-_GORILLA_SETTINGS = gorilla.Settings(allow_hit=True, store_hit=True)
-_PYTHON_VERSION = py_version.split('|')[0].strip()
-
-
[docs]class SpliceActiveRun(ActiveRun): - """ - A wrapped active run for Splice Machine that calls our custom mlflow.end_run, so we can record the notebook - history - """ - def __exit__(self, exc_type, exc_val, exc_tb): - status = RunStatus.FINISHED if exc_type is None else RunStatus.FAILED - mlflow.end_run(RunStatus.to_string(status)) - return exc_type is None
-
-def __try_auto_login():
-    """
-    Tries to automatically log the user in to the Director for deployment. This will only work if the user is not
-    using the cloud service.
-
-    :return: None
-    """
-    jwt = os.environ.get('SPLICE_JUPYTER_JWTTOKEN')
-    if jwt:
-        mlflow.login_director(jwt_token=jwt)
-    user, password = os.environ.get('SPLICE_JUPYTER_USER'), os.environ.get('SPLICE_JUPYTER_PASSWORD')
-    if user and password:
-        mlflow.login_director(username=user, password=password)
-
[docs]def _mlflow_patch(name): - """ - Create a MLFlow Patch that applies the default gorilla settings - - :param name: destination name under mlflow package - :return: decorator for patched function - """ - return gorilla.patch(mlflow, name, settings=_GORILLA_SETTINGS)
- - -
[docs]def _get_current_run_data(): - """ - Get the data associated with the current run. - As of MLFLow 1.6, it currently does not support getting run info from the mlflow.active_run object, so we need it - to be retrieved via the tracking client. - - :return: active run data object - """ - return _CLIENT.get_run(mlflow.active_run().info.run_id).data
- -
-def __get_active_user():
-    if hasattr(mlflow, '_username'):
-        return mlflow._username
-    if get_user():
-        return get_user()
-    raise SpliceMachineException("Could not detect active user. Please run mlflow.login_director() and pass in your Splice "
-                                 "username and password or JWT token.")
- -
[docs]@_mlflow_patch('get_run_ids_by_name') -def _get_run_ids_by_name(run_name, experiment_id=None): - """ - Gets a run id from the run name. If there are multiple runs with the same name, all run IDs are returned - - :param run_name: (str) The name of the run - :param experiment_id: (int) The experiment to search in. If None, all experiments are searched. [Default None] - :return: (List[str]) List of run ids - """ - exps = [_CLIENT.get_experiment(experiment_id)] if experiment_id else _CLIENT.list_experiments() - run_ids = [] - for exp in exps: - for run in _CLIENT.search_runs(exp.experiment_id): - if run_name == run.data.tags.get('mlflow.runName'): - run_ids.append(run.data.tags['Run ID']) - return run_ids
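A minimal usage sketch (the run name and experiment id are illustrative):

.. code-block:: python

    from splicemachine.mlflow_support import *

    # Run names are not unique, so every matching run id comes back
    run_ids = mlflow.get_run_ids_by_name('my_run', experiment_id=1)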
- -
[docs]@_mlflow_patch('register_splice_context') -def _register_splice_context(splice_context): - """ - Register a Splice Context for Spark/Database operations (artifact storage, for example) - - :param splice_context: (PySpliceContext) splice context to input - :return: None - """ - assert isinstance(splice_context, PySpliceContext), "You must pass in a PySpliceContext to this method" - mlflow._splice_context = splice_context
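For example, assuming an active SparkSession named spark (the JDBC URL is only needed outside the cloud service):

.. code-block:: python

    from splicemachine.spark.context import PySpliceContext
    from splicemachine.mlflow_support import *

    splice = PySpliceContext(spark)  # pass JDBC_URL='<url>' if off-cluster
    mlflow.register_splice_context(splice)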
- -
-@_mlflow_patch('register_feature_store')
-def _register_feature_store(fs: FeatureStore):
-    """
-    Register a feature store for feature tracking of experiments
-
-    :param fs: (FeatureStore) The feature store
-    :return: None
-    """
-    mlflow._feature_store = fs
-    mlflow._feature_store.mlflow_ctx = mlflow
-
-def _check_for_splice_ctx():
-    """
-    Check to make sure that the user has registered
-    a PySpliceContext with the mlflow object before allowing
-    spark operations to take place
-    """
-
-    if not hasattr(mlflow, '_splice_context'):
-        raise SpliceMachineException(
-            "You must run `mlflow.register_splice_context(pysplice_context)` before "
-            "you can run this mlflow operation!"
        )
-
-
[docs]@_mlflow_patch('current_run_id') -def _current_run_id(): - """ - Retrieve the current run id - - :return: (str) the current run id - """ - return mlflow.active_run().info.run_uuid
- - -
[docs]@_mlflow_patch('current_exp_id') -def _current_exp_id(): - """ - Retrieve the current exp id - - :return: (int) the current experiment id - """ - return mlflow.active_run().info.experiment_id
- - -
-@_mlflow_patch('lp')
-def _lp(key, value):
-    """
-    Add a shortcut for logging parameters in MLFlow.
-
-    :param key: (str) key for the parameter
-    :param value: (str) value for the parameter
-    :return: None
-    """
-    if len(str(value)) > 250 or len(str(key)) > 250:
-        raise SpliceMachineException(f'It seems your parameter input is too long. The max length is 250 characters. '
-                                     f'Your key is length {len(str(key))} and your value is length {len(str(value))}.')
-    mlflow.log_param(key, value)
- - -
-@_mlflow_patch('lm')
-def _lm(key, value, step=None):
-    """
-    Add a shortcut for logging metrics in MLFlow.
-
-    :param key: (str) key for the metric
-    :param value: (str or int) value for the metric
-    :param step: (int) A single integer step at which to log the specified Metrics. If unspecified, each metric is logged at step zero.
-    """
-    if len(str(key)) > 250:
-        raise SpliceMachineException(f'It seems your metric key is too long. The max length is 250 characters, '
-                                     f'but yours is {len(str(key))}')
-    mlflow.log_metric(key, value, step=step)
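A quick sketch of both shortcuts inside a run (keys and values are illustrative):

.. code-block:: python

    with mlflow.start_run(run_name='example'):
        mlflow.lp('max_depth', 5)     # shortcut for mlflow.log_param
        mlflow.lm('accuracy', 0.93)   # shortcut for mlflow.log_metric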
- - -
-def __get_serialized_mlmodel(model, model_lib=None, **flavor_options):
-    """
-    Populate the Zip buffer with the serialized MLModel
-    :param model: (Model) the trained Spark/SKlearn/H2O/Keras model to serialize
-    :param model_lib: (str) the mlflow model flavor to save the model under
-    :param flavor_options: The extra kw arguments to any particular model library. If this is set, model_lib must be set
-    """
-    buffer = BytesIO()
-    zip_buffer = ZipFile(buffer, mode="a", compression=ZIP_DEFLATED, allowZip64=False)
-    with TemporaryDirectory() as tempdir:
-        mlmodel_dir = f'{tempdir}/model'
-        if model_lib:
-            try:
-                import mlflow
-                import_module(f'mlflow.{model_lib}')
-                if model_lib == 'pyfunc':
-                    getattr(mlflow, model_lib).save_model(python_model=model, path=mlmodel_dir, **flavor_options)
-                else:
-                    getattr(mlflow, model_lib).save_model(model, path=mlmodel_dir, **flavor_options)
-
-                file_ext = FileExtensions.map_from_mlflow_flavor(model_lib) if \
-                    model_lib in DatabaseSupportedLibs.get_valid() else model_lib
-
-            except Exception as e:
-                print(str(e))
-                raise SpliceMachineException(f'Failed to save model type {model_lib}. Ensure that it is a supported model '
-                                             f'flavor https://www.mlflow.org/docs/1.8.0/models.html#built-in-model-flavors\n'
-                                             f'Or you can build a pyfunc model\n'
-                                             'https://www.mlflow.org/docs/1.8.0/models.html#python-function-python-function')
-        # deprecated behavior
-        elif isinstance(model, H2OModel):
-            import mlflow.h2o
-            mlflow.set_tag('splice.h2o_version', h2o.__version__)
-            mlflow.h2o.save_model(model, mlmodel_dir, **flavor_options)
-            file_ext = FileExtensions.h2o
-        elif isinstance(model, SparkModel):
-            import mlflow.spark
-            mlflow.set_tag('splice.spark_version', pyspark.__version__)
-            mlflow.spark.save_model(model, mlmodel_dir, **flavor_options)
-            file_ext = FileExtensions.spark
-        elif isinstance(model, ScikitModel):
-            import mlflow.sklearn
-            mlflow.set_tag('splice.sklearn_version', sklearn.__version__)
-            mlflow.sklearn.save_model(model, mlmodel_dir, **flavor_options)
-            file_ext = FileExtensions.sklearn
-        else:
-            raise SpliceMachineException('Model type not supported for logging. If you received this error, '
-                                         'you should pass a value to the model_lib parameter of the model type you '
-                                         'want to save, or call the original mlflow.<flavor>.log_model(). '
-                                         'Supported values are available here: '
-                                         'https://www.mlflow.org/docs/1.8.0/models.html#built-in-model-flavors\n'
-                                         'as well as \'pyfunc\' '
-                                         'https://www.mlflow.org/docs/1.8.0/models.html#python-function-python-function')
-
-        for model_file in glob.glob(mlmodel_dir + "/**/*", recursive=True):
-            zip_buffer.write(model_file, arcname=path.relpath(model_file, mlmodel_dir))
-
-    return buffer, file_ext
- - -
-@_mlflow_patch('log_model')
-def _log_model(model, name='model', model_lib=None, **flavor_options):
-    """
-    Log a trained machine learning model
-
-    :param model: (Model) is the trained Spark/SKlearn/H2O/Keras model
-        with the current run
-    :param name: (str) the run relative name to store the model under. [Default 'model']
-    :param model_lib: An optional param specifying the model type of the model to log
-        Available options match the mlflow built-in model flavors https://www.mlflow.org/docs/1.8.0/models.html#built-in-model-flavors
-    :param flavor_options: (**kwargs) The full set of save options to pass into the save_model function. If this is passed,
-        model_lib must also be provided and the keys of this dictionary must match the params of that function's signature
-        (ie mlflow.pyfunc.save_model). An example of pyfunc's signature is here, although each flavor has its own.
-        https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#mlflow.pyfunc.save_model
-    """
-    _check_for_splice_ctx()
-
-    # Make sure no models have been logged to this run
-    if _get_current_run_data().tags.get('splice.model_name'):  # this function has already run
-        raise SpliceMachineException("Only one model is permitted per run.")
-    if flavor_options and not model_lib:
-        raise SpliceMachineException("You cannot set mlflow-flavor specific options without setting the model library. "
-                                     "Either set model_lib, or use the native mlflow.<flavor>.log_model function")
-
-    model_class = str(model.__class__)
-
-    run_id = mlflow.active_run().info.run_uuid
-    buffer, file_ext = __get_serialized_mlmodel(model, model_lib=model_lib, **flavor_options)
-    buffer.seek(0)
-    insert_artifact(splice_context=mlflow._splice_context, byte_array=bytearray(buffer.read()), name=name,
-                    run_uuid=run_id, file_ext=file_ext)
-
-    # Set the model metadata as tags after successful logging
-    mlflow.set_tag('splice.model_name', name)  # read in backend for deployment
-    mlflow.set_tag('splice.model_type', model_class)
-    mlflow.set_tag('splice.model_py_version', _PYTHON_VERSION)
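A hedged sketch of logging a scikit-learn model (assumes a registered splice context and illustrative training data X, y):

.. code-block:: python

    from sklearn.linear_model import LogisticRegression

    model = LogisticRegression().fit(X, y)
    with mlflow.start_run(run_name='sklearn_example'):
        mlflow.log_model(model, name='model', model_lib='sklearn')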
- - -
[docs]@_mlflow_patch('end_run') -def _end_run(status=RunStatus.to_string(RunStatus.FINISHED), save_html=True): - """End an active MLflow run (if there is one). - - .. code-block:: python - :caption: Example - - import mlflow - - # Start run and get status - mlflow.start_run() - run = mlflow.active_run() - print("run_id: {}; status: {}".format(run.info.run_id, run.info.status)) - - # End run and get status - mlflow.end_run() - run = mlflow.get_run(run.info.run_id) - print("run_id: {}; status: {}".format(run.info.run_id, run.info.status)) - print("--") - - # Check for any active runs - print("Active run: {}".format(mlflow.active_run())) - - .. code-block:: text - :caption: Output - - run_id: b47ee4563368419880b44ad8535f6371; status: RUNNING - run_id: b47ee4563368419880b44ad8535f6371; status: FINISHED - -- - Active run: None - """ - if mlflow._notebook_history and hasattr(mlflow, '_splice_context') and mlflow.active_run(): - with NamedTemporaryFile() as temp_file: - nb = nbf.v4.new_notebook() - nb['cells'] = [nbf.v4.new_code_cell(code) for code in ipython.history_manager.input_hist_raw] - nbf.write(nb, temp_file.name) - run_name = mlflow.get_run(mlflow.current_run_id()).to_dictionary()['data']['tags']['mlflow.runName'] - mlflow.log_artifact(temp_file.name, name=f'{run_name}_run_log.ipynb') - typ,ext = ('html','html') if save_html else ('script','py') - os.system(f'jupyter nbconvert --to {typ} {temp_file.name}') - mlflow.log_artifact(f'{temp_file.name[:-1]}.{ext}', name=f'{run_name}_run_log.{ext}') - orig = gorilla.get_original_attribute(mlflow, "end_run") - orig(status=status)
- -
[docs]@_mlflow_patch('start_run') -def _start_run(run_id=None, tags=None, experiment_id=None, run_name=None, nested=False): - """ - Start a new run - - :Example: - .. code-block:: python - - mlflow.start_run(run_name='my_run')\n - # or\n - with mlflow.start_run(run_name='my_run'): - ... - - - :param tags: a dictionary containing metadata about the current run. \ - For example: \ - { \ - 'team': 'pd', \ - 'purpose': 'r&d' \ - } - :param run_name: (str) an optional name for the run to show up in the MLFlow UI. [Default None] - :param run_id: (str) if you want to reincarnate an existing run, pass in the run id [Default None] - :param experiment_id: (int) if you would like to create an experiment/use one for this run [Default None] - :param nested: (bool) Controls whether run is nested in parent run. True creates a nest run [Default False] - :return: (ActiveRun) the mlflow active run object - """ - tags = tags or {} - tags['mlflow.user'] = __get_active_user() - - orig = gorilla.get_original_attribute(mlflow, "start_run") - active_run = orig(run_id=run_id, experiment_id=experiment_id, run_name=run_name, nested=nested) - - for key in tags: - mlflow.set_tag(key, tags[key]) - if not run_id: - mlflow.set_tag('Run ID', mlflow.active_run().info.run_uuid) - if run_name: - mlflow.set_tag('mlflow.runName', run_name) - if hasattr(mlflow,'_active_training_set'): - mlflow._active_training_set._register_metadata(mlflow) - - return SpliceActiveRun(active_run)
- -
[docs]@_mlflow_patch('remove_active_training_set') -def _remove_active_training_set(): - """ - Removes the active training set from mlflow. This function deletes mlflows active training set (retrieved from - the feature store), which will in turn stop the automated logging of features to the active mlflow run. To recreate - an active training set, call fs.get_training_set or fs.get_training_set_from_view in the Feature Store. - """ - if hasattr(mlflow,'_active_training_set'): - del mlflow._active_training_set
- - -
-@_mlflow_patch('log_pipeline_stages')
-def _log_pipeline_stages(pipeline):
-    """
-    Log the pipeline stages of a Spark Pipeline as params for the run
-
-    :param pipeline: (PipelineModel) fitted/unfitted pipeline
-    :return: None
-    """
-    for stage_number, pipeline_stage in enumerate(SparkUtils.get_stages(pipeline)):
-        readable_stage_name = SparkUtils.readable_pipeline_stage(pipeline_stage)
-        mlflow.log_param('Stage' + str(stage_number), readable_stage_name)
- - -
[docs]@_mlflow_patch('log_feature_transformations') -def _log_feature_transformations(unfit_pipeline): - """ - Log feature transformations for an unfit spark pipeline - Logs --> feature movement through the pipeline - - :param unfit_pipeline: (PipelineModel) unfit spark pipeline to log - :return: None - """ - transformations = defaultdict(lambda: [[], None]) # transformations, outputColumn - - for stage in SparkUtils.get_stages(unfit_pipeline): - input_cols, output_col = SparkUtils.get_cols(stage, get_input=True), SparkUtils.get_cols(stage, get_input=False) - if input_cols and output_col: # make sure it could parse transformer - for column in input_cols: - first_column_found = SparkUtils.find_spark_transformer_inputs_by_output(transformations, column) - if first_column_found: # column is not original - for f in first_column_found: - transformations[f][1] = output_col - transformations[f][0].append( - SparkUtils.readable_pipeline_stage(stage)) - else: - transformations[column][1] = output_col - transformations[column][0].append(SparkUtils.readable_pipeline_stage(stage)) - - for column in transformations: - param_value = ' -> '.join([column] + transformations[column][0] + - [transformations[column][1]]) - mlflow.log_param('Column- ' + column, param_value)
- - -
[docs]@_mlflow_patch('log_model_params') -def _log_model_params(pipeline_or_model): - """ - Log the parameters of a fitted spark model or a model stage of a fitted spark pipeline - - :param pipeline_or_model: fitted spark pipeline/fitted spark model - """ - model = SparkUtils.get_model_stage(pipeline_or_model) - - mlflow.log_param('model', SparkUtils.readable_pipeline_stage(model)) - if hasattr(model, '_java_obj'): - verbose_parameters = SparkUtils.parse_string_parameters(model._java_obj.extractParamMap()) - elif hasattr(model, 'getClassifier'): - verbose_parameters = SparkUtils.parse_string_parameters( - model.getClassifier()._java_obj.extractParamMap()) - else: - raise Exception("Could not parse model type: " + str(model)) - for param in verbose_parameters: - value = verbose_parameters[param] - if value: # Spark 3.0 leafCol returns an empty parameter, mlflow fails if you try to log an empty string - try: - value = float(value) - mlflow.log_param(param.split('-')[0], value) - except: - mlflow.log_param(param.split('-')[0], value)
- - -
-@_mlflow_patch('timer')
-@contextmanager
-def _timer(timer_name, param=False):
-    """
-    Context manager for timing a block of code and logging the duration
-
-    :Example:
-        .. code-block:: python
-
-            with mlflow.timer('my_timer'): \n
-                ...
-
-    :param timer_name: (str) the name of the timer
-    :param param: (bool) whether or not to log the timer as a param (default=False). If False, logs as a metric.
-    :return: None
-    """
-    t0 = time.time()
-    try:
-        print(f'Starting Code Block {timer_name}...', end=' ')
-        yield
-    finally:
-        t1 = time.time() - t0
-        # Syntactic Sugar
-        (mlflow.log_param if param else mlflow.log_metric)(timer_name, t1)
-        print('Done.')
-        print(
-            f"Code Block {timer_name}:\nRan in {round(t1, 3)} secs\nRan in {round(t1 / 60, 3)} mins"
        )
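For example, timing a (hypothetical) training step and logging the duration as a metric:

.. code-block:: python

    with mlflow.start_run():
        with mlflow.timer('train_time'):    # param=False, so logged as a metric
            model = pipeline.fit(train_df)  # pipeline/train_df are placeholders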
- - -
[docs]@_mlflow_patch('download_artifact') -def _download_artifact(name, local_path, run_id=None): - """ - Download the artifact at the given run id (active default) + name to the local path - - :param name: (str) artifact name to load (with respect to the run) - :param local_path: (str) local path to download the model to. This path MUST include the file extension - :param run_id: (str) the run id to download the artifact from. Defaults to active run - :return: None - """ - _check_for_splice_ctx() - file_ext = path.splitext(local_path)[1] - - run_id = run_id or mlflow.active_run().info.run_uuid - blob_data, f_ext = SparkUtils.retrieve_artifact_stream(mlflow._splice_context, run_id, name) - if f_ext in FileExtensions.get_valid(): - f_ext = 'zip' # we zip up these models, even though we use the file ext to identify model type - if not file_ext: # If the user didn't provide the file (ie entered . as the local_path), fill it in for them - local_path += f'/{name}.{f_ext}' - - with open(local_path, 'wb') as artifact_file: - artifact_file.write(blob_data)
- - -
[docs]@_mlflow_patch('get_model_name') -def _get_model_name(run_id): - """ - Gets the model name associated with a run or None - - :param run_id: (str) the run_id that the model is stored under - :return: (str or None) The model name if it exists - """ - return _CLIENT.get_run(run_id).data.tags.get('splice.model_name')
- - -
[docs]def _load_model(run_id=None, name=None, as_pyfunc=False): - """ - Download and deserialize a serialized model - - :param run_id: (str) the id of the run to get a model from - (the run must have an associated model with it named spark_model) - :param name: (str) the name of the model in the database - :param as_pyfunc: (bool) load as a model-agnostic pyfunc model - (https://www.mlflow.org/docs/latest/models.html#python-function-python-function) - """ - if not (run_id or mlflow.active_run()): - raise SpliceMachineException("You need to pass in a run_id or start an mlflow run.") - - run_id = run_id or mlflow.active_run().info.run_uuid - name = name or _get_model_name(run_id) - if not name: - raise SpliceMachineException(f"Uh Oh! Looks like there isn't a model logged with this run ({run_id})!" - "If there is, pass in the name= parameter to this function") - model_blob, _ = SparkUtils.retrieve_artifact_stream(mlflow._splice_context, run_id, name) - buffer = BytesIO() - buffer.seek(0) - buffer.write(model_blob) - - with TemporaryDirectory() as tempdir: - ZipFile(buffer).extractall(path=tempdir) - if as_pyfunc: - mlflow_module = 'pyfunc' - else: - with open(f'{tempdir}/MLmodel', 'r') as mlmodel_file: - loader_module = yaml.safe_load(mlmodel_file.read())['flavors']['python_function']['loader_module'] - mlflow_module = loader_module.split('.')[1] # get the mlflow.(MODULE) - import_module(loader_module) - return getattr(mlflow, mlflow_module).load_model(tempdir)
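A sketch of loading a logged model back (the run id is a placeholder):

.. code-block:: python

    some_run_id = 'b47ee4563368419880b44ad8535f6371'
    model = mlflow.load_model(run_id=some_run_id)
    # or flavor-agnostically, for a generic .predict() interface
    pyfunc_model = mlflow.load_model(run_id=some_run_id, as_pyfunc=True)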
- - -
-@_mlflow_patch('log_artifact')
-def _log_artifact(file_name, name=None, run_uuid=None):
-    """
-    Log an artifact for the active run
-
-    :Example:
-        .. code-block:: python
-
-            with mlflow.start_run():\n
-                mlflow.log_artifact('my_image.png')
-
-    :param file_name: (str) the name of the file to log
-    :param name: (str) the name to store the artifact as. Defaults to the file name. If the name param includes the file
-        extension (or is not passed in) you will be able to preview it in the mlflow UI (image, text, html, geojson files).
-    :param run_uuid: (str) the run uuid of a previous run, if none, defaults to current run
-    :return: None
-
-    :NOTE:
-        We do not currently support logging directories. If you would like to log a directory, please zip it first and log the zip file
-    """
-    _check_for_splice_ctx()
-    file_ext = path.splitext(file_name)[1].lstrip('.')
-
-    with open(file_name, 'rb') as artifact:
-        byte_stream = bytearray(bytes(artifact.read()))
-
-    run_id = run_uuid or mlflow.active_run().info.run_uuid
-    name = name or file_name
-    insert_artifact(mlflow._splice_context, name, byte_stream, run_id, file_ext=file_ext)
- - -
[docs]@_mlflow_patch('login_director') -def _login_director(username=None, password=None, jwt_token=None): - """ - Authenticate into the MLManager Director - - :param username: (str) database username - :param password: (str) database password - :param jwt_token: (str) database JWT token authentication - - Either (username/password) for basic auth or jwt_token must be provided. Basic authentication takes precedence if set (mlflow default) - """ - if (username and not password) or (password and not username): - raise SpliceMachineException("You must either set both username and password, or neither. You cannot set just one") - - if username and password: - mlflow._basic_auth = HTTPBasicAuth(username, password) - mlflow._username = username - os.environ['MLFLOW_TRACKING_USERNAME'] = username - os.environ['MLFLOW_TRACKING_PASSWORD'] = password - if jwt_token: - os.environ['MLFLOW_TRACKING_TOKEN'] = jwt_token
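For example (credentials are placeholders; use basic auth or a JWT, not both):

.. code-block:: python

    mlflow.login_director(username='my_user', password='my_password')
    # or
    mlflow.login_director(jwt_token='<jwt>')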
- - -
-def __initiate_job(payload, endpoint):
-    """
-    Send a job to the initiation endpoint
-
-    :param payload: (dict) JSON payload for POST request
-    :param endpoint: (str) REST endpoint to target
-    :return: (str) Response text from request
-    """
-    if not hasattr(mlflow, '_basic_auth'):
-        raise Exception(
-            "You have not logged into MLManager director."
-            " Please run mlflow.login_director(username, password)"
        )
-    request = requests.post(
-        get_pod_uri('mlflow', 5003, _testing=_TESTING) + endpoint,
-        auth=mlflow._basic_auth,
-        json=payload
-    )
-
-    if request.ok:
-        print("Your Job has been submitted. The returned value of this function is"
-              " the job id, which you can use to monitor your task in real-time. Run mlflow.watch_job(<job id>) to"
-              " stream the logs to stdout, or mlflow.fetch_logs(<job id>) to read them once into a list")
-        return request.json()['job_id']
-    else:
-        print("Error! An error occurred while submitting your job")
-        print(request.text)
-        return request.text
- - -
-@_mlflow_patch('deploy_aws')
-def _deploy_aws(app_name: str, region: str = 'us-east-2', instance_type: str = 'ml.m5.xlarge',
-                run_id: str = None, instance_count: int = 1, deployment_mode: str = 'replace'):
-    """
-    Queue Job to deploy a run to sagemaker with the
-    given run id (found in MLFlow UI or through search API)
-
-    :param run_id: the id of the run to deploy. Will default to the current
-        run id.
-    :param app_name: the name of the app in sagemaker once deployed
-    :param region: the sagemaker region to deploy to (us-east-2,
-        us-west-1, us-west-2, eu-central-1 supported)
-    :param instance_type: the EC2 Sagemaker instance type to deploy on
-        (ml.m4.xlarge supported)
-    :param instance_count: the number of instances to load balance predictions
-        on
-    :param deployment_mode: the method to deploy; create=application will fail
-        if an app with the name specified already exists; replace=application
-        in sagemaker will be replaced with this one if app already exists;
-        add=add the specified model to a preexisting application (not recommended)
-    """
-    # get run from mlflow
-    print("Processing...")
-
-    request_payload = {
-        'handler_name': 'DEPLOY_AWS', 'run_id': run_id,
-        'region': region, 'user': __get_active_user(),
-        'instance_type': instance_type, 'instance_count': instance_count,
-        'deployment_mode': deployment_mode, 'app_name': app_name
-    }
-
-    return __initiate_job(request_payload, '/api/rest/initiate')
- - -
-@_mlflow_patch('deploy_azure')
-def _deploy_azure(endpoint_name: str, resource_group: str, workspace: str, run_id: str, region: str = 'East US',
-                  cpu_cores: float = 0.1, allocated_ram: float = 0.5, model_name: str = None):
-    """
-    Deploy a given run to AzureML.
-
-    :param endpoint_name: (str) the name of the endpoint in AzureML when deployed to
-        Azure Container Services. Must be unique.
-    :param resource_group: (str) Azure Resource Group for model. Automatically created if
-        it doesn't exist.
-    :param workspace: (str) the AzureML workspace to deploy the model under.
-        Will be created if it doesn't exist
-    :param run_id: (str) if specified, will deploy a previous run (
-        must have a spark model logged). Otherwise, will default to the active run
-    :param region: (str) AzureML Region to deploy to: Can be East US, East US 2, Central US,
-        West US 2, North Europe, West Europe or Japan East
-    :param cpu_cores: (float) Number of CPU Cores to allocate to the instance.
-        Can be fractional. Default=0.1
-    :param allocated_ram: (float) amount of RAM, in GB, allocated to the container.
-        Default=0.5
-    :param model_name: (str) If specified, this will be the name of the model in AzureML.
-        Otherwise, the model name will be randomly generated.
-    """
-    request_payload = {
-        'handler_name': 'DEPLOY_AZURE',
-        'endpoint_name': endpoint_name,
-        'resource_group': resource_group,
-        'workspace': workspace,
-        'region': region,
-        'run_id': run_id,
-        'cpu_cores': cpu_cores,
-        'allocated_ram': allocated_ram,
-        'model_name': model_name
-    }
-    return __initiate_job(request_payload, '/api/rest/initiate')
- - -
[docs]@_mlflow_patch('deploy_kubernetes') -def _deploy_kubernetes(run_id: str, service_port: int = 80, - base_replicas: int = 1, autoscaling_enabled: bool = False, - max_replicas: int = 2, target_cpu_utilization: int = 50, - disable_nginx: bool = False, gunicorn_workers: int = 1, - resource_requests_enabled: bool = False, resource_limits_enabled: bool = False, - cpu_request: int = 0.5, cpu_limit: int = 1, memory_request: str = "512Mi", - memory_limit: str = "2048Mi", expose_external: bool = False): - """ - Deploy model associated with the specified or active run to Kubernetes cluster.\n - - Creates the Following Resources: - * Pod (with your model loaded in via an init container) - * ReplicaSet (configured to base replicas specified) - * HPA (if autoscaling is enabled) - * Service (model-<run id>.<db namespace>.svc.cluster.local:<service port specified>) - * Deployment - * Ingress (if expose enable is set to True) (on <your cluster url>/<run id>/invocations) - - :param run_id: specified if overriding the active run - :param service_port: (default 80) the port that the prediction service runs on internally in the cluster - :param autoscaling_enabled: (default False) whether or not to provision a Horizontal Pod Autoscaler to provision - pods dynamically - :param max_replicas: (default 2) [USED IF AUTOSCALING ENABLED] max number of pods to scale up to - :param target_cpu_utilization: (default 50) [USED IF AUTOSCALING ENABLED] the cpu utilization to scale up to - new pods on - :param disable_nginx: (default False) disable nginx inside of the pod (recommended) - :param gunicorn_workers: (default 1) [MUST BE 1 FOR SPARK ML models TO PREVENT OOM] Number of web workers. - :param resource_requests_enabled: (default False) whether or not to enable Kubernetes resource requests - :param resource_limits_enabled: (default False) whether or not to enable Kubernetes resource limits - :param cpu_request: (default 0.5) [USED IF RESOURCE REQUESTS ENABLED] number of CPU to request - :param cpu_limit: (default 1) [USED IF RESOURCE LIMITS ENABLED] number of CPU to cap at - :param memory_request: (default 512Mi) [USED IF RESOURCE REQUESTS ENABLED] amount of RAM to request - :param memory_limit: (default 2048Mi) [USED IF RESOURCE LIMITS ENABLED] amount of RAM to limit at - :param expose_external: (default False) whether or not to create Ingress resource to deploy outside of the cluster. - :NOTE: - .. code-block:: text - - It is not recommended to create an Ingress resource using this parameter, as your model will be - deployed with no authorization (and public access). Instead, it is better to deploy your model - as an internal service, and deploy an authentication proxy (such as https://github.com/oauth2-proxy/oauth2-proxy) - to proxy traffic to your internal service after authenticating. - """ - print("Processing...") - - payload = { - 'run_id': run_id or mlflow.active_run().info.run_uuid, 'handler_name': 'DEPLOY_KUBERNETES', - 'service_port': service_port, 'base_replicas': base_replicas, 'autoscaling_enabled': autoscaling_enabled, - 'max_replicas': max_replicas, 'target_cpu_utilization': target_cpu_utilization, - 'disable_nginx': disable_nginx, 'gunicorn_workers': gunicorn_workers, - 'resource_requests_enabled': resource_requests_enabled, 'memory_limit': memory_limit, - 'resource_limits_enabled': resource_limits_enabled, 'cpu_request': cpu_request, 'cpu_limit': cpu_limit, - 'memory_request': memory_request, 'expose_external': expose_external - } - - return __initiate_job(payload, '/api/rest/initiate')
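A minimal deployment sketch (the run id is a placeholder; every other parameter keeps its default):

.. code-block:: python

    job_id = mlflow.deploy_kubernetes('some_run_id', autoscaling_enabled=True, max_replicas=3)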
- -
[docs]@_mlflow_patch('undeploy_kubernetes') -def _undeploy_kubernetes(run_id: str): - """ - Removes a model deployment from Kubernetes. This will delete the Kubernetes deployment and record the event - - :param run_id: specified if overriding the active run - """ - print("Processing...") - - payload = { - 'run_id': run_id or mlflow.active_run().info.run_uuid, 'handler_name': 'UNDEPLOY_KUBERNETES' - } - - return __initiate_job(payload, '/api/rest/initiate')
- - -
[docs]@_mlflow_patch('deploy_database') -def _deploy_db(db_schema_name: str, - db_table_name: str, - run_id: str, - reference_table: Optional[str] = None, - reference_schema: Optional[str] = None, - primary_key: Optional[Dict[str, str]] = None, - df: Optional[Union[SparkDF, PandasDF]] = None, - create_model_table: Optional[bool] = True, - model_cols: Optional[List[str]] = None, - classes: Optional[List[str]] = None, - library_specific: Optional[Dict[str, str]] = None, - replace: Optional[bool] = False, - max_batch_size: Optional[int] = 10000, - verbose: bool = False) -> None: - """ - Deploy a trained (currently Spark, Sklearn, Keras or H2O) model to the Database. - This either creates a new table or alters an existing table in the database (depending on parameters passed) - - :param db_schema_name: (str) the schema name to deploy to. - :param db_table_name: (str) the table name to deploy to. - :param run_id: (str) The run_id to deploy the model on. The model associated with this run will be deployed - :param reference_table: (str) if creating a new table, an alternative to specifying a dataframe is specifying a - reference table. The column schema of the reference table will be used to create the new table (e.g. MYTABLE)\n - :param reference_schema: (str) the db schema for the reference table. - :param primary_key: (Dict) Dictionary of column + SQL datatype to use for the primary/composite key. \n - * If you are deploying to a table that already exists, it must already have a primary key, and this parameter will be ignored. \n - * If you are creating the table in this function, you MUST pass in a primary key - :param df: (Spark or Pandas DF) The dataframe used to train the model \n - | NOTE: The columns in this df are the ones that will be used to create the table unless specified by model_cols - :param create_model_table: Whether or not to create the table from the dataframe. Default True. This - Will ONLY be used if the table does not exist and a dataframe is passed in - :param model_cols: (List[str]) The columns from the table to use for the model. If None, all columns in the table - will be passed to the model. If specified, the columns will be passed to the model - IN THAT ORDER. The columns passed here must exist in the table. - :param classes: (List[str]) The classes (prediction labels) for the model being deployed.\n - NOTE: If not supplied, the table will have default column names for each class - :param library_specific: (dict{str: str}) Prediction options for certain model types: \n - * Certain model types (specifically Keras and Scikit-learn) support prediction arguments. Here are the options that we support: - * Scikit-learn - * predict_call: determines function call for the model. Available: 'predict' (default), 'predict_proba', 'transform' - * predict_args: passed into the predict call (for Gaussian and Bayesian models). Available: 'return_std', 'return_cov' - * Keras - * pred_threshold: prediction threshold for Keras binary classification models. Note: If the model type is Keras, the output layer has 1 node, and pred_threshold is None, you will NOT receive a class prediction, only the output of the final layer (like model.predict()). If you want a class prediction for your binary classification problem, you MUST pass in a threshold. - If the model does not support these parameters, they will be ignored. - :param max_batch_size: (int) the max size for the database to batch groups of rows for prediction. Default 10,000. 
-    :param replace: (bool) whether or not to replace a currently existing model. This param is not yet implemented
-    :return: None\n
-
-    This function creates the following IF you are creating a table from the dataframe \n
-    * The model table where run_id is the run_id passed in. This table will have a column for each feature in the feature vector. It will also contain:\n
-        * USER which is the current user who made the request
-        * EVAL_TIME which is the CURRENT_TIMESTAMP
-        * the PRIMARY KEY column(s) passed in
-        * PREDICTION. The prediction of the model. If the :classes: param is not filled in, this will be default values for classification models
-        * A column for each class of the predictor with the value being the probability/confidence of the model if applicable\n
-    IF you are deploying to an existing table, the table will be altered to include the columns above. \n
-    :NOTE:
-        .. code-block:: text
-
-            The columns listed above are default value columns.\n
-            This means that on a SQL insert into the table, \n
-            you do not need to reference or insert values into them.\n
-            They are automatically taken care of.\n
-            Set verbose=True in the function call for more information
-
-    The following will also be created for all deployments: \n
-    * A trigger that runs on (after) insertion to the data table that runs an INSERT into the prediction table, \
-        calling the PREDICT function, passing in the row of data as well as the schema of the dataset, and the run_id of the model to run \n
-    * A trigger that runs on (after) insertion to the prediction table that calls an UPDATE to the row inserted, \
-        parsing the prediction probabilities and filling in proper column values
-    """
-    print("Deploying model to database...")
-
-    # database converts all object names to upper case, so we need to as well in our metadata
-    db_schema_name = db_schema_name.upper()
-    db_table_name = db_table_name.upper()
-
-    # ~ Backwards Compatibility ~
-    if verbose:
-        print("Deprecated Parameter 'verbose'. Use mlflow.watch_job(<job id>) or mlflow.fetch_logs(<job id>) to get"
-              " verbose output. Ignoring...", file=stderr)
-
-    if primary_key is not None:
-        if isinstance(primary_key, list):
-            print("Passing in primary keys as a list of tuples is deprecated. Use dictionary {column name: type}",
-                  file=stderr)
-            primary_key = dict(primary_key)
-
-    if df is not None:
-        if isinstance(df, PandasDF):
-            _check_for_splice_ctx()  # We will need a splice context to convert to sparkDF
-            df_schema = mlflow._splice_context.pandasToSpark(df).schema.json()
-        elif isinstance(df, SparkDF):
-            df_schema = df.schema.json()
-        else:
-            raise SpliceMachineException("Dataframe must either be a Pandas or Spark Dataframe")
-    else:
-        df_schema = None
-
-    payload = {
-        'db_table': db_table_name, 'db_schema': db_schema_name, 'run_id': run_id or mlflow.active_run().info.run_uuid,
-        'primary_key': primary_key, 'df_schema': df_schema, 'create_model_table': create_model_table,
-        'model_cols': model_cols, 'classes': classes, 'library_specific': library_specific, 'replace': replace,
-        'handler_name': 'DEPLOY_DATABASE', 'reference_table': reference_table, 'reference_schema': reference_schema,
-        'max_batch_size': max_batch_size
-    }
-
-    return __initiate_job(payload, '/api/rest/initiate')
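A hedged sketch of a database deployment that creates a new model table (schema, table, key, and classes are illustrative):

.. code-block:: python

    job_id = mlflow.deploy_db(
        'MYSCHEMA', 'MODEL_TABLE', run_id='some_run_id',
        df=train_df,                        # the dataframe used to train the model
        primary_key={'ROW_ID': 'BIGINT'},   # required when creating the table
        classes=['no_default', 'default']
    )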
- - -
[docs]def __get_logs(job_id: int): - """ - Retrieve the logs associated with the specified job id - """ - request = requests.post( - get_pod_uri("mlflow", 5003, _testing=_TESTING) + "/api/rest/logs", - json={"task_id": job_id}, auth=mlflow._basic_auth - ) - if not request.ok: - raise SpliceMachineException(f"Could not retrieve the logs for job {job_id}: {request.status_code}") - return request.json()['logs']
- -
-@_mlflow_patch('watch_job')
-def _watch_job(job_id: int):
-    """
-    Stream the logs in real time to standard out
-    of a Job
-    :param job_id: the job id to watch (returned after executing an operation)
-    NOTE: If the job being watched fails, this function will throw a SpliceMachineException
-    """
-    previous_lines = []
-    warn = False  # If there were any warnings from the log, we want to notify the user explicitly
-    while True:
-        logs_retrieved = __get_logs(job_id)
-        if '' in logs_retrieved:  # guard: list.remove raises ValueError if no empty line exists
-            logs_retrieved.remove('')
-        log_idx = len(logs_retrieved)
-        # searching from the end is faster, because unless the logs double in the interval, it will be closer
-        for log_idx in range(len(logs_retrieved) - 1, -1, -1):
-            if logs_retrieved[log_idx] in previous_lines:
-                break
-
-        idx = log_idx + 1 if log_idx else log_idx  # First time getting logs, go to 0th index, else log_idx+1
-        for n in logs_retrieved[idx:]:
-            if 'WARNING' in n:
-                warnings.warn(n)
-                warn = True
-            print(f'\n{n}', end='')
-
-        previous_lines = copy.deepcopy(logs_retrieved)  # O(1) checking
-        previous_lines = previous_lines if previous_lines[-1] else previous_lines[:-1]  # Remove empty line
-        if 'TASK_COMPLETED' in previous_lines[-1]:  # Finishing Statement
-            # Check for a failure first, and raise an error if so
-            for log in reversed(previous_lines):
-                if 'ERROR' in log and 'Task Failed' in log:
-                    raise SpliceMachineException(
-                        'An error occurred in your Job. See the log above for more information'
-                    ) from None
-            if warn:
-                print('\n', 'Note! Your deployment had some warnings you should consider.')
-            return
-
-        time.sleep(1)
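For example, pairing a deployment with live log streaming (the job id comes from any deploy call above):

.. code-block:: python

    mlflow.watch_job(job_id)          # blocks, streaming logs; raises if the job fails
    logs = mlflow.fetch_logs(job_id)  # or read the full log list afterwards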
- - -
[docs]@_mlflow_patch('fetch_logs') -def _fetch_logs(job_id: int): - """ - Get the logs as an array - :param job_id: the job to get the logs for - """ - return __get_logs(job_id)
- - -
[docs]@_mlflow_patch('get_deployed_models') -def _get_deployed_models() -> PandasDF: - """ - Get the currently deployed models in the database - :return: Pandas df - """ - - return mlflow._splice_context.df( - """ - SELECT * FROM MLMANAGER.LIVE_MODEL_STATUS - """ - ).toPandas()
-
-
-def apply_patches():
-    """
-    Apply all the Gorilla Patches; \
-    All Gorilla Patches MUST be prefixed with '_' before their destination in MLflow
-    """
-    targets = [_register_feature_store, _register_splice_context, _lp, _lm, _timer, _log_artifact, _log_feature_transformations,
-               _log_model_params, _log_pipeline_stages, _log_model, _load_model, _download_artifact,
-               _start_run, _current_run_id, _current_exp_id, _deploy_aws, _deploy_azure, _deploy_db, _login_director,
-               _get_run_ids_by_name, _get_deployed_models, _deploy_kubernetes, _undeploy_kubernetes, _fetch_logs,
-               _watch_job, _end_run, _set_mlflow_uri, _remove_active_training_set]
-
-    for target in targets:
-        gorilla.apply(gorilla.Patch(mlflow, target.__name__.lstrip('_'), target, settings=_GORILLA_SETTINGS))
-
-
-def _set_mlflow_uri(uri):
-    """
-    Set the tracking uri for mlflow. Only needed if running outside of the Splice Machine K8s Cloud Service
-
-    :param uri: (str) the URL of your mlflow UI.
-    :return: None
-    """
-    global _CLIENT  # rebind the module-level client, not a local shadow
-    _CLIENT = mlflow.tracking.MlflowClient(tracking_uri=uri)
-    mlflow.client = _CLIENT
-    mlflow.set_tracking_uri(uri)
- - -def main(): - mlflow.set_tracking_uri(_TRACKING_URL) - apply_patches() - __try_auto_login() - - -main() -
\ No newline at end of file
diff --git a/docs/_build/html/_modules/splicemachine/notebook.html b/docs/_build/html/_modules/splicemachine/notebook.html
deleted file mode 100644
index 78a26573..00000000
--- a/docs/_build/html/_modules/splicemachine/notebook.html
+++ /dev/null
@@ -1,317 +0,0 @@
-splicemachine.notebook — Splice MLManager documentation
Source code for splicemachine.notebook

-import random
-from os import environ as env_vars
-from IPython import get_ipython
-from IPython.display import HTML, IFrame, display
-from pyspark import SparkContext
-from splicemachine import SpliceMachineException
-
-def _in_splice_compatible_env():
-    """
-    Determines if a user is using the Splice Machine managed notebooks or not
-
-    :return: Boolean if the user is using the Splice Environment
-    """
-    try:
-        from beakerx import TableDisplay
-        import ipywidgets
-    except ImportError:
-        return False
-    return get_ipython() is not None
-
-
-def run_sql(sql):
-    """
-    Runs a SQL statement over JDBC from the Splice Machine managed Jupyter notebook environment. If you are running
-    outside of the Splice Jupyter environment, you must have a sql kernel and magic set up and configured.
-    :param sql: The SQL to execute
-    """
-    if not get_ipython():
-        raise SpliceMachineException("You don't seem to have IPython available. This function is only available"
-                                     " in an IPython environment with a configured %%sql magic kernel. Consider using"
-                                     " the managed Splice Machine notebook environment")
-    get_ipython().run_cell_magic('sql', '', sql)
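A small sketch (assumes the %%sql magic is configured, as it is in the managed environment):

.. code-block:: python

    from splicemachine.notebook import run_sql

    run_sql('SELECT COUNT(*) FROM MYSCHEMA.MYTABLE')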
- -
[docs]def hide_toggle(toggle_next=False): - """ - Function to add a toggle at the bottom of Jupyter Notebook cells to allow the entire cell to be collapsed. - - :param toggle_next: Bool determine if the toggle should hide the current cell or the next cell - - """ - this_cell = """$('div.cell.code_cell.rendered.selected')""" - next_cell = this_cell + '.next()' - - toggle_text = 'Toggle show/hide' # text shown on toggle link - target_cell = this_cell # target cell to control with toggle - js_hide_current = '' # bit of JS to permanently hide code in current cell (only when toggling next cell) - - if toggle_next: - target_cell = next_cell - toggle_text += ' next cell' - js_hide_current = this_cell + '.find("div.input").hide();' - - js_f_name = 'code_toggle_{}'.format(str(random.randint(1, 2 ** 64))) - - html = """ - <script> - function {f_name}() {{ - {cell_selector}.find('div.input').toggle(); - }} - {js_hide_current} - </script> - <a href="javascript:{f_name}()"><button style='color:black'>{toggle_text}</button></a> - """.format( - f_name=js_f_name, - cell_selector=target_cell, - js_hide_current=js_hide_current, - toggle_text=toggle_text - ) - - return HTML(html)
- -
[docs]def get_mlflow_ui(experiment_id=None, run_id=None): - """ - Display the MLFlow UI as an IFrame - - :param experiment_id: (int or str) Optionally the experiment id to load into view - :param run_id: (str) Optionally the run_id to load into view. If you pass in a run_id you must pass an experiment_id - :return: (IFrame) An IFrame of the MLFlow UI - """ - if run_id and not experiment_id: - raise Exception('If you are passing in a run id, you must also provide an experiment id!') - experiment_id = experiment_id or 0 - mlflow_url = '/mlflow/#/experiments/{}'.format(experiment_id) - if run_id: - mlflow_url += '/runs/{}'.format(run_id) - display(HTML('<font size=\"+1\"><a target=\"_blank\" href={}>MLFlow UI</a></font>'.format(mlflow_url))) - return IFrame(src=mlflow_url, width='100%', height='700px')
- -
[docs]def get_spark_ui(port=None, spark_session=None): - """ - Display the Spark Jobs UI as an IFrame at a specific port - - :param port: (int or str) The port of the desired spark session - :param spark_session: (SparkSession) Optionally the Spark Session associated with the desired UI - :return: - """ - if port: - pass - elif spark_session: - port = spark_session.sparkContext.uiWebUrl.split(':')[-1] - elif SparkContext._active_spark_context: - port = SparkContext._active_spark_context.uiWebUrl.split(':')[-1] - else: - raise Exception('No parameters passed and no active Spark Session found.\n' - 'Either pass in the active Spark Session into the "spark_session" parameter or the port of that session into the "port" parameter.\n'\ - 'You can find the port by running spark.sparkContext.uiWebUrl and taking the number after the \':\'') - user = env_vars.get('JUPYTERHUB_USER','user') - display(HTML(f'<font size=\"+1\"><a target=\"_blank\" href=/splicejupyter/user/{user}/sparkmonitor/{port}>Spark UI</a></font>')) - return IFrame(src=f'/splicejupyter/user/{user}/sparkmonitor/{port}', width='100%', height='700px')
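For example, from a notebook cell (the spark variable is an assumed active SparkSession):

.. code-block:: python

    from splicemachine.notebook import get_spark_ui

    get_spark_ui(spark_session=spark)  # or get_spark_ui(port=4040)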
\ No newline at end of file
diff --git a/docs/_build/html/_modules/splicemachine/spark/context.html b/docs/_build/html/_modules/splicemachine/spark/context.html
deleted file mode 100644
index 1fd2921d..00000000
--- a/docs/_build/html/_modules/splicemachine/spark/context.html
+++ /dev/null
@@ -1,1073 +0,0 @@
-splicemachine.spark.context — Splice MLManager documentation

Source code for splicemachine.spark.context

-"""
-Copyright 2021 Splice Machine, Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-from __future__ import print_function
-
-import os
-import re
-from string import punctuation as bad_chars
-
-from py4j.java_gateway import java_import
-from pyspark.sql import DataFrame
-from pyspark.sql.types import _parse_datatype_json_string, StringType
-
-from splicemachine.spark.constants import CONVERSIONS
-from splicemachine import SpliceMachineException
-
-
-
-class PySpliceContext:
-    """
-    This class implements a SpliceMachineContext object (similar to the SparkContext object)
-    """
-    _spliceSparkPackagesName = "com.splicemachine.spark.splicemachine.*"
-
-    def _splicemachineContext(self):
-        return self.jvm.com.splicemachine.spark.splicemachine.SplicemachineContext(self.jdbcurl)
-
-    def __init__(self, sparkSession, JDBC_URL=None, _unit_testing=False):
-        """
-        :param sparkSession: (SparkSession) A SparkSession object for talking to Spark
-        :param JDBC_URL: (string) The JDBC URL Connection String for your Splice Machine Cluster
-        """
-
-        if JDBC_URL:
-            self.jdbcurl = JDBC_URL
-        else:
-            try:
-                self.jdbcurl = os.environ['BEAKERX_SQL_DEFAULT_JDBC']
-            except KeyError as e:
-                raise KeyError(
-                    "Could not locate JDBC URL. If you are not running on the cloud service, "
-                    "please specify the JDBC_URL=<some url> keyword argument in the constructor"
                )
-
-        self._unit_testing = _unit_testing
-
-        if not _unit_testing:  # Private Internal Argument to Override Using JVM
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = self.spark_sql_context._sc._jvm
-            java_import(self.jvm, self._spliceSparkPackagesName)
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions")
-            java_import(
-                self.jvm, "org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils")
-            java_import(self.jvm, "scala.collection.JavaConverters._")
-            java_import(self.jvm, "com.splicemachine.derby.impl.*")
-            java_import(self.jvm, 'org.apache.spark.api.python.PythonUtils')
-            self.jvm.com.splicemachine.derby.impl.SpliceSpark.setContext(
-                self.spark_sql_context._jsc)
-            self.context = self._splicemachineContext()
-
-        else:
-            from .tests.mocked import MockedScalaContext
-            self.spark_sql_context = sparkSession._wrapped
-            self.spark_session = sparkSession
-            self.jvm = ''
-            self.context = MockedScalaContext(self.jdbcurl)
[docs] def columnNamesCaseSensitive(self, caseSensitive): - """ - Sets whether column names should be treated as case sensitive. - - :param caseSensitive: (boolean) True for case sensitive, False for not case sensitive - """ - self.context.columnNamesCaseSensitive(caseSensitive)
- -
[docs] def toUpper(self, dataframe): - """ - Returns a dataframe with all of the columns in uppercase - - :param dataframe: (Dataframe) The dataframe to convert to uppercase - """ - for s in dataframe.schema: - s.name = s.name.upper() - # You need to re-generate the dataframe for the capital letters to take effect - return dataframe.rdd.toDF(dataframe.schema)
- -
-    def toLower(self, dataframe):
-        """
-        Returns a dataframe with all of the columns in lowercase
-
-        :param dataframe: (Dataframe) The dataframe to convert to lowercase
-        """
-        for s in dataframe.schema:
-            s.name = s.name.lower()
-        # You need to re-generate the dataframe for the case change to take effect
-        return dataframe.rdd.toDF(dataframe.schema)
- - -
[docs] def replaceDataframeSchema(self, dataframe, schema_table_name): - """ - Returns a dataframe with all column names replaced with the proper string case from the DB table - - :param dataframe: (Dataframe) A dataframe with column names to convert - :param schema_table_name: (str) The schema.table with the correct column cases to pull from the database - :return: (DataFrame) A Spark DataFrame with the replaced schema - """ - schema = self.getSchema(schema_table_name) - # Fastest way to replace the column case if changed - dataframe = dataframe.rdd.toDF(schema) - return dataframe
- -
[docs] def fileToTable(self, file_path, schema_table_name, primary_keys=None, drop_table=False, **pandas_args): - """ - Load a file from the local filesystem or from a remote location and create a new table - (or recreate an existing table), and load the data from the file into the new table. Any file_path that can be - read by pandas should work here. - - :param file_path: The local file to load - :param schema_table_name: The schema.table name - :param primary_keys: List[str] of primary keys for the table. Default None - :param drop_table: Whether or not to drop the table. If this is False and the table already exists, the - function will fail. Default False - :param pandas_args: Extra parameters to be passed into the pd.read_csv function. Any parameters accepted - in pd.read_csv will work here - :return: None - """ - import pandas as pd - pdf = pd.read_csv(file_path, **pandas_args) - df = self.pandasToSpark(pdf) - self.createTable(df, schema_table_name, primary_keys=primary_keys, drop_table=drop_table, to_upper=True) - self.insert(df, schema_table_name, to_upper=True)
- -
-    def pandasToSpark(self, pdf):
-        """
-        Convert a Pandas DF to Spark, and try to manage NaNs from Pandas in case of failure. Spark cannot handle
-        Pandas NaN values in String columns (it considers NaN a number), so we replace each occurrence with a
-        temporary value and then convert it back to null after it becomes a Spark DF
-
-        :param pdf: The Pandas dataframe
-        :return: The Spark DF
-        """
-        try:  # Try to create the dataframe as it exists
-            return self.spark_session.createDataFrame(pdf)
-        except TypeError:
-            p_df = pdf.copy()
-            # This means there was an NaN conversion error
-            from pyspark.sql.functions import udf
-            for c in p_df.columns:  # Replace non numeric/time columns with a custom null value
-                if p_df[c].dtype not in ('int64', 'float64', 'datetime64[ns]'):
-                    p_df[c].fillna('Splice_Temp_NA', inplace=True)
-            spark_df = self.spark_session.createDataFrame(p_df)
-            # Convert that custom null value back to null after converting to a spark dataframe
-            null_replace_udf = udf(lambda name: None if name == "Splice_Temp_NA" else name, StringType())
-            for field in spark_df.schema:
-                if field.dataType == StringType():
-                    spark_df = spark_df.withColumn(field.name, null_replace_udf(spark_df[field.name]))
-                spark_df = spark_df.withColumnRenamed(field.name, re.sub(r'[' + bad_chars + ' ]', '_', field.name))
-            # Replace NaN numeric columns with null
-            spark_df = spark_df.replace(float('nan'), None)
-            return spark_df
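A short sketch of the NaN-safe conversion (the pandas frame is illustrative):

.. code-block:: python

    import pandas as pd

    pdf = pd.DataFrame({'NAME': ['a', None], 'VAL': [1.0, float('nan')]})
    sdf = splice.pandasToSpark(pdf)  # string NaNs come back as real nulls
    sdf.show()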
- - -
[docs] def getConnection(self): - """ - Return a connection to the database - """ - return self.context.getConnection()
- -
[docs] def tableExists(self, schema_and_or_table_name, table_name=None): - """ - Check whether or not a table exists - - :Example: - .. code-block:: python - - splice.tableExists('schemaName.tableName')\n - # or\n - splice.tableExists('schemaName', 'tableName') - - :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name - :return: (bool) whether or not the table exists - """ - if table_name: - return self.context.tableExists(schema_and_or_table_name, table_name) - else: - return self.context.tableExists(schema_and_or_table_name)
- -
[docs] def dropTable(self, schema_and_or_table_name, table_name=None): - """ - Drop a specified table. - - :Example: - .. code-block:: python - - splice.dropTable('schemaName.tableName') \n - # or\n - splice.dropTable('schemaName', 'tableName') - - :param schema_and_or_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name - :return: None - """ - if table_name: - self.context.dropTable(schema_and_or_table_name, table_name) - else: - self.context.dropTable(schema_and_or_table_name)
- -
[docs] def df(self, sql, to_lower=False): - """ - Return a Spark Dataframe from the results of a Splice Machine SQL Query - - :Example: - .. code-block:: python - - df = splice.df('SELECT * FROM MYSCHEMA.TABLE1 WHERE COL2 > 3') - - :param sql: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :param to_lower: Whether or not to convert column names from the dataframe to lowercase - :return: (Dataframe) A Spark DataFrame containing the results - """ - df = DataFrame(self.context.df(sql), self.spark_sql_context) - return self.toLower(df) if to_lower else df
- -
-    def insert(self, dataframe, schema_table_name, to_upper=True, create_table=False):
-        """
-        Insert a dataframe into a table (schema.table).
-
-        :param dataframe: (Dataframe) The dataframe you would like to insert
-        :param schema_table_name: (str) The table in which you would like to insert the DF
-        :param to_upper: (bool) If the dataframe columns should be converted to uppercase before table creation
-            If False, the table will be created with lower case columns. [Default True]
-        :param create_table: If the table does not exist at the time of the call, the table will first be created
-        :return: None
-        """
-        if to_upper:
-            dataframe = self.toUpper(dataframe)
-        if not self.tableExists(schema_table_name):
-            if not create_table:
-                raise SpliceMachineException("Table does not exist. Create the table first or set create_table=True "
-                                             "in this function, or call createAndInsertTable")
-            else:
-                print('Table does not yet exist, creating table... ', end='')
-                self.createTable(dataframe, schema_table_name, to_upper=to_upper)
-                print('Done.')
-        self.context.insert(dataframe._jdf, schema_table_name)
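A minimal round trip with df and insert (names are illustrative; splice is a PySpliceContext):

.. code-block:: python

    df = splice.df('SELECT * FROM MYSCHEMA.TABLE1 WHERE COL2 > 3')
    splice.insert(df, 'MYSCHEMA.TABLE1_COPY', create_table=True)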
- -
[docs] def insertWithStatus(self, dataframe, schema_table_name, statusDirectory, badRecordsAllowed): - """ - Insert a dataframe into a table (schema.table) while tracking and limiting records that fail to insert. - The status directory and number of badRecordsAllowed allow for duplicate primary keys to be - written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written - to the status directory. - - :param dataframe: (Dataframe) The dataframe you would like to insert - :param schema_table_name: (str) The table in which you would like to insert the dataframe - :param statusDirectory: (str) The status directory where the bad records file will be created - :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited - :return: None - """ - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - self.context.insert(dataframe._jdf, schema_table_name, statusDirectory, badRecordsAllowed)
- -
[docs] def insertRdd(self, rdd, schema, schema_table_name): - """ - Insert an rdd into a table (schema.table) - - :param rdd: (RDD) The RDD you would like to insert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to insert the RDD - :return: None - """ - self.insert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def insertRddWithStatus(self, rdd, schema, schema_table_name, statusDirectory, badRecordsAllowed): - """ - Insert an rdd into a table (schema.table) while tracking and limiting records that fail to insert. \ - The status directory and number of badRecordsAllowed allow for duplicate primary keys to be \ - written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written \ - to the status directory. - - :param rdd: (RDD) The RDD you would like to insert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to insert the dataframe - :param statusDirectory: (str) The status directory where the bad records file will be created - :param badRecordsAllowed: (int) The number of bad records allowed. -1 for unlimited - :return: None - """ - self.insertWithStatus( - self.createDataFrame(rdd, schema), - schema_table_name, - statusDirectory, - badRecordsAllowed - )
- -
[docs] def upsert(self, dataframe, schema_table_name): - """ - Upsert the data from a dataframe into a table (schema.table). - If triggers fail when calling upsert, use the mergeInto function instead of upsert. - - :param dataframe: (Dataframe) The dataframe you would like to upsert - :param schema_table_name: (str) The table in which you would like to upsert the dataframe - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - self.context.upsert(dataframe._jdf, schema_table_name)
- -
[docs] def upsertWithRdd(self, rdd, schema, schema_table_name): - """ - Upsert the data from an RDD into a table (schema.table). - If triggers fail when calling upsertWithRdd, use the mergeIntoWithRdd function instead of upsertWithRdd. - - :param rdd: (RDD) The RDD you would like to upsert - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to upsert the RDD - :return: None - """ - self.upsert( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def mergeInto(self, dataframe, schema_table_name): - """ - Rows in the dataframe whose primary key is not in schemaTableName will be inserted into the table; - rows in the dataframe whose primary key is in schemaTableName will be used to update the table. - - This implementation differs from upsert in a way that allows triggers to work. - - :param dataframe: (Dataframe) The dataframe you would like to merge in - :param schema_table_name: (str) The table in which you would like to merge in the dataframe - :return: None - """ - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - self.context.mergeInto(dataframe._jdf, schema_table_name)
- -
[docs] def mergeIntoWithRdd(self, rdd, schema, schema_table_name): - """ - Rows in the rdd whose primary key is not in schemaTableName will be inserted into the table; - rows in the rdd whose primary key is in schemaTableName will be used to update the table. - - This implementation differs from upsertWithRdd in a way that allows triggers to work. - - :param rdd: (RDD) The RDD you would like to merge in - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) The table in which you would like to merge in the RDD - :return: None - """ - self.mergeInto( - self.createDataFrame(rdd, schema), - schema_table_name - )
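A quick sketch of the upsert/merge pattern (hypothetical names; `splice` is assumed to be a configured context):

```
# upsert is the fast path
splice.upsert(changed_rows_df, 'MYSCHEMA.CUSTOMERS')

# if the table has triggers that fail under upsert, merge instead;
# the semantics are the same but triggers keep working
splice.mergeInto(changed_rows_df, 'MYSCHEMA.CUSTOMERS')
```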
- -
[docs] def delete(self, dataframe, schema_table_name): - """ - Delete records in a table based on joining by primary keys from the dataframe. - Be careful with column naming and case sensitivity. - - :param dataframe: (Dataframe) The dataframe containing the primary keys of the rows you would like to delete - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - self.context.delete(dataframe._jdf, schema_table_name)
- -
[docs] def deleteWithRdd(self, rdd, schema, schema_table_name): - """ - Delete records using an rdd based on joining by primary keys from the rdd. - Be careful with column naming and case sensitivity. - - :param rdd: (RDD) The RDD containing the primary keys you would like to delete from the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - self.delete( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def update(self, dataframe, schema_table_name): - """ - Update data from a dataframe for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param dataframe: (Dataframe) The dataframe you would like to update - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - # make sure column names are in the correct case - dataframe = self.replaceDataframeSchema(dataframe, schema_table_name) - self.context.update(dataframe._jdf, schema_table_name)
- -
[docs] def updateWithRdd(self, rdd, schema, schema_table_name): - """ - Update data from an rdd for a specified schema_table_name (schema.table). - The keys are required for the update and any other columns provided will be updated - in the rows. - - :param rdd: (RDD) The RDD you would like to use for updating the table - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Splice Machine Table - :return: None - """ - self.update( - self.createDataFrame(rdd, schema), - schema_table_name - )
- -
[docs] def getSchema(self, schema_table_name): - """ - Return the schema via JDBC. - - :param schema_table_name: (str) Table name - :return: (StructType) PySpark StructType representation of the table - """ - return _parse_datatype_json_string(self.context.getSchema(schema_table_name).json())
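For example (hypothetical table name):

```
# Fetch the table's schema as a PySpark StructType
schema = splice.getSchema('MYSCHEMA.MYTABLE')
print(schema.fieldNames())
```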
- -
[docs] def execute(self, query_string): - ''' - Execute a query over JDBC - - :Example: - .. code-block:: python - - splice.execute('DELETE FROM TABLE1 WHERE col2 > 3') - - :param query_string: (str) SQL Query (eg. DELETE FROM table1 WHERE col2 > 3) - :return: None - ''' - self.context.execute(query_string)
- -
[docs] def executeUpdate(self, query_string): - ''' - Execute a DML/DDL query (UPDATE, DELETE, DROP, etc.) - - :Example: - .. code-block:: python - - splice.executeUpdate('DROP TABLE table1') - - :param query_string: (string) SQL Query (eg. DROP TABLE table1) - :return: None - ''' - self.context.executeUpdate(query_string)
- -
[docs] def internalDf(self, query_string): - ''' - SQL to Dataframe translation (Lazy). Runs the query inside Splice Machine and sends the results to the Spark Adapter app - - :param query_string: (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3) - :return: (DataFrame) pyspark dataframe containing the result of query_string - ''' - return DataFrame(self.context.internalDf(query_string), self.spark_sql_context)
- -
[docs] def rdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - - :param schema_table_name: (string) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.df('select '+colnames+' from '+schema_table_name).rdd
- -
[docs] def internalRdd(self, schema_table_name, column_projection=None): - """ - Table with projections in Splice mapped to an RDD. - Runs the projection inside Splice Machine and sends the results to the Spark Adapter app as an rdd - - :param schema_table_name: (str) Accessed table - :param column_projection: (list of strings) Names of selected columns - :return: (RDD[Row]) the result of the projection - """ - if column_projection: - colnames = ', '.join(str(col) for col in column_projection) - else: - colnames = '*' - return self.internalDf('select '+colnames+' from '+schema_table_name).rdd
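A short sketch of both projection helpers (hypothetical names):

```
# Project two columns of a table straight into an RDD of Rows
rows = splice.rdd('MYSCHEMA.MYTABLE', column_projection=['COL1', 'COL2'])
print(rows.take(5))

# Same call, but the projection runs inside Splice Machine
rows = splice.internalRdd('MYSCHEMA.MYTABLE', column_projection=['COL1', 'COL2'])
```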
- -
[docs] def truncateTable(self, schema_table_name): - """ - Truncate a table - - :param schema_table_name: (str) the full table name in the format "schema.table_name" which will be truncated - :return: None - """ - self.context.truncateTable(schema_table_name)
- -
[docs] def analyzeSchema(self, schema_name): - """ - Analyze the schema - - :param schema_name: (str) The schema name for which stats info will be collected - :return: None - """ - self.context.analyzeSchema(schema_name)
- -
[docs] def analyzeTable(self, schema_table_name, estimateStatistics=False, samplePercent=10.0): - """ - Collect stats info on a table - - :param schema_table_name: full table name in the format of 'schema.table' - :param estimateStatistics: will use estimate statistics if True - :param samplePercent: the percentage of rows to be sampled. - :return: None - """ - self.context.analyzeTable(schema_table_name, estimateStatistics, float(samplePercent))
- -
[docs] def export(self, - dataframe, - location, - compression=False, - replicationCount=1, - fileEncoding=None, - fieldSeparator=None, - quoteCharacter=None): - """ - Export a dataFrame in CSV - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param replicationCount: (int) Replication used for HDFS write - :param fileEncoding: (str) fileEncoding or None, defaults to UTF-8 - :param fieldSeparator: (str) fieldSeparator or None, defaults to ',' - :param quoteCharacter: (str) quoteCharacter or None, defaults to '"' - :return: None - """ - self.context.export(dataframe._jdf, location, compression, replicationCount, - fileEncoding, fieldSeparator, quoteCharacter)
- -
[docs] def exportBinary(self, dataframe, location, compression, e_format='parquet'): - """ - Export a dataFrame in binary format - - :param dataframe: (DataFrame) - :param location: (str) Destination directory - :param compression: (bool) Whether to compress the output or not - :param e_format: (str) Binary format to be used, currently only 'parquet' is supported. [Default 'parquet'] - :return: None - """ - self.context.exportBinary(dataframe._jdf, location, compression, e_format)
- -
[docs] def bulkImportHFile(self, dataframe, schema_table_name, options): - """ - Bulk Import HFile from a dataframe into a schema.table - - :param dataframe: (DataFrame) - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: (int) Number of records imported - """ - optionsMap = self.jvm.java.util.HashMap() - for k, v in options.items(): - optionsMap.put(k, v) - return self.context.bulkImportHFile(dataframe._jdf, schema_table_name, optionsMap)
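A minimal sketch (the directory path is a placeholder for your own HDFS location):

```
# bulkImportDirectory is the one required key in the options dict
options = {'bulkImportDirectory': '/tmp/bulk_import'}
n = splice.bulkImportHFile(df, 'MYSCHEMA.MYTABLE', options)
print('{} records imported'.format(n))
```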
- -
[docs] def bulkImportHFileWithRdd(self, rdd, schema, schema_table_name, options): - """ - Bulk Import HFile from an rdd into a schema.table - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param options: (Dict) Dictionary of options to be passed to --splice-properties; bulkImportDirectory is required - :return: (int) Number of records imported - """ - return self.bulkImportHFile( - self.createDataFrame(rdd, schema), - schema_table_name, - options - )
- -
[docs] def splitAndInsert(self, dataframe, schema_table_name, sample_fraction): - """ - Sample the dataframe, split the table, and insert a dataFrame into a schema.table. - This corresponds to an insert into from select statement - - :param dataframe: (DataFrame) Input data - :param schema_table_name: (str) Full table name in the format of "schema.table" - :param sample_fraction: (float) A value between 0 and 1 that specifies the percentage of data in the dataFrame \ - that should be sampled to determine the splits. \ - For example, specify 0.005 if you want 0.5% of the data sampled. - :return: None - """ - self.context.splitAndInsert(dataframe._jdf, schema_table_name, float(sample_fraction))
- -
[docs] def createDataFrame(self, rdd, schema): - """ - Creates a dataframe from a given rdd and schema. - - :param rdd: (RDD) Input data - :param schema: (StructType) The schema of the rows in the RDD - :return: (DataFrame) The Spark DataFrame - """ - return self.spark_session.createDataFrame(rdd, schema)
- - def _generateDBSchema(self, dataframe, types={}): - """ - Generate the schema for create table - """ - # convert keys and values to uppercase in the types dictionary - types = dict((key.upper(), val) for key, val in types.items()) - db_schema = [] - # convert dataframe to have all uppercase column names - dataframe = self.toUpper(dataframe) - # i contains the name and pyspark datatype of the column - for i in dataframe.schema: - if i.name.upper() in types: - print('Column {} is of type {}'.format( - i.name.upper(), i.dataType)) - dt = types[i.name.upper()] - else: - dt = CONVERSIONS[str(i.dataType)] - db_schema.append((i.name.upper(), dt)) - - return db_schema - - def _getCreateTableSchema(self, schema_table_name, new_schema=False): - """ - Parse schema for new table; if it is needed, create it - """ - # try to get schema and table, else set schema to splice - if '.' in schema_table_name: - schema, table = schema_table_name.upper().split('.') - else: - schema = self.getConnection().getCurrentSchemaName() - table = schema_table_name.upper() - # check for new schema - if new_schema: - print('Creating schema {}'.format(schema)) - self.execute('CREATE SCHEMA {}'.format(schema)) - - return schema, table - - def _dropTableIfExists(self, schema_table_name, table_name=None): - """ - Drop table if it exists - """ - if self.tableExists(schema_and_or_table_name=schema_table_name, table_name=table_name): - print('Table exists. Dropping table') - self.dropTable(schema_and_or_table_name=schema_table_name, table_name=table_name) - -
[docs] def dropTableIfExists(self, schema_table_name, table_name=None): - """ - Drops a table if it exists - - :Example: - .. code-block:: python - - splice.dropTableIfExists('schemaName.tableName') \n - # or\n - splice.dropTableIfExists('schemaName', 'tableName') - - :param schema_table_name: (str) Pass the schema name in this param when passing the table_name param, - or pass schemaName.tableName in this param without passing the table_name param - :param table_name: (optional) (str) Table Name, used when schema_table_name contains only the schema name - :return: None - """ - self._dropTableIfExists(schema_table_name, table_name)
- - def _jstructtype(self, schema): - """ - Convert python StructType to java StructType - - :param schema: PySpark StructType - :return: Java Spark StructType - """ - return self.spark_session._jsparkSession.parseDataType(schema.json()) - -
[docs] def createTable(self, dataframe, schema_table_name, primary_keys=None, - create_table_options=None, to_upper=True, drop_table=False): - """ - Creates a schema.table (schema_table_name) from a dataframe - - :param dataframe: The Spark DataFrame to base the table off - :param schema_table_name: str The schema.table to create - :param primary_keys: List[str] the primary keys. Default None - :param create_table_options: str The additional table-level SQL options default None - :param to_upper: bool If the dataframe columns should be converted to uppercase before table creation. \ - If False, the table will be created with lower case columns. Default True - :param drop_table: bool whether to drop the table if it exists. Default False. If False and the table exists, the function will throw an exception - :return: None - - """ - if drop_table: - self._dropTableIfExists(schema_table_name) - if to_upper: - dataframe = self.toUpper(dataframe) - primary_keys = primary_keys if primary_keys else [] - self.createTableWithSchema(schema_table_name, dataframe.schema, - keys=primary_keys, create_table_options=create_table_options)
- -
[docs] def createTableWithSchema(self, schema_table_name, schema, keys=None, create_table_options=None): - """ - Creates a schema.table from a schema - - :param schema_table_name: str The schema.table to create - :param schema: (StructType) The schema that describes the columns of the table - :param keys: (List[str]) The primary keys. Default None - :param create_table_options: (str) The additional table-level SQL options. Default None - :return: None - """ - if keys: - keys_seq = self.jvm.PythonUtils.toSeq(keys) - else: - keys_seq = self.jvm.PythonUtils.toSeq([]) - self.context.createTable( - schema_table_name, - self._jstructtype(schema), - keys_seq, - create_table_options - )
- -
[docs] def createAndInsertTable(self, dataframe, schema_table_name, primary_keys=None, - create_table_options=None, to_upper=True): - """ - Creates a schema.table (schema_table_name) from a dataframe and inserts the dataframe into the table - - :param dataframe: The Spark DataFrame to base the table off - :param schema_table_name: str The schema.table to create - :param primary_keys: List[str] the primary keys. Default None - :param create_table_options: str The additional table-level SQL options default None - :param to_upper: bool If the dataframe columns should be converted to uppercase before table creation. \ - If False, the table will be created with lower case columns. Default True - :return: None - - """ - if self.tableExists(schema_table_name): - raise SpliceMachineException(f'Table {schema_table_name} already exists. Drop the table first or call ' - f'splice.insert with the provided dataframe') - self.createTable(dataframe, schema_table_name, primary_keys=primary_keys, - create_table_options=create_table_options, to_upper=to_upper) - self.insert(dataframe, schema_table_name, to_upper=to_upper)
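For example (hypothetical names; the call raises if the table already exists):

```
# Create MYSCHEMA.EVENTS from the dataframe's schema and load it in one call
splice.createAndInsertTable(df, 'MYSCHEMA.EVENTS', primary_keys=['EVENT_ID'])
```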
- -
[docs]class ExtPySpliceContext(PySpliceContext): - """ - This class implements a SplicemachineContext object from com.splicemachine.spark2 for use outside of the K8s Cloud Service - """ - _spliceSparkPackagesName = "com.splicemachine.spark2.splicemachine.*" - - def _splicemachineContext(self): - return self.jvm.com.splicemachine.spark2.splicemachine.SplicemachineContext( - self.jdbcurl, self.kafkaServers, self.kafkaPollTimeout) - - def __init__(self, sparkSession, JDBC_URL=None, kafkaServers='localhost:9092', - kafkaPollTimeout=20000, _unit_testing=False): - """ - :param JDBC_URL: (string) The JDBC URL Connection String for your Splice Machine Cluster - :param sparkSession: (sparkContext) A SparkSession object for talking to Spark - :param kafkaServers: (string) Comma-separated list of Kafka broker addresses in the form host:port - :param kafkaPollTimeout: (int) Number of milliseconds to wait when polling Kafka - """ - self.kafkaServers = kafkaServers - self.kafkaPollTimeout = kafkaPollTimeout - super().__init__(sparkSession, JDBC_URL, _unit_testing) -
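A construction sketch (the JDBC URL and broker list are placeholders for your own cluster, and the import path is assumed from the package layout):

```
from pyspark.sql import SparkSession
from splicemachine.spark.context import ExtPySpliceContext  # module path assumed

spark = SparkSession.builder.getOrCreate()
splice = ExtPySpliceContext(
    spark,
    JDBC_URL='jdbc:splice://host:1527/splicedb;user=me;password=pw',
    kafkaServers='kafka-1:9092,kafka-2:9092')
```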
[docs] def setAutoCommitOn(self): - """ - Turn auto-commit on. Auto-commit is on by default when the class is instantiated. - - :return: None - """ - self.context.setAutoCommitOn()
- -
[docs] def setAutoCommitOff(self): - """ - Turn auto-commit off. - - :return: None - """ - self.context.setAutoCommitOff()
- -
[docs] def autoCommitting(self): - """ - Check whether auto-commit is on. - - :return: (Boolean) True if auto-commit is on. - """ - return self.context.autoCommitting()
- -
[docs] def transactional(self): - """ - Check whether auto-commit is off. - - :return: (Boolean) True if auto-commit is off. - """ - return self.context.transactional()
- -
[docs] def commit(self): - """ - Commit the transaction. Throws exception if auto-commit is on. - - :return: None - """ - self.context.commit()
- -
[docs] def rollback(self): - """ - Rollback the transaction. Throws exception if auto-commit is on. - - :return: None - """ - self.context.rollback()
- -
[docs] def rollbackToSavepoint(self, savepoint): - """ - Rollback to the savepoint. Throws exception if auto-commit is on. - :param savepoint: (java.sql.Savepoint) A Savepoint. - - :return: None - """ - self.context.rollback(savepoint)
- -
[docs] def setSavepoint(self): - """ - Create and set an unnamed savepoint at the current point in the transaction. Throws exception if auto-commit is on. - - :return: (java.sql.Savepoint) The unnamed Savepoint - """ - return self.context.setSavepoint()
- -
[docs] def setSavepointWithName(self, name): - """ - Create and set a named savepoint at the current point in the transaction. Throws exception if auto-commit is on. - :param name: (String) The name of the Savepoint. - - :return: (java.sql.Savepoint) The named Savepoint - """ - return self.context.setSavepoint(name)
- -
[docs] def releaseSavepoint(self, savepoint): - """ - Release the savepoint. Throws exception if auto-commit is on. - :param savepoint: (java.sql.Savepoint) A Savepoint. - - :return: None - """ - self.context.releaseSavepoint(savepoint)
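The transaction helpers above compose in the usual way. A minimal sketch, assuming `splice` is an `ExtPySpliceContext` (these methods live on the spark2 adapter):

```
splice.setAutoCommitOff()
try:
    splice.execute("INSERT INTO MYSCHEMA.T VALUES (1)")  # hypothetical statements
    sp = splice.setSavepointWithName('after_first_insert')
    splice.execute("INSERT INTO MYSCHEMA.T VALUES (2)")
    splice.rollbackToSavepoint(sp)  # keeps row 1, discards row 2
    splice.commit()
except Exception:
    splice.rollback()
finally:
    splice.setAutoCommitOn()
```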
-
\ No newline at end of file diff --git a/docs/_build/html/_modules/splicemachine/stats.html b/docs/_build/html/_modules/splicemachine/stats.html deleted file mode 100644 index c84e3ec1..00000000 --- a/docs/_build/html/_modules/splicemachine/stats.html +++ /dev/null @@ -1,1567 +0,0 @@
Source code for splicemachine.stats

-import random
-import warnings
-from collections import OrderedDict, defaultdict
-from multiprocessing.pool import ThreadPool
-
-import graphviz
-import numpy as np
-import pandas as pd
-import pyspark_dist_explore as dist_explore
-import scipy.stats as st
-from IPython.display import HTML
-from numpy.linalg import eigh
-from pyspark import keyword_only
-from pyspark.ml import Pipeline, Transformer
-from pyspark.ml.classification import LogisticRegressionModel
-from pyspark.ml.evaluation import (BinaryClassificationEvaluator,
-                                   MulticlassClassificationEvaluator,
-                                   RegressionEvaluator)
-from pyspark.ml.feature import (PCA, Bucketizer, OneHotEncoder, StandardScaler,
-                                StringIndexer, VectorAssembler)
-from pyspark.ml.param.shared import HasInputCol, HasOutputCol, Param
-from pyspark.ml.tuning import (CrossValidator, CrossValidatorModel,
-                               ParamGridBuilder)
-from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
-from pyspark.sql import Row
-from pyspark.sql import functions as F
-from pyspark.sql.types import ArrayType, DoubleType, IntegerType, StringType
-from tqdm import tqdm
-
-
-
[docs]def get_confusion_matrix(spark, TP, TN, FP, FN): - """ - Creates and returns a confusion matrix - - :param spark: Spark Session - :param TP: True Positives - :param TN: True Negatives - :param FP: False Positives - :param FN: False Negatives - - :return: Spark DataFrame - """ - - row = Row('', 'True', 'False') - confusion_matrix = spark._wrapped.createDataFrame([row('True', TP, FN), - row('False', FP, TN)]) - return confusion_matrix
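For example, with an active SparkSession named `spark`:

```
# 2x2 table with actual labels as rows and predicted labels as columns
cm = get_confusion_matrix(spark, TP=40, TN=45, FP=5, FN=10)
cm.show()
```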
- - -
[docs]class SpliceBaseEvaluator(object): - """ - Base ModelEvaluator - """ - - def __init__(self, spark, evaluator, supported_metrics, predictionCol="prediction", - labelCol="label"): - """ - Constructor for SpliceBaseEvaluator - - :param spark: spark session - :param evaluator: evaluator class from spark - :param supported_metrics: supported metrics list - :param predictionCol: prediction column - :param labelCol: label column - """ - self.spark = spark - self.ev = evaluator - self.prediction_col = predictionCol - self.label = labelCol - self.supported_metrics = supported_metrics - self.avgs = defaultdict(list) - -
[docs] def input(self, predictions_dataframe): - """ - Input a dataframe of predictions to evaluate - - :param predictions_dataframe: the dataframe of predictions to evaluate - :return: None - """ - for metric in self.supported_metrics: - evaluator = self.ev( - labelCol=self.label, predictionCol=self.prediction_col, metricName=metric) - self.avgs[metric].append(evaluator.evaluate(predictions_dataframe)) - print("Current {metric}: {metric_val}".format(metric=metric, - metric_val=self.avgs - [metric][-1]))
- -
[docs] def get_results(self, as_dict=False): - """ - Get Results - - :param as_dict: whether to return the results as a dict or a Spark DataFrame - :return: dictionary if as_dict is True, otherwise a Spark DataFrame - """ - computed_avgs = {} - for key in self.avgs: - computed_avgs[key] = np.mean(self.avgs[key]) - - if as_dict: - return computed_avgs - - metrics_row = Row(*self.supported_metrics) - computed_row = metrics_row(*[float(computed_avgs[i]) for i in self.supported_metrics]) - return self.spark._wrapped.createDataFrame([computed_row])
- - -
[docs]class SpliceBinaryClassificationEvaluator(SpliceBaseEvaluator): - """ - A Splice Machine evaluator for Spark Binary Classification models. Implements functions from SpliceBaseEvaluator. - """ - def __init__(self, spark, predictionCol="prediction", labelCol="label", confusion_matrix=True): - self.avg_tp = [] - self.avg_tn = [] - self.avg_fn = [] - self.avg_fp = [] - self.confusion_matrix = confusion_matrix - - supported = ["areaUnderROC", "areaUnderPR", 'TPR', 'SPC', 'PPV', 'NPV', 'FPR', 'FDR', 'FNR', 'ACC', 'F1', 'MCC'] - SpliceBaseEvaluator.__init__(self, spark, BinaryClassificationEvaluator, supported, predictionCol=predictionCol, - labelCol=labelCol) - -
[docs] def input(self, predictions_dataframe): - """ - Evaluate actual vs Predicted in a dataframe - - :param predictions_dataframe: the dataframe containing the label and the prediction - """ - for metric in self.supported_metrics: - if metric in ['areaUnderROC', 'areaUnderPR']: - evaluator = self.ev(labelCol=self.label, rawPredictionCol=self.prediction_col, metricName=metric) - - self.avgs[metric].append(evaluator.evaluate(predictions_dataframe)) - print("Current {metric}: {metric_val}".format(metric=metric, - metric_val=self.avgs - [metric][-1])) - - pred_v_lab = predictions_dataframe.select(self.label, - self.prediction_col) # Select the actual and the predicted labels - - # Add confusion stats - self.avg_tp.append(pred_v_lab[(pred_v_lab[self.label] == 1) - & (pred_v_lab[self.prediction_col] == 1)].count()) - self.avg_tn.append(pred_v_lab[(pred_v_lab[self.label] == 0) - & (pred_v_lab[self.prediction_col] == 0)].count()) - # False positives are predicted 1 with actual label 0; false negatives are the reverse - self.avg_fp.append(pred_v_lab[(pred_v_lab[self.label] == 0) - & (pred_v_lab[self.prediction_col] == 1)].count()) - self.avg_fn.append(pred_v_lab[(pred_v_lab[self.label] == 1) - & (pred_v_lab[self.prediction_col] == 0)].count()) - - TP = np.mean(self.avg_tp) - TN = np.mean(self.avg_tn) - FP = np.mean(self.avg_fp) - FN = np.mean(self.avg_fn) - - self.avgs['TPR'].append(float(TP) / (TP + FN)) - # Specificity (SPC) is the true negative rate - self.avgs['SPC'].append(float(TN) / (TN + FP)) - self.avgs['TNR'].append(float(TN) / (TN + FP)) - self.avgs['PPV'].append(float(TP) / (TP + FP)) - self.avgs['NPV'].append(float(TN) / (TN + FN)) - self.avgs['FNR'].append(float(FN) / (FN + TP)) - self.avgs['FPR'].append(float(FP) / (FP + TN)) - self.avgs['FDR'].append(float(FP) / (FP + TP)) - self.avgs['FOR'].append(float(FN) / (FN + TN)) - self.avgs['ACC'].append(float(TP + TN) / (TP + FN + FP + TN)) - self.avgs['F1'].append(float(2 * TP) / (2 * TP + FP + FN)) - self.avgs['MCC'].append(float(TP * TN - FP * FN) / np.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))) - - if self.confusion_matrix: - get_confusion_matrix( - self.spark, - float(TP), - float(TN), - float(FP), - float(FN) - ).show()
- -
[docs] def plotROC(self, fittedEstimator, ax): - """ - Plots the receiver operating characteristic curve for the trained classifier - - :param fittedEstimator: fitted logistic regression model - :param ax: matplotlib axis object - :return: axis with ROC plot - """ - if fittedEstimator.__class__ == LogisticRegressionModel: - trainingSummary = fittedEstimator.summary - roc = trainingSummary.roc.toPandas() - ax.plot(roc['FPR'], roc['TPR'], label='Training set areaUnderROC: \n' + str(trainingSummary.areaUnderROC)) - ax.set_xlabel('False Positive Rate') - ax.set_ylabel('True Positive Rate') - ax.set_title('ROC Curve') - ax.legend() - return ax - else: - raise NotImplementedError("Only supported for Logistic Regression Models")
- - -
[docs]class SpliceRegressionEvaluator(SpliceBaseEvaluator): - """ - A Splice Machine evaluator for Spark Regression models. Implements functions from SpliceBaseEvaluator. - """ - - def __init__(self, spark, predictionCol="prediction", labelCol="label"): - supported = ['rmse', 'mse', 'r2', 'mae'] - SpliceBaseEvaluator.__init__(self, spark, RegressionEvaluator, supported, predictionCol=predictionCol, - labelCol=labelCol)
- - -
[docs]class SpliceMultiClassificationEvaluator(SpliceBaseEvaluator): - """ - A Splice Machine evaluator for Spark MultiClass models. Implements functions from SpliceBaseEvaluator. - """ - - def __init__(self, spark, predictionCol="prediction", labelCol="label"): - supported = ["f1", "weightedPrecision", "weightedRecall", "accuracy"] - SpliceBaseEvaluator.__init__(self, spark, MulticlassClassificationEvaluator, supported, - predictionCol=predictionCol, labelCol=labelCol)
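A short usage sketch for these evaluators (hypothetical `preds` DataFrame with `prediction` and `label` columns; `spark` is an active session):

```
evaluator = SpliceMultiClassificationEvaluator(spark)
evaluator.input(preds)  # call once per fold to accumulate running averages
results = evaluator.get_results(as_dict=True)
print(results['f1'], results['accuracy'])
```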
- - -
[docs]class DecisionTreeVisualizer(object): - """ - Visualize a decision tree, either in a code-like format or with graphviz - """ -
[docs] @staticmethod - def feature_importance(spark, model, dataset, featuresCol="features"): - """ - Return a dataframe containing the relative importance of each feature - - :param spark: Spark Session - :param model: The Spark Machine Learning model - :param dataset: Spark Dataframe - :param featuresCol: (str) the column containing the feature vector - :return: dataframe containing importance - """ - featureImp = model.featureImportances - list_extract = [] - for i in dataset.schema[featuresCol].metadata["ml_attr"]["attrs"]: - list_extract = list_extract + dataset.schema[featuresCol].metadata["ml_attr"]["attrs"][ - i] - varlist = pd.DataFrame(list_extract) - varlist['score'] = varlist['idx'].apply(lambda x: featureImp[x]) - return spark._wrapped.createDataFrame((varlist.sort_values('score', ascending=False)))
- -
[docs] @staticmethod - def visualize( - model, - feature_column_names, - label_names, - size=None, - horizontal=False, - tree_name='tree', - visual=False, - ): - """ - Visualize a decision tree, either in a code-like format or with graphviz - - :param model: the fitted decision tree classifier\n - :param feature_column_names: (List[str]) column names for features - You can access these feature names by using your VectorAssembler (in PySpark) and calling its .getInputCols() function - :param label_names: (List[str]) labels vector (below avg, above avg) - :param size: tuple(int,int) The size of the graph. If unspecified, graphviz will automatically assign a size - :param horizontal: (Bool) if the tree should be rendered horizontally - :param tree_name: the name you would like to call the tree - :param visual: bool, true if you want a graphviz pdf containing your file - :return dot: The graphviz object - """ - - tree_to_json = DecisionTreeVisualizer.replacer(model.toDebugString, - ['feature ' + str(i) for i in - range(len(feature_column_names) - 1, -1, -1)], - reversed(feature_column_names)) - - tree_to_json = DecisionTreeVisualizer.replacer(tree_to_json, - [f'Predict: {str(i)}.0' for i in - range(len(label_names) - 1, -1, -1)], - reversed(label_names)) - if not visual: - return tree_to_json - - dot = graphviz.Digraph(comment='Decision Tree') - if size: - dot.attr(size=size) - if horizontal: - dot.attr(rankdir="LR") - dot.node_attr.update(color='lightblue2', style='filled') - json_d = DecisionTreeVisualizer.tree_json(tree_to_json) - - DecisionTreeVisualizer.add_node(dot, '', '', json_d, - realroot=True) - dot.render(tree_name) - print(f'Generated pdf file of tree. You can view it in your Jupyter directory under {dot.filepath}.pdf\n') - dot.view() - return (dot)
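For example, with a fitted decision tree model (names here are illustrative):

```
feature_names = assembler.getInputCols()  # the VectorAssembler used in training
dot = DecisionTreeVisualizer.visualize(
    model, feature_names, ['below_avg', 'above_avg'],
    tree_name='my_tree', visual=True)  # renders my_tree.pdf via graphviz
```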
- -
[docs] @staticmethod - def replacer(string, bad, good): - """ - Replace every string in "bad" with the corresponding string in "good" - - :param string: string to replace in - :param bad: array of strings to replace - :param good: array of strings to replace with - :return: the string with all replacements applied - """ - - for b, g in zip(bad, good): - string = string.replace(b, g) - return string
- -
[docs] @staticmethod - def add_node( - dot, - parent, - node_hash, - root, - realroot=False, - ): - """ - Traverse through the .debugString json and generate a graphviz tree - - :param dot: dot file object - :param parent: not used currently - :param node_hash: unique node id - :param root: the root of tree - :param realroot: whether or not it is the real root, or a recursive root - :return: - """ - - node_id = str(hash(root['name'])) + str(random.randint(0, 100)) - if root: - dot.node(node_id, root['name']) - if not realroot: - dot.edge(node_hash, node_id) - if root.get('children'): - if not root['children'][0].get('children'): - DecisionTreeVisualizer.add_node(dot, root['name'], - node_id, root['children'][0]) - else: - DecisionTreeVisualizer.add_node(dot, root['name'], - node_id, root['children'][0]) - DecisionTreeVisualizer.add_node(dot, root['name'], - node_id, root['children'][1])
- -
[docs] @staticmethod - def parse(lines): - """ - Lines in debug string - - :param lines: - :return: block json - """ - - block = [] - while lines: - if lines[0].startswith('If'): - bl = ' '.join(lines.pop(0).split()[1:]).replace('(', '' - ).replace(')', '') - block.append({'name': bl, - 'children': DecisionTreeVisualizer.parse(lines)}) - - if lines[0].startswith('Else'): - be = ' '.join(lines.pop(0).split()[1:]).replace('(' - , '').replace(')', '') - block.append({'name': be, - 'children': DecisionTreeVisualizer.parse(lines)}) - elif not lines[0].startswith(('If', 'Else')): - block2 = lines.pop(0) - block.append({'name': block2}) - else: - break - return block
- -
[docs] @staticmethod - def tree_json(tree): - """ - Generate a JSON representation of a decision tree - - :param tree: tree debug string - :return: json - """ - - data = [] - for line in tree.splitlines(): - if line.strip(): - line = line.strip() - data.append(line) - else: - break - if not line: - break - res = [{'name': 'Root', - 'children': DecisionTreeVisualizer.parse(data[1:])}] - return res[0]
- - -
[docs]def inspectTable(spliceMLCtx, sql, topN=5): - """ - Inspect the values of the columns of the table (dataframe) returned from the sql query - - :param spliceMLCtx: SpliceMLContext - :param sql: sql string to execute - :param topN: the number of most frequent elements of a column to return, defaults to 5 - """ - df = spliceMLCtx.df(sql) - df = df.repartition(50) - - for _col, _type in df.dtypes: - print("------Inspecting column {} -------- ".format(_col)) - - val_counts = df.groupby(_col).count() - val_counts.show() - val_counts.orderBy(F.desc('count')).limit(topN).show() - - if _type == 'double' or _type == 'int': - df.select(_col).describe().show()
- - - -# Custom Transformers -
[docs]class Rounder(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """Transformer to round predictions for ordinal regression - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - - :param Transformer: Inherited Class - :param HasInputCol: Inherited Class - :param HasOutputCol: Inherited Class - :return: Transformed Dataframe with rounded predictionCol - - :Example: - .. code-block:: python - - >>> from pyspark.sql.session import SparkSession - >>> from splicemachine.stats import Rounder - >>> spark = SparkSession.builder.getOrCreate() - >>> dataset = spark.createDataFrame( - ... [(0.2, 0.0), - ... (1.2, 1.0), - ... (1.6, 2.0), - ... (1.1, 0.0), - ... (3.1, 0.0)], - ... ["prediction", "label"]) - >>> dataset.show() - +----------+-----+ - |prediction|label| - +----------+-----+ - | 0.2| 0.0| - | 1.2| 1.0| - | 1.6| 2.0| - | 1.1| 0.0| - | 3.1| 0.0| - +----------+-----+ - >>> rounder = Rounder(predictionCol = "prediction", labelCol = "label", - clipPreds = True) - >>> rounder.transform(dataset).show() - +----------+-----+ - |prediction|label| - +----------+-----+ - | 0.0| 0.0| - | 1.0| 1.0| - | 2.0| 2.0| - | 1.0| 0.0| - | 2.0| 0.0| - +----------+-----+ - >>> rounderNoClip = Rounder(predictionCol = "prediction", labelCol = "label", - clipPreds = False) - >>> rounderNoClip.transform(dataset).show() - +----------+-----+ - |prediction|label| - +----------+-----+ - | 0.0| 0.0| - | 1.0| 1.0| - | 2.0| 2.0| - | 1.0| 0.0| - | 3.0| 0.0| - +----------+-----+ - """ - - @keyword_only - def __init__(self, predictionCol="prediction", labelCol="label", clipPreds=True, maxLabel=None, minLabel=None): - """ - Initialize self - - :param predictionCol: column containing predictions, defaults to "prediction" - :param labelCol: column containing labels, defaults to "label" - :param clipPreds: clip predictions above/below the specified (or observed) maximum/minimum label values - :param maxLabel: optional: the maximum value for the prediction column, otherwise uses the maximum of the labelCol, defaults to None - :param minLabel: optional: the minimum value for the prediction column, otherwise uses the minimum of the labelCol, defaults to None - """ - super(Rounder, self).__init__() - self.labelCol = labelCol - self.predictionCol = predictionCol - self.clipPreds = clipPreds - self.maxLabel = maxLabel - self.minLabel = minLabel - - @keyword_only - def setParams(self, predictionCol="prediction", labelCol="label"): - kwargs = self._input_kwargs - return self._set(**kwargs) - - def _transform(self, dataset): - """ - Rounds the predictions to the nearest integer value, and also clips them at the max/min value observed in the label - - :param dataset: dataframe with predictions to be rounded - :return: DataFrame with rounded predictions - """ - labelCol = self.labelCol - predictionCol = self.predictionCol - - if self.clipPreds: - max_label = self.maxLabel if self.maxLabel else dataset.agg({labelCol: 'max'}).collect()[0][0] - min_label = self.minLabel if self.minLabel else dataset.agg({labelCol: 'min'}).collect()[0][0] - clip = F.udf(lambda x: float(max_label) if x > max_label else (float(min_label) if x < min_label else x), - DoubleType()) - - dataset = dataset.withColumn(predictionCol, F.round(clip(F.col(predictionCol)))) - else: - dataset = dataset.withColumn(predictionCol, F.round(F.col(predictionCol))) - - return dataset
[docs]class OneHotDummies(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """ - Transformer to generate dummy columns for categorical variables as a part of a preprocessing pipeline\n - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - - :param Transformer: Inherited Classes - :param HasInputCol: Inherited Classes - :param HasOutputCol: Inherited Classes - :return: pyspark DataFrame - """ - - @keyword_only - def __init__(self, inputCol=None, outputCol=None): - """ - Assigns variables to parameters passed - - :param inputCol: Sparse vector returned by OneHotEncoders, defaults to None - :param outputCol: string base to append to output column names, defaults to None - """ - super(OneHotDummies, self).__init__() - # kwargs = self._input_kwargs - # self.setParams(**kwargs) - self.inputCol = inputCol - self.outputCol = outputCol - self.outcols = [] - - @keyword_only - def setParams(self, inputCol=None, outputCol=None): - kwargs = self._input_kwargs - return self._set(**kwargs) - - def _transform(self, dataset): - """ - Iterates through the number of categorical values of a categorical variable and adds dummy columns for each of those categories - For a string categorical column, include this transformer in the following workflow: StringIndexer -> OneHotEncoder -> OneHotDummies -> PCA/ Learning Algorithm - - :param dataset: PySpark DataFrame where inputCol is the column returned by OneHotEncoders - :return: original DataFrame with M additional columns where M = # of categories for this variable - """ - out_col_suffix = self.outputCol # this is what we append to the column name - col_name = self.inputCol - - out_col_base = col_name + out_col_suffix # this is the base for the n outputted columns - - # helper functions - get_num_categories = F.udf(lambda x: int(x.size), IntegerType()) - get_active_index = F.udf(lambda x: int(x.indices[0]), IntegerType()) - check_active_index = F.udf(lambda active, i: int(active == i), IntegerType()) - - num_categories = dataset.select( - get_num_categories(col_name).alias('num_categories')).distinct() # this returns a dataframe - if num_categories.count() == 1: # making sure all the sparse vectors have the same number of categories - num_categories_int = num_categories.collect()[0]['num_categories'] # now this is an int - - dataset = dataset.withColumn('active_index', get_active_index(col_name)) - column_names = [] - for i in range(num_categories_int): # make a column for each category - column_name = out_col_base + '_' + str(i) - dataset = dataset.withColumn(column_name, check_active_index(F.col('active_index'), F.lit(i))) - column_names.append(column_name) - - dataset = dataset.drop('active_index') - self.outcols = column_names - return dataset - - def getOutCols(self): - return self.outcols
- - -
[docs]class IndReconstructer(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """ - Transformer to reconstruct String Index from OneHotDummy Columns. This can be used as a part of a Pipeline Object\n - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - - :param Transformer: Inherited Class - :param HasInputCol: Inherited Class - :param HasOutputCol: Inherited Class - :return: Transformed PySpark Dataframe With Original String Indexed Variables - """ - - @keyword_only - def __init__(self, inputCol=None, outputCol=None): - super(IndReconstructer, self).__init__() - # kwargs = self._input_kwargs - # self.setParams(**kwargs) - self.inputCol = inputCol - self.outputCol = outputCol - - @keyword_only - def setParams(self, inputCol=None, outputCol=None): - kwargs = self._input_kwargs - return self._set(**kwargs) - - def _transform(self, dataset): - """ - Iterates through the oneHotDummy columns for a categorical variable and returns the index of the column that is closest to one. This corresponds to the stringIndexed value of this feature for this row. - - :param dataset: dataset with OneHotDummy columns - :return: DataFrame with column corresponding to a categorical indexed column - """ - inColBase = self.inputCol - outCol = self.outputCol - - closestToOne = F.udf(lambda x: abs(x - 1), DoubleType()) - dummies = dataset.select(*[closestToOne(i).alias(i) if inColBase in i else i for i in dataset.columns if - inColBase in i or i == 'SUBJECT']) - dummies = dummies.withColumn('least_val', - F.lit(F.least(*[F.col(i) for i in dataset.columns if inColBase in i]))) - - dummies = dummies.select( - *[(F.col(i) == F.col('least_val')).alias(i + 'isind') if inColBase in i else i for i in dataset.columns if - inColBase in i or i == 'SUBJECT']) - getActive = F.udf(lambda row: [idx for idx, val in enumerate(row) if val][0], IntegerType()) - dummies = dummies.withColumn(outCol, getActive( - F.struct(*[F.col(x) for x in dummies.columns if x != 'SUBJECT']).alias('struct'))) - dataset = dataset.join(dummies.select(['SUBJECT', outCol]), 'SUBJECT') - - return dataset
- - -
[docs]class OverSampler(Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable): - """ - Transformer to oversample datapoints with minority labels\n - - Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers - - :param Transformer: Inherited Class - :param HasInputCol: Inherited Class - :param HasOutputCol: Inherited Class - :return: PySpark Dataframe with labels in approximately equal ratios - - :Example: - .. code-block:: python - - >>> from pyspark.sql import functions as F - >>> from pyspark.sql.session import SparkSession - >>> from pyspark.ml.linalg import Vectors - >>> from splicemachine.stats import OverSampler - >>> spark = SparkSession.builder.getOrCreate() - >>> df = spark.createDataFrame( - ... [(Vectors.dense([0.0]), 0.0), - ... (Vectors.dense([0.5]), 0.0), - ... (Vectors.dense([0.4]), 1.0), - ... (Vectors.dense([0.6]), 1.0), - ... (Vectors.dense([1.0]), 1.0)] * 10, - ... ["features", "Class"]) - >>> df.groupBy(F.col("Class")).count().orderBy("count").show() - +-----+-----+ - |Class|count| - +-----+-----+ - | 0.0| 20| - | 1.0| 30| - +-----+-----+ - >>> oversampler = OverSampler(labelCol = "Class", strategy = "auto") - >>> oversampler.transform(df).groupBy("Class").count().show() - +-----+-----+ - |Class|count| - +-----+-----+ - | 0.0| 29| - | 1.0| 30| - +-----+-----+ - """ - - @keyword_only - def __init__(self, labelCol=None, strategy="auto", randomState=None): - """ - Initialize self - - :param labelCol: Label Column name, defaults to None - :param strategy: strategy to resample the dataset, defaults to "auto": - • Only "auto" is currently supported, which corresponds to random sampling with replacement - :param randomState: sets the seed of the sample algorithm - """ - super(OverSampler, self).__init__() - self.labelCol = labelCol - self.strategy = strategy - self.withReplacement = True if strategy == "auto" else False - self.randomState = np.random.randn() if not randomState else randomState - - @keyword_only - def setParams(self, labelCol=None, strategy="auto"): - kwargs = self._input_kwargs - return self._set(**kwargs) - - def _transform(self, dataset): - """ - Oversamples - - :param dataset: dataframe to be oversampled - :return: DataFrame with the resampled data points - """ - if self.strategy == "auto": - - pd_value_counts = dataset.groupBy(F.col(self.labelCol)).count().toPandas() - - label_type = dataset.schema[self.labelCol].dataType.simpleString() - types_dic = {'int': int, "string": str, "double": float} - - maxidx = pd_value_counts['count'].idxmax() - - self.majorityLabel = types_dic[label_type](pd_value_counts[self.labelCol].loc[maxidx]) - majorityData = dataset.filter(F.col(self.labelCol) == self.majorityLabel) - - returnData = None - - if len(pd_value_counts) == 1: - raise ValueError( - f'Error! Number of labels = {len(pd_value_counts)}. ' - f'Cannot Oversample with this number of classes') - elif len(pd_value_counts) == 2: - minidx = pd_value_counts['count'].idxmin() - minorityLabel = types_dic[label_type](pd_value_counts[self.labelCol].loc[minidx]) - ratio = pd_value_counts['count'].loc[maxidx] / pd_value_counts['count'].loc[minidx] * 1.0 - - returnData = majorityData.union( - dataset.filter(F.col(self.labelCol) == minorityLabel).sample(withReplacement=self.withReplacement, - fraction=ratio, seed=self.randomState)) - - else: - minority_labels = list(pd_value_counts.drop(maxidx)[self.labelCol]) - - ratios = {types_dic[label_type](minority_label): pd_value_counts['count'].loc[maxidx] / float( - pd_value_counts[pd_value_counts[self.labelCol] == minority_label]['count']) for minority_label in - minority_labels} - - for (minorityLabel, ratio) in ratios.items(): - minorityData = dataset.filter(F.col(self.labelCol) == minorityLabel).sample( - withReplacement=self.withReplacement, fraction=ratio, seed=self.randomState) - if not returnData: - returnData = majorityData.union(minorityData) - else: - returnData = returnData.union(minorityData) - - return returnData - else: - raise NotImplementedError("Only auto is currently implemented")
- - -
[docs]class OverSampleCrossValidator(CrossValidator): - """ - Class to perform Cross Validation model evaluation while over-sampling minority labels. - - :Example: - .. code-block:: python - - >>> from pyspark.sql.session import SparkSession - >>> from pyspark.ml.classification import LogisticRegression - >>> from pyspark.ml.evaluation import BinaryClassificationEvaluator,\n - MulticlassClassificationEvaluator - >>> from pyspark.ml.linalg import Vectors - >>> from pyspark.ml.tuning import ParamGridBuilder - >>> from splicemachine.stats import OverSampleCrossValidator - >>> spark = SparkSession.builder.getOrCreate() - >>> dataset = spark.createDataFrame( - ... [(Vectors.dense([0.0]), 0.0), - ... (Vectors.dense([0.5]), 0.0), - ... (Vectors.dense([0.4]), 1.0), - ... (Vectors.dense([0.6]), 1.0), - ... (Vectors.dense([1.0]), 1.0)] * 10, - ... ["features", "label"]) - >>> lr = LogisticRegression() - >>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build() - >>> PRevaluator = BinaryClassificationEvaluator(metricName = 'areaUnderPR') - >>> AUCevaluator = BinaryClassificationEvaluator(metricName = 'areaUnderROC') - >>> ACCevaluator = MulticlassClassificationEvaluator(metricName="accuracy") - >>> cv = OverSampleCrossValidator(estimator=lr, estimatorParamMaps=grid, - evaluator=AUCevaluator, altEvaluators = [PRevaluator, ACCevaluator], - parallelism=2,seed = 1234) - >>> cvModel = cv.fit(dataset) - >>> print(cvModel.avgMetrics) - [(0.5, [0.5888888888888888, 0.3888888888888889]), (0.806878306878307, - [0.8556863149300125, 0.7055555555555556])] - >>> print(AUCevaluator.evaluate(cvModel.transform(dataset))) - 0.8333333333333333 - """ - - def __init__(self, estimator, estimatorParamMaps, evaluator, numFolds=3, seed=None, parallelism=3, - collectSubModels=False, labelCol='label', altEvaluators=None, overSample=True): - """ - Initialize Self - - :param estimator: Machine Learning Model, defaults to None - :param estimatorParamMaps: paramMap to search, defaults to None - :param evaluator: primary model evaluation metric, defaults to None - :param numFolds: number of folds to perform, defaults to 3 - :param seed: random state, defaults to None - :param parallelism: number of threads, defaults to 3 - :param collectSubModels: to return submodels, defaults to False - :param labelCol: target variable column label, defaults to 'label' - :param altEvaluators: additional metrics to evaluate, defaults to None - If passed, the metrics of the alternate evaluators are accessed in the CrossValidatorModel.avgMetrics attribute - :param overSample: Boolean: to perform oversampling of minority labels, defaults to True - """ - self.label = labelCol - self.altEvaluators = altEvaluators - self.toOverSample = overSample - super(OverSampleCrossValidator, self).__init__(estimator=estimator, estimatorParamMaps=estimatorParamMaps, - evaluator=evaluator, numFolds=numFolds, seed=seed, - parallelism=parallelism, collectSubModels=collectSubModels) - - def getLabel(self): - return self.label - - def getOversample(self): - return self.toOverSample - - def getAltEvaluators(self): - return self.altEvaluators - - def _parallelFitTasks(self, est, train, eva, validation, epm, collectSubModel, altEvaluators): - """ - Creates a list of callables which can be called from different threads to fit and evaluate - an estimator in parallel. Each callable returns an `(index, metric)` pair if altEvaluators, (index, metric, [alt_metrics]). - - :param est: Estimator, the estimator to be fit. - :param train: DataFrame, training data set, used for fitting.
- :param eva: Evaluator, used to compute `metric` - :param validation: DataFrame, validation data set, used for evaluation. - :param epm: Sequence of ParamMap, params maps to be used during fitting & evaluation. - :param collectSubModel: Whether to collect sub model. - :return: (int, float, subModel), an index into `epm` and the associated metric value. - """ - modelIter = est.fitMultiple(train, epm) - - def singleTask(): - index, model = next(modelIter) - metric = eva.evaluate(model.transform(validation, epm[index])) - altmetrics = None - if altEvaluators: - altmetrics = [altEva.evaluate(model.transform(validation, epm[index])) for altEva in altEvaluators] - return index, metric, altmetrics, model if collectSubModel else None - - return [singleTask] * len(epm) - - def _fit(self, dataset): - """ - Performs k-fold cross-validation, oversampling the minority labels in each training fold - - :param dataset: full dataset - :return: CrossValidatorModel containing the fitted BestModel with the average of the primary and alternate metrics in a list of tuples in the format: [(paramComb1_average_primary_metric, [paramComb1_average_altmetric1,paramComb1_average_altmetric2]), (paramComb2_average_primary_metric, [paramComb2_average_altmetric1,paramComb2_average_altmetric2])] - """ - est = self.getOrDefault(self.estimator) - epm = self.getOrDefault(self.estimatorParamMaps) - numModels = len(epm) - eva = self.getOrDefault(self.evaluator) - nFolds = self.getOrDefault(self.numFolds) - seed = self.getOrDefault(self.seed) - - # Getting Label and altEvaluators - label = self.getLabel() - altEvaluators = self.getAltEvaluators() - altMetrics = [[0.0] * len(altEvaluators)] * numModels if altEvaluators else None - h = 1.0 / nFolds - randCol = self.uid + "_rand" - df = dataset.select("*", F.rand(seed).alias(randCol)) - metrics = [0.0] * numModels - - pool = ThreadPool(processes=min(self.getParallelism(), numModels)) - subModels = None - collectSubModelsParam = self.getCollectSubModels() - if collectSubModelsParam: - subModels = [[None for j in range(numModels)] for i in range(nFolds)] - - for i in range(nFolds): - # Getting the splits such that no data is reused - validateLB = i * h - validateUB = (i + 1) * h - condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB) - validation = df.filter(condition).cache() - train = df.filter(~condition).cache() - - # Oversampling the minority class(s) here - if self.toOverSample: - withReplacement = True - oversampler = OverSampler(labelCol=self.label, strategy="auto") - - # Oversampling - train = oversampler.transform(train) - # Getting the individual tasks so this can be parallelized - tasks = self._parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam, altEvaluators) - # Calling the parallel process - for j, metric, fold_alt_metrics, subModel in pool.imap_unordered(lambda f: f(), tasks): - metrics[j] += (metric / nFolds) - if fold_alt_metrics: - altMetrics[j] = [altMetrics[j][i] + fold_alt_metrics[i] / nFolds for i in range(len(altEvaluators))] - - if collectSubModelsParam: - subModels[i][j] = subModel - - validation.unpersist() - train.unpersist() - - if eva.isLargerBetter(): - bestIndex = np.argmax(metrics) - else: - bestIndex = np.argmin(metrics) - bestModel = est.fit(dataset, epm[bestIndex]) - metrics = [(metric, altMetrics[idx]) for idx, metric in enumerate(metrics)] - return self._copyValues(CrossValidatorModel(bestModel, metrics, subModels))
- - -## Pipeline Functions -
[docs]def get_string_pipeline(df, cols_to_exclude, steps=['StringIndexer', 'OneHotEncoder', 'OneHotDummies']): - """ - Generates a list of preprocessing stages - - :param df: DataFrame including only the training data - :param cols_to_exclude: Column names we don't want to include in the preprocessing (i.e. SUBJECT/ target column) - :param steps: preprocessing steps to take - :return: (stages, Numeric_Columns) - stages: list of pipeline stages to be used in preprocessing - Numeric_Columns: list of columns that contain numeric features - """ - - String_Columns = [] - Numeric_Columns = [] - for _col, _type in df.dtypes: # This is a tuple of (<col name>, data type) - if _col in cols_to_exclude: - continue - if _type == 'string': - String_Columns.append(_col) - elif _type == 'double' or _type == 'int' or _type == 'float': - Numeric_Columns.append(_col) - else: - print("Unhandled Data type = {}".format((_col, _type))) - continue - - stages = [] - if 'StringIndexer' in steps: - # String Indexing - str_indexers = [StringIndexer(inputCol=c, outputCol=c + '_ind', handleInvalid='skip') for c in String_Columns] - indexed_string_vars = [c + '_ind' for c in String_Columns] - stages = stages + str_indexers - - if 'OneHotEncoder' in steps: - # One hot encoding - str_hot = [OneHotEncoder(inputCol=c + '_ind', outputCol=c + '_vec', dropLast=False) for c in String_Columns] - encoded_str_vars = [c + '_vec' for c in String_Columns] - stages = stages + str_hot - - if 'OneHotDummies' in steps: - # Converting the sparse vector to dummy columns - str_dumbers = [OneHotDummies(inputCol=c + '_vec', outputCol='_dummy') for c in String_Columns] - str_dumb_cols = [c for dummy in str_dumbers for c in dummy.getOutCols()] - stages = stages + str_dumbers - - if len(stages) == 0: - ERROR = """ - Parameter <steps> must include 'StringIndexer', 'OneHotEncoder', 'OneHotDummies' - """ - print(ERROR) - raise Exception(ERROR) - - return stages, Numeric_Columns
- - -
[docs]def vector_assembler_pipeline(df, columns, doPCA=False, k=10):
-    """
-    After preprocessing string columns, this function can be used to assemble a feature vector to be used for learning.
-    Creates the following stages: VectorAssembler -> StandardScaler [-> PCA]
-
-    :param df: DataFrame containing the preprocessed columns
-    :param columns: list of column names of the preprocessed columns
-    :param doPCA: whether to add a PCA stage to the vector assembler, defaults to False
-    :param k: number of principal components to use, defaults to 10
-    :return: list of vector assembling stages
-    """
-
-    assembler = VectorAssembler(inputCols=columns, outputCol='featuresVec')
-    scaler = StandardScaler(inputCol="featuresVec", outputCol="features", withStd=True,
-                            withMean=True)  # centering and standardizing the data
-
-    if doPCA:
-        pca_obj = PCA(k=k, inputCol="features", outputCol="pcaFeatures")
-        stages = [assembler, scaler, pca_obj]
-    else:
-        stages = [assembler, scaler]
-    return stages
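A sketch of chaining this with the string preprocessing above; `encoded_cols` (the one-hot output column names) and the other names are assumptions carried over from the previous example:

```
# Chain string preprocessing and vector assembly into one pipeline.
from pyspark.ml import Pipeline

all_stages = stages + vector_assembler_pipeline(df, encoded_cols + numeric_cols, doPCA=True, k=5)
out = Pipeline(stages=all_stages).fit(df).transform(df)  # adds 'featuresVec', 'features', 'pcaFeatures'
```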
- - -
[docs]def postprocessing_pipeline(df, cols_to_exclude):
-    """
-    Assemble a postprocessing pipeline to reconstruct the original categorical indexed values from OneHotDummy columns
-
-    :param df: DataFrame including the original string columns
-    :param cols_to_exclude: list of columns to exclude
-    :return: (reconstructors, String_Columns)
-        reconstructors: list of IndReconstructer stages
-        String_Columns: list of columns that are being reconstructed
-    """
-    String_Columns = []
-    Numeric_Columns = []
-    for _col, _type in df.dtypes:  # This is a tuple of (<col name>, data type)
-        if _col in cols_to_exclude:
-            continue
-        if _type == 'string':
-            String_Columns.append(_col)
-        elif _type == 'double' or _type == 'int' or _type == 'float':
-            Numeric_Columns.append(_col)
-        else:
-            print("Unhandled data type = {}".format((_col, _type)))
-            continue
-
-    # Extracting the value of the OneHotEncoded variable
-    reconstructors = [IndReconstructer(inputCol=c, outputCol=c + '_activeInd') for c in String_Columns]
-    return reconstructors, String_Columns
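A hypothetical usage sketch; `reconstructed_df` is assumed to be a DataFrame that still carries the one-hot dummy columns:

```
# Decode one-hot dummy columns back into indexed categorical values.
from pyspark.ml import Pipeline

post_stages, string_cols = postprocessing_pipeline(df, cols_to_exclude=['SUBJECT'])
decoded = Pipeline(stages=post_stages).fit(reconstructed_df).transform(reconstructed_df)
```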
- - -# Distribution fitting Functions -
[docs]def make_pdf(dist, params, size=10000):
-    """
-    Generate a distribution's probability density function (PDF)
-
-    :param dist: scipy.stats distribution object: https://docs.scipy.org/doc/scipy/reference/stats.html
-    :param params: distribution parameters
-    :param size: how many data points to generate, defaults to 10000
-    :return: pandas Series containing the probability density function for this distribution
-    """
-    # Separate parts of parameters
-    arg = params[:-2]
-    loc = params[-2]
-    scale = params[-1]
-
-    # Get sane start and end points of distribution
-    start = dist.ppf(0.01, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.01, loc=loc, scale=scale)
-    end = dist.ppf(0.99, *arg, loc=loc, scale=scale) if arg else dist.ppf(0.99, loc=loc, scale=scale)
-
-    # Build PDF and turn into pandas Series
-    x = np.linspace(start, end, size)
-    y = dist.pdf(x, loc=loc, scale=scale, *arg)
-    pdf = pd.Series(y, x)
-
-    return pdf
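A self-contained example of the expected calling convention (a sketch; the synthetic data is illustrative):

```
# Fit a normal distribution with scipy and plot its PDF via make_pdf.
import numpy as np
import scipy.stats as st

data = np.random.normal(loc=5.0, scale=2.0, size=1000)
params = st.norm.fit(data)       # returns (loc, scale); any shape params would come first
pdf = make_pdf(st.norm, params)  # pandas Series indexed by the x values
pdf.plot()
```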
- - -
[docs]def best_fit_distribution(data, col_name, bins, ax):
-    """
-    Model data by finding the best-fit distribution
-
-    :param data: DataFrame with one column containing the feature whose distribution is to be investigated
-    :param col_name: column name for the feature
-    :param bins: number of bins to use in generating the histogram of this data
-    :param ax: axis to plot the histogram on
-    :return: (best_distribution.name, best_params, best_sse)
-        best_distribution.name: string of the best distribution name
-        best_params: parameters for this distribution
-        best_sse: sum of squared errors for this distribution against the empirical pdf
-    """
-    # Get histogram of original data
-    output = dist_explore.pandas_histogram(data, bins=bins)
-    output.reset_index(level=0, inplace=True)
-    output['index'] = output['index'].apply(lambda x: np.mean([float(i.strip()) for i in x.split('-')]))
-    output[col_name] = output[col_name] / np.sum(output[col_name]) / (output['index'][1] - (output['index'][0]))
-
-    x = output['index']
-    y = output[col_name]
-    # DISTRIBUTIONS = [
-    #     st.alpha,st.anglit,st.arcsine,st.beta,st.betaprime,st.bradford,st.burr,st.cauchy,st.chi,st.chi2,st.cosine,
-    #     st.dgamma,st.dweibull,st.erlang,st.expon,st.exponnorm,st.exponweib,st.exponpow,st.f,st.fatiguelife,st.fisk,
-    #     st.foldcauchy,st.foldnorm,st.frechet_r,st.frechet_l,st.genlogistic,st.genpareto,st.gennorm,st.genexpon,
-    #     st.genextreme,st.gausshyper,st.gamma,st.gengamma,st.genhalflogistic,st.gilbrat,st.gompertz,st.gumbel_r,
-    #     st.gumbel_l,st.halfcauchy,st.halflogistic,st.halfnorm,st.halfgennorm,st.hypsecant,st.invgamma,st.invgauss,
-    #     st.invweibull,st.johnsonsb,st.johnsonsu,st.ksone,st.kstwobign,st.laplace,st.levy,st.levy_l,st.levy_stable,
-    #     st.logistic,st.loggamma,st.loglaplace,st.lognorm,st.lomax,st.maxwell,st.mielke,st.nakagami,st.ncx2,st.ncf,
-    #     st.nct,st.norm,st.pareto,st.pearson3,st.powerlaw,st.powerlognorm,st.powernorm,st.rdist,st.reciprocal,
-    #     st.rayleigh,st.rice,st.recipinvgauss,st.semicircular,st.t,st.triang,st.truncexpon,st.truncnorm,st.tukeylambda,
-    #     st.uniform,st.vonmises,st.vonmises_line,st.wald,st.weibull_min,st.weibull_max,st.wrapcauchy
-    # ]
-
-    DISTRIBUTIONS = [
-        st.beta, st.expon,
-        st.halfnorm,
-        st.norm,
-        st.lognorm,
-        st.uniform
-    ]
-
-    # Best holders
-    best_distribution = st.norm
-    best_params = (0.0, 1.0)
-    best_sse = np.inf
-
-    # Estimate distribution parameters from data
-    for distribution in tqdm(DISTRIBUTIONS):
-
-        # Try to fit the distribution
-        try:
-            # Ignore warnings from data that can't be fit
-            with warnings.catch_warnings():
-                warnings.filterwarnings('ignore')
-
-                # Fit dist to data
-                params = distribution.fit(data.collect())
-
-                # Separate parts of parameters
-                arg = params[:-2]
-                loc = params[-2]
-                scale = params[-1]
-
-                # Calculate fitted PDF and error with fit in distribution
-                pdf = distribution.pdf(x, loc=loc, scale=scale, *arg)
-                sse = np.sum(np.power(y.values - pdf, 2.0))
-
-                # If an axis was passed in, add this fit to the plot
-                try:
-                    if ax:
-                        if sse < 0.05:
-                            # Don't want to plot really bad ones
-                            ax = pd.Series(pdf, x).plot(legend=True, label=distribution.name)
-                            # ax.plot(x,pdf, label = distribution.name)
-                            ax.legend()
-                except Exception:
-                    pass
-
-                # Identify if this distribution is better
-                if best_sse > sse > 0:
-                    best_distribution = distribution
-                    best_params = params
-                    best_sse = sse
-
-        except Exception:
-            pass
-
-    return (best_distribution.name, best_params, best_sse)
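A hypothetical call; `df` is a Spark DataFrame and 'AGE' an illustrative numeric column (the function collect()s the column to the driver, so this assumes modest data sizes):

```
# Find the best-fit distribution for one column and overlay its PDF.
import matplotlib.pyplot as plt
import scipy.stats as st

fig, ax = plt.subplots()
name, params, sse = best_fit_distribution(df.select('AGE'), 'AGE', bins=30, ax=ax)
pdf = make_pdf(getattr(st, name), params)   # rebuild the winning distribution's PDF
pdf.plot(ax=ax, label=name, legend=True)
plt.show()
```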
- - -## PCA Functions - -
[docs]def estimateCovariance(df, features_col='features'): - """ - Compute the covariance matrix for a given dataframe. - - - :param df: PySpark dataframe - :param features_col: name of the column with the features, defaults to 'features' - :return: np.ndarray: A multi-dimensional array where the number of rows and columns both equal the length of the arrays in the input dataframe. - - :Note: - The multi-dimensional covariance array should be calculated using outer products. Don't forget to normalize the data by first subtracting the mean. - """ - m = df.select(df[features_col]).rdd.map(lambda x: x[0]).mean() - - dfZeroMean = df.select(df[features_col]).rdd.map(lambda x: x[0]).map(lambda x: x - m) # subtract the mean - - return dfZeroMean.map(lambda x: np.outer(x, x)).sum() / df.count()
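Because the covariance here is normalized by N rather than N-1, a quick sanity check (a sketch assuming a small toy DataFrame `df` with a 'features' column of DenseVectors) is to compare against numpy's population covariance:

```
# Illustrative check only: collect() assumes the data fits on the driver.
import numpy as np

rows = np.array([r[0].toArray() for r in df.select('features').collect()])
cov = estimateCovariance(df)
assert np.allclose(cov, np.cov(rows, rowvar=False, bias=True))  # bias=True -> divide by N
```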
- - -
[docs]def pca_with_scores(df, k=10):
-    """Computes the top `k` principal components, corresponding scores, and all eigenvalues.
-
-    :param df: A Spark dataframe with a 'features' column, which (column) consists of DenseVectors.
-    :param k: The number of principal components to return, defaults to 10
-    :return: (eigenvectors, `RDD` of scores, eigenvalues)\n
-        * Eigenvectors: multi-dimensional array where the number of\
-          rows equals the length of the arrays in the input `RDD` and the number of columns equals `k`.
-        * `RDD` of scores: has the same number of rows as `data` and consists of arrays of length `k`.
-        * Eigenvalues is an array of length d (the number of features).
-
-    :Note:
-        All eigenvalues are returned in sorted order (largest to smallest). `eigh` returns
-        each eigenvector as a column. This function also returns eigenvectors as columns.
-    """
-    cov = estimateCovariance(df)
-    col = cov.shape[1]
-    eigVals, eigVecs = eigh(cov)
-    inds = np.argsort(eigVals)
-    eigVecs = eigVecs.T[inds[-1:-(col + 1):-1]]
-    components = eigVecs[0:k]
-    eigVals = eigVals[inds[-1:-(col + 1):-1]]  # sort eigenvalues
-    score = df.select(df['features']).rdd.map(lambda x: x[0]).map(lambda x: np.dot(x, components.T))
-    # Return the `k` principal components, `k` scores, and all eigenvalues
-    return components.T, score, eigVals
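An illustrative call, continuing the same `df` assumption as above:

```
# Project a features DataFrame onto its top 2 principal components.
components, scores, eigVals = pca_with_scores(df, k=2)
print(components.shape)  # (d, 2): eigenvectors as columns
print(scores.take(3))    # first rows projected into the 2-D PCA space
```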
- - -
[docs]def varianceExplained(df, k=10):
-    """
-    Returns the proportion of variance explained by `k` principal components. Calls the above PCA procedure.
-
-    :param df: PySpark DataFrame
-    :param k: number of principal components, defaults to 10
-    :return: (proportion, principal_components, scores, eigenvalues)
-    """
-    components, scores, eigenvalues = pca_with_scores(df, k)
-    return sum(eigenvalues[0:k]) / sum(eigenvalues), components, scores, eigenvalues
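For example (same illustrative `df`):

```
# How much variance do the top 3 components capture?
ratio, components, scores, eigenvalues = varianceExplained(df, k=3)
print("Top-3 components explain {:.1%} of the variance".format(ratio))
```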
- - -# PCA reconstruction Functions - -
[docs]def reconstructPCA(sql, df, pc, mean, std, originalColumns, fits, pcaColumn='pcaFeatures'):
-    """
-    Reconstruct data from the lower dimensional space after performing PCA
-
-    :param sql: SQLContext
-    :param df: PySpark DataFrame: inputted PySpark DataFrame
-    :param pc: numpy.ndarray: principal components projected onto
-    :param mean: numpy.ndarray: mean of the original columns
-    :param std: numpy.ndarray: standard deviation of the original columns
-    :param originalColumns: list: original column names
-    :param fits: fits of features returned from best_fit_distribution
-    :param pcaColumn: column in df that contains the PCA features, defaults to 'pcaFeatures'
-    :return: dataframe containing the reconstructed data
-    """
-
-    cols = df.columns
-    cols.remove(pcaColumn)
-
-    pddf = df.toPandas()
-    first_series = pddf[pcaColumn].apply(lambda x: np.array(x.toArray())).to_numpy().reshape(-1, 1)
-    first_features = np.apply_along_axis(lambda x: x[0], 1, first_series)
-    # Undoing PCA
-    first_reconstructed = np.dot(first_features, pc)
-    # Undoing the scaling
-    first_reconstructed = np.multiply(first_reconstructed, std) + mean
-    first_reconstructedDF = pd.DataFrame(first_reconstructed, columns=originalColumns)
-    for _col in cols:
-        first_reconstructedDF[_col] = pddf[_col]
-
-    # This is a PySpark DataFrame containing the reconstructed data, including the dummy columns for the string
-    # variables; the next step is to reverse the one-hot encoding for the string columns
-    first_reconstructed = sql.createDataFrame(first_reconstructedDF)
-
-    cols_to_exclude = ['DATE_OF_STUDY']
-    postPipeStages, String_Columns = postprocessing_pipeline(df, cols_to_exclude)
-
-    postPipe = Pipeline(stages=postPipeStages)
-    out = postPipe.fit(first_reconstructed).transform(first_reconstructed)
-    for _col in String_Columns:
-        out = out.join(df.select([_col, _col + '_ind']) \
-                       .withColumnRenamed(_col + '_ind', _col + '_activeInd'), _col + '_activeInd') \
-                 .dropDuplicates()
-    cols_to_drop = [_col for _col in out.columns if any([base in _col for base in String_Columns]) and '_' in _col]
-
-    reconstructedDF = out.drop(*cols_to_drop)  # The reconstructed dataframe, with the one-hot dummy columns decoded and dropped
-    clip = F.udf(lambda x: x if x > 0 else 0.0, DoubleType())
-    for _key in fits.keys():
-        if fits[_key]['dist'] == 'EMPIRICAL':
-            reconstructedDF = reconstructedDF.withColumn(_key, F.round(clip(F.col(_key))))
-        else:
-            reconstructedDF = reconstructedDF.withColumn(_key, clip(F.col(_key)))
-
-    return reconstructedDF
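A hypothetical wiring of the full round trip; every name below (`pipeline_model`, `pca_df`, `original_cols`, `fits`) is an assumption carried over from the earlier steps, not a fixed API:

```
# Undo PCA + scaling using artifacts from earlier steps (illustrative).
scaler_model = pipeline_model.stages[-2]  # the fitted StandardScalerModel
reconstructed = reconstructPCA(sqlContext, pca_df, components.T,
                               mean=scaler_model.mean.toArray(),
                               std=scaler_model.std.toArray(),
                               originalColumns=original_cols, fits=fits)
```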
class MarkovChain(object):
-    def __init__(self, transition_prob):
-        """
-        Initialize the MarkovChain instance.
-
-        :param transition_prob: dict\
-            A dict object representing the transition\
-            probabilities in a Markov Chain.\
-            Should be of the form:\
-            {'state1': {'state1': 0.1, 'state2': 0.4}, 'state2': {...}}
-        """
-        self.transition_prob = transition_prob
-        self.states = list(transition_prob.keys())  # states that have transitions to the next layer
-        # For states in the form <stateN_M> where N is the visit (layer) and M is the cluster in the N-th layer
-        self.max_num_steps = max([int(i.split('state')[1][0]) for i in self.states])
-
-    def get_max_num_steps(self):
-        return self.max_num_steps
-
-    def next_state(self, current_state):
-        """
-        Returns the state of the random variable at the next time instance.
-
-        :param current_state: The current state of the system.
-        :raises: Exception if the random choice fails
-        :return: next state
-        """
-        try:
-            # if not current_state in self.states:
-            #     print('We have reached node {} where we do not know where they go from here... \n try reducing the number of clusters at level {} \n otherwise we might be at the terminating layer'.format(current_state, int(current_state.split('state')[1][0])))
-            #     raise Exception('Unknown transition')
-
-            next_possible_states = list(self.transition_prob[current_state].keys())
-            return np.random.choice(
-                next_possible_states,
-                p=[self.transition_prob[current_state][next_state]
-                   for next_state in next_possible_states]
-            )[:]
-        except Exception as e:
-            raise e
-
-    def generate_states(self, current_state, no=10, last=True):
-        """
-        Generates the next states of the system.
-
-        :param current_state: (str) The state of the current random variable.
-        :param no: (int) The number of future states to generate.
-        :param last: (bool) Whether to return just the last generated state
-        """
-        try:
-            if no > self.max_num_steps:
-                print('Number of steps exceeds the max number of possible next steps')
-                raise Exception('<no> should not exceed {}. The value of <no> was: {}'.format(self.max_num_steps, no))
-
-            future_states = []
-            for i in range(no):
-                try:
-                    next_state = self.next_state(current_state)
-                except Exception as e:
-                    raise e
-                future_states.append(next_state)
-                current_state = next_state
-            if last:
-                return future_states[-1]
-            else:
-                return future_states
-        except Exception as e:
-            raise e
-
-    def rep_states(self, current_state, no=10, num_reps=10):
-        """
-        Runs generate_states a number of times and returns the final state that is reached most often
-
-        :param current_state: (str) The state of the current random variable
-        :param no: (int) number of time steps in the future to run
-        :param num_reps: (int) number of times to run the simulation forward
-        :return state: the most commonly reached state at the end of these runs
-        """
-        if no > self.max_num_steps:
-            print('Number of steps exceeds the max number of possible next steps')
-            raise Exception('<no> should not exceed {}. The value of <no> was: {}'.format(self.max_num_steps, no))
-
-        endstates = []
-        for _ in range(num_reps):
-            endstates.append(self.generate_states(current_state, no=no, last=True))
-        return max(set(endstates), key=endstates.count)
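A toy example of the expected input shape; the 'state&lt;layer&gt;_&lt;cluster&gt;' names are illustrative but follow the naming scheme the class parses for `max_num_steps`:

```
# Two layer-1 states, each transitioning to two layer-2 clusters.
import numpy as np  # MarkovChain uses np.random.choice internally

transitions = {
    'state1_0': {'state2_0': 0.6, 'state2_1': 0.4},
    'state1_1': {'state2_0': 0.2, 'state2_1': 0.8},
}
mc = MarkovChain(transitions)
print(mc.next_state('state1_0'))                     # 'state2_0' or 'state2_1'
print(mc.rep_states('state1_0', no=1, num_reps=25))  # most common end state
```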
-
\ No newline at end of file
diff --git a/docs/_build/html/_sources/getting-started.rst.txt b/docs/_build/html/_sources/getting-started.rst.txt
deleted file mode 100644
index a29a588a..00000000
--- a/docs/_build/html/_sources/getting-started.rst.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-Getting Started
-===============
-
-K8s Install
------------
-
-If you are running inside of the Splice Machine Cloud Service in a Jupyter Notebook, MLManager will already be installed for you. If you'd like to install it (or upgrade it), you can install from git with
-
-.. code-block:: sh
-
-    pip install [--upgrade] git+https://www.github.com/splicemachine/pysplice@
-
-External Installation
----------------------
-
-If you would like to install outside of the K8s cluster (and use the ExtPySpliceContext), you can install the stable build with
-
-.. code-block:: sh
-
-    sudo pip install git+http://www.github.com/splicemachine/pysplice@2.3.0-k8
-
-Or the latest with
-
-.. code-block:: sh
-
-    sudo pip install git+http://www.github.com/splicemachine/pysplice
-
-Usage
------
-
-This section covers importing and instantiating the Native Spark DataSource
-
-.. tabs::
-
-   .. tab:: Native Spark DataSource
-
-      To use the Native Spark DataSource inside of the `cloud service`_, first create a Spark Session and then import your PySpliceContext
-
-      .. code-block:: Python
-
-         from pyspark.sql import SparkSession
-         from splicemachine.spark import PySpliceContext
-         from splicemachine.mlflow_support import *  # Connects your MLflow session automatically
-         from splicemachine.features import FeatureStore  # Splice Machine Feature Store
-
-         spark = SparkSession.builder.getOrCreate()
-         splice = PySpliceContext(spark)  # The Native Spark Datasource (PySpliceContext) takes a Spark Session
-         fs = FeatureStore(splice)  # Create your Feature Store
-         mlflow.register_splice_context(splice)  # Gives mlflow a native DB connection
-         mlflow.register_feature_store(fs)  # Tracks Feature Store work in MLflow automatically
-
-
-   .. tab:: External Native Spark DataSource
-
-      To use the External Native Spark DataSource, create a Spark Session with your external jars configured. Then, import your ExtPySpliceContext and set the necessary parameters.
-      Once created, the functionality is identical to the internal Native Spark Datasource (PySpliceContext)
-
-      .. code-block:: Python
-
-         from pyspark.sql import SparkSession
-         from splicemachine.spark import ExtPySpliceContext
-         from splicemachine.mlflow_support import *  # Connects your MLflow session automatically
-         from splicemachine.features import FeatureStore  # Splice Machine Feature Store
-
-         spark = SparkSession.builder.config('spark.jars', '/path/to/splice_spark2-3.0.0.1962-SNAPSHOT-shaded.jar').config('spark.driver.extraClassPath', 'path/to/Splice/jars/dir/*').getOrCreate()
-         JDBC_URL = ''  # Set your JDBC URL here. You can get this from the Cloud Manager UI.
-         # Make sure to append ';user=;password=' after ';ssl=basic' so you can authenticate
-         # The ExtPySpliceContext communicates with the database via Kafka
-         kafka_server = 'kafka-broker-0-' + JDBC_URL.split('jdbc:splice://jdbc-')[1].split(':1527')[0] + ':19092'  # Formatting the Kafka URL from the JDBC URL
-         splice = ExtPySpliceContext(spark, JDBC_URL=JDBC_URL, kafkaServers=kafka_server)
-
-         fs = FeatureStore(splice)  # Create your Feature Store
-         mlflow.register_splice_context(splice)  # Gives mlflow a native DB connection
-         mlflow.register_feature_store(fs)  # Tracks Feature Store work in MLflow automatically
diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt
deleted file mode 100644
index b5a757fd..00000000
--- a/docs/_build/html/_sources/index.rst.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-.. splicemachine documentation master file, created by
-   sphinx-quickstart on Tue Jul 14 18:34:54 2020.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to Splicemachine's documentation!
-=========================================
-
-This documentation will walk you through what you need to start using Splice Machine's MLManager workbench. See the Getting Started section below for installation and usage. Then see the splicemachine.spark section for database connectivity, as well as mlflow_support for our embedded mlflow connection.
-
-.. toctree::
-   :maxdepth: 4
-   :caption: Contents:
-   :titlesonly:
-
-   getting-started
-   splicemachine
-
-   :hidden:
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
diff --git a/docs/_build/html/_sources/spark.rst.txt b/docs/_build/html/_sources/spark.rst.txt
deleted file mode 100644
index 1067b5ba..00000000
--- a/docs/_build/html/_sources/spark.rst.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-spark package
-=============
-
-Subpackages
------------
-
-.. toctree::
-   :maxdepth: 4
-
-   spark.test
-
-Submodules
-----------
-
-spark.constants module
-----------------------
-
-.. automodule:: spark.constants
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
-spark.context module
---------------------
-
-.. automodule:: spark.context
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: spark
-   :members:
-   :undoc-members:
-   :show-inheritance:
diff --git a/docs/_build/html/_sources/splicemachine.features.rst.txt b/docs/_build/html/_sources/splicemachine.features.rst.txt
deleted file mode 100644
index 56f1a57a..00000000
--- a/docs/_build/html/_sources/splicemachine.features.rst.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-splicemachine.features package
-==============================
-
-Submodules
-----------
-..
-    .. automodule:: splicemachine.features.feature_store
-        :members:
-        :undoc-members:
-        :show-inheritance:
-
-    .. automodule:: splicemachine.features.feature_set
-        :members:
-        :undoc-members:
-        :show-inheritance:
-
-    .. automodule:: splicemachine.features.feature
-        :members:
-        :undoc-members:
-        :show-inheritance:
-
-    .. automodule:: splicemachine.features.training_view
-        :members:
-        :undoc-members:
-        :show-inheritance:
-
-splicemachine.features.feature_store module
--------------------------------------------
-
-This module contains the classes and APIs for interacting with the Splice Machine Feature Store.
-
-..
automodule:: splicemachine.features.feature_store - :members: - :undoc-members: - :show-inheritance: - -splicemachine.features.feature_set ----------------------------------- - -This describes the Python representation of a Feature Set. A feature set is a database table that contains Features and their metadata. -The Feature Set class is mostly used internally but can be used by the user to see the available Features in the given -Feature Set, to see the table and schema name it is deployed to (if it is deployed), and to deploy the feature set -(which can also be done directly through the Feature Store). Feature Sets are unique by their schema.table name, as they -exist in the Splice Machine database as a SQL table. They are case insensitive. -To see the full contents of your Feature Set, you can print, return, or .__dict__ your Feature Set object. - -.. automodule:: splicemachine.features.feature_set - :members: - :show-inheritance: - - -splicemachine.features.Feature ----------------------------------- - -This describes the Python representation of a Feature. A Feature is a column of a Feature Set table with particular metadata. -A Feature is the smallest unit in the Feature Store, and each Feature within a Feature Set is individually tracked for changes -to enable full time travel and point-in-time consistent training datasets. Features' names are unique and case insensitive. -To see the full contents of your Feature, you can print, return, or .__dict__ your Feature object. - -.. automodule:: splicemachine.features.feature - :members: - :undoc-members: - :show-inheritance: - -splicemachine.features.training_view ----------------------------------- - -This describes the Python representation of a Training View. A Training View is a SQL statement defining an event of interest, and metadata around how to create a training dataset with that view. -To see the full contents of your Training View, you can print, return, or .__dict__ your Training View object. - -.. automodule:: splicemachine.features.training_view - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: splicemachine.features - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_build/html/_sources/splicemachine.mlflow_support.rst.txt b/docs/_build/html/_sources/splicemachine.mlflow_support.rst.txt deleted file mode 100644 index 5c03a60d..00000000 --- a/docs/_build/html/_sources/splicemachine.mlflow_support.rst.txt +++ /dev/null @@ -1,30 +0,0 @@ -splicemachine.mlflow_support package -=========================== - -Submodules ----------- -.. - .. automodule:: splicemachine.mlflow_support.mlflow_support - :members: - :undoc-members: - :show-inheritance: - - .. automodule:: splicemachine.mlflow_support.constants - :members: - :undoc-members: - :show-inheritance: - - .. automodule:: splicemachine.mlflow_support.utilities - :members: - :undoc-members: - :show-inheritance: - -splicemachine.mlflow_support.mlflow_support module ----------------------------------- - -This module contains the entrypoint to the Splice Machine managed mlflow environment - -.. 
automodule:: splicemachine.mlflow_support.mlflow_support - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_build/html/_sources/splicemachine.notebook.rst.txt b/docs/_build/html/_sources/splicemachine.notebook.rst.txt deleted file mode 100644 index 634a74f3..00000000 --- a/docs/_build/html/_sources/splicemachine.notebook.rst.txt +++ /dev/null @@ -1,9 +0,0 @@ -splicemachine.notebook module -===================================== - -This module contains helper functions and tools for use inside of a Jupyter Notebook - -.. automodule:: splicemachine.notebook - :members: - :no-private-members: - :no-undoc-members: diff --git a/docs/_build/html/_sources/splicemachine.rst.txt b/docs/_build/html/_sources/splicemachine.rst.txt deleted file mode 100644 index e2e5a6dc..00000000 --- a/docs/_build/html/_sources/splicemachine.rst.txt +++ /dev/null @@ -1,16 +0,0 @@ -Splicemachine package -===================== - -This package contains the classes and modules necessary for interacting with Splice Machine's MLManager workbench. Follow the documentation below to get started, or check out the Getting Started section for installation - -Subpackages ------------ - -.. toctree:: - :maxdepth: 4 - - splicemachine.spark - splicemachine.mlflow_support - splicemachine.features - splicemachine.notebook - splicemachine.stats diff --git a/docs/_build/html/_sources/splicemachine.spark.rst.txt b/docs/_build/html/_sources/splicemachine.spark.rst.txt deleted file mode 100644 index 1d9ef496..00000000 --- a/docs/_build/html/_sources/splicemachine.spark.rst.txt +++ /dev/null @@ -1,29 +0,0 @@ -splicemachine.spark package -=========================== - -Submodules ----------- -.. - .. automodule:: splicemachine.spark.constants - :members: - :undoc-members: - :show-inheritance: - -splicemachine.spark.context module ----------------------------------- - -This Module contains the classes for interacting with the Database via our NSDS. For installation instructions, please see the Getting Started guide. For use inside the K8s cluster, see `PySpliceContext <#splicemachine.spark.context.PySpliceContext>`_. For use outside of the K8s cluster, see `ExtPySpliceContext <#splicemachine.spark.context.ExtPySpliceContext>`_ - -.. automodule:: splicemachine.spark.context - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: splicemachine.spark - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/_build/html/_sources/splicemachine.stats.rst.txt b/docs/_build/html/_sources/splicemachine.stats.rst.txt deleted file mode 100644 index 44e2545b..00000000 --- a/docs/_build/html/_sources/splicemachine.stats.rst.txt +++ /dev/null @@ -1,10 +0,0 @@ -splicemachine.stats module -===================================== - -This module contains statistical functions to help with Machine Learning and data analysis. - -.. automodule:: splicemachine.stats - :members: - :no-private-members: - :no-undoc-members: - :no-inherited-members: \ No newline at end of file diff --git a/docs/_build/html/_static/__init__.py b/docs/_build/html/_static/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css deleted file mode 100644 index 616111c1..00000000 --- a/docs/_build/html/_static/basic.css +++ /dev/null @@ -1,855 +0,0 @@ -/* - * basic.css - * ~~~~~~~~~ - * - * Sphinx stylesheet -- basic theme. - * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. 
- * :license: BSD, see LICENSE for details. - * - */ - -/* -- main layout ----------------------------------------------------------- */ - -div.clearer { - clear: both; -} - -div.section::after { - display: block; - content: ''; - clear: left; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- sidebar --------------------------------------------------------------- */ - -div.sphinxsidebarwrapper { - padding: 10px 5px 0 10px; -} - -div.sphinxsidebar { - float: left; - width: 270px; - margin-left: -100%; - font-size: 90%; - word-wrap: break-word; - overflow-wrap : break-word; -} - -div.sphinxsidebar ul { - list-style: none; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar form { - margin-top: 10px; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - -div.sphinxsidebar #searchbox form.search { - overflow: hidden; -} - -div.sphinxsidebar #searchbox input[type="text"] { - float: left; - width: 80%; - padding: 0.25em; - box-sizing: border-box; -} - -div.sphinxsidebar #searchbox input[type="submit"] { - float: left; - width: 20%; - border-left: none; - padding: 0.25em; - box-sizing: border-box; -} - - -img { - border: 0; - max-width: 100%; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background-image: url(file.png); - background-repeat: no-repeat; - background-position: 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: left; -} - -ul.keywordmatches li.goodmatch a { - font-weight: bold; -} - -/* -- index page ------------------------------------------------------------ */ - -table.contentstable { - width: 90%; - margin-left: auto; - margin-right: auto; -} - -table.contentstable p.biglink { - line-height: 150%; -} - -a.biglink { - font-size: 1.3em; -} - -span.linkdescr { - font-style: italic; - padding-top: 5px; - font-size: 90%; -} - -/* -- general index --------------------------------------------------------- */ - -table.indextable { - width: 100%; -} - -table.indextable td { - text-align: left; - vertical-align: top; -} - -table.indextable ul { - margin-top: 0; - margin-bottom: 0; - list-style-type: none; -} - -table.indextable > tbody > tr > td > ul { - padding-left: 0em; -} - -table.indextable tr.pcap { - height: 10px; -} - -table.indextable tr.cap { - margin-top: 10px; - background-color: #f2f2f2; -} - -img.toggler { - margin-right: 3px; - margin-top: 3px; - cursor: pointer; -} - -div.modindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -div.genindex-jumpbox { - border-top: 1px solid #ddd; - border-bottom: 1px solid #ddd; - margin: 1em 0 1em 0; - padding: 0.4em; -} - -/* -- domain module index --------------------------------------------------- */ - -table.modindextable td { - padding: 2px; - border-collapse: collapse; -} - -/* -- general body styles 
--------------------------------------------------- */ - -div.body { - min-width: 450px; - max-width: 800px; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - -moz-hyphens: auto; - -ms-hyphens: auto; - -webkit-hyphens: auto; - hyphens: auto; -} - -a.headerlink { - visibility: hidden; -} - -a.brackets:before, -span.brackets > a:before{ - content: "["; -} - -a.brackets:after, -span.brackets > a:after { - content: "]"; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink, -caption:hover > a.headerlink, -p.caption:hover > a.headerlink, -div.code-block-caption:hover > a.headerlink { - visibility: visible; -} - -div.body p.caption { - text-align: inherit; -} - -div.body td { - text-align: left; -} - -.first { - margin-top: 0 !important; -} - -p.rubric { - margin-top: 30px; - font-weight: bold; -} - -img.align-left, .figure.align-left, object.align-left { - clear: left; - float: left; - margin-right: 1em; -} - -img.align-right, .figure.align-right, object.align-right { - clear: right; - float: right; - margin-left: 1em; -} - -img.align-center, .figure.align-center, object.align-center { - display: block; - margin-left: auto; - margin-right: auto; -} - -img.align-default, .figure.align-default { - display: block; - margin-left: auto; - margin-right: auto; -} - -.align-left { - text-align: left; -} - -.align-center { - text-align: center; -} - -.align-default { - text-align: center; -} - -.align-right { - text-align: right; -} - -/* -- sidebars -------------------------------------------------------------- */ - -div.sidebar { - margin: 0 0 0.5em 1em; - border: 1px solid #ddb; - padding: 7px; - background-color: #ffe; - width: 40%; - float: right; - clear: right; - overflow-x: auto; -} - -p.sidebar-title { - font-weight: bold; -} - -div.admonition, div.topic, blockquote { - clear: left; -} - -/* -- topics ---------------------------------------------------------------- */ - -div.topic { - border: 1px solid #ccc; - padding: 7px; - margin: 10px 0 10px 0; -} - -p.topic-title { - font-size: 1.1em; - font-weight: bold; - margin-top: 10px; -} - -/* -- admonitions ----------------------------------------------------------- */ - -div.admonition { - margin-top: 10px; - margin-bottom: 10px; - padding: 7px; -} - -div.admonition dt { - font-weight: bold; -} - -p.admonition-title { - margin: 0px 10px 5px 0px; - font-weight: bold; -} - -div.body p.centered { - text-align: center; - margin-top: 25px; -} - -/* -- content of sidebars/topics/admonitions -------------------------------- */ - -div.sidebar > :last-child, -div.topic > :last-child, -div.admonition > :last-child { - margin-bottom: 0; -} - -div.sidebar::after, -div.topic::after, -div.admonition::after, -blockquote::after { - display: block; - content: ''; - clear: both; -} - -/* -- tables ---------------------------------------------------------------- */ - -table.docutils { - margin-top: 10px; - margin-bottom: 10px; - border: 0; - border-collapse: collapse; -} - -table.align-center { - margin-left: auto; - margin-right: auto; -} - -table.align-default { - margin-left: auto; - margin-right: auto; -} - -table caption span.caption-number { - font-style: italic; -} - -table caption span.caption-text { -} - -table.docutils td, table.docutils th { - padding: 1px 8px 1px 5px; - border-top: 0; - border-left: 0; - border-right: 0; - border-bottom: 1px solid #aaa; -} - -table.footnote td, table.footnote th { - border: 
0 !important; -} - -th { - text-align: left; - padding-right: 5px; -} - -table.citation { - border-left: solid 1px gray; - margin-left: 1px; -} - -table.citation td { - border-bottom: none; -} - -th > :first-child, -td > :first-child { - margin-top: 0px; -} - -th > :last-child, -td > :last-child { - margin-bottom: 0px; -} - -/* -- figures --------------------------------------------------------------- */ - -div.figure { - margin: 0.5em; - padding: 0.5em; -} - -div.figure p.caption { - padding: 0.3em; -} - -div.figure p.caption span.caption-number { - font-style: italic; -} - -div.figure p.caption span.caption-text { -} - -/* -- field list styles ----------------------------------------------------- */ - -table.field-list td, table.field-list th { - border: 0 !important; -} - -.field-list ul { - margin: 0; - padding-left: 1em; -} - -.field-list p { - margin: 0; -} - -.field-name { - -moz-hyphens: manual; - -ms-hyphens: manual; - -webkit-hyphens: manual; - hyphens: manual; -} - -/* -- hlist styles ---------------------------------------------------------- */ - -table.hlist { - margin: 1em 0; -} - -table.hlist td { - vertical-align: top; -} - - -/* -- other body styles ----------------------------------------------------- */ - -ol.arabic { - list-style: decimal; -} - -ol.loweralpha { - list-style: lower-alpha; -} - -ol.upperalpha { - list-style: upper-alpha; -} - -ol.lowerroman { - list-style: lower-roman; -} - -ol.upperroman { - list-style: upper-roman; -} - -:not(li) > ol > li:first-child > :first-child, -:not(li) > ul > li:first-child > :first-child { - margin-top: 0px; -} - -:not(li) > ol > li:last-child > :last-child, -:not(li) > ul > li:last-child > :last-child { - margin-bottom: 0px; -} - -ol.simple ol p, -ol.simple ul p, -ul.simple ol p, -ul.simple ul p { - margin-top: 0; -} - -ol.simple > li:not(:first-child) > p, -ul.simple > li:not(:first-child) > p { - margin-top: 0; -} - -ol.simple p, -ul.simple p { - margin-bottom: 0; -} - -dl.footnote > dt, -dl.citation > dt { - float: left; - margin-right: 0.5em; -} - -dl.footnote > dd, -dl.citation > dd { - margin-bottom: 0em; -} - -dl.footnote > dd:after, -dl.citation > dd:after { - content: ""; - clear: both; -} - -dl.field-list { - display: grid; - grid-template-columns: fit-content(30%) auto; -} - -dl.field-list > dt { - font-weight: bold; - word-break: break-word; - padding-left: 0.5em; - padding-right: 5px; -} - -dl.field-list > dt:after { - content: ":"; -} - -dl.field-list > dd { - padding-left: 0.5em; - margin-top: 0em; - margin-left: 0em; - margin-bottom: 0em; -} - -dl { - margin-bottom: 15px; -} - -dd > :first-child { - margin-top: 0px; -} - -dd ul, dd table { - margin-bottom: 10px; -} - -dd { - margin-top: 3px; - margin-bottom: 10px; - margin-left: 30px; -} - -dl > dd:last-child, -dl > dd:last-child > :last-child { - margin-bottom: 0; -} - -dt:target, span.highlighted { - background-color: #fbe54e; -} - -rect.highlighted { - fill: #fbe54e; -} - -dl.glossary dt { - font-weight: bold; - font-size: 1.1em; -} - -.optional { - font-size: 1.3em; -} - -.sig-paren { - font-size: larger; -} - -.versionmodified { - font-style: italic; -} - -.system-message { - background-color: #fda; - padding: 5px; - border: 3px solid red; -} - -.footnote:target { - background-color: #ffa; -} - -.line-block { - display: block; - margin-top: 1em; - margin-bottom: 1em; -} - -.line-block .line-block { - margin-top: 0; - margin-bottom: 0; - margin-left: 1.5em; -} - -.guilabel, .menuselection { - font-family: sans-serif; -} - -.accelerator { - text-decoration: 
underline; -} - -.classifier { - font-style: oblique; -} - -.classifier:before { - font-style: normal; - margin: 0.5em; - content: ":"; -} - -abbr, acronym { - border-bottom: dotted 1px; - cursor: help; -} - -/* -- code displays --------------------------------------------------------- */ - -pre { - overflow: auto; - overflow-y: hidden; /* fixes display issues on Chrome browsers */ -} - -pre, div[class*="highlight-"] { - clear: both; -} - -span.pre { - -moz-hyphens: none; - -ms-hyphens: none; - -webkit-hyphens: none; - hyphens: none; -} - -div[class*="highlight-"] { - margin: 1em 0; -} - -td.linenos pre { - border: 0; - background-color: transparent; - color: #aaa; -} - -table.highlighttable { - display: block; -} - -table.highlighttable tbody { - display: block; -} - -table.highlighttable tr { - display: flex; -} - -table.highlighttable td { - margin: 0; - padding: 0; -} - -table.highlighttable td.linenos { - padding-right: 0.5em; -} - -table.highlighttable td.code { - flex: 1; - overflow: hidden; -} - -.highlight .hll { - display: block; -} - -div.highlight pre, -table.highlighttable pre { - margin: 0; -} - -div.code-block-caption + div { - margin-top: 0; -} - -div.code-block-caption { - margin-top: 1em; - padding: 2px 5px; - font-size: small; -} - -div.code-block-caption code { - background-color: transparent; -} - -table.highlighttable td.linenos, -div.doctest > div.highlight span.gp { /* gp: Generic.Prompt */ - user-select: none; -} - -div.code-block-caption span.caption-number { - padding: 0.1em 0.3em; - font-style: italic; -} - -div.code-block-caption span.caption-text { -} - -div.literal-block-wrapper { - margin: 1em 0; -} - -code.descname { - background-color: transparent; - font-weight: bold; - font-size: 1.2em; -} - -code.descclassname { - background-color: transparent; -} - -code.xref, a code { - background-color: transparent; - font-weight: bold; -} - -h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { - background-color: transparent; -} - -.viewcode-link { - float: right; -} - -.viewcode-back { - float: right; - font-family: sans-serif; -} - -div.viewcode-block:target { - margin: -1px -10px; - padding: 0 10px; -} - -/* -- math display ---------------------------------------------------------- */ - -img.math { - vertical-align: middle; -} - -div.body div.math p { - text-align: center; -} - -span.eqno { - float: right; -} - -span.eqno a.headerlink { - position: absolute; - z-index: 1; -} - -div.math:hover a.headerlink { - visibility: visible; -} - -/* -- printout stylesheet --------------------------------------------------- */ - -@media print { - div.document, - div.documentwrapper, - div.bodywrapper { - margin: 0 !important; - width: 100%; - } - - div.sphinxsidebar, - div.related, - div.footer, - #top-link { - display: none; - } -} \ No newline at end of file diff --git a/docs/_build/html/_static/classic.css b/docs/_build/html/_static/classic.css deleted file mode 100644 index cceb67ac..00000000 --- a/docs/_build/html/_static/classic.css +++ /dev/null @@ -1,266 +0,0 @@ -/* - * classic.css_t - * ~~~~~~~~~~~~~ - * - * Sphinx stylesheet -- classic theme. - * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. 
- * - */ - -@import url("basic.css"); - -/* -- page layout ----------------------------------------------------------- */ - -html { - /* CSS hack for macOS's scrollbar (see #1125) */ - background-color: #FFFFFF; -} - -body { - font-family: sans-serif; - font-size: 100%; - background-color: #11303d; - color: #000; - margin: 0; - padding: 0; -} - -div.document { - background-color: #1c4e63; -} - -div.documentwrapper { - float: left; - width: 100%; -} - -div.bodywrapper { - margin: 0 0 0 230px; -} - -div.body { - background-color: #ffffff; - color: #000000; - padding: 0 20px 30px 20px; -} - -div.footer { - color: #ffffff; - width: 100%; - padding: 9px 0 9px 0; - text-align: center; - font-size: 75%; -} - -div.footer a { - color: #ffffff; - text-decoration: underline; -} - -div.related { - background-color: #133f52; - line-height: 30px; - color: #ffffff; -} - -div.related a { - color: #ffffff; -} - -div.sphinxsidebar { -} - -div.sphinxsidebar h3 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.4em; - font-weight: normal; - margin: 0; - padding: 0; -} - -div.sphinxsidebar h3 a { - color: #ffffff; -} - -div.sphinxsidebar h4 { - font-family: 'Trebuchet MS', sans-serif; - color: #ffffff; - font-size: 1.3em; - font-weight: normal; - margin: 5px 0 0 0; - padding: 0; -} - -div.sphinxsidebar p { - color: #ffffff; -} - -div.sphinxsidebar p.topless { - margin: 5px 10px 10px 10px; -} - -div.sphinxsidebar ul { - margin: 10px; - padding: 0; - color: #ffffff; -} - -div.sphinxsidebar a { - color: #98dbcc; -} - -div.sphinxsidebar input { - border: 1px solid #98dbcc; - font-family: sans-serif; - font-size: 1em; -} - - - -/* -- hyperlink styles ------------------------------------------------------ */ - -a { - color: #355f7c; - text-decoration: none; -} - -a:visited { - color: #355f7c; - text-decoration: none; -} - -a:hover { - text-decoration: underline; -} - - - -/* -- body styles ----------------------------------------------------------- */ - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: 'Trebuchet MS', sans-serif; - background-color: #f2f2f2; - font-weight: normal; - color: #20435c; - border-bottom: 1px solid #ccc; - margin: 20px -20px 10px -20px; - padding: 3px 0 3px 10px; -} - -div.body h1 { margin-top: 0; font-size: 200%; } -div.body h2 { font-size: 160%; } -div.body h3 { font-size: 140%; } -div.body h4 { font-size: 120%; } -div.body h5 { font-size: 110%; } -div.body h6 { font-size: 100%; } - -a.headerlink { - color: #c60f0f; - font-size: 0.8em; - padding: 0 4px 0 4px; - text-decoration: none; -} - -a.headerlink:hover { - background-color: #c60f0f; - color: white; -} - -div.body p, div.body dd, div.body li, div.body blockquote { - text-align: justify; - line-height: 130%; -} - -div.admonition p.admonition-title + p { - display: inline; -} - -div.admonition p { - margin-bottom: 5px; -} - -div.admonition pre { - margin-bottom: 5px; -} - -div.admonition ul, div.admonition ol { - margin-bottom: 5px; -} - -div.note { - background-color: #eee; - border: 1px solid #ccc; -} - -div.seealso { - background-color: #ffc; - border: 1px solid #ff6; -} - -div.topic { - background-color: #eee; -} - -div.warning { - background-color: #ffe4e4; - border: 1px solid #f66; -} - -p.admonition-title { - display: inline; -} - -p.admonition-title:after { - content: ":"; -} - -pre { - padding: 5px; - background-color: unset; - color: unset; - line-height: 120%; - border: 1px solid #ac9; - border-left: none; - border-right: none; -} - -code { - 
background-color: #ecf0f3; - padding: 0 1px 0 1px; - font-size: 0.95em; -} - -th, dl.field-list > dt { - background-color: #ede; -} - -.warning code { - background: #efc2c2; -} - -.note code { - background: #d6d6d6; -} - -.viewcode-back { - font-family: sans-serif; -} - -div.viewcode-block:target { - background-color: #f4debf; - border-top: 1px solid #ac9; - border-bottom: 1px solid #ac9; -} - -div.code-block-caption { - color: #efefef; - background-color: #1c4e63; -} \ No newline at end of file diff --git a/docs/_build/html/_static/css/badge_only.css b/docs/_build/html/_static/css/badge_only.css deleted file mode 100644 index e380325b..00000000 --- a/docs/_build/html/_static/css/badge_only.css +++ /dev/null @@ -1 +0,0 @@ -.fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge 
.icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff deleted file mode 100644 index 6cb60000..00000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 deleted file mode 100644 index 7059e231..00000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff deleted file mode 100644 index f815f63f..00000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 b/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 deleted file mode 100644 index f2c76e5b..00000000 Binary files a/docs/_build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca9..00000000 Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg deleted file mode 100644 index 855c845e..00000000 --- a/docs/_build/html/_static/css/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,2671 +0,0 @@ - - - - -Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 - By ,,, -Copyright Dave Gandy 2016. All rights reserved. 
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf
deleted file mode 100644
index 35acda2f..00000000
Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.ttf and /dev/null differ
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff
deleted file mode 100644
index 400014a4..00000000
Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff and /dev/null differ
diff --git a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2
deleted file mode 100644
index 4d13fc60..00000000
Binary files a/docs/_build/html/_static/css/fonts/fontawesome-webfont.woff2 and /dev/null differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff
deleted file mode 100644
index 88ad05b9..00000000
Binary files a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff and /dev/null differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2
deleted file mode 100644
index c4e3d804..00000000
Binary files a/docs/_build/html/_static/css/fonts/lato-bold-italic.woff2 and /dev/null differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff b/docs/_build/html/_static/css/fonts/lato-bold.woff
deleted file mode 100644
index c6dff51f..00000000
Binary files a/docs/_build/html/_static/css/fonts/lato-bold.woff and /dev/null differ
diff --git a/docs/_build/html/_static/css/fonts/lato-bold.woff2 b/docs/_build/html/_static/css/fonts/lato-bold.woff2
deleted file mode 100644
index bb195043..00000000
Binary files a/docs/_build/html/_static/css/fonts/lato-bold.woff2 and /dev/null differ
diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff
deleted file mode 100644
index 76114bc0..00000000
Binary files
a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 b/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 deleted file mode 100644 index 3404f37e..00000000 Binary files a/docs/_build/html/_static/css/fonts/lato-normal-italic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff b/docs/_build/html/_static/css/fonts/lato-normal.woff deleted file mode 100644 index ae1307ff..00000000 Binary files a/docs/_build/html/_static/css/fonts/lato-normal.woff and /dev/null differ diff --git a/docs/_build/html/_static/css/fonts/lato-normal.woff2 b/docs/_build/html/_static/css/fonts/lato-normal.woff2 deleted file mode 100644 index 3bf98433..00000000 Binary files a/docs/_build/html/_static/css/fonts/lato-normal.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/css/index.c5995385ac14fb8791e8eb36b4908be2.css b/docs/_build/html/_static/css/index.c5995385ac14fb8791e8eb36b4908be2.css deleted file mode 100644 index 655656db..00000000 --- a/docs/_build/html/_static/css/index.c5995385ac14fb8791e8eb36b4908be2.css +++ /dev/null @@ -1,6 +0,0 @@ -/*! - * Bootstrap v4.5.0 (https://getbootstrap.com/) - * Copyright 2011-2020 The Bootstrap Authors - * Copyright 2011-2020 Twitter, Inc. - * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) - */:root{--blue:#007bff;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#dc3545;--orange:#fd7e14;--yellow:#ffc107;--green:#28a745;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#6c757d;--gray-dark:#343a40;--primary:#007bff;--secondary:#6c757d;--success:#28a745;--info:#17a2b8;--warning:#ffc107;--danger:#dc3545;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:540px;--breakpoint-md:720px;--breakpoint-lg:960px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--font-family-monospace:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace}*,:after,:before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-webkit-tap-highlight-color:rgba(0,0,0,0)}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-size:1rem;line-height:1.5;color:#212529;text-align:left}[tabindex="-1"]:focus:not(:focus-visible){outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;text-decoration:underline dotted;cursor:help;border-bottom:0;text-decoration-skip-ink:none}address{font-style:normal;line-height:inherit}address,dl,ol,ul{margin-bottom:1rem}dl,ol,ul{margin-top:0}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 
1rem}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;background-color:transparent}a:hover{color:#0056b3}a:not([href]),a:not([href]):hover{color:inherit;text-decoration:none}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto;-ms-overflow-style:scrollbar}figure{margin:0 0 1rem}img{border-style:none}img,svg{vertical-align:middle}svg{overflow:hidden}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}[role=button]{cursor:pointer}select{word-wrap:normal}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[type=button]:not(:disabled),[type=reset]:not(:disabled),[type=submit]:not(:disabled),button:not(:disabled){cursor:pointer}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important}.h1,.h2,.h3,.h4,.h5,.h6,h1,h2,h3,h4,h5,h6{margin-bottom:.5rem;font-weight:500;line-height:1.2}.h1,h1{font-size:2.5rem}.h2,h2{font-size:2rem}.h3,h3{font-size:1.75rem}.h4,h4{font-size:1.5rem}.h5,h5{font-size:1.25rem}.h6,h6{font-size:1rem}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:6rem}.display-1,.display-2{font-weight:300;line-height:1.2}.display-2{font-size:5.5rem}.display-3{font-size:4.5rem}.display-3,.display-4{font-weight:300;line-height:1.2}.display-4{font-size:3.5rem}hr{margin-top:1rem;margin-bottom:1rem;border-top:1px solid rgba(0,0,0,.1)}.small,small{font-size:80%;font-weight:400}.mark,mark{padding:.2em;background-color:#fcf8e3}.list-inline,.list-unstyled{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#6c757d}.blockquote-footer:before{content:"\2014\00A0"}.img-fluid,.img-thumbnail{max-width:100%;height:auto}.img-thumbnail{padding:.25rem;background-color:#fff;border:1px solid 
#dee2e6;border-radius:.25rem}.figure{display:inline-block}.figure-img{margin-bottom:.5rem;line-height:1}.figure-caption{font-size:90%;color:#6c757d}code{font-size:87.5%;color:#e83e8c;word-wrap:break-word}a>code{color:inherit}kbd{padding:.2rem .4rem;font-size:87.5%;color:#fff;background-color:#212529;border-radius:.2rem}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#212529}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:540px){.container{max-width:540px}}@media (min-width:720px){.container{max-width:720px}}@media (min-width:960px){.container{max-width:960px}}@media (min-width:1200px){.container{max-width:1400px}}.container-fluid,.container-lg,.container-md,.container-sm,.container-xl{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:540px){.container,.container-sm{max-width:540px}}@media (min-width:720px){.container,.container-md,.container-sm{max-width:720px}}@media (min-width:960px){.container,.container-lg,.container-md,.container-sm{max-width:960px}}@media (min-width:1200px){.container,.container-lg,.container-md,.container-sm,.container-xl{max-width:1400px}}.row{display:flex;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*=col-]{padding-right:0;padding-left:0}.col,.col-1,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-10,.col-11,.col-12,.col-auto,.col-lg,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-auto,.col-md,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-md-auto,.col-sm,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-auto,.col-xl,.col-xl-1,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl-auto{position:relative;width:100%;padding-right:15px;padding-left:15px}.col{flex-basis:0;flex-grow:1;min-width:0;max-width:100%}.row-cols-1>*{flex:0 0 100%;max-width:100%}.row-cols-2>*{flex:0 0 50%;max-width:50%}.row-cols-3>*{flex:0 0 33.33333%;max-width:33.33333%}.row-cols-4>*{flex:0 0 25%;max-width:25%}.row-cols-5>*{flex:0 0 20%;max-width:20%}.row-cols-6>*{flex:0 0 16.66667%;max-width:16.66667%}.col-auto{flex:0 0 auto;width:auto;max-width:100%}.col-1{flex:0 0 8.33333%;max-width:8.33333%}.col-2{flex:0 0 16.66667%;max-width:16.66667%}.col-3{flex:0 0 25%;max-width:25%}.col-4{flex:0 0 33.33333%;max-width:33.33333%}.col-5{flex:0 0 41.66667%;max-width:41.66667%}.col-6{flex:0 0 50%;max-width:50%}.col-7{flex:0 0 58.33333%;max-width:58.33333%}.col-8{flex:0 0 66.66667%;max-width:66.66667%}.col-9{flex:0 0 75%;max-width:75%}.col-10{flex:0 0 83.33333%;max-width:83.33333%}.col-11{flex:0 0 91.66667%;max-width:91.66667%}.col-12{flex:0 0 
100%;max-width:100%}.order-first{order:-1}.order-last{order:13}.order-0{order:0}.order-1{order:1}.order-2{order:2}.order-3{order:3}.order-4{order:4}.order-5{order:5}.order-6{order:6}.order-7{order:7}.order-8{order:8}.order-9{order:9}.order-10{order:10}.order-11{order:11}.order-12{order:12}.offset-1{margin-left:8.33333%}.offset-2{margin-left:16.66667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.33333%}.offset-5{margin-left:41.66667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.33333%}.offset-8{margin-left:66.66667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.33333%}.offset-11{margin-left:91.66667%}@media (min-width:540px){.col-sm{flex-basis:0;flex-grow:1;min-width:0;max-width:100%}.row-cols-sm-1>*{flex:0 0 100%;max-width:100%}.row-cols-sm-2>*{flex:0 0 50%;max-width:50%}.row-cols-sm-3>*{flex:0 0 33.33333%;max-width:33.33333%}.row-cols-sm-4>*{flex:0 0 25%;max-width:25%}.row-cols-sm-5>*{flex:0 0 20%;max-width:20%}.row-cols-sm-6>*{flex:0 0 16.66667%;max-width:16.66667%}.col-sm-auto{flex:0 0 auto;width:auto;max-width:100%}.col-sm-1{flex:0 0 8.33333%;max-width:8.33333%}.col-sm-2{flex:0 0 16.66667%;max-width:16.66667%}.col-sm-3{flex:0 0 25%;max-width:25%}.col-sm-4{flex:0 0 33.33333%;max-width:33.33333%}.col-sm-5{flex:0 0 41.66667%;max-width:41.66667%}.col-sm-6{flex:0 0 50%;max-width:50%}.col-sm-7{flex:0 0 58.33333%;max-width:58.33333%}.col-sm-8{flex:0 0 66.66667%;max-width:66.66667%}.col-sm-9{flex:0 0 75%;max-width:75%}.col-sm-10{flex:0 0 83.33333%;max-width:83.33333%}.col-sm-11{flex:0 0 91.66667%;max-width:91.66667%}.col-sm-12{flex:0 0 100%;max-width:100%}.order-sm-first{order:-1}.order-sm-last{order:13}.order-sm-0{order:0}.order-sm-1{order:1}.order-sm-2{order:2}.order-sm-3{order:3}.order-sm-4{order:4}.order-sm-5{order:5}.order-sm-6{order:6}.order-sm-7{order:7}.order-sm-8{order:8}.order-sm-9{order:9}.order-sm-10{order:10}.order-sm-11{order:11}.order-sm-12{order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.33333%}.offset-sm-2{margin-left:16.66667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.33333%}.offset-sm-5{margin-left:41.66667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.33333%}.offset-sm-8{margin-left:66.66667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.33333%}.offset-sm-11{margin-left:91.66667%}}@media (min-width:720px){.col-md{flex-basis:0;flex-grow:1;min-width:0;max-width:100%}.row-cols-md-1>*{flex:0 0 100%;max-width:100%}.row-cols-md-2>*{flex:0 0 50%;max-width:50%}.row-cols-md-3>*{flex:0 0 33.33333%;max-width:33.33333%}.row-cols-md-4>*{flex:0 0 25%;max-width:25%}.row-cols-md-5>*{flex:0 0 20%;max-width:20%}.row-cols-md-6>*{flex:0 0 16.66667%;max-width:16.66667%}.col-md-auto{flex:0 0 auto;width:auto;max-width:100%}.col-md-1{flex:0 0 8.33333%;max-width:8.33333%}.col-md-2{flex:0 0 16.66667%;max-width:16.66667%}.col-md-3{flex:0 0 25%;max-width:25%}.col-md-4{flex:0 0 33.33333%;max-width:33.33333%}.col-md-5{flex:0 0 41.66667%;max-width:41.66667%}.col-md-6{flex:0 0 50%;max-width:50%}.col-md-7{flex:0 0 58.33333%;max-width:58.33333%}.col-md-8{flex:0 0 66.66667%;max-width:66.66667%}.col-md-9{flex:0 0 75%;max-width:75%}.col-md-10{flex:0 0 83.33333%;max-width:83.33333%}.col-md-11{flex:0 0 91.66667%;max-width:91.66667%}.col-md-12{flex:0 0 
100%;max-width:100%}.order-md-first{order:-1}.order-md-last{order:13}.order-md-0{order:0}.order-md-1{order:1}.order-md-2{order:2}.order-md-3{order:3}.order-md-4{order:4}.order-md-5{order:5}.order-md-6{order:6}.order-md-7{order:7}.order-md-8{order:8}.order-md-9{order:9}.order-md-10{order:10}.order-md-11{order:11}.order-md-12{order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.33333%}.offset-md-2{margin-left:16.66667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.33333%}.offset-md-5{margin-left:41.66667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.33333%}.offset-md-8{margin-left:66.66667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.33333%}.offset-md-11{margin-left:91.66667%}}@media (min-width:960px){.col-lg{flex-basis:0;flex-grow:1;min-width:0;max-width:100%}.row-cols-lg-1>*{flex:0 0 100%;max-width:100%}.row-cols-lg-2>*{flex:0 0 50%;max-width:50%}.row-cols-lg-3>*{flex:0 0 33.33333%;max-width:33.33333%}.row-cols-lg-4>*{flex:0 0 25%;max-width:25%}.row-cols-lg-5>*{flex:0 0 20%;max-width:20%}.row-cols-lg-6>*{flex:0 0 16.66667%;max-width:16.66667%}.col-lg-auto{flex:0 0 auto;width:auto;max-width:100%}.col-lg-1{flex:0 0 8.33333%;max-width:8.33333%}.col-lg-2{flex:0 0 16.66667%;max-width:16.66667%}.col-lg-3{flex:0 0 25%;max-width:25%}.col-lg-4{flex:0 0 33.33333%;max-width:33.33333%}.col-lg-5{flex:0 0 41.66667%;max-width:41.66667%}.col-lg-6{flex:0 0 50%;max-width:50%}.col-lg-7{flex:0 0 58.33333%;max-width:58.33333%}.col-lg-8{flex:0 0 66.66667%;max-width:66.66667%}.col-lg-9{flex:0 0 75%;max-width:75%}.col-lg-10{flex:0 0 83.33333%;max-width:83.33333%}.col-lg-11{flex:0 0 91.66667%;max-width:91.66667%}.col-lg-12{flex:0 0 100%;max-width:100%}.order-lg-first{order:-1}.order-lg-last{order:13}.order-lg-0{order:0}.order-lg-1{order:1}.order-lg-2{order:2}.order-lg-3{order:3}.order-lg-4{order:4}.order-lg-5{order:5}.order-lg-6{order:6}.order-lg-7{order:7}.order-lg-8{order:8}.order-lg-9{order:9}.order-lg-10{order:10}.order-lg-11{order:11}.order-lg-12{order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.33333%}.offset-lg-2{margin-left:16.66667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.33333%}.offset-lg-5{margin-left:41.66667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.33333%}.offset-lg-8{margin-left:66.66667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.33333%}.offset-lg-11{margin-left:91.66667%}}@media (min-width:1200px){.col-xl{flex-basis:0;flex-grow:1;min-width:0;max-width:100%}.row-cols-xl-1>*{flex:0 0 100%;max-width:100%}.row-cols-xl-2>*{flex:0 0 50%;max-width:50%}.row-cols-xl-3>*{flex:0 0 33.33333%;max-width:33.33333%}.row-cols-xl-4>*{flex:0 0 25%;max-width:25%}.row-cols-xl-5>*{flex:0 0 20%;max-width:20%}.row-cols-xl-6>*{flex:0 0 16.66667%;max-width:16.66667%}.col-xl-auto{flex:0 0 auto;width:auto;max-width:100%}.col-xl-1{flex:0 0 8.33333%;max-width:8.33333%}.col-xl-2{flex:0 0 16.66667%;max-width:16.66667%}.col-xl-3{flex:0 0 25%;max-width:25%}.col-xl-4{flex:0 0 33.33333%;max-width:33.33333%}.col-xl-5{flex:0 0 41.66667%;max-width:41.66667%}.col-xl-6{flex:0 0 50%;max-width:50%}.col-xl-7{flex:0 0 58.33333%;max-width:58.33333%}.col-xl-8{flex:0 0 66.66667%;max-width:66.66667%}.col-xl-9{flex:0 0 75%;max-width:75%}.col-xl-10{flex:0 0 83.33333%;max-width:83.33333%}.col-xl-11{flex:0 0 91.66667%;max-width:91.66667%}.col-xl-12{flex:0 0 
100%;max-width:100%}.order-xl-first{order:-1}.order-xl-last{order:13}.order-xl-0{order:0}.order-xl-1{order:1}.order-xl-2{order:2}.order-xl-3{order:3}.order-xl-4{order:4}.order-xl-5{order:5}.order-xl-6{order:6}.order-xl-7{order:7}.order-xl-8{order:8}.order-xl-9{order:9}.order-xl-10{order:10}.order-xl-11{order:11}.order-xl-12{order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.33333%}.offset-xl-2{margin-left:16.66667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.33333%}.offset-xl-5{margin-left:41.66667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.33333%}.offset-xl-8{margin-left:66.66667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.33333%}.offset-xl-11{margin-left:91.66667%}}.table{width:100%;margin-bottom:1rem;color:#212529}.table td,.table th{padding:.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table thead th{vertical-align:bottom;border-bottom:2px solid #dee2e6}.table tbody+tbody{border-top:2px solid #dee2e6}.table-sm td,.table-sm th{padding:.3rem}.table-bordered,.table-bordered td,.table-bordered th{border:1px solid #dee2e6}.table-bordered thead td,.table-bordered thead th{border-bottom-width:2px}.table-borderless tbody+tbody,.table-borderless td,.table-borderless th,.table-borderless thead th{border:0}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(0,0,0,.05)}.table-hover tbody tr:hover{color:#212529;background-color:rgba(0,0,0,.075)}.table-primary,.table-primary>td,.table-primary>th{background-color:#b8daff}.table-primary tbody+tbody,.table-primary td,.table-primary th,.table-primary thead th{border-color:#7abaff}.table-hover .table-primary:hover,.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#9fcdff}.table-secondary,.table-secondary>td,.table-secondary>th{background-color:#d6d8db}.table-secondary tbody+tbody,.table-secondary td,.table-secondary th,.table-secondary thead th{border-color:#b3b7bb}.table-hover .table-secondary:hover,.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#c8cbcf}.table-success,.table-success>td,.table-success>th{background-color:#c3e6cb}.table-success tbody+tbody,.table-success td,.table-success th,.table-success thead th{border-color:#8fd19e}.table-hover .table-success:hover,.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b1dfbb}.table-info,.table-info>td,.table-info>th{background-color:#bee5eb}.table-info tbody+tbody,.table-info td,.table-info th,.table-info thead th{border-color:#86cfda}.table-hover .table-info:hover,.table-hover .table-info:hover>td,.table-hover .table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>td,.table-warning>th{background-color:#ffeeba}.table-warning tbody+tbody,.table-warning td,.table-warning th,.table-warning thead th{border-color:#ffdf7e}.table-hover .table-warning:hover,.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#ffe8a1}.table-danger,.table-danger>td,.table-danger>th{background-color:#f5c6cb}.table-danger tbody+tbody,.table-danger td,.table-danger th,.table-danger thead th{border-color:#ed969e}.table-hover .table-danger:hover,.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#f1b0b7}.table-light,.table-light>td,.table-light>th{background-color:#fdfdfe}.table-light tbody+tbody,.table-light td,.table-light th,.table-light thead th{border-color:#fbfcfc}.table-hover .table-light:hover,.table-hover 
.table-light:hover>td,.table-hover .table-light:hover>th{background-color:#ececf6}.table-dark,.table-dark>td,.table-dark>th{background-color:#c6c8ca}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#95999c}.table-hover .table-dark:hover,.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#b9bbbe}.table-active,.table-active>td,.table-active>th,.table-hover .table-active:hover,.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(0,0,0,.075)}.table .thead-dark th{color:#fff;background-color:#343a40;border-color:#454d55}.table .thead-light th{color:#495057;background-color:#e9ecef;border-color:#dee2e6}.table-dark{color:#fff;background-color:#343a40}.table-dark td,.table-dark th,.table-dark thead th{border-color:#454d55}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:hsla(0,0%,100%,.05)}.table-dark.table-hover tbody tr:hover{color:#fff;background-color:hsla(0,0%,100%,.075)}@media (max-width:539.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-sm>.table-bordered{border:0}}@media (max-width:719.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-md>.table-bordered{border:0}}@media (max-width:959.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-lg>.table-bordered{border:0}}@media (max-width:1199.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:.25rem;transition:border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.form-control{transition:none}}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:-moz-focusring{color:transparent;text-shadow:0 0 0 #495057}.form-control:focus{color:#495057;background-color:#fff;border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.form-control::placeholder{color:#6c757d;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}input[type=date].form-control,input[type=datetime-local].form-control,input[type=month].form-control,input[type=time].form-control{appearance:none}select.form-control:focus::-ms-value{color:#495057;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(.375rem + 1px);padding-bottom:calc(.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(.5rem + 1px);padding-bottom:calc(.5rem + 1px);font-size:1.25rem;line-height:1.5}.col-form-label-sm{padding-top:calc(.25rem + 1px);padding-bottom:calc(.25rem + 1px);font-size:.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding:.375rem 0;margin-bottom:0;font-size:1rem;line-height:1.5;color:#212529;background-color:transparent;border:solid transparent;border-width:1px 
0}.form-control-plaintext.form-control-lg,.form-control-plaintext.form-control-sm{padding-right:0;padding-left:0}.form-control-sm{height:calc(1.5em + .5rem + 2px);padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.form-control-lg{height:calc(1.5em + 1rem + 2px);padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}select.form-control[multiple],select.form-control[size],textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:.25rem}.form-row{display:flex;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*=col-]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:.3rem;margin-left:-1.25rem}.form-check-input:disabled~.form-check-label,.form-check-input[disabled]~.form-check-label{color:#6c757d}.form-check-label{margin-bottom:0}.form-check-inline{display:inline-flex;align-items:center;padding-left:0;margin-right:.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#28a745}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(40,167,69,.9);border-radius:.25rem}.is-valid~.valid-feedback,.is-valid~.valid-tooltip,.was-validated :valid~.valid-feedback,.was-validated :valid~.valid-tooltip{display:block}.form-control.is-valid,.was-validated .form-control:valid{border-color:#28a745;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-valid:focus,.was-validated .form-control:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.was-validated textarea.form-control:valid,textarea.form-control.is-valid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.custom-select.is-valid,.was-validated .custom-select:valid{border-color:#28a745;padding-right:calc(.75em + 2.3125rem);background:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%2328a745' d='M2.3 6.73L.6 4.53c-.4-1.04.46-1.4 1.1-.8l1.1 1.4 3.4-3.8c.6-.63 1.6-.27 1.2.7l-4 4.6c-.43.5-.8.4-1.1.1z'/%3E%3C/svg%3E") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.custom-select.is-valid:focus,.was-validated .custom-select:valid:focus{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.form-check-input.is-valid~.form-check-label,.was-validated .form-check-input:valid~.form-check-label{color:#28a745}.form-check-input.is-valid~.valid-feedback,.form-check-input.is-valid~.valid-tooltip,.was-validated .form-check-input:valid~.valid-feedback,.was-validated 
.form-check-input:valid~.valid-tooltip{display:block}.custom-control-input.is-valid~.custom-control-label,.was-validated .custom-control-input:valid~.custom-control-label{color:#28a745}.custom-control-input.is-valid~.custom-control-label:before,.was-validated .custom-control-input:valid~.custom-control-label:before{border-color:#28a745}.custom-control-input.is-valid:checked~.custom-control-label:before,.was-validated .custom-control-input:valid:checked~.custom-control-label:before{border-color:#34ce57;background-color:#34ce57}.custom-control-input.is-valid:focus~.custom-control-label:before,.was-validated .custom-control-input:valid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.custom-control-input.is-valid:focus:not(:checked)~.custom-control-label:before,.custom-file-input.is-valid~.custom-file-label,.was-validated .custom-control-input:valid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-file-input:valid~.custom-file-label{border-color:#28a745}.custom-file-input.is-valid:focus~.custom-file-label,.was-validated .custom-file-input:valid:focus~.custom-file-label{border-color:#28a745;box-shadow:0 0 0 .2rem rgba(40,167,69,.25)}.invalid-feedback{display:none;width:100%;margin-top:.25rem;font-size:80%;color:#dc3545}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:.25rem .5rem;margin-top:.1rem;font-size:.875rem;line-height:1.5;color:#fff;background-color:rgba(220,53,69,.9);border-radius:.25rem}.is-invalid~.invalid-feedback,.is-invalid~.invalid-tooltip,.was-validated :invalid~.invalid-feedback,.was-validated :invalid~.invalid-tooltip{display:block}.form-control.is-invalid,.was-validated .form-control:invalid{border-color:#dc3545;padding-right:calc(1.5em + .75rem);background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E");background-repeat:no-repeat;background-position:right calc(.375em + .1875rem) center;background-size:calc(.75em + .375rem) calc(.75em + .375rem)}.form-control.is-invalid:focus,.was-validated .form-control:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.was-validated textarea.form-control:invalid,textarea.form-control.is-invalid{padding-right:calc(1.5em + .75rem);background-position:top calc(.375em + .1875rem) right calc(.375em + .1875rem)}.custom-select.is-invalid,.was-validated .custom-select:invalid{border-color:#dc3545;padding-right:calc(.75em + 2.3125rem);background:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 10px,url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' fill='none' stroke='%23dc3545'%3E%3Ccircle cx='6' cy='6' r='4.5'/%3E%3Cpath stroke-linejoin='round' d='M5.8 3.6h.4L6 6.5z'/%3E%3Ccircle cx='6' cy='8.2' r='.6' fill='%23dc3545' stroke='none'/%3E%3C/svg%3E") #fff no-repeat center right 1.75rem/calc(.75em + .375rem) calc(.75em + .375rem)}.custom-select.is-invalid:focus,.was-validated .custom-select:invalid:focus{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.form-check-input.is-invalid~.form-check-label,.was-validated 
.form-check-input:invalid~.form-check-label{color:#dc3545}.form-check-input.is-invalid~.invalid-feedback,.form-check-input.is-invalid~.invalid-tooltip,.was-validated .form-check-input:invalid~.invalid-feedback,.was-validated .form-check-input:invalid~.invalid-tooltip{display:block}.custom-control-input.is-invalid~.custom-control-label,.was-validated .custom-control-input:invalid~.custom-control-label{color:#dc3545}.custom-control-input.is-invalid~.custom-control-label:before,.was-validated .custom-control-input:invalid~.custom-control-label:before{border-color:#dc3545}.custom-control-input.is-invalid:checked~.custom-control-label:before,.was-validated .custom-control-input:invalid:checked~.custom-control-label:before{border-color:#e4606d;background-color:#e4606d}.custom-control-input.is-invalid:focus~.custom-control-label:before,.was-validated .custom-control-input:invalid:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.custom-control-input.is-invalid:focus:not(:checked)~.custom-control-label:before,.custom-file-input.is-invalid~.custom-file-label,.was-validated .custom-control-input:invalid:focus:not(:checked)~.custom-control-label:before,.was-validated .custom-file-input:invalid~.custom-file-label{border-color:#dc3545}.custom-file-input.is-invalid:focus~.custom-file-label,.was-validated .custom-file-input:invalid:focus~.custom-file-label{border-color:#dc3545;box-shadow:0 0 0 .2rem rgba(220,53,69,.25)}.form-inline{display:flex;flex-flow:row wrap;align-items:center}.form-inline .form-check{width:100%}@media (min-width:540px){.form-inline label{justify-content:center}.form-inline .form-group,.form-inline label{display:flex;align-items:center;margin-bottom:0}.form-inline .form-group{flex:0 0 auto;flex-flow:row wrap}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .custom-select,.form-inline .input-group{width:auto}.form-inline .form-check{display:flex;align-items:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;flex-shrink:0;margin-top:0;margin-right:.25rem;margin-left:0}.form-inline .custom-control{align-items:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-weight:400;color:#212529;text-align:center;vertical-align:middle;user-select:none;background-color:transparent;border:1px solid transparent;padding:.375rem .75rem;font-size:1rem;line-height:1.5;border-radius:.25rem;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.btn{transition:none}}.btn:hover{color:#212529;text-decoration:none}.btn.focus,.btn:focus{outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.btn.disabled,.btn:disabled{opacity:.65}.btn:not(:disabled):not(.disabled){cursor:pointer}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary.focus,.btn-primary:focus,.btn-primary:hover{color:#fff;background-color:#0069d9;border-color:#0062cc}.btn-primary.focus,.btn-primary:focus{box-shadow:0 0 0 .2rem 
rgba(38,143,255,.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#007bff;border-color:#007bff}.btn-primary:not(:disabled):not(.disabled).active,.btn-primary:not(:disabled):not(.disabled):active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#0062cc;border-color:#005cbf}.btn-primary:not(:disabled):not(.disabled).active:focus,.btn-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(38,143,255,.5)}.btn-secondary{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary.focus,.btn-secondary:focus,.btn-secondary:hover{color:#fff;background-color:#5a6268;border-color:#545b62}.btn-secondary.focus,.btn-secondary:focus{box-shadow:0 0 0 .2rem rgba(130,138,145,.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-secondary:not(:disabled):not(.disabled).active,.btn-secondary:not(:disabled):not(.disabled):active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#545b62;border-color:#4e555b}.btn-secondary:not(:disabled):not(.disabled).active:focus,.btn-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(130,138,145,.5)}.btn-success{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success.focus,.btn-success:focus,.btn-success:hover{color:#fff;background-color:#218838;border-color:#1e7e34}.btn-success.focus,.btn-success:focus{box-shadow:0 0 0 .2rem rgba(72,180,97,.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#28a745;border-color:#28a745}.btn-success:not(:disabled):not(.disabled).active,.btn-success:not(:disabled):not(.disabled):active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#1e7e34;border-color:#1c7430}.btn-success:not(:disabled):not(.disabled).active:focus,.btn-success:not(:disabled):not(.disabled):active:focus,.show>.btn-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(72,180,97,.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info.focus,.btn-info:focus,.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info.focus,.btn-info:focus{box-shadow:0 0 0 .2rem rgba(58,176,195,.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled).active,.btn-info:not(:disabled):not(.disabled):active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled).active:focus,.btn-info:not(:disabled):not(.disabled):active:focus,.show>.btn-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(58,176,195,.5)}.btn-warning{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning.focus,.btn-warning:focus,.btn-warning:hover{color:#212529;background-color:#e0a800;border-color:#d39e00}.btn-warning.focus,.btn-warning:focus{box-shadow:0 0 0 .2rem rgba(222,170,12,.5)}.btn-warning.disabled,.btn-warning:disabled{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-warning:not(:disabled):not(.disabled).active,.btn-warning:not(:disabled):not(.disabled):active,.show>.btn-warning.dropdown-toggle{color:#212529;background-color:#d39e00;border-color:#c69500}.btn-warning:not(:disabled):not(.disabled).active:focus,.btn-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem 
rgba(222,170,12,.5)}.btn-danger{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger.focus,.btn-danger:focus,.btn-danger:hover{color:#fff;background-color:#c82333;border-color:#bd2130}.btn-danger.focus,.btn-danger:focus{box-shadow:0 0 0 .2rem rgba(225,83,97,.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-danger:not(:disabled):not(.disabled).active,.btn-danger:not(:disabled):not(.disabled):active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bd2130;border-color:#b21f2d}.btn-danger:not(:disabled):not(.disabled).active:focus,.btn-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(225,83,97,.5)}.btn-light{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light.focus,.btn-light:focus,.btn-light:hover{color:#212529;background-color:#e2e6ea;border-color:#dae0e5}.btn-light.focus,.btn-light:focus{box-shadow:0 0 0 .2rem rgba(216,217,219,.5)}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:not(:disabled):not(.disabled).active,.btn-light:not(:disabled):not(.disabled):active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#dae0e5;border-color:#d3d9df}.btn-light:not(:disabled):not(.disabled).active:focus,.btn-light:not(:disabled):not(.disabled):active:focus,.show>.btn-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(216,217,219,.5)}.btn-dark{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark.focus,.btn-dark:focus,.btn-dark:hover{color:#fff;background-color:#23272b;border-color:#1d2124}.btn-dark.focus,.btn-dark:focus{box-shadow:0 0 0 .2rem rgba(82,88,93,.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:not(:disabled):not(.disabled).active,.btn-dark:not(:disabled):not(.disabled):active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1d2124;border-color:#171a1d}.btn-dark:not(:disabled):not(.disabled).active:focus,.btn-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(82,88,93,.5)}.btn-outline-primary{color:#007bff;border-color:#007bff}.btn-outline-primary:hover{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary.focus,.btn-outline-primary:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#007bff;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled).active,.btn-outline-primary:not(:disabled):not(.disabled):active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#007bff;border-color:#007bff}.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.btn-outline-secondary{color:#6c757d;border-color:#6c757d}.btn-outline-secondary:hover{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary.focus,.btn-outline-secondary:focus{box-shadow:0 0 0 .2rem 
rgba(108,117,125,.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#6c757d;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled).active,.btn-outline-secondary:not(:disabled):not(.disabled):active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#6c757d;border-color:#6c757d}.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.btn-outline-success{color:#28a745;border-color:#28a745}.btn-outline-success:hover{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success.focus,.btn-outline-success:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#28a745;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled).active,.btn-outline-success:not(:disabled):not(.disabled):active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#28a745;border-color:#28a745}.btn-outline-success:not(:disabled):not(.disabled).active:focus,.btn-outline-success:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-success.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.btn-outline-info{color:#17a2b8;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info.focus,.btn-outline-info:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled).active,.btn-outline-info:not(:disabled):not(.disabled):active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled).active:focus,.btn-outline-info:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-info.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.btn-outline-warning{color:#ffc107;border-color:#ffc107}.btn-outline-warning:hover{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning.focus,.btn-outline-warning:focus{box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#ffc107;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled).active,.btn-outline-warning:not(:disabled):not(.disabled):active,.show>.btn-outline-warning.dropdown-toggle{color:#212529;background-color:#ffc107;border-color:#ffc107}.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(255,193,7,.5)}.btn-outline-danger{color:#dc3545;border-color:#dc3545}.btn-outline-danger:hover{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger.focus,.btn-outline-danger:focus{box-shadow:0 0 0 .2rem 
rgba(220,53,69,.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#dc3545;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled).active,.btn-outline-danger:not(:disabled):not(.disabled):active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#dc3545;border-color:#dc3545}.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.btn-outline-light{color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:hover{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light.focus,.btn-outline-light:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled).active,.btn-outline-light:not(:disabled):not(.disabled):active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:not(:disabled):not(.disabled).active:focus,.btn-outline-light:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-light.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.btn-outline-dark{color:#343a40;border-color:#343a40}.btn-outline-dark:hover{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark.focus,.btn-outline-dark:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#343a40;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled).active,.btn-outline-dark:not(:disabled):not(.disabled):active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.btn-link{font-weight:400;color:#007bff;text-decoration:none}.btn-link:hover{color:#0056b3}.btn-link.focus,.btn-link:focus,.btn-link:hover{text-decoration:underline}.btn-link.disabled,.btn-link:disabled{color:#6c757d;pointer-events:none}.btn-group-lg>.btn,.btn-lg{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.btn-group-sm>.btn,.btn-sm{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:.5rem}input[type=button].btn-block,input[type=reset].btn-block,input[type=submit].btn-block{width:100%}.fade{transition:opacity .15s linear}@media (prefers-reduced-motion:reduce){.fade{transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{position:relative;height:0;overflow:hidden;transition:height .35s ease}@media (prefers-reduced-motion:reduce){.collapsing{transition:none}}.dropdown,.dropleft,.dropright,.dropup{position:relative}.dropdown-toggle{white-space:nowrap}.dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid;border-right:.3em solid transparent;border-bottom:0;border-left:.3em solid transparent}.dropdown-toggle:empty:after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:.5rem 0;margin:.125rem 0 
0;font-size:1rem;color:#212529;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.15);border-radius:.25rem}.dropdown-menu-left{right:auto;left:0}.dropdown-menu-right{right:0;left:auto}@media (min-width:540px){.dropdown-menu-sm-left{right:auto;left:0}.dropdown-menu-sm-right{right:0;left:auto}}@media (min-width:720px){.dropdown-menu-md-left{right:auto;left:0}.dropdown-menu-md-right{right:0;left:auto}}@media (min-width:960px){.dropdown-menu-lg-left{right:auto;left:0}.dropdown-menu-lg-right{right:0;left:auto}}@media (min-width:1200px){.dropdown-menu-xl-left{right:auto;left:0}.dropdown-menu-xl-right{right:0;left:auto}}.dropup .dropdown-menu{top:auto;bottom:100%;margin-top:0;margin-bottom:.125rem}.dropup .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:0;border-right:.3em solid transparent;border-bottom:.3em solid;border-left:.3em solid transparent}.dropup .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-menu{top:0;right:auto;left:100%;margin-top:0;margin-left:.125rem}.dropright .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:0;border-bottom:.3em solid transparent;border-left:.3em solid}.dropright .dropdown-toggle:empty:after{margin-left:0}.dropright .dropdown-toggle:after{vertical-align:0}.dropleft .dropdown-menu{top:0;right:100%;left:auto;margin-top:0;margin-right:.125rem}.dropleft .dropdown-toggle:after{display:inline-block;margin-left:.255em;vertical-align:.255em;content:"";display:none}.dropleft .dropdown-toggle:before{display:inline-block;margin-right:.255em;vertical-align:.255em;content:"";border-top:.3em solid transparent;border-right:.3em solid;border-bottom:.3em solid transparent}.dropleft .dropdown-toggle:empty:after{margin-left:0}.dropleft .dropdown-toggle:before{vertical-align:0}.dropdown-menu[x-placement^=bottom],.dropdown-menu[x-placement^=left],.dropdown-menu[x-placement^=right],.dropdown-menu[x-placement^=top]{right:auto;bottom:auto}.dropdown-divider{height:0;margin:.5rem 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:.25rem 1.5rem;clear:both;font-weight:400;color:#212529;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:focus,.dropdown-item:hover{color:#16181b;text-decoration:none;background-color:#f8f9fa}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#007bff}.dropdown-item.disabled,.dropdown-item:disabled{color:#6c757d;pointer-events:none;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:.5rem 1.5rem;margin-bottom:0;font-size:.875rem;color:#6c757d;white-space:nowrap}.dropdown-item-text{display:block;padding:.25rem 1.5rem;color:#212529}.btn-group,.btn-group-vertical{position:relative;display:inline-flex;vertical-align:middle}.btn-group-vertical>.btn,.btn-group>.btn{position:relative;flex:1 1 auto}.btn-group-vertical>.btn.active,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:hover,.btn-group>.btn.active,.btn-group>.btn:active,.btn-group>.btn:focus,.btn-group>.btn:hover{z-index:1}.btn-toolbar{display:flex;flex-wrap:wrap;justify-content:flex-start}.btn-toolbar 
.input-group{width:auto}.btn-group>.btn-group:not(:first-child),.btn-group>.btn:not(:first-child){margin-left:-1px}.btn-group>.btn-group:not(:last-child)>.btn,.btn-group>.btn:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:not(:first-child)>.btn,.btn-group>.btn:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:.5625rem;padding-left:.5625rem}.dropdown-toggle-split:after,.dropright .dropdown-toggle-split:after,.dropup .dropdown-toggle-split:after{margin-left:0}.dropleft .dropdown-toggle-split:before{margin-right:0}.btn-group-sm>.btn+.dropdown-toggle-split,.btn-sm+.dropdown-toggle-split{padding-right:.375rem;padding-left:.375rem}.btn-group-lg>.btn+.dropdown-toggle-split,.btn-lg+.dropdown-toggle-split{padding-right:.75rem;padding-left:.75rem}.btn-group-vertical{flex-direction:column;align-items:flex-start;justify-content:center}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group{width:100%}.btn-group-vertical>.btn-group:not(:first-child),.btn-group-vertical>.btn:not(:first-child){margin-top:-1px}.btn-group-vertical>.btn-group:not(:last-child)>.btn,.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle){border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:not(:first-child)>.btn,.btn-group-vertical>.btn:not(:first-child){border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn-group>.btn input[type=checkbox],.btn-group-toggle>.btn-group>.btn input[type=radio],.btn-group-toggle>.btn input[type=checkbox],.btn-group-toggle>.btn input[type=radio]{position:absolute;clip:rect(0,0,0,0);pointer-events:none}.input-group{position:relative;display:flex;flex-wrap:wrap;align-items:stretch;width:100%}.input-group>.custom-file,.input-group>.custom-select,.input-group>.form-control,.input-group>.form-control-plaintext{position:relative;flex:1 1 auto;width:1%;min-width:0;margin-bottom:0}.input-group>.custom-file+.custom-file,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.form-control,.input-group>.custom-select+.custom-file,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.form-control,.input-group>.form-control+.custom-file,.input-group>.form-control+.custom-select,.input-group>.form-control+.form-control,.input-group>.form-control-plaintext+.custom-file,.input-group>.form-control-plaintext+.custom-select,.input-group>.form-control-plaintext+.form-control{margin-left:-1px}.input-group>.custom-file .custom-file-input:focus~.custom-file-label,.input-group>.custom-select:focus,.input-group>.form-control:focus{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.custom-select:not(:last-child),.input-group>.form-control:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-select:not(:first-child),.input-group>.form-control:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:flex;align-items:center}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label:after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-append,.input-group-prepend{display:flex}.input-group-append .btn,.input-group-prepend 
.btn{position:relative;z-index:2}.input-group-append .btn:focus,.input-group-prepend .btn:focus{z-index:3}.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.btn,.input-group-append .input-group-text+.input-group-text,.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-prepend .input-group-text+.input-group-text{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:flex;align-items:center;padding:.375rem .75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:.25rem}.input-group-text input[type=checkbox],.input-group-text input[type=radio]{margin-top:0}.input-group-lg>.custom-select,.input-group-lg>.form-control:not(textarea){height:calc(1.5em + 1rem + 2px)}.input-group-lg>.custom-select,.input-group-lg>.form-control,.input-group-lg>.input-group-append>.btn,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-prepend>.input-group-text{padding:.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:.3rem}.input-group-sm>.custom-select,.input-group-sm>.form-control:not(textarea){height:calc(1.5em + .5rem + 2px)}.input-group-sm>.custom-select,.input-group-sm>.form-control,.input-group-sm>.input-group-append>.btn,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-prepend>.input-group-text{padding:.25rem .5rem;font-size:.875rem;line-height:1.5;border-radius:.2rem}.input-group-lg>.custom-select,.input-group-sm>.custom-select{padding-right:1.75rem}.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child),.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child),.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text{border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;left:0;z-index:-1;width:1rem;height:1.25rem;opacity:0}.custom-control-input:checked~.custom-control-label:before{color:#fff;border-color:#007bff;background-color:#007bff}.custom-control-input:focus~.custom-control-label:before{box-shadow:0 0 0 .2rem 
rgba(0,123,255,.25)}.custom-control-input:focus:not(:checked)~.custom-control-label:before{border-color:#80bdff}.custom-control-input:not(:disabled):active~.custom-control-label:before{color:#fff;background-color:#b3d7ff;border-color:#b3d7ff}.custom-control-input:disabled~.custom-control-label,.custom-control-input[disabled]~.custom-control-label{color:#6c757d}.custom-control-input:disabled~.custom-control-label:before,.custom-control-input[disabled]~.custom-control-label:before{background-color:#e9ecef}.custom-control-label{position:relative;margin-bottom:0;vertical-align:top}.custom-control-label:before{pointer-events:none;background-color:#fff;border:1px solid #adb5bd}.custom-control-label:after,.custom-control-label:before{position:absolute;top:.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;content:""}.custom-control-label:after{background:no-repeat 50%/50% 50%}.custom-checkbox .custom-control-label:before{border-radius:.25rem}.custom-checkbox .custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='8' height='8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26l2.974 2.99L8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:before{border-color:#007bff;background-color:#007bff}.custom-checkbox .custom-control-input:indeterminate~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(0,123,255,.5)}.custom-checkbox .custom-control-input:disabled:indeterminate~.custom-control-label:before{background-color:rgba(0,123,255,.5)}.custom-radio .custom-control-label:before{border-radius:50%}.custom-radio .custom-control-input:checked~.custom-control-label:after{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='12' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-radio .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(0,123,255,.5)}.custom-switch{padding-left:2.25rem}.custom-switch .custom-control-label:before{left:-2.25rem;width:1.75rem;pointer-events:all;border-radius:.5rem}.custom-switch .custom-control-label:after{top:calc(.25rem + 2px);left:calc(-2.25rem + 2px);width:calc(1rem - 4px);height:calc(1rem - 4px);background-color:#adb5bd;border-radius:.5rem;transition:transform .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.custom-switch .custom-control-label:after{transition:none}}.custom-switch .custom-control-input:checked~.custom-control-label:after{background-color:#fff;transform:translateX(.75rem)}.custom-switch .custom-control-input:disabled:checked~.custom-control-label:before{background-color:rgba(0,123,255,.5)}.custom-select{display:inline-block;width:100%;height:calc(1.5em + .75rem + 2px);padding:.375rem 1.75rem .375rem .75rem;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='4' height='5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right .75rem center/8px 
10px;border:1px solid #ced4da;border-radius:.25rem;appearance:none}.custom-select:focus{border-color:#80bdff;outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.custom-select:focus::-ms-value{color:#495057;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:.75rem;background-image:none}.custom-select:disabled{color:#6c757d;background-color:#e9ecef}.custom-select::-ms-expand{display:none}.custom-select:-moz-focusring{color:transparent;text-shadow:0 0 0 #495057}.custom-select-sm{height:calc(1.5em + .5rem + 2px);padding-top:.25rem;padding-bottom:.25rem;padding-left:.5rem;font-size:.875rem}.custom-select-lg{height:calc(1.5em + 1rem + 2px);padding-top:.5rem;padding-bottom:.5rem;padding-left:1rem;font-size:1.25rem}.custom-file{display:inline-block;margin-bottom:0}.custom-file,.custom-file-input{position:relative;width:100%;height:calc(1.5em + .75rem + 2px)}.custom-file-input{z-index:2;margin:0;opacity:0}.custom-file-input:focus~.custom-file-label{border-color:#80bdff;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.custom-file-input:disabled~.custom-file-label,.custom-file-input[disabled]~.custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en)~.custom-file-label:after{content:"Browse"}.custom-file-input~.custom-file-label[data-browse]:after{content:attr(data-browse)}.custom-file-label{left:0;z-index:1;height:calc(1.5em + .75rem + 2px);font-weight:400;background-color:#fff;border:1px solid #ced4da;border-radius:.25rem}.custom-file-label,.custom-file-label:after{position:absolute;top:0;right:0;padding:.375rem .75rem;line-height:1.5;color:#495057}.custom-file-label:after{bottom:0;z-index:3;display:block;height:calc(1.5em + .75rem);content:"Browse";background-color:#e9ecef;border-left:inherit;border-radius:0 .25rem .25rem 0}.custom-range{width:100%;height:1.4rem;padding:0;background-color:transparent;appearance:none}.custom-range:focus{outline:none}.custom-range:focus::-webkit-slider-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,.25)}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,.25)}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 .2rem rgba(0,123,255,.25)}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-.25rem;background-color:#007bff;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;appearance:none}@media (prefers-reduced-motion:reduce){.custom-range::-webkit-slider-thumb{transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#b3d7ff}.custom-range::-webkit-slider-runnable-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#007bff;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;appearance:none}@media 
(prefers-reduced-motion:reduce){.custom-range::-moz-range-thumb{transition:none}}.custom-range::-moz-range-thumb:active{background-color:#b3d7ff}.custom-range::-moz-range-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;margin-top:0;margin-right:.2rem;margin-left:.2rem;background-color:#007bff;border:0;border-radius:1rem;transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;appearance:none}@media (prefers-reduced-motion:reduce){.custom-range::-ms-thumb{transition:none}}.custom-range::-ms-thumb:active{background-color:#b3d7ff}.custom-range::-ms-track{width:100%;height:.5rem;color:transparent;cursor:pointer;background-color:transparent;border-color:transparent;border-width:.5rem}.custom-range::-ms-fill-lower,.custom-range::-ms-fill-upper{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{margin-right:15px}.custom-range:disabled::-webkit-slider-thumb{background-color:#adb5bd}.custom-range:disabled::-webkit-slider-runnable-track{cursor:default}.custom-range:disabled::-moz-range-thumb{background-color:#adb5bd}.custom-range:disabled::-moz-range-track{cursor:default}.custom-range:disabled::-ms-thumb{background-color:#adb5bd}.custom-control-label:before,.custom-file-label,.custom-select{transition:background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.custom-control-label:before,.custom-file-label,.custom-select{transition:none}}.nav{display:flex;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:.5rem 1rem}.nav-link:focus,.nav-link:hover{text-decoration:none}.nav-link.disabled{color:#6c757d;pointer-events:none;cursor:default}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:.25rem;border-top-right-radius:.25rem}.nav-tabs .nav-link:focus,.nav-tabs .nav-link:hover{border-color:#e9ecef #e9ecef #dee2e6}.nav-tabs .nav-link.disabled{color:#6c757d;background-color:transparent;border-color:transparent}.nav-tabs .nav-item.show .nav-link,.nav-tabs .nav-link.active{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#007bff}.nav-fill .nav-item{flex:1 1 auto;text-align:center}.nav-justified .nav-item{flex-basis:0;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;padding:.5rem 1rem}.navbar,.navbar .container,.navbar .container-fluid,.navbar .container-lg,.navbar .container-md,.navbar .container-sm,.navbar .container-xl{display:flex;flex-wrap:wrap;align-items:center;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:.3125rem;padding-bottom:.3125rem;margin-right:1rem;font-size:1.25rem;line-height:inherit;white-space:nowrap}.navbar-brand:focus,.navbar-brand:hover{text-decoration:none}.navbar-nav{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav 
.dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:.5rem;padding-bottom:.5rem}.navbar-collapse{flex-basis:100%;flex-grow:1;align-items:center}.navbar-toggler{padding:.25rem .75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid transparent;border-radius:.25rem}.navbar-toggler:focus,.navbar-toggler:hover{text-decoration:none}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat 50%;background-size:100% 100%}@media (max-width:539.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl{padding-right:0;padding-left:0}}@media (min-width:540px){.navbar-expand-sm{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-sm .navbar-nav{flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid,.navbar-expand-sm>.container-lg,.navbar-expand-sm>.container-md,.navbar-expand-sm>.container-sm,.navbar-expand-sm>.container-xl{flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media (max-width:719.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl{padding-right:0;padding-left:0}}@media (min-width:720px){.navbar-expand-md{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-md .navbar-nav{flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid,.navbar-expand-md>.container-lg,.navbar-expand-md>.container-md,.navbar-expand-md>.container-sm,.navbar-expand-md>.container-xl{flex-wrap:nowrap}.navbar-expand-md .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media (max-width:959.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl{padding-right:0;padding-left:0}}@media (min-width:960px){.navbar-expand-lg{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-lg .navbar-nav{flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid,.navbar-expand-lg>.container-lg,.navbar-expand-lg>.container-md,.navbar-expand-lg>.container-sm,.navbar-expand-lg>.container-xl{flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media (max-width:1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl{padding-right:0;padding-left:0}}@media (min-width:1200px){.navbar-expand-xl{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand-xl .navbar-nav{flex-direction:row}.navbar-expand-xl 
.navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid,.navbar-expand-xl>.container-lg,.navbar-expand-xl>.container-md,.navbar-expand-xl>.container-sm,.navbar-expand-xl>.container-xl{flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{flex-flow:row nowrap;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:.5rem;padding-left:.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid,.navbar-expand>.container-lg,.navbar-expand>.container-md,.navbar-expand>.container-sm,.navbar-expand>.container-xl{flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:flex!important;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand,.navbar-light .navbar-brand:focus,.navbar-light .navbar-brand:hover{color:rgba(0,0,0,.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,.5)}.navbar-light .navbar-nav .nav-link:focus,.navbar-light .navbar-nav .nav-link:hover{color:rgba(0,0,0,.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,.3)}.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.active,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .show>.nav-link{color:rgba(0,0,0,.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,.5);border-color:rgba(0,0,0,.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30'%3E%3Cpath stroke='rgba(0,0,0,0.5)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(0,0,0,.5)}.navbar-light .navbar-text a,.navbar-light .navbar-text a:focus,.navbar-light .navbar-text a:hover{color:rgba(0,0,0,.9)}.navbar-dark .navbar-brand,.navbar-dark .navbar-brand:focus,.navbar-dark .navbar-brand:hover{color:#fff}.navbar-dark .navbar-nav .nav-link{color:hsla(0,0%,100%,.5)}.navbar-dark .navbar-nav .nav-link:focus,.navbar-dark .navbar-nav .nav-link:hover{color:hsla(0,0%,100%,.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:hsla(0,0%,100%,.25)}.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.active,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .show>.nav-link{color:#fff}.navbar-dark .navbar-toggler{color:hsla(0,0%,100%,.5);border-color:hsla(0,0%,100%,.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='30' height='30'%3E%3Cpath stroke='rgba(255,255,255,0.5)' stroke-linecap='round' stroke-miterlimit='10' stroke-width='2' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:hsla(0,0%,100%,.5)}.navbar-dark .navbar-text a,.navbar-dark .navbar-text a:focus,.navbar-dark .navbar-text a:hover{color:#fff}.card{position:relative;display:flex;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid 
rgba(0,0,0,.125);border-radius:.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group{border-top:inherit;border-bottom:inherit}.card>.list-group:first-child{border-top-width:0;border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card>.list-group:last-child{border-bottom-width:0;border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-body{flex:1 1 auto;min-height:1px;padding:1.25rem}.card-title{margin-bottom:.75rem}.card-subtitle{margin-top:-.375rem}.card-subtitle,.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{padding:.75rem 1.25rem;margin-bottom:0;background-color:rgba(0,0,0,.03);border-bottom:1px solid rgba(0,0,0,.125)}.card-header:first-child{border-radius:calc(.25rem - 1px) calc(.25rem - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:.75rem 1.25rem;background-color:rgba(0,0,0,.03);border-top:1px solid rgba(0,0,0,.125)}.card-footer:last-child{border-radius:0 0 calc(.25rem - 1px) calc(.25rem - 1px)}.card-header-tabs{margin-bottom:-.75rem;border-bottom:0}.card-header-pills,.card-header-tabs{margin-right:-.625rem;margin-left:-.625rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img,.card-img-bottom,.card-img-top{flex-shrink:0;width:100%}.card-img,.card-img-top{border-top-left-radius:calc(.25rem - 1px);border-top-right-radius:calc(.25rem - 1px)}.card-img,.card-img-bottom{border-bottom-right-radius:calc(.25rem - 1px);border-bottom-left-radius:calc(.25rem - 1px)}.card-deck .card{margin-bottom:15px}@media (min-width:540px){.card-deck{display:flex;flex-flow:row wrap;margin-right:-15px;margin-left:-15px}.card-deck .card{flex:1 0 0%;margin-right:15px;margin-bottom:0;margin-left:15px}}.card-group>.card{margin-bottom:15px}@media (min-width:540px){.card-group{display:flex;flex-flow:row wrap}.card-group>.card{flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:not(:last-child) .card-header,.card-group>.card:not(:last-child) .card-img-top{border-top-right-radius:0}.card-group>.card:not(:last-child) .card-footer,.card-group>.card:not(:last-child) .card-img-bottom{border-bottom-right-radius:0}.card-group>.card:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:not(:first-child) .card-header,.card-group>.card:not(:first-child) .card-img-top{border-top-left-radius:0}.card-group>.card:not(:first-child) .card-footer,.card-group>.card:not(:first-child) .card-img-bottom{border-bottom-left-radius:0}}.card-columns .card{margin-bottom:.75rem}@media (min-width:540px){.card-columns{column-count:3;column-gap:1.25rem;orphans:1;widows:1}.card-columns .card{display:inline-block;width:100%}}.accordion>.card{overflow:hidden}.accordion>.card:not(:last-of-type){border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.accordion>.card:not(:first-of-type){border-top-left-radius:0;border-top-right-radius:0}.accordion>.card>.card-header{border-radius:0;margin-bottom:-1px}.breadcrumb{flex-wrap:wrap;padding:.75rem 
1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:.25rem}.breadcrumb,.breadcrumb-item{display:flex}.breadcrumb-item+.breadcrumb-item{padding-left:.5rem}.breadcrumb-item+.breadcrumb-item:before{display:inline-block;padding-right:.5rem;color:#6c757d;content:"/"}.breadcrumb-item+.breadcrumb-item:hover:before{text-decoration:underline;text-decoration:none}.breadcrumb-item.active{color:#6c757d}.pagination{display:flex;padding-left:0;list-style:none;border-radius:.25rem}.page-link{position:relative;display:block;padding:.5rem .75rem;margin-left:-1px;line-height:1.25;color:#007bff;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{z-index:2;color:#0056b3;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:3;outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.25)}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.page-item:last-child .page-link{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.page-item.active .page-link{z-index:3;color:#fff;background-color:#007bff;border-color:#007bff}.page-item.disabled .page-link{color:#6c757d;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:.75rem 1.5rem;font-size:1.25rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:.3rem;border-bottom-left-radius:.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:.3rem;border-bottom-right-radius:.3rem}.pagination-sm .page-link{padding:.25rem .5rem;font-size:.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:.2rem;border-bottom-left-radius:.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:.2rem;border-bottom-right-radius:.2rem}.badge{display:inline-block;padding:.25em .4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem;transition:color .15s ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out}@media (prefers-reduced-motion:reduce){.badge{transition:none}}a.badge:focus,a.badge:hover{text-decoration:none}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.badge-pill{padding-right:.6em;padding-left:.6em;border-radius:10rem}.badge-primary{color:#fff;background-color:#007bff}a.badge-primary:focus,a.badge-primary:hover{color:#fff;background-color:#0062cc}a.badge-primary.focus,a.badge-primary:focus{outline:0;box-shadow:0 0 0 .2rem rgba(0,123,255,.5)}.badge-secondary{color:#fff;background-color:#6c757d}a.badge-secondary:focus,a.badge-secondary:hover{color:#fff;background-color:#545b62}a.badge-secondary.focus,a.badge-secondary:focus{outline:0;box-shadow:0 0 0 .2rem rgba(108,117,125,.5)}.badge-success{color:#fff;background-color:#28a745}a.badge-success:focus,a.badge-success:hover{color:#fff;background-color:#1e7e34}a.badge-success.focus,a.badge-success:focus{outline:0;box-shadow:0 0 0 .2rem rgba(40,167,69,.5)}.badge-info{color:#fff;background-color:#17a2b8}a.badge-info:focus,a.badge-info:hover{color:#fff;background-color:#117a8b}a.badge-info.focus,a.badge-info:focus{outline:0;box-shadow:0 0 0 .2rem rgba(23,162,184,.5)}.badge-warning{color:#212529;background-color:#ffc107}a.badge-warning:focus,a.badge-warning:hover{color:#212529;background-color:#d39e00}a.badge-warning.focus,a.badge-warning:focus{outline:0;box-shadow:0 0 0 .2rem 
rgba(255,193,7,.5)}.badge-danger{color:#fff;background-color:#dc3545}a.badge-danger:focus,a.badge-danger:hover{color:#fff;background-color:#bd2130}a.badge-danger.focus,a.badge-danger:focus{outline:0;box-shadow:0 0 0 .2rem rgba(220,53,69,.5)}.badge-light{color:#212529;background-color:#f8f9fa}a.badge-light:focus,a.badge-light:hover{color:#212529;background-color:#dae0e5}a.badge-light.focus,a.badge-light:focus{outline:0;box-shadow:0 0 0 .2rem rgba(248,249,250,.5)}.badge-dark{color:#fff;background-color:#343a40}a.badge-dark:focus,a.badge-dark:hover{color:#fff;background-color:#1d2124}a.badge-dark.focus,a.badge-dark:focus{outline:0;box-shadow:0 0 0 .2rem rgba(52,58,64,.5)}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:.3rem}@media (min-width:540px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:.75rem 1.25rem;margin-bottom:1rem;border:1px solid transparent;border-radius:.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:.75rem 1.25rem;color:inherit}.alert-primary{color:#004085;background-color:#cce5ff;border-color:#b8daff}.alert-primary hr{border-top-color:#9fcdff}.alert-primary .alert-link{color:#002752}.alert-secondary{color:#383d41;background-color:#e2e3e5;border-color:#d6d8db}.alert-secondary hr{border-top-color:#c8cbcf}.alert-secondary .alert-link{color:#202326}.alert-success{color:#155724;background-color:#d4edda;border-color:#c3e6cb}.alert-success hr{border-top-color:#b1dfbb}.alert-success .alert-link{color:#0b2e13}.alert-info{color:#0c5460;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#062c33}.alert-warning{color:#856404;background-color:#fff3cd;border-color:#ffeeba}.alert-warning hr{border-top-color:#ffe8a1}.alert-warning .alert-link{color:#533f03}.alert-danger{color:#721c24;background-color:#f8d7da;border-color:#f5c6cb}.alert-danger hr{border-top-color:#f1b0b7}.alert-danger .alert-link{color:#491217}.alert-light{color:#818182;background-color:#fefefe;border-color:#fdfdfe}.alert-light hr{border-top-color:#ececf6}.alert-light .alert-link{color:#686868}.alert-dark{color:#1b1e21;background-color:#d6d8d9;border-color:#c6c8ca}.alert-dark hr{border-top-color:#b9bbbe}.alert-dark .alert-link{color:#040505}@keyframes progress-bar-stripes{0%{background-position:1rem 0}to{background-position:0 0}}.progress{height:1rem;line-height:0;font-size:.75rem;background-color:#e9ecef;border-radius:.25rem}.progress,.progress-bar{display:flex;overflow:hidden}.progress-bar{flex-direction:column;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#007bff;transition:width .6s ease}@media (prefers-reduced-motion:reduce){.progress-bar{transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg,hsla(0,0%,100%,.15) 25%,transparent 0,transparent 50%,hsla(0,0%,100%,.15) 0,hsla(0,0%,100%,.15) 75%,transparent 0,transparent);background-size:1rem 1rem}.progress-bar-animated{animation:progress-bar-stripes 1s linear infinite}@media 
(prefers-reduced-motion:reduce){.progress-bar-animated{animation:none}}.media{display:flex;align-items:flex-start}.media-body{flex:1}.list-group{display:flex;flex-direction:column;padding-left:0;margin-bottom:0;border-radius:.25rem}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:focus,.list-group-item-action:hover{z-index:1;color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#212529;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:.75rem 1.25rem;background-color:#fff;border:1px solid rgba(0,0,0,.125)}.list-group-item:first-child{border-top-left-radius:inherit;border-top-right-radius:inherit}.list-group-item:last-child{border-bottom-right-radius:inherit;border-bottom-left-radius:inherit}.list-group-item.disabled,.list-group-item:disabled{color:#6c757d;pointer-events:none;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#007bff;border-color:#007bff}.list-group-item+.list-group-item{border-top-width:0}.list-group-item+.list-group-item.active{margin-top:-1px;border-top-width:1px}.list-group-horizontal{flex-direction:row}.list-group-horizontal>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal>.list-group-item.active{margin-top:0}.list-group-horizontal>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}@media (min-width:540px){.list-group-horizontal-sm{flex-direction:row}.list-group-horizontal-sm>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-sm>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-sm>.list-group-item.active{margin-top:0}.list-group-horizontal-sm>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-sm>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:720px){.list-group-horizontal-md{flex-direction:row}.list-group-horizontal-md>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-md>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-md>.list-group-item.active{margin-top:0}.list-group-horizontal-md>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-md>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media (min-width:960px){.list-group-horizontal-lg{flex-direction:row}.list-group-horizontal-lg>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-lg>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-lg>.list-group-item.active{margin-top:0}.list-group-horizontal-lg>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-lg>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}@media 
(min-width:1200px){.list-group-horizontal-xl{flex-direction:row}.list-group-horizontal-xl>.list-group-item:first-child{border-bottom-left-radius:.25rem;border-top-right-radius:0}.list-group-horizontal-xl>.list-group-item:last-child{border-top-right-radius:.25rem;border-bottom-left-radius:0}.list-group-horizontal-xl>.list-group-item.active{margin-top:0}.list-group-horizontal-xl>.list-group-item+.list-group-item{border-top-width:1px;border-left-width:0}.list-group-horizontal-xl>.list-group-item+.list-group-item.active{margin-left:-1px;border-left-width:1px}}.list-group-flush{border-radius:0}.list-group-flush>.list-group-item{border-width:0 0 1px}.list-group-flush>.list-group-item:last-child{border-bottom-width:0}.list-group-item-primary{color:#004085;background-color:#b8daff}.list-group-item-primary.list-group-item-action:focus,.list-group-item-primary.list-group-item-action:hover{color:#004085;background-color:#9fcdff}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#004085;border-color:#004085}.list-group-item-secondary{color:#383d41;background-color:#d6d8db}.list-group-item-secondary.list-group-item-action:focus,.list-group-item-secondary.list-group-item-action:hover{color:#383d41;background-color:#c8cbcf}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#383d41;border-color:#383d41}.list-group-item-success{color:#155724;background-color:#c3e6cb}.list-group-item-success.list-group-item-action:focus,.list-group-item-success.list-group-item-action:hover{color:#155724;background-color:#b1dfbb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#155724;border-color:#155724}.list-group-item-info{color:#0c5460;background-color:#bee5eb}.list-group-item-info.list-group-item-action:focus,.list-group-item-info.list-group-item-action:hover{color:#0c5460;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#0c5460;border-color:#0c5460}.list-group-item-warning{color:#856404;background-color:#ffeeba}.list-group-item-warning.list-group-item-action:focus,.list-group-item-warning.list-group-item-action:hover{color:#856404;background-color:#ffe8a1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#856404;border-color:#856404}.list-group-item-danger{color:#721c24;background-color:#f5c6cb}.list-group-item-danger.list-group-item-action:focus,.list-group-item-danger.list-group-item-action:hover{color:#721c24;background-color:#f1b0b7}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#721c24;border-color:#721c24}.list-group-item-light{color:#818182;background-color:#fdfdfe}.list-group-item-light.list-group-item-action:focus,.list-group-item-light.list-group-item-action:hover{color:#818182;background-color:#ececf6}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#818182;border-color:#818182}.list-group-item-dark{color:#1b1e21;background-color:#c6c8ca}.list-group-item-dark.list-group-item-action:focus,.list-group-item-dark.list-group-item-action:hover{color:#1b1e21;background-color:#b9bbbe}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#1b1e21;border-color:#1b1e21}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 
#fff;opacity:.5}.close:hover{color:#000;text-decoration:none}.close:not(:disabled):not(.disabled):focus,.close:not(:disabled):not(.disabled):hover{opacity:.75}button.close{padding:0;background-color:transparent;border:0}a.close.disabled{pointer-events:none}.toast{max-width:350px;overflow:hidden;font-size:.875rem;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border:1px solid rgba(0,0,0,.1);box-shadow:0 .25rem .75rem rgba(0,0,0,.1);backdrop-filter:blur(10px);opacity:0;border-radius:.25rem}.toast:not(:last-child){margin-bottom:.75rem}.toast.showing{opacity:1}.toast.show{display:block;opacity:1}.toast.hide{display:none}.toast-header{display:flex;align-items:center;padding:.25rem .75rem;color:#6c757d;background-color:hsla(0,0%,100%,.85);background-clip:padding-box;border-bottom:1px solid rgba(0,0,0,.05)}.toast-body{padding:.75rem}.modal-open{overflow:hidden}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal{position:fixed;top:0;left:0;z-index:1050;display:none;width:100%;height:100%;overflow:hidden;outline:0}.modal-dialog{position:relative;width:auto;margin:.5rem;pointer-events:none}.modal.fade .modal-dialog{transition:transform .3s ease-out;transform:translateY(-50px)}@media (prefers-reduced-motion:reduce){.modal.fade .modal-dialog{transition:none}}.modal.show .modal-dialog{transform:none}.modal.modal-static .modal-dialog{transform:scale(1.02)}.modal-dialog-scrollable{display:flex;max-height:calc(100% - 1rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 1rem);overflow:hidden}.modal-dialog-scrollable .modal-footer,.modal-dialog-scrollable .modal-header{flex-shrink:0}.modal-dialog-scrollable .modal-body{overflow-y:auto}.modal-dialog-centered{display:flex;align-items:center;min-height:calc(100% - 1rem)}.modal-dialog-centered:before{display:block;height:calc(100vh - 1rem);height:min-content;content:""}.modal-dialog-centered.modal-dialog-scrollable{flex-direction:column;justify-content:center;height:100%}.modal-dialog-centered.modal-dialog-scrollable .modal-content{max-height:none}.modal-dialog-centered.modal-dialog-scrollable:before{content:none}.modal-content{position:relative;display:flex;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem;outline:0}.modal-backdrop{position:fixed;top:0;left:0;z-index:1040;width:100vw;height:100vh;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:.5}.modal-header{display:flex;align-items:flex-start;justify-content:space-between;padding:1rem;border-bottom:1px solid #dee2e6;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.modal-header .close{padding:1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;flex:1 1 auto;padding:1rem}.modal-footer{display:flex;flex-wrap:wrap;align-items:center;justify-content:flex-end;padding:.75rem;border-top:1px solid #dee2e6;border-bottom-right-radius:calc(.3rem - 1px);border-bottom-left-radius:calc(.3rem - 1px)}.modal-footer>*{margin:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:540px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-scrollable{max-height:calc(100% - 3.5rem)}.modal-dialog-scrollable .modal-content{max-height:calc(100vh - 3.5rem)}.modal-dialog-centered{min-height:calc(100% - 3.5rem)}.modal-dialog-centered:before{height:calc(100vh - 
3.5rem);height:min-content}.modal-sm{max-width:300px}}@media (min-width:960px){.modal-lg,.modal-xl{max-width:800px}}@media (min-width:1200px){.modal-xl{max-width:1140px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:.9}.tooltip .arrow{position:absolute;display:block;width:.8rem;height:.4rem}.tooltip .arrow:before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-auto[x-placement^=top],.bs-tooltip-top{padding:.4rem 0}.bs-tooltip-auto[x-placement^=top] .arrow,.bs-tooltip-top .arrow{bottom:0}.bs-tooltip-auto[x-placement^=top] .arrow:before,.bs-tooltip-top .arrow:before{top:0;border-width:.4rem .4rem 0;border-top-color:#000}.bs-tooltip-auto[x-placement^=right],.bs-tooltip-right{padding:0 .4rem}.bs-tooltip-auto[x-placement^=right] .arrow,.bs-tooltip-right .arrow{left:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=right] .arrow:before,.bs-tooltip-right .arrow:before{right:0;border-width:.4rem .4rem .4rem 0;border-right-color:#000}.bs-tooltip-auto[x-placement^=bottom],.bs-tooltip-bottom{padding:.4rem 0}.bs-tooltip-auto[x-placement^=bottom] .arrow,.bs-tooltip-bottom .arrow{top:0}.bs-tooltip-auto[x-placement^=bottom] .arrow:before,.bs-tooltip-bottom .arrow:before{bottom:0;border-width:0 .4rem .4rem;border-bottom-color:#000}.bs-tooltip-auto[x-placement^=left],.bs-tooltip-left{padding:0 .4rem}.bs-tooltip-auto[x-placement^=left] .arrow,.bs-tooltip-left .arrow{right:0;width:.4rem;height:.8rem}.bs-tooltip-auto[x-placement^=left] .arrow:before,.bs-tooltip-left .arrow:before{left:0;border-width:.4rem 0 .4rem .4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:.25rem .5rem;color:#fff;text-align:center;background-color:#000;border-radius:.25rem}.popover{top:0;left:0;z-index:1060;max-width:276px;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,.2);border-radius:.3rem}.popover,.popover .arrow{position:absolute;display:block}.popover .arrow{width:1rem;height:.5rem;margin:0 .3rem}.popover .arrow:after,.popover .arrow:before{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-auto[x-placement^=top],.bs-popover-top{margin-bottom:.5rem}.bs-popover-auto[x-placement^=top]>.arrow,.bs-popover-top>.arrow{bottom:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=top]>.arrow:before,.bs-popover-top>.arrow:before{bottom:0;border-width:.5rem .5rem 0;border-top-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=top]>.arrow:after,.bs-popover-top>.arrow:after{bottom:1px;border-width:.5rem .5rem 
0;border-top-color:#fff}.bs-popover-auto[x-placement^=right],.bs-popover-right{margin-left:.5rem}.bs-popover-auto[x-placement^=right]>.arrow,.bs-popover-right>.arrow{left:calc(-.5rem - 1px);width:.5rem;height:1rem;margin:.3rem 0}.bs-popover-auto[x-placement^=right]>.arrow:before,.bs-popover-right>.arrow:before{left:0;border-width:.5rem .5rem .5rem 0;border-right-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=right]>.arrow:after,.bs-popover-right>.arrow:after{left:1px;border-width:.5rem .5rem .5rem 0;border-right-color:#fff}.bs-popover-auto[x-placement^=bottom],.bs-popover-bottom{margin-top:.5rem}.bs-popover-auto[x-placement^=bottom]>.arrow,.bs-popover-bottom>.arrow{top:calc(-.5rem - 1px)}.bs-popover-auto[x-placement^=bottom]>.arrow:before,.bs-popover-bottom>.arrow:before{top:0;border-width:0 .5rem .5rem;border-bottom-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=bottom]>.arrow:after,.bs-popover-bottom>.arrow:after{top:1px;border-width:0 .5rem .5rem;border-bottom-color:#fff}.bs-popover-auto[x-placement^=bottom] .popover-header:before,.bs-popover-bottom .popover-header:before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-auto[x-placement^=left],.bs-popover-left{margin-right:.5rem}.bs-popover-auto[x-placement^=left]>.arrow,.bs-popover-left>.arrow{right:calc(-.5rem - 1px);width:.5rem;height:1rem;margin:.3rem 0}.bs-popover-auto[x-placement^=left]>.arrow:before,.bs-popover-left>.arrow:before{right:0;border-width:.5rem 0 .5rem .5rem;border-left-color:rgba(0,0,0,.25)}.bs-popover-auto[x-placement^=left]>.arrow:after,.bs-popover-left>.arrow:after{right:1px;border-width:.5rem 0 .5rem .5rem;border-left-color:#fff}.popover-header{padding:.5rem .75rem;margin-bottom:0;font-size:1rem;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:calc(.3rem - 1px);border-top-right-radius:calc(.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:.5rem .75rem;color:#212529}.carousel{position:relative}.carousel.pointer-event{touch-action:pan-y}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner:after{display:block;clear:both;content:""}.carousel-item{position:relative;display:none;float:left;width:100%;margin-right:-100%;backface-visibility:hidden;transition:transform .6s ease-in-out}@media (prefers-reduced-motion:reduce){.carousel-item{transition:none}}.carousel-item-next,.carousel-item-prev,.carousel-item.active{display:block}.active.carousel-item-right,.carousel-item-next:not(.carousel-item-left){transform:translateX(100%)}.active.carousel-item-left,.carousel-item-prev:not(.carousel-item-right){transform:translateX(-100%)}.carousel-fade .carousel-item{opacity:0;transition-property:opacity;transform:none}.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right,.carousel-fade .carousel-item.active{z-index:1;opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{z-index:0;opacity:0;transition:opacity 0s .6s}@media (prefers-reduced-motion:reduce){.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{transition:none}}.carousel-control-next,.carousel-control-prev{position:absolute;top:0;bottom:0;z-index:1;display:flex;align-items:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:.5;transition:opacity .15s ease}@media 
(prefers-reduced-motion:reduce){.carousel-control-next,.carousel-control-prev{transition:none}}.carousel-control-next:focus,.carousel-control-next:hover,.carousel-control-prev:focus,.carousel-control-prev:hover{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-next-icon,.carousel-control-prev-icon{display:inline-block;width:20px;height:20px;background:no-repeat 50%/100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5L4.25 4l2.5-2.5L5.25 0z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' width='8' height='8'%3E%3Cpath d='M2.75 0l-1.5 1.5L3.75 4l-2.5 2.5L2.75 8l4-4-4-4z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:0;left:0;z-index:15;display:flex;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{box-sizing:content-box;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:#fff;background-clip:padding-box;border-top:10px solid transparent;border-bottom:10px solid transparent;opacity:.5;transition:opacity .6s ease}@media (prefers-reduced-motion:reduce){.carousel-indicators li{transition:none}}.carousel-indicators .active{opacity:1}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}@keyframes spinner-border{to{transform:rotate(1turn)}}.spinner-border{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;border:.25em solid;border-right:.25em solid transparent;border-radius:50%;animation:spinner-border .75s linear infinite}.spinner-border-sm{width:1rem;height:1rem;border-width:.2em}@keyframes spinner-grow{0%{transform:scale(0)}50%{opacity:1;transform:none}}.spinner-grow{display:inline-block;width:2rem;height:2rem;vertical-align:text-bottom;background-color:currentColor;border-radius:50%;opacity:0;animation:spinner-grow .75s linear 
infinite}.spinner-grow-sm{width:1rem;height:1rem}.align-baseline{vertical-align:baseline!important}.align-top{vertical-align:top!important}.align-middle{vertical-align:middle!important}.align-bottom{vertical-align:bottom!important}.align-text-bottom{vertical-align:text-bottom!important}.align-text-top{vertical-align:text-top!important}.bg-primary{background-color:#007bff!important}a.bg-primary:focus,a.bg-primary:hover,button.bg-primary:focus,button.bg-primary:hover{background-color:#0062cc!important}.bg-secondary{background-color:#6c757d!important}a.bg-secondary:focus,a.bg-secondary:hover,button.bg-secondary:focus,button.bg-secondary:hover{background-color:#545b62!important}.bg-success{background-color:#28a745!important}a.bg-success:focus,a.bg-success:hover,button.bg-success:focus,button.bg-success:hover{background-color:#1e7e34!important}.bg-info{background-color:#17a2b8!important}a.bg-info:focus,a.bg-info:hover,button.bg-info:focus,button.bg-info:hover{background-color:#117a8b!important}.bg-warning{background-color:#ffc107!important}a.bg-warning:focus,a.bg-warning:hover,button.bg-warning:focus,button.bg-warning:hover{background-color:#d39e00!important}.bg-danger{background-color:#dc3545!important}a.bg-danger:focus,a.bg-danger:hover,button.bg-danger:focus,button.bg-danger:hover{background-color:#bd2130!important}.bg-light{background-color:#f8f9fa!important}a.bg-light:focus,a.bg-light:hover,button.bg-light:focus,button.bg-light:hover{background-color:#dae0e5!important}.bg-dark{background-color:#343a40!important}a.bg-dark:focus,a.bg-dark:hover,button.bg-dark:focus,button.bg-dark:hover{background-color:#1d2124!important}.bg-white{background-color:#fff!important}.bg-transparent{background-color:transparent!important}.border{border:1px solid #dee2e6!important}.border-top{border-top:1px solid #dee2e6!important}.border-right{border-right:1px solid #dee2e6!important}.border-bottom{border-bottom:1px solid #dee2e6!important}.border-left{border-left:1px solid 
#dee2e6!important}.border-0{border:0!important}.border-top-0{border-top:0!important}.border-right-0{border-right:0!important}.border-bottom-0{border-bottom:0!important}.border-left-0{border-left:0!important}.border-primary{border-color:#007bff!important}.border-secondary{border-color:#6c757d!important}.border-success{border-color:#28a745!important}.border-info{border-color:#17a2b8!important}.border-warning{border-color:#ffc107!important}.border-danger{border-color:#dc3545!important}.border-light{border-color:#f8f9fa!important}.border-dark{border-color:#343a40!important}.border-white{border-color:#fff!important}.rounded-sm{border-radius:.2rem!important}.rounded{border-radius:.25rem!important}.rounded-top{border-top-left-radius:.25rem!important}.rounded-right,.rounded-top{border-top-right-radius:.25rem!important}.rounded-bottom,.rounded-right{border-bottom-right-radius:.25rem!important}.rounded-bottom,.rounded-left{border-bottom-left-radius:.25rem!important}.rounded-left{border-top-left-radius:.25rem!important}.rounded-lg{border-radius:.3rem!important}.rounded-circle{border-radius:50%!important}.rounded-pill{border-radius:50rem!important}.rounded-0{border-radius:0!important}.clearfix:after{display:block;clear:both;content:""}.d-none{display:none!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:flex!important}.d-inline-flex{display:inline-flex!important}@media (min-width:540px){.d-sm-none{display:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:flex!important}.d-sm-inline-flex{display:inline-flex!important}}@media (min-width:720px){.d-md-none{display:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:flex!important}.d-md-inline-flex{display:inline-flex!important}}@media (min-width:960px){.d-lg-none{display:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:flex!important}.d-lg-inline-flex{display:inline-flex!important}}@media (min-width:1200px){.d-xl-none{display:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:flex!important}.d-xl-inline-flex{display:inline-flex!important}}@media 
print{.d-print-none{display:none!important}.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:flex!important}.d-print-inline-flex{display:inline-flex!important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive:before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive embed,.embed-responsive iframe,.embed-responsive object,.embed-responsive video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9:before{padding-top:42.85714%}.embed-responsive-16by9:before{padding-top:56.25%}.embed-responsive-4by3:before{padding-top:75%}.embed-responsive-1by1:before{padding-top:100%}.flex-row{flex-direction:row!important}.flex-column{flex-direction:column!important}.flex-row-reverse{flex-direction:row-reverse!important}.flex-column-reverse{flex-direction:column-reverse!important}.flex-wrap{flex-wrap:wrap!important}.flex-nowrap{flex-wrap:nowrap!important}.flex-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-fill{flex:1 1 auto!important}.flex-grow-0{flex-grow:0!important}.flex-grow-1{flex-grow:1!important}.flex-shrink-0{flex-shrink:0!important}.flex-shrink-1{flex-shrink:1!important}.justify-content-start{justify-content:flex-start!important}.justify-content-end{justify-content:flex-end!important}.justify-content-center{justify-content:center!important}.justify-content-between{justify-content:space-between!important}.justify-content-around{justify-content:space-around!important}.align-items-start{align-items:flex-start!important}.align-items-end{align-items:flex-end!important}.align-items-center{align-items:center!important}.align-items-baseline{align-items:baseline!important}.align-items-stretch{align-items:stretch!important}.align-content-start{align-content:flex-start!important}.align-content-end{align-content:flex-end!important}.align-content-center{align-content:center!important}.align-content-between{align-content:space-between!important}.align-content-around{align-content:space-around!important}.align-content-stretch{align-content:stretch!important}.align-self-auto{align-self:auto!important}.align-self-start{align-self:flex-start!important}.align-self-end{align-self:flex-end!important}.align-self-center{align-self:center!important}.align-self-baseline{align-self:baseline!important}.align-self-stretch{align-self:stretch!important}@media (min-width:540px){.flex-sm-row{flex-direction:row!important}.flex-sm-column{flex-direction:column!important}.flex-sm-row-reverse{flex-direction:row-reverse!important}.flex-sm-column-reverse{flex-direction:column-reverse!important}.flex-sm-wrap{flex-wrap:wrap!important}.flex-sm-nowrap{flex-wrap:nowrap!important}.flex-sm-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-sm-fill{flex:1 1 
auto!important}.flex-sm-grow-0{flex-grow:0!important}.flex-sm-grow-1{flex-grow:1!important}.flex-sm-shrink-0{flex-shrink:0!important}.flex-sm-shrink-1{flex-shrink:1!important}.justify-content-sm-start{justify-content:flex-start!important}.justify-content-sm-end{justify-content:flex-end!important}.justify-content-sm-center{justify-content:center!important}.justify-content-sm-between{justify-content:space-between!important}.justify-content-sm-around{justify-content:space-around!important}.align-items-sm-start{align-items:flex-start!important}.align-items-sm-end{align-items:flex-end!important}.align-items-sm-center{align-items:center!important}.align-items-sm-baseline{align-items:baseline!important}.align-items-sm-stretch{align-items:stretch!important}.align-content-sm-start{align-content:flex-start!important}.align-content-sm-end{align-content:flex-end!important}.align-content-sm-center{align-content:center!important}.align-content-sm-between{align-content:space-between!important}.align-content-sm-around{align-content:space-around!important}.align-content-sm-stretch{align-content:stretch!important}.align-self-sm-auto{align-self:auto!important}.align-self-sm-start{align-self:flex-start!important}.align-self-sm-end{align-self:flex-end!important}.align-self-sm-center{align-self:center!important}.align-self-sm-baseline{align-self:baseline!important}.align-self-sm-stretch{align-self:stretch!important}}@media (min-width:720px){.flex-md-row{flex-direction:row!important}.flex-md-column{flex-direction:column!important}.flex-md-row-reverse{flex-direction:row-reverse!important}.flex-md-column-reverse{flex-direction:column-reverse!important}.flex-md-wrap{flex-wrap:wrap!important}.flex-md-nowrap{flex-wrap:nowrap!important}.flex-md-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-md-fill{flex:1 1 auto!important}.flex-md-grow-0{flex-grow:0!important}.flex-md-grow-1{flex-grow:1!important}.flex-md-shrink-0{flex-shrink:0!important}.flex-md-shrink-1{flex-shrink:1!important}.justify-content-md-start{justify-content:flex-start!important}.justify-content-md-end{justify-content:flex-end!important}.justify-content-md-center{justify-content:center!important}.justify-content-md-between{justify-content:space-between!important}.justify-content-md-around{justify-content:space-around!important}.align-items-md-start{align-items:flex-start!important}.align-items-md-end{align-items:flex-end!important}.align-items-md-center{align-items:center!important}.align-items-md-baseline{align-items:baseline!important}.align-items-md-stretch{align-items:stretch!important}.align-content-md-start{align-content:flex-start!important}.align-content-md-end{align-content:flex-end!important}.align-content-md-center{align-content:center!important}.align-content-md-between{align-content:space-between!important}.align-content-md-around{align-content:space-around!important}.align-content-md-stretch{align-content:stretch!important}.align-self-md-auto{align-self:auto!important}.align-self-md-start{align-self:flex-start!important}.align-self-md-end{align-self:flex-end!important}.align-self-md-center{align-self:center!important}.align-self-md-baseline{align-self:baseline!important}.align-self-md-stretch{align-self:stretch!important}}@media 
(min-width:960px){.flex-lg-row{flex-direction:row!important}.flex-lg-column{flex-direction:column!important}.flex-lg-row-reverse{flex-direction:row-reverse!important}.flex-lg-column-reverse{flex-direction:column-reverse!important}.flex-lg-wrap{flex-wrap:wrap!important}.flex-lg-nowrap{flex-wrap:nowrap!important}.flex-lg-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-lg-fill{flex:1 1 auto!important}.flex-lg-grow-0{flex-grow:0!important}.flex-lg-grow-1{flex-grow:1!important}.flex-lg-shrink-0{flex-shrink:0!important}.flex-lg-shrink-1{flex-shrink:1!important}.justify-content-lg-start{justify-content:flex-start!important}.justify-content-lg-end{justify-content:flex-end!important}.justify-content-lg-center{justify-content:center!important}.justify-content-lg-between{justify-content:space-between!important}.justify-content-lg-around{justify-content:space-around!important}.align-items-lg-start{align-items:flex-start!important}.align-items-lg-end{align-items:flex-end!important}.align-items-lg-center{align-items:center!important}.align-items-lg-baseline{align-items:baseline!important}.align-items-lg-stretch{align-items:stretch!important}.align-content-lg-start{align-content:flex-start!important}.align-content-lg-end{align-content:flex-end!important}.align-content-lg-center{align-content:center!important}.align-content-lg-between{align-content:space-between!important}.align-content-lg-around{align-content:space-around!important}.align-content-lg-stretch{align-content:stretch!important}.align-self-lg-auto{align-self:auto!important}.align-self-lg-start{align-self:flex-start!important}.align-self-lg-end{align-self:flex-end!important}.align-self-lg-center{align-self:center!important}.align-self-lg-baseline{align-self:baseline!important}.align-self-lg-stretch{align-self:stretch!important}}@media (min-width:1200px){.flex-xl-row{flex-direction:row!important}.flex-xl-column{flex-direction:column!important}.flex-xl-row-reverse{flex-direction:row-reverse!important}.flex-xl-column-reverse{flex-direction:column-reverse!important}.flex-xl-wrap{flex-wrap:wrap!important}.flex-xl-nowrap{flex-wrap:nowrap!important}.flex-xl-wrap-reverse{flex-wrap:wrap-reverse!important}.flex-xl-fill{flex:1 1 
auto!important}.flex-xl-grow-0{flex-grow:0!important}.flex-xl-grow-1{flex-grow:1!important}.flex-xl-shrink-0{flex-shrink:0!important}.flex-xl-shrink-1{flex-shrink:1!important}.justify-content-xl-start{justify-content:flex-start!important}.justify-content-xl-end{justify-content:flex-end!important}.justify-content-xl-center{justify-content:center!important}.justify-content-xl-between{justify-content:space-between!important}.justify-content-xl-around{justify-content:space-around!important}.align-items-xl-start{align-items:flex-start!important}.align-items-xl-end{align-items:flex-end!important}.align-items-xl-center{align-items:center!important}.align-items-xl-baseline{align-items:baseline!important}.align-items-xl-stretch{align-items:stretch!important}.align-content-xl-start{align-content:flex-start!important}.align-content-xl-end{align-content:flex-end!important}.align-content-xl-center{align-content:center!important}.align-content-xl-between{align-content:space-between!important}.align-content-xl-around{align-content:space-around!important}.align-content-xl-stretch{align-content:stretch!important}.align-self-xl-auto{align-self:auto!important}.align-self-xl-start{align-self:flex-start!important}.align-self-xl-end{align-self:flex-end!important}.align-self-xl-center{align-self:center!important}.align-self-xl-baseline{align-self:baseline!important}.align-self-xl-stretch{align-self:stretch!important}}.float-left{float:left!important}.float-right{float:right!important}.float-none{float:none!important}@media (min-width:540px){.float-sm-left{float:left!important}.float-sm-right{float:right!important}.float-sm-none{float:none!important}}@media (min-width:720px){.float-md-left{float:left!important}.float-md-right{float:right!important}.float-md-none{float:none!important}}@media (min-width:960px){.float-lg-left{float:left!important}.float-lg-right{float:right!important}.float-lg-none{float:none!important}}@media (min-width:1200px){.float-xl-left{float:left!important}.float-xl-right{float:right!important}.float-xl-none{float:none!important}}.user-select-all{user-select:all!important}.user-select-auto{user-select:auto!important}.user-select-none{user-select:none!important}.overflow-auto{overflow:auto!important}.overflow-hidden{overflow:hidden!important}.position-static{position:static!important}.position-relative{position:relative!important}.position-absolute{position:absolute!important}.position-fixed{position:fixed!important}.position-sticky{position:sticky!important}.fixed-top{top:0}.fixed-bottom,.fixed-top{position:fixed;right:0;left:0;z-index:1030}.fixed-bottom{bottom:0}@supports (position:sticky){.sticky-top{position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal}.shadow-sm{box-shadow:0 .125rem .25rem rgba(0,0,0,.075)!important}.shadow{box-shadow:0 .5rem 1rem rgba(0,0,0,.15)!important}.shadow-lg{box-shadow:0 1rem 3rem 
rgba(0,0,0,.175)!important}.shadow-none{box-shadow:none!important}.w-25{width:25%!important}.w-50{width:50%!important}.w-75{width:75%!important}.w-100{width:100%!important}.w-auto{width:auto!important}.h-25{height:25%!important}.h-50{height:50%!important}.h-75{height:75%!important}.h-100{height:100%!important}.h-auto{height:auto!important}.mw-100{max-width:100%!important}.mh-100{max-height:100%!important}.min-vw-100{min-width:100vw!important}.min-vh-100{min-height:100vh!important}.vw-100{width:100vw!important}.vh-100{height:100vh!important}.m-0{margin:0!important}.mt-0,.my-0{margin-top:0!important}.mr-0,.mx-0{margin-right:0!important}.mb-0,.my-0{margin-bottom:0!important}.ml-0,.mx-0{margin-left:0!important}.m-1{margin:.25rem!important}.mt-1,.my-1{margin-top:.25rem!important}.mr-1,.mx-1{margin-right:.25rem!important}.mb-1,.my-1{margin-bottom:.25rem!important}.ml-1,.mx-1{margin-left:.25rem!important}.m-2{margin:.5rem!important}.mt-2,.my-2{margin-top:.5rem!important}.mr-2,.mx-2{margin-right:.5rem!important}.mb-2,.my-2{margin-bottom:.5rem!important}.ml-2,.mx-2{margin-left:.5rem!important}.m-3{margin:1rem!important}.mt-3,.my-3{margin-top:1rem!important}.mr-3,.mx-3{margin-right:1rem!important}.mb-3,.my-3{margin-bottom:1rem!important}.ml-3,.mx-3{margin-left:1rem!important}.m-4{margin:1.5rem!important}.mt-4,.my-4{margin-top:1.5rem!important}.mr-4,.mx-4{margin-right:1.5rem!important}.mb-4,.my-4{margin-bottom:1.5rem!important}.ml-4,.mx-4{margin-left:1.5rem!important}.m-5{margin:3rem!important}.mt-5,.my-5{margin-top:3rem!important}.mr-5,.mx-5{margin-right:3rem!important}.mb-5,.my-5{margin-bottom:3rem!important}.ml-5,.mx-5{margin-left:3rem!important}.p-0{padding:0!important}.pt-0,.py-0{padding-top:0!important}.pr-0,.px-0{padding-right:0!important}.pb-0,.py-0{padding-bottom:0!important}.pl-0,.px-0{padding-left:0!important}.p-1{padding:.25rem!important}.pt-1,.py-1{padding-top:.25rem!important}.pr-1,.px-1{padding-right:.25rem!important}.pb-1,.py-1{padding-bottom:.25rem!important}.pl-1,.px-1{padding-left:.25rem!important}.p-2{padding:.5rem!important}.pt-2,.py-2{padding-top:.5rem!important}.pr-2,.px-2{padding-right:.5rem!important}.pb-2,.py-2{padding-bottom:.5rem!important}.pl-2,.px-2{padding-left:.5rem!important}.p-3{padding:1rem!important}.pt-3,.py-3{padding-top:1rem!important}.pr-3,.px-3{padding-right:1rem!important}.pb-3,.py-3{padding-bottom:1rem!important}.pl-3,.px-3{padding-left:1rem!important}.p-4{padding:1.5rem!important}.pt-4,.py-4{padding-top:1.5rem!important}.pr-4,.px-4{padding-right:1.5rem!important}.pb-4,.py-4{padding-bottom:1.5rem!important}.pl-4,.px-4{padding-left:1.5rem!important}.p-5{padding:3rem!important}.pt-5,.py-5{padding-top:3rem!important}.pr-5,.px-5{padding-right:3rem!important}.pb-5,.py-5{padding-bottom:3rem!important}.pl-5,.px-5{padding-left:3rem!important}.m-n1{margin:-.25rem!important}.mt-n1,.my-n1{margin-top:-.25rem!important}.mr-n1,.mx-n1{margin-right:-.25rem!important}.mb-n1,.my-n1{margin-bottom:-.25rem!important}.ml-n1,.mx-n1{margin-left:-.25rem!important}.m-n2{margin:-.5rem!important}.mt-n2,.my-n2{margin-top:-.5rem!important}.mr-n2,.mx-n2{margin-right:-.5rem!important}.mb-n2,.my-n2{margin-bottom:-.5rem!important}.ml-n2,.mx-n2{margin-left:-.5rem!important}.m-n3{margin:-1rem!important}.mt-n3,.my-n3{margin-top:-1rem!important}.mr-n3,.mx-n3{margin-right:-1rem!important}.mb-n3,.my-n3{margin-bottom:-1rem!important}.ml-n3,.mx-n3{margin-left:-1rem!important}.m-n4{margin:-1.5rem!important}.mt-n4,.my-n4{margin-top:-1.5rem!important}.mr-n4,.mx-n4{margin-right:-1.5rem!important}.mb-n4,.
my-n4{margin-bottom:-1.5rem!important}.ml-n4,.mx-n4{margin-left:-1.5rem!important}.m-n5{margin:-3rem!important}.mt-n5,.my-n5{margin-top:-3rem!important}.mr-n5,.mx-n5{margin-right:-3rem!important}.mb-n5,.my-n5{margin-bottom:-3rem!important}.ml-n5,.mx-n5{margin-left:-3rem!important}.m-auto{margin:auto!important}.mt-auto,.my-auto{margin-top:auto!important}.mr-auto,.mx-auto{margin-right:auto!important}.mb-auto,.my-auto{margin-bottom:auto!important}.ml-auto,.mx-auto{margin-left:auto!important}@media (min-width:540px){.m-sm-0{margin:0!important}.mt-sm-0,.my-sm-0{margin-top:0!important}.mr-sm-0,.mx-sm-0{margin-right:0!important}.mb-sm-0,.my-sm-0{margin-bottom:0!important}.ml-sm-0,.mx-sm-0{margin-left:0!important}.m-sm-1{margin:.25rem!important}.mt-sm-1,.my-sm-1{margin-top:.25rem!important}.mr-sm-1,.mx-sm-1{margin-right:.25rem!important}.mb-sm-1,.my-sm-1{margin-bottom:.25rem!important}.ml-sm-1,.mx-sm-1{margin-left:.25rem!important}.m-sm-2{margin:.5rem!important}.mt-sm-2,.my-sm-2{margin-top:.5rem!important}.mr-sm-2,.mx-sm-2{margin-right:.5rem!important}.mb-sm-2,.my-sm-2{margin-bottom:.5rem!important}.ml-sm-2,.mx-sm-2{margin-left:.5rem!important}.m-sm-3{margin:1rem!important}.mt-sm-3,.my-sm-3{margin-top:1rem!important}.mr-sm-3,.mx-sm-3{margin-right:1rem!important}.mb-sm-3,.my-sm-3{margin-bottom:1rem!important}.ml-sm-3,.mx-sm-3{margin-left:1rem!important}.m-sm-4{margin:1.5rem!important}.mt-sm-4,.my-sm-4{margin-top:1.5rem!important}.mr-sm-4,.mx-sm-4{margin-right:1.5rem!important}.mb-sm-4,.my-sm-4{margin-bottom:1.5rem!important}.ml-sm-4,.mx-sm-4{margin-left:1.5rem!important}.m-sm-5{margin:3rem!important}.mt-sm-5,.my-sm-5{margin-top:3rem!important}.mr-sm-5,.mx-sm-5{margin-right:3rem!important}.mb-sm-5,.my-sm-5{margin-bottom:3rem!important}.ml-sm-5,.mx-sm-5{margin-left:3rem!important}.p-sm-0{padding:0!important}.pt-sm-0,.py-sm-0{padding-top:0!important}.pr-sm-0,.px-sm-0{padding-right:0!important}.pb-sm-0,.py-sm-0{padding-bottom:0!important}.pl-sm-0,.px-sm-0{padding-left:0!important}.p-sm-1{padding:.25rem!important}.pt-sm-1,.py-sm-1{padding-top:.25rem!important}.pr-sm-1,.px-sm-1{padding-right:.25rem!important}.pb-sm-1,.py-sm-1{padding-bottom:.25rem!important}.pl-sm-1,.px-sm-1{padding-left:.25rem!important}.p-sm-2{padding:.5rem!important}.pt-sm-2,.py-sm-2{padding-top:.5rem!important}.pr-sm-2,.px-sm-2{padding-right:.5rem!important}.pb-sm-2,.py-sm-2{padding-bottom:.5rem!important}.pl-sm-2,.px-sm-2{padding-left:.5rem!important}.p-sm-3{padding:1rem!important}.pt-sm-3,.py-sm-3{padding-top:1rem!important}.pr-sm-3,.px-sm-3{padding-right:1rem!important}.pb-sm-3,.py-sm-3{padding-bottom:1rem!important}.pl-sm-3,.px-sm-3{padding-left:1rem!important}.p-sm-4{padding:1.5rem!important}.pt-sm-4,.py-sm-4{padding-top:1.5rem!important}.pr-sm-4,.px-sm-4{padding-right:1.5rem!important}.pb-sm-4,.py-sm-4{padding-bottom:1.5rem!important}.pl-sm-4,.px-sm-4{padding-left:1.5rem!important}.p-sm-5{padding:3rem!important}.pt-sm-5,.py-sm-5{padding-top:3rem!important}.pr-sm-5,.px-sm-5{padding-right:3rem!important}.pb-sm-5,.py-sm-5{padding-bottom:3rem!important}.pl-sm-5,.px-sm-5{padding-left:3rem!important}.m-sm-n1{margin:-.25rem!important}.mt-sm-n1,.my-sm-n1{margin-top:-.25rem!important}.mr-sm-n1,.mx-sm-n1{margin-right:-.25rem!important}.mb-sm-n1,.my-sm-n1{margin-bottom:-.25rem!important}.ml-sm-n1,.mx-sm-n1{margin-left:-.25rem!important}.m-sm-n2{margin:-.5rem!important}.mt-sm-n2,.my-sm-n2{margin-top:-.5rem!important}.mr-sm-n2,.mx-sm-n2{margin-right:-.5rem!important}.mb-sm-n2,.my-sm-n2{margin-bottom:-.5rem!important}.ml-sm-n2,.mx-sm-n2{margi
n-left:-.5rem!important}.m-sm-n3{margin:-1rem!important}.mt-sm-n3,.my-sm-n3{margin-top:-1rem!important}.mr-sm-n3,.mx-sm-n3{margin-right:-1rem!important}.mb-sm-n3,.my-sm-n3{margin-bottom:-1rem!important}.ml-sm-n3,.mx-sm-n3{margin-left:-1rem!important}.m-sm-n4{margin:-1.5rem!important}.mt-sm-n4,.my-sm-n4{margin-top:-1.5rem!important}.mr-sm-n4,.mx-sm-n4{margin-right:-1.5rem!important}.mb-sm-n4,.my-sm-n4{margin-bottom:-1.5rem!important}.ml-sm-n4,.mx-sm-n4{margin-left:-1.5rem!important}.m-sm-n5{margin:-3rem!important}.mt-sm-n5,.my-sm-n5{margin-top:-3rem!important}.mr-sm-n5,.mx-sm-n5{margin-right:-3rem!important}.mb-sm-n5,.my-sm-n5{margin-bottom:-3rem!important}.ml-sm-n5,.mx-sm-n5{margin-left:-3rem!important}.m-sm-auto{margin:auto!important}.mt-sm-auto,.my-sm-auto{margin-top:auto!important}.mr-sm-auto,.mx-sm-auto{margin-right:auto!important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto!important}.ml-sm-auto,.mx-sm-auto{margin-left:auto!important}}@media (min-width:720px){.m-md-0{margin:0!important}.mt-md-0,.my-md-0{margin-top:0!important}.mr-md-0,.mx-md-0{margin-right:0!important}.mb-md-0,.my-md-0{margin-bottom:0!important}.ml-md-0,.mx-md-0{margin-left:0!important}.m-md-1{margin:.25rem!important}.mt-md-1,.my-md-1{margin-top:.25rem!important}.mr-md-1,.mx-md-1{margin-right:.25rem!important}.mb-md-1,.my-md-1{margin-bottom:.25rem!important}.ml-md-1,.mx-md-1{margin-left:.25rem!important}.m-md-2{margin:.5rem!important}.mt-md-2,.my-md-2{margin-top:.5rem!important}.mr-md-2,.mx-md-2{margin-right:.5rem!important}.mb-md-2,.my-md-2{margin-bottom:.5rem!important}.ml-md-2,.mx-md-2{margin-left:.5rem!important}.m-md-3{margin:1rem!important}.mt-md-3,.my-md-3{margin-top:1rem!important}.mr-md-3,.mx-md-3{margin-right:1rem!important}.mb-md-3,.my-md-3{margin-bottom:1rem!important}.ml-md-3,.mx-md-3{margin-left:1rem!important}.m-md-4{margin:1.5rem!important}.mt-md-4,.my-md-4{margin-top:1.5rem!important}.mr-md-4,.mx-md-4{margin-right:1.5rem!important}.mb-md-4,.my-md-4{margin-bottom:1.5rem!important}.ml-md-4,.mx-md-4{margin-left:1.5rem!important}.m-md-5{margin:3rem!important}.mt-md-5,.my-md-5{margin-top:3rem!important}.mr-md-5,.mx-md-5{margin-right:3rem!important}.mb-md-5,.my-md-5{margin-bottom:3rem!important}.ml-md-5,.mx-md-5{margin-left:3rem!important}.p-md-0{padding:0!important}.pt-md-0,.py-md-0{padding-top:0!important}.pr-md-0,.px-md-0{padding-right:0!important}.pb-md-0,.py-md-0{padding-bottom:0!important}.pl-md-0,.px-md-0{padding-left:0!important}.p-md-1{padding:.25rem!important}.pt-md-1,.py-md-1{padding-top:.25rem!important}.pr-md-1,.px-md-1{padding-right:.25rem!important}.pb-md-1,.py-md-1{padding-bottom:.25rem!important}.pl-md-1,.px-md-1{padding-left:.25rem!important}.p-md-2{padding:.5rem!important}.pt-md-2,.py-md-2{padding-top:.5rem!important}.pr-md-2,.px-md-2{padding-right:.5rem!important}.pb-md-2,.py-md-2{padding-bottom:.5rem!important}.pl-md-2,.px-md-2{padding-left:.5rem!important}.p-md-3{padding:1rem!important}.pt-md-3,.py-md-3{padding-top:1rem!important}.pr-md-3,.px-md-3{padding-right:1rem!important}.pb-md-3,.py-md-3{padding-bottom:1rem!important}.pl-md-3,.px-md-3{padding-left:1rem!important}.p-md-4{padding:1.5rem!important}.pt-md-4,.py-md-4{padding-top:1.5rem!important}.pr-md-4,.px-md-4{padding-right:1.5rem!important}.pb-md-4,.py-md-4{padding-bottom:1.5rem!important}.pl-md-4,.px-md-4{padding-left:1.5rem!important}.p-md-5{padding:3rem!important}.pt-md-5,.py-md-5{padding-top:3rem!important}.pr-md-5,.px-md-5{padding-right:3rem!important}.pb-md-5,.py-md-5{padding-bottom:3rem!important}.pl-md-5,.px-md-5{padding-left
:3rem!important}.m-md-n1{margin:-.25rem!important}.mt-md-n1,.my-md-n1{margin-top:-.25rem!important}.mr-md-n1,.mx-md-n1{margin-right:-.25rem!important}.mb-md-n1,.my-md-n1{margin-bottom:-.25rem!important}.ml-md-n1,.mx-md-n1{margin-left:-.25rem!important}.m-md-n2{margin:-.5rem!important}.mt-md-n2,.my-md-n2{margin-top:-.5rem!important}.mr-md-n2,.mx-md-n2{margin-right:-.5rem!important}.mb-md-n2,.my-md-n2{margin-bottom:-.5rem!important}.ml-md-n2,.mx-md-n2{margin-left:-.5rem!important}.m-md-n3{margin:-1rem!important}.mt-md-n3,.my-md-n3{margin-top:-1rem!important}.mr-md-n3,.mx-md-n3{margin-right:-1rem!important}.mb-md-n3,.my-md-n3{margin-bottom:-1rem!important}.ml-md-n3,.mx-md-n3{margin-left:-1rem!important}.m-md-n4{margin:-1.5rem!important}.mt-md-n4,.my-md-n4{margin-top:-1.5rem!important}.mr-md-n4,.mx-md-n4{margin-right:-1.5rem!important}.mb-md-n4,.my-md-n4{margin-bottom:-1.5rem!important}.ml-md-n4,.mx-md-n4{margin-left:-1.5rem!important}.m-md-n5{margin:-3rem!important}.mt-md-n5,.my-md-n5{margin-top:-3rem!important}.mr-md-n5,.mx-md-n5{margin-right:-3rem!important}.mb-md-n5,.my-md-n5{margin-bottom:-3rem!important}.ml-md-n5,.mx-md-n5{margin-left:-3rem!important}.m-md-auto{margin:auto!important}.mt-md-auto,.my-md-auto{margin-top:auto!important}.mr-md-auto,.mx-md-auto{margin-right:auto!important}.mb-md-auto,.my-md-auto{margin-bottom:auto!important}.ml-md-auto,.mx-md-auto{margin-left:auto!important}}@media (min-width:960px){.m-lg-0{margin:0!important}.mt-lg-0,.my-lg-0{margin-top:0!important}.mr-lg-0,.mx-lg-0{margin-right:0!important}.mb-lg-0,.my-lg-0{margin-bottom:0!important}.ml-lg-0,.mx-lg-0{margin-left:0!important}.m-lg-1{margin:.25rem!important}.mt-lg-1,.my-lg-1{margin-top:.25rem!important}.mr-lg-1,.mx-lg-1{margin-right:.25rem!important}.mb-lg-1,.my-lg-1{margin-bottom:.25rem!important}.ml-lg-1,.mx-lg-1{margin-left:.25rem!important}.m-lg-2{margin:.5rem!important}.mt-lg-2,.my-lg-2{margin-top:.5rem!important}.mr-lg-2,.mx-lg-2{margin-right:.5rem!important}.mb-lg-2,.my-lg-2{margin-bottom:.5rem!important}.ml-lg-2,.mx-lg-2{margin-left:.5rem!important}.m-lg-3{margin:1rem!important}.mt-lg-3,.my-lg-3{margin-top:1rem!important}.mr-lg-3,.mx-lg-3{margin-right:1rem!important}.mb-lg-3,.my-lg-3{margin-bottom:1rem!important}.ml-lg-3,.mx-lg-3{margin-left:1rem!important}.m-lg-4{margin:1.5rem!important}.mt-lg-4,.my-lg-4{margin-top:1.5rem!important}.mr-lg-4,.mx-lg-4{margin-right:1.5rem!important}.mb-lg-4,.my-lg-4{margin-bottom:1.5rem!important}.ml-lg-4,.mx-lg-4{margin-left:1.5rem!important}.m-lg-5{margin:3rem!important}.mt-lg-5,.my-lg-5{margin-top:3rem!important}.mr-lg-5,.mx-lg-5{margin-right:3rem!important}.mb-lg-5,.my-lg-5{margin-bottom:3rem!important}.ml-lg-5,.mx-lg-5{margin-left:3rem!important}.p-lg-0{padding:0!important}.pt-lg-0,.py-lg-0{padding-top:0!important}.pr-lg-0,.px-lg-0{padding-right:0!important}.pb-lg-0,.py-lg-0{padding-bottom:0!important}.pl-lg-0,.px-lg-0{padding-left:0!important}.p-lg-1{padding:.25rem!important}.pt-lg-1,.py-lg-1{padding-top:.25rem!important}.pr-lg-1,.px-lg-1{padding-right:.25rem!important}.pb-lg-1,.py-lg-1{padding-bottom:.25rem!important}.pl-lg-1,.px-lg-1{padding-left:.25rem!important}.p-lg-2{padding:.5rem!important}.pt-lg-2,.py-lg-2{padding-top:.5rem!important}.pr-lg-2,.px-lg-2{padding-right:.5rem!important}.pb-lg-2,.py-lg-2{padding-bottom:.5rem!important}.pl-lg-2,.px-lg-2{padding-left:.5rem!important}.p-lg-3{padding:1rem!important}.pt-lg-3,.py-lg-3{padding-top:1rem!important}.pr-lg-3,.px-lg-3{padding-right:1rem!important}.pb-lg-3,.py-lg-3{padding-bottom:1rem!important}.pl-lg-3,.px-lg
-3{padding-left:1rem!important}.p-lg-4{padding:1.5rem!important}.pt-lg-4,.py-lg-4{padding-top:1.5rem!important}.pr-lg-4,.px-lg-4{padding-right:1.5rem!important}.pb-lg-4,.py-lg-4{padding-bottom:1.5rem!important}.pl-lg-4,.px-lg-4{padding-left:1.5rem!important}.p-lg-5{padding:3rem!important}.pt-lg-5,.py-lg-5{padding-top:3rem!important}.pr-lg-5,.px-lg-5{padding-right:3rem!important}.pb-lg-5,.py-lg-5{padding-bottom:3rem!important}.pl-lg-5,.px-lg-5{padding-left:3rem!important}.m-lg-n1{margin:-.25rem!important}.mt-lg-n1,.my-lg-n1{margin-top:-.25rem!important}.mr-lg-n1,.mx-lg-n1{margin-right:-.25rem!important}.mb-lg-n1,.my-lg-n1{margin-bottom:-.25rem!important}.ml-lg-n1,.mx-lg-n1{margin-left:-.25rem!important}.m-lg-n2{margin:-.5rem!important}.mt-lg-n2,.my-lg-n2{margin-top:-.5rem!important}.mr-lg-n2,.mx-lg-n2{margin-right:-.5rem!important}.mb-lg-n2,.my-lg-n2{margin-bottom:-.5rem!important}.ml-lg-n2,.mx-lg-n2{margin-left:-.5rem!important}.m-lg-n3{margin:-1rem!important}.mt-lg-n3,.my-lg-n3{margin-top:-1rem!important}.mr-lg-n3,.mx-lg-n3{margin-right:-1rem!important}.mb-lg-n3,.my-lg-n3{margin-bottom:-1rem!important}.ml-lg-n3,.mx-lg-n3{margin-left:-1rem!important}.m-lg-n4{margin:-1.5rem!important}.mt-lg-n4,.my-lg-n4{margin-top:-1.5rem!important}.mr-lg-n4,.mx-lg-n4{margin-right:-1.5rem!important}.mb-lg-n4,.my-lg-n4{margin-bottom:-1.5rem!important}.ml-lg-n4,.mx-lg-n4{margin-left:-1.5rem!important}.m-lg-n5{margin:-3rem!important}.mt-lg-n5,.my-lg-n5{margin-top:-3rem!important}.mr-lg-n5,.mx-lg-n5{margin-right:-3rem!important}.mb-lg-n5,.my-lg-n5{margin-bottom:-3rem!important}.ml-lg-n5,.mx-lg-n5{margin-left:-3rem!important}.m-lg-auto{margin:auto!important}.mt-lg-auto,.my-lg-auto{margin-top:auto!important}.mr-lg-auto,.mx-lg-auto{margin-right:auto!important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto!important}.ml-lg-auto,.mx-lg-auto{margin-left:auto!important}}@media 
(min-width:1200px){.m-xl-0{margin:0!important}.mt-xl-0,.my-xl-0{margin-top:0!important}.mr-xl-0,.mx-xl-0{margin-right:0!important}.mb-xl-0,.my-xl-0{margin-bottom:0!important}.ml-xl-0,.mx-xl-0{margin-left:0!important}.m-xl-1{margin:.25rem!important}.mt-xl-1,.my-xl-1{margin-top:.25rem!important}.mr-xl-1,.mx-xl-1{margin-right:.25rem!important}.mb-xl-1,.my-xl-1{margin-bottom:.25rem!important}.ml-xl-1,.mx-xl-1{margin-left:.25rem!important}.m-xl-2{margin:.5rem!important}.mt-xl-2,.my-xl-2{margin-top:.5rem!important}.mr-xl-2,.mx-xl-2{margin-right:.5rem!important}.mb-xl-2,.my-xl-2{margin-bottom:.5rem!important}.ml-xl-2,.mx-xl-2{margin-left:.5rem!important}.m-xl-3{margin:1rem!important}.mt-xl-3,.my-xl-3{margin-top:1rem!important}.mr-xl-3,.mx-xl-3{margin-right:1rem!important}.mb-xl-3,.my-xl-3{margin-bottom:1rem!important}.ml-xl-3,.mx-xl-3{margin-left:1rem!important}.m-xl-4{margin:1.5rem!important}.mt-xl-4,.my-xl-4{margin-top:1.5rem!important}.mr-xl-4,.mx-xl-4{margin-right:1.5rem!important}.mb-xl-4,.my-xl-4{margin-bottom:1.5rem!important}.ml-xl-4,.mx-xl-4{margin-left:1.5rem!important}.m-xl-5{margin:3rem!important}.mt-xl-5,.my-xl-5{margin-top:3rem!important}.mr-xl-5,.mx-xl-5{margin-right:3rem!important}.mb-xl-5,.my-xl-5{margin-bottom:3rem!important}.ml-xl-5,.mx-xl-5{margin-left:3rem!important}.p-xl-0{padding:0!important}.pt-xl-0,.py-xl-0{padding-top:0!important}.pr-xl-0,.px-xl-0{padding-right:0!important}.pb-xl-0,.py-xl-0{padding-bottom:0!important}.pl-xl-0,.px-xl-0{padding-left:0!important}.p-xl-1{padding:.25rem!important}.pt-xl-1,.py-xl-1{padding-top:.25rem!important}.pr-xl-1,.px-xl-1{padding-right:.25rem!important}.pb-xl-1,.py-xl-1{padding-bottom:.25rem!important}.pl-xl-1,.px-xl-1{padding-left:.25rem!important}.p-xl-2{padding:.5rem!important}.pt-xl-2,.py-xl-2{padding-top:.5rem!important}.pr-xl-2,.px-xl-2{padding-right:.5rem!important}.pb-xl-2,.py-xl-2{padding-bottom:.5rem!important}.pl-xl-2,.px-xl-2{padding-left:.5rem!important}.p-xl-3{padding:1rem!important}.pt-xl-3,.py-xl-3{padding-top:1rem!important}.pr-xl-3,.px-xl-3{padding-right:1rem!important}.pb-xl-3,.py-xl-3{padding-bottom:1rem!important}.pl-xl-3,.px-xl-3{padding-left:1rem!important}.p-xl-4{padding:1.5rem!important}.pt-xl-4,.py-xl-4{padding-top:1.5rem!important}.pr-xl-4,.px-xl-4{padding-right:1.5rem!important}.pb-xl-4,.py-xl-4{padding-bottom:1.5rem!important}.pl-xl-4,.px-xl-4{padding-left:1.5rem!important}.p-xl-5{padding:3rem!important}.pt-xl-5,.py-xl-5{padding-top:3rem!important}.pr-xl-5,.px-xl-5{padding-right:3rem!important}.pb-xl-5,.py-xl-5{padding-bottom:3rem!important}.pl-xl-5,.px-xl-5{padding-left:3rem!important}.m-xl-n1{margin:-.25rem!important}.mt-xl-n1,.my-xl-n1{margin-top:-.25rem!important}.mr-xl-n1,.mx-xl-n1{margin-right:-.25rem!important}.mb-xl-n1,.my-xl-n1{margin-bottom:-.25rem!important}.ml-xl-n1,.mx-xl-n1{margin-left:-.25rem!important}.m-xl-n2{margin:-.5rem!important}.mt-xl-n2,.my-xl-n2{margin-top:-.5rem!important}.mr-xl-n2,.mx-xl-n2{margin-right:-.5rem!important}.mb-xl-n2,.my-xl-n2{margin-bottom:-.5rem!important}.ml-xl-n2,.mx-xl-n2{margin-left:-.5rem!important}.m-xl-n3{margin:-1rem!important}.mt-xl-n3,.my-xl-n3{margin-top:-1rem!important}.mr-xl-n3,.mx-xl-n3{margin-right:-1rem!important}.mb-xl-n3,.my-xl-n3{margin-bottom:-1rem!important}.ml-xl-n3,.mx-xl-n3{margin-left:-1rem!important}.m-xl-n4{margin:-1.5rem!important}.mt-xl-n4,.my-xl-n4{margin-top:-1.5rem!important}.mr-xl-n4,.mx-xl-n4{margin-right:-1.5rem!important}.mb-xl-n4,.my-xl-n4{margin-bottom:-1.5rem!important}.ml-xl-n4,.mx-xl-n4{margin-left:-1.5rem!important}.m-xl-n5{marg
in:-3rem!important}.mt-xl-n5,.my-xl-n5{margin-top:-3rem!important}.mr-xl-n5,.mx-xl-n5{margin-right:-3rem!important}.mb-xl-n5,.my-xl-n5{margin-bottom:-3rem!important}.ml-xl-n5,.mx-xl-n5{margin-left:-3rem!important}.m-xl-auto{margin:auto!important}.mt-xl-auto,.my-xl-auto{margin-top:auto!important}.mr-xl-auto,.mx-xl-auto{margin-right:auto!important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto!important}.ml-xl-auto,.mx-xl-auto{margin-left:auto!important}}.stretched-link:after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;pointer-events:auto;content:"";background-color:transparent}.text-monospace{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace!important}.text-justify{text-align:justify!important}.text-wrap{white-space:normal!important}.text-nowrap{white-space:nowrap!important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left!important}.text-right{text-align:right!important}.text-center{text-align:center!important}@media (min-width:540px){.text-sm-left{text-align:left!important}.text-sm-right{text-align:right!important}.text-sm-center{text-align:center!important}}@media (min-width:720px){.text-md-left{text-align:left!important}.text-md-right{text-align:right!important}.text-md-center{text-align:center!important}}@media (min-width:960px){.text-lg-left{text-align:left!important}.text-lg-right{text-align:right!important}.text-lg-center{text-align:center!important}}@media (min-width:1200px){.text-xl-left{text-align:left!important}.text-xl-right{text-align:right!important}.text-xl-center{text-align:center!important}}.text-lowercase{text-transform:lowercase!important}.text-uppercase{text-transform:uppercase!important}.text-capitalize{text-transform:capitalize!important}.font-weight-light{font-weight:300!important}.font-weight-lighter{font-weight:lighter!important}.font-weight-normal{font-weight:400!important}.font-weight-bold{font-weight:700!important}.font-weight-bolder{font-weight:bolder!important}.font-italic{font-style:italic!important}.text-white{color:#fff!important}.text-primary{color:#007bff!important}a.text-primary:focus,a.text-primary:hover{color:#0056b3!important}.text-secondary{color:#6c757d!important}a.text-secondary:focus,a.text-secondary:hover{color:#494f54!important}.text-success{color:#28a745!important}a.text-success:focus,a.text-success:hover{color:#19692c!important}.text-info{color:#17a2b8!important}a.text-info:focus,a.text-info:hover{color:#0f6674!important}.text-warning{color:#ffc107!important}a.text-warning:focus,a.text-warning:hover{color:#ba8b00!important}.text-danger{color:#dc3545!important}a.text-danger:focus,a.text-danger:hover{color:#a71d2a!important}.text-light{color:#f8f9fa!important}a.text-light:focus,a.text-light:hover{color:#cbd3da!important}.text-dark{color:#343a40!important}a.text-dark:focus,a.text-dark:hover{color:#121416!important}.text-body{color:#212529!important}.text-muted{color:#6c757d!important}.text-black-50{color:rgba(0,0,0,.5)!important}.text-white-50{color:hsla(0,0%,100%,.5)!important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.text-decoration-none{text-decoration:none!important}.text-break{word-wrap:break-word!important}.text-reset{color:inherit!important}.visible{visibility:visible!important}.invisible{visibility:hidden!important}@media print{*,:after,:before{text-shadow:none!important;box-shadow:none!important}a:not(.btn){text-decoration:underline}abbr[title]:after{content:" (" attr(title) 
")"}pre{white-space:pre-wrap!important}blockquote,pre{border:1px solid #adb5bd;page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}h2,h3,p{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}.container,body{min-width:960px!important}.navbar{display:none}.badge{border:1px solid #000}.table{border-collapse:collapse!important}.table td,.table th{background-color:#fff!important}.table-bordered td,.table-bordered th{border:1px solid #dee2e6!important}.table-dark{color:inherit}.table-dark tbody+tbody,.table-dark td,.table-dark th,.table-dark thead th{border-color:#dee2e6}.table .thead-dark th{color:inherit;border-color:#dee2e6}}html{font-size:var(--pst-font-size-base);scroll-padding-top:calc(var(--pst-header-height) + 12px)}body{padding-top:calc(var(--pst-header-height) + 20px);background-color:#fff;font-family:var(--pst-font-family-base);font-weight:400;line-height:1.65;color:rgba(var(--pst-color-text-base),1)}p{margin-bottom:1.15rem;font-size:1em;color:rgba(var(--pst-color-paragraph),1)}p.rubric{border-bottom:1px solid #c9c9c9}a{color:rgba(var(--pst-color-link),1);text-decoration:none}a:hover{color:rgba(var(--pst-color-link-hover),1);text-decoration:underline}a.headerlink{color:rgba(var(--pst-color-headerlink),1);font-size:.8em;padding:0 4px;text-decoration:none}a.headerlink:hover{background-color:rgba(var(--pst-color-headerlink),1);color:rgba(var(--pst-color-headerlink-hover),1)}.heading-style,h1,h2,h3,h4,h5,h6{margin:2.75rem 0 1.05rem;font-family:var(--pst-font-family-heading);font-weight:400;line-height:1.15}h1{margin-top:0;font-size:var(--pst-font-size-h1);color:rgba(var(--pst-color-h1),1)}h2{font-size:var(--pst-font-size-h2);color:rgba(var(--pst-color-h2),1)}h3{font-size:var(--pst-font-size-h3);color:rgba(var(--pst-color-h3),1)}h4{font-size:var(--pst-font-size-h4);color:rgba(var(--pst-color-h4),1)}h5{font-size:var(--pst-font-size-h5);color:rgba(var(--pst-color-h5),1)}h6{font-size:var(--pst-font-size-h6);color:rgba(var(--pst-color-h6),1)}.text_small,small{font-size:var(--pst-font-size-milli)}hr{border:0;border-top:1px solid #e5e5e5}code,kbd,pre,samp{font-family:var(--pst-font-family-monospace)}code{color:rgba(var(--pst-color-inline-code),1)}pre{margin:1.5em 0;padding:10px;background-color:rgba(var(--pst-color-preformatted-background),1);color:rgba(var(--pst-color-preformatted-text),1);line-height:1.2em;border:1px solid #c9c9c9;box-shadow:1px 1px 1px #d8d8d8}.navbar{position:fixed;min-height:var(--pst-header-height);width:100%;padding:0}.navbar .container-xl{height:100%}@media (min-width:960px){.navbar #navbar-end>.navbar-end-item{display:inline-block}}.navbar-brand{position:relative;height:var(--pst-header-height);width:auto;padding:.5rem 0}.navbar-brand img{max-width:100%;height:100%;width:auto}.navbar-light{background:#fff!important;box-shadow:0 .125rem .25rem 0 rgba(0,0,0,.11)}.navbar-light .navbar-nav li a.nav-link{padding:0 .5rem;color:rgba(var(--pst-color-navbar-link),1)}.navbar-light .navbar-nav li a.nav-link:hover{color:rgba(var(--pst-color-navbar-link-hover),1)}.navbar-light .navbar-nav>.active>.nav-link{font-weight:600;color:rgba(var(--pst-color-navbar-link-active),1)}.navbar-header a{padding:0 15px}.admonition{margin:1.5625em auto;padding:0 .6rem .8rem!important;overflow:hidden;page-break-inside:avoid;border-left:.2rem 
solid;border-left-color:rgba(var(--pst-color-admonition-default),1);border-bottom-color:rgba(var(--pst-color-admonition-default),1);border-right-color:rgba(var(--pst-color-admonition-default),1);border-top-color:rgba(var(--pst-color-admonition-default),1);border-radius:.1rem;box-shadow:0 .2rem .5rem rgba(0,0,0,.05),0 0 .05rem rgba(0,0,0,.1);transition:color .25s,background-color .25s,border-color .25s}.admonition :last-child{margin-bottom:0}.admonition p.admonition-title~*{padding:0 1.4rem}.admonition>ol,.admonition>ul{margin-left:1em}.admonition .admonition-title{position:relative;margin:0 -.6rem!important;padding:.4rem .6rem .4rem 2rem;font-weight:700;background-color:rgba(var(--pst-color-admonition-default),.1)}.admonition .admonition-title:before{position:absolute;left:.6rem;width:1rem;height:1rem;color:rgba(var(--pst-color-admonition-default),1);font-family:Font Awesome\ 5 Free;font-weight:900;content:var(--pst-icon-admonition-default)}.admonition .admonition-title+*{margin-top:.4em}.admonition.attention{border-color:rgba(var(--pst-color-admonition-attention),1)}.admonition.attention .admonition-title{background-color:rgba(var(--pst-color-admonition-attention),.1)}.admonition.attention .admonition-title:before{color:rgba(var(--pst-color-admonition-attention),1);content:var(--pst-icon-admonition-attention)}.admonition.caution{border-color:rgba(var(--pst-color-admonition-caution),1)}.admonition.caution .admonition-title{background-color:rgba(var(--pst-color-admonition-caution),.1)}.admonition.caution .admonition-title:before{color:rgba(var(--pst-color-admonition-caution),1);content:var(--pst-icon-admonition-caution)}.admonition.warning{border-color:rgba(var(--pst-color-admonition-warning),1)}.admonition.warning .admonition-title{background-color:rgba(var(--pst-color-admonition-warning),.1)}.admonition.warning .admonition-title:before{color:rgba(var(--pst-color-admonition-warning),1);content:var(--pst-icon-admonition-warning)}.admonition.danger{border-color:rgba(var(--pst-color-admonition-danger),1)}.admonition.danger .admonition-title{background-color:rgba(var(--pst-color-admonition-danger),.1)}.admonition.danger .admonition-title:before{color:rgba(var(--pst-color-admonition-danger),1);content:var(--pst-icon-admonition-danger)}.admonition.error{border-color:rgba(var(--pst-color-admonition-error),1)}.admonition.error .admonition-title{background-color:rgba(var(--pst-color-admonition-error),.1)}.admonition.error .admonition-title:before{color:rgba(var(--pst-color-admonition-error),1);content:var(--pst-icon-admonition-error)}.admonition.hint{border-color:rgba(var(--pst-color-admonition-hint),1)}.admonition.hint .admonition-title{background-color:rgba(var(--pst-color-admonition-hint),.1)}.admonition.hint .admonition-title:before{color:rgba(var(--pst-color-admonition-hint),1);content:var(--pst-icon-admonition-hint)}.admonition.tip{border-color:rgba(var(--pst-color-admonition-tip),1)}.admonition.tip .admonition-title{background-color:rgba(var(--pst-color-admonition-tip),.1)}.admonition.tip .admonition-title:before{color:rgba(var(--pst-color-admonition-tip),1);content:var(--pst-icon-admonition-tip)}.admonition.important{border-color:rgba(var(--pst-color-admonition-important),1)}.admonition.important .admonition-title{background-color:rgba(var(--pst-color-admonition-important),.1)}.admonition.important 
.admonition-title:before{color:rgba(var(--pst-color-admonition-important),1);content:var(--pst-icon-admonition-important)}.admonition.note{border-color:rgba(var(--pst-color-admonition-note),1)}.admonition.note .admonition-title{background-color:rgba(var(--pst-color-admonition-note),.1)}.admonition.note .admonition-title:before{color:rgba(var(--pst-color-admonition-note),1);content:var(--pst-icon-admonition-note)}div.deprecated{margin-bottom:10px;margin-top:10px;padding:7px;background-color:#f3e5e5;border:1px solid #eed3d7;border-radius:.5rem}div.deprecated p{color:#b94a48;display:inline}.topic{background-color:#eee}.seealso dd{margin-top:0;margin-bottom:0}.viewcode-back{font-family:var(--pst-font-family-base)}.viewcode-block:target{background-color:#f4debf;border-top:1px solid #ac9;border-bottom:1px solid #ac9}span.guilabel{border:1px solid #7fbbe3;background:#e7f2fa;font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}table.field-list{border-collapse:separate;border-spacing:10px;margin-left:1px}table.field-list th.field-name{padding:1px 8px 1px 5px;white-space:nowrap;background-color:#eee}table.field-list td.field-body p{font-style:italic}table.field-list td.field-body p>strong{font-style:normal}table.field-list td.field-body blockquote{border-left:none;margin:0 0 .3em;padding-left:30px}.table.autosummary td:first-child{white-space:nowrap}footer{width:100%;border-top:1px solid #ccc;padding:10px}footer .footer-item p{margin-bottom:0}.bd-search{position:relative;padding:1rem 15px;margin-right:-15px;margin-left:-15px}.bd-search .icon{position:absolute;color:#a4a6a7;left:25px;top:25px}.bd-search input{border-radius:0;border:0;border-bottom:1px solid #e5e5e5;padding-left:35px}.bd-toc{-ms-flex-order:2;order:2;height:calc(100vh - 2rem);overflow-y:auto}@supports (position:-webkit-sticky) or (position:sticky){.bd-toc{position:-webkit-sticky;position:sticky;top:calc(var(--pst-header-height) + 20px);height:calc(100vh - 5rem);overflow-y:auto}}.bd-toc .onthispage{color:#a4a6a7}.section-nav{padding-left:0;border-left:1px solid #eee;border-bottom:none}.section-nav ul{padding-left:1rem}.toc-entry,.toc-entry a{display:block}.toc-entry a{padding:.125rem 1.5rem;color:rgba(var(--pst-color-toc-link),1)}@media (min-width:1200px){.toc-entry a{padding-right:0}}.toc-entry a:hover{color:rgba(var(--pst-color-toc-link-hover),1);text-decoration:none}.bd-sidebar{padding-top:1em}@media (min-width:720px){.bd-sidebar{border-right:1px solid rgba(0,0,0,.1)}@supports (position:-webkit-sticky) or (position:sticky){.bd-sidebar{position:-webkit-sticky;position:sticky;top:calc(var(--pst-header-height) + 20px);z-index:1000;height:calc(100vh - var(--pst-header-height) - 20px)}}}.bd-sidebar.no-sidebar{border-right:0}.bd-links{padding-top:1rem;padding-bottom:1rem;margin-right:-15px;margin-left:-15px}@media (min-width:720px){.bd-links{display:block!important}@supports (position:-webkit-sticky) or (position:sticky){.bd-links{max-height:calc(100vh - 11rem);overflow-y:auto}}}.bd-sidenav{display:none}.bd-content{padding-top:20px}.bd-content .section{max-width:100%}.bd-content .section table{display:block;overflow:auto}.bd-toc-link{display:block;padding:.25rem 
1.5rem;font-weight:600;color:rgba(0,0,0,.65)}.bd-toc-link:hover{color:rgba(0,0,0,.85);text-decoration:none}.bd-toc-item.active{margin-bottom:1rem}.bd-toc-item.active:not(:first-child){margin-top:1rem}.bd-toc-item.active>.bd-toc-link{color:rgba(0,0,0,.85)}.bd-toc-item.active>.bd-toc-link:hover{background-color:transparent}.bd-toc-item.active>.bd-sidenav{display:block}nav.bd-links p.caption{font-size:var(--pst-sidebar-caption-font-size);text-transform:uppercase;font-weight:700;position:relative;margin-top:1.25em;margin-bottom:.5em;padding:0 1.5rem;color:rgba(var(--pst-color-sidebar-caption),1)}nav.bd-links p.caption:first-child{margin-top:0}.bd-sidebar .nav{font-size:var(--pst-sidebar-font-size)}.bd-sidebar .nav ul{list-style:none;padding:0 0 0 1.5rem}.bd-sidebar .nav li>a{display:block;padding:.25rem 1.5rem;color:rgba(var(--pst-color-sidebar-link),1)}.bd-sidebar .nav li>a:hover{color:rgba(var(--pst-color-sidebar-link-hover),1);text-decoration:none;background-color:transparent}.bd-sidebar .nav li>a.reference.external:after{font-family:Font Awesome\ 5 Free;font-weight:900;content:"\f35d";font-size:.75em;margin-left:.3em}.bd-sidebar .nav .active:hover>a,.bd-sidebar .nav .active>a{font-weight:600;color:rgba(var(--pst-color-sidebar-link-active),1)}.toc-h2{font-size:.85rem}.toc-h3{font-size:.75rem}.toc-h4{font-size:.65rem}.toc-entry>.nav-link.active{font-weight:600;color:#130654;color:rgba(var(--pst-color-toc-link-active),1);background-color:transparent;border-left:2px solid rgba(var(--pst-color-toc-link-active),1)}.nav-link:hover{border-style:none}#navbar-main-elements li.nav-item i{font-size:.7rem;padding-left:2px;vertical-align:middle}.bd-toc .nav .nav{display:none}.bd-toc .nav .nav.visible,.bd-toc .nav>.active>ul{display:block}.prev-next-bottom{margin:20px 0}.prev-next-bottom a.left-prev,.prev-next-bottom a.right-next{padding:10px;border:1px solid rgba(0,0,0,.2);max-width:45%;overflow-x:hidden;color:rgba(0,0,0,.65)}.prev-next-bottom a.left-prev{float:left}.prev-next-bottom a.left-prev:before{content:"<< "}.prev-next-bottom a.right-next{float:right}.prev-next-bottom a.right-next:after{content:" >>"}.alert{padding-bottom:0}.alert-info a{color:#e83e8c}#navbar-icon-links i.fa,#navbar-icon-links i.fab,#navbar-icon-links i.far,#navbar-icon-links i.fas{vertical-align:middle;font-style:normal;font-size:1.5rem;line-height:1.25}#navbar-icon-links i.fa-github-square:before{color:#333}#navbar-icon-links i.fa-twitter-square:before{color:#55acee}#navbar-icon-links i.fa-gitlab:before{color:#548}#navbar-icon-links i.fa-bitbucket:before{color:#0052cc}.tocsection{border-left:1px solid #eee;padding:.3rem 1.5rem}.tocsection i{padding-right:.5rem}.editthispage{padding-top:2rem}.editthispage a{color:#130754}.xr-wrap[hidden]{display:block!important}.toctree-checkbox{position:absolute;display:none}.toctree-checkbox~ul{display:none}.toctree-checkbox~label i{transform:rotate(0deg)}.toctree-checkbox:checked~ul{display:block}.toctree-checkbox:checked~label i{transform:rotate(180deg)}.bd-sidebar li{position:relative}.bd-sidebar label{position:absolute;top:0;right:0;height:30px;width:30px;cursor:pointer;display:flex;justify-content:center;align-items:center}.bd-sidebar label:hover{background:rgba(var(--pst-color-sidebar-expander-background-hover),1)}.bd-sidebar label i{display:inline-block;font-size:.75rem;text-align:center}.bd-sidebar label i:hover{color:rgba(var(--pst-color-sidebar-link-hover),1)}.bd-sidebar li.has-children>.reference{padding-right:30px}div.doctest>div.highlight span.gp,span.linenos,table.highlighttable 
td.linenos{user-select:none!important;-webkit-user-select:text!important;-webkit-user-select:none!important;-moz-user-select:none!important;-ms-user-select:none!important} \ No newline at end of file diff --git a/docs/_build/html/_static/css/theme.css b/docs/_build/html/_static/css/theme.css deleted file mode 100644 index 3f6e79da..00000000 --- a/docs/_build/html/_static/css/theme.css +++ /dev/null @@ -1,117 +0,0 @@ -:root { - /***************************************************************************** - * Theme config - **/ - --pst-header-height: 60px; - - /***************************************************************************** - * Font size - **/ - --pst-font-size-base: 15px; /* base font size - applied at body / html level */ - - /* heading font sizes */ - --pst-font-size-h1: 36px; - --pst-font-size-h2: 32px; - --pst-font-size-h3: 26px; - --pst-font-size-h4: 21px; - --pst-font-size-h5: 18px; - --pst-font-size-h6: 16px; - - /* smaller then heading font sizes*/ - --pst-font-size-milli: 12px; - - --pst-sidebar-font-size: .9em; - --pst-sidebar-caption-font-size: .9em; - - /***************************************************************************** - * Font family - **/ - /* These are adapted from https://systemfontstack.com/ */ - --pst-font-family-base-system: -apple-system, BlinkMacSystemFont, Segoe UI, "Helvetica Neue", - Arial, sans-serif, Apple Color Emoji, Segoe UI Emoji, Segoe UI Symbol; - --pst-font-family-monospace-system: "SFMono-Regular", Menlo, Consolas, Monaco, - Liberation Mono, Lucida Console, monospace; - - --pst-font-family-base: var(--pst-font-family-base-system); - --pst-font-family-heading: var(--pst-font-family-base); - --pst-font-family-monospace: var(--pst-font-family-monospace-system); - - /***************************************************************************** - * Color - * - * Colors are defined in rgb string way, "red, green, blue" - **/ - --pst-color-primary: 19, 6, 84; - --pst-color-success: 40, 167, 69; - --pst-color-info: 0, 123, 255; /*23, 162, 184;*/ - --pst-color-warning: 255, 193, 7; - --pst-color-danger: 220, 53, 69; - --pst-color-text-base: 51, 51, 51; - - --pst-color-h1: var(--pst-color-primary); - --pst-color-h2: var(--pst-color-primary); - --pst-color-h3: var(--pst-color-text-base); - --pst-color-h4: var(--pst-color-text-base); - --pst-color-h5: var(--pst-color-text-base); - --pst-color-h6: var(--pst-color-text-base); - --pst-color-paragraph: var(--pst-color-text-base); - --pst-color-link: 0, 91, 129; - --pst-color-link-hover: 227, 46, 0; - --pst-color-headerlink: 198, 15, 15; - --pst-color-headerlink-hover: 255, 255, 255; - --pst-color-preformatted-text: 34, 34, 34; - --pst-color-preformatted-background: 250, 250, 250; - --pst-color-inline-code: 232, 62, 140; - - --pst-color-active-navigation: 19, 6, 84; - --pst-color-navbar-link: 77, 77, 77; - --pst-color-navbar-link-hover: var(--pst-color-active-navigation); - --pst-color-navbar-link-active: var(--pst-color-active-navigation); - --pst-color-sidebar-link: 77, 77, 77; - --pst-color-sidebar-link-hover: var(--pst-color-active-navigation); - --pst-color-sidebar-link-active: var(--pst-color-active-navigation); - --pst-color-sidebar-expander-background-hover: 244, 244, 244; - --pst-color-sidebar-caption: 77, 77, 77; - --pst-color-toc-link: 119, 117, 122; - --pst-color-toc-link-hover: var(--pst-color-active-navigation); - --pst-color-toc-link-active: var(--pst-color-active-navigation); - - /***************************************************************************** - * Icon - **/ - - /* 
font awesome icons*/ - --pst-icon-check-circle: '\f058'; - --pst-icon-info-circle: '\f05a'; - --pst-icon-exclamation-triangle: '\f071'; - --pst-icon-exclamation-circle: '\f06a'; - --pst-icon-times-circle: '\f057'; - --pst-icon-lightbulb: '\f0eb'; - - /***************************************************************************** - * Admonitions - **/ - - --pst-color-admonition-default: var(--pst-color-info); - --pst-color-admonition-note: var(--pst-color-info); - --pst-color-admonition-attention: var(--pst-color-warning); - --pst-color-admonition-caution: var(--pst-color-warning); - --pst-color-admonition-warning: var(--pst-color-warning); - --pst-color-admonition-danger: var(--pst-color-danger); - --pst-color-admonition-error: var(--pst-color-danger); - --pst-color-admonition-hint: var(--pst-color-success); - --pst-color-admonition-tip: var(--pst-color-success); - --pst-color-admonition-important: var(--pst-color-success); - - --pst-icon-admonition-default: var(--pst-icon-info-circle); - --pst-icon-admonition-note: var(--pst-icon-info-circle); - --pst-icon-admonition-attention: var(--pst-icon-exclamation-circle); - --pst-icon-admonition-caution: var(--pst-icon-exclamation-triangle); - --pst-icon-admonition-warning: var(--pst-icon-exclamation-triangle); - --pst-icon-admonition-danger: var(--pst-icon-exclamation-triangle); - --pst-icon-admonition-error: var(--pst-icon-times-circle); - --pst-icon-admonition-hint: var(--pst-icon-lightbulb); - --pst-icon-admonition-tip: var(--pst-icon-lightbulb); - --pst-icon-admonition-important: var(--pst-icon-exclamation-circle); - -} diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js deleted file mode 100644 index daccd209..00000000 --- a/docs/_build/html/_static/doctools.js +++ /dev/null @@ -1,315 +0,0 @@ -/* - * doctools.js - * ~~~~~~~~~~~ - * - * Sphinx JavaScript utilities for all documentation. - * - * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. - * :license: BSD, see LICENSE for details. - * - */ - -/** - * select a different prefix for underscore - */ -$u = _.noConflict(); - -/** - * make the code below compatible with browsers without - * an installed firebug like debugger -if (!window.console || !console.firebug) { - var names = ["log", "debug", "info", "warn", "error", "assert", "dir", - "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", - "profile", "profileEnd"]; - window.console = {}; - for (var i = 0; i < names.length; ++i) - window.console[names[i]] = function() {}; -} - */ - -/** - * small helper function to urldecode strings - */ -jQuery.urldecode = function(x) { - return decodeURIComponent(x).replace(/\+/g, ' '); -}; - -/** - * small helper function to urlencode strings - */ -jQuery.urlencode = encodeURIComponent; - -/** - * This function returns the parsed url parameters of the - * current request. Multiple values per key are supported, - * it will always return arrays of strings for the value parts. - */ -jQuery.getQueryParameters = function(s) { - if (typeof s === 'undefined') - s = document.location.search; - var parts = s.substr(s.indexOf('?') + 1).split('&'); - var result = {}; - for (var i = 0; i < parts.length; i++) { - var tmp = parts[i].split('=', 2); - var key = jQuery.urldecode(tmp[0]); - var value = jQuery.urldecode(tmp[1]); - if (key in result) - result[key].push(value); - else - result[key] = [value]; - } - return result; -}; - -/** - * highlight a given string on a jquery object by wrapping it in - * span elements with the given class name. 
- */ -jQuery.fn.highlightText = function(text, className) { - function highlight(node, addItems) { - if (node.nodeType === 3) { - var val = node.nodeValue; - var pos = val.toLowerCase().indexOf(text); - if (pos >= 0 && - !jQuery(node.parentNode).hasClass(className) && - !jQuery(node.parentNode).hasClass("nohighlight")) { - var span; - var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); - if (isInSVG) { - span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); - } else { - span = document.createElement("span"); - span.className = className; - } - span.appendChild(document.createTextNode(val.substr(pos, text.length))); - node.parentNode.insertBefore(span, node.parentNode.insertBefore( - document.createTextNode(val.substr(pos + text.length)), - node.nextSibling)); - node.nodeValue = val.substr(0, pos); - if (isInSVG) { - var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); - var bbox = node.parentElement.getBBox(); - rect.x.baseVal.value = bbox.x; - rect.y.baseVal.value = bbox.y; - rect.width.baseVal.value = bbox.width; - rect.height.baseVal.value = bbox.height; - rect.setAttribute('class', className); - addItems.push({ - "parent": node.parentNode, - "target": rect}); - } - } - } - else if (!jQuery(node).is("button, select, textarea")) { - jQuery.each(node.childNodes, function() { - highlight(this, addItems); - }); - } - } - var addItems = []; - var result = this.each(function() { - highlight(this, addItems); - }); - for (var i = 0; i < addItems.length; ++i) { - jQuery(addItems[i].parent).before(addItems[i].target); - } - return result; -}; - -/* - * backward compatibility for jQuery.browser - * This will be supported until firefox bug is fixed. - */ -if (!jQuery.browser) { - jQuery.uaMatch = function(ua) { - ua = ua.toLowerCase(); - - var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || - /(webkit)[ \/]([\w.]+)/.exec(ua) || - /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || - /(msie) ([\w.]+)/.exec(ua) || - ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || - []; - - return { - browser: match[ 1 ] || "", - version: match[ 2 ] || "0" - }; - }; - jQuery.browser = {}; - jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; -} - -/** - * Small JavaScript module for the documentation. - */ -var Documentation = { - - init : function() { - this.fixFirefoxAnchorBug(); - this.highlightSearchWords(); - this.initIndexTable(); - if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { - this.initOnKeyListeners(); - } - }, - - /** - * i18n support - */ - TRANSLATIONS : {}, - PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, - LOCALE : 'unknown', - - // gettext and ngettext don't access this so that the functions - // can safely bound to a different name (_ = Documentation.gettext) - gettext : function(string) { - var translated = Documentation.TRANSLATIONS[string]; - if (typeof translated === 'undefined') - return string; - return (typeof translated === 'string') ? translated : translated[0]; - }, - - ngettext : function(singular, plural, n) { - var translated = Documentation.TRANSLATIONS[singular]; - if (typeof translated === 'undefined') - return (n == 1) ? 
singular : plural; - return translated[Documentation.PLURALEXPR(n)]; - }, - - addTranslations : function(catalog) { - for (var key in catalog.messages) - this.TRANSLATIONS[key] = catalog.messages[key]; - this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); - this.LOCALE = catalog.locale; - }, - - /** - * add context elements like header anchor links - */ - addContextElements : function() { - $('div[id] > :header:first').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this headline')). - appendTo(this); - }); - $('dt[id]').each(function() { - $('\u00B6'). - attr('href', '#' + this.id). - attr('title', _('Permalink to this definition')). - appendTo(this); - }); - }, - - /** - * workaround a firefox stupidity - * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 - */ - fixFirefoxAnchorBug : function() { - if (document.location.hash && $.browser.mozilla) - window.setTimeout(function() { - document.location.href += ''; - }, 10); - }, - - /** - * highlight the search words provided in the url in the text - */ - highlightSearchWords : function() { - var params = $.getQueryParameters(); - var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; - if (terms.length) { - var body = $('div.body'); - if (!body.length) { - body = $('body'); - } - window.setTimeout(function() { - $.each(terms, function() { - body.highlightText(this.toLowerCase(), 'highlighted'); - }); - }, 10); - $('') - .appendTo($('#searchbox')); - } - }, - - /** - * init the domain index toggle buttons - */ - initIndexTable : function() { - var togglers = $('img.toggler').click(function() { - var src = $(this).attr('src'); - var idnum = $(this).attr('id').substr(7); - $('tr.cg-' + idnum).toggle(); - if (src.substr(-9) === 'minus.png') - $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); - else - $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); - }).css('display', ''); - if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { - togglers.click(); - } - }, - - /** - * helper function to hide the search marks again - */ - hideSearchWords : function() { - $('#searchbox .highlight-link').fadeOut(300); - $('span.highlighted').removeClass('highlighted'); - }, - - /** - * make the url absolute - */ - makeURL : function(relativeURL) { - return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; - }, - - /** - * get the current relative url - */ - getCurrentURL : function() { - var path = document.location.pathname; - var parts = path.split(/\//); - $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { - if (this === '..') - parts.pop(); - }); - var url = parts.join('/'); - return path.substring(url.lastIndexOf('/') + 1, path.length - 1); - }, - - initOnKeyListeners: function() { - $(document).keydown(function(event) { - var activeElementType = document.activeElement.tagName; - // don't navigate when in search box or textarea - if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' - && !event.altKey && !event.ctrlKey && !event.metaKey && !event.shiftKey) { - switch (event.keyCode) { - case 37: // left - var prevHref = $('link[rel="prev"]').prop('href'); - if (prevHref) { - window.location.href = prevHref; - return false; - } - case 39: // right - var nextHref = $('link[rel="next"]').prop('href'); - if (nextHref) { - window.location.href = nextHref; - return false; - } - } - } - }); - } -}; - -// quick alias for translations -_ = Documentation.gettext; - 
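The deleted `doctools.js` above is Sphinx's stock client-side utility module, and its i18n helpers (`gettext`, `ngettext`, `addTranslations`) are driven by a per-locale translation catalog. One detail worth noting for anyone vendoring this file: `ngettext` as shipped calls `Documentation.PLURALEXPR(n)`, while the property actually defined and assigned by `addTranslations` is `PLURAL_EXPR`, so the plural lookup would throw once a catalog with plural forms is loaded. A minimal sketch of how these helpers are exercised, using a hypothetical German catalog (the message strings are illustrative, not from any real catalog):

```
// Hypothetical catalog in the shape addTranslations() expects:
// `messages` maps a msgid to either a string or an array of plural
// forms; `plural_expr` is an expression over n; `locale` is a tag.
var catalog = {
  messages: {
    "Permalink to this headline": "Permalink zu dieser Überschrift",
    "document": ["Dokument", "Dokumente"]   // [singular, plural]
  },
  plural_expr: "n != 1",
  locale: "de"
};

Documentation.addTranslations(catalog);

Documentation.gettext("document");        // "Dokument" (first plural form)
Documentation.gettext("not in catalog");  // "not in catalog" (falls through)

// Intended result is "Dokumente", but as shipped this line throws a
// TypeError: it calls Documentation.PLURALEXPR(n), which is undefined;
// the property set above is PLURAL_EXPR.
Documentation.ngettext("document", "documents", 2);
```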
-$(document).ready(function() { - Documentation.init(); -}); diff --git a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js deleted file mode 100644 index 26f38484..00000000 --- a/docs/_build/html/_static/documentation_options.js +++ /dev/null @@ -1,12 +0,0 @@ -var DOCUMENTATION_OPTIONS = { - URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), - VERSION: '', - LANGUAGE: 'en', - COLLAPSE_INDEX: false, - BUILDER: 'html', - FILE_SUFFIX: '.html', - LINK_SUFFIX: '.html', - HAS_SOURCE: true, - SOURCELINK_SUFFIX: '.txt', - NAVIGATION_WITH_KEYS: true -}; \ No newline at end of file diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png deleted file mode 100644 index a858a410..00000000 Binary files a/docs/_build/html/_static/file.png and /dev/null differ diff --git a/docs/_build/html/_static/fonts/FontAwesome.otf b/docs/_build/html/_static/fonts/FontAwesome.otf deleted file mode 100644 index 401ec0f3..00000000 Binary files a/docs/_build/html/_static/fonts/FontAwesome.otf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf b/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf deleted file mode 100644 index 809c1f58..00000000 Binary files a/docs/_build/html/_static/fonts/Inconsolata-Bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf b/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf deleted file mode 100644 index fc981ce7..00000000 Binary files a/docs/_build/html/_static/fonts/Inconsolata-Regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Inconsolata.ttf b/docs/_build/html/_static/fonts/Inconsolata.ttf deleted file mode 100644 index 4b8a36d2..00000000 Binary files a/docs/_build/html/_static/fonts/Inconsolata.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato-Bold.ttf b/docs/_build/html/_static/fonts/Lato-Bold.ttf deleted file mode 100644 index 1d23c706..00000000 Binary files a/docs/_build/html/_static/fonts/Lato-Bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato-Regular.ttf b/docs/_build/html/_static/fonts/Lato-Regular.ttf deleted file mode 100644 index 0f3d0f83..00000000 Binary files a/docs/_build/html/_static/fonts/Lato-Regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.eot b/docs/_build/html/_static/fonts/Lato/lato-bold.eot deleted file mode 100644 index 3361183a..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.ttf b/docs/_build/html/_static/fonts/Lato/lato-bold.ttf deleted file mode 100644 index 29f691d5..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff b/docs/_build/html/_static/fonts/Lato/lato-bold.woff deleted file mode 100644 index c6dff51f..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 deleted file mode 100644 index bb195043..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot deleted file mode 100644 index 3d415493..00000000 
Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf deleted file mode 100644 index f402040b..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff deleted file mode 100644 index 88ad05b9..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 b/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 deleted file mode 100644 index c4e3d804..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-bolditalic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.eot b/docs/_build/html/_static/fonts/Lato/lato-italic.eot deleted file mode 100644 index 3f826421..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-italic.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.ttf b/docs/_build/html/_static/fonts/Lato/lato-italic.ttf deleted file mode 100644 index b4bfc9b2..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-italic.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff b/docs/_build/html/_static/fonts/Lato/lato-italic.woff deleted file mode 100644 index 76114bc0..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-italic.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 b/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 deleted file mode 100644 index 3404f37e..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-italic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.eot b/docs/_build/html/_static/fonts/Lato/lato-regular.eot deleted file mode 100644 index 11e3f2a5..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.ttf b/docs/_build/html/_static/fonts/Lato/lato-regular.ttf deleted file mode 100644 index 74decd9e..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.woff b/docs/_build/html/_static/fonts/Lato/lato-regular.woff deleted file mode 100644 index ae1307ff..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 b/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 deleted file mode 100644 index 3bf98433..00000000 Binary files a/docs/_build/html/_static/fonts/Lato/lato-regular.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Roboto-Slab-Bold.woff b/docs/_build/html/_static/fonts/Roboto-Slab-Bold.woff deleted file mode 100644 index 6cb60000..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Roboto-Slab-Bold.woff2 b/docs/_build/html/_static/fonts/Roboto-Slab-Bold.woff2 deleted file mode 100644 index 7059e231..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Bold.woff2 and /dev/null differ diff --git 
a/docs/_build/html/_static/fonts/Roboto-Slab-Light.woff b/docs/_build/html/_static/fonts/Roboto-Slab-Light.woff deleted file mode 100644 index 337d2871..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Light.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Roboto-Slab-Light.woff2 b/docs/_build/html/_static/fonts/Roboto-Slab-Light.woff2 deleted file mode 100644 index 20398aff..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Light.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Roboto-Slab-Regular.woff b/docs/_build/html/_static/fonts/Roboto-Slab-Regular.woff deleted file mode 100644 index f815f63f..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Regular.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Roboto-Slab-Regular.woff2 b/docs/_build/html/_static/fonts/Roboto-Slab-Regular.woff2 deleted file mode 100644 index f2c76e5b..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Regular.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Roboto-Slab-Thin.woff b/docs/_build/html/_static/fonts/Roboto-Slab-Thin.woff deleted file mode 100644 index 6b30ea63..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Thin.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/Roboto-Slab-Thin.woff2 b/docs/_build/html/_static/fonts/Roboto-Slab-Thin.woff2 deleted file mode 100644 index 328f5bb0..00000000 Binary files a/docs/_build/html/_static/fonts/Roboto-Slab-Thin.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf deleted file mode 100644 index df5d1df2..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab-Bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf deleted file mode 100644 index eb52a790..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab-Regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot deleted file mode 100644 index 79dc8efe..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf deleted file mode 100644 index df5d1df2..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff deleted file mode 100644 index 6cb60000..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 deleted file mode 100644 index 7059e231..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot deleted file mode 100644 index 2f7ca78a..00000000 Binary files 
a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf deleted file mode 100644 index eb52a790..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff deleted file mode 100644 index f815f63f..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 b/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 deleted file mode 100644 index f2c76e5b..00000000 Binary files a/docs/_build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.eot b/docs/_build/html/_static/fonts/fontawesome-webfont.eot deleted file mode 100644 index e9f60ca9..00000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.eot and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.svg b/docs/_build/html/_static/fonts/fontawesome-webfont.svg deleted file mode 100644 index 855c845e..00000000 --- a/docs/_build/html/_static/fonts/fontawesome-webfont.svg +++ /dev/null @@ -1,2671 +0,0 @@ [2,671 deleted lines of SVG font data omitted: the XML markup was stripped during extraction, leaving only the file header "Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016" and "Copyright Dave Gandy 2016. All rights reserved."] diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.ttf b/docs/_build/html/_static/fonts/fontawesome-webfont.ttf deleted file mode 100644 index 35acda2f..00000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.ttf and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff b/docs/_build/html/_static/fonts/fontawesome-webfont.woff deleted file mode 100644 index
400014a4..00000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 b/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 deleted file mode 100644 index 4d13fc60..00000000 Binary files a/docs/_build/html/_static/fonts/fontawesome-webfont.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-bold-italic.woff b/docs/_build/html/_static/fonts/lato-bold-italic.woff deleted file mode 100644 index 88ad05b9..00000000 Binary files a/docs/_build/html/_static/fonts/lato-bold-italic.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-bold-italic.woff2 b/docs/_build/html/_static/fonts/lato-bold-italic.woff2 deleted file mode 100644 index c4e3d804..00000000 Binary files a/docs/_build/html/_static/fonts/lato-bold-italic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-bold.woff b/docs/_build/html/_static/fonts/lato-bold.woff deleted file mode 100644 index c6dff51f..00000000 Binary files a/docs/_build/html/_static/fonts/lato-bold.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-bold.woff2 b/docs/_build/html/_static/fonts/lato-bold.woff2 deleted file mode 100644 index bb195043..00000000 Binary files a/docs/_build/html/_static/fonts/lato-bold.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-normal-italic.woff b/docs/_build/html/_static/fonts/lato-normal-italic.woff deleted file mode 100644 index 76114bc0..00000000 Binary files a/docs/_build/html/_static/fonts/lato-normal-italic.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-normal-italic.woff2 b/docs/_build/html/_static/fonts/lato-normal-italic.woff2 deleted file mode 100644 index 3404f37e..00000000 Binary files a/docs/_build/html/_static/fonts/lato-normal-italic.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-normal.woff b/docs/_build/html/_static/fonts/lato-normal.woff deleted file mode 100644 index ae1307ff..00000000 Binary files a/docs/_build/html/_static/fonts/lato-normal.woff and /dev/null differ diff --git a/docs/_build/html/_static/fonts/lato-normal.woff2 b/docs/_build/html/_static/fonts/lato-normal.woff2 deleted file mode 100644 index 3bf98433..00000000 Binary files a/docs/_build/html/_static/fonts/lato-normal.woff2 and /dev/null differ diff --git a/docs/_build/html/_static/images/logo_binder.svg b/docs/_build/html/_static/images/logo_binder.svg deleted file mode 100644 index 45fecf75..00000000 --- a/docs/_build/html/_static/images/logo_binder.svg +++ /dev/null @@ -1,19 +0,0 @@ [19 deleted lines of SVG markup omitted: tags stripped during extraction, leaving only the title "logo"] diff --git a/docs/_build/html/_static/images/logo_colab.png b/docs/_build/html/_static/images/logo_colab.png deleted file mode 100644 index b7560ec2..00000000 Binary files a/docs/_build/html/_static/images/logo_colab.png and /dev/null differ diff --git a/docs/_build/html/_static/images/logo_jupyterhub.svg b/docs/_build/html/_static/images/logo_jupyterhub.svg deleted file mode 100644 index 60cfe9f2..00000000 --- a/docs/_build/html/_static/images/logo_jupyterhub.svg +++ /dev/null @@ -1 +0,0 @@ [1 deleted SVG line omitted: tags stripped during extraction, leaving only the text "logo_jupyterhub" and "Hub"] diff --git a/docs/_build/html/_static/jquery-3.5.1.js b/docs/_build/html/_static/jquery-3.5.1.js deleted file mode 100644 index 50937333..00000000 --- a/docs/_build/html/_static/jquery-3.5.1.js +++ /dev/null @@ -1,10872 +0,0 @@ -/*!
- * jQuery JavaScript Library v3.5.1 - * https://jquery.com/ - * - * Includes Sizzle.js - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://jquery.org/license - * - * Date: 2020-05-04T22:49Z - */ -( function( global, factory ) { - - "use strict"; - - if ( typeof module === "object" && typeof module.exports === "object" ) { - - // For CommonJS and CommonJS-like environments where a proper `window` - // is present, execute the factory and get jQuery. - // For environments that do not have a `window` with a `document` - // (such as Node.js), expose a factory as module.exports. - // This accentuates the need for the creation of a real `window`. - // e.g. var jQuery = require("jquery")(window); - // See ticket #14549 for more info. - module.exports = global.document ? - factory( global, true ) : - function( w ) { - if ( !w.document ) { - throw new Error( "jQuery requires a window with a document" ); - } - return factory( w ); - }; - } else { - factory( global ); - } - -// Pass this if window is not defined yet -} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { - -// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 -// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode -// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common -// enough that all such attempts are guarded in a try block. -"use strict"; - -var arr = []; - -var getProto = Object.getPrototypeOf; - -var slice = arr.slice; - -var flat = arr.flat ? function( array ) { - return arr.flat.call( array ); -} : function( array ) { - return arr.concat.apply( [], array ); -}; - - -var push = arr.push; - -var indexOf = arr.indexOf; - -var class2type = {}; - -var toString = class2type.toString; - -var hasOwn = class2type.hasOwnProperty; - -var fnToString = hasOwn.toString; - -var ObjectFunctionString = fnToString.call( Object ); - -var support = {}; - -var isFunction = function isFunction( obj ) { - - // Support: Chrome <=57, Firefox <=52 - // In some browsers, typeof returns "function" for HTML elements - // (i.e., `typeof document.createElement( "object" ) === "function"`). - // We don't want to classify *any* DOM node as a function. - return typeof obj === "function" && typeof obj.nodeType !== "number"; - }; - - -var isWindow = function isWindow( obj ) { - return obj != null && obj === obj.window; - }; - - -var document = window.document; - - - - var preservedScriptAttributes = { - type: true, - src: true, - nonce: true, - noModule: true - }; - - function DOMEval( code, node, doc ) { - doc = doc || document; - - var i, val, - script = doc.createElement( "script" ); - - script.text = code; - if ( node ) { - for ( i in preservedScriptAttributes ) { - - // Support: Firefox 64+, Edge 18+ - // Some browsers don't support the "nonce" property on scripts. - // On the other hand, just using `getAttribute` is not enough as - // the `nonce` attribute is reset to an empty string whenever it - // becomes browsing-context connected. - // See https://github.com/whatwg/html/issues/2369 - // See https://html.spec.whatwg.org/#nonce-attributes - // The `node.getAttribute` check was added for the sake of - // `jQuery.globalEval` so that it can fake a nonce-containing node - // via an object. 
- val = node[ i ] || node.getAttribute && node.getAttribute( i ); - if ( val ) { - script.setAttribute( i, val ); - } - } - } - doc.head.appendChild( script ).parentNode.removeChild( script ); - } - - -function toType( obj ) { - if ( obj == null ) { - return obj + ""; - } - - // Support: Android <=2.3 only (functionish RegExp) - return typeof obj === "object" || typeof obj === "function" ? - class2type[ toString.call( obj ) ] || "object" : - typeof obj; -} -/* global Symbol */ -// Defining this global in .eslintrc.json would create a danger of using the global -// unguarded in another place, it seems safer to define global only for this module - - - -var - version = "3.5.1", - - // Define a local copy of jQuery - jQuery = function( selector, context ) { - - // The jQuery object is actually just the init constructor 'enhanced' - // Need init if jQuery is called (just allow error to be thrown if not included) - return new jQuery.fn.init( selector, context ); - }; - -jQuery.fn = jQuery.prototype = { - - // The current version of jQuery being used - jquery: version, - - constructor: jQuery, - - // The default length of a jQuery object is 0 - length: 0, - - toArray: function() { - return slice.call( this ); - }, - - // Get the Nth element in the matched element set OR - // Get the whole matched element set as a clean array - get: function( num ) { - - // Return all the elements in a clean array - if ( num == null ) { - return slice.call( this ); - } - - // Return just the one element from the set - return num < 0 ? this[ num + this.length ] : this[ num ]; - }, - - // Take an array of elements and push it onto the stack - // (returning the new matched element set) - pushStack: function( elems ) { - - // Build a new jQuery matched element set - var ret = jQuery.merge( this.constructor(), elems ); - - // Add the old object onto the stack (as a reference) - ret.prevObject = this; - - // Return the newly-formed element set - return ret; - }, - - // Execute a callback for every element in the matched set. - each: function( callback ) { - return jQuery.each( this, callback ); - }, - - map: function( callback ) { - return this.pushStack( jQuery.map( this, function( elem, i ) { - return callback.call( elem, i, elem ); - } ) ); - }, - - slice: function() { - return this.pushStack( slice.apply( this, arguments ) ); - }, - - first: function() { - return this.eq( 0 ); - }, - - last: function() { - return this.eq( -1 ); - }, - - even: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return ( i + 1 ) % 2; - } ) ); - }, - - odd: function() { - return this.pushStack( jQuery.grep( this, function( _elem, i ) { - return i % 2; - } ) ); - }, - - eq: function( i ) { - var len = this.length, - j = +i + ( i < 0 ? len : 0 ); - return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); - }, - - end: function() { - return this.prevObject || this.constructor(); - }, - - // For internal use only. - // Behaves like an Array's method, not like a jQuery method. 
- push: push, - sort: arr.sort, - splice: arr.splice -}; - -jQuery.extend = jQuery.fn.extend = function() { - var options, name, src, copy, copyIsArray, clone, - target = arguments[ 0 ] || {}, - i = 1, - length = arguments.length, - deep = false; - - // Handle a deep copy situation - if ( typeof target === "boolean" ) { - deep = target; - - // Skip the boolean and the target - target = arguments[ i ] || {}; - i++; - } - - // Handle case when target is a string or something (possible in deep copy) - if ( typeof target !== "object" && !isFunction( target ) ) { - target = {}; - } - - // Extend jQuery itself if only one argument is passed - if ( i === length ) { - target = this; - i--; - } - - for ( ; i < length; i++ ) { - - // Only deal with non-null/undefined values - if ( ( options = arguments[ i ] ) != null ) { - - // Extend the base object - for ( name in options ) { - copy = options[ name ]; - - // Prevent Object.prototype pollution - // Prevent never-ending loop - if ( name === "__proto__" || target === copy ) { - continue; - } - - // Recurse if we're merging plain objects or arrays - if ( deep && copy && ( jQuery.isPlainObject( copy ) || - ( copyIsArray = Array.isArray( copy ) ) ) ) { - src = target[ name ]; - - // Ensure proper type for the source value - if ( copyIsArray && !Array.isArray( src ) ) { - clone = []; - } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { - clone = {}; - } else { - clone = src; - } - copyIsArray = false; - - // Never move original objects, clone them - target[ name ] = jQuery.extend( deep, clone, copy ); - - // Don't bring in undefined values - } else if ( copy !== undefined ) { - target[ name ] = copy; - } - } - } - } - - // Return the modified object - return target; -}; - -jQuery.extend( { - - // Unique for each copy of jQuery on the page - expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), - - // Assume jQuery is ready without the ready module - isReady: true, - - error: function( msg ) { - throw new Error( msg ); - }, - - noop: function() {}, - - isPlainObject: function( obj ) { - var proto, Ctor; - - // Detect obvious negatives - // Use toString instead of jQuery.type to catch host objects - if ( !obj || toString.call( obj ) !== "[object Object]" ) { - return false; - } - - proto = getProto( obj ); - - // Objects with no prototype (e.g., `Object.create( null )`) are plain - if ( !proto ) { - return true; - } - - // Objects with prototype are plain iff they were constructed by a global Object function - Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; - return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; - }, - - isEmptyObject: function( obj ) { - var name; - - for ( name in obj ) { - return false; - } - return true; - }, - - // Evaluates a script in a provided context; falls back to the global one - // if not specified. 
- globalEval: function( code, options, doc ) { - DOMEval( code, { nonce: options && options.nonce }, doc ); - }, - - each: function( obj, callback ) { - var length, i = 0; - - if ( isArrayLike( obj ) ) { - length = obj.length; - for ( ; i < length; i++ ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } else { - for ( i in obj ) { - if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { - break; - } - } - } - - return obj; - }, - - // results is for internal usage only - makeArray: function( arr, results ) { - var ret = results || []; - - if ( arr != null ) { - if ( isArrayLike( Object( arr ) ) ) { - jQuery.merge( ret, - typeof arr === "string" ? - [ arr ] : arr - ); - } else { - push.call( ret, arr ); - } - } - - return ret; - }, - - inArray: function( elem, arr, i ) { - return arr == null ? -1 : indexOf.call( arr, elem, i ); - }, - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - merge: function( first, second ) { - var len = +second.length, - j = 0, - i = first.length; - - for ( ; j < len; j++ ) { - first[ i++ ] = second[ j ]; - } - - first.length = i; - - return first; - }, - - grep: function( elems, callback, invert ) { - var callbackInverse, - matches = [], - i = 0, - length = elems.length, - callbackExpect = !invert; - - // Go through the array, only saving the items - // that pass the validator function - for ( ; i < length; i++ ) { - callbackInverse = !callback( elems[ i ], i ); - if ( callbackInverse !== callbackExpect ) { - matches.push( elems[ i ] ); - } - } - - return matches; - }, - - // arg is for internal usage only - map: function( elems, callback, arg ) { - var length, value, - i = 0, - ret = []; - - // Go through the array, translating each of the items to their new values - if ( isArrayLike( elems ) ) { - length = elems.length; - for ( ; i < length; i++ ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - - // Go through every key on the object, - } else { - for ( i in elems ) { - value = callback( elems[ i ], i, arg ); - - if ( value != null ) { - ret.push( value ); - } - } - } - - // Flatten any nested arrays - return flat( ret ); - }, - - // A global GUID counter for objects - guid: 1, - - // jQuery.support is not used in Core but other projects attach their - // properties to it so it needs to exist. - support: support -} ); - -if ( typeof Symbol === "function" ) { - jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; -} - -// Populate the class2type map -jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), -function( _i, name ) { - class2type[ "[object " + name + "]" ] = name.toLowerCase(); -} ); - -function isArrayLike( obj ) { - - // Support: real iOS 8.2 only (not reproducible in simulator) - // `in` check used to prevent JIT error (gh-2145) - // hasOwn isn't used here due to false negatives - // regarding Nodelist length in IE - var length = !!obj && "length" in obj && obj.length, - type = toType( obj ); - - if ( isFunction( obj ) || isWindow( obj ) ) { - return false; - } - - return type === "array" || length === 0 || - typeof length === "number" && length > 0 && ( length - 1 ) in obj; -} -var Sizzle = -/*! 
- * Sizzle CSS Selector Engine v2.3.5 - * https://sizzlejs.com/ - * - * Copyright JS Foundation and other contributors - * Released under the MIT license - * https://js.foundation/ - * - * Date: 2020-03-14 - */ -( function( window ) { -var i, - support, - Expr, - getText, - isXML, - tokenize, - compile, - select, - outermostContext, - sortInput, - hasDuplicate, - - // Local document vars - setDocument, - document, - docElem, - documentIsHTML, - rbuggyQSA, - rbuggyMatches, - matches, - contains, - - // Instance-specific data - expando = "sizzle" + 1 * new Date(), - preferredDoc = window.document, - dirruns = 0, - done = 0, - classCache = createCache(), - tokenCache = createCache(), - compilerCache = createCache(), - nonnativeSelectorCache = createCache(), - sortOrder = function( a, b ) { - if ( a === b ) { - hasDuplicate = true; - } - return 0; - }, - - // Instance methods - hasOwn = ( {} ).hasOwnProperty, - arr = [], - pop = arr.pop, - pushNative = arr.push, - push = arr.push, - slice = arr.slice, - - // Use a stripped-down indexOf as it's faster than native - // https://jsperf.com/thor-indexof-vs-for/5 - indexOf = function( list, elem ) { - var i = 0, - len = list.length; - for ( ; i < len; i++ ) { - if ( list[ i ] === elem ) { - return i; - } - } - return -1; - }, - - booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + - "ismap|loop|multiple|open|readonly|required|scoped", - - // Regular expressions - - // http://www.w3.org/TR/css3-selectors/#whitespace - whitespace = "[\\x20\\t\\r\\n\\f]", - - // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram - identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + - "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", - - // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors - attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + - - // Operator (capture 2) - "*([*^$|!~]?=)" + whitespace + - - // "Attribute values must be CSS identifiers [capture 5] - // or strings [capture 3 or capture 4]" - "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + - whitespace + "*\\]", - - pseudos = ":(" + identifier + ")(?:\\((" + - - // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: - // 1. quoted (capture 3; capture 4 or capture 5) - "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + - - // 2. simple (capture 6) - "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + - - // 3. 
anything else (capture 2) - ".*" + - ")\\)|)", - - // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter - rwhitespace = new RegExp( whitespace + "+", "g" ), - rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + - whitespace + "+$", "g" ), - - rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), - rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + - "*" ), - rdescend = new RegExp( whitespace + "|>" ), - - rpseudo = new RegExp( pseudos ), - ridentifier = new RegExp( "^" + identifier + "$" ), - - matchExpr = { - "ID": new RegExp( "^#(" + identifier + ")" ), - "CLASS": new RegExp( "^\\.(" + identifier + ")" ), - "TAG": new RegExp( "^(" + identifier + "|[*])" ), - "ATTR": new RegExp( "^" + attributes ), - "PSEUDO": new RegExp( "^" + pseudos ), - "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + - whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + - whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), - "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), - - // For use in libraries implementing .is() - // We use this for POS matching in `select` - "needsContext": new RegExp( "^" + whitespace + - "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + - "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) - }, - - rhtml = /HTML$/i, - rinputs = /^(?:input|select|textarea|button)$/i, - rheader = /^h\d$/i, - - rnative = /^[^{]+\{\s*\[native \w/, - - // Easily-parseable/retrievable ID or TAG or CLASS selectors - rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, - - rsibling = /[+~]/, - - // CSS escapes - // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters - runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), - funescape = function( escape, nonHex ) { - var high = "0x" + escape.slice( 1 ) - 0x10000; - - return nonHex ? - - // Strip the backslash prefix from a non-hex escape sequence - nonHex : - - // Replace a hexadecimal escape sequence with the encoded Unicode code point - // Support: IE <=11+ - // For values outside the Basic Multilingual Plane (BMP), manually construct a - // surrogate pair - high < 0 ? 
- String.fromCharCode( high + 0x10000 ) : - String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); - }, - - // CSS string/identifier serialization - // https://drafts.csswg.org/cssom/#common-serializing-idioms - rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, - fcssescape = function( ch, asCodePoint ) { - if ( asCodePoint ) { - - // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER - if ( ch === "\0" ) { - return "\uFFFD"; - } - - // Control characters and (dependent upon position) numbers get escaped as code points - return ch.slice( 0, -1 ) + "\\" + - ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; - } - - // Other potentially-special ASCII characters get backslash-escaped - return "\\" + ch; - }, - - // Used for iframes - // See setDocument() - // Removing the function wrapper causes a "Permission Denied" - // error in IE - unloadHandler = function() { - setDocument(); - }, - - inDisabledFieldset = addCombinator( - function( elem ) { - return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; - }, - { dir: "parentNode", next: "legend" } - ); - -// Optimize for push.apply( _, NodeList ) -try { - push.apply( - ( arr = slice.call( preferredDoc.childNodes ) ), - preferredDoc.childNodes - ); - - // Support: Android<4.0 - // Detect silently failing push.apply - // eslint-disable-next-line no-unused-expressions - arr[ preferredDoc.childNodes.length ].nodeType; -} catch ( e ) { - push = { apply: arr.length ? - - // Leverage slice if possible - function( target, els ) { - pushNative.apply( target, slice.call( els ) ); - } : - - // Support: IE<9 - // Otherwise append directly - function( target, els ) { - var j = target.length, - i = 0; - - // Can't trust NodeList.length - while ( ( target[ j++ ] = els[ i++ ] ) ) {} - target.length = j - 1; - } - }; -} - -function Sizzle( selector, context, results, seed ) { - var m, i, elem, nid, match, groups, newSelector, - newContext = context && context.ownerDocument, - - // nodeType defaults to 9, since context defaults to document - nodeType = context ? 
context.nodeType : 9; - - results = results || []; - - // Return early from calls with invalid selector or context - if ( typeof selector !== "string" || !selector || - nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { - - return results; - } - - // Try to shortcut find operations (as opposed to filters) in HTML documents - if ( !seed ) { - setDocument( context ); - context = context || document; - - if ( documentIsHTML ) { - - // If the selector is sufficiently simple, try using a "get*By*" DOM method - // (excepting DocumentFragment context, where the methods don't exist) - if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { - - // ID selector - if ( ( m = match[ 1 ] ) ) { - - // Document context - if ( nodeType === 9 ) { - if ( ( elem = context.getElementById( m ) ) ) { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( elem.id === m ) { - results.push( elem ); - return results; - } - } else { - return results; - } - - // Element context - } else { - - // Support: IE, Opera, Webkit - // TODO: identify versions - // getElementById can match elements by name instead of ID - if ( newContext && ( elem = newContext.getElementById( m ) ) && - contains( context, elem ) && - elem.id === m ) { - - results.push( elem ); - return results; - } - } - - // Type selector - } else if ( match[ 2 ] ) { - push.apply( results, context.getElementsByTagName( selector ) ); - return results; - - // Class selector - } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && - context.getElementsByClassName ) { - - push.apply( results, context.getElementsByClassName( m ) ); - return results; - } - } - - // Take advantage of querySelectorAll - if ( support.qsa && - !nonnativeSelectorCache[ selector + " " ] && - ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && - - // Support: IE 8 only - // Exclude object elements - ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { - - newSelector = selector; - newContext = context; - - // qSA considers elements outside a scoping root when evaluating child or - // descendant combinators, which is not what we want. - // In such cases, we work around the behavior by prefixing every selector in the - // list with an ID selector referencing the scope context. - // The technique has to be used as well when a leading combinator is used - // as such selectors are not recognized by querySelectorAll. - // Thanks to Andrew Dupont for this technique. - if ( nodeType === 1 && - ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { - - // Expand context for sibling selectors - newContext = rsibling.test( selector ) && testContext( context.parentNode ) || - context; - - // We can use :scope instead of the ID hack if the browser - // supports it & if we're not changing the context. - if ( newContext !== context || !support.scope ) { - - // Capture the context ID, setting it first if necessary - if ( ( nid = context.getAttribute( "id" ) ) ) { - nid = nid.replace( rcssescape, fcssescape ); - } else { - context.setAttribute( "id", ( nid = expando ) ); - } - } - - // Prefix every selector in the list - groups = tokenize( selector ); - i = groups.length; - while ( i-- ) { - groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + - toSelector( groups[ i ] ); - } - newSelector = groups.join( "," ); - } - - try { - push.apply( results, - newContext.querySelectorAll( newSelector ) - ); - return results; - } catch ( qsaError ) { - nonnativeSelectorCache( selector, true ); - } finally { - if ( nid === expando ) { - context.removeAttribute( "id" ); - } - } - } - } - } - - // All others - return select( selector.replace( rtrim, "$1" ), context, results, seed ); -} - -/** - * Create key-value caches of limited size - * @returns {function(string, object)} Returns the Object data after storing it on itself with - * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) - * deleting the oldest entry - */ -function createCache() { - var keys = []; - - function cache( key, value ) { - - // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) - if ( keys.push( key + " " ) > Expr.cacheLength ) { - - // Only keep the most recent entries - delete cache[ keys.shift() ]; - } - return ( cache[ key + " " ] = value ); - } - return cache; -} - -/** - * Mark a function for special use by Sizzle - * @param {Function} fn The function to mark - */ -function markFunction( fn ) { - fn[ expando ] = true; - return fn; -} - -/** - * Support testing using an element - * @param {Function} fn Passed the created element and returns a boolean result - */ -function assert( fn ) { - var el = document.createElement( "fieldset" ); - - try { - return !!fn( el ); - } catch ( e ) { - return false; - } finally { - - // Remove from its parent by default - if ( el.parentNode ) { - el.parentNode.removeChild( el ); - } - - // release memory in IE - el = null; - } -} - -/** - * Adds the same handler for all of the specified attrs - * @param {String} attrs Pipe-separated list of attributes - * @param {Function} handler The method that will be applied - */ -function addHandle( attrs, handler ) { - var arr = attrs.split( "|" ), - i = arr.length; - - while ( i-- ) { - Expr.attrHandle[ arr[ i ] ] = handler; - } -} - -/** - * Checks document order of two siblings - * @param {Element} a - * @param {Element} b - * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b - */ -function siblingCheck( a, b ) { - var cur = b && a, - diff = cur && a.nodeType === 1 && b.nodeType === 1 && - a.sourceIndex - b.sourceIndex; - - // Use IE sourceIndex if available on both nodes - if ( diff ) { - return diff; - } - - // Check if b follows a - if ( cur ) { - while ( ( cur = cur.nextSibling ) ) { - if ( cur === b ) { - return -1; - } - } - } - - return a ? 
1 : -1; -} - -/** - * Returns a function to use in pseudos for input types - * @param {String} type - */ -function createInputPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for buttons - * @param {String} type - */ -function createButtonPseudo( type ) { - return function( elem ) { - var name = elem.nodeName.toLowerCase(); - return ( name === "input" || name === "button" ) && elem.type === type; - }; -} - -/** - * Returns a function to use in pseudos for :enabled/:disabled - * @param {Boolean} disabled true for :disabled; false for :enabled - */ -function createDisabledPseudo( disabled ) { - - // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable - return function( elem ) { - - // Only certain elements can match :enabled or :disabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled - // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled - if ( "form" in elem ) { - - // Check for inherited disabledness on relevant non-disabled elements: - // * listed form-associated elements in a disabled fieldset - // https://html.spec.whatwg.org/multipage/forms.html#category-listed - // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled - // * option elements in a disabled optgroup - // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled - // All such elements have a "form" property. - if ( elem.parentNode && elem.disabled === false ) { - - // Option elements defer to a parent optgroup if present - if ( "label" in elem ) { - if ( "label" in elem.parentNode ) { - return elem.parentNode.disabled === disabled; - } else { - return elem.disabled === disabled; - } - } - - // Support: IE 6 - 11 - // Use the isDisabled shortcut property to check for disabled fieldset ancestors - return elem.isDisabled === disabled || - - // Where there is no isDisabled, check manually - /* jshint -W018 */ - elem.isDisabled !== !disabled && - inDisabledFieldset( elem ) === disabled; - } - - return elem.disabled === disabled; - - // Try to winnow out elements that can't be disabled before trusting the disabled property. - // Some victims get caught in our net (label, legend, menu, track), but it shouldn't - // even exist on them, let alone have a boolean value. 
- } else if ( "label" in elem ) { - return elem.disabled === disabled; - } - - // Remaining elements are neither :enabled nor :disabled - return false; - }; -} - -/** - * Returns a function to use in pseudos for positionals - * @param {Function} fn - */ -function createPositionalPseudo( fn ) { - return markFunction( function( argument ) { - argument = +argument; - return markFunction( function( seed, matches ) { - var j, - matchIndexes = fn( [], seed.length, argument ), - i = matchIndexes.length; - - // Match elements found at the specified indexes - while ( i-- ) { - if ( seed[ ( j = matchIndexes[ i ] ) ] ) { - seed[ j ] = !( matches[ j ] = seed[ j ] ); - } - } - } ); - } ); -} - -/** - * Checks a node for validity as a Sizzle context - * @param {Element|Object=} context - * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value - */ -function testContext( context ) { - return context && typeof context.getElementsByTagName !== "undefined" && context; -} - -// Expose support vars for convenience -support = Sizzle.support = {}; - -/** - * Detects XML nodes - * @param {Element|Object} elem An element or a document - * @returns {Boolean} True iff elem is a non-HTML XML node - */ -isXML = Sizzle.isXML = function( elem ) { - var namespace = elem.namespaceURI, - docElem = ( elem.ownerDocument || elem ).documentElement; - - // Support: IE <=8 - // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes - // https://bugs.jquery.com/ticket/4833 - return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); -}; - -/** - * Sets document-related variables once based on the current document - * @param {Element|Object} [doc] An element or document object to use to set the document - * @returns {Object} Returns the current document - */ -setDocument = Sizzle.setDocument = function( node ) { - var hasCompare, subWindow, - doc = node ? node.ownerDocument || node : preferredDoc; - - // Return early if doc is invalid or already selected - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { - return document; - } - - // Update global variables - document = doc; - docElem = document.documentElement; - documentIsHTML = !isXML( document ); - - // Support: IE 9 - 11+, Edge 12 - 18+ - // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( preferredDoc != document && - ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { - - // Support: IE 11, Edge - if ( subWindow.addEventListener ) { - subWindow.addEventListener( "unload", unloadHandler, false ); - - // Support: IE 9 - 10 only - } else if ( subWindow.attachEvent ) { - subWindow.attachEvent( "onunload", unloadHandler ); - } - } - - // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, - // Safari 4 - 5 only, Opera <=11.6 - 12.x only - // IE/Edge & older browsers don't support the :scope pseudo-class. - // Support: Safari 6.0 only - // Safari 6.0 supports :scope but it's an alias of :root there. 
- support.scope = assert( function( el ) { - docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); - return typeof el.querySelectorAll !== "undefined" && - !el.querySelectorAll( ":scope fieldset div" ).length; - } ); - - /* Attributes - ---------------------------------------------------------------------- */ - - // Support: IE<8 - // Verify that getAttribute really returns attributes and not properties - // (excepting IE8 booleans) - support.attributes = assert( function( el ) { - el.className = "i"; - return !el.getAttribute( "className" ); - } ); - - /* getElement(s)By* - ---------------------------------------------------------------------- */ - - // Check if getElementsByTagName("*") returns only elements - support.getElementsByTagName = assert( function( el ) { - el.appendChild( document.createComment( "" ) ); - return !el.getElementsByTagName( "*" ).length; - } ); - - // Support: IE<9 - support.getElementsByClassName = rnative.test( document.getElementsByClassName ); - - // Support: IE<10 - // Check if getElementById returns elements by name - // The broken getElementById methods don't pick up programmatically-set names, - // so use a roundabout getElementsByName test - support.getById = assert( function( el ) { - docElem.appendChild( el ).id = expando; - return !document.getElementsByName || !document.getElementsByName( expando ).length; - } ); - - // ID filter and find - if ( support.getById ) { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - return elem.getAttribute( "id" ) === attrId; - }; - }; - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var elem = context.getElementById( id ); - return elem ? [ elem ] : []; - } - }; - } else { - Expr.filter[ "ID" ] = function( id ) { - var attrId = id.replace( runescape, funescape ); - return function( elem ) { - var node = typeof elem.getAttributeNode !== "undefined" && - elem.getAttributeNode( "id" ); - return node && node.value === attrId; - }; - }; - - // Support: IE 6 - 7 only - // getElementById is not reliable as a find shortcut - Expr.find[ "ID" ] = function( id, context ) { - if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { - var node, i, elems, - elem = context.getElementById( id ); - - if ( elem ) { - - // Verify the id attribute - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - - // Fall back on getElementsByName - elems = context.getElementsByName( id ); - i = 0; - while ( ( elem = elems[ i++ ] ) ) { - node = elem.getAttributeNode( "id" ); - if ( node && node.value === id ) { - return [ elem ]; - } - } - } - - return []; - } - }; - } - - // Tag - Expr.find[ "TAG" ] = support.getElementsByTagName ? 
- function( tag, context ) { - if ( typeof context.getElementsByTagName !== "undefined" ) { - return context.getElementsByTagName( tag ); - - // DocumentFragment nodes don't have gEBTN - } else if ( support.qsa ) { - return context.querySelectorAll( tag ); - } - } : - - function( tag, context ) { - var elem, - tmp = [], - i = 0, - - // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too - results = context.getElementsByTagName( tag ); - - // Filter out possible comments - if ( tag === "*" ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem.nodeType === 1 ) { - tmp.push( elem ); - } - } - - return tmp; - } - return results; - }; - - // Class - Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) { - if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { - return context.getElementsByClassName( className ); - } - }; - - /* QSA/matchesSelector - ---------------------------------------------------------------------- */ - - // QSA and matchesSelector support - - // matchesSelector(:active) reports false when true (IE9/Opera 11.5) - rbuggyMatches = []; - - // qSa(:focus) reports false when true (Chrome 21) - // We allow this because of a bug in IE8/9 that throws an error - // whenever `document.activeElement` is accessed on an iframe - // So, we allow :focus to pass through QSA all the time to avoid the IE error - // See https://bugs.jquery.com/ticket/13378 - rbuggyQSA = []; - - if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) { - - // Build QSA regex - // Regex strategy adopted from Diego Perini - assert( function( el ) { - - var input; - - // Select is set to empty string on purpose - // This is to test IE's treatment of not explicitly - // setting a boolean content attribute, - // since its presence should be enough - // https://bugs.jquery.com/ticket/12359 - docElem.appendChild( el ).innerHTML = "" + - ""; - - // Support: IE8, Opera 11-12.16 - // Nothing should be selected when empty strings follow ^= or $= or *= - // The test attribute must be unknown in Opera but "safe" for WinRT - // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section - if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) { - rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); - } - - // Support: IE8 - // Boolean attributes and "value" are not treated correctly - if ( !el.querySelectorAll( "[selected]" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); - } - - // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ - if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { - rbuggyQSA.push( "~=" ); - } - - // Support: IE 11+, Edge 15 - 18+ - // IE 11/Edge don't find elements on a `[name='']` query in some cases. - // Adding a temporary attribute to the document before the selection works - // around the issue. - // Interestingly, IE 10 & older don't seem to have the issue. 
- input = document.createElement( "input" ); - input.setAttribute( "name", "" ); - el.appendChild( input ); - if ( !el.querySelectorAll( "[name='']" ).length ) { - rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" + - whitespace + "*(?:''|\"\")" ); - } - - // Webkit/Opera - :checked should return selected option elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests - if ( !el.querySelectorAll( ":checked" ).length ) { - rbuggyQSA.push( ":checked" ); - } - - // Support: Safari 8+, iOS 8+ - // https://bugs.webkit.org/show_bug.cgi?id=136851 - // In-page `selector#id sibling-combinator selector` fails - if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { - rbuggyQSA.push( ".#.+[+~]" ); - } - - // Support: Firefox <=3.6 - 5 only - // Old Firefox doesn't throw on a badly-escaped identifier. - el.querySelectorAll( "\\\f" ); - rbuggyQSA.push( "[\\r\\n\\f]" ); - } ); - - assert( function( el ) { - el.innerHTML = "" + - ""; - - // Support: Windows 8 Native Apps - // The type and name attributes are restricted during .innerHTML assignment - var input = document.createElement( "input" ); - input.setAttribute( "type", "hidden" ); - el.appendChild( input ).setAttribute( "name", "D" ); - - // Support: IE8 - // Enforce case-sensitivity of name attribute - if ( el.querySelectorAll( "[name=d]" ).length ) { - rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); - } - - // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests - if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: IE9-11+ - // IE's :disabled selector does not pick up the children of disabled fieldsets - docElem.appendChild( el ).disabled = true; - if ( el.querySelectorAll( ":disabled" ).length !== 2 ) { - rbuggyQSA.push( ":enabled", ":disabled" ); - } - - // Support: Opera 10 - 11 only - // Opera 10-11 does not throw on post-comma invalid pseudos - el.querySelectorAll( "*,:x" ); - rbuggyQSA.push( ",.*:" ); - } ); - } - - if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches || - docElem.webkitMatchesSelector || - docElem.mozMatchesSelector || - docElem.oMatchesSelector || - docElem.msMatchesSelector ) ) ) ) { - - assert( function( el ) { - - // Check to see if it's possible to do matchesSelector - // on a disconnected node (IE 9) - support.disconnectedMatch = matches.call( el, "*" ); - - // This should fail with an exception - // Gecko does not error, returns false instead - matches.call( el, "[s!='']:x" ); - rbuggyMatches.push( "!=", pseudos ); - } ); - } - - rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) ); - rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) ); - - /* Contains - ---------------------------------------------------------------------- */ - hasCompare = rnative.test( docElem.compareDocumentPosition ); - - // Element contains another - // Purposefully self-exclusive - // As in, an element does not contain itself - contains = hasCompare || rnative.test( docElem.contains ) ? - function( a, b ) { - var adown = a.nodeType === 9 ? a.documentElement : a, - bup = b && b.parentNode; - return a === bup || !!( bup && bup.nodeType === 1 && ( - adown.contains ? 
- adown.contains( bup ) : - a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 - ) ); - } : - function( a, b ) { - if ( b ) { - while ( ( b = b.parentNode ) ) { - if ( b === a ) { - return true; - } - } - } - return false; - }; - - /* Sorting - ---------------------------------------------------------------------- */ - - // Document order sorting - sortOrder = hasCompare ? - function( a, b ) { - - // Flag for duplicate removal - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - // Sort on method existence if only one input has compareDocumentPosition - var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; - if ( compare ) { - return compare; - } - - // Calculate position if both inputs belong to the same document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? - a.compareDocumentPosition( b ) : - - // Otherwise we know they are disconnected - 1; - - // Disconnected nodes - if ( compare & 1 || - ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { - - // Choose the first element that is related to our preferred document - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( a == document || a.ownerDocument == preferredDoc && - contains( preferredDoc, a ) ) { - return -1; - } - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( b == document || b.ownerDocument == preferredDoc && - contains( preferredDoc, b ) ) { - return 1; - } - - // Maintain original order - return sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - } - - return compare & 4 ? -1 : 1; - } : - function( a, b ) { - - // Exit early if the nodes are identical - if ( a === b ) { - hasDuplicate = true; - return 0; - } - - var cur, - i = 0, - aup = a.parentNode, - bup = b.parentNode, - ap = [ a ], - bp = [ b ]; - - // Parentless nodes are either documents or disconnected - if ( !aup || !bup ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - /* eslint-disable eqeqeq */ - return a == document ? -1 : - b == document ? 1 : - /* eslint-enable eqeqeq */ - aup ? -1 : - bup ? 1 : - sortInput ? - ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : - 0; - - // If the nodes are siblings, we can do a quick check - } else if ( aup === bup ) { - return siblingCheck( a, b ); - } - - // Otherwise we need full lists of their ancestors for comparison - cur = a; - while ( ( cur = cur.parentNode ) ) { - ap.unshift( cur ); - } - cur = b; - while ( ( cur = cur.parentNode ) ) { - bp.unshift( cur ); - } - - // Walk down the tree looking for a discrepancy - while ( ap[ i ] === bp[ i ] ) { - i++; - } - - return i ? - - // Do a sibling check if the nodes have a common ancestor - siblingCheck( ap[ i ], bp[ i ] ) : - - // Otherwise nodes in our document sort first - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. 
- /* eslint-disable eqeqeq */ - ap[ i ] == preferredDoc ? -1 : - bp[ i ] == preferredDoc ? 1 : - /* eslint-enable eqeqeq */ - 0; - }; - - return document; -}; - -Sizzle.matches = function( expr, elements ) { - return Sizzle( expr, null, null, elements ); -}; - -Sizzle.matchesSelector = function( elem, expr ) { - setDocument( elem ); - - if ( support.matchesSelector && documentIsHTML && - !nonnativeSelectorCache[ expr + " " ] && - ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && - ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { - - try { - var ret = matches.call( elem, expr ); - - // IE 9's matchesSelector returns false on disconnected nodes - if ( ret || support.disconnectedMatch || - - // As well, disconnected nodes are said to be in a document - // fragment in IE 9 - elem.document && elem.document.nodeType !== 11 ) { - return ret; - } - } catch ( e ) { - nonnativeSelectorCache( expr, true ); - } - } - - return Sizzle( expr, document, null, [ elem ] ).length > 0; -}; - -Sizzle.contains = function( context, elem ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( context.ownerDocument || context ) != document ) { - setDocument( context ); - } - return contains( context, elem ); -}; - -Sizzle.attr = function( elem, name ) { - - // Set document vars if needed - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( ( elem.ownerDocument || elem ) != document ) { - setDocument( elem ); - } - - var fn = Expr.attrHandle[ name.toLowerCase() ], - - // Don't get fooled by Object.prototype properties (jQuery #13807) - val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? - fn( elem, name, !documentIsHTML ) : - undefined; - - return val !== undefined ? - val : - support.attributes || !documentIsHTML ? - elem.getAttribute( name ) : - ( val = elem.getAttributeNode( name ) ) && val.specified ? 
- val.value : - null; -}; - -Sizzle.escape = function( sel ) { - return ( sel + "" ).replace( rcssescape, fcssescape ); -}; - -Sizzle.error = function( msg ) { - throw new Error( "Syntax error, unrecognized expression: " + msg ); -}; - -/** - * Document sorting and removing duplicates - * @param {ArrayLike} results - */ -Sizzle.uniqueSort = function( results ) { - var elem, - duplicates = [], - j = 0, - i = 0; - - // Unless we *know* we can detect duplicates, assume their presence - hasDuplicate = !support.detectDuplicates; - sortInput = !support.sortStable && results.slice( 0 ); - results.sort( sortOrder ); - - if ( hasDuplicate ) { - while ( ( elem = results[ i++ ] ) ) { - if ( elem === results[ i ] ) { - j = duplicates.push( i ); - } - } - while ( j-- ) { - results.splice( duplicates[ j ], 1 ); - } - } - - // Clear input after sorting to release objects - // See https://github.com/jquery/sizzle/pull/225 - sortInput = null; - - return results; -}; - -/** - * Utility function for retrieving the text value of an array of DOM nodes - * @param {Array|Element} elem - */ -getText = Sizzle.getText = function( elem ) { - var node, - ret = "", - i = 0, - nodeType = elem.nodeType; - - if ( !nodeType ) { - - // If no nodeType, this is expected to be an array - while ( ( node = elem[ i++ ] ) ) { - - // Do not traverse comment nodes - ret += getText( node ); - } - } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { - - // Use textContent for elements - // innerText usage removed for consistency of new lines (jQuery #11153) - if ( typeof elem.textContent === "string" ) { - return elem.textContent; - } else { - - // Traverse its children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - ret += getText( elem ); - } - } - } else if ( nodeType === 3 || nodeType === 4 ) { - return elem.nodeValue; - } - - // Do not include comment or processing instruction nodes - - return ret; -}; - -Expr = Sizzle.selectors = { - - // Can be adjusted by the user - cacheLength: 50, - - createPseudo: markFunction, - - match: matchExpr, - - attrHandle: {}, - - find: {}, - - relative: { - ">": { dir: "parentNode", first: true }, - " ": { dir: "parentNode" }, - "+": { dir: "previousSibling", first: true }, - "~": { dir: "previousSibling" } - }, - - preFilter: { - "ATTR": function( match ) { - match[ 1 ] = match[ 1 ].replace( runescape, funescape ); - - // Move the given value to match[3] whether quoted or unquoted - match[ 3 ] = ( match[ 3 ] || match[ 4 ] || - match[ 5 ] || "" ).replace( runescape, funescape ); - - if ( match[ 2 ] === "~=" ) { - match[ 3 ] = " " + match[ 3 ] + " "; - } - - return match.slice( 0, 4 ); - }, - - "CHILD": function( match ) { - - /* matches from matchExpr["CHILD"] - 1 type (only|nth|...) - 2 what (child|of-type) - 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) - 4 xn-component of xn+y argument ([+-]?\d*n|) - 5 sign of xn-component - 6 x of xn-component - 7 sign of y-component - 8 y of y-component - */ - match[ 1 ] = match[ 1 ].toLowerCase(); - - if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { - - // nth-* requires argument - if ( !match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - // numeric x and y parameters for Expr.filter.CHILD - // remember that false/true cast respectively to 0/1 - match[ 4 ] = +( match[ 4 ] ? 
- match[ 5 ] + ( match[ 6 ] || 1 ) : - 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); - match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); - - // other types prohibit arguments - } else if ( match[ 3 ] ) { - Sizzle.error( match[ 0 ] ); - } - - return match; - }, - - "PSEUDO": function( match ) { - var excess, - unquoted = !match[ 6 ] && match[ 2 ]; - - if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { - return null; - } - - // Accept quoted arguments as-is - if ( match[ 3 ] ) { - match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; - - // Strip excess characters from unquoted arguments - } else if ( unquoted && rpseudo.test( unquoted ) && - - // Get excess from tokenize (recursively) - ( excess = tokenize( unquoted, true ) ) && - - // advance to the next closing parenthesis - ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { - - // excess is a negative index - match[ 0 ] = match[ 0 ].slice( 0, excess ); - match[ 2 ] = unquoted.slice( 0, excess ); - } - - // Return only captures needed by the pseudo filter method (type and argument) - return match.slice( 0, 3 ); - } - }, - - filter: { - - "TAG": function( nodeNameSelector ) { - var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); - return nodeNameSelector === "*" ? - function() { - return true; - } : - function( elem ) { - return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; - }; - }, - - "CLASS": function( className ) { - var pattern = classCache[ className + " " ]; - - return pattern || - ( pattern = new RegExp( "(^|" + whitespace + - ")" + className + "(" + whitespace + "|$)" ) ) && classCache( - className, function( elem ) { - return pattern.test( - typeof elem.className === "string" && elem.className || - typeof elem.getAttribute !== "undefined" && - elem.getAttribute( "class" ) || - "" - ); - } ); - }, - - "ATTR": function( name, operator, check ) { - return function( elem ) { - var result = Sizzle.attr( elem, name ); - - if ( result == null ) { - return operator === "!="; - } - if ( !operator ) { - return true; - } - - result += ""; - - /* eslint-disable max-len */ - - return operator === "=" ? result === check : - operator === "!=" ? result !== check : - operator === "^=" ? check && result.indexOf( check ) === 0 : - operator === "*=" ? check && result.indexOf( check ) > -1 : - operator === "$=" ? check && result.slice( -check.length ) === check : - operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : - operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : - false; - /* eslint-enable max-len */ - - }; - }, - - "CHILD": function( type, what, _argument, first, last ) { - var simple = type.slice( 0, 3 ) !== "nth", - forward = type.slice( -4 ) !== "last", - ofType = what === "of-type"; - - return first === 1 && last === 0 ? - - // Shortcut for :nth-*(n) - function( elem ) { - return !!elem.parentNode; - } : - - function( elem, _context, xml ) { - var cache, uniqueCache, outerCache, node, nodeIndex, start, - dir = simple !== forward ? "nextSibling" : "previousSibling", - parent = elem.parentNode, - name = ofType && elem.nodeName.toLowerCase(), - useCache = !xml && !ofType, - diff = false; - - if ( parent ) { - - // :(first|last|only)-(child|of-type) - if ( simple ) { - while ( dir ) { - node = elem; - while ( ( node = node[ dir ] ) ) { - if ( ofType ? 
- node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) { - - return false; - } - } - - // Reverse direction for :only-* (if we haven't yet done so) - start = dir = type === "only" && !start && "nextSibling"; - } - return true; - } - - start = [ forward ? parent.firstChild : parent.lastChild ]; - - // non-xml :nth-child(...) stores cache data on `parent` - if ( forward && useCache ) { - - // Seek `elem` from a previously-cached index - - // ...in a gzip-friendly way - node = parent; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex && cache[ 2 ]; - node = nodeIndex && parent.childNodes[ nodeIndex ]; - - while ( ( node = ++nodeIndex && node && node[ dir ] || - - // Fallback to seeking `elem` from the start - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - // When found, cache indexes on `parent` and break - if ( node.nodeType === 1 && ++diff && node === elem ) { - uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; - break; - } - } - - } else { - - // Use previously-cached element index if available - if ( useCache ) { - - // ...in a gzip-friendly way - node = elem; - outerCache = node[ expando ] || ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - cache = uniqueCache[ type ] || []; - nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; - diff = nodeIndex; - } - - // xml :nth-child(...) - // or :nth-last-child(...) or :nth(-last)?-of-type(...) - if ( diff === false ) { - - // Use the same loop as above to seek `elem` from the start - while ( ( node = ++nodeIndex && node && node[ dir ] || - ( diff = nodeIndex = 0 ) || start.pop() ) ) { - - if ( ( ofType ? - node.nodeName.toLowerCase() === name : - node.nodeType === 1 ) && - ++diff ) { - - // Cache the index of each encountered element - if ( useCache ) { - outerCache = node[ expando ] || - ( node[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ node.uniqueID ] || - ( outerCache[ node.uniqueID ] = {} ); - - uniqueCache[ type ] = [ dirruns, diff ]; - } - - if ( node === elem ) { - break; - } - } - } - } - } - - // Incorporate the offset, then check against cycle size - diff -= last; - return diff === first || ( diff % first === 0 && diff / first >= 0 ); - } - }; - }, - - "PSEUDO": function( pseudo, argument ) { - - // pseudo-class names are case-insensitive - // http://www.w3.org/TR/selectors/#pseudo-classes - // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters - // Remember that setFilters inherits from pseudos - var args, - fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || - Sizzle.error( "unsupported pseudo: " + pseudo ); - - // The user may use createPseudo to indicate that - // arguments are needed to create the filter function - // just as Sizzle does - if ( fn[ expando ] ) { - return fn( argument ); - } - - // But maintain support for old signatures - if ( fn.length > 1 ) { - args = [ pseudo, pseudo, "", argument ]; - return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
- markFunction( function( seed, matches ) { - var idx, - matched = fn( seed, argument ), - i = matched.length; - while ( i-- ) { - idx = indexOf( seed, matched[ i ] ); - seed[ idx ] = !( matches[ idx ] = matched[ i ] ); - } - } ) : - function( elem ) { - return fn( elem, 0, args ); - }; - } - - return fn; - } - }, - - pseudos: { - - // Potentially complex pseudos - "not": markFunction( function( selector ) { - - // Trim the selector passed to compile - // to avoid treating leading and trailing - // spaces as combinators - var input = [], - results = [], - matcher = compile( selector.replace( rtrim, "$1" ) ); - - return matcher[ expando ] ? - markFunction( function( seed, matches, _context, xml ) { - var elem, - unmatched = matcher( seed, null, xml, [] ), - i = seed.length; - - // Match elements unmatched by `matcher` - while ( i-- ) { - if ( ( elem = unmatched[ i ] ) ) { - seed[ i ] = !( matches[ i ] = elem ); - } - } - } ) : - function( elem, _context, xml ) { - input[ 0 ] = elem; - matcher( input, null, xml, results ); - - // Don't keep the element (issue #299) - input[ 0 ] = null; - return !results.pop(); - }; - } ), - - "has": markFunction( function( selector ) { - return function( elem ) { - return Sizzle( selector, elem ).length > 0; - }; - } ), - - "contains": markFunction( function( text ) { - text = text.replace( runescape, funescape ); - return function( elem ) { - return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; - }; - } ), - - // "Whether an element is represented by a :lang() selector - // is based solely on the element's language value - // being equal to the identifier C, - // or beginning with the identifier C immediately followed by "-". - // The matching of C against the element's language value is performed case-insensitively. - // The identifier C does not have to be a valid language name." - // http://www.w3.org/TR/selectors/#lang-pseudo - "lang": markFunction( function( lang ) { - - // lang value must be a valid identifier - if ( !ridentifier.test( lang || "" ) ) { - Sizzle.error( "unsupported lang: " + lang ); - } - lang = lang.replace( runescape, funescape ).toLowerCase(); - return function( elem ) { - var elemLang; - do { - if ( ( elemLang = documentIsHTML ? 
- elem.lang : - elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { - - elemLang = elemLang.toLowerCase(); - return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; - } - } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); - return false; - }; - } ), - - // Miscellaneous - "target": function( elem ) { - var hash = window.location && window.location.hash; - return hash && hash.slice( 1 ) === elem.id; - }, - - "root": function( elem ) { - return elem === docElem; - }, - - "focus": function( elem ) { - return elem === document.activeElement && - ( !document.hasFocus || document.hasFocus() ) && - !!( elem.type || elem.href || ~elem.tabIndex ); - }, - - // Boolean properties - "enabled": createDisabledPseudo( false ), - "disabled": createDisabledPseudo( true ), - - "checked": function( elem ) { - - // In CSS3, :checked should return both checked and selected elements - // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - var nodeName = elem.nodeName.toLowerCase(); - return ( nodeName === "input" && !!elem.checked ) || - ( nodeName === "option" && !!elem.selected ); - }, - - "selected": function( elem ) { - - // Accessing this property makes selected-by-default - // options in Safari work properly - if ( elem.parentNode ) { - // eslint-disable-next-line no-unused-expressions - elem.parentNode.selectedIndex; - } - - return elem.selected === true; - }, - - // Contents - "empty": function( elem ) { - - // http://www.w3.org/TR/selectors/#empty-pseudo - // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), - // but not by others (comment: 8; processing instruction: 7; etc.) - // nodeType < 6 works because attributes (2) do not appear as children - for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { - if ( elem.nodeType < 6 ) { - return false; - } - } - return true; - }, - - "parent": function( elem ) { - return !Expr.pseudos[ "empty" ]( elem ); - }, - - // Element/input types - "header": function( elem ) { - return rheader.test( elem.nodeName ); - }, - - "input": function( elem ) { - return rinputs.test( elem.nodeName ); - }, - - "button": function( elem ) { - var name = elem.nodeName.toLowerCase(); - return name === "input" && elem.type === "button" || name === "button"; - }, - - "text": function( elem ) { - var attr; - return elem.nodeName.toLowerCase() === "input" && - elem.type === "text" && - - // Support: IE<8 - // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" - ( ( attr = elem.getAttribute( "type" ) ) == null || - attr.toLowerCase() === "text" ); - }, - - // Position-in-collection - "first": createPositionalPseudo( function() { - return [ 0 ]; - } ), - - "last": createPositionalPseudo( function( _matchIndexes, length ) { - return [ length - 1 ]; - } ), - - "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { - return [ argument < 0 ? argument + length : argument ]; - } ), - - "even": createPositionalPseudo( function( matchIndexes, length ) { - var i = 0; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "odd": createPositionalPseudo( function( matchIndexes, length ) { - var i = 1; - for ( ; i < length; i += 2 ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? - argument + length : - argument > length ? 
- length : - argument; - for ( ; --i >= 0; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ), - - "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { - var i = argument < 0 ? argument + length : argument; - for ( ; ++i < length; ) { - matchIndexes.push( i ); - } - return matchIndexes; - } ) - } -}; - -Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; - -// Add button/input type pseudos -for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { - Expr.pseudos[ i ] = createInputPseudo( i ); -} -for ( i in { submit: true, reset: true } ) { - Expr.pseudos[ i ] = createButtonPseudo( i ); -} - -// Easy API for creating new setFilters -function setFilters() {} -setFilters.prototype = Expr.filters = Expr.pseudos; -Expr.setFilters = new setFilters(); - -tokenize = Sizzle.tokenize = function( selector, parseOnly ) { - var matched, match, tokens, type, - soFar, groups, preFilters, - cached = tokenCache[ selector + " " ]; - - if ( cached ) { - return parseOnly ? 0 : cached.slice( 0 ); - } - - soFar = selector; - groups = []; - preFilters = Expr.preFilter; - - while ( soFar ) { - - // Comma and first run - if ( !matched || ( match = rcomma.exec( soFar ) ) ) { - if ( match ) { - - // Don't consume trailing commas as valid - soFar = soFar.slice( match[ 0 ].length ) || soFar; - } - groups.push( ( tokens = [] ) ); - } - - matched = false; - - // Combinators - if ( ( match = rcombinators.exec( soFar ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - - // Cast descendant combinators to space - type: match[ 0 ].replace( rtrim, " " ) - } ); - soFar = soFar.slice( matched.length ); - } - - // Filters - for ( type in Expr.filter ) { - if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || - ( match = preFilters[ type ]( match ) ) ) ) { - matched = match.shift(); - tokens.push( { - value: matched, - type: type, - matches: match - } ); - soFar = soFar.slice( matched.length ); - } - } - - if ( !matched ) { - break; - } - } - - // Return the length of the invalid excess - // if we're just parsing - // Otherwise, throw an error or return tokens - return parseOnly ? - soFar.length : - soFar ? - Sizzle.error( selector ) : - - // Cache the tokens - tokenCache( selector, groups ).slice( 0 ); -}; - -function toSelector( tokens ) { - var i = 0, - len = tokens.length, - selector = ""; - for ( ; i < len; i++ ) { - selector += tokens[ i ].value; - } - return selector; -} - -function addCombinator( matcher, combinator, base ) { - var dir = combinator.dir, - skip = combinator.next, - key = skip || dir, - checkNonElements = base && key === "parentNode", - doneName = done++; - - return combinator.first ? 
- - // Check against closest ancestor/preceding element - function( elem, context, xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - return matcher( elem, context, xml ); - } - } - return false; - } : - - // Check against all ancestor/preceding elements - function( elem, context, xml ) { - var oldCache, uniqueCache, outerCache, - newCache = [ dirruns, doneName ]; - - // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching - if ( xml ) { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - if ( matcher( elem, context, xml ) ) { - return true; - } - } - } - } else { - while ( ( elem = elem[ dir ] ) ) { - if ( elem.nodeType === 1 || checkNonElements ) { - outerCache = elem[ expando ] || ( elem[ expando ] = {} ); - - // Support: IE <9 only - // Defend against cloned attroperties (jQuery gh-1709) - uniqueCache = outerCache[ elem.uniqueID ] || - ( outerCache[ elem.uniqueID ] = {} ); - - if ( skip && skip === elem.nodeName.toLowerCase() ) { - elem = elem[ dir ] || elem; - } else if ( ( oldCache = uniqueCache[ key ] ) && - oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { - - // Assign to newCache so results back-propagate to previous elements - return ( newCache[ 2 ] = oldCache[ 2 ] ); - } else { - - // Reuse newcache so results back-propagate to previous elements - uniqueCache[ key ] = newCache; - - // A match means we're done; a fail means we have to keep checking - if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { - return true; - } - } - } - } - } - return false; - }; -} - -function elementMatcher( matchers ) { - return matchers.length > 1 ? - function( elem, context, xml ) { - var i = matchers.length; - while ( i-- ) { - if ( !matchers[ i ]( elem, context, xml ) ) { - return false; - } - } - return true; - } : - matchers[ 0 ]; -} - -function multipleContexts( selector, contexts, results ) { - var i = 0, - len = contexts.length; - for ( ; i < len; i++ ) { - Sizzle( selector, contexts[ i ], results ); - } - return results; -} - -function condense( unmatched, map, filter, context, xml ) { - var elem, - newUnmatched = [], - i = 0, - len = unmatched.length, - mapped = map != null; - - for ( ; i < len; i++ ) { - if ( ( elem = unmatched[ i ] ) ) { - if ( !filter || filter( elem, context, xml ) ) { - newUnmatched.push( elem ); - if ( mapped ) { - map.push( i ); - } - } - } - } - - return newUnmatched; -} - -function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { - if ( postFilter && !postFilter[ expando ] ) { - postFilter = setMatcher( postFilter ); - } - if ( postFinder && !postFinder[ expando ] ) { - postFinder = setMatcher( postFinder, postSelector ); - } - return markFunction( function( seed, results, context, xml ) { - var temp, i, elem, - preMap = [], - postMap = [], - preexisting = results.length, - - // Get initial elements from seed or context - elems = seed || multipleContexts( - selector || "*", - context.nodeType ? [ context ] : context, - [] - ), - - // Prefilter to get matcher input, preserving a map for seed-results synchronization - matcherIn = preFilter && ( seed || !selector ) ? - condense( elems, preMap, preFilter, context, xml ) : - elems, - - matcherOut = matcher ? - - // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, - postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
- - // ...intermediate processing is necessary - [] : - - // ...otherwise use results directly - results : - matcherIn; - - // Find primary matches - if ( matcher ) { - matcher( matcherIn, matcherOut, context, xml ); - } - - // Apply postFilter - if ( postFilter ) { - temp = condense( matcherOut, postMap ); - postFilter( temp, [], context, xml ); - - // Un-match failing elements by moving them back to matcherIn - i = temp.length; - while ( i-- ) { - if ( ( elem = temp[ i ] ) ) { - matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); - } - } - } - - if ( seed ) { - if ( postFinder || preFilter ) { - if ( postFinder ) { - - // Get the final matcherOut by condensing this intermediate into postFinder contexts - temp = []; - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) ) { - - // Restore matcherIn since elem is not yet a final match - temp.push( ( matcherIn[ i ] = elem ) ); - } - } - postFinder( null, ( matcherOut = [] ), temp, xml ); - } - - // Move matched elements from seed to results to keep them synchronized - i = matcherOut.length; - while ( i-- ) { - if ( ( elem = matcherOut[ i ] ) && - ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { - - seed[ temp ] = !( results[ temp ] = elem ); - } - } - } - - // Add elements to results, through postFinder if defined - } else { - matcherOut = condense( - matcherOut === results ? - matcherOut.splice( preexisting, matcherOut.length ) : - matcherOut - ); - if ( postFinder ) { - postFinder( null, results, matcherOut, xml ); - } else { - push.apply( results, matcherOut ); - } - } - } ); -} - -function matcherFromTokens( tokens ) { - var checkContext, matcher, j, - len = tokens.length, - leadingRelative = Expr.relative[ tokens[ 0 ].type ], - implicitRelative = leadingRelative || Expr.relative[ " " ], - i = leadingRelative ? 1 : 0, - - // The foundational matcher ensures that elements are reachable from top-level context(s) - matchContext = addCombinator( function( elem ) { - return elem === checkContext; - }, implicitRelative, true ), - matchAnyContext = addCombinator( function( elem ) { - return indexOf( checkContext, elem ) > -1; - }, implicitRelative, true ), - matchers = [ function( elem, context, xml ) { - var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( - ( checkContext = context ).nodeType ? - matchContext( elem, context, xml ) : - matchAnyContext( elem, context, xml ) ); - - // Avoid hanging onto element (issue #299) - checkContext = null; - return ret; - } ]; - - for ( ; i < len; i++ ) { - if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { - matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; - } else { - matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); - - // Return special upon seeing a positional matcher - if ( matcher[ expando ] ) { - - // Find the next relative operator (if any) for proper handling - j = ++i; - for ( ; j < len; j++ ) { - if ( Expr.relative[ tokens[ j ].type ] ) { - break; - } - } - return setMatcher( - i > 1 && elementMatcher( matchers ), - i > 1 && toSelector( - - // If the preceding token was a descendant combinator, insert an implicit any-element `*` - tokens - .slice( 0, i - 1 ) - .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) - ).replace( rtrim, "$1" ), - matcher, - i < j && matcherFromTokens( tokens.slice( i, j ) ), - j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), - j < len && toSelector( tokens ) - ); - } - matchers.push( matcher ); - } - } - - return elementMatcher( matchers ); -} - -function matcherFromGroupMatchers( elementMatchers, setMatchers ) { - var bySet = setMatchers.length > 0, - byElement = elementMatchers.length > 0, - superMatcher = function( seed, context, xml, results, outermost ) { - var elem, j, matcher, - matchedCount = 0, - i = "0", - unmatched = seed && [], - setMatched = [], - contextBackup = outermostContext, - - // We must always have either seed elements or outermost context - elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), - - // Use integer dirruns iff this is the outermost matcher - dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), - len = elems.length; - - if ( outermost ) { - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - outermostContext = context == document || context || outermost; - } - - // Add elements passing elementMatchers directly to results - // Support: IE<9, Safari - // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id - for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { - if ( byElement && elem ) { - j = 0; - - // Support: IE 11+, Edge 17 - 18+ - // IE/Edge sometimes throw a "Permission denied" error when strict-comparing - // two documents; shallow comparisons work. - // eslint-disable-next-line eqeqeq - if ( !context && elem.ownerDocument != document ) { - setDocument( elem ); - xml = !documentIsHTML; - } - while ( ( matcher = elementMatchers[ j++ ] ) ) { - if ( matcher( elem, context || document, xml ) ) { - results.push( elem ); - break; - } - } - if ( outermost ) { - dirruns = dirrunsUnique; - } - } - - // Track unmatched elements for set filters - if ( bySet ) { - - // They will have gone through all possible matchers - if ( ( elem = !matcher && elem ) ) { - matchedCount--; - } - - // Lengthen the array for every element, matched or not - if ( seed ) { - unmatched.push( elem ); - } - } - } - - // `i` is now the count of elements visited above, and adding it to `matchedCount` - // makes the latter nonnegative. - matchedCount += i; - - // Apply set filters to unmatched elements - // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` - // equals `i`), unless we didn't visit _any_ elements in the above loop because we have - // no element matchers and no seed. - // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that - // case, which will result in a "00" `matchedCount` that differs from `i` but is also - // numerically zero. 
- if ( bySet && i !== matchedCount ) { - j = 0; - while ( ( matcher = setMatchers[ j++ ] ) ) { - matcher( unmatched, setMatched, context, xml ); - } - - if ( seed ) { - - // Reintegrate element matches to eliminate the need for sorting - if ( matchedCount > 0 ) { - while ( i-- ) { - if ( !( unmatched[ i ] || setMatched[ i ] ) ) { - setMatched[ i ] = pop.call( results ); - } - } - } - - // Discard index placeholder values to get only actual matches - setMatched = condense( setMatched ); - } - - // Add matches to results - push.apply( results, setMatched ); - - // Seedless set matches succeeding multiple successful matchers stipulate sorting - if ( outermost && !seed && setMatched.length > 0 && - ( matchedCount + setMatchers.length ) > 1 ) { - - Sizzle.uniqueSort( results ); - } - } - - // Override manipulation of globals by nested matchers - if ( outermost ) { - dirruns = dirrunsUnique; - outermostContext = contextBackup; - } - - return unmatched; - }; - - return bySet ? - markFunction( superMatcher ) : - superMatcher; -} - -compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { - var i, - setMatchers = [], - elementMatchers = [], - cached = compilerCache[ selector + " " ]; - - if ( !cached ) { - - // Generate a function of recursive functions that can be used to check each element - if ( !match ) { - match = tokenize( selector ); - } - i = match.length; - while ( i-- ) { - cached = matcherFromTokens( match[ i ] ); - if ( cached[ expando ] ) { - setMatchers.push( cached ); - } else { - elementMatchers.push( cached ); - } - } - - // Cache the compiled function - cached = compilerCache( - selector, - matcherFromGroupMatchers( elementMatchers, setMatchers ) - ); - - // Save selector and tokenization - cached.selector = selector; - } - return cached; -}; - -/** - * A low-level selection function that works with Sizzle's compiled - * selector functions - * @param {String|Function} selector A selector or a pre-compiled - * selector function built with Sizzle.compile - * @param {Element} context - * @param {Array} [results] - * @param {Array} [seed] A set of elements to match against - */ -select = Sizzle.select = function( selector, context, results, seed ) { - var i, tokens, token, type, find, - compiled = typeof selector === "function" && selector, - match = !seed && tokenize( ( selector = compiled.selector || selector ) ); - - results = results || []; - - // Try to minimize operations if there is only one selector in the list and no seed - // (the latter of which guarantees us context) - if ( match.length === 1 ) { - - // Reduce context if the leading compound selector is an ID - tokens = match[ 0 ] = match[ 0 ].slice( 0 ); - if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && - context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { - - context = ( Expr.find[ "ID" ]( token.matches[ 0 ] - .replace( runescape, funescape ), context ) || [] )[ 0 ]; - if ( !context ) { - return results; - - // Precompiled matchers will still verify ancestry, so step up a level - } else if ( compiled ) { - context = context.parentNode; - } - - selector = selector.slice( tokens.shift().value.length ); - } - - // Fetch a seed set for right-to-left matching - i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length;
-		while ( i-- ) {
-			token = tokens[ i ];
-
-			// Abort if we hit a combinator
-			if ( Expr.relative[ ( type = token.type ) ] ) {
-				break;
-			}
-			if ( ( find = Expr.find[ type ] ) ) {
-
-				// Search, expanding context for leading sibling combinators
-				if ( ( seed = find(
-					token.matches[ 0 ].replace( runescape, funescape ),
-					rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
-						context
-				) ) ) {
-
-					// If seed is empty or no tokens remain, we can return early
-					tokens.splice( i, 1 );
-					selector = seed.length && toSelector( tokens );
-					if ( !selector ) {
-						push.apply( results, seed );
-						return results;
-					}
-
-					break;
-				}
-			}
-		}
-	}
-
-	// Compile and execute a filtering function if one is not provided
-	// Provide `match` to avoid retokenization if we modified the selector above
-	( compiled || compile( selector, match ) )(
-		seed,
-		context,
-		!documentIsHTML,
-		results,
-		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
-	);
-	return results;
-};
-
-// One-time assignments
-
-// Sort stability
-support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
-
-// Support: Chrome 14-35+
-// Always assume duplicates if they aren't passed to the comparison function
-support.detectDuplicates = !!hasDuplicate;
-
-// Initialize against the default document
-setDocument();
-
-// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
-// Detached nodes confoundingly follow *each other*
-support.sortDetached = assert( function( el ) {
-
-	// Should return 1, but returns 4 (following)
-	return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
-} );
-
-// Support: IE<8
-// Prevent attribute/property "interpolation"
-// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
-if ( !assert( function( el ) {
-	el.innerHTML = "<a href='#'></a>";
-	return el.firstChild.getAttribute( "href" ) === "#";
-} ) ) {
-	addHandle( "type|href|height|width", function( elem, name, isXML ) {
-		if ( !isXML ) {
-			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
-		}
-	} );
-}
-
-// Support: IE<9
-// Use defaultValue in place of getAttribute("value")
-if ( !support.attributes || !assert( function( el ) {
-	el.innerHTML = "<input/>";
-	el.firstChild.setAttribute( "value", "" );
-	return el.firstChild.getAttribute( "value" ) === "";
-} ) ) {
-	addHandle( "value", function( elem, _name, isXML ) {
-		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
-			return elem.defaultValue;
-		}
-	} );
-}
-
-// Support: IE<9
-// Use getAttributeNode to fetch booleans when getAttribute lies
-if ( !assert( function( el ) {
-	return el.getAttribute( "disabled" ) == null;
-} ) ) {
-	addHandle( booleans, function( elem, name, isXML ) {
-		var val;
-		if ( !isXML ) {
-			return elem[ name ] === true ? name.toLowerCase() :
-					( val = elem.getAttributeNode( name ) ) && val.specified ?
- val.value : - null; - } - } ); -} - -return Sizzle; - -} )( window ); - - - -jQuery.find = Sizzle; -jQuery.expr = Sizzle.selectors; - -// Deprecated -jQuery.expr[ ":" ] = jQuery.expr.pseudos; -jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; -jQuery.text = Sizzle.getText; -jQuery.isXMLDoc = Sizzle.isXML; -jQuery.contains = Sizzle.contains; -jQuery.escapeSelector = Sizzle.escape; - - - - -var dir = function( elem, dir, until ) { - var matched = [], - truncate = until !== undefined; - - while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { - if ( elem.nodeType === 1 ) { - if ( truncate && jQuery( elem ).is( until ) ) { - break; - } - matched.push( elem ); - } - } - return matched; -}; - - -var siblings = function( n, elem ) { - var matched = []; - - for ( ; n; n = n.nextSibling ) { - if ( n.nodeType === 1 && n !== elem ) { - matched.push( n ); - } - } - - return matched; -}; - - -var rneedsContext = jQuery.expr.match.needsContext; - - - -function nodeName( elem, name ) { - - return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); - -}; -var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); - - - -// Implement the identical functionality for filter and not -function winnow( elements, qualifier, not ) { - if ( isFunction( qualifier ) ) { - return jQuery.grep( elements, function( elem, i ) { - return !!qualifier.call( elem, i, elem ) !== not; - } ); - } - - // Single element - if ( qualifier.nodeType ) { - return jQuery.grep( elements, function( elem ) { - return ( elem === qualifier ) !== not; - } ); - } - - // Arraylike of elements (jQuery, arguments, Array) - if ( typeof qualifier !== "string" ) { - return jQuery.grep( elements, function( elem ) { - return ( indexOf.call( qualifier, elem ) > -1 ) !== not; - } ); - } - - // Filtered directly for both simple and complex selectors - return jQuery.filter( qualifier, elements, not ); -} - -jQuery.filter = function( expr, elems, not ) { - var elem = elems[ 0 ]; - - if ( not ) { - expr = ":not(" + expr + ")"; - } - - if ( elems.length === 1 && elem.nodeType === 1 ) { - return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; - } - - return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { - return elem.nodeType === 1; - } ) ); -}; - -jQuery.fn.extend( { - find: function( selector ) { - var i, ret, - len = this.length, - self = this; - - if ( typeof selector !== "string" ) { - return this.pushStack( jQuery( selector ).filter( function() { - for ( i = 0; i < len; i++ ) { - if ( jQuery.contains( self[ i ], this ) ) { - return true; - } - } - } ) ); - } - - ret = this.pushStack( [] ); - - for ( i = 0; i < len; i++ ) { - jQuery.find( selector, self[ i ], ret ); - } - - return len > 1 ? jQuery.uniqueSort( ret ) : ret; - }, - filter: function( selector ) { - return this.pushStack( winnow( this, selector || [], false ) ); - }, - not: function( selector ) { - return this.pushStack( winnow( this, selector || [], true ) ); - }, - is: function( selector ) { - return !!winnow( - this, - - // If this is a positional/relative selector, check membership in the returned set - // so $("p:first").is("p:last") won't return true for a doc with two "p". - typeof selector === "string" && rneedsContext.test( selector ) ? 
-					jQuery( selector ) :
-					selector || [],
-				false
-			).length;
-	}
-} );
-
-
-// Initialize a jQuery object
-
-
-// A central reference to the root jQuery(document)
-var rootjQuery,
-
-	// A simple way to check for HTML strings
-	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
-	// Strict HTML recognition (#11290: must start with <)
-	// Shortcut simple #id case for speed
-	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
-
-	init = jQuery.fn.init = function( selector, context, root ) {
-		var match, elem;
-
-		// HANDLE: $(""), $(null), $(undefined), $(false)
-		if ( !selector ) {
-			return this;
-		}
-
-		// Method init() accepts an alternate rootjQuery
-		// so migrate can support jQuery.sub (gh-2101)
-		root = root || rootjQuery;
-
-		// Handle HTML strings
-		if ( typeof selector === "string" ) {
-			if ( selector[ 0 ] === "<" &&
-				selector[ selector.length - 1 ] === ">" &&
-				selector.length >= 3 ) {
-
-				// Assume that strings that start and end with <> are HTML and skip the regex check
-				match = [ null, selector, null ];
-
-			} else {
-				match = rquickExpr.exec( selector );
-			}
-
-			// Match html or make sure no context is specified for #id
-			if ( match && ( match[ 1 ] || !context ) ) {
-
-				// HANDLE: $(html) -> $(array)
-				if ( match[ 1 ] ) {
-					context = context instanceof jQuery ? context[ 0 ] : context;
-
-					// Option to run scripts is true for back-compat
-					// Intentionally let the error be thrown if parseHTML is not present
-					jQuery.merge( this, jQuery.parseHTML(
-						match[ 1 ],
-						context && context.nodeType ? context.ownerDocument || context : document,
-						true
-					) );
-
-					// HANDLE: $(html, props)
-					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
-						for ( match in context ) {
-
-							// Properties of context are called as methods if possible
-							if ( isFunction( this[ match ] ) ) {
-								this[ match ]( context[ match ] );
-
-							// ...and otherwise set as attributes
-							} else {
-								this.attr( match, context[ match ] );
-							}
-						}
-					}
-
-					return this;
-
-				// HANDLE: $(#id)
-				} else {
-					elem = document.getElementById( match[ 2 ] );
-
-					if ( elem ) {
-
-						// Inject the element directly into the jQuery object
-						this[ 0 ] = elem;
-						this.length = 1;
-					}
-					return this;
-				}
-
-			// HANDLE: $(expr, $(...))
-			} else if ( !context || context.jquery ) {
-				return ( context || root ).find( selector );
-
-			// HANDLE: $(expr, context)
-			// (which is just equivalent to: $(context).find(expr)
-			} else {
-				return this.constructor( context ).find( selector );
-			}
-
-		// HANDLE: $(DOMElement)
-		} else if ( selector.nodeType ) {
-			this[ 0 ] = selector;
-			this.length = 1;
-			return this;
-
-		// HANDLE: $(function)
-		// Shortcut for document ready
-		} else if ( isFunction( selector ) ) {
-			return root.ready !== undefined ?
- root.ready( selector ) : - - // Execute immediately if ready is not present - selector( jQuery ); - } - - return jQuery.makeArray( selector, this ); - }; - -// Give the init function the jQuery prototype for later instantiation -init.prototype = jQuery.fn; - -// Initialize central reference -rootjQuery = jQuery( document ); - - -var rparentsprev = /^(?:parents|prev(?:Until|All))/, - - // Methods guaranteed to produce a unique set when starting from a unique set - guaranteedUnique = { - children: true, - contents: true, - next: true, - prev: true - }; - -jQuery.fn.extend( { - has: function( target ) { - var targets = jQuery( target, this ), - l = targets.length; - - return this.filter( function() { - var i = 0; - for ( ; i < l; i++ ) { - if ( jQuery.contains( this, targets[ i ] ) ) { - return true; - } - } - } ); - }, - - closest: function( selectors, context ) { - var cur, - i = 0, - l = this.length, - matched = [], - targets = typeof selectors !== "string" && jQuery( selectors ); - - // Positional selectors never match, since there's no _selection_ context - if ( !rneedsContext.test( selectors ) ) { - for ( ; i < l; i++ ) { - for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { - - // Always skip document fragments - if ( cur.nodeType < 11 && ( targets ? - targets.index( cur ) > -1 : - - // Don't pass non-elements to Sizzle - cur.nodeType === 1 && - jQuery.find.matchesSelector( cur, selectors ) ) ) { - - matched.push( cur ); - break; - } - } - } - } - - return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); - }, - - // Determine the position of an element within the set - index: function( elem ) { - - // No argument, return index in parent - if ( !elem ) { - return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; - } - - // Index in selector - if ( typeof elem === "string" ) { - return indexOf.call( jQuery( elem ), this[ 0 ] ); - } - - // Locate the position of the desired element - return indexOf.call( this, - - // If it receives a jQuery object, the first element is used - elem.jquery ? elem[ 0 ] : elem - ); - }, - - add: function( selector, context ) { - return this.pushStack( - jQuery.uniqueSort( - jQuery.merge( this.get(), jQuery( selector, context ) ) - ) - ); - }, - - addBack: function( selector ) { - return this.add( selector == null ? - this.prevObject : this.prevObject.filter( selector ) - ); - } -} ); - -function sibling( cur, dir ) { - while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} - return cur; -} - -jQuery.each( { - parent: function( elem ) { - var parent = elem.parentNode; - return parent && parent.nodeType !== 11 ? 
parent : null;
-	},
-	parents: function( elem ) {
-		return dir( elem, "parentNode" );
-	},
-	parentsUntil: function( elem, _i, until ) {
-		return dir( elem, "parentNode", until );
-	},
-	next: function( elem ) {
-		return sibling( elem, "nextSibling" );
-	},
-	prev: function( elem ) {
-		return sibling( elem, "previousSibling" );
-	},
-	nextAll: function( elem ) {
-		return dir( elem, "nextSibling" );
-	},
-	prevAll: function( elem ) {
-		return dir( elem, "previousSibling" );
-	},
-	nextUntil: function( elem, _i, until ) {
-		return dir( elem, "nextSibling", until );
-	},
-	prevUntil: function( elem, _i, until ) {
-		return dir( elem, "previousSibling", until );
-	},
-	siblings: function( elem ) {
-		return siblings( ( elem.parentNode || {} ).firstChild, elem );
-	},
-	children: function( elem ) {
-		return siblings( elem.firstChild );
-	},
-	contents: function( elem ) {
-		if ( elem.contentDocument != null &&
-
-			// Support: IE 11+
-			// <object> elements with no `data` attribute has an object
-			// `contentDocument` with a `null` prototype.
-			getProto( elem.contentDocument ) ) {
-
-			return elem.contentDocument;
-		}
-
-		// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
-		// Treat the template element as a regular one in browsers that
-		// don't support it.
-		if ( nodeName( elem, "template" ) ) {
-			elem = elem.content || elem;
-		}
-
-		return jQuery.merge( [], elem.childNodes );
-	}
-}, function( name, fn ) {
-	jQuery.fn[ name ] = function( until, selector ) {
-		var matched = jQuery.map( this, fn, until );
-
-		if ( name.slice( -5 ) !== "Until" ) {
-			selector = until;
-		}
-
-		if ( selector && typeof selector === "string" ) {
-			matched = jQuery.filter( selector, matched );
-		}
-
-		if ( this.length > 1 ) {
-
-			// Remove duplicates
-			if ( !guaranteedUnique[ name ] ) {
-				jQuery.uniqueSort( matched );
-			}
-
-			// Reverse order for parents* and prev-derivatives
-			if ( rparentsprev.test( name ) ) {
-				matched.reverse();
-			}
-		}
-
-		return this.pushStack( matched );
-	};
-} );
-var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
-
-
-
-// Convert String-formatted options into Object-formatted ones
-function createOptions( options ) {
-	var object = {};
-	jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
-		object[ flag ] = true;
-	} );
-	return object;
-}
-
-/*
- * Create a callback list using the following parameters:
- *
- *	options: an optional list of space-separated options that will change how
- *			the callback list behaves or a more traditional option object
- *
- * By default a callback list will act like an event callback list and can be
- * "fired" multiple times.
- *
- * Possible options:
- *
- *	once:			will ensure the callback list can only be fired once (like a Deferred)
- *
- *	memory:			will keep track of previous values and will call any callback added
- *					after the list has been fired right away with the latest "memorized"
- *					values (like a Deferred)
- *
- *	unique:			will ensure a callback can only be added once (no duplicate in the list)
- *
- *	stopOnFalse:	interrupt callings when a callback returns false
- *
- */
-jQuery.Callbacks = function( options ) {
-
-	// Convert options from String-formatted to Object-formatted if needed
-	// (we check in cache first)
-	options = typeof options === "string" ?
- createOptions( options ) : - jQuery.extend( {}, options ); - - var // Flag to know if list is currently firing - firing, - - // Last fire value for non-forgettable lists - memory, - - // Flag to know if list was already fired - fired, - - // Flag to prevent firing - locked, - - // Actual callback list - list = [], - - // Queue of execution data for repeatable lists - queue = [], - - // Index of currently firing callback (modified by add/remove as needed) - firingIndex = -1, - - // Fire callbacks - fire = function() { - - // Enforce single-firing - locked = locked || options.once; - - // Execute callbacks for all pending executions, - // respecting firingIndex overrides and runtime changes - fired = firing = true; - for ( ; queue.length; firingIndex = -1 ) { - memory = queue.shift(); - while ( ++firingIndex < list.length ) { - - // Run callback and check for early termination - if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && - options.stopOnFalse ) { - - // Jump to end and forget the data so .add doesn't re-fire - firingIndex = list.length; - memory = false; - } - } - } - - // Forget the data if we're done with it - if ( !options.memory ) { - memory = false; - } - - firing = false; - - // Clean up if we're done firing for good - if ( locked ) { - - // Keep an empty list if we have data for future add calls - if ( memory ) { - list = []; - - // Otherwise, this object is spent - } else { - list = ""; - } - } - }, - - // Actual Callbacks object - self = { - - // Add a callback or a collection of callbacks to the list - add: function() { - if ( list ) { - - // If we have memory from a past run, we should fire after adding - if ( memory && !firing ) { - firingIndex = list.length - 1; - queue.push( memory ); - } - - ( function add( args ) { - jQuery.each( args, function( _, arg ) { - if ( isFunction( arg ) ) { - if ( !options.unique || !self.has( arg ) ) { - list.push( arg ); - } - } else if ( arg && arg.length && toType( arg ) !== "string" ) { - - // Inspect recursively - add( arg ); - } - } ); - } )( arguments ); - - if ( memory && !firing ) { - fire(); - } - } - return this; - }, - - // Remove a callback from the list - remove: function() { - jQuery.each( arguments, function( _, arg ) { - var index; - while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { - list.splice( index, 1 ); - - // Handle firing indexes - if ( index <= firingIndex ) { - firingIndex--; - } - } - } ); - return this; - }, - - // Check if a given callback is in the list. - // If no argument is given, return whether or not list has callbacks attached. - has: function( fn ) { - return fn ? - jQuery.inArray( fn, list ) > -1 : - list.length > 0; - }, - - // Remove all callbacks from the list - empty: function() { - if ( list ) { - list = []; - } - return this; - }, - - // Disable .fire and .add - // Abort any current/pending executions - // Clear all callbacks and values - disable: function() { - locked = queue = []; - list = memory = ""; - return this; - }, - disabled: function() { - return !list; - }, - - // Disable .fire - // Also disable .add unless we have memory (since it would have no effect) - // Abort any pending executions - lock: function() { - locked = queue = []; - if ( !memory && !firing ) { - list = memory = ""; - } - return this; - }, - locked: function() { - return !!locked; - }, - - // Call all callbacks with the given context and arguments - fireWith: function( context, args ) { - if ( !locked ) { - args = args || []; - args = [ context, args.slice ? 
args.slice() : args ]; - queue.push( args ); - if ( !firing ) { - fire(); - } - } - return this; - }, - - // Call all the callbacks with the given arguments - fire: function() { - self.fireWith( this, arguments ); - return this; - }, - - // To know if the callbacks have already been called at least once - fired: function() { - return !!fired; - } - }; - - return self; -}; - - -function Identity( v ) { - return v; -} -function Thrower( ex ) { - throw ex; -} - -function adoptValue( value, resolve, reject, noValue ) { - var method; - - try { - - // Check for promise aspect first to privilege synchronous behavior - if ( value && isFunction( ( method = value.promise ) ) ) { - method.call( value ).done( resolve ).fail( reject ); - - // Other thenables - } else if ( value && isFunction( ( method = value.then ) ) ) { - method.call( value, resolve, reject ); - - // Other non-thenables - } else { - - // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: - // * false: [ value ].slice( 0 ) => resolve( value ) - // * true: [ value ].slice( 1 ) => resolve() - resolve.apply( undefined, [ value ].slice( noValue ) ); - } - - // For Promises/A+, convert exceptions into rejections - // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in - // Deferred#then to conditionally suppress rejection. - } catch ( value ) { - - // Support: Android 4.0 only - // Strict mode functions invoked without .call/.apply get global-object context - reject.apply( undefined, [ value ] ); - } -} - -jQuery.extend( { - - Deferred: function( func ) { - var tuples = [ - - // action, add listener, callbacks, - // ... .then handlers, argument index, [final state] - [ "notify", "progress", jQuery.Callbacks( "memory" ), - jQuery.Callbacks( "memory" ), 2 ], - [ "resolve", "done", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 0, "resolved" ], - [ "reject", "fail", jQuery.Callbacks( "once memory" ), - jQuery.Callbacks( "once memory" ), 1, "rejected" ] - ], - state = "pending", - promise = { - state: function() { - return state; - }, - always: function() { - deferred.done( arguments ).fail( arguments ); - return this; - }, - "catch": function( fn ) { - return promise.then( null, fn ); - }, - - // Keep pipe for back-compat - pipe: function( /* fnDone, fnFail, fnProgress */ ) { - var fns = arguments; - - return jQuery.Deferred( function( newDefer ) { - jQuery.each( tuples, function( _i, tuple ) { - - // Map tuples (progress, done, fail) to arguments (done, fail, progress) - var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; - - // deferred.progress(function() { bind to newDefer or newDefer.notify }) - // deferred.done(function() { bind to newDefer or newDefer.resolve }) - // deferred.fail(function() { bind to newDefer or newDefer.reject }) - deferred[ tuple[ 1 ] ]( function() { - var returned = fn && fn.apply( this, arguments ); - if ( returned && isFunction( returned.promise ) ) { - returned.promise() - .progress( newDefer.notify ) - .done( newDefer.resolve ) - .fail( newDefer.reject ); - } else { - newDefer[ tuple[ 0 ] + "With" ]( - this, - fn ? 
[ returned ] : arguments - ); - } - } ); - } ); - fns = null; - } ).promise(); - }, - then: function( onFulfilled, onRejected, onProgress ) { - var maxDepth = 0; - function resolve( depth, deferred, handler, special ) { - return function() { - var that = this, - args = arguments, - mightThrow = function() { - var returned, then; - - // Support: Promises/A+ section 2.3.3.3.3 - // https://promisesaplus.com/#point-59 - // Ignore double-resolution attempts - if ( depth < maxDepth ) { - return; - } - - returned = handler.apply( that, args ); - - // Support: Promises/A+ section 2.3.1 - // https://promisesaplus.com/#point-48 - if ( returned === deferred.promise() ) { - throw new TypeError( "Thenable self-resolution" ); - } - - // Support: Promises/A+ sections 2.3.3.1, 3.5 - // https://promisesaplus.com/#point-54 - // https://promisesaplus.com/#point-75 - // Retrieve `then` only once - then = returned && - - // Support: Promises/A+ section 2.3.4 - // https://promisesaplus.com/#point-64 - // Only check objects and functions for thenability - ( typeof returned === "object" || - typeof returned === "function" ) && - returned.then; - - // Handle a returned thenable - if ( isFunction( then ) ) { - - // Special processors (notify) just wait for resolution - if ( special ) { - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ) - ); - - // Normal processors (resolve) also hook into progress - } else { - - // ...and disregard older resolution values - maxDepth++; - - then.call( - returned, - resolve( maxDepth, deferred, Identity, special ), - resolve( maxDepth, deferred, Thrower, special ), - resolve( maxDepth, deferred, Identity, - deferred.notifyWith ) - ); - } - - // Handle all other returned values - } else { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Identity ) { - that = undefined; - args = [ returned ]; - } - - // Process the value(s) - // Default process is resolve - ( special || deferred.resolveWith )( that, args ); - } - }, - - // Only normal processors (resolve) catch and reject exceptions - process = special ? - mightThrow : - function() { - try { - mightThrow(); - } catch ( e ) { - - if ( jQuery.Deferred.exceptionHook ) { - jQuery.Deferred.exceptionHook( e, - process.stackTrace ); - } - - // Support: Promises/A+ section 2.3.3.3.4.1 - // https://promisesaplus.com/#point-61 - // Ignore post-resolution exceptions - if ( depth + 1 >= maxDepth ) { - - // Only substitute handlers pass on context - // and multiple values (non-spec behavior) - if ( handler !== Thrower ) { - that = undefined; - args = [ e ]; - } - - deferred.rejectWith( that, args ); - } - } - }; - - // Support: Promises/A+ section 2.3.3.3.1 - // https://promisesaplus.com/#point-57 - // Re-resolve promises immediately to dodge false rejection from - // subsequent errors - if ( depth ) { - process(); - } else { - - // Call an optional hook to record the stack, in case of exception - // since it's otherwise lost when execution goes async - if ( jQuery.Deferred.getStackHook ) { - process.stackTrace = jQuery.Deferred.getStackHook(); - } - window.setTimeout( process ); - } - }; - } - - return jQuery.Deferred( function( newDefer ) { - - // progress_handlers.add( ... ) - tuples[ 0 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onProgress ) ? - onProgress : - Identity, - newDefer.notifyWith - ) - ); - - // fulfilled_handlers.add( ... 
) - tuples[ 1 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onFulfilled ) ? - onFulfilled : - Identity - ) - ); - - // rejected_handlers.add( ... ) - tuples[ 2 ][ 3 ].add( - resolve( - 0, - newDefer, - isFunction( onRejected ) ? - onRejected : - Thrower - ) - ); - } ).promise(); - }, - - // Get a promise for this deferred - // If obj is provided, the promise aspect is added to the object - promise: function( obj ) { - return obj != null ? jQuery.extend( obj, promise ) : promise; - } - }, - deferred = {}; - - // Add list-specific methods - jQuery.each( tuples, function( i, tuple ) { - var list = tuple[ 2 ], - stateString = tuple[ 5 ]; - - // promise.progress = list.add - // promise.done = list.add - // promise.fail = list.add - promise[ tuple[ 1 ] ] = list.add; - - // Handle state - if ( stateString ) { - list.add( - function() { - - // state = "resolved" (i.e., fulfilled) - // state = "rejected" - state = stateString; - }, - - // rejected_callbacks.disable - // fulfilled_callbacks.disable - tuples[ 3 - i ][ 2 ].disable, - - // rejected_handlers.disable - // fulfilled_handlers.disable - tuples[ 3 - i ][ 3 ].disable, - - // progress_callbacks.lock - tuples[ 0 ][ 2 ].lock, - - // progress_handlers.lock - tuples[ 0 ][ 3 ].lock - ); - } - - // progress_handlers.fire - // fulfilled_handlers.fire - // rejected_handlers.fire - list.add( tuple[ 3 ].fire ); - - // deferred.notify = function() { deferred.notifyWith(...) } - // deferred.resolve = function() { deferred.resolveWith(...) } - // deferred.reject = function() { deferred.rejectWith(...) } - deferred[ tuple[ 0 ] ] = function() { - deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); - return this; - }; - - // deferred.notifyWith = list.fireWith - // deferred.resolveWith = list.fireWith - // deferred.rejectWith = list.fireWith - deferred[ tuple[ 0 ] + "With" ] = list.fireWith; - } ); - - // Make the deferred a promise - promise.promise( deferred ); - - // Call given func if any - if ( func ) { - func.call( deferred, deferred ); - } - - // All done! - return deferred; - }, - - // Deferred helper - when: function( singleValue ) { - var - - // count of uncompleted subordinates - remaining = arguments.length, - - // count of unprocessed arguments - i = remaining, - - // subordinate fulfillment data - resolveContexts = Array( i ), - resolveValues = slice.call( arguments ), - - // the master Deferred - master = jQuery.Deferred(), - - // subordinate callback factory - updateFunc = function( i ) { - return function( value ) { - resolveContexts[ i ] = this; - resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; - if ( !( --remaining ) ) { - master.resolveWith( resolveContexts, resolveValues ); - } - }; - }; - - // Single- and empty arguments are adopted like Promise.resolve - if ( remaining <= 1 ) { - adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, - !remaining ); - - // Use .then() to unwrap secondary thenables (cf. gh-3000) - if ( master.state() === "pending" || - isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { - - return master.then(); - } - } - - // Multiple arguments are aggregated like Promise.all array elements - while ( i-- ) { - adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); - } - - return master.promise(); - } -} ); - - -// These usually indicate a programmer mistake during development, -// warn about them ASAP rather than swallowing them by default. 
-var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; - -jQuery.Deferred.exceptionHook = function( error, stack ) { - - // Support: IE 8 - 9 only - // Console exists when dev tools are open, which can happen at any time - if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { - window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); - } -}; - - - - -jQuery.readyException = function( error ) { - window.setTimeout( function() { - throw error; - } ); -}; - - - - -// The deferred used on DOM ready -var readyList = jQuery.Deferred(); - -jQuery.fn.ready = function( fn ) { - - readyList - .then( fn ) - - // Wrap jQuery.readyException in a function so that the lookup - // happens at the time of error handling instead of callback - // registration. - .catch( function( error ) { - jQuery.readyException( error ); - } ); - - return this; -}; - -jQuery.extend( { - - // Is the DOM ready to be used? Set to true once it occurs. - isReady: false, - - // A counter to track how many items to wait for before - // the ready event fires. See #6781 - readyWait: 1, - - // Handle when the DOM is ready - ready: function( wait ) { - - // Abort if there are pending holds or we're already ready - if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { - return; - } - - // Remember that the DOM is ready - jQuery.isReady = true; - - // If a normal DOM Ready event fired, decrement, and wait if need be - if ( wait !== true && --jQuery.readyWait > 0 ) { - return; - } - - // If there are functions bound, to execute - readyList.resolveWith( document, [ jQuery ] ); - } -} ); - -jQuery.ready.then = readyList.then; - -// The ready event handler and self cleanup method -function completed() { - document.removeEventListener( "DOMContentLoaded", completed ); - window.removeEventListener( "load", completed ); - jQuery.ready(); -} - -// Catch cases where $(document).ready() is called -// after the browser event has already occurred. -// Support: IE <=9 - 10 only -// Older IE sometimes signals "interactive" too soon -if ( document.readyState === "complete" || - ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { - - // Handle it asynchronously to allow scripts the opportunity to delay ready - window.setTimeout( jQuery.ready ); - -} else { - - // Use the handy event callback - document.addEventListener( "DOMContentLoaded", completed ); - - // A fallback to window.onload, that will always work - window.addEventListener( "load", completed ); -} - - - - -// Multifunctional method to get and set values of a collection -// The value/s can optionally be executed if it's a function -var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { - var i = 0, - len = elems.length, - bulk = key == null; - - // Sets many values - if ( toType( key ) === "object" ) { - chainable = true; - for ( i in key ) { - access( elems, fn, i, key[ i ], true, emptyGet, raw ); - } - - // Sets one value - } else if ( value !== undefined ) { - chainable = true; - - if ( !isFunction( value ) ) { - raw = true; - } - - if ( bulk ) { - - // Bulk operations run against the entire set - if ( raw ) { - fn.call( elems, value ); - fn = null; - - // ...except when executing function values - } else { - bulk = fn; - fn = function( elem, _key, value ) { - return bulk.call( jQuery( elem ), value ); - }; - } - } - - if ( fn ) { - for ( ; i < len; i++ ) { - fn( - elems[ i ], key, raw ? 
- value : - value.call( elems[ i ], i, fn( elems[ i ], key ) ) - ); - } - } - } - - if ( chainable ) { - return elems; - } - - // Gets - if ( bulk ) { - return fn.call( elems ); - } - - return len ? fn( elems[ 0 ], key ) : emptyGet; -}; - - -// Matches dashed string for camelizing -var rmsPrefix = /^-ms-/, - rdashAlpha = /-([a-z])/g; - -// Used by camelCase as callback to replace() -function fcamelCase( _all, letter ) { - return letter.toUpperCase(); -} - -// Convert dashed to camelCase; used by the css and data modules -// Support: IE <=9 - 11, Edge 12 - 15 -// Microsoft forgot to hump their vendor prefix (#9572) -function camelCase( string ) { - return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); -} -var acceptData = function( owner ) { - - // Accepts only: - // - Node - // - Node.ELEMENT_NODE - // - Node.DOCUMENT_NODE - // - Object - // - Any - return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); -}; - - - - -function Data() { - this.expando = jQuery.expando + Data.uid++; -} - -Data.uid = 1; - -Data.prototype = { - - cache: function( owner ) { - - // Check if the owner object already has a cache - var value = owner[ this.expando ]; - - // If not, create one - if ( !value ) { - value = {}; - - // We can accept data for non-element nodes in modern browsers, - // but we should not, see #8335. - // Always return an empty object. - if ( acceptData( owner ) ) { - - // If it is a node unlikely to be stringify-ed or looped over - // use plain assignment - if ( owner.nodeType ) { - owner[ this.expando ] = value; - - // Otherwise secure it in a non-enumerable property - // configurable must be true to allow the property to be - // deleted when data is removed - } else { - Object.defineProperty( owner, this.expando, { - value: value, - configurable: true - } ); - } - } - } - - return value; - }, - set: function( owner, data, value ) { - var prop, - cache = this.cache( owner ); - - // Handle: [ owner, key, value ] args - // Always use camelCase key (gh-2257) - if ( typeof data === "string" ) { - cache[ camelCase( data ) ] = value; - - // Handle: [ owner, { properties } ] args - } else { - - // Copy the properties one-by-one to the cache object - for ( prop in data ) { - cache[ camelCase( prop ) ] = data[ prop ]; - } - } - return cache; - }, - get: function( owner, key ) { - return key === undefined ? - this.cache( owner ) : - - // Always use camelCase key (gh-2257) - owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; - }, - access: function( owner, key, value ) { - - // In cases where either: - // - // 1. No key was specified - // 2. A string key was specified, but no value provided - // - // Take the "read" path and allow the get method to determine - // which value to return, respectively either: - // - // 1. The entire cache object - // 2. The data stored at the key - // - if ( key === undefined || - ( ( key && typeof key === "string" ) && value === undefined ) ) { - - return this.get( owner, key ); - } - - // When the key is not a string, or both a key and value - // are specified, set or extend (existing objects) with either: - // - // 1. An object of properties - // 2. A key and value - // - this.set( owner, key, value ); - - // Since the "set" path can have two possible entry points - // return the expected data based on which path was taken[*] - return value !== undefined ? 
value : key; - }, - remove: function( owner, key ) { - var i, - cache = owner[ this.expando ]; - - if ( cache === undefined ) { - return; - } - - if ( key !== undefined ) { - - // Support array or space separated string of keys - if ( Array.isArray( key ) ) { - - // If key is an array of keys... - // We always set camelCase keys, so remove that. - key = key.map( camelCase ); - } else { - key = camelCase( key ); - - // If a key with the spaces exists, use it. - // Otherwise, create an array by matching non-whitespace - key = key in cache ? - [ key ] : - ( key.match( rnothtmlwhite ) || [] ); - } - - i = key.length; - - while ( i-- ) { - delete cache[ key[ i ] ]; - } - } - - // Remove the expando if there's no more data - if ( key === undefined || jQuery.isEmptyObject( cache ) ) { - - // Support: Chrome <=35 - 45 - // Webkit & Blink performance suffers when deleting properties - // from DOM nodes, so set to undefined instead - // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) - if ( owner.nodeType ) { - owner[ this.expando ] = undefined; - } else { - delete owner[ this.expando ]; - } - } - }, - hasData: function( owner ) { - var cache = owner[ this.expando ]; - return cache !== undefined && !jQuery.isEmptyObject( cache ); - } -}; -var dataPriv = new Data(); - -var dataUser = new Data(); - - - -// Implementation Summary -// -// 1. Enforce API surface and semantic compatibility with 1.9.x branch -// 2. Improve the module's maintainability by reducing the storage -// paths to a single mechanism. -// 3. Use the same single mechanism to support "private" and "user" data. -// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) -// 5. Avoid exposing implementation details on user objects (eg. expando properties) -// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 - -var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, - rmultiDash = /[A-Z]/g; - -function getData( data ) { - if ( data === "true" ) { - return true; - } - - if ( data === "false" ) { - return false; - } - - if ( data === "null" ) { - return null; - } - - // Only convert to a number if it doesn't change the string - if ( data === +data + "" ) { - return +data; - } - - if ( rbrace.test( data ) ) { - return JSON.parse( data ); - } - - return data; -} - -function dataAttr( elem, key, data ) { - var name; - - // If nothing was found internally, try to fetch any - // data from the HTML5 data-* attribute - if ( data === undefined && elem.nodeType === 1 ) { - name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); - data = elem.getAttribute( name ); - - if ( typeof data === "string" ) { - try { - data = getData( data ); - } catch ( e ) {} - - // Make sure we set the data so it isn't changed later - dataUser.set( elem, key, data ); - } else { - data = undefined; - } - } - return data; -} - -jQuery.extend( { - hasData: function( elem ) { - return dataUser.hasData( elem ) || dataPriv.hasData( elem ); - }, - - data: function( elem, name, data ) { - return dataUser.access( elem, name, data ); - }, - - removeData: function( elem, name ) { - dataUser.remove( elem, name ); - }, - - // TODO: Now that all calls to _data and _removeData have been replaced - // with direct calls to dataPriv methods, these can be deprecated. 
- _data: function( elem, name, data ) { - return dataPriv.access( elem, name, data ); - }, - - _removeData: function( elem, name ) { - dataPriv.remove( elem, name ); - } -} ); - -jQuery.fn.extend( { - data: function( key, value ) { - var i, name, data, - elem = this[ 0 ], - attrs = elem && elem.attributes; - - // Gets all values - if ( key === undefined ) { - if ( this.length ) { - data = dataUser.get( elem ); - - if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { - i = attrs.length; - while ( i-- ) { - - // Support: IE 11 only - // The attrs elements can be null (#14894) - if ( attrs[ i ] ) { - name = attrs[ i ].name; - if ( name.indexOf( "data-" ) === 0 ) { - name = camelCase( name.slice( 5 ) ); - dataAttr( elem, name, data[ name ] ); - } - } - } - dataPriv.set( elem, "hasDataAttrs", true ); - } - } - - return data; - } - - // Sets multiple values - if ( typeof key === "object" ) { - return this.each( function() { - dataUser.set( this, key ); - } ); - } - - return access( this, function( value ) { - var data; - - // The calling jQuery object (element matches) is not empty - // (and therefore has an element appears at this[ 0 ]) and the - // `value` parameter was not undefined. An empty jQuery object - // will result in `undefined` for elem = this[ 0 ] which will - // throw an exception if an attempt to read a data cache is made. - if ( elem && value === undefined ) { - - // Attempt to get data from the cache - // The key will always be camelCased in Data - data = dataUser.get( elem, key ); - if ( data !== undefined ) { - return data; - } - - // Attempt to "discover" the data in - // HTML5 custom data-* attrs - data = dataAttr( elem, key ); - if ( data !== undefined ) { - return data; - } - - // We tried really hard, but the data doesn't exist. - return; - } - - // Set the data... 
- this.each( function() { - - // We always store the camelCased key - dataUser.set( this, key, value ); - } ); - }, null, value, arguments.length > 1, null, true ); - }, - - removeData: function( key ) { - return this.each( function() { - dataUser.remove( this, key ); - } ); - } -} ); - - -jQuery.extend( { - queue: function( elem, type, data ) { - var queue; - - if ( elem ) { - type = ( type || "fx" ) + "queue"; - queue = dataPriv.get( elem, type ); - - // Speed up dequeue by getting out quickly if this is just a lookup - if ( data ) { - if ( !queue || Array.isArray( data ) ) { - queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); - } else { - queue.push( data ); - } - } - return queue || []; - } - }, - - dequeue: function( elem, type ) { - type = type || "fx"; - - var queue = jQuery.queue( elem, type ), - startLength = queue.length, - fn = queue.shift(), - hooks = jQuery._queueHooks( elem, type ), - next = function() { - jQuery.dequeue( elem, type ); - }; - - // If the fx queue is dequeued, always remove the progress sentinel - if ( fn === "inprogress" ) { - fn = queue.shift(); - startLength--; - } - - if ( fn ) { - - // Add a progress sentinel to prevent the fx queue from being - // automatically dequeued - if ( type === "fx" ) { - queue.unshift( "inprogress" ); - } - - // Clear up the last queue stop function - delete hooks.stop; - fn.call( elem, next, hooks ); - } - - if ( !startLength && hooks ) { - hooks.empty.fire(); - } - }, - - // Not public - generate a queueHooks object, or return the current one - _queueHooks: function( elem, type ) { - var key = type + "queueHooks"; - return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { - empty: jQuery.Callbacks( "once memory" ).add( function() { - dataPriv.remove( elem, [ type + "queue", key ] ); - } ) - } ); - } -} ); - -jQuery.fn.extend( { - queue: function( type, data ) { - var setter = 2; - - if ( typeof type !== "string" ) { - data = type; - type = "fx"; - setter--; - } - - if ( arguments.length < setter ) { - return jQuery.queue( this[ 0 ], type ); - } - - return data === undefined ? 
- this : - this.each( function() { - var queue = jQuery.queue( this, type, data ); - - // Ensure a hooks for this queue - jQuery._queueHooks( this, type ); - - if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { - jQuery.dequeue( this, type ); - } - } ); - }, - dequeue: function( type ) { - return this.each( function() { - jQuery.dequeue( this, type ); - } ); - }, - clearQueue: function( type ) { - return this.queue( type || "fx", [] ); - }, - - // Get a promise resolved when queues of a certain type - // are emptied (fx is the type by default) - promise: function( type, obj ) { - var tmp, - count = 1, - defer = jQuery.Deferred(), - elements = this, - i = this.length, - resolve = function() { - if ( !( --count ) ) { - defer.resolveWith( elements, [ elements ] ); - } - }; - - if ( typeof type !== "string" ) { - obj = type; - type = undefined; - } - type = type || "fx"; - - while ( i-- ) { - tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); - if ( tmp && tmp.empty ) { - count++; - tmp.empty.add( resolve ); - } - } - resolve(); - return defer.promise( obj ); - } -} ); -var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; - -var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); - - -var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; - -var documentElement = document.documentElement; - - - - var isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ); - }, - composed = { composed: true }; - - // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only - // Check attachment across shadow DOM boundaries when possible (gh-3504) - // Support: iOS 10.0-10.2 only - // Early iOS 10 versions support `attachShadow` but not `getRootNode`, - // leading to errors. We need to check for `getRootNode`. - if ( documentElement.getRootNode ) { - isAttached = function( elem ) { - return jQuery.contains( elem.ownerDocument, elem ) || - elem.getRootNode( composed ) === elem.ownerDocument; - }; - } -var isHiddenWithinTree = function( elem, el ) { - - // isHiddenWithinTree might be called from jQuery#filter function; - // in that case, element will be second argument - elem = el || elem; - - // Inline style trumps all - return elem.style.display === "none" || - elem.style.display === "" && - - // Otherwise, check computed style - // Support: Firefox <=43 - 45 - // Disconnected elements can have computed display: none, so first confirm that elem is - // in the document. - isAttached( elem ) && - - jQuery.css( elem, "display" ) === "none"; - }; - - - -function adjustCSS( elem, prop, valueParts, tween ) { - var adjusted, scale, - maxIterations = 20, - currentValue = tween ? - function() { - return tween.cur(); - } : - function() { - return jQuery.css( elem, prop, "" ); - }, - initial = currentValue(), - unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ), - - // Starting value computation is required for potential unit mismatches - initialInUnit = elem.nodeType && - ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && - rcssNum.exec( jQuery.css( elem, prop ) ); - - if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { - - // Support: Firefox <=54 - // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) - initial = initial / 2; - - // Trust units reported by jQuery.css - unit = unit || initialInUnit[ 3 ]; - - // Iteratively approximate from a nonzero starting point - initialInUnit = +initial || 1; - - while ( maxIterations-- ) { - - // Evaluate and update our best guess (doubling guesses that zero out). - // Finish if the scale equals or crosses 1 (making the old*new product non-positive). - jQuery.style( elem, prop, initialInUnit + unit ); - if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { - maxIterations = 0; - } - initialInUnit = initialInUnit / scale; - - } - - initialInUnit = initialInUnit * 2; - jQuery.style( elem, prop, initialInUnit + unit ); - - // Make sure we update the tween properties later on - valueParts = valueParts || []; - } - - if ( valueParts ) { - initialInUnit = +initialInUnit || +initial || 0; - - // Apply relative offset (+=/-=) if specified - adjusted = valueParts[ 1 ] ? - initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : - +valueParts[ 2 ]; - if ( tween ) { - tween.unit = unit; - tween.start = initialInUnit; - tween.end = adjusted; - } - } - return adjusted; -} - - -var defaultDisplayMap = {}; - -function getDefaultDisplay( elem ) { - var temp, - doc = elem.ownerDocument, - nodeName = elem.nodeName, - display = defaultDisplayMap[ nodeName ]; - - if ( display ) { - return display; - } - - temp = doc.body.appendChild( doc.createElement( nodeName ) ); - display = jQuery.css( temp, "display" ); - - temp.parentNode.removeChild( temp ); - - if ( display === "none" ) { - display = "block"; - } - defaultDisplayMap[ nodeName ] = display; - - return display; -} - -function showHide( elements, show ) { - var display, elem, - values = [], - index = 0, - length = elements.length; - - // Determine new display value for elements that need to change - for ( ; index < length; index++ ) { - elem = elements[ index ]; - if ( !elem.style ) { - continue; - } - - display = elem.style.display; - if ( show ) { - - // Since we force visibility upon cascade-hidden elements, an immediate (and slow) - // check is required in this first loop unless we have a nonempty display value (either - // inline or about-to-be-restored) - if ( display === "none" ) { - values[ index ] = dataPriv.get( elem, "display" ) || null; - if ( !values[ index ] ) { - elem.style.display = ""; - } - } - if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { - values[ index ] = getDefaultDisplay( elem ); - } - } else { - if ( display !== "none" ) { - values[ index ] = "none"; - - // Remember what we're overwriting - dataPriv.set( elem, "display", display ); - } - } - } - - // Set the display of the elements in a second loop to avoid constant reflow - for ( index = 0; index < length; index++ ) { - if ( values[ index ] != null ) { - elements[ index ].style.display = values[ index ]; - } - } - - return elements; -} - -jQuery.fn.extend( { - show: function() { - return showHide( this, true ); - }, - hide: function() { - return showHide( this ); - }, - toggle: function( state ) { - if ( typeof state === "boolean" ) { - return state ? 
this.show() : this.hide();
- }
-
- return this.each( function() {
- if ( isHiddenWithinTree( this ) ) {
- jQuery( this ).show();
- } else {
- jQuery( this ).hide();
- }
- } );
- }
-} );
-var rcheckableType = ( /^(?:checkbox|radio)$/i );
-
-var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
-
-var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
-
-
-
-( function() {
- var fragment = document.createDocumentFragment(),
- div = fragment.appendChild( document.createElement( "div" ) ),
- input = document.createElement( "input" );
-
- // Support: Android 4.0 - 4.3 only
- // Check state lost if the name is set (#11217)
- // Support: Windows Web Apps (WWA)
- // `name` and `type` must use .setAttribute for WWA (#14901)
- input.setAttribute( "type", "radio" );
- input.setAttribute( "checked", "checked" );
- input.setAttribute( "name", "t" );
-
- div.appendChild( input );
-
- // Support: Android <=4.1 only
- // Older WebKit doesn't clone checked state correctly in fragments
- support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
-
- // Support: IE <=11 only
- // Make sure textarea (and checkbox) defaultValue is properly cloned
- div.innerHTML = "<textarea>x</textarea>";
- support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
-
- // Support: IE <=9 only
- // IE <=9 replaces <option> tags with their contents when inserted outside a
- // select element.
- div.innerHTML = "<option></option>";
- support.option = !!div.lastChild;
-} )();
-
-
-// We have to close these tags to support XHTML (#13200)
-var wrapMap = {
-
- // XHTML parsers do not magically insert elements in the
- // same way that tag soup parsers do. So we cannot shorten
- // this by omitting <tbody> or other required elements.
- thead: [ 1, "<table>", "</table>" ],
- col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
- tr: [ 2, "<table><tbody>", "</tbody></table>" ],
- td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
-
- _default: [ 0, "", "" ]
-};
-
-wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
-wrapMap.th = wrapMap.td;
-
-// Support: IE <=9 only
-if ( !support.option ) {
- wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
-}
-
-
-function getAll( context, tag ) {
-
- // Support: IE <=9 - 11 only
- // Use typeof to avoid zero-argument method invocation on host objects (#15151)
- var ret;
-
- if ( typeof context.getElementsByTagName !== "undefined" ) {
- ret = context.getElementsByTagName( tag || "*" );
-
- } else if ( typeof context.querySelectorAll !== "undefined" ) {
- ret = context.querySelectorAll( tag || "*" );
-
- } else {
- ret = [];
- }
-
- if ( tag === undefined || tag && nodeName( context, tag ) ) {
- return jQuery.merge( [ context ], ret );
- }
-
- return ret;
-}
-
-
-// Mark scripts as having already been evaluated
-function setGlobalEval( elems, refElements ) {
- var i = 0,
- l = elems.length;
-
- for ( ; i < l; i++ ) {
- dataPriv.set(
- elems[ i ],
- "globalEval",
- !refElements || dataPriv.get( refElements[ i ], "globalEval" )
- );
- }
-}
-
-
-var rhtml = /<|&#?\w+;/;
-
-function buildFragment( elems, context, scripts, selection, ignored ) {
- var elem, tmp, tag, wrap, attached, j,
- fragment = context.createDocumentFragment(),
- nodes = [],
- i = 0,
- l = elems.length;
-
- for ( ; i < l; i++ ) {
- elem = elems[ i ];
-
- if ( elem || elem === 0 ) {
-
- // Add nodes directly
- if ( toType( elem ) === "object" ) {
-
- // Support: Android <=4.0 only, PhantomJS 1 only
- // push.apply(_, arraylike) throws on ancient WebKit
- jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
-
- // Convert non-html into a text node
- } else if ( !rhtml.test( elem ) ) {
- nodes.push( context.createTextNode( elem ) );
-
- // Convert html into DOM nodes
- } else {
- tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
-
- // Deserialize a standard representation
- tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
- wrap = wrapMap[ tag ] || wrapMap._default;
- tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
-
- // Descend through wrappers to the right content
- j = wrap[ 0 ];
- while ( j-- ) {
- tmp = tmp.lastChild;
- }
-
- // Support: Android <=4.0 only, PhantomJS 1 only
- // push.apply(_, arraylike) throws on ancient WebKit
- jQuery.merge( nodes, tmp.childNodes );
-
- // Remember the top-level container
- tmp = fragment.firstChild;
-
- // Ensure the created nodes are orphaned (#12392)
- tmp.textContent = "";
- }
- }
- }
-
- // Remove wrapper from fragment
- fragment.textContent = "";
-
- i = 0;
- while ( ( elem = nodes[ i++ ] ) ) {
-
- // Skip elements already in the context collection (trac-4087)
- if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
- if ( ignored ) {
- ignored.push( elem );
- }
- continue;
- }
-
- attached = isAttached( elem );
-
- // Append to fragment
- tmp = getAll( fragment.appendChild( elem ), "script" );
-
- // Preserve script evaluation history
- if ( attached ) {
- setGlobalEval( tmp );
- }
-
- // Capture executables
- if ( scripts ) {
- j = 0;
- while ( ( elem = tmp[ j++ ] ) ) {
- if ( rscriptType.test( elem.type || "" ) ) {
- scripts.push( elem );
- }
- }
- }
- }
-
- return fragment;
-}
-
-
-var
- rkeyEvent = /^key/,
- rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
- rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
-
-function returnTrue() {
- return true;
-}
-
-function returnFalse() {
- return false;
-}
-
-// Support: IE <=9 - 11+
-// focus() and blur() are
asynchronous, except when they are no-op. -// So expect focus to be synchronous when the element is already active, -// and blur to be synchronous when the element is not already active. -// (focus and blur are always synchronous in other supported browsers, -// this just defines when we can count on it). -function expectSync( elem, type ) { - return ( elem === safeActiveElement() ) === ( type === "focus" ); -} - -// Support: IE <=9 only -// Accessing document.activeElement can throw unexpectedly -// https://bugs.jquery.com/ticket/13393 -function safeActiveElement() { - try { - return document.activeElement; - } catch ( err ) { } -} - -function on( elem, types, selector, data, fn, one ) { - var origFn, type; - - // Types can be a map of types/handlers - if ( typeof types === "object" ) { - - // ( types-Object, selector, data ) - if ( typeof selector !== "string" ) { - - // ( types-Object, data ) - data = data || selector; - selector = undefined; - } - for ( type in types ) { - on( elem, type, selector, data, types[ type ], one ); - } - return elem; - } - - if ( data == null && fn == null ) { - - // ( types, fn ) - fn = selector; - data = selector = undefined; - } else if ( fn == null ) { - if ( typeof selector === "string" ) { - - // ( types, selector, fn ) - fn = data; - data = undefined; - } else { - - // ( types, data, fn ) - fn = data; - data = selector; - selector = undefined; - } - } - if ( fn === false ) { - fn = returnFalse; - } else if ( !fn ) { - return elem; - } - - if ( one === 1 ) { - origFn = fn; - fn = function( event ) { - - // Can use an empty set, since event contains the info - jQuery().off( event ); - return origFn.apply( this, arguments ); - }; - - // Use same guid so caller can remove using origFn - fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); - } - return elem.each( function() { - jQuery.event.add( this, types, fn, data, selector ); - } ); -} - -/* - * Helper functions for managing events -- not part of the public interface. - * Props to Dean Edwards' addEvent library for many of the ideas. - */ -jQuery.event = { - - global: {}, - - add: function( elem, types, handler, data, selector ) { - - var handleObjIn, eventHandle, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.get( elem ); - - // Only attach events to objects that accept data - if ( !acceptData( elem ) ) { - return; - } - - // Caller can pass in an object of custom data in lieu of the handler - if ( handler.handler ) { - handleObjIn = handler; - handler = handleObjIn.handler; - selector = handleObjIn.selector; - } - - // Ensure that invalid selectors throw exceptions at attach time - // Evaluate against documentElement in case elem is a non-element node (e.g., document) - if ( selector ) { - jQuery.find.matchesSelector( documentElement, selector ); - } - - // Make sure that the handler has a unique ID, used to find/remove it later - if ( !handler.guid ) { - handler.guid = jQuery.guid++; - } - - // Init the element's event structure and main handler, if this is the first - if ( !( events = elemData.events ) ) { - events = elemData.events = Object.create( null ); - } - if ( !( eventHandle = elemData.handle ) ) { - eventHandle = elemData.handle = function( e ) { - - // Discard the second event of a jQuery.event.trigger() and - // when an event is called after a page has unloaded - return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
- jQuery.event.dispatch.apply( elem, arguments ) : undefined; - }; - } - - // Handle multiple events separated by a space - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // There *must* be a type, no attaching namespace-only handlers - if ( !type ) { - continue; - } - - // If event changes its type, use the special event handlers for the changed type - special = jQuery.event.special[ type ] || {}; - - // If selector defined, determine special event api type, otherwise given type - type = ( selector ? special.delegateType : special.bindType ) || type; - - // Update special based on newly reset type - special = jQuery.event.special[ type ] || {}; - - // handleObj is passed to all event handlers - handleObj = jQuery.extend( { - type: type, - origType: origType, - data: data, - handler: handler, - guid: handler.guid, - selector: selector, - needsContext: selector && jQuery.expr.match.needsContext.test( selector ), - namespace: namespaces.join( "." ) - }, handleObjIn ); - - // Init the event handler queue if we're the first - if ( !( handlers = events[ type ] ) ) { - handlers = events[ type ] = []; - handlers.delegateCount = 0; - - // Only use addEventListener if the special events handler returns false - if ( !special.setup || - special.setup.call( elem, data, namespaces, eventHandle ) === false ) { - - if ( elem.addEventListener ) { - elem.addEventListener( type, eventHandle ); - } - } - } - - if ( special.add ) { - special.add.call( elem, handleObj ); - - if ( !handleObj.handler.guid ) { - handleObj.handler.guid = handler.guid; - } - } - - // Add to the element's handler list, delegates in front - if ( selector ) { - handlers.splice( handlers.delegateCount++, 0, handleObj ); - } else { - handlers.push( handleObj ); - } - - // Keep track of which events have ever been used, for event optimization - jQuery.event.global[ type ] = true; - } - - }, - - // Detach an event or set of events from an element - remove: function( elem, types, handler, selector, mappedTypes ) { - - var j, origCount, tmp, - events, t, handleObj, - special, handlers, type, namespaces, origType, - elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); - - if ( !elemData || !( events = elemData.events ) ) { - return; - } - - // Once for each type.namespace in types; type may be omitted - types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; - t = types.length; - while ( t-- ) { - tmp = rtypenamespace.exec( types[ t ] ) || []; - type = origType = tmp[ 1 ]; - namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); - - // Unbind all events (on this namespace, if provided) for the element - if ( !type ) { - for ( type in events ) { - jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); - } - continue; - } - - special = jQuery.event.special[ type ] || {}; - type = ( selector ? 
special.delegateType : special.bindType ) || type; - handlers = events[ type ] || []; - tmp = tmp[ 2 ] && - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); - - // Remove matching events - origCount = j = handlers.length; - while ( j-- ) { - handleObj = handlers[ j ]; - - if ( ( mappedTypes || origType === handleObj.origType ) && - ( !handler || handler.guid === handleObj.guid ) && - ( !tmp || tmp.test( handleObj.namespace ) ) && - ( !selector || selector === handleObj.selector || - selector === "**" && handleObj.selector ) ) { - handlers.splice( j, 1 ); - - if ( handleObj.selector ) { - handlers.delegateCount--; - } - if ( special.remove ) { - special.remove.call( elem, handleObj ); - } - } - } - - // Remove generic event handler if we removed something and no more handlers exist - // (avoids potential for endless recursion during removal of special event handlers) - if ( origCount && !handlers.length ) { - if ( !special.teardown || - special.teardown.call( elem, namespaces, elemData.handle ) === false ) { - - jQuery.removeEvent( elem, type, elemData.handle ); - } - - delete events[ type ]; - } - } - - // Remove data and the expando if it's no longer used - if ( jQuery.isEmptyObject( events ) ) { - dataPriv.remove( elem, "handle events" ); - } - }, - - dispatch: function( nativeEvent ) { - - var i, j, ret, matched, handleObj, handlerQueue, - args = new Array( arguments.length ), - - // Make a writable jQuery.Event from the native event object - event = jQuery.event.fix( nativeEvent ), - - handlers = ( - dataPriv.get( this, "events" ) || Object.create( null ) - )[ event.type ] || [], - special = jQuery.event.special[ event.type ] || {}; - - // Use the fix-ed jQuery.Event rather than the (read-only) native event - args[ 0 ] = event; - - for ( i = 1; i < arguments.length; i++ ) { - args[ i ] = arguments[ i ]; - } - - event.delegateTarget = this; - - // Call the preDispatch hook for the mapped type, and let it bail if desired - if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { - return; - } - - // Determine handlers - handlerQueue = jQuery.event.handlers.call( this, event, handlers ); - - // Run delegates first; they may want to stop propagation beneath us - i = 0; - while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { - event.currentTarget = matched.elem; - - j = 0; - while ( ( handleObj = matched.handlers[ j++ ] ) && - !event.isImmediatePropagationStopped() ) { - - // If the event is namespaced, then each handler is only invoked if it is - // specially universal or its namespaces are a superset of the event's. 
- if ( !event.rnamespace || handleObj.namespace === false || - event.rnamespace.test( handleObj.namespace ) ) { - - event.handleObj = handleObj; - event.data = handleObj.data; - - ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || - handleObj.handler ).apply( matched.elem, args ); - - if ( ret !== undefined ) { - if ( ( event.result = ret ) === false ) { - event.preventDefault(); - event.stopPropagation(); - } - } - } - } - } - - // Call the postDispatch hook for the mapped type - if ( special.postDispatch ) { - special.postDispatch.call( this, event ); - } - - return event.result; - }, - - handlers: function( event, handlers ) { - var i, handleObj, sel, matchedHandlers, matchedSelectors, - handlerQueue = [], - delegateCount = handlers.delegateCount, - cur = event.target; - - // Find delegate handlers - if ( delegateCount && - - // Support: IE <=9 - // Black-hole SVG instance trees (trac-13180) - cur.nodeType && - - // Support: Firefox <=42 - // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) - // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click - // Support: IE 11 only - // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) - !( event.type === "click" && event.button >= 1 ) ) { - - for ( ; cur !== this; cur = cur.parentNode || this ) { - - // Don't check non-elements (#13208) - // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) - if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { - matchedHandlers = []; - matchedSelectors = {}; - for ( i = 0; i < delegateCount; i++ ) { - handleObj = handlers[ i ]; - - // Don't conflict with Object.prototype properties (#13203) - sel = handleObj.selector + " "; - - if ( matchedSelectors[ sel ] === undefined ) { - matchedSelectors[ sel ] = handleObj.needsContext ? - jQuery( sel, this ).index( cur ) > -1 : - jQuery.find( sel, this, null, [ cur ] ).length; - } - if ( matchedSelectors[ sel ] ) { - matchedHandlers.push( handleObj ); - } - } - if ( matchedHandlers.length ) { - handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); - } - } - } - } - - // Add the remaining (directly-bound) handlers - cur = this; - if ( delegateCount < handlers.length ) { - handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); - } - - return handlerQueue; - }, - - addProp: function( name, hook ) { - Object.defineProperty( jQuery.Event.prototype, name, { - enumerable: true, - configurable: true, - - get: isFunction( hook ) ? - function() { - if ( this.originalEvent ) { - return hook( this.originalEvent ); - } - } : - function() { - if ( this.originalEvent ) { - return this.originalEvent[ name ]; - } - }, - - set: function( value ) { - Object.defineProperty( this, name, { - enumerable: true, - configurable: true, - writable: true, - value: value - } ); - } - } ); - }, - - fix: function( originalEvent ) { - return originalEvent[ jQuery.expando ] ? - originalEvent : - new jQuery.Event( originalEvent ); - }, - - special: { - load: { - - // Prevent triggered image.load events from bubbling to window.load - noBubble: true - }, - click: { - - // Utilize native event to ensure correct state for checkable inputs - setup: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. 
- var el = this || data; - - // Claim the first handler - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - // dataPriv.set( el, "click", ... ) - leverageNative( el, "click", returnTrue ); - } - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function( data ) { - - // For mutual compressibility with _default, replace `this` access with a local var. - // `|| data` is dead code meant only to preserve the variable through minification. - var el = this || data; - - // Force setup before triggering a click - if ( rcheckableType.test( el.type ) && - el.click && nodeName( el, "input" ) ) { - - leverageNative( el, "click" ); - } - - // Return non-false to allow normal event-path propagation - return true; - }, - - // For cross-browser consistency, suppress native .click() on links - // Also prevent it if we're currently inside a leveraged native-event stack - _default: function( event ) { - var target = event.target; - return rcheckableType.test( target.type ) && - target.click && nodeName( target, "input" ) && - dataPriv.get( target, "click" ) || - nodeName( target, "a" ); - } - }, - - beforeunload: { - postDispatch: function( event ) { - - // Support: Firefox 20+ - // Firefox doesn't alert if the returnValue field is not set. - if ( event.result !== undefined && event.originalEvent ) { - event.originalEvent.returnValue = event.result; - } - } - } - } -}; - -// Ensure the presence of an event listener that handles manually-triggered -// synthetic events by interrupting progress until reinvoked in response to -// *native* events that it fires directly, ensuring that state changes have -// already occurred before other listeners are invoked. -function leverageNative( el, type, expectSync ) { - - // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add - if ( !expectSync ) { - if ( dataPriv.get( el, type ) === undefined ) { - jQuery.event.add( el, type, returnTrue ); - } - return; - } - - // Register the controller as a special universal handler for all event namespaces - dataPriv.set( el, type, false ); - jQuery.event.add( el, type, { - namespace: false, - handler: function( event ) { - var notAsync, result, - saved = dataPriv.get( this, type ); - - if ( ( event.isTrigger & 1 ) && this[ type ] ) { - - // Interrupt processing of the outer synthetic .trigger()ed event - // Saved data should be false in such cases, but might be a leftover capture object - // from an async native handler (gh-4350) - if ( !saved.length ) { - - // Store arguments for use when handling the inner native event - // There will always be at least one argument (an event object), so this array - // will not be confused with a leftover capture object. 
- saved = slice.call( arguments ); - dataPriv.set( this, type, saved ); - - // Trigger the native event and capture its result - // Support: IE <=9 - 11+ - // focus() and blur() are asynchronous - notAsync = expectSync( this, type ); - this[ type ](); - result = dataPriv.get( this, type ); - if ( saved !== result || notAsync ) { - dataPriv.set( this, type, false ); - } else { - result = {}; - } - if ( saved !== result ) { - - // Cancel the outer synthetic event - event.stopImmediatePropagation(); - event.preventDefault(); - return result.value; - } - - // If this is an inner synthetic event for an event with a bubbling surrogate - // (focus or blur), assume that the surrogate already propagated from triggering the - // native event and prevent that from happening again here. - // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the - // bubbling surrogate propagates *after* the non-bubbling base), but that seems - // less bad than duplication. - } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { - event.stopPropagation(); - } - - // If this is a native event triggered above, everything is now in order - // Fire an inner synthetic event with the original arguments - } else if ( saved.length ) { - - // ...and capture the result - dataPriv.set( this, type, { - value: jQuery.event.trigger( - - // Support: IE <=9 - 11+ - // Extend with the prototype to reset the above stopImmediatePropagation() - jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), - saved.slice( 1 ), - this - ) - } ); - - // Abort handling of the native event - event.stopImmediatePropagation(); - } - } - } ); -} - -jQuery.removeEvent = function( elem, type, handle ) { - - // This "if" is needed for plain objects - if ( elem.removeEventListener ) { - elem.removeEventListener( type, handle ); - } -}; - -jQuery.Event = function( src, props ) { - - // Allow instantiation without the 'new' keyword - if ( !( this instanceof jQuery.Event ) ) { - return new jQuery.Event( src, props ); - } - - // Event object - if ( src && src.type ) { - this.originalEvent = src; - this.type = src.type; - - // Events bubbling up the document may have been marked as prevented - // by a handler lower down the tree; reflect the correct value. - this.isDefaultPrevented = src.defaultPrevented || - src.defaultPrevented === undefined && - - // Support: Android <=2.3 only - src.returnValue === false ? - returnTrue : - returnFalse; - - // Create target properties - // Support: Safari <=6 - 7 only - // Target should not be a text node (#504, #13143) - this.target = ( src.target && src.target.nodeType === 3 ) ? 
- src.target.parentNode : - src.target; - - this.currentTarget = src.currentTarget; - this.relatedTarget = src.relatedTarget; - - // Event type - } else { - this.type = src; - } - - // Put explicitly provided properties onto the event object - if ( props ) { - jQuery.extend( this, props ); - } - - // Create a timestamp if incoming event doesn't have one - this.timeStamp = src && src.timeStamp || Date.now(); - - // Mark it as fixed - this[ jQuery.expando ] = true; -}; - -// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding -// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html -jQuery.Event.prototype = { - constructor: jQuery.Event, - isDefaultPrevented: returnFalse, - isPropagationStopped: returnFalse, - isImmediatePropagationStopped: returnFalse, - isSimulated: false, - - preventDefault: function() { - var e = this.originalEvent; - - this.isDefaultPrevented = returnTrue; - - if ( e && !this.isSimulated ) { - e.preventDefault(); - } - }, - stopPropagation: function() { - var e = this.originalEvent; - - this.isPropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopPropagation(); - } - }, - stopImmediatePropagation: function() { - var e = this.originalEvent; - - this.isImmediatePropagationStopped = returnTrue; - - if ( e && !this.isSimulated ) { - e.stopImmediatePropagation(); - } - - this.stopPropagation(); - } -}; - -// Includes all common event props including KeyEvent and MouseEvent specific props -jQuery.each( { - altKey: true, - bubbles: true, - cancelable: true, - changedTouches: true, - ctrlKey: true, - detail: true, - eventPhase: true, - metaKey: true, - pageX: true, - pageY: true, - shiftKey: true, - view: true, - "char": true, - code: true, - charCode: true, - key: true, - keyCode: true, - button: true, - buttons: true, - clientX: true, - clientY: true, - offsetX: true, - offsetY: true, - pointerId: true, - pointerType: true, - screenX: true, - screenY: true, - targetTouches: true, - toElement: true, - touches: true, - - which: function( event ) { - var button = event.button; - - // Add which for key events - if ( event.which == null && rkeyEvent.test( event.type ) ) { - return event.charCode != null ? event.charCode : event.keyCode; - } - - // Add which for click: 1 === left; 2 === middle; 3 === right - if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { - if ( button & 1 ) { - return 1; - } - - if ( button & 2 ) { - return 3; - } - - if ( button & 4 ) { - return 2; - } - - return 0; - } - - return event.which; - } -}, jQuery.event.addProp ); - -jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { - jQuery.event.special[ type ] = { - - // Utilize native event if possible so blur/focus sequence is correct - setup: function() { - - // Claim the first handler - // dataPriv.set( this, "focus", ... ) - // dataPriv.set( this, "blur", ... ) - leverageNative( this, type, expectSync ); - - // Return false to allow normal processing in the caller - return false; - }, - trigger: function() { - - // Force setup before trigger - leverageNative( this, type ); - - // Return non-false to allow normal event-path propagation - return true; - }, - - delegateType: delegateType - }; -} ); - -// Create mouseenter/leave events using mouseover/out and event-time checks -// so that event delegation works in jQuery. 
-// Do the same for pointerenter/pointerleave and pointerover/pointerout
-//
-// Support: Safari 7 only
-// Safari sends mouseenter too often; see:
-// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
-// for the description of the bug (it existed in older Chrome versions as well).
-jQuery.each( {
- mouseenter: "mouseover",
- mouseleave: "mouseout",
- pointerenter: "pointerover",
- pointerleave: "pointerout"
-}, function( orig, fix ) {
- jQuery.event.special[ orig ] = {
- delegateType: fix,
- bindType: fix,
-
- handle: function( event ) {
- var ret,
- target = this,
- related = event.relatedTarget,
- handleObj = event.handleObj;
-
- // For mouseenter/leave call the handler if related is outside the target.
- // NB: No relatedTarget if the mouse left/entered the browser window
- if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
- event.type = handleObj.origType;
- ret = handleObj.handler.apply( this, arguments );
- event.type = fix;
- }
- return ret;
- }
- };
-} );
-
-jQuery.fn.extend( {
-
- on: function( types, selector, data, fn ) {
- return on( this, types, selector, data, fn );
- },
- one: function( types, selector, data, fn ) {
- return on( this, types, selector, data, fn, 1 );
- },
- off: function( types, selector, fn ) {
- var handleObj, type;
- if ( types && types.preventDefault && types.handleObj ) {
-
- // ( event ) dispatched jQuery.Event
- handleObj = types.handleObj;
- jQuery( types.delegateTarget ).off(
- handleObj.namespace ?
- handleObj.origType + "." + handleObj.namespace :
- handleObj.origType,
- handleObj.selector,
- handleObj.handler
- );
- return this;
- }
- if ( typeof types === "object" ) {
-
- // ( types-object [, selector] )
- for ( type in types ) {
- this.off( type, selector, types[ type ] );
- }
- return this;
- }
- if ( selector === false || typeof selector === "function" ) {
-
- // ( types [, fn] )
- fn = selector;
- selector = undefined;
- }
- if ( fn === false ) {
- fn = returnFalse;
- }
- return this.each( function() {
- jQuery.event.remove( this, types, fn, selector );
- } );
- }
-} );
-
-
-var
-
- // Support: IE <=10 - 11, Edge 12 - 13 only
- // In IE/Edge using regex groups here causes severe slowdowns.
- // See https://connect.microsoft.com/IE/feedback/details/1736512/
- rnoInnerhtml = /<script|<style|<link/i,
-
- // checked="checked" or checked
- rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
-
- rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
-
-// Prefer a tbody over its parent table for containing new rows
-function manipulationTarget( elem, content ) {
- if ( nodeName( elem, "table" ) &&
- nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
-
- return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
- }
-
- return elem;
-}
-
-// Replace/restore the type attribute of script elements for safe DOM manipulation
-function disableScript( elem ) {
- elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
- return elem;
-}
-function restoreScript( elem ) {
- if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
- elem.type = elem.type.slice( 5 );
- } else {
- elem.removeAttribute( "type" );
- }
-
- return elem;
-}
-
-function cloneCopyEvent( src, dest ) {
- var i, l, type, pdataOld, udataOld, udataCur, events;
-
- if ( dest.nodeType !== 1 ) {
- return;
- }
-
- // 1. Copy private data: events, handlers, etc.
- if ( dataPriv.hasData( src ) ) { - pdataOld = dataPriv.get( src ); - events = pdataOld.events; - - if ( events ) { - dataPriv.remove( dest, "handle events" ); - - for ( type in events ) { - for ( i = 0, l = events[ type ].length; i < l; i++ ) { - jQuery.event.add( dest, type, events[ type ][ i ] ); - } - } - } - } - - // 2. Copy user data - if ( dataUser.hasData( src ) ) { - udataOld = dataUser.access( src ); - udataCur = jQuery.extend( {}, udataOld ); - - dataUser.set( dest, udataCur ); - } -} - -// Fix IE bugs, see support tests -function fixInput( src, dest ) { - var nodeName = dest.nodeName.toLowerCase(); - - // Fails to persist the checked state of a cloned checkbox or radio button. - if ( nodeName === "input" && rcheckableType.test( src.type ) ) { - dest.checked = src.checked; - - // Fails to return the selected option to the default selected state when cloning options - } else if ( nodeName === "input" || nodeName === "textarea" ) { - dest.defaultValue = src.defaultValue; - } -} - -function domManip( collection, args, callback, ignored ) { - - // Flatten any nested arrays - args = flat( args ); - - var fragment, first, scripts, hasScripts, node, doc, - i = 0, - l = collection.length, - iNoClone = l - 1, - value = args[ 0 ], - valueIsFunction = isFunction( value ); - - // We can't cloneNode fragments that contain checked, in WebKit - if ( valueIsFunction || - ( l > 1 && typeof value === "string" && - !support.checkClone && rchecked.test( value ) ) ) { - return collection.each( function( index ) { - var self = collection.eq( index ); - if ( valueIsFunction ) { - args[ 0 ] = value.call( this, index, self.html() ); - } - domManip( self, args, callback, ignored ); - } ); - } - - if ( l ) { - fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); - first = fragment.firstChild; - - if ( fragment.childNodes.length === 1 ) { - fragment = first; - } - - // Require either new content or an interest in ignored elements to invoke the callback - if ( first || ignored ) { - scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); - hasScripts = scripts.length; - - // Use the original fragment for the last item - // instead of the first because it can end up - // being emptied incorrectly in certain situations (#8070). 
- for ( ; i < l; i++ ) { - node = fragment; - - if ( i !== iNoClone ) { - node = jQuery.clone( node, true, true ); - - // Keep references to cloned scripts for later restoration - if ( hasScripts ) { - - // Support: Android <=4.0 only, PhantomJS 1 only - // push.apply(_, arraylike) throws on ancient WebKit - jQuery.merge( scripts, getAll( node, "script" ) ); - } - } - - callback.call( collection[ i ], node, i ); - } - - if ( hasScripts ) { - doc = scripts[ scripts.length - 1 ].ownerDocument; - - // Reenable scripts - jQuery.map( scripts, restoreScript ); - - // Evaluate executable scripts on first document insertion - for ( i = 0; i < hasScripts; i++ ) { - node = scripts[ i ]; - if ( rscriptType.test( node.type || "" ) && - !dataPriv.access( node, "globalEval" ) && - jQuery.contains( doc, node ) ) { - - if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { - - // Optional AJAX dependency, but won't run scripts if not present - if ( jQuery._evalUrl && !node.noModule ) { - jQuery._evalUrl( node.src, { - nonce: node.nonce || node.getAttribute( "nonce" ) - }, doc ); - } - } else { - DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); - } - } - } - } - } - } - - return collection; -} - -function remove( elem, selector, keepData ) { - var node, - nodes = selector ? jQuery.filter( selector, elem ) : elem, - i = 0; - - for ( ; ( node = nodes[ i ] ) != null; i++ ) { - if ( !keepData && node.nodeType === 1 ) { - jQuery.cleanData( getAll( node ) ); - } - - if ( node.parentNode ) { - if ( keepData && isAttached( node ) ) { - setGlobalEval( getAll( node, "script" ) ); - } - node.parentNode.removeChild( node ); - } - } - - return elem; -} - -jQuery.extend( { - htmlPrefilter: function( html ) { - return html; - }, - - clone: function( elem, dataAndEvents, deepDataAndEvents ) { - var i, l, srcElements, destElements, - clone = elem.cloneNode( true ), - inPage = isAttached( elem ); - - // Fix IE cloning issues - if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && - !jQuery.isXMLDoc( elem ) ) { - - // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 - destElements = getAll( clone ); - srcElements = getAll( elem ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - fixInput( srcElements[ i ], destElements[ i ] ); - } - } - - // Copy the events from the original to the clone - if ( dataAndEvents ) { - if ( deepDataAndEvents ) { - srcElements = srcElements || getAll( elem ); - destElements = destElements || getAll( clone ); - - for ( i = 0, l = srcElements.length; i < l; i++ ) { - cloneCopyEvent( srcElements[ i ], destElements[ i ] ); - } - } else { - cloneCopyEvent( elem, clone ); - } - } - - // Preserve script evaluation history - destElements = getAll( clone, "script" ); - if ( destElements.length > 0 ) { - setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); - } - - // Return the cloned set - return clone; - }, - - cleanData: function( elems ) { - var data, elem, type, - special = jQuery.event.special, - i = 0; - - for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { - if ( acceptData( elem ) ) { - if ( ( data = elem[ dataPriv.expando ] ) ) { - if ( data.events ) { - for ( type in data.events ) { - if ( special[ type ] ) { - jQuery.event.remove( elem, type ); - - // This is a shortcut to avoid jQuery.event.remove's overhead - } else { - jQuery.removeEvent( elem, type, data.handle ); - } - } - } - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove 
- elem[ dataPriv.expando ] = undefined; - } - if ( elem[ dataUser.expando ] ) { - - // Support: Chrome <=35 - 45+ - // Assign undefined instead of using delete, see Data#remove - elem[ dataUser.expando ] = undefined; - } - } - } - } -} ); - -jQuery.fn.extend( { - detach: function( selector ) { - return remove( this, selector, true ); - }, - - remove: function( selector ) { - return remove( this, selector ); - }, - - text: function( value ) { - return access( this, function( value ) { - return value === undefined ? - jQuery.text( this ) : - this.empty().each( function() { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - this.textContent = value; - } - } ); - }, null, value, arguments.length ); - }, - - append: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.appendChild( elem ); - } - } ); - }, - - prepend: function() { - return domManip( this, arguments, function( elem ) { - if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { - var target = manipulationTarget( this, elem ); - target.insertBefore( elem, target.firstChild ); - } - } ); - }, - - before: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this ); - } - } ); - }, - - after: function() { - return domManip( this, arguments, function( elem ) { - if ( this.parentNode ) { - this.parentNode.insertBefore( elem, this.nextSibling ); - } - } ); - }, - - empty: function() { - var elem, - i = 0; - - for ( ; ( elem = this[ i ] ) != null; i++ ) { - if ( elem.nodeType === 1 ) { - - // Prevent memory leaks - jQuery.cleanData( getAll( elem, false ) ); - - // Remove any remaining nodes - elem.textContent = ""; - } - } - - return this; - }, - - clone: function( dataAndEvents, deepDataAndEvents ) { - dataAndEvents = dataAndEvents == null ? false : dataAndEvents; - deepDataAndEvents = deepDataAndEvents == null ? 
dataAndEvents : deepDataAndEvents; - - return this.map( function() { - return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); - } ); - }, - - html: function( value ) { - return access( this, function( value ) { - var elem = this[ 0 ] || {}, - i = 0, - l = this.length; - - if ( value === undefined && elem.nodeType === 1 ) { - return elem.innerHTML; - } - - // See if we can take a shortcut and just use innerHTML - if ( typeof value === "string" && !rnoInnerhtml.test( value ) && - !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { - - value = jQuery.htmlPrefilter( value ); - - try { - for ( ; i < l; i++ ) { - elem = this[ i ] || {}; - - // Remove element nodes and prevent memory leaks - if ( elem.nodeType === 1 ) { - jQuery.cleanData( getAll( elem, false ) ); - elem.innerHTML = value; - } - } - - elem = 0; - - // If using innerHTML throws an exception, use the fallback method - } catch ( e ) {} - } - - if ( elem ) { - this.empty().append( value ); - } - }, null, value, arguments.length ); - }, - - replaceWith: function() { - var ignored = []; - - // Make the changes, replacing each non-ignored context element with the new content - return domManip( this, arguments, function( elem ) { - var parent = this.parentNode; - - if ( jQuery.inArray( this, ignored ) < 0 ) { - jQuery.cleanData( getAll( this ) ); - if ( parent ) { - parent.replaceChild( elem, this ); - } - } - - // Force callback invocation - }, ignored ); - } -} ); - -jQuery.each( { - appendTo: "append", - prependTo: "prepend", - insertBefore: "before", - insertAfter: "after", - replaceAll: "replaceWith" -}, function( name, original ) { - jQuery.fn[ name ] = function( selector ) { - var elems, - ret = [], - insert = jQuery( selector ), - last = insert.length - 1, - i = 0; - - for ( ; i <= last; i++ ) { - elems = i === last ? this : this.clone( true ); - jQuery( insert[ i ] )[ original ]( elems ); - - // Support: Android <=4.0 only, PhantomJS 1 only - // .get() because push.apply(_, arraylike) throws on ancient WebKit - push.apply( ret, elems.get() ); - } - - return this.pushStack( ret ); - }; -} ); -var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); - -var getStyles = function( elem ) { - - // Support: IE <=11 only, Firefox <=30 (#15098, #14150) - // IE throws on elements created in popups - // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" - var view = elem.ownerDocument.defaultView; - - if ( !view || !view.opener ) { - view = window; - } - - return view.getComputedStyle( elem ); - }; - -var swap = function( elem, options, callback ) { - var ret, name, - old = {}; - - // Remember the old values, and insert the new ones - for ( name in options ) { - old[ name ] = elem.style[ name ]; - elem.style[ name ] = options[ name ]; - } - - ret = callback.call( elem ); - - // Revert the old values - for ( name in options ) { - elem.style[ name ] = old[ name ]; - } - - return ret; -}; - - -var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); - - - -( function() { - - // Executing both pixelPosition & boxSizingReliable tests require only one layout - // so they're executed at the same time to save the second computation. 
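-	// Illustrative usage sketch (an assumed example, not taken from this
-	// file): each support flag below is exposed as a lazily-computed
-	// function rather than a plain boolean, e.g.
-	//
-	//     if ( support.pixelPosition() ) {
-	//         // this browser resolves percentage top/left to pixel values
-	//     }
-	//
-	// The first such call runs computeStyleTests() once; later calls return
-	// the cached result because `div` is nulled after the tests complete.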
- function computeStyleTests() { - - // This is a singleton, we need to execute it only once - if ( !div ) { - return; - } - - container.style.cssText = "position:absolute;left:-11111px;width:60px;" + - "margin-top:1px;padding:0;border:0"; - div.style.cssText = - "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + - "margin:auto;border:1px;padding:1px;" + - "width:60%;top:1%"; - documentElement.appendChild( container ).appendChild( div ); - - var divStyle = window.getComputedStyle( div ); - pixelPositionVal = divStyle.top !== "1%"; - - // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 - reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; - - // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 - // Some styles come back with percentage values, even though they shouldn't - div.style.right = "60%"; - pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; - - // Support: IE 9 - 11 only - // Detect misreporting of content dimensions for box-sizing:border-box elements - boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; - - // Support: IE 9 only - // Detect overflow:scroll screwiness (gh-3699) - // Support: Chrome <=64 - // Don't get tricked when zoom affects offsetWidth (gh-4029) - div.style.position = "absolute"; - scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; - - documentElement.removeChild( container ); - - // Nullify the div so it wouldn't be stored in the memory and - // it will also be a sign that checks already performed - div = null; - } - - function roundPixelMeasures( measure ) { - return Math.round( parseFloat( measure ) ); - } - - var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, - reliableTrDimensionsVal, reliableMarginLeftVal, - container = document.createElement( "div" ), - div = document.createElement( "div" ); - - // Finish early in limited (non-browser) environments - if ( !div.style ) { - return; - } - - // Support: IE <=9 - 11 only - // Style of cloned element affects source element cloned (#8908) - div.style.backgroundClip = "content-box"; - div.cloneNode( true ).style.backgroundClip = ""; - support.clearCloneStyle = div.style.backgroundClip === "content-box"; - - jQuery.extend( support, { - boxSizingReliable: function() { - computeStyleTests(); - return boxSizingReliableVal; - }, - pixelBoxStyles: function() { - computeStyleTests(); - return pixelBoxStylesVal; - }, - pixelPosition: function() { - computeStyleTests(); - return pixelPositionVal; - }, - reliableMarginLeft: function() { - computeStyleTests(); - return reliableMarginLeftVal; - }, - scrollboxSize: function() { - computeStyleTests(); - return scrollboxSizeVal; - }, - - // Support: IE 9 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Behavior in IE 9 is more subtle than in newer versions & it passes - // some versions of this test; make sure not to make it pass there! 
- reliableTrDimensions: function() { - var table, tr, trChild, trStyle; - if ( reliableTrDimensionsVal == null ) { - table = document.createElement( "table" ); - tr = document.createElement( "tr" ); - trChild = document.createElement( "div" ); - - table.style.cssText = "position:absolute;left:-11111px"; - tr.style.height = "1px"; - trChild.style.height = "9px"; - - documentElement - .appendChild( table ) - .appendChild( tr ) - .appendChild( trChild ); - - trStyle = window.getComputedStyle( tr ); - reliableTrDimensionsVal = parseInt( trStyle.height ) > 3; - - documentElement.removeChild( table ); - } - return reliableTrDimensionsVal; - } - } ); -} )(); - - -function curCSS( elem, name, computed ) { - var width, minWidth, maxWidth, ret, - - // Support: Firefox 51+ - // Retrieving style before computed somehow - // fixes an issue with getting wrong values - // on detached elements - style = elem.style; - - computed = computed || getStyles( elem ); - - // getPropertyValue is needed for: - // .css('filter') (IE 9 only, #12537) - // .css('--customProperty) (#3144) - if ( computed ) { - ret = computed.getPropertyValue( name ) || computed[ name ]; - - if ( ret === "" && !isAttached( elem ) ) { - ret = jQuery.style( elem, name ); - } - - // A tribute to the "awesome hack by Dean Edwards" - // Android Browser returns percentage for some values, - // but width seems to be reliably pixels. - // This is against the CSSOM draft spec: - // https://drafts.csswg.org/cssom/#resolved-values - if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { - - // Remember the original values - width = style.width; - minWidth = style.minWidth; - maxWidth = style.maxWidth; - - // Put in the new values to get a computed value out - style.minWidth = style.maxWidth = style.width = ret; - ret = computed.width; - - // Revert the changed values - style.width = width; - style.minWidth = minWidth; - style.maxWidth = maxWidth; - } - } - - return ret !== undefined ? - - // Support: IE <=9 - 11 only - // IE returns zIndex value as an integer. - ret + "" : - ret; -} - - -function addGetHookIf( conditionFn, hookFn ) { - - // Define the hook, we'll check on the first run if it's really needed. - return { - get: function() { - if ( conditionFn() ) { - - // Hook not needed (or it's not possible to use it due - // to missing dependency), remove it. - delete this.get; - return; - } - - // Hook needed; redefine it so that the support test is not executed again. 
- return ( this.get = hookFn ).apply( this, arguments ); - } - }; -} - - -var cssPrefixes = [ "Webkit", "Moz", "ms" ], - emptyStyle = document.createElement( "div" ).style, - vendorProps = {}; - -// Return a vendor-prefixed property or undefined -function vendorPropName( name ) { - - // Check for vendor prefixed names - var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), - i = cssPrefixes.length; - - while ( i-- ) { - name = cssPrefixes[ i ] + capName; - if ( name in emptyStyle ) { - return name; - } - } -} - -// Return a potentially-mapped jQuery.cssProps or vendor prefixed property -function finalPropName( name ) { - var final = jQuery.cssProps[ name ] || vendorProps[ name ]; - - if ( final ) { - return final; - } - if ( name in emptyStyle ) { - return name; - } - return vendorProps[ name ] = vendorPropName( name ) || name; -} - - -var - - // Swappable if display is none or starts with table - // except "table", "table-cell", or "table-caption" - // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display - rdisplayswap = /^(none|table(?!-c[ea]).+)/, - rcustomProp = /^--/, - cssShow = { position: "absolute", visibility: "hidden", display: "block" }, - cssNormalTransform = { - letterSpacing: "0", - fontWeight: "400" - }; - -function setPositiveNumber( _elem, value, subtract ) { - - // Any relative (+/-) values have already been - // normalized at this point - var matches = rcssNum.exec( value ); - return matches ? - - // Guard against undefined "subtract", e.g., when used as in cssHooks - Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : - value; -} - -function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { - var i = dimension === "width" ? 1 : 0, - extra = 0, - delta = 0; - - // Adjustment may not be necessary - if ( box === ( isBorderBox ? 
"border" : "content" ) ) { - return 0; - } - - for ( ; i < 4; i += 2 ) { - - // Both box models exclude margin - if ( box === "margin" ) { - delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); - } - - // If we get here with a content-box, we're seeking "padding" or "border" or "margin" - if ( !isBorderBox ) { - - // Add padding - delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - - // For "border" or "margin", add border - if ( box !== "padding" ) { - delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - - // But still keep track of it otherwise - } else { - extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - - // If we get here with a border-box (content + padding + border), we're seeking "content" or - // "padding" or "margin" - } else { - - // For "content", subtract padding - if ( box === "content" ) { - delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); - } - - // For "content" or "padding", subtract border - if ( box !== "margin" ) { - delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); - } - } - } - - // Account for positive content-box scroll gutter when requested by providing computedVal - if ( !isBorderBox && computedVal >= 0 ) { - - // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border - // Assuming integer scroll gutter, subtract the rest and round down - delta += Math.max( 0, Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - computedVal - - delta - - extra - - 0.5 - - // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter - // Use an explicit zero to avoid NaN (gh-3964) - ) ) || 0; - } - - return delta; -} - -function getWidthOrHeight( elem, dimension, extra ) { - - // Start with computed style - var styles = getStyles( elem ), - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). - // Fake content-box until we know it's needed to know the true value. - boxSizingNeeded = !support.boxSizingReliable() || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - valueIsBorderBox = isBorderBox, - - val = curCSS( elem, dimension, styles ), - offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); - - // Support: Firefox <=54 - // Return a confounding non-pixel value or feign ignorance, as appropriate. - if ( rnumnonpx.test( val ) ) { - if ( !extra ) { - return val; - } - val = "auto"; - } - - - // Support: IE 9 - 11 only - // Use offsetWidth/offsetHeight for when box sizing is unreliable. - // In those cases, the computed value can be trusted to be border-box. - if ( ( !support.boxSizingReliable() && isBorderBox || - - // Support: IE 10 - 11+, Edge 15 - 18+ - // IE/Edge misreport `getComputedStyle` of table rows with width/height - // set in CSS while `offset*` properties report correct values. - // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
- !support.reliableTrDimensions() && nodeName( elem, "tr" ) || - - // Fall back to offsetWidth/offsetHeight when value is "auto" - // This happens for inline elements with no explicit setting (gh-3571) - val === "auto" || - - // Support: Android <=4.1 - 4.3 only - // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) - !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && - - // Make sure the element is visible & connected - elem.getClientRects().length ) { - - isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; - - // Where available, offsetWidth/offsetHeight approximate border box dimensions. - // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the - // retrieved value as a content box dimension. - valueIsBorderBox = offsetProp in elem; - if ( valueIsBorderBox ) { - val = elem[ offsetProp ]; - } - } - - // Normalize "" and auto - val = parseFloat( val ) || 0; - - // Adjust for the element's box model - return ( val + - boxModelAdjustment( - elem, - dimension, - extra || ( isBorderBox ? "border" : "content" ), - valueIsBorderBox, - styles, - - // Provide the current computed size to request scroll gutter calculation (gh-3589) - val - ) - ) + "px"; -} - -jQuery.extend( { - - // Add in style property hooks for overriding the default - // behavior of getting and setting a style property - cssHooks: { - opacity: { - get: function( elem, computed ) { - if ( computed ) { - - // We should always get a number back from opacity - var ret = curCSS( elem, "opacity" ); - return ret === "" ? "1" : ret; - } - } - } - }, - - // Don't automatically add "px" to these possibly-unitless properties - cssNumber: { - "animationIterationCount": true, - "columnCount": true, - "fillOpacity": true, - "flexGrow": true, - "flexShrink": true, - "fontWeight": true, - "gridArea": true, - "gridColumn": true, - "gridColumnEnd": true, - "gridColumnStart": true, - "gridRow": true, - "gridRowEnd": true, - "gridRowStart": true, - "lineHeight": true, - "opacity": true, - "order": true, - "orphans": true, - "widows": true, - "zIndex": true, - "zoom": true - }, - - // Add in properties whose names you wish to fix before - // setting or getting the value - cssProps: {}, - - // Get and set the style property on a DOM Node - style: function( elem, name, value, extra ) { - - // Don't set styles on text and comment nodes - if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { - return; - } - - // Make sure that we're working with the right name - var ret, type, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ), - style = elem.style; - - // Make sure that we're working with the right name. We don't - // want to query the value if it is a CSS custom property - // since they are user-defined. 
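-		// Illustrative calls (assumed examples, not taken from this file):
-		//
-		//     jQuery( elem ).css( "--accent", "#006" ); // set via setProperty
-		//     jQuery( elem ).css( "--accent" );         // read, name untouched
-		//
-		// Custom properties bypass the vendor-prefix lookup below precisely
-		// because their names are user-defined.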
- if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Gets hook for the prefixed version, then unprefixed version - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // Check if we're setting a value - if ( value !== undefined ) { - type = typeof value; - - // Convert "+=" or "-=" to relative numbers (#7345) - if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { - value = adjustCSS( elem, name, ret ); - - // Fixes bug #9237 - type = "number"; - } - - // Make sure that null and NaN values aren't set (#7116) - if ( value == null || value !== value ) { - return; - } - - // If a number was passed in, add the unit (except for certain CSS properties) - // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append - // "px" to a few hardcoded values. - if ( type === "number" && !isCustomProp ) { - value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); - } - - // background-* props affect original clone's values - if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { - style[ name ] = "inherit"; - } - - // If a hook was provided, use that value, otherwise just set the specified value - if ( !hooks || !( "set" in hooks ) || - ( value = hooks.set( elem, value, extra ) ) !== undefined ) { - - if ( isCustomProp ) { - style.setProperty( name, value ); - } else { - style[ name ] = value; - } - } - - } else { - - // If a hook was provided get the non-computed value from there - if ( hooks && "get" in hooks && - ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { - - return ret; - } - - // Otherwise just get the value from the style object - return style[ name ]; - } - }, - - css: function( elem, name, extra, styles ) { - var val, num, hooks, - origName = camelCase( name ), - isCustomProp = rcustomProp.test( name ); - - // Make sure that we're working with the right name. We don't - // want to modify the value if it is a CSS custom property - // since they are user-defined. - if ( !isCustomProp ) { - name = finalPropName( origName ); - } - - // Try prefixed name followed by the unprefixed name - hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; - - // If a hook was provided get the computed value from there - if ( hooks && "get" in hooks ) { - val = hooks.get( elem, true, extra ); - } - - // Otherwise, if a way to get the computed value exists, use that - if ( val === undefined ) { - val = curCSS( elem, name, styles ); - } - - // Convert "normal" to computed value - if ( val === "normal" && name in cssNormalTransform ) { - val = cssNormalTransform[ name ]; - } - - // Make numeric if forced or a qualifier was provided and val looks numeric - if ( extra === "" || extra ) { - num = parseFloat( val ); - return extra === true || isFinite( num ) ? num || 0 : val; - } - - return val; - } -} ); - -jQuery.each( [ "height", "width" ], function( _i, dimension ) { - jQuery.cssHooks[ dimension ] = { - get: function( elem, computed, extra ) { - if ( computed ) { - - // Certain elements can have dimension info if we invisibly show them - // but it must have a current display style that would benefit - return rdisplayswap.test( jQuery.css( elem, "display" ) ) && - - // Support: Safari 8+ - // Table columns in Safari have non-zero offsetWidth & zero - // getBoundingClientRect().width unless display is changed. - // Support: IE <=11 only - // Running getBoundingClientRect on a disconnected node - // in IE throws an error. 
- ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? - swap( elem, cssShow, function() { - return getWidthOrHeight( elem, dimension, extra ); - } ) : - getWidthOrHeight( elem, dimension, extra ); - } - }, - - set: function( elem, value, extra ) { - var matches, - styles = getStyles( elem ), - - // Only read styles.position if the test has a chance to fail - // to avoid forcing a reflow. - scrollboxSizeBuggy = !support.scrollboxSize() && - styles.position === "absolute", - - // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) - boxSizingNeeded = scrollboxSizeBuggy || extra, - isBorderBox = boxSizingNeeded && - jQuery.css( elem, "boxSizing", false, styles ) === "border-box", - subtract = extra ? - boxModelAdjustment( - elem, - dimension, - extra, - isBorderBox, - styles - ) : - 0; - - // Account for unreliable border-box dimensions by comparing offset* to computed and - // faking a content-box to get border and padding (gh-3699) - if ( isBorderBox && scrollboxSizeBuggy ) { - subtract -= Math.ceil( - elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - - parseFloat( styles[ dimension ] ) - - boxModelAdjustment( elem, dimension, "border", false, styles ) - - 0.5 - ); - } - - // Convert to pixels if value adjustment is needed - if ( subtract && ( matches = rcssNum.exec( value ) ) && - ( matches[ 3 ] || "px" ) !== "px" ) { - - elem.style[ dimension ] = value; - value = jQuery.css( elem, dimension ); - } - - return setPositiveNumber( elem, value, subtract ); - } - }; -} ); - -jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, - function( elem, computed ) { - if ( computed ) { - return ( parseFloat( curCSS( elem, "marginLeft" ) ) || - elem.getBoundingClientRect().left - - swap( elem, { marginLeft: 0 }, function() { - return elem.getBoundingClientRect().left; - } ) - ) + "px"; - } - } -); - -// These hooks are used by animate to expand properties -jQuery.each( { - margin: "", - padding: "", - border: "Width" -}, function( prefix, suffix ) { - jQuery.cssHooks[ prefix + suffix ] = { - expand: function( value ) { - var i = 0, - expanded = {}, - - // Assumes a single number if not a string - parts = typeof value === "string" ? value.split( " " ) : [ value ]; - - for ( ; i < 4; i++ ) { - expanded[ prefix + cssExpand[ i ] + suffix ] = - parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; - } - - return expanded; - } - }; - - if ( prefix !== "margin" ) { - jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; - } -} ); - -jQuery.fn.extend( { - css: function( name, value ) { - return access( this, function( elem, name, value ) { - var styles, len, - map = {}, - i = 0; - - if ( Array.isArray( name ) ) { - styles = getStyles( elem ); - len = name.length; - - for ( ; i < len; i++ ) { - map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); - } - - return map; - } - - return value !== undefined ? - jQuery.style( elem, name, value ) : - jQuery.css( elem, name ); - }, name, value, arguments.length > 1 ); - } -} ); - - -function Tween( elem, options, prop, end, easing ) { - return new Tween.prototype.init( elem, options, prop, end, easing ); -} -jQuery.Tween = Tween; - -Tween.prototype = { - constructor: Tween, - init: function( elem, options, prop, end, easing, unit ) { - this.elem = elem; - this.prop = prop; - this.easing = easing || jQuery.easing._default; - this.options = options; - this.start = this.now = this.cur(); - this.end = end; - this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); - }, - cur: function() { - var hooks = Tween.propHooks[ this.prop ]; - - return hooks && hooks.get ? - hooks.get( this ) : - Tween.propHooks._default.get( this ); - }, - run: function( percent ) { - var eased, - hooks = Tween.propHooks[ this.prop ]; - - if ( this.options.duration ) { - this.pos = eased = jQuery.easing[ this.easing ]( - percent, this.options.duration * percent, 0, 1, this.options.duration - ); - } else { - this.pos = eased = percent; - } - this.now = ( this.end - this.start ) * eased + this.start; - - if ( this.options.step ) { - this.options.step.call( this.elem, this.now, this ); - } - - if ( hooks && hooks.set ) { - hooks.set( this ); - } else { - Tween.propHooks._default.set( this ); - } - return this; - } -}; - -Tween.prototype.init.prototype = Tween.prototype; - -Tween.propHooks = { - _default: { - get: function( tween ) { - var result; - - // Use a property on the element directly when it is not a DOM element, - // or when there is no matching style property that exists. - if ( tween.elem.nodeType !== 1 || - tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { - return tween.elem[ tween.prop ]; - } - - // Passing an empty string as a 3rd parameter to .css will automatically - // attempt a parseFloat and fallback to a string if the parse fails. - // Simple values such as "10px" are parsed to Float; - // complex values such as "rotate(1rad)" are returned as-is. - result = jQuery.css( tween.elem, tween.prop, "" ); - - // Empty strings, null, undefined and "auto" are converted to 0. - return !result || result === "auto" ? 0 : result; - }, - set: function( tween ) { - - // Use step hook for back compat. - // Use cssHook if its there. - // Use .style if available and use plain properties where available. - if ( jQuery.fx.step[ tween.prop ] ) { - jQuery.fx.step[ tween.prop ]( tween ); - } else if ( tween.elem.nodeType === 1 && ( - jQuery.cssHooks[ tween.prop ] || - tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { - jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); - } else { - tween.elem[ tween.prop ] = tween.now; - } - } - } -}; - -// Support: IE <=9 only -// Panic based approach to setting things on disconnected nodes -Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { - set: function( tween ) { - if ( tween.elem.nodeType && tween.elem.parentNode ) { - tween.elem[ tween.prop ] = tween.now; - } - } -}; - -jQuery.easing = { - linear: function( p ) { - return p; - }, - swing: function( p ) { - return 0.5 - Math.cos( p * Math.PI ) / 2; - }, - _default: "swing" -}; - -jQuery.fx = Tween.prototype.init; - -// Back compat <1.8 extension point -jQuery.fx.step = {}; - - - - -var - fxNow, inProgress, - rfxtypes = /^(?:toggle|show|hide)$/, - rrun = /queueHooks$/; - -function schedule() { - if ( inProgress ) { - if ( document.hidden === false && window.requestAnimationFrame ) { - window.requestAnimationFrame( schedule ); - } else { - window.setTimeout( schedule, jQuery.fx.interval ); - } - - jQuery.fx.tick(); - } -} - -// Animations created synchronously will run synchronously -function createFxNow() { - window.setTimeout( function() { - fxNow = undefined; - } ); - return ( fxNow = Date.now() ); -} - -// Generate parameters to create a standard animation -function genFx( type, includeWidth ) { - var which, - i = 0, - attrs = { height: type }; - - // If we include width, step value is 1 to do all cssExpand values, - // otherwise step value is 2 to skip over Left and Right - includeWidth = includeWidth ? 
1 : 0; - for ( ; i < 4; i += 2 - includeWidth ) { - which = cssExpand[ i ]; - attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; - } - - if ( includeWidth ) { - attrs.opacity = attrs.width = type; - } - - return attrs; -} - -function createTween( value, prop, animation ) { - var tween, - collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), - index = 0, - length = collection.length; - for ( ; index < length; index++ ) { - if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { - - // We're done with this property - return tween; - } - } -} - -function defaultPrefilter( elem, props, opts ) { - var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, - isBox = "width" in props || "height" in props, - anim = this, - orig = {}, - style = elem.style, - hidden = elem.nodeType && isHiddenWithinTree( elem ), - dataShow = dataPriv.get( elem, "fxshow" ); - - // Queue-skipping animations hijack the fx hooks - if ( !opts.queue ) { - hooks = jQuery._queueHooks( elem, "fx" ); - if ( hooks.unqueued == null ) { - hooks.unqueued = 0; - oldfire = hooks.empty.fire; - hooks.empty.fire = function() { - if ( !hooks.unqueued ) { - oldfire(); - } - }; - } - hooks.unqueued++; - - anim.always( function() { - - // Ensure the complete handler is called before this completes - anim.always( function() { - hooks.unqueued--; - if ( !jQuery.queue( elem, "fx" ).length ) { - hooks.empty.fire(); - } - } ); - } ); - } - - // Detect show/hide animations - for ( prop in props ) { - value = props[ prop ]; - if ( rfxtypes.test( value ) ) { - delete props[ prop ]; - toggle = toggle || value === "toggle"; - if ( value === ( hidden ? "hide" : "show" ) ) { - - // Pretend to be hidden if this is a "show" and - // there is still data from a stopped show/hide - if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { - hidden = true; - - // Ignore all other no-op show/hide data - } else { - continue; - } - } - orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); - } - } - - // Bail out if this is a no-op like .hide().hide() - propTween = !jQuery.isEmptyObject( props ); - if ( !propTween && jQuery.isEmptyObject( orig ) ) { - return; - } - - // Restrict "overflow" and "display" styles during box animations - if ( isBox && elem.nodeType === 1 ) { - - // Support: IE <=9 - 11, Edge 12 - 15 - // Record all 3 overflow attributes because IE does not infer the shorthand - // from identically-valued overflowX and overflowY and Edge just mirrors - // the overflowX value there. 
- opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; - - // Identify a display type, preferring old show/hide data over the CSS cascade - restoreDisplay = dataShow && dataShow.display; - if ( restoreDisplay == null ) { - restoreDisplay = dataPriv.get( elem, "display" ); - } - display = jQuery.css( elem, "display" ); - if ( display === "none" ) { - if ( restoreDisplay ) { - display = restoreDisplay; - } else { - - // Get nonempty value(s) by temporarily forcing visibility - showHide( [ elem ], true ); - restoreDisplay = elem.style.display || restoreDisplay; - display = jQuery.css( elem, "display" ); - showHide( [ elem ] ); - } - } - - // Animate inline elements as inline-block - if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { - if ( jQuery.css( elem, "float" ) === "none" ) { - - // Restore the original display value at the end of pure show/hide animations - if ( !propTween ) { - anim.done( function() { - style.display = restoreDisplay; - } ); - if ( restoreDisplay == null ) { - display = style.display; - restoreDisplay = display === "none" ? "" : display; - } - } - style.display = "inline-block"; - } - } - } - - if ( opts.overflow ) { - style.overflow = "hidden"; - anim.always( function() { - style.overflow = opts.overflow[ 0 ]; - style.overflowX = opts.overflow[ 1 ]; - style.overflowY = opts.overflow[ 2 ]; - } ); - } - - // Implement show/hide animations - propTween = false; - for ( prop in orig ) { - - // General show/hide setup for this element animation - if ( !propTween ) { - if ( dataShow ) { - if ( "hidden" in dataShow ) { - hidden = dataShow.hidden; - } - } else { - dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); - } - - // Store hidden/visible for toggle so `.stop().toggle()` "reverses" - if ( toggle ) { - dataShow.hidden = !hidden; - } - - // Show elements before animating them - if ( hidden ) { - showHide( [ elem ], true ); - } - - /* eslint-disable no-loop-func */ - - anim.done( function() { - - /* eslint-enable no-loop-func */ - - // The final step of a "hide" animation is actually hiding the element - if ( !hidden ) { - showHide( [ elem ] ); - } - dataPriv.remove( elem, "fxshow" ); - for ( prop in orig ) { - jQuery.style( elem, prop, orig[ prop ] ); - } - } ); - } - - // Per-property setup - propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); - if ( !( prop in dataShow ) ) { - dataShow[ prop ] = propTween.start; - if ( hidden ) { - propTween.end = propTween.start; - propTween.start = 0; - } - } - } -} - -function propFilter( props, specialEasing ) { - var index, name, easing, value, hooks; - - // camelCase, specialEasing and expand cssHook pass - for ( index in props ) { - name = camelCase( index ); - easing = specialEasing[ name ]; - value = props[ index ]; - if ( Array.isArray( value ) ) { - easing = value[ 1 ]; - value = props[ index ] = value[ 0 ]; - } - - if ( index !== name ) { - props[ name ] = value; - delete props[ index ]; - } - - hooks = jQuery.cssHooks[ name ]; - if ( hooks && "expand" in hooks ) { - value = hooks.expand( value ); - delete props[ name ]; - - // Not quite $.extend, this won't overwrite existing keys. 
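-			// Illustrative expansion (assumed example, not taken from this
-			// file): a shorthand animation such as
-			//
-			//     jQuery( elem ).animate( { padding: "10px 20px" } );
-			//
-			// lands in this branch, where cssHooks.padding.expand rewrites
-			// the one entry into four per-side padding properties that all
-			// share the same easing.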
- // Reusing 'index' because we have the correct "name" - for ( index in value ) { - if ( !( index in props ) ) { - props[ index ] = value[ index ]; - specialEasing[ index ] = easing; - } - } - } else { - specialEasing[ name ] = easing; - } - } -} - -function Animation( elem, properties, options ) { - var result, - stopped, - index = 0, - length = Animation.prefilters.length, - deferred = jQuery.Deferred().always( function() { - - // Don't match elem in the :animated selector - delete tick.elem; - } ), - tick = function() { - if ( stopped ) { - return false; - } - var currentTime = fxNow || createFxNow(), - remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), - - // Support: Android 2.3 only - // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) - temp = remaining / animation.duration || 0, - percent = 1 - temp, - index = 0, - length = animation.tweens.length; - - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( percent ); - } - - deferred.notifyWith( elem, [ animation, percent, remaining ] ); - - // If there's more to do, yield - if ( percent < 1 && length ) { - return remaining; - } - - // If this was an empty animation, synthesize a final progress notification - if ( !length ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - } - - // Resolve the animation and report its conclusion - deferred.resolveWith( elem, [ animation ] ); - return false; - }, - animation = deferred.promise( { - elem: elem, - props: jQuery.extend( {}, properties ), - opts: jQuery.extend( true, { - specialEasing: {}, - easing: jQuery.easing._default - }, options ), - originalProperties: properties, - originalOptions: options, - startTime: fxNow || createFxNow(), - duration: options.duration, - tweens: [], - createTween: function( prop, end ) { - var tween = jQuery.Tween( elem, animation.opts, prop, end, - animation.opts.specialEasing[ prop ] || animation.opts.easing ); - animation.tweens.push( tween ); - return tween; - }, - stop: function( gotoEnd ) { - var index = 0, - - // If we are going to the end, we want to run all the tweens - // otherwise we skip this part - length = gotoEnd ? 
animation.tweens.length : 0; - if ( stopped ) { - return this; - } - stopped = true; - for ( ; index < length; index++ ) { - animation.tweens[ index ].run( 1 ); - } - - // Resolve when we played the last frame; otherwise, reject - if ( gotoEnd ) { - deferred.notifyWith( elem, [ animation, 1, 0 ] ); - deferred.resolveWith( elem, [ animation, gotoEnd ] ); - } else { - deferred.rejectWith( elem, [ animation, gotoEnd ] ); - } - return this; - } - } ), - props = animation.props; - - propFilter( props, animation.opts.specialEasing ); - - for ( ; index < length; index++ ) { - result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); - if ( result ) { - if ( isFunction( result.stop ) ) { - jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = - result.stop.bind( result ); - } - return result; - } - } - - jQuery.map( props, createTween, animation ); - - if ( isFunction( animation.opts.start ) ) { - animation.opts.start.call( elem, animation ); - } - - // Attach callbacks from options - animation - .progress( animation.opts.progress ) - .done( animation.opts.done, animation.opts.complete ) - .fail( animation.opts.fail ) - .always( animation.opts.always ); - - jQuery.fx.timer( - jQuery.extend( tick, { - elem: elem, - anim: animation, - queue: animation.opts.queue - } ) - ); - - return animation; -} - -jQuery.Animation = jQuery.extend( Animation, { - - tweeners: { - "*": [ function( prop, value ) { - var tween = this.createTween( prop, value ); - adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); - return tween; - } ] - }, - - tweener: function( props, callback ) { - if ( isFunction( props ) ) { - callback = props; - props = [ "*" ]; - } else { - props = props.match( rnothtmlwhite ); - } - - var prop, - index = 0, - length = props.length; - - for ( ; index < length; index++ ) { - prop = props[ index ]; - Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; - Animation.tweeners[ prop ].unshift( callback ); - } - }, - - prefilters: [ defaultPrefilter ], - - prefilter: function( callback, prepend ) { - if ( prepend ) { - Animation.prefilters.unshift( callback ); - } else { - Animation.prefilters.push( callback ); - } - } -} ); - -jQuery.speed = function( speed, easing, fn ) { - var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { - complete: fn || !fn && easing || - isFunction( speed ) && speed, - duration: speed, - easing: fn && easing || easing && !isFunction( easing ) && easing - }; - - // Go to the end state if fx are off - if ( jQuery.fx.off ) { - opt.duration = 0; - - } else { - if ( typeof opt.duration !== "number" ) { - if ( opt.duration in jQuery.fx.speeds ) { - opt.duration = jQuery.fx.speeds[ opt.duration ]; - - } else { - opt.duration = jQuery.fx.speeds._default; - } - } - } - - // Normalize opt.queue - true/undefined/null -> "fx" - if ( opt.queue == null || opt.queue === true ) { - opt.queue = "fx"; - } - - // Queueing - opt.old = opt.complete; - - opt.complete = function() { - if ( isFunction( opt.old ) ) { - opt.old.call( this ); - } - - if ( opt.queue ) { - jQuery.dequeue( this, opt.queue ); - } - }; - - return opt; -}; - -jQuery.fn.extend( { - fadeTo: function( speed, to, easing, callback ) { - - // Show any hidden elements after setting opacity to 0 - return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() - - // Animate to the value specified - .end().animate( { opacity: to }, speed, easing, callback ); - }, - animate: function( prop, speed, easing, callback ) { - var empty = jQuery.isEmptyObject( prop ), - optall = jQuery.speed( speed, easing, callback ), - doAnimation = function() { - - // Operate on a copy of prop so per-property easing won't be lost - var anim = Animation( this, jQuery.extend( {}, prop ), optall ); - - // Empty animations, or finishing resolves immediately - if ( empty || dataPriv.get( this, "finish" ) ) { - anim.stop( true ); - } - }; - doAnimation.finish = doAnimation; - - return empty || optall.queue === false ? - this.each( doAnimation ) : - this.queue( optall.queue, doAnimation ); - }, - stop: function( type, clearQueue, gotoEnd ) { - var stopQueue = function( hooks ) { - var stop = hooks.stop; - delete hooks.stop; - stop( gotoEnd ); - }; - - if ( typeof type !== "string" ) { - gotoEnd = clearQueue; - clearQueue = type; - type = undefined; - } - if ( clearQueue ) { - this.queue( type || "fx", [] ); - } - - return this.each( function() { - var dequeue = true, - index = type != null && type + "queueHooks", - timers = jQuery.timers, - data = dataPriv.get( this ); - - if ( index ) { - if ( data[ index ] && data[ index ].stop ) { - stopQueue( data[ index ] ); - } - } else { - for ( index in data ) { - if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { - stopQueue( data[ index ] ); - } - } - } - - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && - ( type == null || timers[ index ].queue === type ) ) { - - timers[ index ].anim.stop( gotoEnd ); - dequeue = false; - timers.splice( index, 1 ); - } - } - - // Start the next in the queue if the last step wasn't forced. - // Timers currently will call their complete callbacks, which - // will dequeue but only if they were gotoEnd. - if ( dequeue || !gotoEnd ) { - jQuery.dequeue( this, type ); - } - } ); - }, - finish: function( type ) { - if ( type !== false ) { - type = type || "fx"; - } - return this.each( function() { - var index, - data = dataPriv.get( this ), - queue = data[ type + "queue" ], - hooks = data[ type + "queueHooks" ], - timers = jQuery.timers, - length = queue ? 
queue.length : 0; - - // Enable finishing flag on private data - data.finish = true; - - // Empty the queue first - jQuery.queue( this, type, [] ); - - if ( hooks && hooks.stop ) { - hooks.stop.call( this, true ); - } - - // Look for any active animations, and finish them - for ( index = timers.length; index--; ) { - if ( timers[ index ].elem === this && timers[ index ].queue === type ) { - timers[ index ].anim.stop( true ); - timers.splice( index, 1 ); - } - } - - // Look for any animations in the old queue and finish them - for ( index = 0; index < length; index++ ) { - if ( queue[ index ] && queue[ index ].finish ) { - queue[ index ].finish.call( this ); - } - } - - // Turn off finishing flag - delete data.finish; - } ); - } -} ); - -jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { - var cssFn = jQuery.fn[ name ]; - jQuery.fn[ name ] = function( speed, easing, callback ) { - return speed == null || typeof speed === "boolean" ? - cssFn.apply( this, arguments ) : - this.animate( genFx( name, true ), speed, easing, callback ); - }; -} ); - -// Generate shortcuts for custom animations -jQuery.each( { - slideDown: genFx( "show" ), - slideUp: genFx( "hide" ), - slideToggle: genFx( "toggle" ), - fadeIn: { opacity: "show" }, - fadeOut: { opacity: "hide" }, - fadeToggle: { opacity: "toggle" } -}, function( name, props ) { - jQuery.fn[ name ] = function( speed, easing, callback ) { - return this.animate( props, speed, easing, callback ); - }; -} ); - -jQuery.timers = []; -jQuery.fx.tick = function() { - var timer, - i = 0, - timers = jQuery.timers; - - fxNow = Date.now(); - - for ( ; i < timers.length; i++ ) { - timer = timers[ i ]; - - // Run the timer and safely remove it when done (allowing for external removal) - if ( !timer() && timers[ i ] === timer ) { - timers.splice( i--, 1 ); - } - } - - if ( !timers.length ) { - jQuery.fx.stop(); - } - fxNow = undefined; -}; - -jQuery.fx.timer = function( timer ) { - jQuery.timers.push( timer ); - jQuery.fx.start(); -}; - -jQuery.fx.interval = 13; -jQuery.fx.start = function() { - if ( inProgress ) { - return; - } - - inProgress = true; - schedule(); -}; - -jQuery.fx.stop = function() { - inProgress = null; -}; - -jQuery.fx.speeds = { - slow: 600, - fast: 200, - - // Default speed - _default: 400 -}; - - -// Based off of the plugin by Clint Helfers, with permission. -// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ -jQuery.fn.delay = function( time, type ) { - time = jQuery.fx ? 
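-	// Illustrative call (assumed example, not taken from this file):
-	//
-	//     jQuery( "#msg" ).slideDown().delay( "slow" ).fadeOut();
-	//
-	// Named durations resolve through jQuery.fx.speeds ( "slow" -> 600 ),
-	// while unrecognized values fall through unchanged.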
jQuery.fx.speeds[ time ] || time : time; - type = type || "fx"; - - return this.queue( type, function( next, hooks ) { - var timeout = window.setTimeout( next, time ); - hooks.stop = function() { - window.clearTimeout( timeout ); - }; - } ); -}; - - -( function() { - var input = document.createElement( "input" ), - select = document.createElement( "select" ), - opt = select.appendChild( document.createElement( "option" ) ); - - input.type = "checkbox"; - - // Support: Android <=4.3 only - // Default value for a checkbox should be "on" - support.checkOn = input.value !== ""; - - // Support: IE <=11 only - // Must access selectedIndex to make default options select - support.optSelected = opt.selected; - - // Support: IE <=11 only - // An input loses its value after becoming a radio - input = document.createElement( "input" ); - input.value = "t"; - input.type = "radio"; - support.radioValue = input.value === "t"; -} )(); - - -var boolHook, - attrHandle = jQuery.expr.attrHandle; - -jQuery.fn.extend( { - attr: function( name, value ) { - return access( this, jQuery.attr, name, value, arguments.length > 1 ); - }, - - removeAttr: function( name ) { - return this.each( function() { - jQuery.removeAttr( this, name ); - } ); - } -} ); - -jQuery.extend( { - attr: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set attributes on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - // Fallback to prop when attributes are not supported - if ( typeof elem.getAttribute === "undefined" ) { - return jQuery.prop( elem, name, value ); - } - - // Attribute hooks are determined by the lowercase version - // Grab necessary hook if one is defined - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - hooks = jQuery.attrHooks[ name.toLowerCase() ] || - ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); - } - - if ( value !== undefined ) { - if ( value === null ) { - jQuery.removeAttr( elem, name ); - return; - } - - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - elem.setAttribute( name, value + "" ); - return value; - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - ret = jQuery.find.attr( elem, name ); - - // Non-existent attributes return null, we normalize to undefined - return ret == null ? 
undefined : ret; - }, - - attrHooks: { - type: { - set: function( elem, value ) { - if ( !support.radioValue && value === "radio" && - nodeName( elem, "input" ) ) { - var val = elem.value; - elem.setAttribute( "type", value ); - if ( val ) { - elem.value = val; - } - return value; - } - } - } - }, - - removeAttr: function( elem, value ) { - var name, - i = 0, - - // Attribute names can contain non-HTML whitespace characters - // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 - attrNames = value && value.match( rnothtmlwhite ); - - if ( attrNames && elem.nodeType === 1 ) { - while ( ( name = attrNames[ i++ ] ) ) { - elem.removeAttribute( name ); - } - } - } -} ); - -// Hooks for boolean attributes -boolHook = { - set: function( elem, value, name ) { - if ( value === false ) { - - // Remove boolean attributes when set to false - jQuery.removeAttr( elem, name ); - } else { - elem.setAttribute( name, name ); - } - return name; - } -}; - -jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { - var getter = attrHandle[ name ] || jQuery.find.attr; - - attrHandle[ name ] = function( elem, name, isXML ) { - var ret, handle, - lowercaseName = name.toLowerCase(); - - if ( !isXML ) { - - // Avoid an infinite loop by temporarily removing this function from the getter - handle = attrHandle[ lowercaseName ]; - attrHandle[ lowercaseName ] = ret; - ret = getter( elem, name, isXML ) != null ? - lowercaseName : - null; - attrHandle[ lowercaseName ] = handle; - } - return ret; - }; -} ); - - - - -var rfocusable = /^(?:input|select|textarea|button)$/i, - rclickable = /^(?:a|area)$/i; - -jQuery.fn.extend( { - prop: function( name, value ) { - return access( this, jQuery.prop, name, value, arguments.length > 1 ); - }, - - removeProp: function( name ) { - return this.each( function() { - delete this[ jQuery.propFix[ name ] || name ]; - } ); - } -} ); - -jQuery.extend( { - prop: function( elem, name, value ) { - var ret, hooks, - nType = elem.nodeType; - - // Don't get/set properties on text, comment and attribute nodes - if ( nType === 3 || nType === 8 || nType === 2 ) { - return; - } - - if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { - - // Fix name and attach hooks - name = jQuery.propFix[ name ] || name; - hooks = jQuery.propHooks[ name ]; - } - - if ( value !== undefined ) { - if ( hooks && "set" in hooks && - ( ret = hooks.set( elem, value, name ) ) !== undefined ) { - return ret; - } - - return ( elem[ name ] = value ); - } - - if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { - return ret; - } - - return elem[ name ]; - }, - - propHooks: { - tabIndex: { - get: function( elem ) { - - // Support: IE <=9 - 11 only - // elem.tabIndex doesn't always return the - // correct value when it hasn't been explicitly set - // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ - // Use proper attribute retrieval(#12072) - var tabindex = jQuery.find.attr( elem, "tabindex" ); - - if ( tabindex ) { - return parseInt( tabindex, 10 ); - } - - if ( - rfocusable.test( elem.nodeName ) || - rclickable.test( elem.nodeName ) && - elem.href - ) { - return 0; - } - - return -1; - } - } - }, - - propFix: { - "for": "htmlFor", - "class": "className" - } -} ); - -// Support: IE <=11 only -// Accessing the selectedIndex property -// forces the browser to respect setting selected -// on the option -// The getter ensures a default option is selected -// when in an 
optgroup -// eslint rule "no-unused-expressions" is disabled for this code -// since it considers such accessions noop -if ( !support.optSelected ) { - jQuery.propHooks.selected = { - get: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent && parent.parentNode ) { - parent.parentNode.selectedIndex; - } - return null; - }, - set: function( elem ) { - - /* eslint no-unused-expressions: "off" */ - - var parent = elem.parentNode; - if ( parent ) { - parent.selectedIndex; - - if ( parent.parentNode ) { - parent.parentNode.selectedIndex; - } - } - } - }; -} - -jQuery.each( [ - "tabIndex", - "readOnly", - "maxLength", - "cellSpacing", - "cellPadding", - "rowSpan", - "colSpan", - "useMap", - "frameBorder", - "contentEditable" -], function() { - jQuery.propFix[ this.toLowerCase() ] = this; -} ); - - - - - // Strip and collapse whitespace according to HTML spec - // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace - function stripAndCollapse( value ) { - var tokens = value.match( rnothtmlwhite ) || []; - return tokens.join( " " ); - } - - -function getClass( elem ) { - return elem.getAttribute && elem.getAttribute( "class" ) || ""; -} - -function classesToArray( value ) { - if ( Array.isArray( value ) ) { - return value; - } - if ( typeof value === "string" ) { - return value.match( rnothtmlwhite ) || []; - } - return []; -} - -jQuery.fn.extend( { - addClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - if ( cur.indexOf( " " + clazz + " " ) < 0 ) { - cur += clazz + " "; - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - removeClass: function( value ) { - var classes, elem, cur, curValue, clazz, j, finalValue, - i = 0; - - if ( isFunction( value ) ) { - return this.each( function( j ) { - jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); - } ); - } - - if ( !arguments.length ) { - return this.attr( "class", "" ); - } - - classes = classesToArray( value ); - - if ( classes.length ) { - while ( ( elem = this[ i++ ] ) ) { - curValue = getClass( elem ); - - // This expression is here for better compressibility (see addClass) - cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); - - if ( cur ) { - j = 0; - while ( ( clazz = classes[ j++ ] ) ) { - - // Remove *all* instances - while ( cur.indexOf( " " + clazz + " " ) > -1 ) { - cur = cur.replace( " " + clazz + " ", " " ); - } - } - - // Only assign if different to avoid unneeded rendering. - finalValue = stripAndCollapse( cur ); - if ( curValue !== finalValue ) { - elem.setAttribute( "class", finalValue ); - } - } - } - } - - return this; - }, - - toggleClass: function( value, stateVal ) { - var type = typeof value, - isValidValue = type === "string" || Array.isArray( value ); - - if ( typeof stateVal === "boolean" && isValidValue ) { - return stateVal ? 
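-				// Illustrative call (assumed example, not taken from this
-				// file):
-				//
-				//     jQuery( elem ).toggleClass( "active", isOn );
-				//
-				// A boolean second argument reduces the toggle to a plain
-				// addClass/removeClass, as the ternary here shows.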
this.addClass( value ) : this.removeClass( value ); - } - - if ( isFunction( value ) ) { - return this.each( function( i ) { - jQuery( this ).toggleClass( - value.call( this, i, getClass( this ), stateVal ), - stateVal - ); - } ); - } - - return this.each( function() { - var className, i, self, classNames; - - if ( isValidValue ) { - - // Toggle individual class names - i = 0; - self = jQuery( this ); - classNames = classesToArray( value ); - - while ( ( className = classNames[ i++ ] ) ) { - - // Check each className given, space separated list - if ( self.hasClass( className ) ) { - self.removeClass( className ); - } else { - self.addClass( className ); - } - } - - // Toggle whole class name - } else if ( value === undefined || type === "boolean" ) { - className = getClass( this ); - if ( className ) { - - // Store className if set - dataPriv.set( this, "__className__", className ); - } - - // If the element has a class name or if we're passed `false`, - // then remove the whole classname (if there was one, the above saved it). - // Otherwise bring back whatever was previously saved (if anything), - // falling back to the empty string if nothing was stored. - if ( this.setAttribute ) { - this.setAttribute( "class", - className || value === false ? - "" : - dataPriv.get( this, "__className__" ) || "" - ); - } - } - } ); - }, - - hasClass: function( selector ) { - var className, elem, - i = 0; - - className = " " + selector + " "; - while ( ( elem = this[ i++ ] ) ) { - if ( elem.nodeType === 1 && - ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { - return true; - } - } - - return false; - } -} ); - - - - -var rreturn = /\r/g; - -jQuery.fn.extend( { - val: function( value ) { - var hooks, ret, valueIsFunction, - elem = this[ 0 ]; - - if ( !arguments.length ) { - if ( elem ) { - hooks = jQuery.valHooks[ elem.type ] || - jQuery.valHooks[ elem.nodeName.toLowerCase() ]; - - if ( hooks && - "get" in hooks && - ( ret = hooks.get( elem, "value" ) ) !== undefined - ) { - return ret; - } - - ret = elem.value; - - // Handle most common string cases - if ( typeof ret === "string" ) { - return ret.replace( rreturn, "" ); - } - - // Handle cases where value is null/undef or number - return ret == null ? "" : ret; - } - - return; - } - - valueIsFunction = isFunction( value ); - - return this.each( function( i ) { - var val; - - if ( this.nodeType !== 1 ) { - return; - } - - if ( valueIsFunction ) { - val = value.call( this, i, jQuery( this ).val() ); - } else { - val = value; - } - - // Treat null/undefined as ""; convert numbers to string - if ( val == null ) { - val = ""; - - } else if ( typeof val === "number" ) { - val += ""; - - } else if ( Array.isArray( val ) ) { - val = jQuery.map( val, function( value ) { - return value == null ? "" : value + ""; - } ); - } - - hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; - - // If set returns undefined, fall back to normal setting - if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { - this.value = val; - } - } ); - } -} ); - -jQuery.extend( { - valHooks: { - option: { - get: function( elem ) { - - var val = jQuery.find.attr( elem, "value" ); - return val != null ? 
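-					// Illustrative fallback (assumed example, not taken from
-					// this file): for <option>Two</option> with no value
-					// attribute,
-					//
-					//     jQuery( option ).val(); // "Two", from option text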
- val : - - // Support: IE <=10 - 11 only - // option.text throws exceptions (#14686, #14858) - // Strip and collapse whitespace - // https://html.spec.whatwg.org/#strip-and-collapse-whitespace - stripAndCollapse( jQuery.text( elem ) ); - } - }, - select: { - get: function( elem ) { - var value, option, i, - options = elem.options, - index = elem.selectedIndex, - one = elem.type === "select-one", - values = one ? null : [], - max = one ? index + 1 : options.length; - - if ( index < 0 ) { - i = max; - - } else { - i = one ? index : 0; - } - - // Loop through all the selected options - for ( ; i < max; i++ ) { - option = options[ i ]; - - // Support: IE <=9 only - // IE8-9 doesn't update selected after form reset (#2551) - if ( ( option.selected || i === index ) && - - // Don't return options that are disabled or in a disabled optgroup - !option.disabled && - ( !option.parentNode.disabled || - !nodeName( option.parentNode, "optgroup" ) ) ) { - - // Get the specific value for the option - value = jQuery( option ).val(); - - // We don't need an array for one selects - if ( one ) { - return value; - } - - // Multi-Selects return an array - values.push( value ); - } - } - - return values; - }, - - set: function( elem, value ) { - var optionSet, option, - options = elem.options, - values = jQuery.makeArray( value ), - i = options.length; - - while ( i-- ) { - option = options[ i ]; - - /* eslint-disable no-cond-assign */ - - if ( option.selected = - jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 - ) { - optionSet = true; - } - - /* eslint-enable no-cond-assign */ - } - - // Force browsers to behave consistently when non-matching value is set - if ( !optionSet ) { - elem.selectedIndex = -1; - } - return values; - } - } - } -} ); - -// Radios and checkboxes getter/setter -jQuery.each( [ "radio", "checkbox" ], function() { - jQuery.valHooks[ this ] = { - set: function( elem, value ) { - if ( Array.isArray( value ) ) { - return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); - } - } - }; - if ( !support.checkOn ) { - jQuery.valHooks[ this ].get = function( elem ) { - return elem.getAttribute( "value" ) === null ? "on" : elem.value; - }; - } -} ); - - - - -// Return jQuery for attributes-only inclusion - - -support.focusin = "onfocusin" in window; - - -var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, - stopPropagationCallback = function( e ) { - e.stopPropagation(); - }; - -jQuery.extend( jQuery.event, { - - trigger: function( event, data, elem, onlyHandlers ) { - - var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, - eventPath = [ elem || document ], - type = hasOwn.call( event, "type" ) ? event.type : event, - namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; - - cur = lastElement = tmp = elem = elem || document; - - // Don't do events on text and comment nodes - if ( elem.nodeType === 3 || elem.nodeType === 8 ) { - return; - } - - // focus/blur morphs to focusin/out; ensure we're not firing them right now - if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { - return; - } - - if ( type.indexOf( "." ) > -1 ) { - - // Namespaced trigger; create a regexp to match event type in handle() - namespaces = type.split( "." ); - type = namespaces.shift(); - namespaces.sort(); - } - ontype = type.indexOf( ":" ) < 0 && "on" + type; - - // Caller can pass in a jQuery.Event object, Object, or just an event type string - event = event[ jQuery.expando ] ? 
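-			// Illustrative call (assumed example, not taken from this file):
-			//
-			//     jQuery( elem ).trigger( "update.myns", [ payload ] );
-			//
-			// A plain string type is wrapped in a fresh jQuery.Event just
-			// below; an object carrying jQuery.expando is reused as-is.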
- event : - new jQuery.Event( type, typeof event === "object" && event ); - - // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) - event.isTrigger = onlyHandlers ? 2 : 3; - event.namespace = namespaces.join( "." ); - event.rnamespace = event.namespace ? - new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : - null; - - // Clean up the event in case it is being reused - event.result = undefined; - if ( !event.target ) { - event.target = elem; - } - - // Clone any incoming data and prepend the event, creating the handler arg list - data = data == null ? - [ event ] : - jQuery.makeArray( data, [ event ] ); - - // Allow special events to draw outside the lines - special = jQuery.event.special[ type ] || {}; - if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { - return; - } - - // Determine event propagation path in advance, per W3C events spec (#9951) - // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) - if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { - - bubbleType = special.delegateType || type; - if ( !rfocusMorph.test( bubbleType + type ) ) { - cur = cur.parentNode; - } - for ( ; cur; cur = cur.parentNode ) { - eventPath.push( cur ); - tmp = cur; - } - - // Only add window if we got to document (e.g., not plain obj or detached DOM) - if ( tmp === ( elem.ownerDocument || document ) ) { - eventPath.push( tmp.defaultView || tmp.parentWindow || window ); - } - } - - // Fire handlers on the event path - i = 0; - while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { - lastElement = cur; - event.type = i > 1 ? - bubbleType : - special.bindType || type; - - // jQuery handler - handle = ( - dataPriv.get( cur, "events" ) || Object.create( null ) - )[ event.type ] && - dataPriv.get( cur, "handle" ); - if ( handle ) { - handle.apply( cur, data ); - } - - // Native handler - handle = ontype && cur[ ontype ]; - if ( handle && handle.apply && acceptData( cur ) ) { - event.result = handle.apply( cur, data ); - if ( event.result === false ) { - event.preventDefault(); - } - } - } - event.type = type; - - // If nobody prevented the default action, do it now - if ( !onlyHandlers && !event.isDefaultPrevented() ) { - - if ( ( !special._default || - special._default.apply( eventPath.pop(), data ) === false ) && - acceptData( elem ) ) { - - // Call a native DOM method on the target with the same name as the event. 
- // Don't do default actions on window, that's where global variables be (#6170) - if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { - - // Don't re-trigger an onFOO event when we call its FOO() method - tmp = elem[ ontype ]; - - if ( tmp ) { - elem[ ontype ] = null; - } - - // Prevent re-triggering of the same event, since we already bubbled it above - jQuery.event.triggered = type; - - if ( event.isPropagationStopped() ) { - lastElement.addEventListener( type, stopPropagationCallback ); - } - - elem[ type ](); - - if ( event.isPropagationStopped() ) { - lastElement.removeEventListener( type, stopPropagationCallback ); - } - - jQuery.event.triggered = undefined; - - if ( tmp ) { - elem[ ontype ] = tmp; - } - } - } - } - - return event.result; - }, - - // Piggyback on a donor event to simulate a different one - // Used only for `focus(in | out)` events - simulate: function( type, elem, event ) { - var e = jQuery.extend( - new jQuery.Event(), - event, - { - type: type, - isSimulated: true - } - ); - - jQuery.event.trigger( e, null, elem ); - } - -} ); - -jQuery.fn.extend( { - - trigger: function( type, data ) { - return this.each( function() { - jQuery.event.trigger( type, data, this ); - } ); - }, - triggerHandler: function( type, data ) { - var elem = this[ 0 ]; - if ( elem ) { - return jQuery.event.trigger( type, data, elem, true ); - } - } -} ); - - -// Support: Firefox <=44 -// Firefox doesn't have focus(in | out) events -// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 -// -// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 -// focus(in | out) events fire after focus & blur events, -// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order -// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 -if ( !support.focusin ) { - jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { - - // Attach a single capturing handler on the document while someone wants focusin/focusout - var handler = function( event ) { - jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); - }; - - jQuery.event.special[ fix ] = { - setup: function() { - - // Handle: regular nodes (via `this.ownerDocument`), window - // (via `this.document`) & document (via `this`). - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ); - - if ( !attaches ) { - doc.addEventListener( orig, handler, true ); - } - dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); - }, - teardown: function() { - var doc = this.ownerDocument || this.document || this, - attaches = dataPriv.access( doc, fix ) - 1; - - if ( !attaches ) { - doc.removeEventListener( orig, handler, true ); - dataPriv.remove( doc, fix ); - - } else { - dataPriv.access( doc, fix, attaches ); - } - } - }; - } ); -} -var location = window.location; - -var nonce = { guid: Date.now() }; - -var rquery = ( /\?/ ); - - - -// Cross-browser xml parsing -jQuery.parseXML = function( data ) { - var xml; - if ( !data || typeof data !== "string" ) { - return null; - } - - // Support: IE 9 - 11 only - // IE throws on parseFromString with invalid input. 
- try { - xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); - } catch ( e ) { - xml = undefined; - } - - if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { - jQuery.error( "Invalid XML: " + data ); - } - return xml; -}; - - -var - rbracket = /\[\]$/, - rCRLF = /\r?\n/g, - rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, - rsubmittable = /^(?:input|select|textarea|keygen)/i; - -function buildParams( prefix, obj, traditional, add ) { - var name; - - if ( Array.isArray( obj ) ) { - - // Serialize array item. - jQuery.each( obj, function( i, v ) { - if ( traditional || rbracket.test( prefix ) ) { - - // Treat each array item as a scalar. - add( prefix, v ); - - } else { - - // Item is non-scalar (array or object), encode its numeric index. - buildParams( - prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", - v, - traditional, - add - ); - } - } ); - - } else if ( !traditional && toType( obj ) === "object" ) { - - // Serialize object item. - for ( name in obj ) { - buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); - } - - } else { - - // Serialize scalar item. - add( prefix, obj ); - } -} - -// Serialize an array of form elements or a set of -// key/values into a query string -jQuery.param = function( a, traditional ) { - var prefix, - s = [], - add = function( key, valueOrFunction ) { - - // If value is a function, invoke it and use its return value - var value = isFunction( valueOrFunction ) ? - valueOrFunction() : - valueOrFunction; - - s[ s.length ] = encodeURIComponent( key ) + "=" + - encodeURIComponent( value == null ? "" : value ); - }; - - if ( a == null ) { - return ""; - } - - // If an array was passed in, assume that it is an array of form elements. - if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { - - // Serialize the form elements - jQuery.each( a, function() { - add( this.name, this.value ); - } ); - - } else { - - // If traditional, encode the "old" way (the way 1.3.2 or older - // did it), otherwise encode params recursively. - for ( prefix in a ) { - buildParams( prefix, a[ prefix ], traditional, add ); - } - } - - // Return the resulting serialization - return s.join( "&" ); -}; - -jQuery.fn.extend( { - serialize: function() { - return jQuery.param( this.serializeArray() ); - }, - serializeArray: function() { - return this.map( function() { - - // Can add propHook for "elements" to filter or add form elements - var elements = jQuery.prop( this, "elements" ); - return elements ? 
jQuery.makeArray( elements ) : this; - } ) - .filter( function() { - var type = this.type; - - // Use .is( ":disabled" ) so that fieldset[disabled] works - return this.name && !jQuery( this ).is( ":disabled" ) && - rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && - ( this.checked || !rcheckableType.test( type ) ); - } ) - .map( function( _i, elem ) { - var val = jQuery( this ).val(); - - if ( val == null ) { - return null; - } - - if ( Array.isArray( val ) ) { - return jQuery.map( val, function( val ) { - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ); - } - - return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; - } ).get(); - } -} ); - - -var - r20 = /%20/g, - rhash = /#.*$/, - rantiCache = /([?&])_=[^&]*/, - rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, - - // #7653, #8125, #8152: local protocol detection - rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, - rnoContent = /^(?:GET|HEAD)$/, - rprotocol = /^\/\//, - - /* Prefilters - * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) - * 2) These are called: - * - BEFORE asking for a transport - * - AFTER param serialization (s.data is a string if s.processData is true) - * 3) key is the dataType - * 4) the catchall symbol "*" can be used - * 5) execution will start with transport dataType and THEN continue down to "*" if needed - */ - prefilters = {}, - - /* Transports bindings - * 1) key is the dataType - * 2) the catchall symbol "*" can be used - * 3) selection will start with transport dataType and THEN go to "*" if needed - */ - transports = {}, - - // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression - allTypes = "*/".concat( "*" ), - - // Anchor tag for parsing the document origin - originAnchor = document.createElement( "a" ); - originAnchor.href = location.href; - -// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport -function addToPrefiltersOrTransports( structure ) { - - // dataTypeExpression is optional and defaults to "*" - return function( dataTypeExpression, func ) { - - if ( typeof dataTypeExpression !== "string" ) { - func = dataTypeExpression; - dataTypeExpression = "*"; - } - - var dataType, - i = 0, - dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; - - if ( isFunction( func ) ) { - - // For each dataType in the dataTypeExpression - while ( ( dataType = dataTypes[ i++ ] ) ) { - - // Prepend if requested - if ( dataType[ 0 ] === "+" ) { - dataType = dataType.slice( 1 ) || "*"; - ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); - - // Otherwise append - } else { - ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); - } - } - } - }; -} - -// Base inspection function for prefilters and transports -function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { - - var inspected = {}, - seekingTransport = ( structure === transports ); - - function inspect( dataType ) { - var selected; - inspected[ dataType ] = true; - jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { - var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); - if ( typeof dataTypeOrTransport === "string" && - !seekingTransport && !inspected[ dataTypeOrTransport ] ) { - - options.dataTypes.unshift( dataTypeOrTransport ); - inspect( dataTypeOrTransport ); - return false; - } else if ( seekingTransport ) { - return !( selected = dataTypeOrTransport ); - } 
- } ); - return selected; - } - - return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); -} - -// A special extend for ajax options -// that takes "flat" options (not to be deep extended) -// Fixes #9887 -function ajaxExtend( target, src ) { - var key, deep, - flatOptions = jQuery.ajaxSettings.flatOptions || {}; - - for ( key in src ) { - if ( src[ key ] !== undefined ) { - ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; - } - } - if ( deep ) { - jQuery.extend( true, target, deep ); - } - - return target; -} - -/* Handles responses to an ajax request: - * - finds the right dataType (mediates between content-type and expected dataType) - * - returns the corresponding response - */ -function ajaxHandleResponses( s, jqXHR, responses ) { - - var ct, type, finalDataType, firstDataType, - contents = s.contents, - dataTypes = s.dataTypes; - - // Remove auto dataType and get content-type in the process - while ( dataTypes[ 0 ] === "*" ) { - dataTypes.shift(); - if ( ct === undefined ) { - ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); - } - } - - // Check if we're dealing with a known content-type - if ( ct ) { - for ( type in contents ) { - if ( contents[ type ] && contents[ type ].test( ct ) ) { - dataTypes.unshift( type ); - break; - } - } - } - - // Check to see if we have a response for the expected dataType - if ( dataTypes[ 0 ] in responses ) { - finalDataType = dataTypes[ 0 ]; - } else { - - // Try convertible dataTypes - for ( type in responses ) { - if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { - finalDataType = type; - break; - } - if ( !firstDataType ) { - firstDataType = type; - } - } - - // Or just use first one - finalDataType = finalDataType || firstDataType; - } - - // If we found a dataType - // We add the dataType to the list if needed - // and return the corresponding response - if ( finalDataType ) { - if ( finalDataType !== dataTypes[ 0 ] ) { - dataTypes.unshift( finalDataType ); - } - return responses[ finalDataType ]; - } -} - -/* Chain conversions given the request and the original response - * Also sets the responseXXX fields on the jqXHR instance - */ -function ajaxConvert( s, response, jqXHR, isSuccess ) { - var conv2, current, conv, tmp, prev, - converters = {}, - - // Work with a copy of dataTypes in case we need to modify it for conversion - dataTypes = s.dataTypes.slice(); - - // Create converters map with lowercased keys - if ( dataTypes[ 1 ] ) { - for ( conv in s.converters ) { - converters[ conv.toLowerCase() ] = s.converters[ conv ]; - } - } - - current = dataTypes.shift(); - - // Convert to each sequential dataType - while ( current ) { - - if ( s.responseFields[ current ] ) { - jqXHR[ s.responseFields[ current ] ] = response; - } - - // Apply the dataFilter if provided - if ( !prev && isSuccess && s.dataFilter ) { - response = s.dataFilter( response, s.dataType ); - } - - prev = current; - current = dataTypes.shift(); - - if ( current ) { - - // There's only work to do if current dataType is non-auto - if ( current === "*" ) { - - current = prev; - - // Convert response if prev dataType is non-auto and differs from current - } else if ( prev !== "*" && prev !== current ) { - - // Seek a direct converter - conv = converters[ prev + " " + current ] || converters[ "* " + current ]; - - // If none found, seek a pair - if ( !conv ) { - for ( conv2 in converters ) { - - // If conv2 outputs current - tmp = conv2.split( " " ); - if ( tmp[ 1 ] === current ) { - - // If 
prev can be converted to accepted input - conv = converters[ prev + " " + tmp[ 0 ] ] || - converters[ "* " + tmp[ 0 ] ]; - if ( conv ) { - - // Condense equivalence converters - if ( conv === true ) { - conv = converters[ conv2 ]; - - // Otherwise, insert the intermediate dataType - } else if ( converters[ conv2 ] !== true ) { - current = tmp[ 0 ]; - dataTypes.unshift( tmp[ 1 ] ); - } - break; - } - } - } - } - - // Apply converter (if not an equivalence) - if ( conv !== true ) { - - // Unless errors are allowed to bubble, catch and return them - if ( conv && s.throws ) { - response = conv( response ); - } else { - try { - response = conv( response ); - } catch ( e ) { - return { - state: "parsererror", - error: conv ? e : "No conversion from " + prev + " to " + current - }; - } - } - } - } - } - } - - return { state: "success", data: response }; -} - -jQuery.extend( { - - // Counter for holding the number of active queries - active: 0, - - // Last-Modified header cache for next request - lastModified: {}, - etag: {}, - - ajaxSettings: { - url: location.href, - type: "GET", - isLocal: rlocalProtocol.test( location.protocol ), - global: true, - processData: true, - async: true, - contentType: "application/x-www-form-urlencoded; charset=UTF-8", - - /* - timeout: 0, - data: null, - dataType: null, - username: null, - password: null, - cache: null, - throws: false, - traditional: false, - headers: {}, - */ - - accepts: { - "*": allTypes, - text: "text/plain", - html: "text/html", - xml: "application/xml, text/xml", - json: "application/json, text/javascript" - }, - - contents: { - xml: /\bxml\b/, - html: /\bhtml/, - json: /\bjson\b/ - }, - - responseFields: { - xml: "responseXML", - text: "responseText", - json: "responseJSON" - }, - - // Data converters - // Keys separate source (or catchall "*") and destination types with a single space - converters: { - - // Convert anything to text - "* text": String, - - // Text to html (true = no transformation) - "text html": true, - - // Evaluate text as a json expression - "text json": JSON.parse, - - // Parse text as xml - "text xml": jQuery.parseXML - }, - - // For options that shouldn't be deep extended: - // you can add your own custom options here if - // and when you create one that shouldn't be - // deep extended (see ajaxExtend) - flatOptions: { - url: true, - context: true - } - }, - - // Creates a full fledged settings object into target - // with both ajaxSettings and settings fields. - // If target is omitted, writes into ajaxSettings. - ajaxSetup: function( target, settings ) { - return settings ? 
- - // Building a settings object - ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : - - // Extending ajaxSettings - ajaxExtend( jQuery.ajaxSettings, target ); - }, - - ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), - ajaxTransport: addToPrefiltersOrTransports( transports ), - - // Main method - ajax: function( url, options ) { - - // If url is an object, simulate pre-1.5 signature - if ( typeof url === "object" ) { - options = url; - url = undefined; - } - - // Force options to be an object - options = options || {}; - - var transport, - - // URL without anti-cache param - cacheURL, - - // Response headers - responseHeadersString, - responseHeaders, - - // timeout handle - timeoutTimer, - - // Url cleanup var - urlAnchor, - - // Request state (becomes false upon send and true upon completion) - completed, - - // To know if global events are to be dispatched - fireGlobals, - - // Loop variable - i, - - // uncached part of the url - uncached, - - // Create the final options object - s = jQuery.ajaxSetup( {}, options ), - - // Callbacks context - callbackContext = s.context || s, - - // Context for global events is callbackContext if it is a DOM node or jQuery collection - globalEventContext = s.context && - ( callbackContext.nodeType || callbackContext.jquery ) ? - jQuery( callbackContext ) : - jQuery.event, - - // Deferreds - deferred = jQuery.Deferred(), - completeDeferred = jQuery.Callbacks( "once memory" ), - - // Status-dependent callbacks - statusCode = s.statusCode || {}, - - // Headers (they are sent all at once) - requestHeaders = {}, - requestHeadersNames = {}, - - // Default abort message - strAbort = "canceled", - - // Fake xhr - jqXHR = { - readyState: 0, - - // Builds headers hashtable if needed - getResponseHeader: function( key ) { - var match; - if ( completed ) { - if ( !responseHeaders ) { - responseHeaders = {}; - while ( ( match = rheaders.exec( responseHeadersString ) ) ) { - responseHeaders[ match[ 1 ].toLowerCase() + " " ] = - ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) - .concat( match[ 2 ] ); - } - } - match = responseHeaders[ key.toLowerCase() + " " ]; - } - return match == null ? null : match.join( ", " ); - }, - - // Raw string - getAllResponseHeaders: function() { - return completed ? 
responseHeadersString : null; - }, - - // Caches the header - setRequestHeader: function( name, value ) { - if ( completed == null ) { - name = requestHeadersNames[ name.toLowerCase() ] = - requestHeadersNames[ name.toLowerCase() ] || name; - requestHeaders[ name ] = value; - } - return this; - }, - - // Overrides response content-type header - overrideMimeType: function( type ) { - if ( completed == null ) { - s.mimeType = type; - } - return this; - }, - - // Status-dependent callbacks - statusCode: function( map ) { - var code; - if ( map ) { - if ( completed ) { - - // Execute the appropriate callbacks - jqXHR.always( map[ jqXHR.status ] ); - } else { - - // Lazy-add the new callbacks in a way that preserves old ones - for ( code in map ) { - statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; - } - } - } - return this; - }, - - // Cancel the request - abort: function( statusText ) { - var finalText = statusText || strAbort; - if ( transport ) { - transport.abort( finalText ); - } - done( 0, finalText ); - return this; - } - }; - - // Attach deferreds - deferred.promise( jqXHR ); - - // Add protocol if not provided (prefilters might expect it) - // Handle falsy url in the settings object (#10093: consistency with old signature) - // We also use the url parameter if available - s.url = ( ( url || s.url || location.href ) + "" ) - .replace( rprotocol, location.protocol + "//" ); - - // Alias method option to type as per ticket #12004 - s.type = options.method || options.type || s.method || s.type; - - // Extract dataTypes list - s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; - - // A cross-domain request is in order when the origin doesn't match the current origin. - if ( s.crossDomain == null ) { - urlAnchor = document.createElement( "a" ); - - // Support: IE <=8 - 11, Edge 12 - 15 - // IE throws exception on accessing the href property if url is malformed, - // e.g. 
http://example.com:80x/ - try { - urlAnchor.href = s.url; - - // Support: IE <=8 - 11 only - // Anchor's host property isn't correctly set when s.url is relative - urlAnchor.href = urlAnchor.href; - s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== - urlAnchor.protocol + "//" + urlAnchor.host; - } catch ( e ) { - - // If there is an error parsing the URL, assume it is crossDomain, - // it can be rejected by the transport if it is invalid - s.crossDomain = true; - } - } - - // Convert data if not already a string - if ( s.data && s.processData && typeof s.data !== "string" ) { - s.data = jQuery.param( s.data, s.traditional ); - } - - // Apply prefilters - inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); - - // If request was aborted inside a prefilter, stop there - if ( completed ) { - return jqXHR; - } - - // We can fire global events as of now if asked to - // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) - fireGlobals = jQuery.event && s.global; - - // Watch for a new set of requests - if ( fireGlobals && jQuery.active++ === 0 ) { - jQuery.event.trigger( "ajaxStart" ); - } - - // Uppercase the type - s.type = s.type.toUpperCase(); - - // Determine if request has content - s.hasContent = !rnoContent.test( s.type ); - - // Save the URL in case we're toying with the If-Modified-Since - // and/or If-None-Match header later on - // Remove hash to simplify url manipulation - cacheURL = s.url.replace( rhash, "" ); - - // More options handling for requests with no content - if ( !s.hasContent ) { - - // Remember the hash so we can put it back - uncached = s.url.slice( cacheURL.length ); - - // If data is available and should be processed, append data to url - if ( s.data && ( s.processData || typeof s.data === "string" ) ) { - cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; - - // #9682: remove data so that it's not used in an eventual retry - delete s.data; - } - - // Add or update anti-cache param if needed - if ( s.cache === false ) { - cacheURL = cacheURL.replace( rantiCache, "$1" ); - uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + - uncached; - } - - // Put hash and anti-cache on the URL that will be requested (gh-1732) - s.url = cacheURL + uncached; - - // Change '%20' to '+' if this is encoded form body content (gh-2658) - } else if ( s.data && s.processData && - ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { - s.data = s.data.replace( r20, "+" ); - } - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. - if ( s.ifModified ) { - if ( jQuery.lastModified[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); - } - if ( jQuery.etag[ cacheURL ] ) { - jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); - } - } - - // Set the correct header, if data is being sent - if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { - jqXHR.setRequestHeader( "Content-Type", s.contentType ); - } - - // Set the Accepts header for the server, depending on the dataType - jqXHR.setRequestHeader( - "Accept", - s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? - s.accepts[ s.dataTypes[ 0 ] ] + - ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : - s.accepts[ "*" ] - ); - - // Check for headers option - for ( i in s.headers ) { - jqXHR.setRequestHeader( i, s.headers[ i ] ); - } - - // Allow custom headers/mimetypes and early abort - if ( s.beforeSend && - ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { - - // Abort if not done already and return - return jqXHR.abort(); - } - - // Aborting is no longer a cancellation - strAbort = "abort"; - - // Install callbacks on deferreds - completeDeferred.add( s.complete ); - jqXHR.done( s.success ); - jqXHR.fail( s.error ); - - // Get transport - transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); - - // If no transport, we auto-abort - if ( !transport ) { - done( -1, "No Transport" ); - } else { - jqXHR.readyState = 1; - - // Send global event - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); - } - - // If request was aborted inside ajaxSend, stop there - if ( completed ) { - return jqXHR; - } - - // Timeout - if ( s.async && s.timeout > 0 ) { - timeoutTimer = window.setTimeout( function() { - jqXHR.abort( "timeout" ); - }, s.timeout ); - } - - try { - completed = false; - transport.send( requestHeaders, done ); - } catch ( e ) { - - // Rethrow post-completion exceptions - if ( completed ) { - throw e; - } - - // Propagate others as results - done( -1, e ); - } - } - - // Callback for when everything is done - function done( status, nativeStatusText, responses, headers ) { - var isSuccess, success, error, response, modified, - statusText = nativeStatusText; - - // Ignore repeat invocations - if ( completed ) { - return; - } - - completed = true; - - // Clear timeout if it exists - if ( timeoutTimer ) { - window.clearTimeout( timeoutTimer ); - } - - // Dereference transport for early garbage collection - // (no matter how long the jqXHR object will be used) - transport = undefined; - - // Cache response headers - responseHeadersString = headers || ""; - - // Set readyState - jqXHR.readyState = status > 0 ? 4 : 0; - - // Determine if successful - isSuccess = status >= 200 && status < 300 || status === 304; - - // Get response data - if ( responses ) { - response = ajaxHandleResponses( s, jqXHR, responses ); - } - - // Use a noop converter for missing script - if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) { - s.converters[ "text script" ] = function() {}; - } - - // Convert no matter what (that way responseXXX fields are always set) - response = ajaxConvert( s, response, jqXHR, isSuccess ); - - // If successful, handle type chaining - if ( isSuccess ) { - - // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
- if ( s.ifModified ) { - modified = jqXHR.getResponseHeader( "Last-Modified" ); - if ( modified ) { - jQuery.lastModified[ cacheURL ] = modified; - } - modified = jqXHR.getResponseHeader( "etag" ); - if ( modified ) { - jQuery.etag[ cacheURL ] = modified; - } - } - - // if no content - if ( status === 204 || s.type === "HEAD" ) { - statusText = "nocontent"; - - // if not modified - } else if ( status === 304 ) { - statusText = "notmodified"; - - // If we have data, let's convert it - } else { - statusText = response.state; - success = response.data; - error = response.error; - isSuccess = !error; - } - } else { - - // Extract error from statusText and normalize for non-aborts - error = statusText; - if ( status || !statusText ) { - statusText = "error"; - if ( status < 0 ) { - status = 0; - } - } - } - - // Set data for the fake xhr object - jqXHR.status = status; - jqXHR.statusText = ( nativeStatusText || statusText ) + ""; - - // Success/Error - if ( isSuccess ) { - deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); - } else { - deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); - } - - // Status-dependent callbacks - jqXHR.statusCode( statusCode ); - statusCode = undefined; - - if ( fireGlobals ) { - globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", - [ jqXHR, s, isSuccess ? success : error ] ); - } - - // Complete - completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); - - if ( fireGlobals ) { - globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); - - // Handle the global AJAX counter - if ( !( --jQuery.active ) ) { - jQuery.event.trigger( "ajaxStop" ); - } - } - } - - return jqXHR; - }, - - getJSON: function( url, data, callback ) { - return jQuery.get( url, data, callback, "json" ); - }, - - getScript: function( url, callback ) { - return jQuery.get( url, undefined, callback, "script" ); - } -} ); - -jQuery.each( [ "get", "post" ], function( _i, method ) { - jQuery[ method ] = function( url, data, callback, type ) { - - // Shift arguments if data argument was omitted - if ( isFunction( data ) ) { - type = type || callback; - callback = data; - data = undefined; - } - - // The url can be an options object (which then must have .url) - return jQuery.ajax( jQuery.extend( { - url: url, - type: method, - dataType: type, - data: data, - success: callback - }, jQuery.isPlainObject( url ) && url ) ); - }; -} ); - -jQuery.ajaxPrefilter( function( s ) { - var i; - for ( i in s.headers ) { - if ( i.toLowerCase() === "content-type" ) { - s.contentType = s.headers[ i ] || ""; - } - } -} ); - - -jQuery._evalUrl = function( url, options, doc ) { - return jQuery.ajax( { - url: url, - - // Make this explicit, since user can override this through ajaxSetup (#11264) - type: "GET", - dataType: "script", - cache: true, - async: false, - global: false, - - // Only evaluate the response if it is successful (gh-4126) - // dataFilter is not invoked for failure responses, so using it instead - // of the default converter is kludgy but it works. 
- converters: { - "text script": function() {} - }, - dataFilter: function( response ) { - jQuery.globalEval( response, options, doc ); - } - } ); -}; - - -jQuery.fn.extend( { - wrapAll: function( html ) { - var wrap; - - if ( this[ 0 ] ) { - if ( isFunction( html ) ) { - html = html.call( this[ 0 ] ); - } - - // The elements to wrap the target around - wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); - - if ( this[ 0 ].parentNode ) { - wrap.insertBefore( this[ 0 ] ); - } - - wrap.map( function() { - var elem = this; - - while ( elem.firstElementChild ) { - elem = elem.firstElementChild; - } - - return elem; - } ).append( this ); - } - - return this; - }, - - wrapInner: function( html ) { - if ( isFunction( html ) ) { - return this.each( function( i ) { - jQuery( this ).wrapInner( html.call( this, i ) ); - } ); - } - - return this.each( function() { - var self = jQuery( this ), - contents = self.contents(); - - if ( contents.length ) { - contents.wrapAll( html ); - - } else { - self.append( html ); - } - } ); - }, - - wrap: function( html ) { - var htmlIsFunction = isFunction( html ); - - return this.each( function( i ) { - jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); - } ); - }, - - unwrap: function( selector ) { - this.parent( selector ).not( "body" ).each( function() { - jQuery( this ).replaceWith( this.childNodes ); - } ); - return this; - } -} ); - - -jQuery.expr.pseudos.hidden = function( elem ) { - return !jQuery.expr.pseudos.visible( elem ); -}; -jQuery.expr.pseudos.visible = function( elem ) { - return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); -}; - - - - -jQuery.ajaxSettings.xhr = function() { - try { - return new window.XMLHttpRequest(); - } catch ( e ) {} -}; - -var xhrSuccessStatus = { - - // File protocol always yields status code 0, assume 200 - 0: 200, - - // Support: IE <=9 only - // #1450: sometimes IE returns 1223 when it should be 204 - 1223: 204 - }, - xhrSupported = jQuery.ajaxSettings.xhr(); - -support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); -support.ajax = xhrSupported = !!xhrSupported; - -jQuery.ajaxTransport( function( options ) { - var callback, errorCallback; - - // Cross domain only allowed if supported through XMLHttpRequest - if ( support.cors || xhrSupported && !options.crossDomain ) { - return { - send: function( headers, complete ) { - var i, - xhr = options.xhr(); - - xhr.open( - options.type, - options.url, - options.async, - options.username, - options.password - ); - - // Apply custom fields if provided - if ( options.xhrFields ) { - for ( i in options.xhrFields ) { - xhr[ i ] = options.xhrFields[ i ]; - } - } - - // Override mime type if needed - if ( options.mimeType && xhr.overrideMimeType ) { - xhr.overrideMimeType( options.mimeType ); - } - - // X-Requested-With header - // For cross-domain requests, seeing as conditions for a preflight are - // akin to a jigsaw puzzle, we simply never set it to be sure. - // (it can always be set on a per-request basis or even using ajaxSetup) - // For same-domain requests, won't change header if already provided. 
- if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { - headers[ "X-Requested-With" ] = "XMLHttpRequest"; - } - - // Set headers - for ( i in headers ) { - xhr.setRequestHeader( i, headers[ i ] ); - } - - // Callback - callback = function( type ) { - return function() { - if ( callback ) { - callback = errorCallback = xhr.onload = - xhr.onerror = xhr.onabort = xhr.ontimeout = - xhr.onreadystatechange = null; - - if ( type === "abort" ) { - xhr.abort(); - } else if ( type === "error" ) { - - // Support: IE <=9 only - // On a manual native abort, IE9 throws - // errors on any property access that is not readyState - if ( typeof xhr.status !== "number" ) { - complete( 0, "error" ); - } else { - complete( - - // File: protocol always yields status 0; see #8605, #14207 - xhr.status, - xhr.statusText - ); - } - } else { - complete( - xhrSuccessStatus[ xhr.status ] || xhr.status, - xhr.statusText, - - // Support: IE <=9 only - // IE9 has no XHR2 but throws on binary (trac-11426) - // For XHR2 non-text, let the caller handle it (gh-2498) - ( xhr.responseType || "text" ) !== "text" || - typeof xhr.responseText !== "string" ? - { binary: xhr.response } : - { text: xhr.responseText }, - xhr.getAllResponseHeaders() - ); - } - } - }; - }; - - // Listen to events - xhr.onload = callback(); - errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); - - // Support: IE 9 only - // Use onreadystatechange to replace onabort - // to handle uncaught aborts - if ( xhr.onabort !== undefined ) { - xhr.onabort = errorCallback; - } else { - xhr.onreadystatechange = function() { - - // Check readyState before timeout as it changes - if ( xhr.readyState === 4 ) { - - // Allow onerror to be called first, - // but that will not handle a native abort - // Also, save errorCallback to a variable - // as xhr.onerror cannot be accessed - window.setTimeout( function() { - if ( callback ) { - errorCallback(); - } - } ); - } - }; - } - - // Create the abort callback - callback = callback( "abort" ); - - try { - - // Do send the request (this may raise an exception) - xhr.send( options.hasContent && options.data || null ); - } catch ( e ) { - - // #14683: Only rethrow if this hasn't been notified as an error yet - if ( callback ) { - throw e; - } - } - }, - - abort: function() { - if ( callback ) { - callback(); - } - } - }; - } -} ); - - - - -// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) -jQuery.ajaxPrefilter( function( s ) { - if ( s.crossDomain ) { - s.contents.script = false; - } -} ); - -// Install script dataType -jQuery.ajaxSetup( { - accepts: { - script: "text/javascript, application/javascript, " + - "application/ecmascript, application/x-ecmascript" - }, - contents: { - script: /\b(?:java|ecma)script\b/ - }, - converters: { - "text script": function( text ) { - jQuery.globalEval( text ); - return text; - } - } -} ); - -// Handle cache's special case and crossDomain -jQuery.ajaxPrefilter( "script", function( s ) { - if ( s.cache === undefined ) { - s.cache = false; - } - if ( s.crossDomain ) { - s.type = "GET"; - } -} ); - -// Bind script tag hack transport -jQuery.ajaxTransport( "script", function( s ) { - - // This transport only deals with cross domain or forced-by-attrs requests - if ( s.crossDomain || s.scriptAttrs ) { - var script, callback; - return { - send: function( _, complete ) { - script = jQuery( " -{% endmacro %} \ No newline at end of file diff --git a/docs/_build/html/genindex.html b/docs/_build/html/genindex.html deleted file mode 
100644 index b24b4f7d..00000000 --- a/docs/_build/html/genindex.html +++ /dev/null @@ -1,996 +0,0 @@ - - - - - - - - Index — Splice MLManager documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Index

_ | A | B | C | D | E | F | G | H | I | L | M | O | P | R | S | T | U | V

- - By Ben Epstein
- - © Copyright 2020, Splice Machine.
-

- - - - - - \ No newline at end of file diff --git a/docs/_build/html/getting-started.html b/docs/_build/html/getting-started.html deleted file mode 100644 index 8af1d590..00000000 --- a/docs/_build/html/getting-started.html +++ /dev/null @@ -1,326 +0,0 @@ - - - - - - - - Getting Started — Splice MLManager documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Getting Started

-
-

K8s Install

-

If you are running inside of the Splice Machine Cloud Service in a Jupyter Notebook, MLManager will already be installed for you. If you’d like to install it (or upgrade it), you can install from git with

-
pip install [--upgrade] git+https://www.github.com/splicemachine/pysplice@<RELEASE>
-
-
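A quick way to confirm the install succeeded is to import the package and its main entry points from Python. This is a minimal sketch added for illustration (not part of the original page); printing `splicemachine.__file__` is just one way to see where pip placed the package:

```
# Minimal post-install check -- a sketch, not official verification tooling.
import splicemachine
from splicemachine.spark import PySpliceContext   # Native Spark DataSource
from splicemachine.features import FeatureStore   # Splice Machine Feature Store

print(splicemachine.__file__)  # shows where the package was installed
```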
-
-
-

External Installation

-

If you would like to install outside of the K8s cluster (and use the ExtPySpliceContext), you can install the stable build with

-
sudo pip install git+http://www.github.com/splicemachine/pysplice@2.3.0-k8
-
-
-

Or latest with

-
sudo pip install git+http://www.github.com/splicemachine/pysplice
-
-
-
-
-

Usage

-

This section covers importing and instantiating the Native Spark DataSource

-
- -
-

To use the Native Spark DataSource inside of the cloud service (https://cloud.splicemachine.io/register?utm_source=pydocs&utm_medium=header&utm_campaign=sandbox), first create a Spark Session and then import your PySpliceContext

-
from pyspark.sql import SparkSession
-from splicemachine.spark import PySpliceContext
-from splicemachine.mlflow_support import * # Connects your MLflow session automatically
-from splicemachine.features import FeatureStore # Splice Machine Feature Store
-
-spark = SparkSession.builder.getOrCreate()
-splice = PySpliceContext(spark) # The Native Spark Datasource (PySpliceContext) takes a Spark Session
-fs = FeatureStore(splice) # Create your Feature Store
-mlflow.register_splice_context(splice) # Gives mlflow native DB connection
-mlflow.register_feature_store(fs) # Tracks Feature Store work in Mlflow automatically
-
-
-
-
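With the contexts registered, a small write/read round trip is a quick way to verify connectivity. The sketch below is illustrative rather than part of the original page: the `SPLICE.SMOKE_TEST` table name and its two columns are made up, and while `tableExists`, `createTable`, `insert`, and `df` all appear in the `splicemachine.spark.context` API, their exact signatures are assumed here:

```
# A sketch assuming the `spark` and `splice` objects created above.
# Table name and columns are illustrative -- point these at your own schema.
df = spark.createDataFrame([(1, 'a'), (2, 'b')], ['ID', 'LETTER'])

if not splice.tableExists('SPLICE.SMOKE_TEST'):
    splice.createTable(df, 'SPLICE.SMOKE_TEST')  # DDL derived from the DataFrame schema

splice.insert(df, 'SPLICE.SMOKE_TEST')               # write through the Native Spark DataSource
splice.df('SELECT * FROM SPLICE.SMOKE_TEST').show()  # read the rows back via SQL
```

The same calls work unchanged against the External Native Spark DataSource described next.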
-

To use the External Native Spark DataSource, create a Spark Session with your external jars configured. Then, import your ExtPySpliceContext and set the necessary parameters. -Once created, the functionality is identical to the internal Native Spark DataSource (PySpliceContext)

-
from pyspark.sql import SparkSession
-from splicemachine.spark import ExtPySpliceContext
-from splicemachine.mlflow_support import * # Connects your MLflow session automatically
-from splicemachine.features import FeatureStore # Splice Machine Feature Store
-
-spark = SparkSession.builder.config('spark.jars', '/path/to/splice_spark2-3.0.0.1962-SNAPSHOT-shaded.jar').config('spark.driver.extraClassPath', 'path/to/Splice/jars/dir/*').getOrCreate()
-JDBC_URL = '' # Set your JDBC URL here. You can get it from the Cloud Manager UI. Make sure to append ';user=<USERNAME>;password=<PASSWORD>' after ';ssl=basic' so you can authenticate
-# The ExtPySpliceContext communicates with the database via Kafka
-kafka_server = 'kafka-broker-0-' + JDBC_URL.split('jdbc:splice://jdbc-')[1].split(':1527')[0] + ':19092' # Formatting kafka URL from JDBC
-splice = ExtPySpliceContext(spark, JDBC_URL=JDBC_URL, kafkaServers=kafka_server)
-
-fs = FeatureStore(splice) # Create your Feature Store
-mlflow.register_splice_context(splice) # Gives mlflow native DB connection
-mlflow.register_feature_store(fs) # Tracks Feature Store work in Mlflow automatically
-
-
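Because `mlflow.register_splice_context(splice)` connects the mlflow session to the database-backed tracking server, the standard mlflow fluent API can be used as-is afterwards. A minimal sketch assuming the `splice` object and imports from the block above; the run name, parameter, and metric are purely illustrative:

```
# Assumes the imports and registrations from the previous block.
with mlflow.start_run(run_name='connectivity_check'):        # illustrative run name
    mlflow.log_param('context_type', type(splice).__name__)  # e.g. 'ExtPySpliceContext'
    mlflow.log_metric('smoke_test', 1.0)                     # illustrative metric
```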
-
-
-
-
-
-
-
-

- - By Ben Epstein
- - © Copyright 2020, Splice Machine.
-

- - - - - - \ No newline at end of file diff --git a/docs/_build/html/index.html b/docs/_build/html/index.html deleted file mode 100644 index fa04879a..00000000 --- a/docs/_build/html/index.html +++ /dev/null @@ -1,269 +0,0 @@ - - - - - - - - Welcome to Splicemachine’s documentation! — Splice MLManager documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Welcome to Splicemachine’s documentation!

-

This documentation will walk you through what you need to start using Splice Machine’s MLManager workbench. See the Getting Started section below for installation and usage. Then see the splicemachine.spark section for database connectivity, as well as mlflow_support for our embedded mlflow connection.

- -
-
-

Indices and tables

Getting Started

- - By Ben Epstein
- - © Copyright 2020, Splice Machine.
-

- - - - - - \ No newline at end of file diff --git a/docs/_build/html/objects.inv b/docs/_build/html/objects.inv deleted file mode 100644 index 9e2d6cda..00000000 Binary files a/docs/_build/html/objects.inv and /dev/null differ diff --git a/docs/_build/html/py-modindex.html b/docs/_build/html/py-modindex.html deleted file mode 100644 index 7fee9a32..00000000 --- a/docs/_build/html/py-modindex.html +++ /dev/null @@ -1,279 +0,0 @@ - - - - - - - - Python Module Index — Splice MLManager documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Python Module Index

- -
- s -
 
- s
- splicemachine -
    - splicemachine.features -
    - splicemachine.features.feature -
    - splicemachine.features.feature_set -
    - splicemachine.features.feature_store -
    - splicemachine.features.training_view -
    - splicemachine.mlflow_support.mlflow_support -
    - splicemachine.notebook -
    - splicemachine.spark -
    - splicemachine.spark.context -
    - splicemachine.stats -
- - -

- - By Ben Epstein
- - © Copyright 2020, Splice Machine.
-

- - - - - - \ No newline at end of file diff --git a/docs/_build/html/search.html b/docs/_build/html/search.html deleted file mode 100644 index 3a59518e..00000000 --- a/docs/_build/html/search.html +++ /dev/null @@ -1,233 +0,0 @@ - - - - - - - - Search — Splice MLManager documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

Search

-
- -

- Please activate JavaScript to enable the search - functionality. -

-
-

- Searching for multiple words only shows matches that contain - all words. -

-

- - By Ben Epstein
- - © Copyright 2020, Splice Machine.
-

- - - - - - \ No newline at end of file diff --git a/docs/_build/html/searchindex.js b/docs/_build/html/searchindex.js deleted file mode 100644 index 441058e3..00000000 --- a/docs/_build/html/searchindex.js +++ /dev/null @@ -1 +0,0 @@ -Search.setIndex({docnames:["getting-started","index","spark","splicemachine","splicemachine.features","splicemachine.mlflow_support","splicemachine.notebook","splicemachine.spark","splicemachine.stats"],envversion:{"sphinx.domains.c":2,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":3,"sphinx.domains.index":1,"sphinx.domains.javascript":2,"sphinx.domains.math":2,"sphinx.domains.python":2,"sphinx.domains.rst":2,"sphinx.domains.std":1,"sphinx.ext.todo":2,"sphinx.ext.viewcode":1,sphinx:56},filenames:["getting-started.rst","index.rst","spark.rst","splicemachine.rst","splicemachine.features.rst","splicemachine.mlflow_support.rst","splicemachine.notebook.rst","splicemachine.spark.rst","splicemachine.stats.rst"],objects:{"splicemachine.features":{feature:[4,0,0,"-"],feature_set:[4,0,0,"-"],feature_store:[4,0,0,"-"],training_view:[4,0,0,"-"]},"splicemachine.features.feature":{Feature:[4,1,1,""]},"splicemachine.features.feature.Feature":{is_categorical:[4,2,1,""],is_continuous:[4,2,1,""],is_ordinal:[4,2,1,""]},"splicemachine.features.feature_set":{FeatureSet:[4,1,1,""]},"splicemachine.features.feature_set.FeatureSet":{is_deployed:[4,2,1,""]},"splicemachine.features.feature_store":{FeatureStore:[4,1,1,""]},"splicemachine.features.feature_store.FeatureStore":{create_aggregation_feature_set_from_source:[4,2,1,""],create_feature:[4,2,1,""],create_feature_set:[4,2,1,""],create_source:[4,2,1,""],create_training_view:[4,2,1,""],deploy_feature_set:[4,2,1,""],describe_feature_set:[4,2,1,""],describe_feature_sets:[4,2,1,""],describe_training_view:[4,2,1,""],describe_training_views:[4,2,1,""],display_feature_search:[4,2,1,""],display_model_drift:[4,2,1,""],display_model_feature_drift:[4,2,1,""],feature_exists:[4,2,1,""],feature_set_exists:[4,2,1,""],get_backfill_intervals:[4,2,1,""],get_backfill_sql:[4,2,1,""],get_deployments:[4,2,1,""],get_feature_description:[4,2,1,""],get_feature_details:[4,2,1,""],get_feature_primary_keys:[4,2,1,""],get_feature_sets:[4,2,1,""],get_feature_vector:[4,2,1,""],get_feature_vector_sql_from_training_view:[4,2,1,""],get_features_by_name:[4,2,1,""],get_pipeline_sql:[4,2,1,""],get_summary:[4,2,1,""],get_training_set:[4,2,1,""],get_training_set_by_name:[4,2,1,""],get_training_set_features:[4,2,1,""],get_training_set_from_deployment:[4,2,1,""],get_training_set_from_view:[4,2,1,""],get_training_view:[4,2,1,""],get_training_view_features:[4,2,1,""],get_training_view_id:[4,2,1,""],get_training_views:[4,2,1,""],link_training_set_to_mlflow:[4,2,1,""],list_training_sets:[4,2,1,""],login_fs:[4,2,1,""],register_splice_context:[4,2,1,""],remove_feature:[4,2,1,""],remove_feature_set:[4,2,1,""],remove_source:[4,2,1,""],remove_training_view:[4,2,1,""],run_feature_elimination:[4,2,1,""],set_feature_description:[4,2,1,""],set_feature_store_url:[4,2,1,""],set_token:[4,2,1,""],training_view_exists:[4,2,1,""],update_feature_metadata:[4,2,1,""]},"splicemachine.features.training_view":{TrainingView:[4,1,1,""]},"splicemachine.mlflow_support":{mlflow_support:[5,0,0,"-"]},"splicemachine.mlflow_support.mlflow_support":{_current_exp_id:[5,3,1,""],_current_run_id:[5,3,1,""],_deploy_aws:[5,3,1,""],_deploy_azure:[5,3,1,""],_deploy_db:[5,3,1,""],_deploy_kubernetes:[5,3,1,""],_download_artifact:[5,3,1,""],_end_run:[5,3,1,""],_fetch_logs:[5,3,1,""],_get
_current_run_data:[5,3,1,""],_get_deployed_models:[5,3,1,""],_get_model_name:[5,3,1,""],_get_run_ids_by_name:[5,3,1,""],_lm:[5,3,1,""],_load_model:[5,3,1,""],_log_artifact:[5,3,1,""],_log_feature_transformations:[5,3,1,""],_log_model:[5,3,1,""],_log_model_params:[5,3,1,""],_log_pipeline_stages:[5,3,1,""],_login_director:[5,3,1,""],_lp:[5,3,1,""],_mlflow_patch:[5,3,1,""],_register_feature_store:[5,3,1,""],_register_splice_context:[5,3,1,""],_remove_active_training_set:[5,3,1,""],_set_mlflow_uri:[5,3,1,""],_start_run:[5,3,1,""],_timer:[5,3,1,""],_undeploy_kubernetes:[5,3,1,""],_watch_job:[5,3,1,""]},"splicemachine.notebook":{get_mlflow_ui:[6,3,1,""],get_spark_ui:[6,3,1,""],hide_toggle:[6,3,1,""],run_sql:[6,3,1,""]},"splicemachine.spark":{context:[7,0,0,"-"]},"splicemachine.spark.context":{ExtPySpliceContext:[7,1,1,""],PySpliceContext:[7,1,1,""]},"splicemachine.spark.context.ExtPySpliceContext":{"delete":[7,2,1,""],"export":[7,2,1,""],analyzeSchema:[7,2,1,""],analyzeTable:[7,2,1,""],autoCommitting:[7,2,1,""],bulkImportHFile:[7,2,1,""],bulkImportHFileWithRdd:[7,2,1,""],columnNamesCaseSensitive:[7,2,1,""],commit:[7,2,1,""],createAndInsertTable:[7,2,1,""],createDataFrame:[7,2,1,""],createTable:[7,2,1,""],createTableWithSchema:[7,2,1,""],deleteWithRdd:[7,2,1,""],df:[7,2,1,""],dropTable:[7,2,1,""],dropTableIfExists:[7,2,1,""],execute:[7,2,1,""],executeUpdate:[7,2,1,""],exportBinary:[7,2,1,""],fileToTable:[7,2,1,""],getConnection:[7,2,1,""],getSchema:[7,2,1,""],insert:[7,2,1,""],insertRdd:[7,2,1,""],insertRddWithStatus:[7,2,1,""],insertWithStatus:[7,2,1,""],internalDf:[7,2,1,""],internalRdd:[7,2,1,""],mergeInto:[7,2,1,""],mergeIntoWithRdd:[7,2,1,""],pandasToSpark:[7,2,1,""],rdd:[7,2,1,""],releaseSavepoint:[7,2,1,""],replaceDataframeSchema:[7,2,1,""],rollback:[7,2,1,""],rollbackToSavepoint:[7,2,1,""],setAutoCommitOff:[7,2,1,""],setAutoCommitOn:[7,2,1,""],setSavepoint:[7,2,1,""],setSavepointWithName:[7,2,1,""],splitAndInsert:[7,2,1,""],tableExists:[7,2,1,""],toLower:[7,2,1,""],toUpper:[7,2,1,""],transactional:[7,2,1,""],truncateTable:[7,2,1,""],update:[7,2,1,""],updateWithRdd:[7,2,1,""],upsert:[7,2,1,""],upsertWithRdd:[7,2,1,""]},"splicemachine.spark.context.PySpliceContext":{"delete":[7,2,1,""],"export":[7,2,1,""],analyzeSchema:[7,2,1,""],analyzeTable:[7,2,1,""],bulkImportHFile:[7,2,1,""],bulkImportHFileWithRdd:[7,2,1,""],columnNamesCaseSensitive:[7,2,1,""],createAndInsertTable:[7,2,1,""],createDataFrame:[7,2,1,""],createTable:[7,2,1,""],createTableWithSchema:[7,2,1,""],deleteWithRdd:[7,2,1,""],df:[7,2,1,""],dropTable:[7,2,1,""],dropTableIfExists:[7,2,1,""],execute:[7,2,1,""],executeUpdate:[7,2,1,""],exportBinary:[7,2,1,""],fileToTable:[7,2,1,""],getConnection:[7,2,1,""],getSchema:[7,2,1,""],insert:[7,2,1,""],insertRdd:[7,2,1,""],insertRddWithStatus:[7,2,1,""],insertWithStatus:[7,2,1,""],internalDf:[7,2,1,""],internalRdd:[7,2,1,""],mergeInto:[7,2,1,""],mergeIntoWithRdd:[7,2,1,""],pandasToSpark:[7,2,1,""],rdd:[7,2,1,""],replaceDataframeSchema:[7,2,1,""],splitAndInsert:[7,2,1,""],tableExists:[7,2,1,""],toLower:[7,2,1,""],toUpper:[7,2,1,""],truncateTable:[7,2,1,""],update:[7,2,1,""],updateWithRdd:[7,2,1,""],upsert:[7,2,1,""],upsertWithRdd:[7,2,1,""]},"splicemachine.stats":{DecisionTreeVisualizer:[8,1,1,""],IndReconstructer:[8,1,1,""],OneHotDummies:[8,1,1,""],OverSampleCrossValidator:[8,1,1,""],OverSampler:[8,1,1,""],Rounder:[8,1,1,""],SpliceBaseEvaluator:[8,1,1,""],SpliceBinaryClassificationEvaluator:[8,1,1,""],SpliceMultiClassificationEvaluator:[8,1,1,""],SpliceRegressionEvaluator:[8,1,1,""],best_fit_d
istribution:[8,3,1,""],estimateCovariance:[8,3,1,""],get_confusion_matrix:[8,3,1,""],get_string_pipeline:[8,3,1,""],inspectTable:[8,3,1,""],make_pdf:[8,3,1,""],pca_with_scores:[8,3,1,""],postprocessing_pipeline:[8,3,1,""],reconstructPCA:[8,3,1,""],varianceExplained:[8,3,1,""],vector_assembler_pipeline:[8,3,1,""]},"splicemachine.stats.DecisionTreeVisualizer":{add_node:[8,2,1,""],feature_importance:[8,2,1,""],parse:[8,2,1,""],replacer:[8,2,1,""],tree_json:[8,2,1,""],visualize:[8,2,1,""]},"splicemachine.stats.SpliceBaseEvaluator":{get_results:[8,2,1,""],input:[8,2,1,""]},"splicemachine.stats.SpliceBinaryClassificationEvaluator":{input:[8,2,1,""],plotROC:[8,2,1,""]},splicemachine:{features:[4,0,0,"-"],notebook:[6,0,0,"-"],spark:[7,0,0,"-"],stats:[8,0,0,"-"]}},objnames:{"0":["py","module","Python module"],"1":["py","class","Python class"],"2":["py","method","Python method"],"3":["py","function","Python function"]},objtypes:{"0":"py:module","1":"py:class","2":"py:method","3":"py:function"},terms:{"000":5,"005":7,"10000":[5,8],"1234":8,"1527":0,"19092":0,"1962":0,"20000":7,"2002":4,"2020":5,"2021":7,"2048mi":5,"30d":4,"3888888888888889":8,"512mi":5,"5888888888888888":8,"7055555555555556":8,"806878306878307":8,"8333333333333333":8,"8556863149300125":8,"9092":7,"90d":4,"boolean":7,"case":[4,7],"char":[],"class":[3,4,5,7,8],"default":[4,5,7,8],"export":7,"final":5,"float":[4,5,7],"function":[0,4,5,6,7,8],"import":[0,4,5,7,8],"int":[4,5,6,7,8],"new":[4,5,7],"null":7,"public":5,"return":[4,5,6,7,8],"static":8,"throw":[4,5,7],"true":[4,5,7,8],"try":[4,7],"while":[7,8],ARE:4,DOING:4,FOR:5,For:[5,7],IDs:5,K8s:[5,7],NOT:[4,5],THAT:5,That:4,The:[0,4,5,6,7,8],Then:[0,1],These:4,USE:4,USED:5,Used:[],Will:5,With:8,__dict__:4,__get_active_us:[],__get_log:[],__get_serialized_mlmodel:[],__initiate_job:[],_clear:[],_copy_param:[],_copyvalu:[],_current_exp_id:5,_current_run_id:5,_deploy_aw:5,_deploy_azur:5,_deploy_db:5,_deploy_kubernet:5,_download_artifact:5,_dummi:[],_end_run:5,_featur:4,_fetch_log:5,_filter:4,_fit:[],_from_java:[],_from_java_impl:[],_get_current_run_data:5,_get_deployed_model:5,_get_model_nam:5,_get_properties_help:[],_get_run_ids_by_nam:5,_initiate_job:[],_lm:5,_load_model:5,_log_artifact:5,_log_feature_transform:5,_log_model:5,_log_model_param:5,_log_pipeline_stag:5,_login_director:5,_lp:5,_mlflow_patch:5,_parallelfittask:[],_properti:[],_randomuid:[],_register_feature_stor:5,_register_splice_context:5,_remove_active_training_set:5,_resetuid:[],_resolveparam:[],_set:[],_set_mlflow_uri:5,_setdefault:[],_shouldown:[],_start_run:5,_timer:5,_to_java:[],_to_java_impl:[],_transform:[],_undeploy_kubernet:5,_unit_test:7,_watch_job:5,abl:[4,5],about:[4,5],abov:[5,8],accept:7,access:[5,7,8],accevalu:8,accuraci:8,across:4,activ:[4,5],active_run:5,activerun:5,actual:8,adapt:7,add:[4,5,6],add_nod:8,addgrid:8,addit:[5,7],affect:[],after:[0,4,5,7,8],again:4,against:8,agg:4,agg_default_valu:4,agg_funct:4,agg_window:4,aggreg:4,aggwindow:4,agnost:5,agre:[5,7],airflow:4,algorithm:[],all:[4,5,7,8],alloc:5,allocated_ram:5,allow:[6,7],along:4,alphabet:4,alreadi:[0,4,5,7],also:[4,5,8],alt_metr:[],alter:5,altern:5,altevalu:8,although:5,amount:5,analysi:8,analyz:7,analyzeschema:7,analyzet:7,anchor:4,ani:[4,5,7],anoth:[],anyth:5,apach:[5,7,8],api:[4,5],app:[5,7],app_nam:5,append:0,appli:[4,5],applic:[5,7],approach:[],appropri:4,approxim:8,ar_clothing_qti:4,ar_delicatessen_qti:4,ar_garden_qti:4,areaunderpr:8,areaunderroc:8,arg:8,argument:5,around:4,arrai:[5,8],artifact:5,as_dict:8,as_list:4,as_pyfunc:5,assembl:8,assign:8
[minified Sphinx search-index data for the deleted generated docs; it indexed the pages "Getting Started", "Welcome to Splicemachine's documentation!", "spark package", "Splicemachine package", "splicemachine.features package", "splicemachine.mlflow_support package", "splicemachine.notebook module", "splicemachine.spark package", and "splicemachine.stats module"]
\ No newline at end of file
diff --git a/docs/_build/html/spark.html b/docs/_build/html/spark.html
deleted file mode 100644
index 182b00d5..00000000
--- a/docs/_build/html/spark.html
+++ /dev/null
@@ -1,276 +0,0 @@
-[deleted rendered page "spark package — Splice MLManager documentation"; its body contained only the section headings Subpackages, Submodules, spark.constants module, spark.context module, and Module contents, plus theme navigation and the footer "By Ben Epstein / © Copyright 2020, Splice Machine."]
\ No newline at end of file
diff --git a/docs/_build/html/splicemachine.features.html b/docs/_build/html/splicemachine.features.html
deleted file mode 100644
index ce9bc5e1..00000000
--- a/docs/_build/html/splicemachine.features.html
+++ /dev/null
@@ -1,1343 +0,0 @@
-[page head: "splicemachine.features package — Splice MLManager documentation"; theme markup omitted]
-
- - - - - - - - -
- -
- -
-
-
- -
- -
-

splicemachine.features package

-
-

Submodules

-
-
-

splicemachine.features.feature_store module

-

This module contains the classes and APIs for interacting with the Splice Machine Feature Store.

-
-
-class FeatureStore(splice_ctx: splicemachine.spark.context.PySpliceContext = None)[source]
-

Bases: object

-
-
-create_aggregation_feature_set_from_source(source_name: str, schema_name: str, table_name: str, start_time: datetime.datetime, schedule_interval: str, aggregations: List[splicemachine.features.pipelines.feature_aggregation.FeatureAggregation], backfill_start_time: datetime.datetime = None, backfill_interval: str = None, description: Optional[str] = None, run_backfill: Optional[bool] = True)[source]
-

Creates a temporal aggregation feature set by creating a pipeline that links a Source to a feature set. Sources are created with features.FeatureStore.create_source(). The provided aggregations generate the features for the feature set; this call creates the feature set along with the aggregation calculations that populate its features.

-
-
Parameters
-
    -
  • source_name – The name of the of the source created via create_source

  • -
  • schema_name – The schema name of the feature set

  • -
  • table_name – The table name of the feature set

  • -
  • start_time – The start time for the pipeline to run

  • -
  • schedule_interval – The frequency with which to run the pipeline.

  • -
  • aggregations – The list of FeatureAggregations to apply to the column names of the source SQL statement

  • -
  • backfill_start_time – The datetime representing the earliest point in time to get data from when running -backfill

  • -
  • backfill_interval – The “sliding window” interval to increase each timepoint by when performing backfill

  • -
  • run_backfill – Whether or not to run backfill when calling this function. Default True. If this is True, backfill_start_time and backfill_interval MUST BE SET

  • -
-
-
Returns
-

(FeatureSet) the created Feature Set

-
-
Example
-
from splicemachine.features.pipelines import AggWindow, FeatureAgg, FeatureAggregation
from datetime import datetime

source_name = 'CUSTOMER_RFM'
fs.create_source(
    name=source_name,
    sql='SELECT * FROM RETAIL_RFM.CUSTOMER_CATEGORY_ACTIVITY',
    event_ts_column='INVOICEDATE',
    update_ts_column='LAST_UPDATE_TS',
    primary_keys=['CUSTOMERID']
)
start_time = datetime.today()
schedule_interval = AggWindow.get_window(5, AggWindow.DAY)
backfill_start = datetime.strptime('2002-01-01 00:00:00', '%Y-%m-%d %H:%M:%S')
backfill_interval = schedule_interval
fs.create_aggregation_feature_set_from_source(
    source_name, 'RETAIL_FS', 'AUTO_RFM', start_time=start_time,
    schedule_interval=schedule_interval, backfill_start_time=backfill_start,
    backfill_interval=backfill_interval,
    aggregations=[
        FeatureAggregation(feature_name_prefix='AR_CLOTHING_QTY',     column_name='CLOTHING_QTY',     agg_functions=['sum', 'max'],   agg_windows=['1d', '2d', '90d'],  agg_default_value=0.0),
        FeatureAggregation(feature_name_prefix='AR_DELICATESSEN_QTY', column_name='DELICATESSEN_QTY', agg_functions=['avg'],          agg_windows=['1d', '2d', '2w'],   agg_default_value=11.5),
        FeatureAggregation(feature_name_prefix='AR_GARDEN_QTY',       column_name='GARDEN_QTY',       agg_functions=['count', 'avg'], agg_windows=['30d', '90d', '1q'], agg_default_value=8)
    ]
)
-
-
-

This will create, deploy, and return a FeatureSet called 'RETAIL_FS.AUTO_RFM'. The Feature Set will have 15 features:
* 6 for the 'AR_CLOTHING_QTY' prefix (sum & max over the provided agg windows)
* 3 for the 'AR_DELICATESSEN_QTY' prefix (avg over the provided agg windows)
* 6 for the 'AR_GARDEN_QTY' prefix (count & avg over the provided agg windows)

-

A Pipeline is also created and scheduled in Airflow that feeds the Feature Set every 5 days from the Source 'CUSTOMER_RFM'. Backfill will also occur, reading data from the source as of '2002-01-01 00:00:00' with a 5-day window.

-
-
-
- -
-
-create_feature(schema_name: str, table_name: str, name: str, feature_data_type: str, feature_type: str, desc: str = None, tags: List[str] = None, attributes: Dict[str, str] = None)[source]
-

Add a feature to a feature set

-
-
Parameters
-
    -
  • schema_name – The feature set schema

  • -
  • table_name – The feature set table name to add the feature to

  • -
  • name – The feature name

  • -
  • feature_data_type – The datatype of the feature. Must be a valid SQL datatype

  • -
  • feature_type

    splicemachine.features.FeatureType of the feature. The available types are from the FeatureType class: FeatureType.[categorical, ordinal, continuous]. -You can see available feature types by running

    -
    from splicemachine.features import FeatureType
    -print(FeatureType.get_valid())
    -
    -
    -

  • -
  • desc – The (optional) feature description (default None)

  • -
  • tags – (optional) List of (str) tag words (default None)

  • -
  • attributes – (optional) Dict of (str) attribute key/value pairs (default None)

  • -
-
-
Returns
-

Feature created

-
-
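Example (a minimal sketch; the feature set SPLICE.FOO and the feature name TOTAL_SPEND are assumed for illustration, not taken from the API above):

from splicemachine.features import FeatureType

# Add a continuous feature to an existing (assumed) SPLICE.FOO feature set
fs.create_feature(
    schema_name='SPLICE',
    table_name='FOO',
    name='TOTAL_SPEND',           # hypothetical feature name
    feature_data_type='DOUBLE',   # must be a valid SQL datatype
    feature_type=FeatureType.continuous,
    desc='total customer spend to date',
    tags=['spend'],
    attributes={'unit': 'USD'}
)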
-
- -
-
-create_feature_set(schema_name: str, table_name: str, primary_keys: Dict[str, str], desc: Optional[str] = None, features: Optional[List[splicemachine.features.feature.Feature]] = None)splicemachine.features.feature_set.FeatureSet[source]
-

Creates and returns a new feature set

-
-
Parameters
-
    -
  • schema_name – The schema under which to create the feature set table

  • -
  • table_name – The table name for this feature set

  • -
  • primary_keys – The primary key column(s) of this feature set

  • -
  • desc – The (optional) description

  • -
  • features – An optional list of features. If provided, the Features will be created with the Feature Set

  • -
-
-
Example
-
from splicemachine.features import FeatureType, Feature
-f1 = Feature(
-    name='my_first_feature',
-    description='the first feature',
-    feature_data_type='INT',
-    feature_type=FeatureType.ordinal,
-    tags=['good_feature','a new tag', 'ordinal'],
-    attributes={'quality':'awesome'}
-)
-f2 = Feature(
-    name='my_second_feature',
-    description='the second feature',
-    feature_data_type='FLOAT',
-    feature_type=FeatureType.continuous,
-    tags=['not_as_good_feature','a new tag'],
-    attributes={'quality':'not as awesome'}
-)
-feats = [f1, f2]
-feature_set = fs.create_feature_set(
-    schema_name='splice',
-    table_name='foo',
-    primary_keys={'MOMENT_KEY':"INT"},
-    desc='test fset',
-    features=feats
-)
-
-
-
-
Returns
-

FeatureSet

-
-
-
- -
-
-create_source(name: str, sql: str, event_ts_column: datetime.datetime, update_ts_column: datetime.datetime, primary_keys: List[str])[source]
-

Creates, validates, and stores a source in the Feature Store that can be used to create a Pipeline that -feeds a feature set

-
-
Example
-
fs.create_source(
-    name='CUSTOMER_RFM',
-    sql='SELECT * FROM RETAIL_RFM.CUSTOMER_CATEGORY_ACTIVITY',
-    event_ts_column='INVOICEDATE',
-    update_ts_column='LAST_UPDATE_TS',
-    primary_keys=['CUSTOMERID']
-)
-
-
-
-
Parameters

  • name – The name of the source. This must be unique across the feature store

  • sql – the SQL statement that returns the base result set to be used in future aggregation pipelines

  • event_ts_column – The column of the source query that determines the time of the event (row) being described. This is not necessarily the time the record was recorded, but the time the event itself occurred.

  • update_ts_column – The column that indicates the time when the record was last updated. When scheduled pipelines run, they will filter on this column to get only the records that have not been queried before.

  • primary_keys – The list of columns in the source SQL that uniquely identify each row. These become the primary keys of the feature set(s) that is/are eventually created from this source.

-
- -
-
-create_training_view(name: str, sql: str, primary_keys: List[str], join_keys: List[str], ts_col: str, label_col: Optional[str] = None, replace: Optional[bool] = False, desc: Optional[str] = None, verbose=False) → None[source]
-

Registers a training view for use in generating training SQL

-
-
Parameters
-
    -
  • name – The training set name. This must be unique to other existing training sets unless replace is True

  • -
  • sql – (str) a SELECT statement that includes:
    * the primary key column(s) - uniquely identifying a training row/case
    * the inference timestamp column - the timestamp column with which to join features (temporal join timestamp)
    * join key(s) - the references to the other feature tables' primary keys (ie customer_id, location_id)
    * (optionally) the label expression - defining what the training set is trying to predict

  • -
  • primary_keys – (List[str]) The list of columns from the training SQL that identify the training row

  • -
  • ts_col – The timestamp column of the training SQL that identifies the inference timestamp

  • -
  • label_col – (Optional[str]) The optional label column from the training SQL.

  • -
  • replace – (Optional[bool]) Whether to replace an existing training view

  • -
  • join_keys – (List[str]) A list of join keys in the sql that are used to get the desired features in -get_training_set

  • -
  • desc – (Optional[str]) An optional description of the training set

  • -
  • verbose – Whether or not to print the SQL before execution (default False)

  • -
-
-
Returns
-

-
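Example (a minimal sketch; the table RETAIL_RFM.CUSTOMER_ORDERS and its columns are assumed for illustration):

fs.create_training_view(
    name='CUSTOMER_LTV',            # hypothetical view name
    sql='SELECT CUSTOMERID, INVOICEDATE, TOTAL_SPEND AS LABEL FROM RETAIL_RFM.CUSTOMER_ORDERS',
    primary_keys=['CUSTOMERID'],    # uniquely identifies a training row
    join_keys=['CUSTOMERID'],       # used to join feature sets in get_training_set
    ts_col='INVOICEDATE',           # inference timestamp column
    label_col='LABEL',
    desc='training view for a customer lifetime value model'
)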
-
-
- -
-
-deploy_feature_set(schema_name: str, table_name: str)[source]
-

Deploys a feature set to the database. This persists the feature set's existence. As of now, once deployed you cannot delete the feature set or add/delete features. The feature set must have already been created with create_feature_set()

-
-
Parameters
-
    -
  • schema_name – The schema of the created feature set

  • -
  • table_name – The table of the created feature set

  • -
-
-
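Example (a minimal sketch, continuing the create_feature_set example above):

# Create the physical table for the feature set created earlier
fs.deploy_feature_set(schema_name='splice', table_name='foo')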
-
- -
-
-describe_feature_set(schema_name: str, table_name: str) → None[source]
-

Prints out a description of a given feature set, with all features in the feature set and whether the feature -set is deployed

-
-
Parameters
-
    -
  • schema_name – feature set schema name

  • -
  • table_name – feature set table name

  • -
-
-
Returns
-

None

-
-
-
- -
-
-describe_feature_sets() → None[source]
-

Prints out a description of all feature sets, with all features in the feature sets and whether each feature set is deployed

-
-
Returns
-

None

-
-
-
- -
-
-describe_training_view(training_view: str) → None[source]
-

Prints out a description of a given training view, the ID, name, description and optional label

-
-
Parameters
-

training_view – The training view name

-
-
Returns
-

None

-
-
-
- -
-
-describe_training_views() → None[source]
-

Prints out a description of all training views, the ID, name, description and optional label

-
-

-
-
Returns
-

None

-
-
-
- -
- -

Returns an interactive feature search that enables users to search for features and profiles the selected Feature. Two forms of this search exist: one for use inside of the managed Splice Machine notebook environment, and one for standard Jupyter. This is because the managed Splice Jupyter environment has extra functionality that would not be present outside of it. The search will be rendered automatically depending on the environment.

-
-
Parameters
-

pandas_profile – Whether to use pandas or spark to profile the feature. If pandas is selected but the dataset is too large, it will fall back to Spark. Default Pandas.

-
- -
-
-display_model_drift(schema_name: str, table_name: str, time_intervals: int, start_time: datetime.datetime = None, end_time: datetime.datetime = None)[source]
-

Displays as many as ‘time_intervals’ plots showing the distribution of the model prediction within each time -period. Time periods are equal periods of time where predictions are present in the model table -‘schema_name’.’table_name’. Model predictions are first filtered to only those occurring after ‘start_time’ if -specified and before ‘end_time’ if specified.

-
-
Parameters
-
    -
  • schema_name – schema where the model table resides

  • -
  • table_name – name of the model table

  • -
  • time_intervals – number of time intervals to plot

  • -
  • start_time – if specified, filters to only show predictions occurring after this date/time

  • -
  • end_time – if specified, filters to only show predictions occurring before this date/time

  • -
-
-
Returns
-

None

-
-
-
- -
-
-display_model_feature_drift(schema_name: str, table_name: str)[source]
-

Displays feature by feature comparison between the training set of the deployed model and the input feature -values used with the model since deployment.

-
-
Parameters
-
    -
  • schema_name – name of database schema where model table is deployed

  • -
  • table_name – name of the model table

  • -
-
-
Returns
-

None

-
-
-
- -
-
-feature_exists(name: str) → bool[source]
-

Returns if a feature exists or not

-
-
Parameters
-

name – The feature name

-
-
Returns
-

bool True if the feature exists, False otherwise

-
-
-
- -
-
-feature_set_exists(schema: str, table: str) → bool[source]
-

Returns if a feature set exists or not

-
-
Parameters
-
    -
  • schema – The feature set schema

  • -
  • table – The feature set table

  • -
-
-
Returns
-

bool True if the feature set exists, False otherwise

-
-
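Example (a minimal sketch, guarding creation of the hypothetical splice.foo feature set from the example above):

if not fs.feature_set_exists(schema='splice', table='foo'):
    feature_set = fs.create_feature_set(
        schema_name='splice',
        table_name='foo',
        primary_keys={'MOMENT_KEY': 'INT'}
    )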
-
- -
-
-get_backfill_intervals(schema_name: str, table_name: str) → List[datetime.datetime][source]
-

Gets the backfill intervals necessary for the parameterized backfill SQL obtained from the -features.FeatureStore.get_backfill_sql() function. This function will likely not be -necessary as you can perform backfill at the time of feature set creation automatically.

-
-
Parameters
-
    -
  • schema_name – The schema name of the feature set

  • -
  • table_name – The table name of the feature set

  • -
-
-
Returns
-

The list of datetimes necessary to parameterize the backfill SQL

-
-
-
- -
-
-get_backfill_sql(schema_name: str, table_name: str)[source]
-

Returns the necessary parameterized SQL statement to perform backfill on an Aggregate Feature Set. The Feature Set must have been deployed using the features.FeatureStore.create_aggregation_feature_set_from_source() function, meaning there must be a Source and a Pipeline associated with it. This function will likely not be necessary, as you can perform backfill at the time of feature set creation automatically.

-

This SQL is parameterized and needs a timestamp to execute. You can get those timestamps by calling features.FeatureStore.get_backfill_intervals() with the same parameters.

-
-
Parameters
-
    -
  • schema_name – The schema name of the feature set

  • -
  • table_name – The table name of the feature set

  • -
-
-
Returns
-

The parameterized Backfill SQL

-
-
-
- -
-
-get_deployments(schema_name: str = None, table_name: str = None, training_set: str = None, feature: str = None, feature_set: str = None)[source]
-

Returns a list of all (or specified) available deployments

-
-
Parameters
-
    -
  • schema_name – model schema name

  • -
  • table_name – model table name

  • -
  • training_set – training set name

  • -
  • feature – passing this in will return all deployments that used this feature

  • -
  • feature_set – passing this in will return all deployments that used this feature set

  • -
-
-
Returns
-

List[Deployment] the list of Deployments as dicts

-
-
-
- -
-
-get_feature_description()[source]
-
- -
-
-get_feature_details(name: str)splicemachine.features.feature.Feature[source]
-

Returns a Feature and its detailed information

-
-
Parameters
-

name – The feature name

-
-
Returns
-

Feature

-
-
-
- -
-
-get_feature_primary_keys(features: List[str]) → Dict[str, List[str]][source]
-

Returns a dictionary mapping each individual feature to its primary key(s). This function is not yet implemented.

-
-
Parameters
-

features – (List[str]) The list of features to get primary keys for

-
-
Returns
-

Dict[str, List[str]] A mapping of {feature name: [pk1, pk2, etc]}

-
-
-
- -
-
-get_feature_sets(feature_set_names: List[str] = None) → List[splicemachine.features.feature_set.FeatureSet][source]
-

Returns a list of available feature sets

-
-
Parameters
-

feature_set_names – A list of feature set names in the format '{schema_name}.{table_name}'. If None, all FeatureSets are returned

-
-
Returns
-

List[FeatureSet] the list of Feature Sets

-
-
-
- -
-
-get_feature_vector(features: List[Union[str, splicemachine.features.feature.Feature]], join_key_values: Dict[str, str], return_primary_keys=True, return_sql=False) → Union[str, pandas.core.frame.DataFrame][source]
-

Gets a feature vector given a list of Features and primary key values for their corresponding Feature Sets

-
-
Parameters
-
    -
  • features – List of str Feature names or Features

  • -
  • join_key_values – (dict) join key values to get the proper Feature values formatted as {join_key_column_name: join_key_value}

  • -
  • return_primary_keys – Whether to return the Feature Set primary keys in the vector. Default True

  • -
  • return_sql – Whether to return the SQL needed to get the vector or the values themselves. Default False

  • -
-
-
Returns
-

Pandas Dataframe or str (SQL statement)

-
-
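Example (a minimal sketch; the feature names and key value are hypothetical):

# Fetch a single feature vector for one entity by its join key value
vec = fs.get_feature_vector(
    features=['AR_CLOTHING_QTY_SUM_1D', 'AR_GARDEN_QTY_AVG_30D'],  # assumed feature names
    join_key_values={'CUSTOMERID': '10001'},                       # assumed key value
    return_primary_keys=True,
    return_sql=False
)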
-
- -
-
-get_feature_vector_sql_from_training_view(training_view: str, features: List[Union[str, splicemachine.features.feature.Feature]]) → str[source]
-

Returns the parameterized feature retrieval SQL used for online model serving.

-
-
Parameters
-
    -
  • training_view – (str) The name of the registered training view

  • -
  • features

    (List[str]) the list of features from the feature store to be included in the training

    -
    -
    NOTE
    -
    This function will error if the view SQL is missing a join key required to retrieve the desired features
    -
    -
    -
    -
    -

  • -
-
-
Returns
-

(str) the parameterized feature vector SQL

-
-
-
- -
-
-get_features_by_name(names: Optional[List[str]] = None, as_list=False) → Union[List[splicemachine.features.feature.Feature], pyspark.sql.dataframe.DataFrame][source]
-

Returns a dataframe or list of features whose names are provided

-
-
Parameters
-
    -
  • names – The list of feature names

  • -
  • as_list – Whether or not to return a list of features. Default False

  • -
-
-
Returns
-

SparkDF or List[Feature] The list of Feature objects or a Spark Dataframe of features and their metadata. Note, this is not the Feature values, simply the metadata describing the features. To create a training dataset with Feature values, see features.FeatureStore.get_training_set() or features.FeatureStore.get_feature_dataset()

-
- -
-
-get_pipeline_sql(schema_name: str, table_name: str)[source]
-

Returns the incremental pipeline SQL that feeds a feature set from a source (thus creating a pipeline). -Pipelines are managed for you by default by Splice Machine via Airflow, but if you opt out of using the -managed pipelines you can use this function to get the incremental SQL.

-

This SQL is parameterized and needs a timestamp to execute. You can get those timestamps by calling features.FeatureStore.get_backfill_intervals() with the same parameters.

-
-
Parameters
-
    -
  • schema_name – The schema name of the feature set

  • -
  • table_name – The table name of the feature set

  • -
-
-
Returns
-

The incremental Pipeline SQL

-
-
-
- -
-
-get_summary()splicemachine.features.training_view.TrainingView[source]
-

This function returns a summary of the feature store including:
* Number of feature sets
* Number of deployed feature sets
* Number of features
* Number of deployed features
* Number of training sets
* Number of training views
* Number of associated models - this is a count of the MLManager.RUNS table where the splice.model_name tag is set and the splice.feature_store.training_set parameter is set
* Number of active (deployed) models (that have used the feature store for training)
* Number of pending feature sets - this will require a new table featurestore.pending_feature_set_deployments, and it will be a count of that

-
- -
-
-get_training_set(features: Union[List[splicemachine.features.feature.Feature], List[str]], current_values_only: bool = False, start_time: datetime.datetime = None, end_time: datetime.datetime = None, label: str = None, return_pk_cols: bool = False, return_ts_col: bool = False, return_sql: bool = False, save_as: str = None) → pyspark.sql.dataframe.DataFrame[source]
-

Gets a set of feature values across feature sets that is not time dependent (i.e. for non-time-series clustering). This feature dataset will be treated and tracked implicitly the same way a training dataset is tracked from features.FeatureStore.get_training_set(). The dataset's metadata and features used will be tracked in mlflow automatically (see get_training_set for more details).

-
-
NOTE
-
The way point-in-time correctness is guaranteed here is by choosing one of the Feature Sets as the "anchor" dataset. This means that the points in time that the query is based off of will be the points in time in which the anchor Feature Set recorded changes. The anchor Feature Set is the Feature Set that contains the superset of all primary key columns across all Feature Sets from all Features provided. If more than 1 Feature Set contains that superset of primary key columns, the Feature Set with the most primary keys is selected. If more than 1 Feature Set has the same maximum number of primary keys, the Feature Set is chosen by alphabetical order (schema_name, table_name).
-
-
-
-
Parameters
-
    -
  • features

    List of Features or strings of feature names

    -
    -
    NOTE
    -
    The Features Sets which the list of Features come from must have common join keys,
    -otherwise the function will fail. If there is no common join key, it is recommended to
    -create a Training View to specify the join conditions.
    -
    -
    -
    -
    -

  • -
  • current_values_only – If you only want the most recent values of the features, set this to true. Otherwise, all history will be returned. Default False

  • -
  • start_time – How far back in history you want Feature values. If not specified (and current_values_only is False), all history will be returned. -This parameter only takes effect if current_values_only is False.

  • -
  • end_time – The most recent values for each selected Feature. This will be the cutoff time, such that any Feature values that -were updated after this point in time won’t be selected. If not specified (and current_values_only is False), -Feature values up to the moment in time you call the function (now) will be retrieved. This parameter -only takes effect if current_values_only is False.

  • -
  • label – An optional label to specify for the training set. If specified, the feature set of that feature -will be used as the “anchor” feature set, meaning all point in time joins will be made to the timestamps of -that feature set. This feature will also be recorded as a “label” feature for this particular training set -(but not others in the future, unless this label is again specified).

  • -
  • return_pk_cols – bool Whether or not the returned sql should include the primary key column(s)

  • -
  • return_ts_col – bool Whether or not the returned sql should include the timestamp column

  • -
  • save_as – Whether or not to save this Training Set (metadata) in the feature store for reproducibility. This -enables you to version and persist the metadata for a training set of a specific model development. If you are -using the Splice Machine managed MLFlow Service, this will be fully automated and managed for you upon model deployment, -however you can still use this parameter to customize the name of the training set (it will default to the run id). -If you are NOT using Splice Machine’s mlflow service, this is a useful way to link specific modeling experiments -to the exact training sets used. This DOES NOT persist the training set itself, rather the metadata required -to reproduce the identical training set.

  • -
-
-
Returns
-

Spark DF or SQL statement necessary to generate the Training Set

-
-
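Example (a minimal sketch; the feature names and the saved name are hypothetical):

from datetime import datetime

# Pull two years of point-in-time consistent feature history into a Spark DF
train_df = fs.get_training_set(
    features=['AR_CLOTHING_QTY_SUM_1D', 'AR_GARDEN_QTY_AVG_30D'],  # assumed feature names
    start_time=datetime(2019, 1, 1),
    end_time=datetime(2021, 1, 1),
    return_pk_cols=True,
    return_ts_col=True,
    save_as='my_training_set_v1'   # hypothetical name, saved for reproducibility
)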
-
- -
-
-get_training_set_by_name(name, version: int = None, return_pk_cols: bool = False, return_ts_col: bool = False, return_sql=False)[source]
-

Returns a Spark DF (or SQL) of an EXISTING Training Set (one that was saved with the save_as parameter in get_training_set() or get_training_set_from_view()). This is useful if you've deployed a model with a Training Set and want to recreate the exact data that model was trained on.

-
-
Parameters
-
    -
  • name – Training Set name

  • -
  • version – The version of this training set. If not set, it will grab the newest version

  • -
  • return_pk_cols – bool Whether or not the returned sql should include the primary key column(s)

  • -
  • return_ts_col – bool Whether or not the returned sql should include the timestamp column

  • -
  • return_sql – (Optional[bool]) Return the SQL statement (str) instead of the Spark DF. Defaults False

  • -
-
-
Returns
-

Spark DF or SQL

-
-
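Example (a minimal sketch, reloading the hypothetical training set saved above):

# Recreate the exact training set that was saved as 'my_training_set_v1'
saved_df = fs.get_training_set_by_name('my_training_set_v1', return_pk_cols=True)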
-
- -
-
-get_training_set_features(training_set: str = None)[source]
-

Returns a list of all features from an available Training Set, as well as details about that Training Set

-
-
Parameters
-

training_set – training set name

-
-
Returns
-

TrainingSet as dict

-
-
-
- -
-
-get_training_set_from_deployment(schema_name: str, table_name: str, label: str = None, return_pk_cols: bool = False, return_ts_col: bool = False)[source]
-

Reads Feature Store metadata to rebuild the original training data set used for the given deployed model.

-
-
Parameters
-
    -
  • schema_name – model schema name

  • -
  • table_name – model table name

  • -
  • label – An optional label to specify for the training set. If specified, the feature set of that feature -will be used as the “anchor” feature set, meaning all point in time joins will be made to the timestamps of -that feature set. This feature will also be recorded as a “label” feature for this particular training set -(but not others in the future, unless this label is again specified).

  • -
  • return_pk_cols – bool Whether or not the returned sql should include the primary key column(s)

  • -
  • return_ts_col – bool Whether or not the returned sql should include the timestamp column

  • -
-
-
Returns
-

SparkDF the Training Frame

-
-
-
- -
-
-get_training_set_from_view(training_view: str, features: Union[List[splicemachine.features.feature.Feature], List[str]] = None, start_time: Optional[datetime.datetime] = None, end_time: Optional[datetime.datetime] = None, return_pk_cols: bool = False, return_ts_col: bool = False, return_sql: bool = False, save_as: str = None) → pyspark.sql.dataframe.DataFrame[source]
-

Returns the training set as a Spark Dataframe from a Training View. When a user calls this function (assuming they have registered the feature store with mlflow using register_feature_store()), the training dataset's metadata will be tracked in mlflow automatically. The following will be tracked:

-
-
    -
  • Training View

  • -
  • Selected features

  • -
  • Start time

  • -
  • End time

  • -
-
-

This tracking will occur in the current run (if there is an active run) -or in the next run that is started after calling this function (if no run is currently active).

-
-
Parameters
-
    -
  • training_view – (str) The name of the registered training view

  • -
  • features

    (List[str] OR List[Feature]) the list of features from the feature store to be included in the training. -If a list of strings is passed in it will be converted to a list of Feature. If not provided will return all available features.

    -
    -
    NOTE
    -
    This function will error if the view SQL is missing a join key required to retrieve the
    -desired features
    -
    -
    -
    -
    -

  • -
  • start_time

    (Optional[datetime]) The start time of the query (how far back in the data to start). Default None

    -
    -
    NOTE
    -
    If start_time is None, query will start from beginning of history
    -
    -
    -
    -
    -

  • -
  • end_time

    (Optional[datetime]) The end time of the query (how far recent in the data to get). Default None

    -
    -
    NOTE
    -
    If end_time is None, query will get most recently available data
    -
    -
    -
    -
    -

  • -
  • return_pk_cols – bool Whether or not the returned sql should include the primary key column(s)

  • -
  • return_ts_col – bool Whether or not the returned sql should include the timestamp column

  • -
  • return_sql – (Optional[bool]) Return the SQL statement (str) instead of the Spark DF. Defaults False

  • -
  • save_as – Whether or not to save this Training Set (metadata) in the feature store for reproducibility. This -enables you to version and persist the metadata for a training set of a specific model development. If you are -using the Splice Machine managed MLFlow Service, this will be fully automated and managed for you upon model deployment, -however you can still use this parameter to customize the name of the training set (it will default to the run id). -If you are NOT using Splice Machine’s mlflow service, this is a useful way to link specific modeling experiments -to the exact training sets used. This DOES NOT persist the training set itself, rather the metadata required -to reproduce the identical training set.

  • -
-
-
Returns
-

Optional[SparkDF, str] The Spark dataframe of the training set or the SQL that is used to generate it (for debugging)

-
-
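Example (a minimal sketch, using the hypothetical CUSTOMER_LTV view registered earlier):

# Materialize a training set from a registered training view
train_df = fs.get_training_set_from_view(
    training_view='CUSTOMER_LTV',   # assumed view name
    features=None,                  # None returns all features available to the view
    return_pk_cols=True,
    return_ts_col=True
)
train_df.show(5)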
-
- -
-
-get_training_view(training_view: str)splicemachine.features.training_view.TrainingView[source]
-

Gets a training view by name

-
-
Parameters
-

training_view – Training view name

-
-
Returns
-

TrainingView

-
-
-
- -
-
-get_training_view_features(training_view: str) → List[splicemachine.features.feature.Feature][source]
-

Returns the available features for the given a training view name

-
-
Parameters
-

training_view – The name of the training view

-
-
Returns
-

A list of available Feature objects

-
-
-
- -
-
-get_training_view_id(name: str) → int[source]
-

Returns the unique view ID from a name

-
-
Parameters
-

name – The training view name

-
-
Returns
-

The training view id

-
-
-
- -
-
-get_training_views(_filter: Dict[str, Union[int, str]] = None) → List[splicemachine.features.training_view.TrainingView][source]
-

Returns a list of all available training views with an optional filter

-
-
Parameters
-

_filter – Dictionary container the filter keyword (label, description etc) and the value to filter on -If None, will return all TrainingViews

-
-
Returns
-

List[TrainingView]

-
-
-
- -
- -
- -
-
-list_training_sets() → Dict[str, Optional[str]][source]
-

Returns a dictionary of the training sets available, with the map name -> description. If there is no description, the value will be an empty string

-
-
Returns
-

Dict[str, Optional[str]]

-
-
-
- -
-
-login_fs(username, password)[source]
-
- -
-
-register_splice_context(splice_ctx: splicemachine.spark.context.PySpliceContext) → None[source]
-
- -
-
-remove_feature(name: str)[source]
-
-
Removes a feature. This will run 2 checks:

  1. See if the feature exists.

  2. See if the feature belongs to a feature set that has already been deployed.

If either check fails, this function will throw an error explaining which check has failed

-
-
param name
-

feature name

-
-
return
-

-
-
-
-
- -
-
-remove_feature_set(schema_name: str, table_name: str, purge: bool = False) → None[source]
-

Deletes a feature set if appropriate. You can currently delete a feature set in two scenarios:
1. The feature set has not been deployed
2. The feature set has been deployed, but not linked to any training sets

-

If both of these conditions are false, this will fail.

-

Optionally set purge=True to force delete the feature set and all of the associated Training Sets using the -Feature Set. ONLY USE IF YOU KNOW WHAT YOU ARE DOING. This will delete Training Sets, but will still fail if -there is an active deployment with this feature set. That cannot be overwritten

-
-
Parameters
-
    -
  • schema_name – The Feature Set Schema

  • -
  • table_name – The Feature Set Table

  • -
  • purge – Whether to force delete training sets that use the feature set (that are not used in deployments)

  • -
-
-
-
- -
-
-remove_source(name: str)[source]
-

Removes a Source by name. You cannot remove a Source that has child dependencies (Feature Sets). If there is a -Feature Set that is deployed and a Pipeline that is feeding it, you cannot delete the Source until you remove -the Feature Set (which in turn removes the Pipeline)

-
-
Parameters
-

name – The Source name

-
-
-
- -
-
-remove_training_view(name: str)[source]
-

This removes a training view if it is not being used by any currently deployed models. -NOTE: Once this training view is removed, you will not be able to deploy any models that were trained using this -view

-
-
Parameters
-

name – The view name

-
-
-
- -
-
-run_feature_elimination(df, features: List[Union[str, splicemachine.features.feature.Feature]], label: str = 'label', n: int = 10, verbose: int = 0, model_type: str = 'classification', step: int = 1, log_mlflow: bool = False, mlflow_run_name: str = None, return_importances: bool = False)[source]
-

Runs feature elimination using a Spark decision tree on the dataframe passed in. Optionally logs results to mlflow

-
-
Parameters
-
    -
  • df – The dataframe with features and label

  • -
  • features – The list of feature names (or Feature objects) to run elimination on

  • -
  • label – the label column name

  • -
  • n – The number of features desired. Default 10

  • -
  • verbose – The level of verbosity. 0 indicates no printing. 1 indicates printing the remaining features after each round. 2 indicates printing the features and relative importances after each round. Default 0

  • -
  • model_type – Whether the model to test with will be a regression or classification model. Default classification

  • -
  • log_mlflow – Whether or not to log results to mlflow as nested runs. Default false

  • -
  • mlflow_run_name – The name of the parent run under which all subsequent runs will live. The children run -names will be {mlflow_run_name}_{num_features}_features. ie testrun_5_features, testrun_4_features etc

  • -
-
-
Returns
-

-
-
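Example (a minimal sketch; train_df and the feature names are hypothetical):

# Iteratively eliminate features down to the 5 most important, logging rounds to mlflow
importances = fs.run_feature_elimination(
    df=train_df,
    features=['AR_CLOTHING_QTY_SUM_1D', 'AR_GARDEN_QTY_AVG_30D'],  # assumed feature names
    label='label',
    n=5,
    verbose=1,
    model_type='classification',
    log_mlflow=True,
    mlflow_run_name='elimination_demo',  # child runs: elimination_demo_{n}_features
    return_importances=True
)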
-
- -
-
-set_feature_description()[source]
-
- -
-
-set_feature_store_url(url: str)[source]
-
- -
-
-set_token(token)[source]
-
- -
-
-training_view_exists(name: str) → bool[source]
-

Returns if a training view exists or not

-
-
Parameters
-

name – The training view name

-
-
Returns
-

bool True if the training view exists, False otherwise

-
-
-
- -
-
-update_feature_metadata(name: str, desc: Optional[str] = None, tags: Optional[List[str]] = None, attributes: Optional[Dict[str, str]] = None)[source]
-

Update the metadata of a feature

-
-
Parameters
-
    -
  • name – The feature name

  • -
  • desc – The (optional) feature description (default None)

  • -
  • tags – (optional) List of (str) tag words (default None)

  • -
  • attributes – (optional) Dict of (str) attribute key/value pairs (default None)

  • -
-
-
Returns
-

updated Feature

-
-
-
- -
- -
-
-

splicemachine.features.feature_set

-

This describes the Python representation of a Feature Set. A feature set is a database table that contains Features and their metadata. -The Feature Set class is mostly used internally but can be used by the user to see the available Features in the given -Feature Set, to see the table and schema name it is deployed to (if it is deployed), and to deploy the feature set -(which can also be done directly through the Feature Store). Feature Sets are unique by their schema.table name, as they -exist in the Splice Machine database as a SQL table. They are case insensitive. -To see the full contents of your Feature Set, you can print, return, or .__dict__ your Feature Set object.

-
-
-class FeatureSet(*, splice_ctx: splicemachine.spark.context.PySpliceContext = None, table_name, schema_name, description, primary_keys: Dict[str, str], feature_set_id=None, deployed: bool = False, **kwargs)[source]
-

Bases: object

-
-
-is_deployed()[source]
-

Returns whether or not this Feature Set has been deployed (the schema.table has been created in the database). Returns: (bool) True if the Feature Set is deployed

-
- -
- -
-
-

splicemachine.features.Feature

-

This describes the Python representation of a Feature. A Feature is a column of a Feature Set table with particular metadata. -A Feature is the smallest unit in the Feature Store, and each Feature within a Feature Set is individually tracked for changes -to enable full time travel and point-in-time consistent training datasets. Features’ names are unique and case insensitive. -To see the full contents of your Feature, you can print, return, or .__dict__ your Feature object.

-
-
-class Feature(*, name, description, feature_data_type, feature_type, tags, attributes, feature_set_id=None, feature_id=None, **kwargs)[source]
-

Bases: object

-
-
-is_categorical()[source]
-

Returns if the type of this feature is categorical

-
- -
-
-is_continuous()[source]
-

Returns if the type of this feature is continuous

-
- -
-
-is_ordinal()[source]
-

Returns if the type of this feature is ordinal

-
- -
- -
-
-

splicemachine.features.training_view

-

This describes the Python representation of a Training View. A Training View is a SQL statement defining an event of interest, and metadata around how to create a training dataset with that view. -To see the full contents of your Training View, you can print, return, or .__dict__ your Training View object.

-
-
-class TrainingView(*, pk_columns: List[str], ts_column, label_column, view_sql, name, description, view_id=None, **kwargs)[source]
-

Bases: object

-
- -
-
-

Module contents

-
-
-[theme navigation and footer: "By Ben Epstein / © Copyright 2020, Splice Machine."]
\ No newline at end of file
diff --git a/docs/_build/html/splicemachine.html b/docs/_build/html/splicemachine.html
deleted file mode 100644
index 6769bf12..00000000
--- a/docs/_build/html/splicemachine.html
+++ /dev/null
@@ -1,273 +0,0 @@
-[page head: "Splicemachine package — Splice MLManager documentation"; theme markup omitted]
Splicemachine package

-

This package contains the classes and modules necessary for interacting with Splice Machine’s MLManager workbench. Follow the documentation below to get started, or check out the Getting Started section for installation

- -
- - -
-[theme navigation and footer: "By Ben Epstein / © Copyright 2020, Splice Machine."]
\ No newline at end of file
diff --git a/docs/_build/html/splicemachine.mlflow_support.html b/docs/_build/html/splicemachine.mlflow_support.html
deleted file mode 100644
index 284a662b..00000000
--- a/docs/_build/html/splicemachine.mlflow_support.html
+++ /dev/null
@@ -1,921 +0,0 @@
-[page head: "splicemachine.mlflow_support package — Splice MLManager documentation"; theme markup omitted]

splicemachine.mlflow_support package

-
-

Submodules

-
-
-

splicemachine.mlflow_support.mlflow_support module

-

This module contains the entrypoint to the Splice Machine managed mlflow environment

-

Copyright 2020 Splice Machine, Inc.

-

Licensed under the Apache License, Version 2.0 (the “License”); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at

-
http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an “AS IS” BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.

-
-

All functions in this module are accessible through the mlflow object and are to be referenced without the leading underscore as

-
mlflow.function_name()
-
-
-

For example, the function _current_exp_id() is accessible via

-
mlflow.current_exp_id()
-
-
-

All functions are accessible after running the following import

-
from splicemachine.mlflow_support import *
-
-
-

Importing anything directly from mlflow before running the above statement will cause problems. After running the above import, you can import additional mlflow submodules as normal

-
from splicemachine.mlflow_support import *
-from mlflow.tensorflow import autolog
-
-
-
-
-
-_current_exp_id()[source]
-

Retrieve the current exp id

-
-
Returns
-

(int) the current experiment id

-
-
-
- -
-
-_current_run_id()[source]
-

Retrieve the current run id

-
-
Returns
-

(str) the current run id

-
-
-
- -
_deploy_aws(app_name: str, region: str = 'us-east-2', instance_type: str = 'ml.m5.xlarge', run_id: str = None, instance_count: int = 1, deployment_mode: str = 'replace')

Queue a job to deploy a run to SageMaker with the given run id (found in the MLFlow UI or through the search API).

Parameters:
  • run_id – the id of the run to deploy. Will default to the current run id.
  • app_name – the name of the app in SageMaker once deployed
  • region – the SageMaker region to deploy to (us-east-2, us-west-1, us-west-2, eu-central-1 supported)
  • instance_type – the EC2 SageMaker instance type to deploy on (ml.m4.xlarge supported)
  • instance_count – the number of instances to load balance predictions on
  • deployment_mode – the method to deploy: create=the application will fail if an app with the specified name already exists; replace=the application in SageMaker will be replaced with this one if the app already exists; add=add the specified model to a preexisting application (not recommended)
_deploy_azure(endpoint_name: str, resource_group: str, workspace: str, run_id: str, region: str = 'East US', cpu_cores: float = 0.1, allocated_ram: float = 0.5, model_name: str = None)

Deploy a given run to AzureML.

Parameters:
  • endpoint_name – (str) the name of the endpoint in AzureML when deployed to Azure Container Services. Must be unique.
  • resource_group – (str) Azure Resource Group for the model. Automatically created if it doesn’t exist.
  • workspace – (str) the AzureML workspace to deploy the model under. Will be created if it doesn’t exist.
  • run_id – (str) if specified, will deploy a previous run (must have a spark model logged). Otherwise, will default to the active run.
  • region – (str) AzureML region to deploy to: can be East US, East US 2, Central US, West US 2, North Europe, West Europe or Japan East
  • cpu_cores – (float) number of CPU cores to allocate to the instance. Can be fractional. Default=0.1
  • allocated_ram – (float) amount of RAM, in GB, allocated to the container. Default=0.5
  • model_name – (str) if specified, this will be the name of the model in AzureML. Otherwise, the model name will be randomly generated.
_deploy_db(db_schema_name: str, db_table_name: str, run_id: str, reference_table: Optional[str] = None, reference_schema: Optional[str] = None, primary_key: Optional[Dict[str, str]] = None, df: Optional[Union[pyspark.sql.dataframe.DataFrame, pandas.core.frame.DataFrame]] = None, create_model_table: Optional[bool] = True, model_cols: Optional[List[str]] = None, classes: Optional[List[str]] = None, library_specific: Optional[Dict[str, str]] = None, replace: Optional[bool] = False, max_batch_size: Optional[int] = 10000, verbose: bool = False) → None

Deploy a trained (currently Spark, Sklearn, Keras or H2O) model to the Database. This either creates a new table or alters an existing table in the database, depending on the parameters passed.

Parameters:
  • db_schema_name – (str) the schema name to deploy to
  • db_table_name – (str) the table name to deploy to
  • run_id – (str) the run_id to deploy the model on. The model associated with this run will be deployed.
  • reference_table – (str) if creating a new table, an alternative to specifying a dataframe is specifying a reference table. The column schema of the reference table will be used to create the new table (e.g. MYTABLE).
  • reference_schema – (str) the db schema for the reference table
  • primary_key – (Dict) dictionary of column + SQL datatype to use for the primary/composite key.
      • If you are deploying to a table that already exists, it must already have a primary key, and this parameter will be ignored.
      • If you are creating the table in this function, you MUST pass in a primary key.
  • df – (Spark or Pandas DF) the dataframe used to train the model. NOTE: the columns in this df are the ones that will be used to create the table unless overridden by model_cols.
  • create_model_table – whether or not to create the table from the dataframe. Default True. This will ONLY be used if the table does not exist and a dataframe is passed in.
  • model_cols – (List[str]) the columns from the table to use for the model. If None, all columns in the table will be passed to the model. If specified, the columns will be passed to the model IN THAT ORDER. The columns passed here must exist in the table.
  • classes – (List[str]) the classes (prediction labels) for the model being deployed. NOTE: if not supplied, the table will have default column names for each class.
  • library_specific – (dict{str: str}) prediction options for certain model types (specifically Keras and Scikit-learn):
      • Scikit-learn
          • predict_call: determines the function call for the model. Available: ‘predict’ (default), ‘predict_proba’, ‘transform’
          • predict_args: passed into the predict call (for Gaussian and Bayesian models). Available: ‘return_std’, ‘return_cov’
      • Keras
          • pred_threshold: prediction threshold for Keras binary classification models. Note: if the model type is Keras, the output layer has 1 node, and pred_threshold is None, you will NOT receive a class prediction, only the output of the final layer (like model.predict()). If you want a class prediction for your binary classification problem, you MUST pass in a threshold.
    If the model does not support these parameters, they will be ignored.
  • max_batch_size – (int) the max size for the database to batch groups of rows for prediction. Default 10,000.
  • replace – (bool) whether or not to replace a currently existing model. This param is not yet implemented.

Returns: None

IF you are creating a table from the dataframe, this function creates the following:
  • The model table, where run_id is the run_id passed in. This table will have a column for each feature in the feature vector. It will also contain:
      • USER, the current user who made the request
      • EVAL_TIME, the CURRENT_TIMESTAMP
      • the PRIMARY KEY column(s) passed in
      • PREDICTION, the prediction of the model. If the classes param is not filled in, this will hold default values for classification models.
      • a column for each class of the predictor, with the value being the probability/confidence of the model, if applicable

IF you are deploying to an existing table, the table will be altered to include the columns above.

NOTE: The columns listed above are default value columns. This means that on a SQL insert into the table, you do not need to reference or insert values into them; they are automatically taken care of. Set verbose=True in the function call for more information.

The following will also be created for all deployments:
  • a trigger that runs on (after) insertion to the data table and runs an INSERT into the prediction table, calling the PREDICT function, passing in the row of data as well as the schema of the dataset and the run_id of the model to run
  • a trigger that runs on (after) insertion to the prediction table and calls an UPDATE on the row inserted, parsing the prediction probabilities and filling in the proper column values
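To make the moving parts concrete, here is a hedged sketch of a table-creating deployment. The schema, table, and class names are hypothetical, and `train_df` is assumed to be the Spark DataFrame the model was trained on.

```
from splicemachine.mlflow_support import *

run_id = mlflow.current_run_id()  # or a run id found via the MLFlow UI

# Creates RETAIL.CHURN_MODEL with USER, EVAL_TIME, PREDICTION and
# per-class columns, plus the triggers described above.
mlflow.deploy_db(
    'RETAIL', 'CHURN_MODEL', run_id,
    df=train_df,                          # assumed training DataFrame
    primary_key={'CUSTOMER_ID': 'INT'},   # required when creating the table
    classes=['no_churn', 'churn'],        # names the per-class columns
)
```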
_deploy_kubernetes(run_id: str, service_port: int = 80, base_replicas: int = 1, autoscaling_enabled: bool = False, max_replicas: int = 2, target_cpu_utilization: int = 50, disable_nginx: bool = False, gunicorn_workers: int = 1, resource_requests_enabled: bool = False, resource_limits_enabled: bool = False, cpu_request: int = 0.5, cpu_limit: int = 1, memory_request: str = '512Mi', memory_limit: str = '2048Mi', expose_external: bool = False)

Deploy the model associated with the specified or active run to a Kubernetes cluster.

Creates the following resources:
  • Pod (with your model loaded in via an init container)
  • ReplicaSet (configured to the base replicas specified)
  • HPA (if autoscaling is enabled)
  • Service (model-<run id>.<db namespace>.svc.cluster.local:<service port specified>)
  • Deployment
  • Ingress (if expose_external is set to True) (on <your cluster url>/<run id>/invocations)

Parameters:
  • run_id – specified if overriding the active run
  • service_port – (default 80) the port that the prediction service runs on internally in the cluster
  • autoscaling_enabled – (default False) whether or not to provision a Horizontal Pod Autoscaler to provision pods dynamically
  • max_replicas – (default 2) [USED IF AUTOSCALING ENABLED] max number of pods to scale up to
  • target_cpu_utilization – (default 50) [USED IF AUTOSCALING ENABLED] the cpu utilization at which to scale up new pods
  • disable_nginx – (default False) disable nginx inside of the pod (recommended)
  • gunicorn_workers – (default 1) [MUST BE 1 FOR SPARK ML MODELS TO PREVENT OOM] number of web workers
  • resource_requests_enabled – (default False) whether or not to enable Kubernetes resource requests
  • resource_limits_enabled – (default False) whether or not to enable Kubernetes resource limits
  • cpu_request – (default 0.5) [USED IF RESOURCE REQUESTS ENABLED] number of CPUs to request
  • cpu_limit – (default 1) [USED IF RESOURCE LIMITS ENABLED] number of CPUs to cap at
  • memory_request – (default 512Mi) [USED IF RESOURCE REQUESTS ENABLED] amount of RAM to request
  • memory_limit – (default 2048Mi) [USED IF RESOURCE LIMITS ENABLED] amount of RAM to limit at
  • expose_external – (default False) whether or not to create an Ingress resource to expose the deployment outside of the cluster. NOTE: it is not recommended to create an Ingress resource using this parameter, as your model will be deployed with no authorization (and public access). Instead, it is better to deploy your model as an internal service, and deploy an authentication proxy (such as https://github.com/oauth2-proxy/oauth2-proxy) to proxy traffic to your internal service after authenticating.
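For orientation, a minimal call under the defaults above might look like the following sketch; the run id is the hypothetical one used in the _end_run example below, and the model is deployed as an internal service only.

```
from splicemachine.mlflow_support import *

# Internal-only deployment with autoscaling capped at 2 replicas.
mlflow.deploy_kubernetes('b47ee4563368419880b44ad8535f6371',
                         autoscaling_enabled=True,
                         expose_external=False)
```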
_download_artifact(name, local_path, run_id=None)

Download the artifact at the given run id (active run by default) + name to the local path.

Parameters:
  • name – (str) artifact name to load (with respect to the run)
  • local_path – (str) local path to download the model to. This path MUST include the file extension.
  • run_id – (str) the run id to download the artifact from. Defaults to the active run.
Returns: None
_end_run(status='FINISHED', save_html=True)

End an active MLflow run (if there is one).

Example:

    import mlflow

    # Start run and get status
    mlflow.start_run()
    run = mlflow.active_run()
    print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))

    # End run and get status
    mlflow.end_run()
    run = mlflow.get_run(run.info.run_id)
    print("run_id: {}; status: {}".format(run.info.run_id, run.info.status))
    print("--")

    # Check for any active runs
    print("Active run: {}".format(mlflow.active_run()))

Output:

    run_id: b47ee4563368419880b44ad8535f6371; status: RUNNING
    run_id: b47ee4563368419880b44ad8535f6371; status: FINISHED
    --
    Active run: None
_fetch_logs(job_id: int)

Get the logs as an array.

Parameters: job_id – the job to get the logs for

_get_current_run_data()

Get the data associated with the current run. As of MLflow 1.6, getting run info from the mlflow.active_run object is not supported, so it needs to be retrieved via the tracking client.

Returns: active run data object

_get_deployed_models() → pandas.core.frame.DataFrame

Get the currently deployed models in the database.

Returns: Pandas df
_get_model_name(run_id)

Gets the model name associated with a run, or None.

Parameters: run_id – (str) the run_id that the model is stored under
Returns: (str or None) the model name, if it exists

_get_run_ids_by_name(run_name, experiment_id=None)

Gets a run id from the run name. If there are multiple runs with the same name, all run ids are returned.

Parameters:
  • run_name – (str) the name of the run
  • experiment_id – (int) the experiment to search in. If None, all experiments are searched. [Default None]
Returns: (List[str]) list of run ids
_lm(key, value, step=None)

A shortcut for logging metrics in MLflow.

Parameters:
  • key – (str) key for the metric
  • value – (str or int) value for the metric
  • step – (int) a single integer step at which to log the specified metric. If unspecified, each metric is logged at step zero.

_load_model(run_id=None, name=None, as_pyfunc=False)

Download and deserialize a serialized model.
_log_artifact(file_name, name=None, run_uuid=None)

Log an artifact for the active run.

Example:

    with mlflow.start_run():
        mlflow.log_artifact('my_image.png')

Parameters:
  • file_name – (str) the name of the file to log
  • name – (str) the name to store the artifact as. Defaults to the file name. If the name param includes the file extension (or is not passed in) you will be able to preview it in the mlflow UI (image, text, html, geojson files).
  • run_uuid – (str) the run uuid of a previous run; if None, defaults to the current run
Returns: None

NOTE: We do not currently support logging directories. If you would like to log a directory, please zip it first and log the zip file.
_log_feature_transformations(unfit_pipeline)

Log feature transformations for an unfit spark pipeline; logs the feature movement through the pipeline.

Parameters: unfit_pipeline – (PipelineModel) unfit spark pipeline to log
Returns: None

_log_model(model, name='model', model_lib=None, **flavor_options)

Log a trained machine learning model.

Parameters:
  • model – (Model) the trained Spark/SKlearn/H2O/Keras model associated with the current run
  • name – (str) the run-relative name to store the model under. [Default ‘model’]
  • model_lib – an optional param specifying the model type of the model to log. Available options match the mlflow built-in model flavors: https://www.mlflow.org/docs/1.8.0/models.html#built-in-model-flavors
  • flavor_options – (**kwargs) the full set of save options to pass into the save_model function. If this is passed, model_lib must also be provided, and the keys of this dictionary must match the params of that function’s signature (ie mlflow.pyfunc.save_model). An example of pyfunc’s signature is here, although each flavor has its own: https://mlflow.org/docs/latest/python_api/mlflow.pyfunc.html#mlflow.pyfunc.save_model
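As a quick illustration, here is a hedged sketch of logging a scikit-learn model under these options; the run name and toy training data are hypothetical.

```
from splicemachine.mlflow_support import *
from sklearn.linear_model import LogisticRegression

# Tiny toy model for illustration only
model = LogisticRegression().fit([[0.0], [1.0]], [0, 1])

with mlflow.start_run(run_name='sklearn_demo'):
    # Store under the run-relative name 'model'; model_lib picks the flavor
    mlflow.log_model(model, name='model', model_lib='sklearn')
```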
_log_model_params(pipeline_or_model)

Log the parameters of a fitted spark model or a model stage of a fitted spark pipeline.

Parameters: pipeline_or_model – fitted spark pipeline/fitted spark model

_log_pipeline_stages(pipeline)

Log the pipeline stages of a Spark Pipeline as params for the run.

Parameters: pipeline – (PipelineModel) fitted/unfitted pipeline
Returns: None

_login_director(username=None, password=None, jwt_token=None)

Authenticate into the MLManager Director.

Parameters:
  • username – (str) database username
  • password – (str) database password
  • jwt_token – (str) database JWT token authentication

Either (username/password) for basic auth or jwt_token must be provided. Basic authentication takes precedence if both are set (mlflow default).
_lp(key, value)

A shortcut for logging parameters in MLflow.

Parameters:
  • key – (str) key for the parameter
  • value – (str) value for the parameter
Returns: None

_mlflow_patch(name)

Create an MLflow patch that applies the default gorilla settings.

Parameters: name – destination name under the mlflow package
Returns: decorator for the patched function
_register_feature_store(fs: splicemachine.features.feature_store.FeatureStore)

Register a feature store for feature tracking of experiments.

Parameters: fs – (FeatureStore) the feature store
Returns: None

_register_splice_context(splice_context)

Register a Splice Context for Spark/Database operations (artifact storage, for example).

Parameters: splice_context – (PySpliceContext) splice context to input
Returns: None

_remove_active_training_set()

Removes the active training set from mlflow. This function deletes mlflow’s active training set (retrieved from the feature store), which will in turn stop the automated logging of features to the active mlflow run. To recreate an active training set, call fs.get_training_set or fs.get_training_set_from_view in the Feature Store.
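For context, here is a sketch of the typical session setup that wires these registrations together; it assumes an existing SparkSession, and the PySpliceContext class documented later in this file.

```
from pyspark.sql import SparkSession
from splicemachine.spark.context import PySpliceContext
from splicemachine.mlflow_support import *

spark = SparkSession.builder.getOrCreate()
splice = PySpliceContext(spark)

# Let mlflow use the database for artifact storage and Spark operations
mlflow.register_splice_context(splice)
```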
_set_mlflow_uri(uri)

Set the tracking uri for mlflow. Only needed if running outside of the Splice Machine K8s Cloud Service.

Parameters: uri – (str) the URL of your mlflow UI
Returns: None
_start_run(run_id=None, tags=None, experiment_id=None, run_name=None, nested=False)

Start a new run.

Example:

    mlflow.start_run(run_name='my_run')

    # or

    with mlflow.start_run(run_name='my_run'):
        ...

Parameters:
  • tags – a dictionary containing metadata about the current run. For example: { ‘team’: ‘pd’, ‘purpose’: ‘r&d’ }
  • run_name – (str) an optional name for the run to show up in the MLFlow UI. [Default None]
  • run_id – (str) if you want to reincarnate an existing run, pass in the run id. [Default None]
  • experiment_id – (int) if you would like to create an experiment/use one for this run. [Default None]
  • nested – (bool) controls whether the run is nested in a parent run. True creates a nested run. [Default False]
Returns: (ActiveRun) the mlflow active run object
_timer(timer_name, param=False)

Context manager for timing a block of code and logging the result.

Example:

    with mlflow.timer('my_timer'):
        ...

Parameters:
  • timer_name – (str) the name of the timer
  • param – (bool) whether or not to log the timer as a param (default=False). If False, it is logged as a metric.
Returns: None
_undeploy_kubernetes(run_id: str)

Removes a model deployment from Kubernetes. This will delete the Kubernetes deployment and record the event.

Parameters: run_id – specified if overriding the active run

_watch_job(job_id: int)

Stream the logs of a Job in real time to standard out.

Parameters: job_id – the job id to watch (returned after executing an operation)

NOTE: If the job being watched fails, this function will throw a SpliceMachineException.
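Putting the job helpers together, a plausible monitoring sketch follows; it assumes the deployment call returns a job id (as _fetch_logs and _watch_job expect one), which is not stated explicitly in the entries above.

```
from splicemachine.mlflow_support import *

# Hypothetical: deploy the active run, then follow the queued job's logs
job_id = mlflow.deploy_kubernetes(run_id=mlflow.current_run_id())
mlflow.watch_job(job_id)           # streams logs; raises on job failure
logs = mlflow.fetch_logs(job_id)   # or grab the log lines as a list
```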

\ No newline at end of file
diff --git a/docs/_build/html/splicemachine.notebook.html b/docs/_build/html/splicemachine.notebook.html
deleted file mode 100644
index 4d1a590d..00000000
--- a/docs/_build/html/splicemachine.notebook.html
+++ /dev/null
@@ -1,284 +0,0 @@

splicemachine.notebook module


This module contains helper functions and tools for use inside of a Jupyter Notebook.

get_mlflow_ui(experiment_id=None, run_id=None)

Display the MLFlow UI as an IFrame.

Parameters:
  • experiment_id – (int or str) optionally the experiment id to load into view
  • run_id – (str) optionally the run_id to load into view. If you pass in a run_id you must also pass an experiment_id.
Returns: (IFrame) an IFrame of the MLFlow UI
get_spark_ui(port=None, spark_session=None)

Display the Spark Jobs UI as an IFrame at a specific port.

Parameters:
  • port – (int or str) the port of the desired spark session
  • spark_session – (SparkSession) optionally the Spark Session associated with the desired UI
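For instance, a minimal notebook cell rendering the UI for the current session might look like this sketch:

```
from pyspark.sql import SparkSession
from splicemachine.notebook import get_spark_ui

spark = SparkSession.builder.getOrCreate()

# Render the Spark Jobs UI for this session inline in the notebook
get_spark_ui(spark_session=spark)
```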
hide_toggle(toggle_next=False)

Function to add a toggle at the bottom of Jupyter Notebook cells to allow the entire cell to be collapsed.

Parameters: toggle_next – (bool) determines whether the toggle should hide the current cell or the next cell
run_sql(sql)

Runs a SQL statement over JDBC from the Splice Machine managed Jupyter notebook environment. If you are running outside of the Splice Jupyter environment, you must have a sql kernel and magic set up and configured.

Parameters: sql – the SQL to execute
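A short usage sketch (the schema and table are hypothetical placeholders):

```
from splicemachine.notebook import run_sql

# Executes over JDBC inside the managed Jupyter environment
run_sql("CREATE TABLE MYSCHEMA.EVENTS (ID INT PRIMARY KEY, NAME VARCHAR(50))")
run_sql("INSERT INTO MYSCHEMA.EVENTS VALUES (1, 'signup')")
```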

\ No newline at end of file
diff --git a/docs/_build/html/splicemachine.spark.html b/docs/_build/html/splicemachine.spark.html
deleted file mode 100644
index 7a3e4437..00000000
--- a/docs/_build/html/splicemachine.spark.html
+++ /dev/null
@@ -1,1908 +0,0 @@

splicemachine.spark package


Submodules


splicemachine.spark.context module


This module contains the classes for interacting with the Database via our NSDS. For installation instructions, please see the Getting Started guide. For use inside the K8s cluster, see PySpliceContext. For use outside of the K8s cluster, see ExtPySpliceContext.


Copyright 2021 Splice Machine, Inc.

Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
class ExtPySpliceContext(sparkSession, JDBC_URL=None, kafkaServers='localhost:9092', kafkaPollTimeout=20000, _unit_testing=False)

Bases: splicemachine.spark.context.PySpliceContext

This class implements a SplicemachineContext object from com.splicemachine.spark2 for use outside of the K8s Cloud Service.
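For orientation, a sketch of constructing the external context; the JDBC URL, credentials, and Kafka broker address are deployment-specific placeholders.

```
from pyspark.sql import SparkSession
from splicemachine.spark.context import ExtPySpliceContext

spark = SparkSession.builder.getOrCreate()

# Placeholder connection details; substitute your own host and credentials
splice = ExtPySpliceContext(
    spark,
    JDBC_URL='jdbc:splice://host:1527/splicedb;user=YOUR_USER;password=YOUR_PASSWORD',
    kafkaServers='host:9092',
)
```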
analyzeSchema(schema_name)

Analyze the schema.

Parameters: schema_name – (str) the schema name for which stats info will be collected
Returns: None

analyzeTable(schema_table_name, estimateStatistics=False, samplePercent=10.0)

Collect stats info on a table.

Parameters:
  • schema_table_name – full table name in the format ‘schema.table’
  • estimateStatistics – will use estimated statistics if True
  • samplePercent – the percentage of rows to be sampled
Returns: None

autoCommitting()

Check whether auto-commit is on.

Returns: (Boolean) True if auto-commit is on.

bulkImportHFile(dataframe, schema_table_name, options)

Bulk import an HFile from a dataframe into a schema.table.

Parameters:
  • dataframe – (DataFrame)
  • schema_table_name – (str) full table name in the format “schema.table”
  • options – (Dict) dictionary of options to be passed to –splice-properties; bulkImportDirectory is required
Returns: (int) number of records imported

bulkImportHFileWithRdd(rdd, schema, schema_table_name, options)

Bulk import an HFile from an rdd into a schema.table.

Parameters:
  • rdd – (RDD) input data
  • schema – (StructType) the schema of the rows in the RDD
  • schema_table_name – (str) full table name in the format “schema.table”
  • options – (Dict) dictionary of options to be passed to –splice-properties; bulkImportDirectory is required
Returns: (int) number of records imported

columnNamesCaseSensitive(caseSensitive)

Set whether column names should be treated as case sensitive.

Parameters: caseSensitive – (boolean) True for case sensitive, False for not case sensitive

commit()

Commit the transaction. Throws an exception if auto-commit is on.

Returns: None
createAndInsertTable(dataframe, schema_table_name, primary_keys=None, create_table_options=None, to_upper=True)

Creates a schema.table (schema_table_name) from a dataframe and inserts the dataframe into the table.

Parameters:
  • dataframe – the Spark DataFrame to base the table off
  • schema_table_name – (str) the schema.table to create
  • primary_keys – (List[str]) the primary keys. Default None
  • create_table_options – (str) additional table-level SQL options. Default None
  • to_upper – (bool) whether the dataframe columns should be converted to uppercase before table creation. If False, the table will be created with lowercase columns. Default True
Returns: None

createDataFrame(rdd, schema)

Creates a dataframe from a given rdd and schema.

Parameters:
  • rdd – (RDD) input data
  • schema – (StructType) the schema of the rows in the RDD
Returns: (DataFrame) the Spark DataFrame

createTable(dataframe, schema_table_name, primary_keys=None, create_table_options=None, to_upper=True, drop_table=False)

Creates a schema.table (schema_table_name) from a dataframe.

Parameters:
  • dataframe – the Spark DataFrame to base the table off
  • schema_table_name – (str) the schema.table to create
  • primary_keys – (List[str]) the primary keys. Default None
  • create_table_options – (str) additional table-level SQL options. Default None
  • to_upper – (bool) whether the dataframe columns should be converted to uppercase before table creation. If False, the table will be created with lowercase columns. Default True
  • drop_table – (bool) whether to drop the table if it exists. Default False. If False and the table exists, the function will throw an exception
Returns: None
createTableWithSchema(schema_table_name, schema, keys=None, create_table_options=None)

Creates a schema.table from a schema.

Parameters:
  • schema_table_name – (str) the schema.table to create
  • schema – (StructType) the schema that describes the columns of the table
  • keys – (List[str]) the primary keys. Default None
  • create_table_options – (str) additional table-level SQL options. Default None
Returns: None

delete(dataframe, schema_table_name)

Delete records in a table, joining on the primary keys found in the dataframe. Be careful with column naming and case sensitivity.

Parameters:
  • dataframe – (Dataframe) the dataframe containing the records you would like to delete
  • schema_table_name – (str) Splice Machine table
Returns: None

deleteWithRdd(rdd, schema, schema_table_name)

Delete records using an rdd, joining on the primary keys found in the rdd. Be careful with column naming and case sensitivity.

Parameters:
  • rdd – (RDD) the RDD containing the primary keys you would like to delete from the table
  • schema – (StructType) the schema of the rows in the RDD
  • schema_table_name – (str) Splice Machine table
Returns: None
df(sql, to_lower=False)

Return a Spark Dataframe from the results of a Splice Machine SQL query.

Example:

    df = splice.df('SELECT * FROM MYSCHEMA.TABLE1 WHERE COL2 > 3')

Parameters:
  • sql – (str) SQL query (e.g. SELECT * FROM table1 WHERE col2 > 3)
  • to_lower – whether or not to convert the dataframe’s column names to lowercase
Returns: (Dataframe) a Spark DataFrame containing the results
dropTable(schema_and_or_table_name, table_name=None)

Drop a specified table.

Example:

    splice.dropTable('schemaName.tableName')

    # or

    splice.dropTable('schemaName', 'tableName')

Parameters:
  • schema_and_or_table_name – (str) pass the schema name in this param when passing the table_name param, or pass schemaName.tableName in this param without passing the table_name param
  • table_name – (optional) (str) table name, used when schema_and_or_table_name contains only the schema name
Returns: None

dropTableIfExists(schema_table_name, table_name=None)

Drops a table if it exists.

Example:

    splice.dropTableIfExists('schemaName.tableName')

    # or

    splice.dropTableIfExists('schemaName', 'tableName')

Parameters:
  • schema_table_name – (str) pass the schema name in this param when passing the table_name param, or pass schemaName.tableName in this param without passing the table_name param
  • table_name – (optional) (str) table name, used when schema_table_name contains only the schema name
Returns: None

execute(query_string)

Execute a query over JDBC.

Example:

    splice.execute('DELETE FROM TABLE1 WHERE col2 > 3')

Parameters: query_string – (str) SQL query (e.g. DELETE FROM table1 WHERE col2 > 3)
Returns: None

executeUpdate(query_string)

Execute a DML query (update, delete, drop, etc.).

Example:

    splice.executeUpdate('DROP TABLE table1')

Parameters: query_string – (string) SQL query (e.g. DROP TABLE table1)
Returns: None
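A short round-trip sketch combining execute and df; it assumes `splice` is a constructed (Ext)PySpliceContext (see the class examples in this module) and that the schema/table are placeholders.

```
# Statement execution returns no result set; queries come back as DataFrames
splice.execute('DELETE FROM MYSCHEMA.TABLE1 WHERE COL2 > 3')
df = splice.df('SELECT * FROM MYSCHEMA.TABLE1')
df.show(5)
```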
export(dataframe, location, compression=False, replicationCount=1, fileEncoding=None, fieldSeparator=None, quoteCharacter=None)

Export a dataFrame in CSV format.

Parameters:
  • dataframe – (DataFrame)
  • location – (str) destination directory
  • compression – (bool) whether to compress the output or not
  • replicationCount – (int) replication used for the HDFS write
  • fileEncoding – (str) fileEncoding or None, defaults to UTF-8
  • fieldSeparator – (str) fieldSeparator or None, defaults to ‘,’
  • quoteCharacter – (str) quoteCharacter or None, defaults to ‘”’
Returns: None

exportBinary(dataframe, location, compression, e_format='parquet')

Export a dataFrame in a binary format.

Parameters:
  • dataframe – (DataFrame)
  • location – (str) destination directory
  • compression – (bool) whether to compress the output or not
  • e_format – (str) binary format to be used; currently only ‘parquet’ is supported. [Default ‘parquet’]
Returns: None

fileToTable(file_path, schema_table_name, primary_keys=None, drop_table=False, **pandas_args)

Load a file from the local filesystem or from a remote location, create a new table (or recreate an existing table), and load the data from the file into the new table. Any file_path that can be read by pandas should work here.

Parameters:
  • file_path – the local file to load
  • schema_table_name – the schema.table name
  • primary_keys – List[str] of primary keys for the table. Default None
  • drop_table – whether or not to drop the table. If this is False and the table already exists, the function will fail. Default False
  • pandas_args – extra parameters to be passed into the pd.read_csv function. Any parameters accepted by pd.read_csv will work here
Returns: None
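For instance, a hedged sketch of loading a CSV into a fresh table; the path, table, and key column are placeholders, and extra keyword arguments flow through to pd.read_csv.

```
# Assumes `splice` is a constructed (Ext)PySpliceContext
splice.fileToTable('/tmp/customers.csv', 'MYSCHEMA.CUSTOMERS',
                   primary_keys=['CUSTOMER_ID'],
                   drop_table=True,   # recreate the table if it exists
                   sep=',')           # forwarded to pd.read_csv
```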
getConnection()

Return a connection to the database.

getSchema(schema_table_name)

Return the schema via JDBC.

Parameters: schema_table_name – (str) table name
Returns: (StructType) PySpark StructType representation of the table

insert(dataframe, schema_table_name, to_upper=True, create_table=False)

Insert a dataframe into a table (schema.table).

Parameters:
  • dataframe – (Dataframe) the dataframe you would like to insert
  • schema_table_name – (str) the table in which you would like to insert the DF
  • to_upper – (bool) whether the dataframe columns should be converted to uppercase before table creation. If False, the table will be created with lowercase columns. [Default True]
  • create_table – if the table does not exist at the time of the call, the table will first be created
Returns: None

insertRdd(rdd, schema, schema_table_name)

Insert an rdd into a table (schema.table).

Parameters:
  • rdd – (RDD) the RDD you would like to insert
  • schema – (StructType) the schema of the rows in the RDD
  • schema_table_name – (str) the table in which you would like to insert the RDD
Returns: None

insertRddWithStatus(rdd, schema, schema_table_name, statusDirectory, badRecordsAllowed)

Insert an rdd into a table (schema.table) while tracking and limiting records that fail to insert. The status directory and the number of badRecordsAllowed allow duplicate primary keys to be written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written to the status directory.

Parameters:
  • rdd – (RDD) the RDD you would like to insert
  • schema – (StructType) the schema of the rows in the RDD
  • schema_table_name – (str) the table in which you would like to insert the RDD
  • statusDirectory – (str) the status directory where the bad records file will be created
  • badRecordsAllowed – (int) the number of bad records allowed; -1 for unlimited
Returns: None

insertWithStatus(dataframe, schema_table_name, statusDirectory, badRecordsAllowed)

Insert a dataframe into a table (schema.table) while tracking and limiting records that fail to insert. The status directory and the number of badRecordsAllowed allow duplicate primary keys to be written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written to the status directory.

Parameters:
  • dataframe – (Dataframe) the dataframe you would like to insert
  • schema_table_name – (str) the table in which you would like to insert the dataframe
  • statusDirectory – (str) the status directory where the bad records file will be created
  • badRecordsAllowed – (int) the number of bad records allowed; -1 for unlimited
Returns: None
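A usage sketch for the status-tracking variant; `splice`, the DataFrame, the table, and the status directory are all placeholders.

```
# Assumes `splice` and a Spark DataFrame `events_df` matching MYSCHEMA.EVENTS.
# Rows that fail to insert (e.g. duplicate primary keys) are written to the
# status directory; badRecordsAllowed=-1 would allow unlimited bad records.
splice.insertWithStatus(events_df, 'MYSCHEMA.EVENTS',
                        statusDirectory='/bad_records/events',
                        badRecordsAllowed=100)
```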
internalDf(query_string)

SQL to Dataframe translation (lazy). Runs the query inside Splice Machine and sends the results to the Spark Adapter app.

Parameters: query_string – (str) SQL query (e.g. SELECT * FROM table1 WHERE col2 > 3)
Returns: (DataFrame) pyspark dataframe containing the result of query_string

internalRdd(schema_table_name, column_projection=None)

Table with projections in Splice mapped to an RDD. Runs the projection inside Splice Machine and sends the results to the Spark Adapter app as an rdd.

Parameters:
  • schema_table_name – (str) accessed table
  • column_projection – (list of strings) names of selected columns
Returns: (RDD[Row]) the result of the projection

mergeInto(dataframe, schema_table_name)

Rows in the dataframe whose primary key is not in schemaTableName will be inserted into the table; rows in the dataframe whose primary key is in schemaTableName will be used to update the table.

This implementation differs from upsert in a way that allows triggers to work.

Parameters:
  • dataframe – (Dataframe) the dataframe you would like to merge in
  • schema_table_name – (str) the table in which you would like to merge in the dataframe
Returns: None

mergeIntoWithRdd(rdd, schema, schema_table_name)

Rows in the rdd whose primary key is not in schemaTableName will be inserted into the table; rows in the rdd whose primary key is in schemaTableName will be used to update the table.

This implementation differs from upsertWithRdd in a way that allows triggers to work.

Parameters:
  • rdd – (RDD) the RDD you would like to merge in
  • schema – (StructType) the schema of the rows in the RDD
  • schema_table_name – (str) the table in which you would like to merge in the RDD
Returns: None

pandasToSpark(pdf)

Convert a Pandas DF to Spark, and try to manage NaNs from Pandas in case of failure. Spark cannot handle a Pandas NaN existing in string columns (as it considers it a NaN number, ironically), so we replace the occurrences with a temporary value and then convert them back to null after the data becomes a Spark DF.

Parameters: pdf – the Pandas dataframe
Returns: the Spark DF
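Combining the two helpers above, a merge sketch; it assumes `splice` exists and that the placeholder table MYSCHEMA.EVENTS has a primary key on ID.

```
import pandas as pd

pdf = pd.DataFrame({'ID': [1, 2], 'NAME': ['signup', None]})  # None -> null
sdf = splice.pandasToSpark(pdf)           # NaN-safe Pandas -> Spark conversion
splice.mergeInto(sdf, 'MYSCHEMA.EVENTS')  # insert new keys, update existing
```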
rdd(schema_table_name, column_projection=None)

Table with projections in Splice mapped to an RDD.

Parameters:
  • schema_table_name – (string) accessed table
  • column_projection – (list of strings) names of selected columns
Returns: (RDD[Row]) the result of the projection

releaseSavepoint(savepoint)

Release the savepoint. Throws an exception if auto-commit is on.

Parameters: savepoint – (java.sql.Savepoint) a Savepoint
Returns: None

replaceDataframeSchema(dataframe, schema_table_name)

Returns a dataframe with all column names replaced with the proper string case from the DB table.

Parameters:
  • dataframe – (Dataframe) a dataframe with column names to convert
  • schema_table_name – (str) the schema.table with the correct column cases to pull from the database
Returns: (DataFrame) a Spark DataFrame with the replaced schema

rollback()

Rollback the transaction. Throws an exception if auto-commit is on.

Returns: None

rollbackToSavepoint(savepoint)

Rollback to the savepoint. Throws an exception if auto-commit is on.

Parameters: savepoint – (java.sql.Savepoint) a Savepoint
Returns: None

setAutoCommitOff()

Turn auto-commit off.

Returns: None

setAutoCommitOn()

Turn auto-commit on. Auto-commit is on by default when the class is instantiated.

Returns: None

setSavepoint()

Create and set an unnamed savepoint at the current point in the transaction. Throws an exception if auto-commit is on.

Returns: (java.sql.Savepoint) the unnamed Savepoint

setSavepointWithName(name)

Create and set a named savepoint at the current point in the transaction. Throws an exception if auto-commit is on.

Parameters: name – (String) the name of the Savepoint
Returns: (java.sql.Savepoint) the named Savepoint
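Tying the transaction methods together, a sketch of a guarded delete; auto-commit must be off for savepoints to work, per the entries above, and `needs_undo` is a hypothetical application-level check.

```
splice.setAutoCommitOff()
sp = splice.setSavepointWithName('before_cleanup')
splice.execute('DELETE FROM MYSCHEMA.EVENTS WHERE NAME IS NULL')

needs_undo = False                  # hypothetical validation result
if needs_undo:
    splice.rollbackToSavepoint(sp)  # undo just the delete
else:
    splice.commit()
splice.setAutoCommitOn()
```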
splitAndInsert(dataframe, schema_table_name, sample_fraction)

Sample the dataframe, split the table, and insert a dataFrame into a schema.table. This corresponds to an insert-into-from-select statement.

Parameters:
  • dataframe – (DataFrame) input data
  • schema_table_name – (str) full table name in the format “schema.table”
  • sample_fraction – (float) a value between 0 and 1 that specifies the percentage of data in the dataFrame that should be sampled to determine the splits. For example, specify 0.005 if you want 0.5% of the data sampled.
Returns: None

tableExists(schema_and_or_table_name, table_name=None)

Check whether or not a table exists.

Example:

    splice.tableExists('schemaName.tableName')

    # or

    splice.tableExists('schemaName', 'tableName')

Parameters:
  • schema_and_or_table_name – (str) pass the schema name in this param when passing the table_name param, or pass schemaName.tableName in this param without passing the table_name param
  • table_name – (optional) (str) table name, used when schema_and_or_table_name contains only the schema name
Returns: (bool) whether or not the table exists

toLower(dataframe)

Returns a dataframe with all of the columns in lowercase.

Parameters: dataframe – (Dataframe) the dataframe to convert to lowercase

toUpper(dataframe)

Returns a dataframe with all of the columns in uppercase.

Parameters: dataframe – (Dataframe) the dataframe to convert to uppercase

transactional()

Check whether auto-commit is off.

Returns: (Boolean) True if auto-commit is off.

truncateTable(schema_table_name)

Truncate a table.

Parameters: schema_table_name – (str) the full table name in the format “schema.table_name” which will be truncated
Returns: None

update(dataframe, schema_table_name)

Update data from a dataframe for a specified schema_table_name (schema.table). The keys are required for the update, and any other columns provided will be updated in the rows.

Parameters:
  • dataframe – (Dataframe) the dataframe you would like to use to update the table
  • schema_table_name – (str) Splice Machine table
Returns: None

updateWithRdd(rdd, schema, schema_table_name)

Update data from an rdd for a specified schema_table_name (schema.table). The keys are required for the update, and any other columns provided will be updated in the rows.

Parameters:
  • rdd – (RDD) the RDD you would like to use for updating the table
  • schema – (StructType) the schema of the rows in the RDD
  • schema_table_name – (str) Splice Machine table
Returns: None

upsert(dataframe, schema_table_name)

Upsert the data from a dataframe into a table (schema.table). If triggers fail when calling upsert, use the mergeInto function instead of upsert.

Parameters:
  • dataframe – (Dataframe) the dataframe you would like to upsert
  • schema_table_name – (str) the table in which you would like to upsert the dataframe
Returns: None

upsertWithRdd(rdd, schema, schema_table_name)

Upsert the data from an RDD into a table (schema.table). If triggers fail when calling upsertWithRdd, use the mergeIntoWithRdd function instead of upsertWithRdd.

Parameters:
  • rdd – (RDD) the RDD you would like to upsert
  • schema – (StructType) the schema of the rows in the RDD
  • schema_table_name – (str) the table in which you would like to upsert the RDD
Returns: None
class PySpliceContext(sparkSession, JDBC_URL=None, _unit_testing=False)

Bases: object

This class implements a SpliceMachineContext object (similar to the SparkContext object).
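Inside the K8s cloud service, construction is simpler; a sketch, noting that the signature's JDBC_URL=None default is used here and that you should supply a URL explicitly if your environment requires one.

```
from pyspark.sql import SparkSession
from splicemachine.spark.context import PySpliceContext

spark = SparkSession.builder.getOrCreate()
splice = PySpliceContext(spark)  # relies on the JDBC_URL=None default

df = splice.df('SELECT * FROM SYS.SYSTABLES')  # quick smoke test
```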
-
-analyzeSchema(schema_name)[source]
-

Analyze the schema

-
-
Parameters
-

schema_name – (str) schema name which stats info will be collected

-
-
Returns
-

None

-
-
-
- -
-
-analyzeTable(schema_table_name, estimateStatistics=False, samplePercent=10.0)[source]
-

Collect stats info on a table

-
-
Parameters
-
    -
  • schema_table_name – full table name in the format of ‘schema.table’

  • -
  • estimateStatistics – will use estimate statistics if True

  • -
  • samplePercent – the percentage or rows to be sampled.

  • -
-
-
Returns
-

None

-
-
-
- -
-
-bulkImportHFile(dataframe, schema_table_name, options)[source]
-

Bulk Import HFile from a dataframe into a schema.table

-
-
Parameters
-
    -
  • dataframe – (DataFrame)

  • -
  • schema_table_name – (str) Full table name in the format of “schema.table”

  • -
  • options – (Dict) Dictionary of options to be passed to –splice-properties; bulkImportDirectory is required

  • -
-
-
Returns
-

(int) Number of records imported

-
-
-
- -
-
-bulkImportHFileWithRdd(rdd, schema, schema_table_name, options)[source]
-

Bulk Import HFile from an rdd into a schema.table

-
-
Parameters
-
    -
  • rdd – (RDD) Input data

  • -
  • schema – (StructType) The schema of the rows in the RDD

  • -
  • schema_table_name – (str) Full table name in the format of “schema.table”

  • -
  • options – (Dict) Dictionary of options to be passed to –splice-properties; bulkImportDirectory is required

  • -
-
-
Returns
-

(int) Number of records imported

-
-
-
- -
-
-columnNamesCaseSensitive(caseSensitive)[source]
-

Sets whether column names should be treated as case sensitive.

-
-
Parameters
-

caseSensitive – (boolean) True for case sensitive, False for not case sensitive

-
-
-
- -
-
-createAndInsertTable(dataframe, schema_table_name, primary_keys=None, create_table_options=None, to_upper=True)[source]
-

Creates a schema.table (schema_table_name) from a dataframe and inserts the dataframe into the table

-
-
Parameters
-
    -
  • dataframe – The Spark DataFrame to base the table off

  • -
  • schema_table_name – str The schema.table to create

  • -
  • primary_keys – List[str] the primary keys. Default None

  • -
  • create_table_options – str The additional table-level SQL options default None

  • -
  • to_upper – bool If the dataframe columns should be converted to uppercase before table creation. If False, the table will be created with lower case columns. Default True

  • -
  • drop_table – bool whether to drop the table if it exists. Default False. If False and the table exists, the function will throw an exception

  • -
-
-
Returns
-

None

-
-
-
- -
-
-createDataFrame(rdd, schema)[source]
-

Creates a dataframe from a given rdd and schema.

-
-
Parameters
-
    -
  • rdd – (RDD) Input data

  • -
  • schema – (StructType) The schema of the rows in the RDD

  • -
-
-
Returns
-

(DataFrame) The Spark DataFrame

-
-
-
- -
-
-createTable(dataframe, schema_table_name, primary_keys=None, create_table_options=None, to_upper=True, drop_table=False)[source]
-

Creates a schema.table (schema_table_name) from a dataframe

-
-
Parameters
-
    -
  • dataframe – The Spark DataFrame to base the table off

  • -
  • schema_table_name – str The schema.table to create

  • -
  • primary_keys – List[str] the primary keys. Default None

  • -
  • create_table_options – str The additional table-level SQL options default None

  • -
  • to_upper – bool If the dataframe columns should be converted to uppercase before table creation. If False, the table will be created with lower case columns. Default True

  • -
  • drop_table – bool whether to drop the table if it exists. Default False. If False and the table exists, the function will throw an exception

  • -
-
-
Returns
-

None

-
-
-
- -
-
-createTableWithSchema(schema_table_name, schema, keys=None, create_table_options=None)[source]
-

Creates a schema.table from a schema

-
-
Parameters
-
    -
  • schema_table_name – str The schema.table to create

  • -
  • schema – (StructType) The schema that describes the columns of the table

  • -
  • keys – (List[str]) The primary keys. Default None

  • -
  • create_table_options – (str) The additional table-level SQL options. Default None

  • -
-
-
Returns
-

None

-
-
-
- -
-
-delete(dataframe, schema_table_name)[source]
-

Delete records in a dataframe based on joining by primary keys from the data frame. -Be careful with column naming and case sensitivity.

-
-
Parameters
-
    -
  • dataframe – (Dataframe) The dataframe you would like to delete

  • -
  • schema_table_name – (str) Splice Machine Table

  • -
-
-
Returns
-

None

-
-
-
- -
-
-deleteWithRdd(rdd, schema, schema_table_name)[source]
-

Delete records using an rdd based on joining by primary keys from the rdd. -Be careful with column naming and case sensitivity.

-
-
Parameters
-
    -
  • rdd – (RDD) The RDD containing the primary keys you would like to delete from the table

  • -
  • schema – (StructType) The schema of the rows in the RDD

  • -
  • schema_table_name – (str) Splice Machine Table

  • -
-
-
Returns
-

None

-
-
-
- -
-
-df(sql, to_lower=False)[source]
-

Return a Spark Dataframe from the results of a Splice Machine SQL Query

-
-
Example
-
df = splice.df('SELECT * FROM MYSCHEMA.TABLE1 WHERE COL2 > 3')
-
-
-
-
Parameters
-
    -
  • sql – (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3)

  • -
  • to_lower – Whether or not to convert column names from the dataframe to lowercase

  • -
-
-
Returns
-

(Dataframe) A Spark DataFrame containing the results

-
-
-
- -
-
-dropTable(schema_and_or_table_name, table_name=None)[source]
-

Drop a specified table.

-
-
Example
-
splice.dropTable('schemaName.tableName') 
-
-# or
-
-splice.dropTable('schemaName', 'tableName')
-
-
-
-
Parameters
-
    -
  • schema_and_or_table_name – (str) Pass the schema name in this param when passing the table_name param, -or pass schemaName.tableName in this param without passing the table_name param

  • -
  • table_name – (optional) (str) Table Name, used when schema_and_or_table_name contains only the schema name

  • -
-
-
Returns
-

None

-
-
-
- -
-
-dropTableIfExists(schema_table_name, table_name=None)[source]
-

Drops a table if exists

-
-
Example
-
splice.dropTableIfExists('schemaName.tableName') 
-
-# or
-
-splice.dropTableIfExists('schemaName', 'tableName')
-
-
-
-
Parameters
-
    -
  • schema_table_name – (str) Pass the schema name in this param when passing the table_name param, -or pass schemaName.tableName in this param without passing the table_name param

  • -
  • table_name – (optional) (str) Table Name, used when schema_table_name contains only the schema name

  • -
-
-
Returns
-

None

-
-
-
- -
-
-execute(query_string)[source]
-

execute a query over JDBC

-
-
Example
-
splice.execute('DELETE FROM TABLE1 WHERE col2 > 3')
-
-
-
-
Parameters
-

query_string – (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3)

-
-
Returns
-

None

-
-
-
- -
-
-executeUpdate(query_string)[source]
-

execute a dml query:(update,delete,drop,etc)

-
-
Example
-
splice.executeUpdate('DROP TABLE table1')
-
-
-
-
Parameters
-

query_string – (string) SQL Query (eg. DROP TABLE table1)

-
-
Returns
-

None

-
-
-
- -
-
-export(dataframe, location, compression=False, replicationCount=1, fileEncoding=None, fieldSeparator=None, quoteCharacter=None)[source]
-

Export a dataFrame in CSV

-
-
Parameters
-
    -
  • dataframe – (DataFrame)

  • -
  • location – (str) Destination directory

  • -
  • compression – (bool) Whether to compress the output or not

  • -
  • replicationCount – (int) Replication used for HDFS write

  • -
  • fileEncoding – (str) fileEncoding or None, defaults to UTF-8

  • -
  • fieldSeparator – (str) fieldSeparator or None, defaults to ‘,’

  • -
  • quoteCharacter – (str) quoteCharacter or None, defaults to ‘”’

  • -
-
-
Returns
-

None

-
-
-
- -
-
-exportBinary(dataframe, location, compression, e_format='parquet')[source]
-

Export a dataFrame in binary format

-
-
Parameters
-
    -
  • dataframe – (DataFrame)

  • -
  • location – (str) Destination directory

  • -
  • compression – (bool) Whether to compress the output or not

  • -
  • e_format – (str) Binary format to be used, currently only ‘parquet’ is supported. [Default ‘parquet’]

  • -
-
-
Returns
-

None

-
-
-
- -
-
-fileToTable(file_path, schema_table_name, primary_keys=None, drop_table=False, **pandas_args)[source]
-

Load a file from the local filesystem or from a remote location and create a new table -(or recreate an existing table), and load the data from the file into the new table. Any file_path that can be -read by pandas should work here.

-
-
Parameters
-
    -
  • file_path – The local file to load

  • -
  • schema_table_name – The schema.table name

  • -
  • primary_keys – List[str] of primary keys for the table. Default None

  • -
  • drop_table – Whether or not to drop the table. If this is False and the table already exists, the -function will fail. Default False

  • -
  • pandas_args – Extra parameters to be passed into the pd.read_csv function. Any parameters accepted -in pd.read_csv will work here

  • -
-
-
Returns
-

None

-
-
-
- -
-
-getConnection()[source]
-

Return a connection to the database

-
- -
-
-getSchema(schema_table_name)[source]
-

Return the schema via JDBC.

-
-
Parameters
-

schema_table_name – (str) Table name

-
-
Returns
-

(StructType) PySpark StructType representation of the table

-
-
-
- -
-
-insert(dataframe, schema_table_name, to_upper=True, create_table=False)[source]
-

Insert a dataframe into a table (schema.table).

-
-
Parameters
-
    -
  • dataframe – (Dataframe) The dataframe you would like to insert

  • -
  • schema_table_name – (str) The table in which you would like to insert the DF

  • -
  • to_upper – (bool) If the dataframe columns should be converted to uppercase before table creation -If False, the table will be created with lower case columns. [Default True]

  • -
  • create_table – If the table does not exists at the time of the call, the table will first be created

  • -
-
-
Returns
-

None

-
-
-
- -
-
-insertRdd(rdd, schema, schema_table_name)[source]
-

Insert an rdd into a table (schema.table)

-
-
Parameters
-
    -
  • rdd – (RDD) The RDD you would like to insert

  • -
  • schema – (StructType) The schema of the rows in the RDD

  • -
  • schema_table_name – (str) The table in which you would like to insert the RDD

  • -
-
-
Returns
-

None

-
-
-
- -
-
-insertRddWithStatus(rdd, schema, schema_table_name, statusDirectory, badRecordsAllowed)[source]
-

Insert an rdd into a table (schema.table) while tracking and limiting records that fail to insert. The status directory and number of badRecordsAllowed allow for duplicate primary keys to be written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written to the status directory.

-
-
Parameters
-
    -
  • rdd – (RDD) The RDD you would like to insert

  • -
  • schema – (StructType) The schema of the rows in the RDD

  • -
  • schema_table_name – (str) The table in which you would like to insert the dataframe

  • -
  • statusDirectory – (str) The status directory where bad records file will be created

  • -
  • badRecordsAllowed – (int) The number of bad records are allowed. -1 for unlimited

  • -
-
-
Returns
-

None

-
-
-
- -
-
-insertWithStatus(dataframe, schema_table_name, statusDirectory, badRecordsAllowed)[source]
-

Insert a dataframe into a table (schema.table) while tracking and limiting records that fail to insert. -The status directory and number of badRecordsAllowed allow for duplicate primary keys to be -written to a bad records file. If badRecordsAllowed is set to -1, all bad records will be written -to the status directory.

-
-
Parameters
-
    -
  • dataframe – (Dataframe) The dataframe you would like to insert

  • -
  • schema_table_name – (str) The table in which you would like to insert the dataframe

  • -
  • statusDirectory – (str) The status directory where bad records file will be created

  • -
  • badRecordsAllowed – (int) The number of bad records are allowed. -1 for unlimited

  • -
-
-
Returns
-

None

-
-
-
- -
-
-internalDf(query_string)[source]
-

SQL to Dataframe translation (Lazy). Runs the query inside Splice Machine and sends the results to the Spark Adapter app

-
-
Parameters
-

query_string – (str) SQL Query (eg. SELECT * FROM table1 WHERE col2 > 3)

-
-
Returns
-

(DataFrame) pyspark dataframe contains the result of query_string

-
-
-
- -
-
internalRdd(schema_table_name, column_projection=None)[source]

Table with projections in Splice mapped to an RDD. Runs the projection inside Splice Machine and sends the results to the Spark Adapter app as an RDD

Parameters
  • schema_table_name – (str) Accessed table
  • column_projection – (list of strings) Names of selected columns

Returns
  (RDD[Row]) the result of the projection

mergeInto(dataframe, schema_table_name)[source]

Rows in the dataframe whose primary key is not in schema_table_name will be inserted into the table; rows in the dataframe whose primary key is in schema_table_name will be used to update the table.

This implementation differs from upsert in a way that allows triggers to work.

Parameters
  • dataframe – (Dataframe) The dataframe you would like to merge in
  • schema_table_name – (str) The table in which you would like to merge in the dataframe

Returns
  None

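A sketch against a hypothetical table whose primary key is ID: rows whose key already exists update the table, and new keys are inserted:

  changes = spark.createDataFrame([(1, 'new_val'), (99, 'brand_new')], ['ID', 'VAL'])
  splice.mergeInto(changes, 'MYSCHEMA.MYTABLE')
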
mergeIntoWithRdd(rdd, schema, schema_table_name)[source]

Rows in the RDD whose primary key is not in schema_table_name will be inserted into the table; rows in the RDD whose primary key is in schema_table_name will be used to update the table.

This implementation differs from upsertWithRdd in a way that allows triggers to work.

Parameters
  • rdd – (RDD) The RDD you would like to merge in
  • schema – (StructType) The schema of the rows in the RDD
  • schema_table_name – (str) The table in which you would like to merge in the RDD

Returns
  None

pandasToSpark(pdf)[source]

Convert a Pandas DF to Spark, and try to manage NaNs from Pandas in case of failure. Spark cannot handle Pandas NaN values in String columns (it considers them NaN numbers, ironically), so we replace the occurrences with a temporary value and then convert it back to null after it becomes a Spark DF

Parameters
  pdf – The Pandas dataframe

Returns
  The Spark DF

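A small sketch of the NaN handling described above (the column names are hypothetical):

  import numpy as np
  import pandas as pd
  pdf = pd.DataFrame({'ID': [1, 2], 'NAME': ['alice', np.nan]})
  sdf = splice.pandasToSpark(pdf)  # the NaN in the string column becomes null
  sdf.show()
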
rdd(schema_table_name, column_projection=None)[source]

Table with projections in Splice mapped to an RDD.

Parameters
  • schema_table_name – (str) Accessed table
  • column_projection – (list of strings) Names of selected columns

Returns
  (RDD[Row]) the result of the projection

replaceDataframeSchema(dataframe, schema_table_name)[source]

Returns a dataframe with all column names replaced with the proper string case from the DB table

Parameters
  • dataframe – (Dataframe) A dataframe with column names to convert
  • schema_table_name – (str) The schema.table with the correct column cases to pull from the database

Returns
  (DataFrame) A Spark DataFrame with the replaced schema

splitAndInsert(dataframe, schema_table_name, sample_fraction)[source]

Sample the dataframe, split the table, and insert the dataframe into the schema.table. This corresponds to an INSERT INTO ... SELECT statement

Parameters
  • dataframe – (DataFrame) Input data
  • schema_table_name – (str) Full table name in the format "schema.table"
  • sample_fraction – (float) A value between 0 and 1 specifying the fraction of the data to sample when determining the splits. For example, specify 0.005 to sample 0.5% of the data.

Returns
  None

tableExists(schema_and_or_table_name, table_name=None)[source]

Check whether or not a table exists

Example
  splice.tableExists('schemaName.tableName')
  # or
  splice.tableExists('schemaName', 'tableName')

Parameters
  • schema_and_or_table_name – (str) Pass the schema name in this param when passing the table_name param, or pass schemaName.tableName in this param without passing the table_name param
  • table_name – (optional) (str) Table name, used when schema_and_or_table_name contains only the schema name

Returns
  (bool) whether or not the table exists

toLower(dataframe)[source]

Returns a dataframe with all of the columns in lowercase

Parameters
  dataframe – (Dataframe) The dataframe to convert to lowercase

toUpper(dataframe)[source]

Returns a dataframe with all of the columns in uppercase

Parameters
  dataframe – (Dataframe) The dataframe to convert to uppercase

truncateTable(schema_table_name)[source]

Truncate a table

Parameters
  schema_table_name – (str) the full table name, in the format "schema.table_name", to truncate

Returns
  None

update(dataframe, schema_table_name)[source]

Update data from a dataframe for a specified schema_table_name (schema.table). The keys are required for the update, and any other columns provided will be updated in the rows.

Parameters
  • dataframe – (Dataframe) The dataframe you would like to update
  • schema_table_name – (str) Splice Machine table

Returns
  None

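A sketch (hypothetical table with primary key ID): only the rows matching the keys are touched, and only the provided columns are changed:

  changed = spark.createDataFrame([(1, 'updated_val')], ['ID', 'VAL'])
  splice.update(changed, 'MYSCHEMA.MYTABLE')
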
updateWithRdd(rdd, schema, schema_table_name)[source]

Update data from an RDD for a specified schema_table_name (schema.table). The keys are required for the update, and any other columns provided will be updated in the rows.

Parameters
  • rdd – (RDD) The RDD you would like to use for updating the table
  • schema – (StructType) The schema of the rows in the RDD
  • schema_table_name – (str) Splice Machine table

Returns
  None

upsert(dataframe, schema_table_name)[source]

Upsert the data from a dataframe into a table (schema.table). If triggers fail when calling upsert, use the mergeInto function instead.

Parameters
  • dataframe – (Dataframe) The dataframe you would like to upsert
  • schema_table_name – (str) The table in which you would like to upsert the dataframe

Returns
  None

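A sketch under the same assumptions as the insert example above; if table triggers fail, swap in mergeInto as noted:

  splice.upsert(df, 'MYSCHEMA.MYTABLE')
  # if triggers fail:
  splice.mergeInto(df, 'MYSCHEMA.MYTABLE')
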
upsertWithRdd(rdd, schema, schema_table_name)[source]

Upsert the data from an RDD into a table (schema.table). If triggers fail when calling upsertWithRdd, use the mergeIntoWithRdd function instead.

Parameters
  • rdd – (RDD) The RDD you would like to upsert
  • schema – (StructType) The schema of the rows in the RDD
  • schema_table_name – (str) The table in which you would like to upsert the RDD

Returns
  None

Module contents

\ No newline at end of file
diff --git a/docs/_build/html/splicemachine.stats.html b/docs/_build/html/splicemachine.stats.html
deleted file mode 100644
index 0f18df9c..00000000
--- a/docs/_build/html/splicemachine.stats.html
+++ /dev/null
@@ -1,841 +0,0 @@

splicemachine.stats module

This module contains statistical functions to help with Machine Learning and data analysis.

class DecisionTreeVisualizer[source]

Visualize a decision tree, either in a code-like format or with graphviz

static add_node(dot, parent, node_hash, root, realroot=False)[source]

Traverse through the .debugString JSON and generate a graphviz tree

Parameters
  • dot – dot file object
  • parent – not currently used
  • node_hash – unique node id
  • root – the root of the tree
  • realroot – whether or not it is the real root, or a recursive root

static feature_importance(spark, model, dataset, featuresCol='features')[source]

Return a dataframe containing the relative importance of each feature

Parameters
  • spark – the Spark session
  • model – The Spark Machine Learning model
  • dataset – (DataFrame) the Spark DataFrame to compute importances over
  • featuresCol – (str) the column containing the feature vector

Returns
  dataframe containing importance

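A usage sketch, assuming a hypothetical train_df that already has an assembled 'features' vector column and a 'label' column:

>>> from pyspark.ml.classification import RandomForestClassifier
>>> rf_model = RandomForestClassifier(featuresCol='features', labelCol='label').fit(train_df)
>>> DecisionTreeVisualizer.feature_importance(spark, rf_model, train_df, featuresCol='features')
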
static parse(lines)[source]

Parse the lines of a tree debug string

Parameters
  lines – lines of the debug string

Returns
  block JSON

static replacer(string, bad, good)[source]

Replace every string in "bad" with the corresponding string in "good"

Parameters
  • string – string to replace in
  • bad – array of strings to replace
  • good – array of strings to replace with

Returns
  the string with all replacements applied

static tree_json(tree)[source]

Generate a JSON representation of a decision tree

Parameters
  tree – tree debug string

Returns
  JSON

static visualize(model, feature_column_names, label_names, size=None, horizontal=False, tree_name='tree', visual=False)[source]

Visualize a decision tree, either in a code-like format or with graphviz

Parameters
  • model – the fitted decision tree classifier
  • feature_column_names – (List[str]) column names for features. You can access these feature names from your VectorAssembler (in PySpark) by calling its .getInputCols() function
  • label_names – (List[str]) labels vector (below avg, above avg)
  • size – tuple(int,int) The size of the graph. If unspecified, graphviz will automatically assign a size
  • horizontal – (bool) if the tree should be rendered horizontally
  • tree_name – the name you would like to give the tree
  • visual – (bool) True if you want a graphviz PDF containing your tree

Returns
  dot – the graphviz object

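A usage sketch, assuming a hypothetical train_df with 'features' and 'label' columns and the VectorAssembler (here called assembler) used upstream to build the feature vector:

>>> from pyspark.ml.classification import DecisionTreeClassifier
>>> dt_model = DecisionTreeClassifier().fit(train_df)
>>> dot = DecisionTreeVisualizer.visualize(
...     dt_model,
...     feature_column_names=assembler.getInputCols(),
...     label_names=['below avg', 'above avg'],
...     tree_name='my_tree',
...     visual=True)  # with visual=True, graphviz renders a PDF of the tree
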
class IndReconstructer(*args, **kwargs)[source]

Transformer to reconstruct a String Index from OneHotDummy columns. This can be used as part of a Pipeline object

Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers

Parameters
  • Transformer – Inherited Class
  • HasInputCol – Inherited Class
  • HasOutputCol – Inherited Class

Returns
  Transformed PySpark Dataframe with the original String Indexed variables

class OneHotDummies(*args, **kwargs)[source]

Transformer to generate dummy columns for categorical variables as part of a preprocessing pipeline

Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers

Parameters
  • Transformer – Inherited Class
  • HasInputCol – Inherited Class
  • HasOutputCol – Inherited Class

Returns
  PySpark DataFrame

class OverSampleCrossValidator(estimator, estimatorParamMaps, evaluator, numFolds=3, seed=None, parallelism=3, collectSubModels=False, labelCol='label', altEvaluators=None, overSample=True)[source]

Class to perform Cross Validation model evaluation while over-sampling minority labels.

Example
>>> from pyspark.sql.session import SparkSession
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.tuning import ParamGridBuilder
>>> from splicemachine.stats.stats import OverSampleCrossValidator
>>> spark = SparkSession.builder.getOrCreate()
>>> dataset = spark.createDataFrame(
...      [(Vectors.dense([0.0]), 0.0),
...       (Vectors.dense([0.5]), 0.0),
...       (Vectors.dense([0.4]), 1.0),
...       (Vectors.dense([0.6]), 1.0),
...       (Vectors.dense([1.0]), 1.0)] * 10,
...      ["features", "label"])
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> PRevaluator = BinaryClassificationEvaluator(metricName='areaUnderPR')
>>> AUCevaluator = BinaryClassificationEvaluator(metricName='areaUnderROC')
>>> ACCevaluator = MulticlassClassificationEvaluator(metricName="accuracy")
>>> cv = OverSampleCrossValidator(estimator=lr, estimatorParamMaps=grid,
...                               evaluator=AUCevaluator, altEvaluators=[PRevaluator, ACCevaluator],
...                               parallelism=2, seed=1234)
>>> cvModel = cv.fit(dataset)
>>> print(cvModel.avgMetrics)
[(0.5, [0.5888888888888888, 0.3888888888888889]), (0.806878306878307, [0.8556863149300125, 0.7055555555555556])]
>>> print(AUCevaluator.evaluate(cvModel.transform(dataset)))
0.8333333333333333

class OverSampler(*args, **kwargs)[source]

Transformer to oversample datapoints with minority labels

Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers

Parameters
  • Transformer – Inherited Class
  • HasInputCol – Inherited Class
  • HasOutputCol – Inherited Class

Returns
  PySpark Dataframe with labels in approximately equal ratios

Example
>>> from pyspark.sql import functions as F
>>> from pyspark.sql.session import SparkSession
>>> from pyspark.ml.linalg import Vectors
>>> from splicemachine.stats.stats import OverSampler
>>> spark = SparkSession.builder.getOrCreate()
>>> df = spark.createDataFrame(
...      [(Vectors.dense([0.0]), 0.0),
...       (Vectors.dense([0.5]), 0.0),
...       (Vectors.dense([0.4]), 1.0),
...       (Vectors.dense([0.6]), 1.0),
...       (Vectors.dense([1.0]), 1.0)] * 10,
...      ["features", "Class"])
>>> df.groupBy(F.col("Class")).count().orderBy("count").show()
+-----+-----+
|Class|count|
+-----+-----+
|  0.0|   20|
|  1.0|   30|
+-----+-----+
>>> oversampler = OverSampler(labelCol="Class", strategy="auto")
>>> oversampler.transform(df).groupBy("Class").count().show()
+-----+-----+
|Class|count|
+-----+-----+
|  0.0|   29|
|  1.0|   30|
+-----+-----+

class Rounder(*args, **kwargs)[source]

Transformer to round predictions for ordinal regression. Follows: https://spark.apache.org/docs/latest/ml-pipeline.html#transformers

Parameters
  • Transformer – Inherited Class
  • HasInputCol – Inherited Class
  • HasOutputCol – Inherited Class

Returns
  Transformed Dataframe with rounded predictionCol

Example
>>> from pyspark.sql.session import SparkSession
>>> from splicemachine.stats.stats import Rounder
>>> spark = SparkSession.builder.getOrCreate()
>>> dataset = spark.createDataFrame(
...      [(0.2, 0.0),
...       (1.2, 1.0),
...       (1.6, 2.0),
...       (1.1, 0.0),
...       (3.1, 0.0)],
...      ["prediction", "label"])
>>> dataset.show()
+----------+-----+
|prediction|label|
+----------+-----+
|       0.2|  0.0|
|       1.2|  1.0|
|       1.6|  2.0|
|       1.1|  0.0|
|       3.1|  0.0|
+----------+-----+
>>> rounder = Rounder(predictionCol="prediction", labelCol="label", clipPreds=True)
>>> rounder.transform(dataset).show()
+----------+-----+
|prediction|label|
+----------+-----+
|       0.0|  0.0|
|       1.0|  1.0|
|       2.0|  2.0|
|       1.0|  0.0|
|       2.0|  0.0|
+----------+-----+
>>> rounderNoClip = Rounder(predictionCol="prediction", labelCol="label", clipPreds=False)
>>> rounderNoClip.transform(dataset).show()
+----------+-----+
|prediction|label|
+----------+-----+
|       0.0|  0.0|
|       1.0|  1.0|
|       2.0|  2.0|
|       1.0|  0.0|
|       3.0|  0.0|
+----------+-----+

class SpliceBaseEvaluator(spark, evaluator, supported_metrics, predictionCol='prediction', labelCol='label')[source]

Base ModelEvaluator

get_results(as_dict=False)[source]

Get the results of the evaluation

Parameters
  as_dict – (bool) whether or not to return the results as a dict

Returns
  dictionary of metrics

input(predictions_dataframe)[source]

Input a dataframe of predictions to evaluate

Parameters
  predictions_dataframe – input df

Returns
  None

class SpliceBinaryClassificationEvaluator(spark, predictionCol='prediction', labelCol='label', confusion_matrix=True)[source]

A Splice Machine evaluator for Spark binary classification models. Implements functions from SpliceBaseEvaluator.

input(predictions_dataframe)[source]

Evaluate actual vs. predicted values in a dataframe

Parameters
  predictions_dataframe – the dataframe containing the label and the prediction

plotROC(fittedEstimator, ax)[source]

Plots the receiver operating characteristic curve for the trained classifier

Parameters
  • fittedEstimator – fitted logistic regression model
  • ax – matplotlib axis object

Returns
  axis with ROC plot

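A usage sketch, where predictions_df is the hypothetical output of a fitted binary classifier's transform() with the default 'prediction' and 'label' columns:

>>> evaluator = SpliceBinaryClassificationEvaluator(spark)
>>> evaluator.input(predictions_df)
>>> evaluator.get_results(as_dict=True)
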
class SpliceMultiClassificationEvaluator(spark, predictionCol='prediction', labelCol='label')[source]

A Splice Machine evaluator for Spark multi-class models. Implements functions from SpliceBaseEvaluator.

class SpliceRegressionEvaluator(spark, predictionCol='prediction', labelCol='label')[source]

A Splice Machine evaluator for Spark regression models. Implements functions from SpliceBaseEvaluator.

best_fit_distribution(data, col_name, bins, ax)[source]

Model data by finding the best fit distribution to the data

Parameters
  • data – DataFrame with one column containing the feature whose distribution is to be investigated
  • col_name – column name for the feature
  • bins – number of bins to use in generating the histogram of this data
  • ax – axis to plot the histogram on

Returns
  (best_distribution.name, best_params, best_sse)
  best_distribution.name: string of the best distribution name
  best_params: parameters for this distribution
  best_sse: sum of squared errors for this distribution against the empirical pdf

estimateCovariance(df, features_col='features')[source]

Compute the covariance matrix for a given dataframe.

Parameters
  • df – PySpark dataframe
  • features_col – name of the column with the features, defaults to 'features'

Returns
  np.ndarray: A multi-dimensional array where the number of rows and columns both equal the length of the arrays in the input dataframe.

Note
  The multi-dimensional covariance array should be calculated using outer products. Don't forget to normalize the data by first subtracting the mean.

get_confusion_matrix(spark, TP, TN, FP, FN)[source]

Creates and returns a confusion matrix

Parameters
  • spark – the Spark session
  • TP – True Positives
  • TN – True Negatives
  • FP – False Positives
  • FN – False Negatives

Returns
  Spark DataFrame

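A usage sketch with made-up counts:

>>> get_confusion_matrix(spark, TP=50, TN=40, FP=7, FN=3).show()
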
get_string_pipeline(df, cols_to_exclude, steps=['StringIndexer', 'OneHotEncoder', 'OneHotDummies'])[source]

Generates a list of preprocessing stages

Parameters
  • df – DataFrame including only the training data
  • cols_to_exclude – column names we don't want to include in the preprocessing (i.e. the SUBJECT/target column)
  • steps – preprocessing steps to take

Returns
  (stages, Numeric_Columns)
  stages: list of pipeline stages to be used in preprocessing
  Numeric_Columns: list of columns that contain numeric features

inspectTable(spliceMLCtx, sql, topN=5)[source]

Inspect the values of the columns of the table (dataframe) returned from the SQL query

Parameters
  • spliceMLCtx – SpliceMLContext
  • sql – SQL string to execute
  • topN – the number of most frequent elements of a column to return, defaults to 5

make_pdf(dist, params, size=10000)[source]

Generate a distribution's Probability Distribution Function

Parameters
  • dist – the distribution to evaluate
  • params – the parameters of the distribution
  • size – the number of points to generate, defaults to 10000

Returns
  series of probability density function values for this distribution

pca_with_scores(df, k=10)[source]

Computes the top k principal components, corresponding scores, and all eigenvalues.

Parameters
  • df – A Spark dataframe with a 'features' column, which (column) consists of DenseVectors.
  • k – The number of principal components to return, defaults to 10

Returns
  (eigenvectors, RDD of scores, eigenvalues)
  • Eigenvectors: multi-dimensional array where the number of rows equals the length of the arrays in the input RDD and the number of columns equals k.
  • RDD of scores: has the same number of rows as data and consists of arrays of length k.
  • Eigenvalues: an array of length d (the number of features).

Note
  All eigenvalues should be returned in sorted order (largest to smallest). eigh returns each eigenvector as a column. This function should also return eigenvectors as columns.

postprocessing_pipeline(df, cols_to_exclude)[source]

Assemble a postprocessing pipeline to reconstruct the original categorical indexed values from the OneHotDummy columns

Parameters
  • df – DataFrame including the original string columns
  • cols_to_exclude – list of columns to exclude

Returns
  (reconstructers, String_Columns)
  reconstructers: list of IndReconstructer stages
  String_Columns: list of columns that are being reconstructed

reconstructPCA(sql, df, pc, mean, std, originalColumns, fits, pcaColumn='pcaFeatures')[source]

Reconstruct data from a lower dimensional space after performing PCA

Parameters
  • sql – SQLContext
  • df – PySpark DataFrame: the input PySpark DataFrame
  • pc – numpy.ndarray: principal components projected onto
  • mean – numpy.ndarray: mean of the original columns
  • std – numpy.ndarray: standard deviation of the original columns
  • originalColumns – list: original column names
  • fits – fits of features returned from best_fit_distribution
  • pcaColumn – column in df that contains the PCA features, defaults to 'pcaFeatures'

Returns
  dataframe containing the reconstructed data

varianceExplained(df, k=10)[source]

Returns the proportion of variance explained by k principal components. Calls the pca_with_scores procedure above.

Parameters
  • df – PySpark DataFrame
  • k – number of principal components, defaults to 10

Returns
  (proportion, principal_components, scores, eigenvalues)

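A usage sketch, assuming a hypothetical features_df with a 'features' column of DenseVectors:

>>> proportion, components, scores, eigenvalues = varianceExplained(features_df, k=5)
>>> print(proportion)  # fraction of total variance captured by the top 5 components
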
vector_assembler_pipeline(df, columns, doPCA=False, k=10)[source]

After preprocessing string columns, this function can be used to assemble a feature vector to be used for learning. Creates the following stages: VectorAssembler -> StandardScaler [-> PCA]

Parameters
  • df – DataFrame containing the preprocessed columns
  • columns – list of column names of the preprocessed columns
  • doPCA – whether to include PCA as part of the vector assembler, defaults to False
  • k – number of principal components to use, defaults to 10

Returns
  List of vector assembling stages

\ No newline at end of file
diff --git a/docs/spark.rst b/docs/spark.rst
deleted file mode 100644
index 1067b5ba..00000000
--- a/docs/spark.rst
+++ /dev/null
@@ -1,38 +0,0 @@
-spark package
-=============
-
-Subpackages
------------
-
-.. toctree::
-   :maxdepth: 4
-
-   spark.test
-
-Submodules
-----------
-
-spark.constants module
-----------------------
-
-.. automodule:: spark.constants
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
-spark.context module
---------------------
-
-.. automodule:: spark.context
-   :members:
-   :undoc-members:
-   :show-inheritance:
-
-
-Module contents
----------------
-
-.. automodule:: spark
-   :members:
-   :undoc-members:
-   :show-inheritance:
diff --git a/setup.py b/setup.py
index d4acd258..693ec7c5 100755
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@ setup(
     name="splicemachine",
-    version="2.7.0.dev0",
+    version="2.8.0",
     install_requires=DEPENDENCIES,
     extras_require={
         'notebook': NOTEBOOK_DEPS,
diff --git a/splicemachine/features/feature_store.py b/splicemachine/features/feature_store.py
index 2c57465c..de2c0479 100644
--- a/splicemachine/features/feature_store.py
+++ b/splicemachine/features/feature_store.py
@@ -1,5 +1,5 @@
 from sys import stderr
-from typing import List, Dict, Optional, Union
+from typing import List, Dict, Optional, Union, Any
 from datetime import datetime
 import json
@@ -71,9 +71,9 @@ def remove_training_view(self, name: str):
         make_request(self._FS_URL, Endpoints.TRAINING_VIEWS, RequestType.DELETE, self._auth, { "name": name })
         print('Done.')

-    def get_summary(self) -> TrainingView:
+    def get_summary(self) -> Dict[str, str]:
         """
-        This function returns a summary of the feature store including:
+        This function returns a summary of the feature store including:\n
         * Number of feature sets
         * Number of deployed feature sets
         * Number of features
@@ -122,7 +122,7 @@ def get_training_view_id(self, name: str) -> int:
         r = make_request(self._FS_URL, Endpoints.TRAINING_VIEW_ID, RequestType.GET, self._auth, { "name": name })
         return int(r)

-    def get_features_by_name(self, names: Optional[List[str]] = None, as_list=False) -> Union[List[Feature], SparkDF]:
+    def get_features_by_name(self, names: Optional[List[str]] = None, as_list=False) -> Union[List[Feature], PandasDF]:
         """
         Returns a dataframe or list of features whose names are provided
@@ -378,12 +378,13 @@ def get_training_set_from_view(self, training_view: str, features: Union[List[Fe
         """
         Returns the training set as a Spark Dataframe from a Training View. When a user calls this function
         (assuming they have registered the feature store with mlflow using :py:meth:`~mlflow.register_feature_store` )
-        the training dataset's metadata will be tracked in mlflow automatically. The following will be tracked:
-        including:
-        * Training View
-        * Selected features
-        * Start time
-        * End time
+        the training dataset's metadata will be tracked in mlflow automatically.\n
+        The following will be tracked:\n
+        * Training View
+        * Selected features
+        * Start time
+        * End time
+
         This tracking will occur in the current run (if there is an active run)
         or in the next run that is started after calling this function (if no run is currently active).
@@ -591,7 +592,7 @@ def create_training_view(self, name: str, sql: str, primary_keys: List[str], joi
         Registers a training view for use in generating training SQL

         :param name: The training set name. This must be unique to other existing training sets unless replace is True
-        :param sql: (str) a SELECT statement that includes:
+        :param sql: (str) a SELECT statement that includes:\n
         * the primary key column(s) - uniquely identifying a training row/case
         * the inference timestamp column - timestamp column with which to join features (temporal join timestamp)
         * join key(s) - the references to the other feature tables' primary keys (ie customer_id, location_id)
@@ -647,12 +648,24 @@ def deploy_feature_set(self, schema_name: str, table_name: str):

     def get_features_from_feature_set(self, schema_name: str, table_name: str) -> List[Feature]:
         """
-        Returns either a pandas DF of feature details or a List of features for a specified feature set
+        Returns either a pandas DF of feature details or a List of features for a specified feature set.
+        You can get features from multiple feature sets by concatenating the results of this call.
+        For example, to get features from 2 feature sets, `foo.bar1` and `foo2.bar4`:
+
+        .. code-block:: python
+
+            features = fs.get_features_from_feature_set('foo','bar1') + fs.get_features_from_feature_set('foo2','bar4')
+
+        If you want a list of just the Feature NAMES (ie a List[str]) you can simply run:
+
+        .. code-block:: python
+
+            features = fs.get_features_from_feature_set('foo','bar1') + fs.get_features_from_feature_set('foo2','bar4')
+            feature_names = [f.name for f in features]

         :param schema_name: Feature Set schema name
         :param table_name: Feature Set table name
-        :param as_list: Whehter to return a list of Features or a Pandas DF of the Features
-        :return: Either a PandasDF of feature information or a List of Features
+        :return: List of Features
         """
         r = make_request(self._FS_URL, Endpoints.FEATURE_SET_DETAILS, RequestType.GET,
                          self._auth, params={'schema':schema_name, 'table':table_name})
@@ -969,12 +982,12 @@ def create_aggregation_feature_set_from_source(self, source_name: str, schema_na
         )

         This will create, deploy and return a FeatureSet called 'RETAIL_FS.AUTO_RFM'.
-        The Feature Set will have 15 features:
-        * 6 for the 'AR_CLOTHING_QTY' prefix (sum & max over provided agg windows)
-        * 3 for the 'AR_DELICATESSEN_QTY' prefix (avg over provided agg windows)
-        * 6 for the 'AR_GARDEN_QTY' prefix (count & avg over provided agg windows)
+        The Feature Set will have 15 features:\n
+        * 6 for the `AR_CLOTHING_QTY` prefix (sum & max over provided agg windows)
+        * 3 for the `AR_DELICATESSEN_QTY` prefix (avg over provided agg windows)
+        * 6 for the `AR_GARDEN_QTY` prefix (count & avg over provided agg windows)

-        A Pipeline is also created and scheduled in Airflow that feeds it every 5 days from the Source 'CUSTOMER_RFM'
+        A Pipeline is also created and scheduled in Airflow that feeds it every 5 days from the Source `CUSTOMER_RFM`
         Backfill will also occur, reading data from the source as of '2002-01-01 00:00:00' with a 5 day window
         """
         schema_name, table_name, source_name = schema_name.upper(), table_name.upper(), source_name.upper()
@@ -1105,24 +1118,23 @@ def display_model_feature_drift(self, schema_name: str, table_name: str):
                                 self._auth, params={ "schema": schema_name, "table": table_name})['metadata']
         training_set_df, model_table_df = self._retrieve_model_data_sets(schema_name, table_name)
-        features = metadata['features'].split(',')
+        features = [f.upper() for f in metadata['features'].split(',')]
         build_feature_drift_plot(features, training_set_df, model_table_df)

     def display_model_drift(self, schema_name: str, table_name: str, time_intervals: int,
                             start_time: datetime = None, end_time: datetime = None):
         """
-        Displays as many as 'time_intervals' plots showing the distribution of the model prediction within each time
+        Displays as many as `time_intervals` plots showing the distribution of the model prediction within each time
         period. Time periods are equal periods of time where predictions are present in the model table
-        'schema_name'.'table_name'. Model predictions are first filtered to only those occurring after 'start_time' if
-        specified and before 'end_time' if specified.
+        `schema_name.table_name`. Model predictions are first filtered to only those occurring after `start_time` if
+        specified and before `end_time` if specified.

         :param schema_name: schema where the model table resides
         :param table_name: name of the model table
         :param time_intervals: number of time intervals to plot
         :param start_time: if specified, filters to only show predictions occurring after this date/time
         :param end_time: if specified, filters to only show predictions occurring before this date/time
-        :return: None
         """
         # database stores object names in upper case
         schema_name = schema_name.upper()
@@ -1328,12 +1340,35 @@ def link_training_set_to_mlflow(self, features: Union[List[Feature], List[str]],

     def set_feature_store_url(self, url: str):
+        """
+        Sets the Feature Store URL. You must call this before calling any feature store functions, or set the FS_URL
+        environment variable before creating your Feature Store object
+
+        :param url: The Feature Store URL
+        """
         self._FS_URL = url

     def login_fs(self, username, password):
+        """
+        Function to login to the Feature Store using basic auth. These correspond to your Splice Machine database user
+        and password. If you are running outside of the managed Splice Machine Cloud Service, you must call either
+        this or set_token in order to call any functions in the feature store, or set the SPLICE_JUPYTER_USER and
+        SPLICE_JUPYTER_PASSWORD environment variables before creating your FeatureStore object.
+
+        :param username: Username
+        :param password: Password
+        """
         self._auth = HTTPBasicAuth(username, password)

     def set_token(self, token):
+        """
+        Function to login to the Feature Store using JWT. This corresponds to your Splice Machine database user's JWT
+        token. If you are running outside of the managed Splice Machine Cloud Service, you must call either
+        this or login_fs in order to call any functions in the feature store, or set the SPLICE_JUPYTER_TOKEN
+        environment variable before creating your FeatureStore object.
+
+        :param token: JWT Token
+        """
         self._auth = token

     def __try_auto_login(self):
diff --git a/splicemachine/features/utils/drift_utils.py b/splicemachine/features/utils/drift_utils.py
index 2a86ce4b..d07d3ad4 100644
--- a/splicemachine/features/utils/drift_utils.py
+++ b/splicemachine/features/utils/drift_utils.py
@@ -75,7 +75,6 @@ def build_feature_drift_plot(features, training_set_df, model_table_df):
     :param features: list of features to analyze
     :param training_set_df: the dataframe used for training the model that contains all the features to analyze
     :param model_table_df: the dataframe with the content of the model table containing all input features
-    :return: None
     """
     final_features = [f for f in features if f in model_table_df.columns]
     # prep plot area
diff --git a/splicemachine/mlflow_support/mlflow_support.py b/splicemachine/mlflow_support/mlflow_support.py
index 533b8a0e..481d5b8a 100644
--- a/splicemachine/mlflow_support/mlflow_support.py
+++ b/splicemachine/mlflow_support/mlflow_support.py
@@ -505,7 +505,6 @@ def _timer(timer_name, param=False):
     :param timer_name: (str) the name of the timer
     :param param: (bool) whether or not to log the timer as a param (default=True). If false, logs as metric.
-    :return: None
     """
     t0 = time.time()
     try:
@@ -838,6 +837,7 @@ def _deploy_kubernetes(run_id: str, service_port: int = 80,
     :param memory_request: (default 512Mi) [USED IF RESOURCE REQUESTS ENABLED] amount of RAM to request
     :param memory_limit: (default 2048Mi) [USED IF RESOURCE LIMITS ENABLED] amount of RAM to limit at
     :param expose_external: (default False) whether or not to create Ingress resource to deploy outside of the cluster.
+
     :NOTE:
         .. code-block:: text
@@ -926,13 +926,13 @@ def _deploy_db(db_schema_name: str,
     :param replace: (bool) whether or not to replace a currently existing model. This param is not yet implemented
     :return: None

-    This function creates the following IF you are creating a table from the dataframe \n
-    * The model table where run_id is the run_id passed in. This table will have a column for each feature in the feature vector. It will also contain:\n
-    * USER which is the current user who made the request
-    * EVAL_TIME which is the CURRENT_TIMESTAMP
-    * the PRIMARY KEY column(s) passed in
-    * PREDICTION. The prediction of the model. If the :classes: param is not filled in, this will be default values for classification models
-    * A column for each class of the predictor with the value being the probability/confidence of the model if applicable\n
+    This function creates the following IF you are creating a table from the dataframe\n
+    * The model table where run_id is the run_id passed in. This table will have a column for each feature in the feature vector. It will also contain:\n
+        * USER which is the current user who made the request
+        * EVAL_TIME which is the CURRENT_TIMESTAMP
+        * the PRIMARY KEY column(s) passed in
+        * PREDICTION. The prediction of the model. If the :classes: param is not filled in, this will be default values for classification models
+        * A column for each class of the predictor with the value being the probability/confidence of the model if applicable\n
     IF you are deploying to an existing table, the table will be altered to include the columns above. \n
     :NOTE:
         .. code-block:: text
@@ -1002,8 +1002,9 @@ def _watch_job(job_id: int):
     """
     Stream the logs in real time to standard out of a Job
+
     :param job_id: the job id to watch (returned after executing an operation)
-    NOTE: If the job being watched fails, this function will throw a SpliceMachineException
+    :raise SpliceMachineException: If the job being watched fails
     """
     previous_lines = []
     warn = False  # If there were any warnings from the log, we want to notify the user explicitly
diff --git a/splicemachine/notebook.py b/splicemachine/notebook.py
index 51869f59..68d48cb8 100644
--- a/splicemachine/notebook.py
+++ b/splicemachine/notebook.py
@@ -22,6 +22,7 @@ def run_sql(sql):
     """
     Runs a SQL statement over JDBC from the Splice Machine managed Jupyter notebook environment. If you
     are running outside of the Splice Jupyter environment, you must have a sql kernel and magic set up and configured.
+
     :param sql: The SQL to execute
     """
     if not get_ipython():