diff --git a/Docs/.doctrees/environment.pickle b/Docs/.doctrees/environment.pickle
deleted file mode 100644
index 46f516b..0000000
Binary files a/Docs/.doctrees/environment.pickle and /dev/null differ
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..8941a68
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+SPHINXPROJ    = pyFTS
+SOURCEDIR    = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/_build/doctrees/environment.pickle b/docs/_build/doctrees/environment.pickle
new file mode 100644
index 0000000..e85d8df
Binary files /dev/null and b/docs/_build/doctrees/environment.pickle differ
diff --git a/Docs/.doctrees/index.doctree b/docs/_build/doctrees/index.doctree
similarity index 84%
rename from Docs/.doctrees/index.doctree
rename to docs/_build/doctrees/index.doctree
index c5b622c..1c4e020 100644
Binary files a/Docs/.doctrees/index.doctree and b/docs/_build/doctrees/index.doctree differ
diff --git a/docs/_build/doctrees/modules.doctree b/docs/_build/doctrees/modules.doctree
new file mode 100644
index 0000000..3c1e189
Binary files /dev/null and b/docs/_build/doctrees/modules.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.benchmarks.doctree b/docs/_build/doctrees/pyFTS.benchmarks.doctree
new file mode 100644
index 0000000..93f1d07
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.benchmarks.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.common.doctree b/docs/_build/doctrees/pyFTS.common.doctree
new file mode 100644
index 0000000..401a510
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.common.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.data.doctree b/docs/_build/doctrees/pyFTS.data.doctree
new file mode 100644
index 0000000..8082b14
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.data.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.doctree b/docs/_build/doctrees/pyFTS.doctree
new file mode 100644
index 0000000..4a6d2a6
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.models.doctree b/docs/_build/doctrees/pyFTS.models.doctree
new file mode 100644
index 0000000..71d9703
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.models.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.models.ensemble.doctree b/docs/_build/doctrees/pyFTS.models.ensemble.doctree
new file mode 100644
index 0000000..881c1d5
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.models.ensemble.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.models.multivariate.doctree b/docs/_build/doctrees/pyFTS.models.multivariate.doctree
new file mode 100644
index 0000000..66568fc
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.models.multivariate.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.models.nonstationary.doctree b/docs/_build/doctrees/pyFTS.models.nonstationary.doctree
new file mode 100644
index 0000000..1bf8bfd
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.models.nonstationary.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.models.seasonal.doctree b/docs/_build/doctrees/pyFTS.models.seasonal.doctree
new file mode 100644
index 0000000..46959dd
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.models.seasonal.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.partitioners.doctree b/docs/_build/doctrees/pyFTS.partitioners.doctree
new file mode 100644
index 0000000..ea76c90
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.partitioners.doctree differ
diff --git a/docs/_build/doctrees/pyFTS.probabilistic.doctree b/docs/_build/doctrees/pyFTS.probabilistic.doctree
new file mode 100644
index 0000000..4249cec
Binary files /dev/null and b/docs/_build/doctrees/pyFTS.probabilistic.doctree differ
diff --git a/Docs/.buildinfo b/docs/_build/html/.buildinfo
similarity index 82%
rename from Docs/.buildinfo
rename to docs/_build/html/.buildinfo
index 52301e1..c7a169b 100644
--- a/Docs/.buildinfo
+++ b/docs/_build/html/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 23a83af8fc84956f406e435cfed523f9
+config: e7ed10e5c6fce8ae09ccdd825dd277b3
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/Docs/.nojekyll b/docs/_build/html/.nojekyll
similarity index 100%
rename from Docs/.nojekyll
rename to docs/_build/html/.nojekyll
diff --git a/docs/_build/html/_modules/index.html b/docs/_build/html/_modules/index.html
new file mode 100644
index 0000000..87f5b30
--- /dev/null
+++ b/docs/_build/html/_modules/index.html
@@ -0,0 +1,151 @@
[Generated Sphinx HTML page: "Overview: module code — pyFTS 1.2.3 documentation"; markup stripped, only the rendered text is preserved below.]
All modules for which code is available

\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/Measures.html b/docs/_build/html/_modules/pyFTS/benchmarks/Measures.html
new file mode 100644
index 0000000..1983517
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/benchmarks/Measures.html
@@ -0,0 +1,532 @@
[Generated Sphinx HTML page: "pyFTS.benchmarks.Measures — pyFTS 1.2.3 documentation"; markup stripped, rendered module source preserved below.]

Source code for pyFTS.benchmarks.Measures

# -*- coding: utf8 -*-

"""
pyFTS module for common benchmark metrics
"""

import time
import numpy as np
import pandas as pd
from pyFTS.common import FuzzySet, SortedCollection
from pyFTS.probabilistic import ProbabilityDistribution


def acf(data, k):
    """
    Autocorrelation function estimate at lag k

    :param data: time series values
    :param k: the lag
    :return: the ACF estimate at lag k
    """
    mu = np.mean(data)
    sigma = np.var(data)
    n = len(data)
    s = 0
    for t in np.arange(0, n - k):
        s += (data[t] - mu) * (data[t + k] - mu)

    return 1 / ((n - k) * sigma) * s
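A minimal usage sketch of acf (assumes pyFTS is installed; the series is illustrative):

import numpy as np
from pyFTS.benchmarks import Measures

series = np.sin(np.arange(0, 20, 0.1))   # 200 points of a smooth periodic signal
print(Measures.acf(series, 1))    # near 1: adjacent points move together
print(Measures.acf(series, 31))   # roughly half a period away: strongly negative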
def rmse(targets, forecasts):
    """
    Root Mean Squared Error

    :param targets: array of target values
    :param forecasts: array of forecasted values
    :return: the RMSE
    """
    if isinstance(targets, list):
        targets = np.array(targets)
    if isinstance(forecasts, list):
        forecasts = np.array(forecasts)
    return np.sqrt(np.nanmean((targets - forecasts) ** 2))


def rmse_interval(targets, forecasts):
    """
    Root Mean Squared Error for interval forecasts, using the interval mid points

    :param targets: array of target values
    :param forecasts: array of forecasted intervals
    :return: the RMSE
    """
    fmean = [np.mean(i) for i in forecasts]
    return np.sqrt(np.nanmean((fmean - targets) ** 2))


def mape(targets, forecasts):
    """
    Mean Absolute Percentage Error

    :param targets: array of target values
    :param forecasts: array of forecasted values
    :return: the MAPE, as a percentage
    """
    if isinstance(targets, list):
        targets = np.array(targets)
    if isinstance(forecasts, list):
        forecasts = np.array(forecasts)
    return np.mean(np.abs((targets - forecasts) / targets)) * 100


def smape(targets, forecasts, type=2):
    """
    Symmetric Mean Absolute Percentage Error

    :param targets: array of target values
    :param forecasts: array of forecasted values
    :param type: which of the three common SMAPE variants to use (1, 2 or 3)
    :return: the SMAPE
    """
    if isinstance(targets, list):
        targets = np.array(targets)
    if isinstance(forecasts, list):
        forecasts = np.array(forecasts)
    if type == 1:
        return np.mean(np.abs(forecasts - targets) / ((forecasts + targets) / 2))
    elif type == 2:
        return np.mean(np.abs(forecasts - targets) / (abs(forecasts) + abs(targets))) * 100
    else:
        return sum(np.abs(forecasts - targets)) / sum(forecasts + targets)
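A quick comparison of the point metrics on toy data (values are illustrative):

import numpy as np
from pyFTS.benchmarks import Measures

targets = [10., 20., 30., 40.]
forecasts = [12., 18., 33., 37.]

print(Measures.rmse(targets, forecasts))           # ~2.55
print(Measures.mape(targets, forecasts))           # ~11.88 (%)
print(Measures.smape(targets, forecasts))          # type=2 variant, in %
print(Measures.smape(targets, forecasts, type=1))  # ratio variant in [0, 2]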
def mape_interval(targets, forecasts):
    """Mean Absolute Percentage Error for interval forecasts, using the interval mid points"""
    fmean = [np.mean(i) for i in forecasts]
    return np.mean(abs(fmean - targets) / fmean) * 100


def UStatistic(targets, forecasts):
    """
    Theil's U Statistic

    :param targets: array of target values
    :param forecasts: array of forecasted values
    :return: the ratio of the forecast RMSE to the naïve (random walk) RMSE
    """
    l = len(targets)
    if isinstance(targets, list):
        targets = np.array(targets)
    if isinstance(forecasts, list):
        forecasts = np.array(forecasts)

    naive = []
    y = []
    for k in np.arange(0, l - 1):
        y.append((forecasts[k] - targets[k]) ** 2)
        naive.append((targets[k + 1] - targets[k]) ** 2)
    return np.sqrt(sum(y) / sum(naive))


def TheilsInequality(targets, forecasts):
    """
    Theil's Inequality Coefficient

    :param targets: array of target values
    :param forecasts: array of forecasted values
    :return: the coefficient, in the range [0, 1]
    """
    res = targets - forecasts
    t = len(res)
    us = np.sqrt(sum([u ** 2 for u in res]))
    ys = np.sqrt(sum([y ** 2 for y in targets]))
    fs = np.sqrt(sum([f ** 2 for f in forecasts]))
    return us / (ys + fs)


def BoxPierceStatistic(data, h):
    """
    Q Statistic for the Box-Pierce test

    :param data: residuals or time series values
    :param h: number of lags to test
    :return: the Q statistic
    """
    n = len(data)
    s = 0
    for k in np.arange(1, h + 1):
        r = acf(data, k)
        s += r ** 2
    return n * s


def BoxLjungStatistic(data, h):
    """
    Q Statistic for the Ljung-Box test

    :param data: residuals or time series values
    :param h: number of lags to test
    :return: the Q statistic
    """
    n = len(data)
    s = 0
    for k in np.arange(1, h + 1):
        r = acf(data, k)
        s += r ** 2 / (n - k)
    return n * (n + 2) * s
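The Q statistic is compared against a chi-squared distribution with h degrees of freedom; a reproducible sketch on white noise:

import numpy as np
from scipy import stats
from pyFTS.benchmarks import Measures

np.random.seed(0)
noise = np.random.normal(0, 1, 200)
h = 10
q = Measures.BoxLjungStatistic(noise, h)
# Under the null of no autocorrelation, Q ~ chi-squared with h degrees of freedom
print(q, stats.chi2.sf(q, h))  # a large p-value: no evidence of autocorrelation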
def sharpness(forecasts):
    """Sharpness - Mean size of the intervals"""
    tmp = [i[1] - i[0] for i in forecasts]
    return np.mean(tmp)


def resolution(forecasts):
    """Resolution - Mean absolute deviation of the interval sizes around the mean size"""
    shp = sharpness(forecasts)
    tmp = [abs((i[1] - i[0]) - shp) for i in forecasts]
    return np.mean(tmp)


def coverage(targets, forecasts):
    """Percent of target values that fall inside the forecasted intervals"""
    preds = []
    for i in np.arange(0, len(forecasts)):
        if targets[i] >= forecasts[i][0] and targets[i] <= forecasts[i][1]:
            preds.append(1)
        else:
            preds.append(0)
    return np.mean(preds)
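The interval metrics on a toy set of prediction intervals:

from pyFTS.benchmarks import Measures

intervals = [[9, 11], [18, 22], [28, 35]]
targets = [10, 23, 30]

print(Measures.sharpness(intervals))          # mean width: (2 + 4 + 7) / 3 = 4.33
print(Measures.coverage(targets, intervals))  # 2 of 3 targets covered: 0.67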
def pinball(tau, target, forecast):
    """
    Pinball loss function. Measures the distance of the forecast to the tau-quantile of the target

    :param tau: quantile value in the range (0,1)
    :param target: target value
    :param forecast: forecasted value
    :return: float, distance of forecast to the tau-quantile of the target
    """
    if target >= forecast:
        return (target - forecast) * tau
    else:
        return (forecast - target) * (1 - tau)
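The loss is asymmetric: for a high quantile, under-forecasting costs far more than over-forecasting:

from pyFTS.benchmarks import Measures

# tau = 0.95: under-forecasting is penalized 19 times more than over-forecasting
print(Measures.pinball(0.95, 100, 90))   # (100 - 90) * 0.95 = 9.5
print(Measures.pinball(0.95, 100, 110))  # (110 - 100) * 0.05 = 0.5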
def pinball_mean(tau, targets, forecasts):
    """
    Mean pinball loss value of the forecast for a given tau-quantile of the targets

    :param tau: quantile value in the range (0,1)
    :param targets: list of target values
    :param forecasts: list of prediction intervals
    :return: float, the pinball loss mean for tau quantile
    """
    if tau <= 0.5:
        preds = [pinball(tau, targets[i], forecasts[i][0]) for i in np.arange(0, len(forecasts))]
    else:
        preds = [pinball(tau, targets[i], forecasts[i][1]) for i in np.arange(0, len(forecasts))]
    return np.nanmean(preds)


def winkler_score(tau, target, forecast):
    """R. L. Winkler, A Decision-Theoretic Approach to Interval Estimation, J. Am. Stat. Assoc. 67 (337) (1972) 187-191. doi:10.2307/2284720."""
    delta = forecast[1] - forecast[0]
    if forecast[0] <= target <= forecast[1]:
        return delta
    elif forecast[0] > target:
        return delta + 2 * (forecast[0] - target) / tau
    else:
        return delta + 2 * (target - forecast[1]) / tau
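A sketch of how the score penalizes misses outside a 95% interval (tau = 0.05):

from pyFTS.benchmarks import Measures

interval = [90, 110]
print(Measures.winkler_score(0.05, 100, interval))  # inside: just the width, 20
print(Measures.winkler_score(0.05, 85, interval))   # 5 below: 20 + 2*5/0.05 = 220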
def winkler_mean(tau, targets, forecasts):
    """
    Mean Winkler score value of the forecast for a given tau-quantile of the targets

    :param tau: quantile value in the range (0,1)
    :param targets: list of target values
    :param forecasts: list of prediction intervals
    :return: float, the Winkler score mean for tau quantile
    """
    preds = [winkler_score(tau, targets[i], forecasts[i]) for i in np.arange(0, len(forecasts))]

    return np.nanmean(preds)


def brier_score(targets, densities):
    """Brier (1950). "Verification of Forecasts Expressed in Terms of Probability". Monthly Weather Review. 78: 1-3."""
    ret = []
    for ct, d in enumerate(densities):
        try:
            v = d.bin_index.find_ge(targets[ct])

            score = sum([d.distribution[k] ** 2 for k in d.bins if k != v])
            score += (d.distribution[v] - 1) ** 2
            ret.append(score)
        except ValueError:
            ret.append(sum([d.distribution[k] ** 2 for k in d.bins]))
    return sum(ret) / len(ret)


def pmf_to_cdf(density):
    ret = []
    for row in density.index:
        tmp = []
        prev = 0
        for col in density.columns:
            prev += density[col][row] if not np.isnan(density[col][row]) else 0
            tmp.append(prev)
        ret.append(tmp)
    df = pd.DataFrame(ret, columns=density.columns)
    return df


def heavyside(bin, target):
    return 1 if bin >= target else 0


def heavyside_cdf(bins, targets):
    ret = []
    for t in targets:
        result = [1 if b >= t else 0 for b in bins]
        ret.append(result)
    df = pd.DataFrame(ret, columns=bins)
    return df


def crps(targets, densities):
    """
    Continuous Ranked Probability Score

    :param targets: a list with the target values
    :param densities: a list of pyFTS.probabilistic.ProbabilityDistribution objects
    :return: float
    """
    _crps = float(0.0)
    if isinstance(densities, ProbabilityDistribution.ProbabilityDistribution):
        densities = [densities]

    l = len(densities[0].bins)
    n = len(densities)
    for ct, df in enumerate(densities):
        _crps += sum([(df.cummulative(bin) - (1 if bin >= targets[ct] else 0)) ** 2 for bin in df.bins])

    return _crps / float(l * n)
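A hedged sketch of the discrete CRPS sum. StubDistribution below is a hypothetical stand-in that mimics only the two members crps() touches (bins and cummulative()); in real use these come from pyFTS.probabilistic.ProbabilityDistribution:

from pyFTS.benchmarks import Measures

class StubDistribution:
    """Hypothetical stand-in exposing only what Measures.crps() uses."""
    def __init__(self, bins, cdf):
        self.bins = bins
        self._cdf = cdf

    def cummulative(self, bin):
        return self._cdf[bin]

d = StubDistribution([0, 1, 2, 3], {0: 0.1, 1: 0.5, 2: 0.9, 3: 1.0})
# Target 2: CRPS sums the squared gaps between the CDF and the step function at 2
print(Measures.crps([2], [d]))  # (0.1**2 + 0.5**2 + 0.1**2 + 0**2) / 4 = 0.0675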
def get_point_statistics(data, model, **kwargs):
    """
    Consolidate all measures for point forecasters

    :param data: test data
    :param model: FTS model with point forecasting capability
    :param kwargs:
    :return: a list with the RMSE, MAPE and U Statistic
    """

    steps_ahead = kwargs.get('steps_ahead', 1)
    kwargs['type'] = 'point'

    indexer = kwargs.get('indexer', None)

    if indexer is not None:
        ndata = np.array(indexer.get_data(data))
    elif model.is_multivariate:
        if not isinstance(data, pd.DataFrame):
            raise ValueError("Multivariate data must be a Pandas DataFrame!")
        ndata = data
    else:
        ndata = np.array(data)

    ret = list()

    if steps_ahead == 1:
        forecasts = model.predict(ndata, **kwargs)

        if model.is_multivariate and model.has_seasonality:
            ndata = model.indexer.get_data(ndata)
        elif model.is_multivariate:
            ndata = ndata[model.target_variable.data_label].values

        if not isinstance(forecasts, (list, np.ndarray)):
            forecasts = [forecasts]

        forecasts = np.array(forecasts[:-1])

        ret.append(np.round(rmse(ndata[model.max_lag:], forecasts), 2))
        ret.append(np.round(mape(ndata[model.max_lag:], forecasts), 2))
        ret.append(np.round(UStatistic(ndata[model.max_lag:], forecasts), 2))
    else:
        steps_ahead_sampler = kwargs.get('steps_ahead_sampler', 1)
        nforecasts = []
        for k in np.arange(model.order, len(ndata) - steps_ahead, steps_ahead_sampler):
            sample = ndata[k - model.order: k]
            tmp = model.predict(sample, **kwargs)
            nforecasts.append(tmp[-1])

        start = model.max_lag + steps_ahead - 1
        ret.append(np.round(rmse(ndata[start:-1:steps_ahead_sampler], nforecasts), 2))
        ret.append(np.round(mape(ndata[start:-1:steps_ahead_sampler], nforecasts), 2))
        ret.append(np.round(UStatistic(ndata[start:-1:steps_ahead_sampler], nforecasts), 2))

    return ret
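A hedged end-to-end sketch; the model and dataset choices (Chen's conventional FTS on TAIEX with a 20-set grid partitioner) are illustrative, not prescribed by this module:

from pyFTS.data import TAIEX
from pyFTS.partitioners import Grid
from pyFTS.models import chen
from pyFTS.benchmarks import Measures

data = TAIEX.get_data()
train, test = data[:3000], data[3000:3200]

partitioner = Grid.GridPartitioner(data=train, npart=20)
model = chen.ConventionalFTS(partitioner=partitioner)
model.fit(train)

rmse, mape, u = Measures.get_point_statistics(test, model)
print(rmse, mape, u)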
def get_interval_statistics(data, model, **kwargs):
    """
    Consolidate all measures for interval forecasters

    :param data: test data
    :param model: FTS model with interval forecasting capability
    :param kwargs:
    :return: a list with the sharpness, resolution, coverage, .05 pinball mean,
     .25 pinball mean, .75 pinball mean, .95 pinball mean, .05 Winkler mean and .25 Winkler mean.
    """

    steps_ahead = kwargs.get('steps_ahead', 1)
    kwargs['type'] = 'interval'

    ret = list()

    if steps_ahead == 1:
        forecasts = model.predict(data, **kwargs)
        ret.append(round(sharpness(forecasts), 2))
        ret.append(round(resolution(forecasts), 2))
        ret.append(round(coverage(data[model.order:], forecasts[:-1]), 2))
        ret.append(round(pinball_mean(0.05, data[model.max_lag:], forecasts[:-1]), 2))
        ret.append(round(pinball_mean(0.25, data[model.max_lag:], forecasts[:-1]), 2))
        ret.append(round(pinball_mean(0.75, data[model.max_lag:], forecasts[:-1]), 2))
        ret.append(round(pinball_mean(0.95, data[model.max_lag:], forecasts[:-1]), 2))
        ret.append(round(winkler_mean(0.05, data[model.max_lag:], forecasts[:-1]), 2))
        ret.append(round(winkler_mean(0.25, data[model.max_lag:], forecasts[:-1]), 2))
    else:
        forecasts = []
        for k in np.arange(model.order, len(data) - steps_ahead):
            sample = data[k - model.order: k]
            tmp = model.predict(sample, **kwargs)
            forecasts.append(tmp[-1])

        start = model.max_lag + steps_ahead - 1
        ret.append(round(sharpness(forecasts), 2))
        ret.append(round(resolution(forecasts), 2))
        ret.append(round(coverage(data[model.max_lag:], forecasts), 2))
        ret.append(round(pinball_mean(0.05, data[start:], forecasts), 2))
        ret.append(round(pinball_mean(0.25, data[start:], forecasts), 2))
        ret.append(round(pinball_mean(0.75, data[start:], forecasts), 2))
        ret.append(round(pinball_mean(0.95, data[start:], forecasts), 2))
        ret.append(round(winkler_mean(0.05, data[start:], forecasts), 2))
        ret.append(round(winkler_mean(0.25, data[start:], forecasts), 2))
    return ret
def get_distribution_statistics(data, model, **kwargs):
    """
    Get the CRPS statistic, execution time and Brier score for a probabilistic forecasting model

    :param data: test data
    :param model: FTS model with probabilistic forecasting capability
    :param kwargs:
    :return: a list with the CRPS, the execution time and the Brier score
    """
    steps_ahead = kwargs.get('steps_ahead', 1)
    kwargs['type'] = 'distribution'

    ret = list()

    if steps_ahead == 1:
        _s1 = time.time()
        forecasts = model.predict(data, **kwargs)
        _e1 = time.time()
        ret.append(round(crps(data[model.max_lag:], forecasts[:-1]), 3))
        ret.append(round(_e1 - _s1, 3))
        ret.append(round(brier_score(data[model.max_lag:], forecasts[:-1]), 3))
    else:
        skip = kwargs.get('steps_ahead_sampler', 1)
        forecasts = []
        _s1 = time.time()
        for k in np.arange(model.max_lag, len(data) - steps_ahead, skip):
            sample = data[k - model.max_lag: k]
            tmp = model.predict(sample, **kwargs)
            forecasts.append(tmp[-1])
        _e1 = time.time()

        start = model.max_lag + steps_ahead
        ret.append(round(crps(data[start:-1:skip], forecasts), 3))
        ret.append(round(_e1 - _s1, 3))
        ret.append(round(brier_score(data[start:-1:skip], forecasts), 3))
    return ret
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/ResidualAnalysis.html b/docs/_build/html/_modules/pyFTS/benchmarks/ResidualAnalysis.html
new file mode 100644
index 0000000..0ef4d42
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/benchmarks/ResidualAnalysis.html
@@ -0,0 +1,232 @@
[Generated Sphinx HTML page: "pyFTS.benchmarks.ResidualAnalysis — pyFTS 1.2.3 documentation"; markup stripped, rendered module source preserved below.]
Source code for pyFTS.benchmarks.ResidualAnalysis

#!/usr/bin/python
# -*- coding: utf8 -*-

"""Residual Analysis methods"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyFTS.common import Transformations, Util
from pyFTS.benchmarks import Measures
from scipy import stats


def residuals(targets, forecasts, order=1):
    """First order residuals"""
    return np.array(targets[order:]) - np.array(forecasts[:-1])
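A toy check of the alignment: forecasts[:-1] is compared against targets[order:]:

from pyFTS.benchmarks import ResidualAnalysis

targets = [10, 12, 13, 15, 16]
forecasts = [11, 12.5, 14, 15.5, 17]   # one-step-ahead predictions
print(ResidualAnalysis.residuals(targets, forecasts, order=1))
# [1.  0.5 1.  0.5] -> targets[1:] minus forecasts[:-1]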
def chi_squared(q, h):
    """
    Chi-squared test p-value

    :param q: the Q statistic
    :param h: degrees of freedom
    :return: the p-value of q under a chi-squared distribution with h degrees of freedom
    """
    p = stats.chi2.sf(q, h)
    return p


def compare_residuals(data, models):
    """
    Compare the residuals statistics of several models

    :param data: test data
    :param models: a list of fitted FTS models
    :return: a Pandas DataFrame with the Ljung-Box statistic and p-value for each model
    """
    from statsmodels.stats.diagnostic import acorr_ljungbox
    rows = []
    columns = ["Model", "Order", "AVG", "STD", "Box-Ljung", "p-value"]
    for mfts in models:
        forecasts = mfts.forecast(data)
        res = residuals(data, forecasts, mfts.order)
        mu = np.mean(res)
        sig = np.std(res)
        row = [mfts.shortname, mfts.order, mu, sig]
        stat, pval = acorr_ljungbox(res)
        row.extend([stat, pval])
        rows.append(row)
    return pd.DataFrame(rows, columns=columns)
def plotResiduals(targets, models, tam=[8, 8], save=False, file=None):
    """
    Plot residuals and statistics

    :param targets: test data
    :param models: a list of fitted FTS models
    :param tam: figure dimensions
    :param save: whether to save the figure to disk
    :param file: output file path
    :return:
    """
    fig, axes = plt.subplots(nrows=len(models), ncols=3, figsize=tam)
    for c, mfts in enumerate(models):
        if len(models) > 1:
            ax = axes[c]
        else:
            ax = axes
        forecasts = mfts.forecast(targets)
        res = residuals(targets, forecasts, mfts.order)
        mu = np.mean(res)
        sig = np.std(res)

        ax[0].set_title("Residuals Mean=" + str(mu) + " STD = " + str(sig))
        ax[0].set_ylabel('E')
        ax[0].set_xlabel('T')
        ax[0].plot(res)

        ax[1].set_title("Residuals Autocorrelation")
        ax[1].set_ylabel('ACS')
        ax[1].set_xlabel('Lag')
        ax[1].acorr(res)

        ax[2].set_title("Residuals Histogram")
        ax[2].set_ylabel('Freq')
        ax[2].set_xlabel('Bins')
        ax[2].hist(res)

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)


def plot_residuals(targets, models, tam=[8, 8], save=False, file=None):
    fig, axes = plt.subplots(nrows=len(models), ncols=3, figsize=tam)

    for c, mfts in enumerate(models, start=0):
        if len(models) > 1:
            ax = axes[c]
        else:
            ax = axes
        forecasts = mfts.forecast(targets)
        res = residuals(targets, forecasts, mfts.order)
        mu = np.mean(res)
        sig = np.std(res)

        if c == 0: ax[0].set_title("Residuals", size='large')
        ax[0].set_ylabel(mfts.shortname, size='large')
        ax[0].set_xlabel(' ')
        ax[0].plot(res)

        if c == 0: ax[1].set_title("Residuals Autocorrelation", size='large')
        ax[1].set_ylabel('ACS')
        ax[1].set_xlabel('Lag')
        ax[1].acorr(res)

        if c == 0: ax[2].set_title("Residuals Histogram", size='large')
        ax[2].set_ylabel('Freq')
        ax[2].set_xlabel('Bins')
        ax[2].hist(res)

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)


def single_plot_residuals(targets, forecasts, order, tam=[8, 8], save=False, file=None):
    fig, ax = plt.subplots(nrows=1, ncols=3, figsize=tam)

    res = residuals(targets, forecasts, order)

    ax[0].set_title("Residuals", size='large')
    ax[0].set_ylabel("Model", size='large')
    ax[0].set_xlabel(' ')
    ax[0].plot(res)

    ax[1].set_title("Residuals Autocorrelation", size='large')
    ax[1].set_ylabel('ACS')
    ax[1].set_xlabel('Lag')
    ax[1].acorr(res)

    ax[2].set_title("Residuals Histogram", size='large')
    ax[2].set_ylabel('Freq')
    ax[2].set_xlabel('Bins')
    ax[2].hist(res)

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/Util.html b/docs/_build/html/_modules/pyFTS/benchmarks/Util.html
new file mode 100644
index 0000000..f1be776
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/benchmarks/Util.html
@@ -0,0 +1,1286 @@
[Generated Sphinx HTML page: "pyFTS.benchmarks.Util — pyFTS 1.2.3 documentation"; markup stripped, rendered module source preserved below.]
Source code for pyFTS.benchmarks.Util

+"""
+Facilities for pyFTS Benchmark module
+"""
+
+import matplotlib as plt
+import matplotlib.cm as cmx
+import matplotlib.colors as pltcolors
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+import sqlite3
+#from mpl_toolkits.mplot3d import Axes3D
+
+
+from copy import deepcopy
+from pyFTS.common import Util
+
+
+
[docs]def open_benchmark_db(name): + conn = sqlite3.connect(name) + + #performance optimizations + conn.execute("PRAGMA journal_mode = WAL") + conn.execute("PRAGMA synchronous = NORMAL") + + create_benchmark_tables(conn) + return conn
+ + +
def create_benchmark_tables(conn):
    """Create the benchmarks table in the database, if it does not exist yet"""
    c = conn.cursor()

    c.execute('''CREATE TABLE if not exists benchmarks(
                 ID integer primary key, Date int, Dataset text, Tag text,
                 Type text, Model text, Transformation text, 'Order' int,
                 Scheme text, Partitions int,
                 Size int, Steps int, Method text, Measure text, Value real)''')

    conn.commit()


def insert_benchmark(data, conn):
    """Insert one benchmark result row into the database"""
    c = conn.cursor()

    c.execute("INSERT INTO benchmarks(Date, Dataset, Tag, Type, Model, "
              + "Transformation, 'Order', Scheme, Partitions, "
              + "Size, Steps, Method, Measure, Value) "
              + "VALUES(datetime('now'),?,?,?,?,?,?,?,?,?,?,?,?,?)", data)
    conn.commit()
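A self-contained sketch using an in-memory database; the row values are illustrative placeholders for the thirteen columns after Date:

from pyFTS.benchmarks import Util as bUtil

conn = bUtil.open_benchmark_db(':memory:')  # any SQLite file path also works
row = ['TAIEX', 'demo', 'point', 'CFTS', None,  # Dataset, Tag, Type, Model, Transformation
       1, 'Grid', '20', 100,                    # Order, Scheme, Partitions, Size
       1, 'sliding_window', 'RMSE', 42.0]       # Steps, Method, Measure, Value
bUtil.insert_benchmark(row, conn)
print(conn.execute("SELECT Model, Measure, Value FROM benchmarks").fetchall())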
def process_common_data(dataset, tag, type, job):
    """Extract the fields common to all benchmark database rows from a job result"""
    model = job["obj"]
    if model.benchmark_only:
        data = [dataset, tag, type, model.shortname,
                str(model.transformations[0]) if len(model.transformations) > 0 else None,
                model.order, None, None,
                None, job['steps'], job['method']]
    else:
        data = [dataset, tag, type, model.shortname,
                str(model.partitioner.transformation) if model.partitioner.transformation is not None else None,
                model.order, model.partitioner.name, str(model.partitioner.partitions),
                len(model), job['steps'], job['method']]

    return data


def get_dataframe_from_bd(file, filter):
    """Query the benchmarks database and return the results as a Pandas DataFrame"""
    con = sqlite3.connect(file)
    sql = "SELECT * from benchmarks"
    if filter is not None:
        sql += " WHERE " + filter
    return pd.read_sql_query(sql, con)
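The filter argument is raw SQL appended to the WHERE clause; 'benchmarks.db' below is a hypothetical file path:

from pyFTS.benchmarks import Util as bUtil

df = bUtil.get_dataframe_from_bd('benchmarks.db',
                                 "Dataset = 'TAIEX' AND Measure = 'RMSE'")
print(df.head())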
def extract_measure(dataframe, measure, data_columns):
    if not dataframe.empty:
        df = dataframe[(dataframe.Measure == measure)][data_columns]
        tmp = df.to_dict(orient="records")[0]
        ret = [k for k in tmp.values() if not np.isnan(k)]
        return ret
    else:
        return None


def find_best(dataframe, criteria, ascending):
    models = dataframe.Model.unique()
    orders = dataframe.Order.unique()
    ret = {}
    for m in models:
        for o in orders:
            mod = {}
            df = dataframe[(dataframe.Model == m) & (dataframe.Order == o)].sort_values(by=criteria, ascending=ascending)
            if not df.empty:
                _key = str(m) + str(o)
                best = df.loc[df.index[0]]
                mod['Model'] = m
                mod['Order'] = o
                mod['Scheme'] = best["Scheme"]
                mod['Partitions'] = best["Partitions"]

                ret[_key] = mod

    return ret


def analytic_tabular_dataframe(dataframe):
    experiments = len(dataframe.columns) - len(base_dataframe_columns()) - 1
    models = dataframe.Model.unique()
    orders = dataframe.Order.unique()
    schemes = dataframe.Scheme.unique()
    partitions = dataframe.Partitions.unique()
    steps = dataframe.Steps.unique()
    measures = dataframe.Measure.unique()
    data_columns = analytical_data_columns(experiments)

    ret = []

    for m in models:
        for o in orders:
            for s in schemes:
                for p in partitions:
                    for st in steps:
                        for ms in measures:
                            df = dataframe[(dataframe.Model == m) & (dataframe.Order == o)
                                           & (dataframe.Scheme == s) & (dataframe.Partitions == p)
                                           & (dataframe.Steps == st) & (dataframe.Measure == ms)]

                            if not df.empty:
                                for col in data_columns:
                                    mod = [m, o, s, p, st, ms, df[col].values[0]]
                                    ret.append(mod)

    dat = pd.DataFrame(ret, columns=tabular_dataframe_columns())
    return dat


def tabular_dataframe_columns():
    return ["Model", "Order", "Scheme", "Partitions", "Steps", "Measure", "Value"]


def base_dataframe_columns():
    return ["Model", "Order", "Scheme", "Partitions", "Size", "Steps", "Method"]


def point_dataframe_synthetic_columns():
    # list.extend() returns None, so the column lists must be concatenated instead
    return base_dataframe_columns() + ["RMSEAVG", "RMSESTD",
                                       "SMAPEAVG", "SMAPESTD", "UAVG", "USTD", "TIMEAVG", "TIMESTD"]


def point_dataframe_analytic_columns(experiments):
    columns = [str(k) for k in np.arange(0, experiments)]
    columns.insert(0, "Model")
    columns.insert(1, "Order")
    columns.insert(2, "Scheme")
    columns.insert(3, "Partitions")
    columns.insert(4, "Size")
    columns.insert(5, "Steps")
    columns.insert(6, "Method")
    columns.insert(7, "Measure")
    return columns


def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u, steps, method):
    """
    Create a dataframe to store the benchmark results

    :param experiments: dictionary with the execution results
    :param file: output CSV file path
    :param objs: dictionary of benchmarked models
    :param rmse: dictionary of RMSE results per model
    :param save: whether to save the dataframe to disk
    :param synthetic: if True, aggregate the results (mean/std); otherwise keep one row per measure
    :param smape: dictionary of SMAPE results per model
    :param times: dictionary of execution times per model
    :param u: dictionary of U Statistic results per model
    :return: a Pandas DataFrame with the benchmark results
    """
    ret = []

    if synthetic:

        for k in sorted(objs.keys()):
            try:
                mod = []
                mfts = objs[k]
                mod.append(mfts.shortname)
                mod.append(mfts.order)
                if not mfts.benchmark_only:
                    mod.append(mfts.partitioner.name)
                    mod.append(mfts.partitioner.partitions)
                    mod.append(len(mfts))
                else:
                    mod.append('-')
                    mod.append('-')
                    mod.append('-')
                mod.append(steps[k])
                mod.append(method[k])
                mod.append(np.round(np.nanmean(rmse[k]), 2))
                mod.append(np.round(np.nanstd(rmse[k]), 2))
                mod.append(np.round(np.nanmean(smape[k]), 2))
                mod.append(np.round(np.nanstd(smape[k]), 2))
                mod.append(np.round(np.nanmean(u[k]), 2))
                mod.append(np.round(np.nanstd(u[k]), 2))
                mod.append(np.round(np.nanmean(times[k]), 4))
                mod.append(np.round(np.nanstd(times[k]), 4))
                ret.append(mod)
            except Exception as ex:
                print("Error saving ", k)
                print("Exception ", ex)

        columns = point_dataframe_synthetic_columns()
    else:
        for k in sorted(objs.keys()):
            try:
                mfts = objs[k]
                n = mfts.shortname
                o = mfts.order
                if not mfts.benchmark_only:
                    s = mfts.partitioner.name
                    p = mfts.partitioner.partitions
                    l = len(mfts)
                else:
                    s = '-'
                    p = '-'
                    l = '-'
                st = steps[k]
                mt = method[k]
                tmp = [n, o, s, p, l, st, mt, 'RMSE']
                tmp.extend(rmse[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'SMAPE']
                tmp.extend(smape[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'U']
                tmp.extend(u[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'TIME']
                tmp.extend(times[k])
                ret.append(deepcopy(tmp))
            except Exception as ex:
                print("Error saving ", k)
                print("Exception ", ex)
        columns = point_dataframe_analytic_columns(experiments)
    try:
        dat = pd.DataFrame(ret, columns=columns)
        if save: dat.to_csv(Util.uniquefilename(file), sep=";", index=False)
        return dat
    except Exception as ex:
        print(ex)
        print(experiments)
        print(columns)
        print(ret)
def cast_dataframe_to_synthetic(infile, outfile, experiments, type):
    if type == 'point':
        analytic_columns = point_dataframe_analytic_columns
        synthetic_columns = point_dataframe_synthetic_columns
        synthetize_measures = cast_dataframe_to_synthetic_point
    elif type == 'interval':
        analytic_columns = interval_dataframe_analytic_columns
        synthetic_columns = interval_dataframe_synthetic_columns
        synthetize_measures = cast_dataframe_to_synthetic_interval
    elif type == 'distribution':
        analytic_columns = probabilistic_dataframe_analytic_columns
        synthetic_columns = probabilistic_dataframe_synthetic_columns
        synthetize_measures = cast_dataframe_to_synthetic_probabilistic
    else:
        raise ValueError("Type parameter has an unknown value!")

    columns = analytic_columns(experiments)
    dat = pd.read_csv(infile, sep=";", usecols=columns)
    models = dat.Model.unique()
    orders = dat.Order.unique()
    schemes = dat.Scheme.unique()
    partitions = dat.Partitions.unique()
    steps = dat.Steps.unique()
    methods = dat.Method.unique()

    data_columns = analytical_data_columns(experiments)

    ret = []

    for m in models:
        for o in orders:
            for s in schemes:
                for p in partitions:
                    for st in steps:
                        for mt in methods:
                            df = dat[(dat.Model == m) & (dat.Order == o) & (dat.Scheme == s) &
                                     (dat.Partitions == p) & (dat.Steps == st) & (dat.Method == mt)]
                            if not df.empty:
                                mod = synthetize_measures(df, data_columns)
                                mod.insert(0, m)
                                mod.insert(1, o)
                                mod.insert(2, s)
                                mod.insert(3, p)
                                mod.insert(4, df.iat[0, 5])
                                mod.insert(5, st)
                                mod.insert(6, mt)
                                ret.append(mod)

    dat = pd.DataFrame(ret, columns=synthetic_columns())
    dat.to_csv(outfile, sep=";", index=False)


def cast_dataframe_to_synthetic_point(df, data_columns):
    ret = []
    rmse = extract_measure(df, 'RMSE', data_columns)
    smape = extract_measure(df, 'SMAPE', data_columns)
    u = extract_measure(df, 'U', data_columns)
    times = extract_measure(df, 'TIME', data_columns)
    ret.append(np.round(np.nanmean(rmse), 2))
    ret.append(np.round(np.nanstd(rmse), 2))
    ret.append(np.round(np.nanmean(smape), 2))
    ret.append(np.round(np.nanstd(smape), 2))
    ret.append(np.round(np.nanmean(u), 2))
    ret.append(np.round(np.nanstd(u), 2))
    ret.append(np.round(np.nanmean(times), 4))
    ret.append(np.round(np.nanstd(times), 4))

    return ret


def analytical_data_columns(experiments):
    data_columns = [str(k) for k in np.arange(0, experiments)]
    return data_columns


def scale_params(data):
    vmin = np.nanmin(data)
    vlen = np.nanmax(data) - vmin
    return (vmin, vlen)


def scale(data, params):
    ndata = [(k - params[0]) / params[1] for k in data]
    return ndata
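scale_params and scale together implement a plain min-max normalization, used to make measures from different experiments comparable:

from pyFTS.benchmarks import Util as bUtil

data = [10., 20., 40.]
params = bUtil.scale_params(data)  # (min, max - min) = (10.0, 30.0)
print(bUtil.scale(data, params))   # [0.0, 0.333..., 1.0]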
def stats(measure, data):
    print(measure, np.nanmean(data), np.nanstd(data))


def unified_scaled_point(experiments, tam, save=False, file=None,
                         sort_columns=['UAVG', 'RMSEAVG', 'USTD', 'RMSESTD'],
                         sort_ascend=[1, 1, 1, 1], save_best=False,
                         ignore=None, replace=None):

    fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)

    axes[0].set_title('RMSE')
    axes[1].set_title('SMAPE')
    axes[2].set_title('U Statistic')

    models = {}

    for experiment in experiments:

        mdl = {}

        dat_syn = pd.read_csv(experiment[0], sep=";", usecols=point_dataframe_synthetic_columns())

        bests = find_best(dat_syn, sort_columns, sort_ascend)

        dat_ana = pd.read_csv(experiment[1], sep=";", usecols=point_dataframe_analytic_columns(experiment[2]))

        rmse = []
        smape = []
        u = []
        times = []

        data_columns = analytical_data_columns(experiment[2])

        for b in sorted(bests.keys()):
            if check_ignore_list(b, ignore):
                continue

            if b not in models:
                models[b] = {}
                models[b]['rmse'] = []
                models[b]['smape'] = []
                models[b]['u'] = []
                models[b]['times'] = []

            if b not in mdl:
                mdl[b] = {}
                mdl[b]['rmse'] = []
                mdl[b]['smape'] = []
                mdl[b]['u'] = []
                mdl[b]['times'] = []

            best = bests[b]
            tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
                          & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
            tmpl = extract_measure(tmp, 'RMSE', data_columns)
            mdl[b]['rmse'].extend(tmpl)
            rmse.extend(tmpl)
            tmpl = extract_measure(tmp, 'SMAPE', data_columns)
            mdl[b]['smape'].extend(tmpl)
            smape.extend(tmpl)
            tmpl = extract_measure(tmp, 'U', data_columns)
            mdl[b]['u'].extend(tmpl)
            u.extend(tmpl)
            tmpl = extract_measure(tmp, 'TIME', data_columns)
            mdl[b]['times'].extend(tmpl)
            times.extend(tmpl)

            models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace)

        print("GLOBAL")
        rmse_param = scale_params(rmse)
        stats("rmse", rmse)
        smape_param = scale_params(smape)
        stats("smape", smape)
        u_param = scale_params(u)
        stats("u", u)
        times_param = scale_params(times)

        for key in sorted(models.keys()):
            models[key]['rmse'].extend(scale(mdl[key]['rmse'], rmse_param))
            models[key]['smape'].extend(scale(mdl[key]['smape'], smape_param))
            models[key]['u'].extend(scale(mdl[key]['u'], u_param))
            models[key]['times'].extend(scale(mdl[key]['times'], times_param))

    rmse = []
    smape = []
    u = []
    times = []
    labels = []
    for key in sorted(models.keys()):
        print(key)
        rmse.append(models[key]['rmse'])
        stats("rmse", models[key]['rmse'])
        smape.append(models[key]['smape'])
        stats("smape", models[key]['smape'])
        u.append(models[key]['u'])
        stats("u", models[key]['u'])
        times.append(models[key]['times'])
        labels.append(models[key]['label'])

    axes[0].boxplot(rmse, labels=labels, autorange=True, showmeans=True)
    axes[0].set_title("RMSE")
    axes[1].boxplot(smape, labels=labels, autorange=True, showmeans=True)
    axes[1].set_title("SMAPE")
    axes[2].boxplot(u, labels=labels, autorange=True, showmeans=True)
    axes[2].set_title("U Statistic")

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)


def plot_dataframe_point(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
                         sort_columns=['UAVG', 'RMSEAVG', 'USTD', 'RMSESTD'],
                         sort_ascend=[1, 1, 1, 1], save_best=False,
                         ignore=None, replace=None):

    fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)

    axes[0].set_title('RMSE')
    axes[1].set_title('SMAPE')
    axes[2].set_title('U Statistic')

    dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=point_dataframe_synthetic_columns())

    bests = find_best(dat_syn, sort_columns, sort_ascend)

    dat_ana = pd.read_csv(file_analytic, sep=";", usecols=point_dataframe_analytic_columns(experiments))

    data_columns = analytical_data_columns(experiments)

    if save_best:
        dat = pd.DataFrame.from_dict(bests, orient='index')
        dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic", "best")), sep=";", index=False)

    rmse = []
    smape = []
    u = []
    times = []
    labels = []

    for b in sorted(bests.keys()):
        if check_ignore_list(b, ignore):
            continue

        best = bests[b]
        tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
                      & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
        rmse.append(extract_measure(tmp, 'RMSE', data_columns))
        smape.append(extract_measure(tmp, 'SMAPE', data_columns))
        u.append(extract_measure(tmp, 'U', data_columns))
        times.append(extract_measure(tmp, 'TIME', data_columns))

        labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]), replace))

    axes[0].boxplot(rmse, labels=labels, autorange=True, showmeans=True)
    axes[0].set_title("RMSE")
    axes[1].boxplot(smape, labels=labels, autorange=True, showmeans=True)
    axes[1].set_title("SMAPE")
    axes[2].boxplot(u, labels=labels, autorange=True, showmeans=True)
    axes[2].set_title("U Statistic")

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)


def check_replace_list(m, replace):
    if replace is not None:
        for r in replace:
            if r[0] in m:
                return r[1]
    return m


def check_ignore_list(b, ignore):
    flag = False
    if ignore is not None:
        for i in ignore:
            if i in b:
                flag = True
    return flag
def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, synthetic, times,
                            q05, q25, q75, q95, steps, method):
    ret = []
    if synthetic:
        for k in sorted(objs.keys()):
            mod = []
            mfts = objs[k]
            mod.append(mfts.shortname)
            mod.append(mfts.order)
            l = len(mfts)
            if not mfts.benchmark_only:
                mod.append(mfts.partitioner.name)
                mod.append(mfts.partitioner.partitions)
                mod.append(l)
            else:
                mod.append('-')
                mod.append('-')
                mod.append('-')
            mod.append(steps[k])
            mod.append(method[k])
            mod.append(round(np.nanmean(sharpness[k]), 2))
            mod.append(round(np.nanstd(sharpness[k]), 2))
            mod.append(round(np.nanmean(resolution[k]), 2))
            mod.append(round(np.nanstd(resolution[k]), 2))
            mod.append(round(np.nanmean(coverage[k]), 2))
            mod.append(round(np.nanstd(coverage[k]), 2))
            mod.append(round(np.nanmean(times[k]), 2))
            mod.append(round(np.nanstd(times[k]), 2))
            mod.append(round(np.nanmean(q05[k]), 2))
            mod.append(round(np.nanstd(q05[k]), 2))
            mod.append(round(np.nanmean(q25[k]), 2))
            mod.append(round(np.nanstd(q25[k]), 2))
            mod.append(round(np.nanmean(q75[k]), 2))
            mod.append(round(np.nanstd(q75[k]), 2))
            mod.append(round(np.nanmean(q95[k]), 2))
            mod.append(round(np.nanstd(q95[k]), 2))
            mod.append(l)
            ret.append(mod)

        columns = interval_dataframe_synthetic_columns()
    else:
        for k in sorted(objs.keys()):
            try:
                mfts = objs[k]
                n = mfts.shortname
                o = mfts.order
                if not mfts.benchmark_only:
                    s = mfts.partitioner.name
                    p = mfts.partitioner.partitions
                    l = len(mfts)
                else:
                    s = '-'
                    p = '-'
                    l = '-'
                st = steps[k]
                mt = method[k]
                tmp = [n, o, s, p, l, st, mt, 'Sharpness']
                tmp.extend(sharpness[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'Resolution']
                tmp.extend(resolution[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'Coverage']
                tmp.extend(coverage[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'TIME']
                tmp.extend(times[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'Q05']
                tmp.extend(q05[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'Q25']
                tmp.extend(q25[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'Q75']
                tmp.extend(q75[k])
                ret.append(deepcopy(tmp))
                tmp = [n, o, s, p, l, st, mt, 'Q95']
                tmp.extend(q95[k])
                ret.append(deepcopy(tmp))
            except Exception as ex:
                print("Error saving ", k)
                print("Exception ", ex)
        columns = interval_dataframe_analytic_columns(experiments)
    dat = pd.DataFrame(ret, columns=columns)
    if save: dat.to_csv(Util.uniquefilename(file), sep=";")
    return dat


def interval_dataframe_analytic_columns(experiments):
    columns = [str(k) for k in np.arange(0, experiments)]
    columns.insert(0, "Model")
    columns.insert(1, "Order")
    columns.insert(2, "Scheme")
    columns.insert(3, "Partitions")
    columns.insert(4, "Size")
    columns.insert(5, "Steps")
    columns.insert(6, "Method")
    columns.insert(7, "Measure")
    return columns


def interval_dataframe_synthetic_columns():
    columns = ["Model", "Order", "Scheme", "Partitions", "SIZE", "Steps", "Method",
               "SHARPAVG", "SHARPSTD", "RESAVG", "RESSTD", "COVAVG", "COVSTD",
               "TIMEAVG", "TIMESTD", "Q05AVG", "Q05STD", "Q25AVG", "Q25STD",
               "Q75AVG", "Q75STD", "Q95AVG", "Q95STD"]
    return columns


def cast_dataframe_to_synthetic_interval(df, data_columns):
    sharpness = extract_measure(df, 'Sharpness', data_columns)
    resolution = extract_measure(df, 'Resolution', data_columns)
    coverage = extract_measure(df, 'Coverage', data_columns)
    times = extract_measure(df, 'TIME', data_columns)
    q05 = extract_measure(df, 'Q05', data_columns)
    q25 = extract_measure(df, 'Q25', data_columns)
    q75 = extract_measure(df, 'Q75', data_columns)
    q95 = extract_measure(df, 'Q95', data_columns)
    ret = []
    ret.append(np.round(np.nanmean(sharpness), 2))
    ret.append(np.round(np.nanstd(sharpness), 2))
    ret.append(np.round(np.nanmean(resolution), 2))
    ret.append(np.round(np.nanstd(resolution), 2))
    ret.append(np.round(np.nanmean(coverage), 2))
    ret.append(np.round(np.nanstd(coverage), 2))
    ret.append(np.round(np.nanmean(times), 4))
    ret.append(np.round(np.nanstd(times), 4))
    ret.append(np.round(np.nanmean(q05), 4))
    ret.append(np.round(np.nanstd(q05), 4))
    ret.append(np.round(np.nanmean(q25), 4))
    ret.append(np.round(np.nanstd(q25), 4))
    ret.append(np.round(np.nanmean(q75), 4))
    ret.append(np.round(np.nanstd(q75), 4))
    ret.append(np.round(np.nanmean(q95), 4))
    ret.append(np.round(np.nanstd(q95), 4))
    return ret
def unified_scaled_interval(experiments, tam, save=False, file=None,
                            sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'],
                            sort_ascend=[True, False, True, True], save_best=False,
                            ignore=None, replace=None):
    fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)

    axes[0].set_title('Sharpness')
    axes[1].set_title('Resolution')
    axes[2].set_title('Coverage')

    models = {}

    for experiment in experiments:

        mdl = {}

        dat_syn = pd.read_csv(experiment[0], sep=";", usecols=interval_dataframe_synthetic_columns())

        bests = find_best(dat_syn, sort_columns, sort_ascend)

        dat_ana = pd.read_csv(experiment[1], sep=";", usecols=interval_dataframe_analytic_columns(experiment[2]))

        sharpness = []
        resolution = []
        coverage = []
        times = []

        data_columns = analytical_data_columns(experiment[2])

        for b in sorted(bests.keys()):
            if check_ignore_list(b, ignore):
                continue

            if b not in models:
                models[b] = {}
                models[b]['sharpness'] = []
                models[b]['resolution'] = []
                models[b]['coverage'] = []
                models[b]['times'] = []

            if b not in mdl:
                mdl[b] = {}
                mdl[b]['sharpness'] = []
                mdl[b]['resolution'] = []
                mdl[b]['coverage'] = []
                mdl[b]['times'] = []

            best = bests[b]
            print(best)
            tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
                          & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
            tmpl = extract_measure(tmp, 'Sharpness', data_columns)
            mdl[b]['sharpness'].extend(tmpl)
            sharpness.extend(tmpl)
            tmpl = extract_measure(tmp, 'Resolution', data_columns)
            mdl[b]['resolution'].extend(tmpl)
            resolution.extend(tmpl)
            tmpl = extract_measure(tmp, 'Coverage', data_columns)
            mdl[b]['coverage'].extend(tmpl)
            coverage.extend(tmpl)
            tmpl = extract_measure(tmp, 'TIME', data_columns)
            mdl[b]['times'].extend(tmpl)
            times.extend(tmpl)

            models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace)

        sharpness_param = scale_params(sharpness)
        resolution_param = scale_params(resolution)
        coverage_param = scale_params(coverage)
        times_param = scale_params(times)

        for key in sorted(models.keys()):
            models[key]['sharpness'].extend(scale(mdl[key]['sharpness'], sharpness_param))
            models[key]['resolution'].extend(scale(mdl[key]['resolution'], resolution_param))
            models[key]['coverage'].extend(scale(mdl[key]['coverage'], coverage_param))
            models[key]['times'].extend(scale(mdl[key]['times'], times_param))

    sharpness = []
    resolution = []
    coverage = []
    times = []
    labels = []
    for key in sorted(models.keys()):
        sharpness.append(models[key]['sharpness'])
        resolution.append(models[key]['resolution'])
        coverage.append(models[key]['coverage'])
        times.append(models[key]['times'])
        labels.append(models[key]['label'])

    axes[0].boxplot(sharpness, labels=labels, autorange=True, showmeans=True)
    axes[1].boxplot(resolution, labels=labels, autorange=True, showmeans=True)
    axes[2].boxplot(coverage, labels=labels, autorange=True, showmeans=True)

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)


def plot_dataframe_interval(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
                            sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'],
                            sort_ascend=[True, False, True, True], save_best=False,
                            ignore=None, replace=None):

    fig, axes = plt.subplots(nrows=3, ncols=1, figsize=tam)

    axes[0].set_title('Sharpness')
    axes[1].set_title('Resolution')
    axes[2].set_title('Coverage')

    dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=interval_dataframe_synthetic_columns())

    bests = find_best(dat_syn, sort_columns, sort_ascend)

    dat_ana = pd.read_csv(file_analytic, sep=";", usecols=interval_dataframe_analytic_columns(experiments))

    data_columns = analytical_data_columns(experiments)

    if save_best:
        dat = pd.DataFrame.from_dict(bests, orient='index')
        dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic", "best")), sep=";", index=False)

    sharpness = []
    resolution = []
    coverage = []
    times = []
    labels = []
    bounds_shp = []

    for b in sorted(bests.keys()):
        if check_ignore_list(b, ignore):
            continue
        best = bests[b]
        df = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
                     & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
        sharpness.append(extract_measure(df, 'Sharpness', data_columns))
        resolution.append(extract_measure(df, 'Resolution', data_columns))
        coverage.append(extract_measure(df, 'Coverage', data_columns))
        times.append(extract_measure(df, 'TIME', data_columns))
        labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]), replace))

    axes[0].boxplot(sharpness, labels=labels, autorange=True, showmeans=True)
    axes[0].set_title("Sharpness")
    axes[1].boxplot(resolution, labels=labels, autorange=True, showmeans=True)
    axes[1].set_title("Resolution")
    axes[2].boxplot(coverage, labels=labels, autorange=True, showmeans=True)
    axes[2].set_title("Coverage")
    axes[2].set_ylim([0, 1.1])

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)


def unified_scaled_interval_pinball(experiments, tam, save=False, file=None,
                                    sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'],
                                    sort_ascend=[True, False, True, True], save_best=False,
                                    ignore=None, replace=None):
    fig, axes = plt.subplots(nrows=1, ncols=4, figsize=tam)
    axes[0].set_title(r'$\tau=0.05$')
    axes[1].set_title(r'$\tau=0.25$')
    axes[2].set_title(r'$\tau=0.75$')
    axes[3].set_title(r'$\tau=0.95$')
    models = {}

    for experiment in experiments:

        mdl = {}

        dat_syn = pd.read_csv(experiment[0], sep=";", usecols=interval_dataframe_synthetic_columns())

        bests = find_best(dat_syn, sort_columns, sort_ascend)

        dat_ana = pd.read_csv(experiment[1], sep=";", usecols=interval_dataframe_analytic_columns(experiment[2]))

        q05 = []
        q25 = []
        q75 = []
        q95 = []

        data_columns = analytical_data_columns(experiment[2])

        for b in sorted(bests.keys()):
            if check_ignore_list(b, ignore):
                continue

            if b not in models:
                models[b] = {}
                models[b]['q05'] = []
                models[b]['q25'] = []
                models[b]['q75'] = []
                models[b]['q95'] = []

            if b not in mdl:
                mdl[b] = {}
                mdl[b]['q05'] = []
                mdl[b]['q25'] = []
                mdl[b]['q75'] = []
                mdl[b]['q95'] = []

            best = bests[b]
            print(best)
            tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
                          & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
            tmpl = extract_measure(tmp, 'Q05', data_columns)
            mdl[b]['q05'].extend(tmpl)
            q05.extend(tmpl)
            tmpl = extract_measure(tmp, 'Q25', data_columns)
            mdl[b]['q25'].extend(tmpl)
            q25.extend(tmpl)
            tmpl = extract_measure(tmp, 'Q75', data_columns)
            mdl[b]['q75'].extend(tmpl)
            q75.extend(tmpl)
            tmpl = extract_measure(tmp, 'Q95', data_columns)
            mdl[b]['q95'].extend(tmpl)
            q95.extend(tmpl)

            models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace)

        q05_param = scale_params(q05)
        q25_param = scale_params(q25)
        q75_param = scale_params(q75)
        q95_param = scale_params(q95)

        for key in sorted(models.keys()):
            models[key]['q05'].extend(scale(mdl[key]['q05'], q05_param))
            models[key]['q25'].extend(scale(mdl[key]['q25'], q25_param))
            models[key]['q75'].extend(scale(mdl[key]['q75'], q75_param))
            models[key]['q95'].extend(scale(mdl[key]['q95'], q95_param))

    q05 = []
    q25 = []
    q75 = []
    q95 = []
    labels = []
    for key in sorted(models.keys()):
        q05.append(models[key]['q05'])
        q25.append(models[key]['q25'])
        q75.append(models[key]['q75'])
        q95.append(models[key]['q95'])
        labels.append(models[key]['label'])

    axes[0].boxplot(q05, labels=labels, vert=False, autorange=True, showmeans=True)
    axes[1].boxplot(q25, labels=labels, vert=False, autorange=True, showmeans=True)
    axes[2].boxplot(q75, labels=labels, vert=False, autorange=True, showmeans=True)
    axes[3].boxplot(q95, labels=labels, vert=False, autorange=True, showmeans=True)

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)


def plot_dataframe_interval_pinball(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
                                    sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'],
                                    sort_ascend=[True, False, True, True], save_best=False,
                                    ignore=None, replace=None):

    fig, axes = plt.subplots(nrows=1, ncols=4, figsize=tam)
    axes[0].set_title(r'$\tau=0.05$')
    axes[1].set_title(r'$\tau=0.25$')
    axes[2].set_title(r'$\tau=0.75$')
    axes[3].set_title(r'$\tau=0.95$')

    dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=interval_dataframe_synthetic_columns())

    bests = find_best(dat_syn, sort_columns, sort_ascend)

    dat_ana = pd.read_csv(file_analytic, sep=";", usecols=interval_dataframe_analytic_columns(experiments))

    data_columns = analytical_data_columns(experiments)

    if save_best:
        dat = pd.DataFrame.from_dict(bests, orient='index')
        dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic", "best")), sep=";", index=False)

    q05 = []
    q25 = []
    q75 = []
    q95 = []
    labels = []

    for b in sorted(bests.keys()):
        if check_ignore_list(b, ignore):
            continue
        best = bests[b]
        df = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
                     & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
        q05.append(extract_measure(df, 'Q05', data_columns))
        q25.append(extract_measure(df, 'Q25', data_columns))
        q75.append(extract_measure(df, 'Q75', data_columns))
        q95.append(extract_measure(df, 'Q95', data_columns))
        labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]), replace))

    axes[0].boxplot(q05, labels=labels, vert=False, autorange=True, showmeans=True)
    axes[1].boxplot(q25, labels=labels, vert=False, autorange=True, showmeans=True)
    axes[2].boxplot(q75, labels=labels, vert=False, autorange=True, showmeans=True)
    axes[3].boxplot(q95, labels=labels, vert=False, autorange=True, showmeans=True)

    plt.tight_layout()

    Util.show_and_save_image(fig, file, save)
[docs]def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, synthetic, steps, method): + """ + Save benchmark results for m-step ahead probabilistic forecasters + :param experiments: + :param file: + :param objs: + :param crps_interval: + :param crps_distr: + :param times: + :param times2: + :param save: + :param synthetic: + :return: + """ + ret = [] + + if synthetic: + + for k in sorted(objs.keys()): + try: + ret = [] + for k in sorted(objs.keys()): + try: + mod = [] + mfts = objs[k] + mod.append(mfts.shortname) + mod.append(mfts.order) + if not mfts.benchmark_only: + mod.append(mfts.partitioner.name) + mod.append(mfts.partitioner.partitions) + mod.append(len(mfts)) + else: + mod.append('-') + mod.append('-') + mod.append('-') + mod.append(steps[k]) + mod.append(method[k]) + mod.append(np.round(np.nanmean(crps[k]), 2)) + mod.append(np.round(np.nanstd(crps[k]), 2)) + mod.append(np.round(np.nanmean(times[k]), 4)) + mod.append(np.round(np.nanstd(times[k]), 4)) + ret.append(mod) + except Exception as e: + print('Erro: %s' % e) + except Exception as ex: + print("Erro ao salvar ", k) + print("Exceção ", ex) + + columns = probabilistic_dataframe_synthetic_columns() + else: + for k in sorted(objs.keys()): + try: + mfts = objs[k] + n = mfts.shortname + o = mfts.order + if not mfts.benchmark_only: + s = mfts.partitioner.name + p = mfts.partitioner.partitions + l = len(mfts) + else: + s = '-' + p = '-' + l = '-' + st = steps[k] + mt = method[k] + tmp = [n, o, s, p, l, st, mt, 'CRPS'] + tmp.extend(crps[k]) + ret.append(deepcopy(tmp)) + tmp = [n, o, s, p, l, st, mt, 'TIME'] + tmp.extend(times[k]) + ret.append(deepcopy(tmp)) + except Exception as ex: + print("Erro ao salvar ", k) + print("Exceção ", ex) + columns = probabilistic_dataframe_analytic_columns(experiments) + dat = pd.DataFrame(ret, columns=columns) + if save: dat.to_csv(Util.uniquefilename(file), sep=";") + return dat
+ + +
[docs]def probabilistic_dataframe_analytic_columns(experiments): + columns = [str(k) for k in np.arange(0, experiments)] + columns.insert(0, "Model") + columns.insert(1, "Order") + columns.insert(2, "Scheme") + columns.insert(3, "Partitions") + columns.insert(4, "Size") + columns.insert(5, "Steps") + columns.insert(6, "Method") + columns.insert(7, "Measure") + return columns
+ + +
[docs]def probabilistic_dataframe_synthetic_columns(): + columns = ["Model", "Order", "Scheme", "Partitions","Size", "Steps", "Method", "CRPSAVG", "CRPSSTD", + "TIMEAVG", "TIMESTD"] + return columns
+ + +
[docs]def cast_dataframe_to_synthetic_probabilistic(df, data_columns): + crps1 = extract_measure(df, 'CRPS', data_columns) + times1 = extract_measure(df, 'TIME', data_columns) + ret = [] + ret.append(np.round(np.nanmean(crps1), 2)) + ret.append(np.round(np.nanstd(crps1), 2)) + ret.append(np.round(np.nanmean(times1), 2)) + ret.append(np.round(np.nanstd(times1), 2)) + return ret
+ + +
[docs]def unified_scaled_probabilistic(experiments, tam, save=False, file=None, + sort_columns=['CRPSAVG', 'CRPSSTD'], + sort_ascend=[True, True], save_best=False, + ignore=None, replace=None): + fig, axes = plt.subplots(nrows=1, ncols=1, figsize=tam) + + axes.set_title('CRPS') + #axes[1].set_title('CRPS Distribution Ahead') + + models = {} + + for experiment in experiments: + + print(experiment) + + mdl = {} + + dat_syn = pd.read_csv(experiment[0], sep=";", usecols=probabilistic_dataframe_synthetic_columns()) + + bests = find_best(dat_syn, sort_columns, sort_ascend) + + dat_ana = pd.read_csv(experiment[1], sep=";", usecols=probabilistic_dataframe_analytic_columns(experiment[2])) + + crps1 = [] + crps2 = [] + + data_columns = analytical_data_columns(experiment[2]) + + for b in sorted(bests.keys()): + if check_ignore_list(b, ignore): + continue + + if b not in models: + models[b] = {} + models[b]['crps1'] = [] + models[b]['crps2'] = [] + + if b not in mdl: + mdl[b] = {} + mdl[b]['crps1'] = [] + mdl[b]['crps2'] = [] + + best = bests[b] + + print(best) + + tmp = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"]) + & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])] + tmpl = extract_measure(tmp, 'CRPS_Interval', data_columns) + mdl[b]['crps1'].extend(tmpl) + crps1.extend(tmpl) + tmpl = extract_measure(tmp, 'CRPS_Distribution', data_columns) + mdl[b]['crps2'].extend(tmpl) + crps2.extend(tmpl) + + models[b]['label'] = check_replace_list(best["Model"] + " " + str(best["Order"]), replace) + + crps1_param = scale_params(crps1) + crps2_param = scale_params(crps2) + + for key in sorted(mdl.keys()): + print(key) + models[key]['crps1'].extend(scale(mdl[key]['crps1'], crps1_param)) + models[key]['crps2'].extend(scale(mdl[key]['crps2'], crps2_param)) + + crps1 = [] + crps2 = [] + labels = [] + for key in sorted(models.keys()): + crps1.append(models[key]['crps1']) + crps2.append(models[key]['crps2']) + labels.append(models[key]['label']) + + axes[0].boxplot(crps1, labels=labels, autorange=True, showmeans=True) + axes[1].boxplot(crps2, labels=labels, autorange=True, showmeans=True) + + plt.tight_layout() + + Util.show_and_save_image(fig, file, save)
+ + + +
[docs]def plot_dataframe_probabilistic(file_synthetic, file_analytic, experiments, tam, save=False, file=None,
                                 sort_columns=['CRPSAVG', 'CRPSSTD'],
                                 sort_ascend=[True, True], save_best=False,
                                 ignore=None, replace=None):

    fig, axes = plt.subplots(nrows=2, ncols=1, figsize=tam)

    axes[0].set_title('CRPS')
    axes[1].set_title('TIME')

    dat_syn = pd.read_csv(file_synthetic, sep=";", usecols=probabilistic_dataframe_synthetic_columns())

    bests = find_best(dat_syn, sort_columns, sort_ascend)

    dat_ana = pd.read_csv(file_analytic, sep=";", usecols=probabilistic_dataframe_analytic_columns(experiments))

    data_columns = analytical_data_columns(experiments)

    if save_best:
        dat = pd.DataFrame.from_dict(bests, orient='index')
        dat.to_csv(Util.uniquefilename(file_synthetic.replace("synthetic", "best")), sep=";", index=False)

    crps = []
    times = []
    labels = []

    for b in sorted(bests.keys()):
        if check_ignore_list(b, ignore):
            continue
        best = bests[b]
        df = dat_ana[(dat_ana.Model == best["Model"]) & (dat_ana.Order == best["Order"])
                     & (dat_ana.Scheme == best["Scheme"]) & (dat_ana.Partitions == best["Partitions"])]
        crps.append(extract_measure(df, 'CRPS', data_columns))
        times.append(extract_measure(df, 'TIME', data_columns))
        labels.append(check_replace_list(best["Model"] + " " + str(best["Order"]), replace))

    axes[0].boxplot(crps, labels=labels, autorange=True, showmeans=True)
    axes[1].boxplot(times, labels=labels, autorange=True, showmeans=True)

    plt.tight_layout()
    Util.show_and_save_image(fig, file, save)
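A usage sketch for the plot above; the CSV names are hypothetical, and both files are assumed to have been produced by save_dataframe_probabilistic (with synthetic=True and synthetic=False, respectively) over five experiments:

plot_dataframe_probabilistic("pwfts_synthetic.csv", "pwfts_analytic.csv",
                             experiments=5, tam=[10, 8],
                             save=True, file="pwfts_boxplots.png")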
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/arima.html b/docs/_build/html/_modules/pyFTS/benchmarks/arima.html new file mode 100644 index 0000000..ea192cd --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/benchmarks/arima.html @@ -0,0 +1,287 @@ + + + + + + + + pyFTS.benchmarks.arima — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.benchmarks.arima

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+import numpy as np
+import pandas as pd
+from statsmodels.tsa.arima_model import ARIMA as stats_arima
+import scipy.stats as st
+from pyFTS.common import SortedCollection, fts
+from pyFTS.probabilistic import ProbabilityDistribution
+
+
+
[docs]class ARIMA(fts.FTS): + """ + Façade for statsmodels.tsa.arima_model + """ + def __init__(self, **kwargs): + super(ARIMA, self).__init__(**kwargs) + self.name = "ARIMA" + self.detail = "Auto Regressive Integrated Moving Average" + self.is_high_order = True + self.has_point_forecasting = True + self.has_interval_forecasting = True + self.has_probability_forecasting = True + self.model = None + self.model_fit = None + self.trained_data = None + self.p = 1 + self.d = 0 + self.q = 0 + self.benchmark_only = True + self.min_order = 1 + self.alpha = kwargs.get("alpha", 0.05) + self.order = kwargs.get("order", (1,0,0)) + self._decompose_order(self.order) + + def _decompose_order(self, order): + if isinstance(order, (tuple, set, list)): + self.p = order[0] + self.d = order[1] + self.q = order[2] + self.order = self.p + self.q + (self.q - 1 if self.q > 0 else 0) + self.max_lag = self.order + self.d = len(self.transformations) + self.shortname = "ARIMA(" + str(self.p) + "," + str(self.d) + "," + str(self.q) + ") - " + str(self.alpha) + +
[docs] def train(self, data, **kwargs): + + if 'order' in kwargs: + order = kwargs.pop('order') + self._decompose_order(order) + + if self.indexer is not None: + data = self.indexer.get_data(data) + + try: + self.model = stats_arima(data, order=(self.p, self.d, self.q)) + self.model_fit = self.model.fit(disp=0) + except Exception as ex: + print(ex) + self.model_fit = None
+ +
[docs] def ar(self, data): + return data.dot(self.model_fit.arparams)
+ +
[docs] def ma(self, data): + return data.dot(self.model_fit.maparams)
+ +
[docs]    def forecast(self, ndata, **kwargs):
        if self.model_fit is None:
            return np.nan

        ndata = np.array(ndata)

        l = len(ndata)

        ret = []

        # +1 to forecast one step ahead given all available lags
        ar = np.array([self.ar(ndata[k - self.p: k]) for k in np.arange(self.p, l+1)])

        if self.q > 0:
            residuals = ndata[self.p-1:] - ar

            ma = np.array([self.ma(residuals[k - self.q: k]) for k in np.arange(self.q, len(residuals) + 1)])

            ret = ar[self.q - 1:] + ma
            ret = ret[self.q:]
        else:
            ret = ar

        return ret
+ +
[docs] def forecast_interval(self, data, **kwargs): + + if self.model_fit is None: + return np.nan + + sigma = np.sqrt(self.model_fit.sigma2) + + l = len(data) + + ret = [] + + for k in np.arange(self.order, l+1): + tmp = [] + + sample = [data[i] for i in np.arange(k - self.order, k)] + + mean = self.forecast(sample) + + if isinstance(mean,(list, np.ndarray)): + mean = mean[0] + + tmp.append(mean + st.norm.ppf(self.alpha) * sigma) + tmp.append(mean + st.norm.ppf(1 - self.alpha) * sigma) + + ret.append(tmp) + + return ret
+ +
[docs] def forecast_ahead_interval(self, ndata, steps, **kwargs): + if self.model_fit is None: + return np.nan + + smoothing = kwargs.get("smoothing",0.5) + + sigma = np.sqrt(self.model_fit.sigma2) + + l = len(ndata) + + nmeans = self.forecast_ahead(ndata, steps, **kwargs) + + ret = [] + + for k in np.arange(0, steps): + tmp = [] + + hsigma = (1 + k*smoothing)*sigma + + tmp.append(nmeans[k] + st.norm.ppf(self.alpha) * hsigma) + tmp.append(nmeans[k] + st.norm.ppf(1 - self.alpha) * hsigma) + + ret.append(tmp) + + return ret
+ +
[docs] def forecast_distribution(self, data, **kwargs): + + sigma = np.sqrt(self.model_fit.sigma2) + + l = len(data) + + ret = [] + + for k in np.arange(self.order, l + 1): + sample = [data[i] for i in np.arange(k - self.order, k)] + + mean = self.forecast(sample) + + if isinstance(mean, (list, np.ndarray)): + mean = mean[0] + + dist = ProbabilityDistribution.ProbabilityDistribution(type="histogram", uod=[self.original_min, self.original_max]) + intervals = [] + for alpha in np.arange(0.05, 0.5, 0.05): + + qt1 = mean + st.norm.ppf(alpha) * sigma + qt2 = mean + st.norm.ppf(1 - alpha) * sigma + + intervals.append([qt1, qt2]) + + dist.append_interval(intervals) + + ret.append(dist) + + return ret
+ + +
[docs] def forecast_ahead_distribution(self, data, steps, **kwargs): + smoothing = kwargs.get("smoothing", 0.5) + + sigma = np.sqrt(self.model_fit.sigma2) + + l = len(data) + + ret = [] + + nmeans = self.forecast_ahead(data, steps, **kwargs) + + for k in np.arange(0, steps): + dist = ProbabilityDistribution.ProbabilityDistribution(type="histogram", + uod=[self.original_min, self.original_max]) + intervals = [] + for alpha in np.arange(0.05, 0.5, 0.05): + tmp = [] + + hsigma = (1 + k * smoothing) * sigma + + tmp.append(nmeans[k] + st.norm.ppf(alpha) * hsigma) + tmp.append(nmeans[k] + st.norm.ppf(1 - alpha) * hsigma) + + intervals.append(tmp) + + dist.append_interval(intervals) + + ret.append(dist) + + return ret
+
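A minimal usage sketch for this façade, assuming the TAIEX sample dataset from pyFTS.data (any univariate series works) and an arbitrary train/test split:

from pyFTS.data import TAIEX
from pyFTS.benchmarks import arima

data = TAIEX.get_data()
train, test = data[:4000], data[4000:4100]

model = arima.ARIMA(order=(2, 0, 1), alpha=0.05)  # p=2, d=0, q=1
model.fit(train)

point_forecasts = model.forecast(test)     # one-step-ahead point forecasts
intervals = model.forecast_interval(test)  # [lower, upper] bounds at the given alpha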
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/benchmarks.html b/docs/_build/html/_modules/pyFTS/benchmarks/benchmarks.html new file mode 100644 index 0000000..d586890 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/benchmarks/benchmarks.html @@ -0,0 +1,1234 @@ + + + + + + + + pyFTS.benchmarks.benchmarks — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.benchmarks.benchmarks

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+"""Benchmarks methods for FTS methods"""
+
+
+import datetime
+import time
+from copy import deepcopy
+import traceback
+
+import matplotlib
+import matplotlib.cm as cmx
+import matplotlib.colors as pltcolors
+import matplotlib.pyplot as plt
+import numpy as np
+from mpl_toolkits.mplot3d import Axes3D
+
+from pyFTS.probabilistic import ProbabilityDistribution
+from pyFTS.common import Transformations
+from pyFTS.models import song, chen, yu, ismailefendi, sadaei, hofts, pwfts, ifts, cheng, hwang
+from pyFTS.models.ensemble import ensemble
+from pyFTS.benchmarks import Measures, naive, arima, ResidualAnalysis, quantreg, knn
+from pyFTS.benchmarks import Util as bUtil
+from pyFTS.common import Util as cUtil
+# from sklearn.cross_validation import KFold
+from pyFTS.partitioners import Grid
+from matplotlib import rc
+
+#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
+## for Palatino and other serif fonts use:
+#rc('font',**{'family':'serif','serif':['Palatino']})
+#rc('text', usetex=True)
+
+colors = ['grey', 'darkgrey', 'rosybrown', 'maroon', 'red','orange', 'gold', 'yellow', 'olive', 'green',
+          'darkgreen', 'cyan', 'lightblue','blue', 'darkblue', 'purple', 'darkviolet' ]
+
+ncol = len(colors)
+
+styles = ['-','--','-.',':','.']
+
+nsty = len(styles)
+
+
+def __pop(key, default, kwargs):
+    if key in kwargs:
+        return kwargs.pop(key)
+    else:
+        return default
+
+
+
[docs]def get_benchmark_point_methods(): + """Return all non FTS methods for point forecasting""" + return [naive.Naive, arima.ARIMA, quantreg.QuantileRegression]
+ + +
[docs]def get_point_methods(): + """Return all FTS methods for point forecasting""" + return [song.ConventionalFTS, chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS, + cheng.TrendWeightedFTS, sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, hwang.HighOrderFTS, + pwfts.ProbabilisticWeightedFTS]
+ + +
[docs]def get_benchmark_interval_methods(): + """Return all non FTS methods for point_to_interval forecasting""" + return [ arima.ARIMA, quantreg.QuantileRegression]
+ + +
[docs]def get_interval_methods(): + """Return all FTS methods for point_to_interval forecasting""" + return [ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS]
+ + +
[docs]def get_probabilistic_methods(): + """Return all FTS methods for probabilistic forecasting""" + return [ensemble.AllMethodEnsembleFTS, pwfts.ProbabilisticWeightedFTS]
+ + +
[docs]def get_benchmark_probabilistic_methods():
    """Return all non FTS methods for probabilistic forecasting"""
    return [arima.ARIMA, quantreg.QuantileRegression, knn.KNearestNeighbors]
+ + +
[docs]def sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs):
    """
    Sliding window benchmarks for FTS forecasters.

    For each data window, train and test datasets will be split. For each train split, a partitioner
    model is created for each number of partitions and partitioning method. And for each partitioner,
    order, steps ahead and FTS method, a forecasting model will be trained.

    Then all trained models are benchmarked on the test data and the metrics are stored in a sqlite3 database
    (identified by the 'file' parameter) for posterior analysis.

    All this process can be distributed on a dispy cluster, by setting the attribute 'distributed' to True and
    informing the list of dispy nodes on the 'nodes' parameter.

    The number of experiments is determined by the 'windowsize' and 'inc' parameters.

    :param data: time series data to be used on the sliding windows
    :param windowsize: size of sliding window
    :param train: percentage of the sliding window data used to train the models
    :param kwargs: dict, optional arguments

    :keyword benchmark_methods: a list with non-FTS models to benchmark. The default is None.
    :keyword benchmark_methods_parameters: a list with non-FTS models parameters. The default is None.
    :keyword benchmark_models: A boolean value indicating if external FTS methods will be used on benchmark. The default is False.
    :keyword build_methods: A boolean value indicating if the default FTS methods will be used on benchmark. The default is True.
    :keyword dataset: the dataset name to identify the current set of benchmarks results on database.
    :keyword distributed: A boolean value indicating if the forecasting procedure will be distributed in a dispy cluster. The default is False.
    :keyword file: file path to save the results. The default is benchmarks.db.
    :keyword inc: a float on the interval [0,1] indicating the percentage of the windowsize to move the window.
    :keyword methods: a list with FTS class names. The default depends on the forecasting type and contains the list of all FTS methods.
    :keyword models: a list with prebuilt FTS objects. The default is None.
    :keyword nodes: a list with the dispy cluster nodes addresses. The default is [127.0.0.1].
    :keyword orders: a list with orders of the models (for high order models). The default is [1,2,3].
    :keyword partitions: a list with the numbers of partitions on the Universe of Discourse. The default is [10].
    :keyword partitioners_models: a list with prebuilt Universe of Discourse partitioners objects. The default is None.
    :keyword partitioners_methods: a list with Universe of Discourse partitioners class names. The default is [partitioners.Grid.GridPartitioner].
    :keyword progress: If true a progress bar will be displayed during the benchmarks. The default is False.
    :keyword start: in the multi step forecasting, the index of the data where to start forecasting. The default is 0.
    :keyword steps_ahead: a list with the forecasting horizons, i. e., the number of steps ahead to forecast. The default is 1.
    :keyword tag: a name to identify the current set of benchmarks results on database.
    :keyword type: the forecasting type, one of these values: point (default), interval or distribution.
    :keyword transformations: a list with data transformations to apply. The default is [None].
+ """ + + tag = __pop('tag', None, kwargs) + dataset = __pop('dataset', None, kwargs) + + distributed = __pop('distributed', False, kwargs) + + transformations = kwargs.get('transformations', [None]) + progress = kwargs.get('progress', None) + type = kwargs.get("type", 'point') + + orders = __pop("orders", [1,2,3], kwargs) + + partitioners_models = __pop("partitioners_models", None, kwargs) + partitioners_methods = __pop("partitioners_methods", [Grid.GridPartitioner], kwargs) + partitions = __pop("partitions", [10], kwargs) + + steps_ahead = __pop('steps_ahead', [1], kwargs) + + methods = __pop('methods', None, kwargs) + + models = __pop('models', None, kwargs) + + pool = [] if models is None else models + + if methods is None: + if type == 'point': + methods = get_point_methods() + elif type == 'interval': + methods = get_interval_methods() + elif type == 'distribution': + methods = get_probabilistic_methods() + + build_methods = __pop("build_methods", True, kwargs) + + if build_methods: + for method in methods: + mfts = method() + + if mfts.is_high_order: + for order in orders: + if order >= mfts.min_order: + mfts = method() + mfts.order = order + pool.append(mfts) + else: + mfts.order = 1 + pool.append(mfts) + + benchmark_models = __pop("benchmark_models", False, kwargs) + + if benchmark_models != False: + + benchmark_methods = __pop("benchmark_methods", None, kwargs) + benchmark_methods_parameters = __pop("benchmark_methods_parameters", None, kwargs) + + benchmark_pool = [] if ( benchmark_models is None or not isinstance(benchmark_models, list)) \ + else benchmark_models + + if benchmark_models is None and benchmark_methods is None: + if type == 'point'or type == 'partition': + benchmark_methods = get_benchmark_point_methods() + elif type == 'interval': + benchmark_methods = get_benchmark_interval_methods() + elif type == 'distribution': + benchmark_methods = get_benchmark_probabilistic_methods() + + if benchmark_methods is not None: + for transformation in transformations: + for count, model in enumerate(benchmark_methods, start=0): + par = benchmark_methods_parameters[count] + mfts = model(**par) + mfts.append_transformation(transformation) + benchmark_pool.append(mfts) + + if type == 'point': + experiment_method = run_point + synthesis_method = process_point_jobs + elif type == 'interval': + experiment_method = run_interval + synthesis_method = process_interval_jobs + elif type == 'distribution': + experiment_method = run_probabilistic + synthesis_method = process_probabilistic_jobs + else: + raise ValueError("Type parameter has a unkown value!") + + if distributed: + import dispy, dispy.httpd + + nodes = kwargs.get("nodes", ['127.0.0.1']) + cluster, http_server = cUtil.start_dispy_cluster(experiment_method, nodes) + + jobs = [] + + inc = __pop("inc", 0.1, kwargs) + + if progress: + from tqdm import tqdm + _tdata = len(data) / (windowsize * inc) + _tasks = (len(partitioners_models) * len(orders) * len(partitions) * len(transformations) * len(steps_ahead)) + _tbcmk = len(benchmark_pool)*len(steps_ahead) + progressbar = tqdm(total=_tdata*_tasks + _tdata*_tbcmk, desc="Benchmarks:") + + file = kwargs.get('file', "benchmarks.db") + + conn = bUtil.open_benchmark_db(file) + + for ct, train, test in cUtil.sliding_window(data, windowsize, train, inc=inc, **kwargs): + if benchmark_models != False: + for model in benchmark_pool: + for step in steps_ahead: + + kwargs['steps_ahead'] = step + + if not distributed: + if progress: + progressbar.update(1) + try: + job = 
                            job = experiment_method(deepcopy(model), None, train, test, **kwargs)
                            synthesis_method(dataset, tag, job, conn)
                        except Exception as ex:
                            print('EXCEPTION! ', model.shortname, model.order)
                            traceback.print_exc()
                    else:
                        job = cluster.submit(deepcopy(model), None, train, test, **kwargs)
                        jobs.append(job)

        partitioners_pool = []

        if partitioners_models is None:

            for transformation in transformations:

                for partition in partitions:

                    for partitioner in partitioners_methods:

                        data_train_fs = partitioner(data=train, npart=partition, transformation=transformation)

                        partitioners_pool.append(data_train_fs)
        else:
            partitioners_pool = partitioners_models

        for step in steps_ahead:

            for partitioner in partitioners_pool:

                for _id, model in enumerate(pool, start=0):

                    kwargs['steps_ahead'] = step

                    if not distributed:
                        if progress:
                            progressbar.update(1)
                        try:
                            job = experiment_method(deepcopy(model), deepcopy(partitioner), train, test, **kwargs)
                            synthesis_method(dataset, tag, job, conn)
                        except Exception as ex:
                            print('EXCEPTION! ', model.shortname, model.order, partitioner.name,
                                  partitioner.partitions, str(partitioner.transformation))
                            traceback.print_exc()
                    else:
                        job = cluster.submit(deepcopy(model), deepcopy(partitioner), train, test, **kwargs)
                        job.id = _id  # associate an ID to identify jobs (if needed later)
                        jobs.append(job)

    if progress:
        progressbar.close()

    if distributed:

        for job in jobs:
            if progress:
                progressbar.update(1)
            job()
            if job.status == dispy.DispyJob.Finished and job is not None:
                tmp = job.result
                synthesis_method(dataset, tag, tmp, conn)
            else:
                print("status", job.status)
                print("result", job.result)
                print("stdout", job.stdout)
                print("stderr", job.exception)

        cluster.wait()  # wait for all jobs to finish

        cUtil.stop_dispy_cluster(cluster, http_server)

    conn.close()
+ + + + +
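A typical call, assuming the TAIEX sample dataset from pyFTS.data; every keyword below is documented above and the tag/dataset names are arbitrary:

from pyFTS.data import TAIEX
from pyFTS.benchmarks import benchmarks as bchmk
from pyFTS.partitioners import Grid

data = TAIEX.get_data()

# point forecasting benchmarks over sliding windows of 1000 points,
# moving 20% of the window at each iteration; results go to benchmarks.db
bchmk.sliding_window_benchmarks(data, 1000, train=0.8, inc=0.2,
                                orders=[1, 2, 3], partitions=[10, 20],
                                partitioners_methods=[Grid.GridPartitioner],
                                type='point', tag='example', dataset='TAIEX',
                                file='benchmarks.db')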
[docs]def run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwargs):
    """
    Point forecast benchmark function to be executed on cluster nodes

    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param window_key: id of the sliding window
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    import time
    from pyFTS.models import yu, chen, hofts, pwfts, ismailefendi, sadaei, song, cheng, hwang
    from pyFTS.partitioners import Grid, Entropy, FCM
    from pyFTS.benchmarks import Measures, naive, arima, quantreg
    from pyFTS.common import Transformations

    # these references are kept so the classes are available when this
    # function is serialized and executed on dispy cluster nodes
    tmp = [song.ConventionalFTS, chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,
           cheng.TrendWeightedFTS, sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, hwang.HighOrderFTS,
           pwfts.ProbabilisticWeightedFTS]

    tmp2 = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]

    tmp4 = [naive.Naive, arima.ARIMA, quantreg.QuantileRegression]

    tmp3 = [Measures.get_point_statistics]

    tmp5 = [Transformations.Differential]

    indexer = kwargs.get('indexer', None)

    steps_ahead = kwargs.get('steps_ahead', 1)
    method = kwargs.get('method', None)

    if mfts.benchmark_only:
        _key = mfts.shortname + str(mfts.order if mfts.order is not None else "")
    else:
        pttr = str(partitioner.__module__).split('.')[-1]
        _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
        mfts.partitioner = partitioner
        mfts.append_transformation(partitioner.transformation)

    _key += str(steps_ahead)
    _key += str(method) if method is not None else ""

    _start = time.time()
    mfts.fit(train_data, **kwargs)
    _end = time.time()
    times = _end - _start

    _start = time.time()
    _rmse, _smape, _u = Measures.get_point_statistics(test_data, mfts, **kwargs)
    _end = time.time()
    times += _end - _start

    ret = {'key': _key, 'obj': mfts, 'rmse': _rmse, 'smape': _smape, 'u': _u, 'time': times, 'window': window_key,
           'steps': steps_ahead, 'method': method}

    return ret
+ + +
[docs]def run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kwargs):
    """
    Interval forecast benchmark function to be executed on cluster nodes

    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param window_key: id of the sliding window
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    import time
    from pyFTS.models import hofts, ifts, pwfts
    from pyFTS.partitioners import Grid, Entropy, FCM
    from pyFTS.benchmarks import Measures, arima, quantreg

    # these references are kept so the classes are available when this
    # function is serialized and executed on dispy cluster nodes
    tmp = [hofts.HighOrderFTS, ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS]

    tmp2 = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]

    tmp4 = [arima.ARIMA, quantreg.QuantileRegression]

    tmp3 = [Measures.get_interval_statistics]

    steps_ahead = kwargs.get('steps_ahead', 1)
    method = kwargs.get('method', None)

    if mfts.benchmark_only:
        _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha)
    else:
        pttr = str(partitioner.__module__).split('.')[-1]
        _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
        mfts.partitioner = partitioner
        mfts.append_transformation(partitioner.transformation)

    _key += str(steps_ahead)
    _key += str(method) if method is not None else ""

    _start = time.time()
    mfts.fit(train_data, **kwargs)
    _end = time.time()
    times = _end - _start

    _start = time.time()
    #_sharp, _res, _cov, _q05, _q25, _q75, _q95, _w05, _w25
    metrics = Measures.get_interval_statistics(test_data, mfts, **kwargs)
    _end = time.time()
    times += _end - _start

    ret = {'key': _key, 'obj': mfts, 'sharpness': metrics[0], 'resolution': metrics[1], 'coverage': metrics[2],
           'time': times, 'Q05': metrics[3], 'Q25': metrics[4], 'Q75': metrics[5], 'Q95': metrics[6],
           'winkler05': metrics[7], 'winkler25': metrics[8],
           'window': window_key, 'steps': steps_ahead, 'method': method}

    return ret
+ + +
[docs]def run_probabilistic(mfts, partitioner, train_data, test_data, window_key=None, **kwargs):
    """
    Probabilistic forecast benchmark function to be executed on cluster nodes

    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param window_key: id of the sliding window
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    import time
    import numpy as np
    from pyFTS.models import hofts, ifts, pwfts
    from pyFTS.models.ensemble import ensemble
    from pyFTS.partitioners import Grid, Entropy, FCM
    from pyFTS.benchmarks import Measures, arima, quantreg, knn
    from pyFTS.models.seasonal import SeasonalIndexer

    # these references are kept so the classes are available when this
    # function is serialized and executed on dispy cluster nodes
    tmp = [hofts.HighOrderFTS, ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS, arima.ARIMA,
           ensemble.AllMethodEnsembleFTS, knn.KNearestNeighbors]

    tmp2 = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]

    tmp3 = [Measures.get_distribution_statistics, SeasonalIndexer.SeasonalIndexer, SeasonalIndexer.LinearSeasonalIndexer]

    indexer = kwargs.get('indexer', None)

    steps_ahead = kwargs.get('steps_ahead', 1)
    method = kwargs.get('method', None)

    if mfts.benchmark_only:
        _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha)
    else:
        pttr = str(partitioner.__module__).split('.')[-1]
        _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
        mfts.partitioner = partitioner
        mfts.append_transformation(partitioner.transformation)

    _key += str(steps_ahead)
    _key += str(method) if method is not None else ""

    if mfts.has_seasonality:
        mfts.indexer = indexer

    _start = time.time()
    mfts.fit(train_data, **kwargs)
    _end = time.time()
    times = _end - _start

    _crps1, _t1, _brier = Measures.get_distribution_statistics(test_data, mfts, **kwargs)
    _t1 += times

    ret = {'key': _key, 'obj': mfts, 'CRPS': _crps1, 'time': _t1, 'brier': _brier, 'window': window_key,
           'steps': steps_ahead, 'method': method}

    return ret
+ + +
[docs]def process_point_jobs(dataset, tag, job, conn):
    """Store the point forecasting metrics (rmse, smape, u and time) of a benchmark job on the database"""

    data = bUtil.process_common_data(dataset, tag, 'point', job)

    rmse = deepcopy(data)
    rmse.extend(["rmse", job["rmse"]])
    bUtil.insert_benchmark(rmse, conn)
    smape = deepcopy(data)
    smape.extend(["smape", job["smape"]])
    bUtil.insert_benchmark(smape, conn)
    u = deepcopy(data)
    u.extend(["u", job["u"]])
    bUtil.insert_benchmark(u, conn)
    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)
+ + +
[docs]def process_interval_jobs(dataset, tag, job, conn):
    """Store the interval forecasting metrics (sharpness, resolution, coverage, quantiles, Winkler scores and time) of a benchmark job on the database"""

    data = bUtil.process_common_data(dataset, tag, 'interval', job)

    sharpness = deepcopy(data)
    sharpness.extend(["sharpness", job["sharpness"]])
    bUtil.insert_benchmark(sharpness, conn)
    resolution = deepcopy(data)
    resolution.extend(["resolution", job["resolution"]])
    bUtil.insert_benchmark(resolution, conn)
    coverage = deepcopy(data)
    coverage.extend(["coverage", job["coverage"]])
    bUtil.insert_benchmark(coverage, conn)
    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)
    Q05 = deepcopy(data)
    Q05.extend(["Q05", job["Q05"]])
    bUtil.insert_benchmark(Q05, conn)
    Q25 = deepcopy(data)
    Q25.extend(["Q25", job["Q25"]])
    bUtil.insert_benchmark(Q25, conn)
    Q75 = deepcopy(data)
    Q75.extend(["Q75", job["Q75"]])
    bUtil.insert_benchmark(Q75, conn)
    Q95 = deepcopy(data)
    Q95.extend(["Q95", job["Q95"]])
    bUtil.insert_benchmark(Q95, conn)
    W05 = deepcopy(data)
    W05.extend(["winkler05", job["winkler05"]])
    bUtil.insert_benchmark(W05, conn)
    W25 = deepcopy(data)
    W25.extend(["winkler25", job["winkler25"]])
    bUtil.insert_benchmark(W25, conn)
+ + +
[docs]def process_probabilistic_jobs(dataset, tag, job, conn):
    """Store the probabilistic forecasting metrics (crps, time and brier) of a benchmark job on the database"""

    data = bUtil.process_common_data(dataset, tag, 'density', job)

    crps = deepcopy(data)
    crps.extend(["crps", job["CRPS"]])
    bUtil.insert_benchmark(crps, conn)
    time = deepcopy(data)
    time.extend(["time", job["time"]])
    bUtil.insert_benchmark(time, conn)
    brier = deepcopy(data)
    brier.extend(["brier", job["brier"]])
    bUtil.insert_benchmark(brier, conn)
+ + + + + + + + + + + + +
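After the benchmarks run, the sqlite3 database can be queried directly. A sketch using only the standard library and pandas; the table name 'benchmarks' and the 'Model'/'Measure'/'Value' columns are assumptions about the schema created by pyFTS.benchmarks.Util, so inspect sqlite_master first if they differ:

import sqlite3
import pandas as pd

conn = sqlite3.connect("benchmarks.db")
# list the existing tables to confirm the schema
print(pd.read_sql_query("SELECT name FROM sqlite_master WHERE type='table'", conn))

# average value of each measure per model (assumed column names)
df = pd.read_sql_query("SELECT Model, Measure, AVG(Value) AS av "
                       "FROM benchmarks GROUP BY Model, Measure", conn)
print(df)
conn.close()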
[docs]def plot_compared_intervals_ahead(original, models, colors, distributions, time_from, time_to, intervals=True,
                                  save=False, file=None, tam=[20, 5], resolution=None,
                                  cmap='Blues', linewidth=1.5):
    """
    Plot the multi step ahead forecasts of several models, by interval or by probability distribution

    :param original: Original time series data (list)
    :param models: List of models to compare
    :param colors: List of models colors
    :param distributions: list of booleans, one per model, True to plot the forecast distribution of that model
    :param time_from: index of the data point where the ahead forecasting starts
    :param time_to: number of steps ahead to forecast
    :param intervals: True to plot the interval forecasts
    :param save: Save the picture on file
    :param file: Filename to save the picture
    :param tam: Size of the picture
    :param resolution: resolution used to discretize the forecast distributions (default: 1% of the data range)
    :param cmap: Color map to be used on distribution plot
    :param linewidth: base line width of the plotted series
    :return:
    """
    fig = plt.figure(figsize=tam)
    ax = fig.add_subplot(111)

    cm = plt.get_cmap(cmap)
    cNorm = pltcolors.Normalize(vmin=0, vmax=1)
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)

    if resolution is None: resolution = (max(original) - min(original)) / 100

    mi = []
    ma = []

    for count, fts in enumerate(models, start=0):
        if fts.has_probability_forecasting and distributions[count]:
            density = fts.forecast_ahead_distribution(original[time_from - fts.order:time_from], time_to,
                                                      resolution=resolution)

            #plot_density_scatter(ax, cmap, density, fig, resolution, time_from, time_to)
            plot_density_rectange(ax, cm, density, fig, resolution, time_from, time_to)

        if fts.has_interval_forecasting and intervals:
            forecasts = fts.forecast_ahead_interval(original[time_from - fts.order:time_from], time_to)
            lower = [kk[0] for kk in forecasts]
            upper = [kk[1] for kk in forecasts]
            mi.append(min(lower))
            ma.append(max(upper))
            for k in np.arange(0, time_from - fts.order):
                lower.insert(0, None)
                upper.insert(0, None)
            ax.plot(lower, color=colors[count], label=fts.shortname, linewidth=linewidth)
            ax.plot(upper, color=colors[count], linewidth=linewidth*1.5)

    ax.plot(original, color='black', label="Original", linewidth=linewidth*1.5)
    handles0, labels0 = ax.get_legend_handles_labels()
    if True in distributions:
        lgd = ax.legend(handles0, labels0, loc=2)
    else:
        lgd = ax.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1))
    _mi = min(mi)
    if _mi < 0:
        _mi *= 1.1
    else:
        _mi *= 0.9
    _ma = max(ma)
    if _ma < 0:
        _ma *= 0.9
    else:
        _ma *= 1.1

    ax.set_ylim([_mi, _ma])
    ax.set_ylabel('F(T)')
    ax.set_xlabel('T')
    ax.set_xlim([0, len(original)])

    cUtil.show_and_save_image(fig, file, save, lgd=lgd)
+ + + +
[docs]def plot_density_rectange(ax, cmap, density, fig, resolution, time_from, time_to): + from matplotlib.patches import Rectangle + from matplotlib.collections import PatchCollection + patches = [] + colors = [] + for x in density.index: + for y in density.columns: + s = Rectangle((time_from + x, y), 1, resolution, fill=True, lw = 0) + patches.append(s) + colors.append(density[y][x]*5) + pc = PatchCollection(patches=patches, match_original=True) + pc.set_clim([0, 1]) + pc.set_cmap(cmap) + pc.set_array(np.array(colors)) + ax.add_collection(pc) + cb = fig.colorbar(pc, ax=ax) + cb.set_label('Density')
+ + +
[docs]def plot_distribution(ax, cmap, probabilitydist, fig, time_from, reference_data=None): + from matplotlib.patches import Rectangle + from matplotlib.collections import PatchCollection + patches = [] + colors = [] + for ct, dt in enumerate(probabilitydist): + disp = 0.0 + if reference_data is not None: + disp = reference_data[time_from+ct] + + for y in dt.bins: + s = Rectangle((time_from+ct, y+disp), 1, dt.resolution, fill=True, lw = 0) + patches.append(s) + colors.append(dt.density(y)) + scale = Transformations.Scale() + colors = scale.apply(colors) + pc = PatchCollection(patches=patches, match_original=True) + pc.set_clim([0, 1]) + pc.set_cmap(cmap) + pc.set_array(np.array(colors)) + ax.add_collection(pc) + cb = fig.colorbar(pc, ax=ax) + cb.set_label('Density')
+ + +
[docs]def plot_interval(axis, intervals, order, label, color='red', typeonlegend=False, ls='-', linewidth=1): + lower = [kk[0] for kk in intervals] + upper = [kk[1] for kk in intervals] + mi = min(lower) * 0.95 + ma = max(upper) * 1.05 + for k in np.arange(0, order): + lower.insert(0, None) + upper.insert(0, None) + if typeonlegend: label += " (Interval)" + axis.plot(lower, color=color, label=label, ls=ls,linewidth=linewidth) + axis.plot(upper, color=color, ls=ls,linewidth=linewidth) + return [mi, ma]
+ + +
[docs]def plot_point(axis, points, order, label, color='red', ls='-', linewidth=1): + mi = min(points) * 0.95 + ma = max(points) * 1.05 + for k in np.arange(0, order): + points.insert(0, None) + axis.plot(points, color=color, label=label, ls=ls,linewidth=linewidth) + return [mi, ma]
+ + + +
[docs]def plot_compared_series(original, models, colors, typeonlegend=False, save=False, file=None, tam=[20, 5], + points=True, intervals=True, linewidth=1.5): + """ + Plot the forecasts of several one step ahead models, by point or by interval + + :param original: Original time series data (list) + :param models: List of models to compare + :param colors: List of models colors + :param typeonlegend: Add the type of forecast (point / interval) on legend + :param save: Save the picture on file + :param file: Filename to save the picture + :param tam: Size of the picture + :param points: True to plot the point forecasts, False otherwise + :param intervals: True to plot the interval forecasts, False otherwise + :param linewidth: + :return: + """ + + fig = plt.figure(figsize=tam) + ax = fig.add_subplot(111) + + mi = [] + ma = [] + + legends = [] + + ax.plot(original, color='black', label="Original", linewidth=linewidth*1.5) + + for count, fts in enumerate(models, start=0): + try: + if fts.has_point_forecasting and points: + forecasts = fts.forecast(original) + if isinstance(forecasts, np.ndarray): + forecasts = forecasts.tolist() + mi.append(min(forecasts) * 0.95) + ma.append(max(forecasts) * 1.05) + for k in np.arange(0, fts.order): + forecasts.insert(0, None) + lbl = fts.shortname + str(fts.order if fts.is_high_order and not fts.benchmark_only else "") + if typeonlegend: lbl += " (Point)" + ax.plot(forecasts, color=colors[count], label=lbl, ls="-",linewidth=linewidth) + + if fts.has_interval_forecasting and intervals: + forecasts = fts.forecast_interval(original) + lbl = fts.shortname + " " + str(fts.order if fts.is_high_order and not fts.benchmark_only else "") + if not points and intervals: + ls = "-" + else: + ls = "--" + tmpmi, tmpma = plot_interval(ax, forecasts, fts.order, label=lbl, typeonlegend=typeonlegend, + color=colors[count], ls=ls, linewidth=linewidth) + mi.append(tmpmi) + ma.append(tmpma) + except ValueError as ex: + print(fts.shortname) + + handles0, labels0 = ax.get_legend_handles_labels() + lgd = ax.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1)) + legends.append(lgd) + + # ax.set_title(fts.name) + ax.set_ylim([min(mi), max(ma)]) + ax.set_ylabel('F(T)') + ax.set_xlabel('T') + ax.set_xlim([0, len(original)])
+ + #Util.show_and_save_image(fig, file, save, lgd=legends) + + +
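A usage sketch for plot_compared_series, mirroring the fitting pattern used by the run_* functions above; the dataset, split point and colors are arbitrary:

from pyFTS.data import TAIEX
from pyFTS.partitioners import Grid
from pyFTS.models import chen, hofts

data = TAIEX.get_data()
train, test = data[:3000], data[3000:3200]

part = Grid.GridPartitioner(data=train, npart=15)

models = []
for method, order in [(chen.ConventionalFTS, 1), (hofts.HighOrderFTS, 2)]:
    m = method()
    m.order = order
    m.partitioner = part
    m.fit(train)
    models.append(m)

plot_compared_series(test, models, ['blue', 'red'], points=True, intervals=False)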
[docs]def plot_probability_distributions(pmfs, lcolors, tam=[15, 7]): + fig = plt.figure(figsize=tam) + ax = fig.add_subplot(111) + + for k,m in enumerate(pmfs,start=0): + m.plot(ax, color=lcolors[k]) + + handles0, labels0 = ax.get_legend_handles_labels() + ax.legend(handles0, labels0)
+ + + +
[docs]def plotCompared(original, forecasts, labels, title): + fig = plt.figure(figsize=[13, 6]) + ax = fig.add_subplot(111) + ax.plot(original, color='k', label="Original") + for c in range(0, len(forecasts)): + ax.plot(forecasts[c], label=labels[c]) + handles0, labels0 = ax.get_legend_handles_labels() + ax.legend(handles0, labels0) + ax.set_title(title) + ax.set_ylabel('F(T)') + ax.set_xlabel('T') + ax.set_xlim([0, len(original)]) + ax.set_ylim([min(original), max(original)])
+ + +
[docs]def SelecaoSimples_MenorRMSE(original, parameters, modelo):
    ret = []
    errors = []
    forecasted_best = []
    print("Original Series")
    fig = plt.figure(figsize=[20, 12])
    fig.suptitle("Model comparison")
    ax0 = fig.add_axes([0, 0.5, 0.65, 0.45])  # left, bottom, width, height
    ax0.set_xlim([0, len(original)])
    ax0.set_ylim([min(original), max(original)])
    ax0.set_title('Time Series')
    ax0.set_ylabel('F(T)')
    ax0.set_xlabel('T')
    ax0.plot(original, label="Original")
    min_rmse = 100000.0
    best = None
    for p in parameters:
        sets = Grid.GridPartitioner(data=original, npart=p).sets
        fts = modelo(str(p) + " partitions")
        fts.train(original, sets=sets)
        # print(original)
        forecasted = fts.forecast(original)
        forecasted.insert(0, original[0])
        # print(forecasted)
        ax0.plot(forecasted, label=fts.name)
        error = Measures.rmse(np.array(forecasted), np.array(original))
        print(p, error)
        errors.append(error)
        if error < min_rmse:
            min_rmse = error
            best = fts
            forecasted_best = forecasted
    handles0, labels0 = ax0.get_legend_handles_labels()
    ax0.legend(handles0, labels0)
    ax1 = fig.add_axes([0.7, 0.5, 0.3, 0.45])  # left, bottom, width, height
    ax1.set_title('RMSE Comparison')
    ax1.set_ylabel('RMSE')
    ax1.set_xlabel('Number of Partitions')
    ax1.set_xlim([min(parameters), max(parameters)])
    ax1.plot(parameters, errors)
    ret.append(best)
    ret.append(forecasted_best)
    # Differential model
    print("\nDifferenced Series")
    difffts = Transformations.differential(original)
    errors = []
    forecastedd_best = []
    ax2 = fig.add_axes([0, 0, 0.65, 0.45])  # left, bottom, width, height
    ax2.set_xlim([0, len(difffts)])
    ax2.set_ylim([min(difffts), max(difffts)])
    ax2.set_title('Time Series')
    ax2.set_ylabel('F(T)')
    ax2.set_xlabel('T')
    ax2.plot(difffts, label="Original")
    min_rmse = 100000.0
    bestd = None
    for p in parameters:
        sets = Grid.GridPartitioner(data=difffts, npart=p).sets
        fts = modelo(str(p) + " partitions")
        fts.train(difffts, sets=sets)
        forecasted = fts.forecast(difffts)
        forecasted.insert(0, difffts[0])
        ax2.plot(forecasted, label=fts.name)
        error = Measures.rmse(np.array(forecasted), np.array(difffts))
        print(p, error)
        errors.append(error)
        if error < min_rmse:
            min_rmse = error
            bestd = fts
            forecastedd_best = forecasted
    handles0, labels0 = ax2.get_legend_handles_labels()
    ax2.legend(handles0, labels0)
    ax3 = fig.add_axes([0.7, 0, 0.3, 0.45])  # left, bottom, width, height
    ax3.set_title('RMSE Comparison')
    ax3.set_ylabel('RMSE')
    ax3.set_xlabel('Number of Partitions')
    ax3.set_xlim([min(parameters), max(parameters)])
    ax3.plot(parameters, errors)
    ret.append(bestd)
    ret.append(forecastedd_best)
    return ret
+ + +
[docs]def compareModelsPlot(original, models_fo, models_ho):
    fig = plt.figure(figsize=[13, 6])
    fig.suptitle("Model comparison")
    ax0 = fig.add_axes([0, 0, 1, 1])  # left, bottom, width, height
    rows = []
    for model in models_fo:
        fts = model["model"]
        ax0.plot(model["forecasted"], label=model["name"])
    for model in models_ho:
        fts = model["model"]
        ax0.plot(model["forecasted"], label=model["name"])
    handles0, labels0 = ax0.get_legend_handles_labels()
    ax0.legend(handles0, labels0)
+ + +
[docs]def compareModelsTable(original, models_fo, models_ho):
    fig = plt.figure(figsize=[12, 4])
    fig.suptitle("Model comparison")
    columns = ['Model', 'Order', 'Partitions', 'RMSE', 'MAPE (%)']
    rows = []
    for model in models_fo:
        fts = model["model"]
        error_r = Measures.rmse(model["forecasted"], original)
        error_m = round(Measures.mape(model["forecasted"], original) * 100, 2)
        rows.append([model["name"], fts.order, len(fts.sets), error_r, error_m])
    for model in models_ho:
        fts = model["model"]
        error_r = Measures.rmse(model["forecasted"][fts.order:], original[fts.order:])
        error_m = round(Measures.mape(model["forecasted"][fts.order:], original[fts.order:]) * 100, 2)
        rows.append([model["name"], fts.order, len(fts.sets), error_r, error_m])
    ax1 = fig.add_axes([0, 0, 1, 1])  # left, bottom, width, height
    ax1.set_xticks([])
    ax1.set_yticks([])
    ax1.table(cellText=rows,
              colLabels=columns,
              cellLoc='center',
              bbox=[0, 0, 1, 1])
    sup = "\\begin{tabular}{"
    header = ""
    body = ""
    footer = ""

    for c in columns:
        sup = sup + "|c"
        if len(header) > 0:
            header = header + " & "
        header = header + "\\textbf{" + c + "} "
    sup = sup + "|} \\hline\n"
    header = header + "\\\\ \\hline \n"

    for r in rows:
        lin = ""
        for c in r:
            if len(lin) > 0:
                lin = lin + " & "
            lin = lin + str(c)

        body = body + lin + "\\\\ \\hline \n"

    return sup + header + body + "\\end{tabular}"
+ + +
[docs]def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=None, tam=[10, 15],
                      plotforecasts=False, elev=30, azim=144, intervals=False, parameters=None,
                      partitioner=Grid.GridPartitioner, transformation=None, indexer=None):
    _3d = len(orders) > 1
    ret = []
    if _3d:
        # float array, so the RMSE values are not truncated to integers
        errors = np.zeros((len(orders), len(partitions)))
    else:
        errors = []
    forecasted_best = []
    fig = plt.figure(figsize=tam)
    # fig.suptitle("Model comparison")
    if plotforecasts:
        ax0 = fig.add_axes([0, 0.4, 0.9, 0.5])  # left, bottom, width, height
        ax0.set_xlim([0, len(train)])
        ax0.set_ylim([min(train) * 0.9, max(train) * 1.1])
        ax0.set_title('Forecasts')
        ax0.set_ylabel('F(T)')
        ax0.set_xlabel('T')
    min_rmse = 1000000.0
    best = None

    for pc, p in enumerate(partitions, start=0):

        sets = partitioner(data=train, npart=p, transformation=transformation).sets
        for oc, o in enumerate(orders, start=0):
            fts = model("q = " + str(p) + " n = " + str(o))
            fts.append_transformation(transformation)
            fts.train(train, sets=sets, order=o, parameters=parameters)
            if not intervals:
                forecasted = fts.forecast(test)
                if not fts.has_seasonality:
                    error = Measures.rmse(np.array(test[o:]), np.array(forecasted[:-1]))
                else:
                    error = Measures.rmse(np.array(test[o:]), np.array(forecasted))
                for kk in range(o):
                    forecasted.insert(0, None)
                if plotforecasts: ax0.plot(forecasted, label=fts.name)
            else:
                forecasted = fts.forecast_interval(test)
                error = 1.0 - Measures.rmse_interval(np.array(test[o:]), np.array(forecasted[:-1]))
            if _3d:
                errors[oc, pc] = error
            else:
                errors.append(error)
            if error < min_rmse:
                min_rmse = error
                best = fts
                forecasted_best = forecasted

    # print(min_rmse)
    if plotforecasts:
        # handles0, labels0 = ax0.get_legend_handles_labels()
        # ax0.legend(handles0, labels0)
        ax0.plot(test, label="Original", linewidth=3.0, color="black")
    if _3d:
        ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
        ax1.set_title('Error Surface')
        ax1.set_ylabel('Model order')
        ax1.set_xlabel('Number of partitions')
        ax1.set_zlabel('RMSE')
        X, Y = np.meshgrid(partitions, orders)
        surf = ax1.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True)
    else:
        ax1 = fig.add_axes([0, 1, 0.9, 0.9])
        ax1.set_title('Error Curve')
        ax1.set_xlabel('Number of partitions')
        ax1.set_ylabel('RMSE')
        ax1.plot(partitions, errors)
    ret.append(best)
    ret.append(forecasted_best)
    ret.append(min_rmse)

    # plt.tight_layout()

    cUtil.show_and_save_image(fig, file, save)

    return ret
+ + + +
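A usage sketch for the grid search above; model classes in this module are instantiated with a name string, so any FTS method from pyFTS.models works (the dataset and search ranges are arbitrary):

import numpy as np
from pyFTS.data import TAIEX
from pyFTS.models import hofts

data = TAIEX.get_data()
train, test = data[:3000], data[3000:3500]

# search over 4 partition counts x 3 orders, returning the best model,
# its forecasts and the minimum RMSE
best, forecasts, rmse = simpleSearch_RMSE(train, test, hofts.HighOrderFTS,
                                          partitions=np.arange(10, 50, 10),
                                          orders=[1, 2, 3],
                                          plotforecasts=False)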
[docs]def pftsExploreOrderAndPartitions(data, save=False, file=None):
    fig, axes = plt.subplots(nrows=4, ncols=1, figsize=[6, 8])
    data_fs1 = Grid.GridPartitioner(data=data, npart=10)
    mi = []
    ma = []

    axes[0].set_title('Point Forecasts by Order')
    axes[2].set_title('Interval Forecasts by Order')

    for order in np.arange(1, 6):
        fts = pwfts.ProbabilisticWeightedFTS("")
        fts.shortname = "n = " + str(order)
        fts.train(data, sets=data_fs1.sets, order=order)
        point_forecasts = fts.forecast(data)
        interval_forecasts = fts.forecast_interval(data)
        lower = [kk[0] for kk in interval_forecasts]
        upper = [kk[1] for kk in interval_forecasts]
        mi.append(min(lower) * 0.95)
        ma.append(max(upper) * 1.05)
        for k in np.arange(0, order):
            point_forecasts.insert(0, None)
            lower.insert(0, None)
            upper.insert(0, None)
        axes[0].plot(point_forecasts, label=fts.shortname)
        axes[2].plot(lower, label=fts.shortname)
        axes[2].plot(upper)

    axes[1].set_title('Point Forecasts by Number of Partitions')
    axes[3].set_title('Interval Forecasts by Number of Partitions')

    for partitions in np.arange(5, 11):
        data_fs = Grid.GridPartitioner(data=data, npart=partitions)
        fts = pwfts.ProbabilisticWeightedFTS("")
        fts.shortname = "q = " + str(partitions)
        fts.train(data, sets=data_fs.sets, order=1)
        point_forecasts = fts.forecast(data)
        interval_forecasts = fts.forecast_interval(data)
        lower = [kk[0] for kk in interval_forecasts]
        upper = [kk[1] for kk in interval_forecasts]
        mi.append(min(lower) * 0.95)
        ma.append(max(upper) * 1.05)
        point_forecasts.insert(0, None)
        lower.insert(0, None)
        upper.insert(0, None)
        axes[1].plot(point_forecasts, label=fts.shortname)
        axes[3].plot(lower, label=fts.shortname)
        axes[3].plot(upper)

    for ax in axes:
        ax.set_ylabel('F(T)')
        ax.set_xlabel('T')
        ax.plot(data, label="Original", color="black", linewidth=1.5)
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles, labels, loc=2, bbox_to_anchor=(1, 1))
        ax.set_ylim([min(mi), max(ma)])
        ax.set_xlim([0, len(data)])

    plt.tight_layout()

    cUtil.show_and_save_image(fig, file, save)
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/knn.html b/docs/_build/html/_modules/pyFTS/benchmarks/knn.html new file mode 100644 index 0000000..22cb9af --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/benchmarks/knn.html @@ -0,0 +1,146 @@ + + + + + + + + pyFTS.benchmarks.knn — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.benchmarks.knn

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+import numpy as np
+from statsmodels.tsa.tsatools import lagmat
+from pyFTS.common import fts
+from pyFTS.probabilistic import ProbabilityDistribution
+
+
+
[docs]class KNearestNeighbors(fts.FTS): + """ + K-Nearest Neighbors + """ + def __init__(self, **kwargs): + super(KNearestNeighbors, self).__init__(**kwargs) + self.name = "kNN" + self.shortname = "kNN" + self.detail = "K-Nearest Neighbors" + self.is_high_order = True + self.has_point_forecasting = True + self.has_interval_forecasting = True + self.has_probability_forecasting = True + self.benchmark_only = True + self.min_order = 1 + self.alpha = kwargs.get("alpha", 0.05) + self.lag = None + self.k = kwargs.get("k", 30) + self.uod = None + +
[docs] def train(self, data, **kwargs): + self.data = np.array(data)
+ + +
[docs] def knn(self, sample): + + if self.order == 1: + dist = np.apply_along_axis(lambda x: (x - sample) ** 2, 0, self.data) + ix = np.argsort(dist) + 1 + else: + dist = [] + for k in np.arange(self.order, len(self.data)): + dist.append(sum([ (self.data[k - kk] - sample[kk])**2 for kk in range(self.order)])) + ix = np.argsort(np.array(dist)) + self.order + 1 + + ix2 = np.clip(ix[:self.k], 0, len(self.data)-1) + return self.data[ix2]
+ +
[docs] def forecast_distribution(self, data, **kwargs): + ret = [] + + smooth = kwargs.get("smooth", "KDE") + alpha = kwargs.get("alpha", None) + + uod = self.get_UoD() + + for k in np.arange(self.order, len(data)): + + sample = data[k-self.order : k] + + forecasts = self.knn(sample) + + dist = ProbabilityDistribution.ProbabilityDistribution(smooth, uod=uod, data=forecasts, + name="", **kwargs) + ret.append(dist) + + return ret
+ + +
+ +
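A minimal usage sketch, again assuming the TAIEX sample dataset from pyFTS.data; the order is set as an attribute, mirroring the pattern used by the benchmark runners:

from pyFTS.data import TAIEX
from pyFTS.benchmarks import knn

data = TAIEX.get_data()
train, test = data[:4000], data[4000:4100]

model = knn.KNearestNeighbors(k=30)
model.order = 2
model.fit(train)

# one ProbabilityDistribution per forecastable point of the test sample
distributions = model.forecast_distribution(test)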
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/naive.html b/docs/_build/html/_modules/pyFTS/benchmarks/naive.html new file mode 100644 index 0000000..56d858e --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/benchmarks/naive.html @@ -0,0 +1,96 @@ + + + + + + + + pyFTS.benchmarks.naive — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.benchmarks.naive

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+from pyFTS.common import fts
+
+
+
[docs]class Naive(fts.FTS): + """Naïve Forecasting method""" + def __init__(self, **kwargs): + super(Naive, self).__init__(order=1, name="Naive",**kwargs) + self.name = "Naïve Model" + self.detail = "Naïve Model" + self.benchmark_only = True + self.is_high_order = False + +
[docs] def forecast(self, data, **kwargs): + return data
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/parallel_benchmarks.html b/docs/_build/html/_modules/pyFTS/benchmarks/parallel_benchmarks.html new file mode 100644 index 0000000..2dd72f2 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/benchmarks/parallel_benchmarks.html @@ -0,0 +1,476 @@ + + + + + + + + pyFTS.benchmarks.parallel_benchmarks — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.benchmarks.parallel_benchmarks

+"""
+joblib parallelized benchmarks for FTS methods
+"""
+
+import datetime
+import multiprocessing
+import time
+from copy import deepcopy
+
+import numpy as np
+from joblib import Parallel, delayed
+
+from pyFTS.benchmarks import benchmarks, Util as bUtil
+from pyFTS.common import Util
+from pyFTS.partitioners import Grid
+
+
+
[docs]def run_point(mfts, partitioner, train_data, test_data, transformation=None, indexer=None):
    """
    Point forecast benchmark function to be executed on threads

    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    pttr = str(partitioner.__module__).split('.')[-1]
    _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
    mfts.partitioner = partitioner
    if transformation is not None:
        mfts.append_transformation(transformation)

    try:
        _start = time.time()
        mfts.train(train_data, partitioner.sets, order=mfts.order)
        _end = time.time()
        times = _end - _start

        _start = time.time()
        _rmse, _smape, _u = benchmarks.get_point_statistics(test_data, mfts, indexer)
        _end = time.time()
        times += _end - _start
    except Exception as e:
        print(e)
        _rmse = np.nan
        _smape = np.nan
        _u = np.nan
        times = np.nan

    ret = {'key': _key, 'obj': mfts, 'rmse': _rmse, 'smape': _smape, 'u': _u, 'time': times}

    print(ret)

    return ret
+ + +
[docs]def point_sliding_window(data, windowsize, train=0.8, models=None, partitioners=[Grid.GridPartitioner],
                         partitions=[10], max_order=3, transformation=None, indexer=None, dump=False,
                         save=False, file=None, sintetic=False):
    """
    Parallel sliding window benchmarks for FTS point forecasters

    :param data: time series data
    :param windowsize: size of sliding window
    :param train: percentage of the sliding window data used to train the models
    :param models: FTS point forecasters
    :param partitioners: Universe of Discourse partitioners
    :param partitions: a list with the numbers of partitions on the Universe of Discourse
    :param max_order: the max order of the models (for high order models)
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :param dump: if True, print the partial results on stdout
    :param save: save results
    :param file: file path to save the results
    :param sintetic: if True, only the average and standard deviation of the results are returned
    :return: DataFrame with the results
    """
    _process_start = time.time()

    print("Process Start: {0: %H:%M:%S}".format(datetime.datetime.now()))

    num_cores = multiprocessing.cpu_count()

    pool = []

    objs = {}
    rmse = {}
    smape = {}
    u = {}
    times = {}

    for model in benchmarks.get_point_methods():
        mfts = model("")

        if mfts.is_high_order:
            for order in np.arange(1, max_order + 1):
                if order >= mfts.min_order:
                    mfts = model("")
                    mfts.order = order
                    pool.append(mfts)
        else:
            pool.append(mfts)

    experiments = 0
    for ct, train, test in Util.sliding_window(data, windowsize, train):
        experiments += 1

        if dump: print('\nWindow: {0}\n'.format(ct))

        for partition in partitions:

            for partitioner in partitioners:

                data_train_fs = partitioner(train, partition, transformation=transformation)

                results = Parallel(n_jobs=num_cores)(
                    delayed(run_point)(deepcopy(m), deepcopy(data_train_fs), deepcopy(train), deepcopy(test),
                                       transformation)
                    for m in pool)

                for tmp in results:
                    if tmp['key'] not in objs:
                        objs[tmp['key']] = tmp['obj']
                        rmse[tmp['key']] = []
                        smape[tmp['key']] = []
                        u[tmp['key']] = []
                        times[tmp['key']] = []
                    rmse[tmp['key']].append(tmp['rmse'])
                    smape[tmp['key']].append(tmp['smape'])
                    u[tmp['key']].append(tmp['u'])
                    times[tmp['key']].append(tmp['time'])

    _process_end = time.time()

    print("Process End: {0: %H:%M:%S}".format(datetime.datetime.now()))

    print("Process Duration: {0}".format(_process_end - _process_start))

    return bUtil.save_dataframe_point(experiments, file, objs, rmse, save, sintetic, smape, times, u)
+ + +
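A usage sketch; the joblib pool uses one process per CPU core and the output file name is hypothetical:

from pyFTS.data import TAIEX
from pyFTS.benchmarks import parallel_benchmarks as pbchmk
from pyFTS.partitioners import Grid

data = TAIEX.get_data()

results = pbchmk.point_sliding_window(data, 2000, train=0.8,
                                      partitioners=[Grid.GridPartitioner],
                                      partitions=[10, 20], max_order=3,
                                      dump=True, save=True,
                                      file="point_results.csv")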
[docs]def run_interval(mfts, partitioner, train_data, test_data, transformation=None, indexer=None):
    """
    Interval forecast benchmark function to be executed on threads

    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    pttr = str(partitioner.__module__).split('.')[-1]
    _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
    mfts.partitioner = partitioner
    if transformation is not None:
        mfts.append_transformation(transformation)

    try:
        _start = time.time()
        mfts.train(train_data, partitioner.sets, order=mfts.order)
        _end = time.time()
        times = _end - _start

        _start = time.time()
        _sharp, _res, _cov = benchmarks.get_interval_statistics(test_data, mfts)
        _end = time.time()
        times += _end - _start
    except Exception as e:
        print(e)
        _sharp = np.nan
        _res = np.nan
        _cov = np.nan
        times = np.nan

    ret = {'key': _key, 'obj': mfts, 'sharpness': _sharp, 'resolution': _res, 'coverage': _cov, 'time': times}

    print(ret)

    return ret
+ + +
[docs]def interval_sliding_window(data, windowsize, train=0.8, models=None, partitioners=[Grid.GridPartitioner],
                            partitions=[10], max_order=3, transformation=None, indexer=None, dump=False,
                            save=False, file=None, sintetic=False):
    """
    Parallel sliding window benchmarks for FTS point_to_interval forecasters

    :param data: time series data
    :param windowsize: size of sliding window
    :param train: percentage of the sliding window data used to train the models
    :param models: FTS point forecasters
    :param partitioners: Universe of Discourse partitioners
    :param partitions: a list with the numbers of partitions on the Universe of Discourse
    :param max_order: the max order of the models (for high order models)
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :param dump: if True, print the partial results on stdout
    :param save: save results
    :param file: file path to save the results
    :param sintetic: if True, only the average and standard deviation of the results are returned
    :return: DataFrame with the results
    """
    _process_start = time.time()

    print("Process Start: {0: %H:%M:%S}".format(datetime.datetime.now()))

    num_cores = multiprocessing.cpu_count()

    pool = []

    objs = {}
    sharpness = {}
    resolution = {}
    coverage = {}
    times = {}

    for model in benchmarks.get_interval_methods():
        mfts = model("")

        if mfts.is_high_order:
            for order in np.arange(1, max_order + 1):
                if order >= mfts.min_order:
                    mfts = model("")
                    mfts.order = order
                    pool.append(mfts)
        else:
            pool.append(mfts)

    experiments = 0
    for ct, train, test in Util.sliding_window(data, windowsize, train):
        experiments += 1

        if dump: print('\nWindow: {0}\n'.format(ct))

        for partition in partitions:

            for partitioner in partitioners:

                data_train_fs = partitioner(train, partition, transformation=transformation)

                results = Parallel(n_jobs=num_cores)(
                    delayed(run_interval)(deepcopy(m), deepcopy(data_train_fs), deepcopy(train), deepcopy(test),
                                          transformation)
                    for m in pool)

                for tmp in results:
                    if tmp['key'] not in objs:
                        objs[tmp['key']] = tmp['obj']
                        sharpness[tmp['key']] = []
                        resolution[tmp['key']] = []
                        coverage[tmp['key']] = []
                        times[tmp['key']] = []

                    sharpness[tmp['key']].append(tmp['sharpness'])
                    resolution[tmp['key']].append(tmp['resolution'])
                    coverage[tmp['key']].append(tmp['coverage'])
                    times[tmp['key']].append(tmp['time'])

    _process_end = time.time()

    print("Process End: {0: %H:%M:%S}".format(datetime.datetime.now()))

    print("Process Duration: {0}".format(_process_end - _process_start))

    return bUtil.save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, sintetic, times)
+ + +
[docs]def run_ahead(mfts, partitioner, train_data, test_data, steps, resolution, transformation=None, indexer=None): + """ + Probabilistic m-step ahead forecast benchmark function to be executed on threads + :param mfts: FTS model + :param partitioner: Universe of Discourse partitioner + :param train_data: data used to train the model + :param test_data: data used to test the model + :param steps: the number of steps ahead to forecast + :param resolution: the resolution of the forecast probability grid + :param transformation: data transformation + :param indexer: seasonal indexer + :return: a dictionary with the benchmark results + """ + pttr = str(partitioner.__module__).split('.')[-1] + _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions) + mfts.partitioner = partitioner + if transformation is not None: + mfts.append_transformation(transformation) + + try: + _start = time.time() + mfts.train(train_data, partitioner.sets, order=mfts.order) + _end = time.time() + times = _end - _start + + _crps1, _crps2, _t1, _t2 = benchmarks.get_distribution_statistics(test_data, mfts, steps=steps, + resolution=resolution) + _t1 += times + _t2 += times + except Exception as e: + print(e) + _crps1 = np.nan + _crps2 = np.nan + _t1 = np.nan + _t2 = np.nan + + ret = {'key': _key, 'obj': mfts, 'CRPS_Interval': _crps1, 'CRPS_Distribution': _crps2, 'TIME_Interval': _t1, 'TIME_Distribution': _t2} + + print(ret) + + return ret
+ + +
[docs]def ahead_sliding_window(data, windowsize, train, steps, resolution, models=None, partitioners=[Grid.GridPartitioner], + partitions=[10], max_order=3, transformation=None, indexer=None, dump=False, + save=False, file=None, sintetic=False): + """ + Parallel sliding window benchmarks for FTS probabilistic forecasters + :param data: time series data + :param windowsize: size of sliding window + :param train: percentage of the sliding window data used to train the models + :param steps: the number of steps ahead to forecast + :param resolution: the resolution of the forecast probability grid + :param models: FTS point forecasters + :param partitioners: Universe of Discourse partitioner + :param partitions: a list with the numbers of partitions of the Universe of Discourse + :param max_order: the max order of the models (for high order models) + :param transformation: data transformation + :param indexer: seasonal indexer + :param dump: if true, print the progress of the sliding windows + :param save: save results + :param file: file path to save the results + :param sintetic: if true, return only the average and standard deviation of the results + :return: DataFrame with the results + """ + _process_start = time.time() + + print("Process Start: {0: %H:%M:%S}".format(datetime.datetime.now())) + + num_cores = multiprocessing.cpu_count() + + pool = [] + + objs = {} + crps_interval = {} + crps_distr = {} + times1 = {} + times2 = {} + + for model in benchmarks.get_interval_methods(): + mfts = model("") + + if mfts.is_high_order: + for order in np.arange(1, max_order + 1): + if order >= mfts.min_order: + mfts = model("") + mfts.order = order + pool.append(mfts) + else: + pool.append(mfts) + + experiments = 0 + for ct, train, test in Util.sliding_window(data, windowsize, train): + experiments += 1 + + if dump: print('\nWindow: {0}\n'.format(ct)) + + for partition in partitions: + + for partitioner in partitioners: + + data_train_fs = partitioner(train, partition, transformation=transformation) + + results = Parallel(n_jobs=num_cores)( + delayed(run_ahead)(deepcopy(m), deepcopy(data_train_fs), deepcopy(train), deepcopy(test), + steps, resolution, transformation) + for m in pool) + + for tmp in results: + if tmp['key'] not in objs: + objs[tmp['key']] = tmp['obj'] + crps_interval[tmp['key']] = [] + crps_distr[tmp['key']] = [] + times1[tmp['key']] = [] + times2[tmp['key']] = [] + + crps_interval[tmp['key']].append(tmp['CRPS_Interval']) + crps_distr[tmp['key']].append(tmp['CRPS_Distribution']) + times1[tmp['key']].append(tmp['TIME_Interval']) + times2[tmp['key']].append(tmp['TIME_Distribution']) + + _process_end = time.time() + + print("Process End: {0: %H:%M:%S}".format(datetime.datetime.now())) + + print("Process Duration: {0}".format(_process_end - _process_start)) + + return Util.save_dataframe_ahead(experiments, file, objs, crps_interval, crps_distr, times1, times2, save, sintetic)
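A matching sketch for the probabilistic benchmark, under the same assumptions as the interval example above; the steps and resolution values are illustrative.

    # 20-step-ahead probabilistic forecasts, scored with CRPS on a
    # distribution grid whose spacing is given by `resolution`.
    df = ahead_sliding_window(data, windowsize=200, train=0.8,
                              steps=20, resolution=10,
                              partitions=[10], max_order=3)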
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/benchmarks/quantreg.html b/docs/_build/html/_modules/pyFTS/benchmarks/quantreg.html new file mode 100644 index 0000000..d085f92 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/benchmarks/quantreg.html @@ -0,0 +1,231 @@ + + + + + + + + pyFTS.benchmarks.quantreg — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.benchmarks.quantreg

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+import numpy as np
+import pandas as pd
+from statsmodels.regression.quantile_regression import QuantReg
+from statsmodels.tsa.tsatools import lagmat
+from pyFTS.common import SortedCollection, fts
+from pyFTS.probabilistic import ProbabilityDistribution
+
+
+
[docs]class QuantileRegression(fts.FTS): + """Façade for statsmodels.regression.quantile_regression""" + def __init__(self, **kwargs): + super(QuantileRegression, self).__init__(**kwargs) + self.name = "QR" + self.detail = "Quantile Regression" + self.is_high_order = True + self.has_point_forecasting = True + self.has_interval_forecasting = True + self.has_probability_forecasting = True + self.benchmark_only = True + self.min_order = 1 + self.alpha = kwargs.get("alpha", 0.05) + self.dist = kwargs.get("dist", False) + self.upper_qt = None + self.mean_qt = None + self.lower_qt = None + self.dist_qt = None + +
[docs] def train(self, data, **kwargs): + if self.indexer is not None and isinstance(data, pd.DataFrame): + data = self.indexer.get_data(data) + + lagdata, ndata = lagmat(data, maxlag=self.order, trim="both", original='sep') + + mqt = QuantReg(ndata, lagdata).fit(0.5) + if self.alpha is not None: + uqt = QuantReg(ndata, lagdata).fit(1 - self.alpha) + lqt = QuantReg(ndata, lagdata).fit(self.alpha) + + self.mean_qt = [k for k in mqt.params] + if self.alpha is not None: + self.upper_qt = [k for k in uqt.params] + self.lower_qt = [k for k in lqt.params] + + if self.dist: + self.dist_qt = [] + for alpha in np.arange(0.05,0.5,0.05): + lqt = QuantReg(ndata, lagdata).fit(alpha) + uqt = QuantReg(ndata, lagdata).fit(1 - alpha) + lo_qt = [k for k in lqt.params] + up_qt = [k for k in uqt.params] + self.dist_qt.append([lo_qt, up_qt]) + + self.shortname = "QAR(" + str(self.order) + ") - " + str(self.alpha)
+ +
[docs] def linearmodel(self,data,params): + #return params[0] + sum([ data[k] * params[k+1] for k in np.arange(0, self.order) ]) + return sum([data[k] * params[k] for k in np.arange(0, self.order)])
+ +
[docs] def point_to_interval(self, data, lo_params, up_params): + lo = self.linearmodel(data, lo_params) + up = self.linearmodel(data, up_params) + return [lo, up]
+ +
[docs] def interval_to_interval(self, data, lo_params, up_params): + lo = self.linearmodel([k[0] for k in data], lo_params) + up = self.linearmodel([k[1] for k in data], up_params) + return [lo, up]
+ +
[docs] def forecast(self, ndata, **kwargs): + + l = len(ndata) + + ret = [] + + for k in np.arange(self.order, l+1): #+1 to forecast one step ahead given all available lags + sample = ndata[k - self.order : k] + + ret.append(self.linearmodel(sample, self.mean_qt)) + + return ret
+ +
[docs] def forecast_interval(self, ndata, **kwargs): + + l = len(ndata) + + ret = [] + + for k in np.arange(self.order , l): + sample = ndata[k - self.order: k] + ret.append(self.point_to_interval(sample, self.lower_qt, self.upper_qt)) + + return ret
+ +
[docs] def forecast_ahead_interval(self, ndata, steps, **kwargs): + + smoothing = kwargs.get("smoothing", 0.9) + + l = len(ndata) + + ret = [] + + nmeans = self.forecast_ahead(ndata, steps, **kwargs) + + for k in np.arange(0, self.order): + nmeans.insert(k,ndata[-(k+1)]) + + for k in np.arange(self.order, steps+self.order): + intl = self.point_to_interval(nmeans[k - self.order: k], self.lower_qt, self.upper_qt) + + ret.append([intl[0]*(1 + k*smoothing), intl[1]*(1 + k*smoothing)]) + + return ret[-steps:]
+ +
[docs] def forecast_distribution(self, ndata, **kwargs): + + ret = [] + + l = len(ndata) + + for k in np.arange(self.order, l + 1): + dist = ProbabilityDistribution.ProbabilityDistribution(type="histogram", + uod=[self.original_min, self.original_max]) + intervals = [] + for qt in self.dist_qt: + sample = ndata[k - self.order: k] + intl = self.point_to_interval(sample, qt[0], qt[1]) + intervals.append(intl) + + dist.append_interval(intervals) + + ret.append(dist) + + return ret
+ +
[docs] def forecast_ahead_distribution(self, ndata, steps, **kwargs): + + ret = [] + + for k in np.arange(self.order, steps + self.order): + dist = ProbabilityDistribution.ProbabilityDistribution(type="histogram", + uod=[self.original_min, self.original_max]) + intervals = [[k, k] for k in ndata[-self.order:]] + for qt in self.dist_qt: + intl = self.interval_to_interval([intervals[x] for x in np.arange(k - self.order, k)], qt[0], qt[1]) + intervals.append(intl) + dist.append_interval(intervals) + + ret.append(dist) + + return ret
+
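A minimal usage sketch of this façade, assuming a synthetic series; the order and alpha values are illustrative.

    import numpy as np

    data = np.cumsum(np.random.normal(0, 1, 500))

    model = QuantileRegression(order=2, alpha=0.05)
    model.train(data[:400])

    point = model.forecast(data[400:])           # median (0.5 quantile) forecasts
    bands = model.forecast_interval(data[400:])  # [alpha, 1 - alpha] quantile bounds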
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/Composite.html b/docs/_build/html/_modules/pyFTS/common/Composite.html new file mode 100644 index 0000000..971c7ed --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/Composite.html @@ -0,0 +1,137 @@ + + + + + + + + pyFTS.common.Composite — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.Composite

+"""
+Composite Fuzzy Sets
+"""
+
+import numpy as np
+from pyFTS import *
+from pyFTS.common import Membership, FuzzySet
+
+
+
[docs]class FuzzySet(FuzzySet.FuzzySet): + """ + Composite Fuzzy Set + """ + def __init__(self, name, superset=False): + """ + Create an empty composite fuzzy set + :param name: fuzzy set name + :param superset: boolean; if True the set is composed of other fuzzy sets (union), otherwise of membership functions (intersection) + """ + super(FuzzySet, self).__init__(name, None, None, None, type='composite') + self.superset = superset + if self.superset: + self.sets = [] + else: + self.mf = [] + self.parameters = [] + +
[docs] def membership(self, x): + """ + Calculate the membership value of a given input + + :param x: input value + :return: membership value of x at this fuzzy set + """ + if self.superset: + return max([s.membership(x) for s in self.sets]) + else: + return min([self.mf[ct](x, self.parameters[ct]) for ct in np.arange(0, len(self.mf))])
+ +
[docs] def append(self, mf, parameters): + """ + Adds a new function to composition + + :param mf: + :param parameters: + :return: + """ + self.mf.append(mf) + self.parameters.append(parameters)
+ +
[docs] def append_set(self, set): + """ + Adds a new fuzzy set to the composition + + :param set: the fuzzy set to add + """ + self.sets.append(set)
+
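A small sketch of composing a set from two membership functions; the set name and parameters are illustrative.

    from pyFTS.common import Membership

    fs = FuzzySet("A")                     # composite of membership functions
    fs.append(Membership.trimf, [0, 5, 10])
    fs.append(Membership.gaussmf, [5, 2])

    print(fs.membership(4))  # min(trimf(4), gaussmf(4)) = min(0.8, ~0.88) = 0.8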
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/FLR.html b/docs/_build/html/_modules/pyFTS/common/FLR.html new file mode 100644 index 0000000..c129dd0 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/FLR.html @@ -0,0 +1,196 @@ + + + + + + + + pyFTS.common.FLR — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.FLR

+"""
+This module implements functions for Fuzzy Logical Relationship generation
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet
+
+
+
[docs]class FLR(object): + """ + Fuzzy Logical Relationship + + Represents a temporal transition from the fuzzy set LHS at time t to the fuzzy set RHS at time t+1. + """ + def __init__(self, LHS, RHS): + """ + Creates a Fuzzy Logical Relationship + """ + self.LHS = LHS + """Left Hand Side fuzzy set""" + self.RHS = RHS + """Right Hand Side fuzzy set""" + + def __str__(self): + return str(self.LHS) + " -> " + str(self.RHS)
+ + +
[docs]class IndexedFLR(FLR): + """Season Indexed Fuzzy Logical Relationship""" + def __init__(self, index, LHS, RHS): + """ + Create a Season Indexed Fuzzy Logical Relationship + """ + super(IndexedFLR, self).__init__(LHS, RHS) + self.index = index + """seasonal index""" + + def __str__(self): + return str(self.index) + ": "+ str(self.LHS) + " -> " + str(self.RHS)
+ + +
[docs]def generate_high_order_recurrent_flr(fuzzyData): + """ + Create an ordered FLR set from a list of fuzzy sets with recurrence + + :param fuzzyData: ordered list of fuzzy sets + :return: ordered list of FLR + """ + flrs = [] + for i in np.arange(1,len(fuzzyData)): + lhs = fuzzyData[i - 1] + rhs = fuzzyData[i] + if isinstance(lhs, list) and isinstance(rhs, list): + for l in lhs: + for r in rhs: + tmp = FLR(l, r) + flrs.append(tmp) + else: + tmp = FLR(lhs,rhs) + flrs.append(tmp) + return flrs
+ + +
[docs]def generate_recurrent_flrs(fuzzyData): + """ + Create an ordered FLR set from a list of fuzzy sets with recurrence + + :param fuzzyData: ordered list of fuzzy sets + :return: ordered list of FLR + """ + flrs = [] + for i in np.arange(1,len(fuzzyData)): + lhs = [fuzzyData[i - 1]] + rhs = [fuzzyData[i]] + for l in np.array(lhs).flatten(): + for r in np.array(rhs).flatten(): + tmp = FLR(l, r) + flrs.append(tmp) + return flrs
+ + +
[docs]def generate_non_recurrent_flrs(fuzzyData): + """ + Create an ordered FLR set from a list of fuzzy sets without recurrence + + :param fuzzyData: ordered list of fuzzy sets + :return: ordered list of FLR + """ + flrs = generate_recurrent_flrs(fuzzyData) + tmp = {} + for flr in flrs: tmp[str(flr)] = flr + ret = [value for key, value in tmp.items()] + return ret
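A quick sketch of FLR extraction from an already fuzzified series (the set names are illustrative):

    fuzzified = ['A1', 'A2', 'A1', 'A2']

    for flr in generate_recurrent_flrs(fuzzified):
        print(flr)                     # A1 -> A2, A2 -> A1, A1 -> A2

    # The non-recurrent variant collapses the repeated A1 -> A2:
    print(len(generate_non_recurrent_flrs(fuzzified)))   # 2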
+ + +
[docs]def generate_indexed_flrs(sets, indexer, data, transformation=None, alpha_cut=0.0): + """ + Create a season-indexed ordered FLR set from a list of fuzzy sets with recurrence + + :param sets: fuzzy sets + :param indexer: seasonality indexer + :param data: original data + :param transformation: data transformation to be applied on the indexed data + :param alpha_cut: minimal membership to be considered in the fuzzification process + :return: ordered list of FLR + """ + flrs = [] + index = indexer.get_season_of_data(data) + ndata = indexer.get_data(data) + if transformation is not None: + ndata = transformation.apply(ndata) + for k in np.arange(1,len(ndata)): + lhs = FuzzySet.fuzzyfy_series([ndata[k - 1]], sets, method='fuzzy',alpha_cut=alpha_cut) + rhs = FuzzySet.fuzzyfy_series([ndata[k]], sets, method='fuzzy',alpha_cut=alpha_cut) + season = index[k] + for _l in np.array(lhs).flatten(): + for _r in np.array(rhs).flatten(): + flr = IndexedFLR(season,_l,_r) + flrs.append(flr) + return flrs
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/FuzzySet.html b/docs/_build/html/_modules/pyFTS/common/FuzzySet.html new file mode 100644 index 0000000..26d3814 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/FuzzySet.html @@ -0,0 +1,274 @@ + + + + + + + + pyFTS.common.FuzzySet — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.FuzzySet

+import numpy as np
+from pyFTS import *
+from pyFTS.common import Membership
+
+
+
[docs]class FuzzySet(object): + """ + Fuzzy Set + """ + def __init__(self, name, mf, parameters, centroid, alpha=1.0, **kwargs): + """ + Create a Fuzzy Set + """ + self.name = name + """The fuzzy set name""" + self.mf = mf + """The membership function""" + self.parameters = parameters + """The parameters of the membership function""" + self.centroid = centroid + """The fuzzy set center of mass (or midpoint)""" + self.alpha = alpha + """The alpha cut value""" + self.type = kwargs.get('type', 'common') + """The fuzzy set type (common, composite, nonstationary, etc)""" + self.variable = kwargs.get('variable',None) + """In multivariate time series, indicates to which variable this fuzzy set belongs""" + self.Z = None + """Partition function with respect to the membership function""" + if self.mf == Membership.trimf: + self.lower = min(parameters) + self.upper = max(parameters) + elif self.mf == Membership.gaussmf: + self.lower = parameters[0] - parameters[1]*3 + self.upper = parameters[0] + parameters[1]*3 + self.metadata = {} +
[docs] def membership(self, x): + """ + Calculate the membership value of a given input + + :param x: input value + :return: membership value of x at this fuzzy set + """ + return self.mf(x, self.parameters) * self.alpha
+ +
[docs] def partition_function(self,uod=None, nbins=100): + """ + Calculate the partition function over the membership function. + + :param uod: + :param nbins: + :return: + """ + if self.Z is None and uod is not None: + self.Z = 0.0 + for k in np.linspace(uod[0], uod[1], nbins): + self.Z += self.membership(k) + + return self.Z
+ + def __str__(self): + return self.name + ": " + str(self.mf.__name__) + "(" + str(self.parameters) + ")"
+ + +
[docs]def set_ordered(fuzzySets): + """Order a fuzzy set list by their centroids""" + if len(fuzzySets) > 0: + tmp1 = [fuzzySets[k] for k in fuzzySets.keys()] + return [k.name for k in sorted(tmp1, key=lambda x: x.centroid)]
+ + +
[docs]def fuzzyfy_instance(inst, fuzzySets, ordered_sets=None): + """ + Calculate the membership values for a data point given fuzzy sets + + :param inst: data point + :param fuzzySets: dict of fuzzy sets + :return: array of membership values + """ + + if ordered_sets is None: + ordered_sets = set_ordered(fuzzySets) + + mv = [] + for key in ordered_sets: + mv.append( fuzzySets[key].membership(inst)) + return np.array(mv)
+ + +
[docs]def fuzzyfy_instances(data, fuzzySets, ordered_sets=None): + """ + Calculate the membership values of a list of data points given fuzzy sets + + :param data: list of data points + :param fuzzySets: dict of fuzzy sets + :return: a list with the arrays of membership values + """ + ret = [] + if ordered_sets is None: + ordered_sets = set_ordered(fuzzySets) + for inst in data: + mv = np.array([fuzzySets[key].membership(inst) for key in ordered_sets]) + ret.append(mv) + return ret
+ + +
[docs]def get_fuzzysets(inst, fuzzySets, ordered_sets=None, alpha_cut=0.0): + """ + Return the fuzzy sets whose membership value for inst is greater than the alpha_cut + + :param inst: data point + :param fuzzySets: dict of fuzzy sets + :param alpha_cut: minimal membership to be considered in the fuzzification process + :return: a list with the names of the matching fuzzy sets + """ + + if ordered_sets is None: + ordered_sets = set_ordered(fuzzySets) + + fs = [key for key in ordered_sets if fuzzySets[key].membership(inst) > alpha_cut] + return fs
+ +
[docs]def get_maximum_membership_fuzzyset(inst, fuzzySets, ordered_sets=None): + """ + Fuzzify a data point, returning the fuzzy set with maximum membership value + + :param inst: data point + :param fuzzySets: dict of fuzzy sets + :return: fuzzy set with maximum membership + """ + if ordered_sets is None: + ordered_sets = set_ordered(fuzzySets) + mv = np.array([fuzzySets[key].membership(inst) for key in ordered_sets]) + key = ordered_sets[np.argwhere(mv == max(mv))[0, 0]] + return fuzzySets[key]
+ + +
[docs]def get_maximum_membership_fuzzyset_index(inst, fuzzySets): + """ + Fuzzify a data point, returning the index of the fuzzy set with maximum membership value + + :param inst: data point + :param fuzzySets: dict of fuzzy sets + :return: the index of the fuzzy set with maximum membership + """ + mv = fuzzyfy_instance(inst, fuzzySets) + return np.argwhere(mv == max(mv))[0, 0]
+ + +
[docs]def fuzzyfy_series_old(data, fuzzySets, method='maximum'): + fts = [] + for item in data: + fts.append(get_maximum_membership_fuzzyset(item, fuzzySets).name) + return fts
+ + +
[docs]def fuzzyfy_series(data, fuzzySets, method='maximum', alpha_cut=0.0): + fts = [] + ordered_sets = set_ordered(fuzzySets) + for t, i in enumerate(data): + mv = np.array([fuzzySets[key].membership(i) for key in ordered_sets]) + if len(mv) == 0: + sets = check_bounds(i, fuzzySets, ordered_sets) + else: + if method == 'fuzzy': + ix = np.ravel(np.argwhere(mv > alpha_cut)) + sets = [fuzzySets[ordered_sets[i]].name for i in ix] + elif method == 'maximum': + mx = max(mv) + ix = np.ravel(np.argwhere(mv == mx)) + sets = fuzzySets[ordered_sets[ix[0]]].name + fts.append(sets) + return fts
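For instance, fuzzifying a short series against three hand-built triangular sets (names and parameters are illustrative):

    from pyFTS.common import Membership

    sets = {name: FuzzySet(name, Membership.trimf, params, params[1])
            for name, params in [('A1', [0, 0, 5]),
                                 ('A2', [0, 5, 10]),
                                 ('A3', [5, 10, 10])]}

    print(fuzzyfy_series([1, 4, 8], sets, method='maximum'))         # ['A1', 'A2', 'A3']
    print(fuzzyfy_series([4], sets, method='fuzzy', alpha_cut=0.1))  # [['A1', 'A2']]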
+ + +
[docs]def grant_bounds(data, sets, ordered_sets): + if data < sets[ordered_sets[0]].lower: + return sets[ordered_sets[0]].lower + elif data > sets[ordered_sets[-1]].upper: + return sets[ordered_sets[-1]].upper + else: + return data
+ +
[docs]def check_bounds(data, sets, ordered_sets): + if data < sets[ordered_sets[0]].lower: + return sets[ordered_sets[0]] + elif data > sets[ordered_sets[-1]].upper: + return sets[ordered_sets[-1]]
+ + +
[docs]def check_bounds_index(data, sets, ordered_sets): + if data < sets[ordered_sets[0]].get_lower(): + return 0 + elif data > sets[ordered_sets[-1]].get_upper(): + return len(sets) -1
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/Membership.html b/docs/_build/html/_modules/pyFTS/common/Membership.html new file mode 100644 index 0000000..3e01b9b --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/Membership.html @@ -0,0 +1,157 @@ + + + + + + + + pyFTS.common.Membership — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.Membership

+"""
+Membership functions for Fuzzy Sets
+"""
+
+import numpy as np
+import math
+from pyFTS import *
+
+
+
[docs]def trimf(x, parameters): + """ + Triangular fuzzy membership function + + :param x: data point + :param parameters: a list with 3 real values + :return: the membership value of x given the parameters + """ + xx = np.round(x, 3) + if xx < parameters[0]: + return 0 + elif parameters[0] <= xx < parameters[1]: + return (xx - parameters[0]) / (parameters[1] - parameters[0]) + elif parameters[1] <= xx <= parameters[2]: + return (parameters[2] - xx) / (parameters[2] - parameters[1]) + else: + return 0
+ + +
[docs]def trapmf(x, parameters): + """ + Trapezoidal fuzzy membership function + + :param x: data point + :param parameters: a list with 4 real values + :return: the membership value of x given the parameters + """ + if x < parameters[0]: + return 0 + elif parameters[0] <= x < parameters[1]: + return (x - parameters[0]) / (parameters[1] - parameters[0]) + elif parameters[1] <= x <= parameters[2]: + return 1 + elif parameters[2] <= x <= parameters[3]: + return (parameters[3] - x) / (parameters[3] - parameters[2]) + else: + return 0
+ + +
[docs]def gaussmf(x, parameters): + """ + Gaussian fuzzy membership function + + :param x: data point + :param parameters: a list with 2 real values (mean and standard deviation) + :return: the membership value of x given the parameters + """ + return math.exp((-(x - parameters[0])**2)/(2 * parameters[1]**2))
+ + +
[docs]def bellmf(x, parameters): + """ + Bell shaped membership function + + :param x: data point + :param parameters: a list with 3 real values (width, slope and center) + :return: the membership value of x given the parameters + """ + return 1 / (1 + abs((x - parameters[2]) / parameters[0]) ** (2 * parameters[1]))
+ + +
[docs]def sigmf(x, parameters): + """ + Sigmoid / Logistic membership function + + :param x: data point + :param parameters: a list with 2 real values (smoothness and midpoint) + :return: the membership value of x given the parameters + """ + return 1 / (1 + math.exp(-parameters[0] * (x - parameters[1])))
+
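Direct evaluation of these functions (the values are illustrative):

    print(trimf(4.0, [0, 5, 10]))      # 0.8
    print(trapmf(7.0, [0, 2, 8, 10]))  # 1
    print(gaussmf(6.0, [5, 2]))        # exp(-1/8) ~ 0.8825
    print(sigmf(5.0, [1, 5]))          # 0.5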
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/SortedCollection.html b/docs/_build/html/_modules/pyFTS/common/SortedCollection.html new file mode 100644 index 0000000..c2b69a6 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/SortedCollection.html @@ -0,0 +1,305 @@ + + + + + + + + pyFTS.common.SortedCollection — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.SortedCollection

+from bisect import bisect_left, bisect_right
+
+#
+# Original Source Code: https://code.activestate.com/recipes/577197-sortedcollection/
+# Author: RAYMOND HETTINGER
+
+
+
[docs]class SortedCollection(object): + '''Sequence sorted by a key function. + + SortedCollection() is much easier to work with than using bisect() directly. + It supports key functions like those used in sorted(), min(), and max(). + The result of the key function call is saved so that keys can be searched + efficiently. + + Instead of returning an insertion-point which can be hard to interpret, the + five find-methods return a specific item in the sequence. They can scan for + exact matches, the last item less-than-or-equal to a key, or the first item + greater-than-or-equal to a key. + + Once found, an item's ordinal position can be located with the index() method. + New items can be added with the insert() and insert_right() methods. + Old items can be deleted with the remove() method. + + The usual sequence methods are provided to support indexing, slicing, + length lookup, clearing, copying, forward and reverse iteration, contains + checking, item counts, item removal, and a nice looking repr. + + Finding and indexing are O(log n) operations while iteration and insertion + are O(n). The initial sort is O(n log n). + + The key function is stored in the 'key' attribute for easy introspection or + so that you can assign a new key function (triggering an automatic re-sort). + + In short, the class was designed to handle all of the common use cases for + bisect but with a simpler API and support for key functions. + + >>> from pprint import pprint + >>> from operator import itemgetter + + >>> s = SortedCollection(key=itemgetter(2)) + >>> for record in [ + ... ('roger', 'young', 30), + ... ('angela', 'jones', 28), + ... ('bill', 'smith', 22), + ... ('david', 'thomas', 32)]: + ... s.insert(record) + + >>> pprint(list(s)) # show records sorted by age + [('bill', 'smith', 22), + ('angela', 'jones', 28), + ('roger', 'young', 30), + ('david', 'thomas', 32)] + + >>> s.find_le(29) # find oldest person aged 29 or younger + ('angela', 'jones', 28) + >>> s.find_lt(28) # find oldest person under 28 + ('bill', 'smith', 22) + >>> s.find_gt(28) # find youngest person over 28 + ('roger', 'young', 30) + + >>> r = s.find_ge(32) # find youngest person aged 32 or older + >>> s.index(r) # get the index of their record + 3 + >>> s[3] # fetch the record at that index + ('david', 'thomas', 32) + + >>> s.key = itemgetter(0) # now sort by first name + >>> pprint(list(s)) + [('angela', 'jones', 28), + ('bill', 'smith', 22), + ('david', 'thomas', 32), + ('roger', 'young', 30)] + + ''' + + def __init__(self, iterable=(), key=None): + self._given_key = key + key = (lambda x: x) if key is None else key + decorated = sorted((key(item), item) for item in iterable) + self._keys = [k for k, item in decorated] + self._items = [item for k, item in decorated] + self._key = key + + def _getkey(self): + return self._key + + def _setkey(self, key): + if key is not self._key: + self.__init__(self._items, key=key) + + def _delkey(self): + self._setkey(None) + + key = property(_getkey, _setkey, _delkey, 'key function') +
[docs] def clear(self): + self.__init__([], self._key)
+ +
[docs] def copy(self): + return self.__class__(self, self._key)
+ + def __len__(self): + return len(self._items) + + def __getitem__(self, i): + return self._items[i] + + def __iter__(self): + return iter(self._items) + + def __reversed__(self): + return reversed(self._items) + + def __repr__(self): + return '%s(%r, key=%s)' % ( + self.__class__.__name__, + self._items, + getattr(self._given_key, '__name__', repr(self._given_key)) + ) + + def __reduce__(self): + return self.__class__, (self._items, self._given_key) + + def __contains__(self, item): + k = self._key(item) + i = bisect_left(self._keys, k) + j = bisect_right(self._keys, k) + return item in self._items[i:j] + +
[docs] def index(self, item): + 'Find the position of an item. Raise ValueError if not found.' + k = self._key(item) + i = bisect_left(self._keys, k) + j = bisect_right(self._keys, k) + return self._items[i:j].index(item) + i
+ +
[docs] def count(self, item): + 'Return number of occurrences of item' + k = self._key(item) + i = bisect_left(self._keys, k) + j = bisect_right(self._keys, k) + return self._items[i:j].count(item)
+ +
[docs] def insert(self, item): + 'Insert a new item. If equal keys are found, add to the left' + k = self._key(item) + i = bisect_left(self._keys, k) + self._keys.insert(i, k) + self._items.insert(i, item)
+ +
[docs] def insert_right(self, item): + 'Insert a new item. If equal keys are found, add to the right' + k = self._key(item) + i = bisect_right(self._keys, k) + self._keys.insert(i, k) + self._items.insert(i, item)
+ +
[docs] def remove(self, item): + 'Remove first occurrence of item. Raise ValueError if not found' + i = self.index(item) + del self._keys[i] + del self._items[i]
+ +
[docs] def find(self, k): + 'Return first item with a key == k. Raise ValueError if not found.' + i = bisect_left(self._keys, k) + if i != len(self) and self._keys[i] == k: + return self._items[i] + raise ValueError('No item found with key equal to: %r' % (k,))
+ +
[docs] def find_le(self, k): + 'Return last item with a key <= k. Raise ValueError if not found.' + i = bisect_right(self._keys, k) + if i: + return self._items[i-1] + raise ValueError('No item found with key at or below: %r' % (k,))
+ +
[docs] def find_lt(self, k): + 'Return last item with a key < k. Raise ValueError if not found.' + i = bisect_left(self._keys, k) + if i: + return self._items[i-1] + raise ValueError('No item found with key below: %r' % (k,))
+ +
[docs] def find_ge(self, k): + 'Return first item with a key >= k. Raise ValueError if not found' + i = bisect_left(self._keys, k) + if i != len(self): + return self._items[i] + raise ValueError('No item found with key at or above: %r' % (k,))
+ +
[docs] def find_gt(self, k): + 'Return first item with a key > k. Raise ValueError if not found' + i = bisect_right(self._keys, k) + if i != len(self): + return self._items[i] + raise ValueError('No item found with key above: %r' % (k,))
+ +
[docs] def between(self, ge, le): + g = bisect_left(self._keys, ge) + l = bisect_right(self._keys, le) + if g != len(self) and l != len(self): + return self._items[g : l] + raise ValueError('No item found between keys : %r,%r' % (ge,le))
+ +
[docs] def inside(self, ge, le): + l = bisect_right(self._keys, le) + g = bisect_left(self._keys, ge) + if g != len(self) and l != len(self) and g != l: + return self._items[g : l] + elif g != len(self) and l != len(self) and g == l: + return [ self._items[g] ] + elif g != len(self): + return self._items[g-1: l] + elif l != len(self): + return self._items[g: l-1] + else: + return self._items[g - 1: l - 1] + raise ValueError('No item found inside keys: %r,%r' % (ge,le))
+ +
[docs] def around(self, k): + g = bisect_right(self._keys, k) + l = bisect_left(self._keys, k) + if g != len(self) and l != len(self): + return self._items[g : l] + raise ValueError('No item found around key : %r' % (k,))
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/Transformations.html b/docs/_build/html/_modules/pyFTS/common/Transformations.html new file mode 100644 index 0000000..196b132 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/Transformations.html @@ -0,0 +1,313 @@ + + + + + + + + pyFTS.common.Transformations — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.Transformations

+"""
+Common data transformation used on pre and post processing of the FTS
+"""
+
+import numpy as np
+import math
+from pyFTS import *
+
+
+
[docs]class Transformation(object): + """ + Data transformation used on pre and post processing of the FTS + """ + + def __init__(self, **kwargs): + self.is_invertible = True + self.minimal_length = 1 + +
[docs] def apply(self, data, param, **kwargs): + """ + Apply the transformation on input data + + :param data: input data + :param param: + :param kwargs: + :return: numpy array with transformed data + """ + pass
+ +
[docs] def inverse(self, data, param, **kwargs): + """ + Apply the inverse transformation to the data + + :param data: transformed data + :param param: transformation specific parameters + :param kwargs: + :return: numpy array with inverse transformed data + """ + pass
+ + def __str__(self): + return self.__class__.__name__ + '(' + str(self.parameters) + ')'
+ + +
[docs]class Differential(Transformation): + """ + Differentiation data transform + """ + def __init__(self, lag): + super(Differential, self).__init__() + self.lag = lag + self.minimal_length = 2 + + @property + def parameters(self): + return self.lag + +
[docs] def apply(self, data, param=None, **kwargs): + if param is not None: + self.lag = param + + if not isinstance(data, (list, np.ndarray, np.generic)): + data = [data] + + if isinstance(data, (np.ndarray, np.generic)): + data = data.tolist() + + n = len(data) + diff = [data[t] - data[t - self.lag] for t in np.arange(self.lag, n)] + for t in np.arange(0, self.lag): diff.insert(0, 0) + return diff
+ +
[docs] def inverse(self, data, param, **kwargs): + + type = kwargs.get("type","point") + steps_ahead = kwargs.get("steps_ahead", 1) + + if isinstance(data, (np.ndarray, np.generic)): + data = data.tolist() + + if not isinstance(data, list): + data = [data] + + n = len(data) + + if steps_ahead == 1: + if type == "point": + inc = [data[t] + param[t] for t in np.arange(0, n)] + elif type == "interval": + inc = [[data[t][0] + param[t], data[t][1] + param[t]] for t in np.arange(0, n)] + elif type == "distribution": + for t in np.arange(0, n): + data[t].differential_offset(param[t]) + inc = data + else: + if type == "point": + inc = [data[0] + param[0]] + for t in np.arange(1, steps_ahead): + inc.append(data[t] + inc[t-1]) + elif type == "interval": + inc = [[data[0][0] + param[0], data[0][1] + param[0]]] + for t in np.arange(1, steps_ahead): + inc.append([data[t][0] + np.nanmean(inc[t-1]), data[t][1] + np.nanmean(inc[t-1])]) + elif type == "distribution": + data[0].differential_offset(param[0]) + for t in np.arange(1, steps_ahead): + ex = data[t-1].expected_value() + data[t].differential_offset(ex) + inc = data + + if n == 1: + return inc[0] + else: + return inc
+ + +
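A round-trip sketch with the first-difference transform (the values are illustrative):

    diff = Differential(1)
    series = [10, 12, 15, 14]

    d = diff.apply(series)                                 # [0, 2, 3, -1]
    # inverse() needs the pre-difference values as offsets:
    back = diff.inverse(d[1:], series[:-1], type='point')  # [12, 15, 14]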
[docs]class Scale(Transformation): + """ + Scale data inside an interval [min, max] + """ + def __init__(self, min=0, max=1): + super(Scale, self).__init__() + self.data_max = None + self.data_min = None + self.transf_max = max + self.transf_min = min + + @property + def parameters(self): + return [self.transf_max, self.transf_min] +
[docs] def apply(self, data, param=None,**kwargs): + if self.data_max is None: + self.data_max = np.nanmax(data) + self.data_min = np.nanmin(data) + data_range = self.data_max - self.data_min + transf_range = self.transf_max - self.transf_min + if isinstance(data, list): + tmp = [(k + (-1 * self.data_min)) / data_range for k in data] + tmp2 = [ (k * transf_range) + self.transf_min for k in tmp] + else: + tmp = (data + (-1 * self.data_min)) / data_range + tmp2 = (tmp * transf_range) + self.transf_min + + return tmp2
+ +
[docs] def inverse(self, data, param, **kwargs): + data_range = self.data_max - self.data_min + transf_range = self.transf_max - self.transf_min + if isinstance(data, list): + tmp2 = [(k - self.transf_min) / transf_range for k in data] + tmp = [(k * data_range) + self.data_min for k in tmp2] + else: + tmp2 = (data - self.transf_min) / transf_range + tmp = (tmp2 * data_range) + self.data_min + return tmp
+ + +
[docs]class AdaptiveExpectation(Transformation): + """ + Adaptive Expectation post processing + """ + def __init__(self, parameters): + super(AdaptiveExpectation, self).__init__() + self.h = parameters + + @property + def parameters(self): + return self.h +
[docs] def apply(self, data, param=None,**kwargs): + return data
+ +
[docs] def inverse(self, data, param,**kwargs): + n = len(data) + + inc = [param[t] + self.h*(data[t] - param[t]) for t in np.arange(0, n)] + + if n == 1: + return inc[0] + else: + return inc
+ + +
[docs]class BoxCox(Transformation): + """ + Box-Cox power transformation + """ + def __init__(self, plambda): + super(BoxCox, self).__init__() + self.plambda = plambda + + @property + def parameters(self): + return self.plambda + +
[docs] def apply(self, data, param=None, **kwargs): + if self.plambda != 0: + modified = [(dat ** self.plambda - 1) / self.plambda for dat in data] + else: + modified = [np.log(dat) for dat in data] + return np.array(modified)
+ +
[docs] def inverse(self, data, param=None, **kwargs): + if self.plambda != 0: + modified = [np.exp(np.log(dat * self.plambda + 1) / self.plambda) for dat in data] + else: + modified = [np.exp(dat) for dat in data] + return np.array(modified)
+ + +
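A round-trip sketch for the power transform (the lambda and values are illustrative):

    bc = BoxCox(0.5)

    t = bc.apply([1.0, 4.0, 9.0])   # (x**0.5 - 1) / 0.5 -> [0., 2., 4.]
    x = bc.inverse(t)               # back to [1., 4., 9.]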
[docs]def Z(original): + mu = np.mean(original) + sigma = np.std(original) + z = [(k - mu)/sigma for k in original] + return z
+ + +# retrieved from Sadaei and Lee (2014) - Multilayer Stock ForecastingModel Using Fuzzy Time Series +
[docs]def roi(original): + n = len(original) + roi = [] + for t in np.arange(0, n-1): + roi.append( (original[t+1] - original[t])/original[t] ) + return roi
+ +
[docs]def smoothing(original, lags): + pass
+ +
[docs]def aggregate(original, operation): + pass
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/Util.html b/docs/_build/html/_modules/pyFTS/common/Util.html new file mode 100644 index 0000000..8df5383 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/Util.html @@ -0,0 +1,413 @@ + + + + + + + + pyFTS.common.Util — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.Util

+"""
+Common facilities for pyFTS
+"""
+
+import time
+import matplotlib.pyplot as plt
+import dill
+import numpy as np
+
+
+
[docs]def plot_rules(model, size=[5, 5], axis=None, rules_by_axis=None, columns=1): + if axis is None and rules_by_axis is None: + rows = 1 + elif axis is None and rules_by_axis is not None: + rows = (((len(model.flrgs.keys())//rules_by_axis)) // columns)+1 + + fig, axis = plt.subplots(nrows=rows, ncols=columns, figsize=size) + + if rules_by_axis is None: + draw_sets_on_axis(axis, model, size) + + _lhs = model.partitioner.ordered_sets if not model.is_high_order else model.flrgs.keys() + + for ct, key in enumerate(_lhs): + + xticks = [] + xtickslabels = [] + + if rules_by_axis is None: + ax = axis + else: + colcount = (ct // rules_by_axis) % columns + rowcount = (ct // rules_by_axis) // columns + + if rows > 1 and columns > 1: + ax = axis[rowcount, colcount] + elif columns > 1: + ax = axis[rowcount] + else: + ax = axis + + if ct % rules_by_axis == 0: + draw_sets_on_axis(ax, model, size) + + if not model.is_high_order: + if key in model.flrgs: + x = (ct % rules_by_axis) + 1 + flrg = model.flrgs[key] + y = model.sets[key].centroid + ax.plot([x],[y],'o') + xticks.append(x) + xtickslabels.append(key) + for rhs in flrg.RHS: + dest = model.sets[rhs].centroid + ax.arrow(x+.1, y, 0.8, dest - y, #length_includes_head=True, + head_width=0.1, head_length=0.1, shape='full', overhang=0, + fc='k', ec='k') + else: + flrg = model.flrgs[key] + x = (ct%rules_by_axis)*model.order + 1 + for ct2, lhs in enumerate(flrg.LHS): + y = model.sets[lhs].centroid + ax.plot([x+ct2], [y], 'o') + xticks.append(x+ct2) + xtickslabels.append(lhs) + for ct2 in range(1, model.order): + fs1 = flrg.LHS[ct2-1] + fs2 = flrg.LHS[ct2] + y = model.sets[fs1].centroid + dest = model.sets[fs2].centroid + ax.plot([x+ct2-1,x+ct2], [y,dest],'-') + + y = model.sets[flrg.LHS[-1]].centroid + for rhs in flrg.RHS: + dest = model.sets[rhs].centroid + ax.arrow(x + model.order -1 + .1, y, 0.8, dest - y, # length_includes_head=True, + head_width=0.1, head_length=0.1, shape='full', overhang=0, + fc='k', ec='k') + + + ax.set_xticks(xticks) + ax.set_xticklabels(xtickslabels) + ax.set_xlim([0,rules_by_axis*model.order+1]) + + plt.tight_layout() + plt.show()
+ + +
[docs]def draw_sets_on_axis(axis, model, size): + if axis is None: + fig, axis = plt.subplots(nrows=1, ncols=1, figsize=size) + for ct, key in enumerate(model.partitioner.ordered_sets): + fs = model.sets[key] + axis.plot([0, 1, 0], fs.parameters, label=fs.name) + axis.axhline(fs.centroid, c="lightgray", alpha=0.5) + axis.set_xlim([0, len(model.partitioner.ordered_sets)]) + axis.set_xticks(range(0, len(model.partitioner.ordered_sets))) + tmp = [''] + tmp.extend(model.partitioner.ordered_sets) + axis.set_xticklabels(tmp) + axis.set_ylim([model.partitioner.min, model.partitioner.max]) + axis.set_yticks([model.sets[k].centroid for k in model.partitioner.ordered_sets]) + axis.set_yticklabels([str(round(model.sets[k].centroid, 1)) + " - " + k + for k in model.partitioner.ordered_sets])
+ + +current_milli_time = lambda: int(round(time.time() * 1000)) + + +
[docs]def uniquefilename(name): + if '.' in name: + tmp = name.split('.') + return tmp[0] + str(current_milli_time()) + '.' + tmp[1] + else: + return name + str(current_milli_time())
+ + + +
[docs]def show_and_save_image(fig, file, flag, lgd=None): + """ + Show an image and save it on file + + :param fig: Matplotlib Figure object + :param file: filename to save the picture + :param flag: if True the image will be shown and saved + :param lgd: legend + """ + if flag: + plt.show() + if lgd is not None: + fig.savefig(file, additional_artists=lgd,bbox_inches='tight') #bbox_extra_artists=(lgd,), ) + else: + fig.savefig(file) + plt.close(fig)
+ + +
[docs]def enumerate2(xs, start=0, step=1): + for x in xs: + yield (start, x) + start += step
+ + +
[docs]def sliding_window(data, windowsize, train=0.8, inc=0.1, **kwargs): + """ + Sliding window method of cross validation for time series + + :param data: the entire dataset + :param windowsize: window size + :param train: percentage of the window size used for training the models + :param inc: percentage of the window size used to slide the window + :return: window count, training set, test set + """ + l = len(data) + ttrain = int(round(windowsize * train, 0)) + ic = int(round(windowsize * inc, 0)) + + progressbar = kwargs.get('progress', None) + + rng = np.arange(0,l-windowsize+ic,ic) + + if progressbar: + from tqdm import tqdm + rng = tqdm(rng) + + for count in rng: + if count + windowsize > l: + _end = l + else: + _end = count + windowsize + yield (count, data[count : count + ttrain], data[count + ttrain : _end] )
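For example, a 50-point window over 100 points, advanced by half a window per step (the sizes are illustrative); note that the trailing windows shrink as the end of the data is reached:

    data = list(range(100))

    for ct, train, test in sliding_window(data, windowsize=50, train=0.8, inc=0.5):
        print(ct, len(train), len(test))   # 0 40 10, 25 40 10, 50 40 10, 75 25 0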
+ + +
[docs]def persist_obj(obj, file): + """ + Persist an object on the filesystem. This function depends on the Dill package + + :param obj: object on memory + :param file: file name to store the object + """ + with open(file, 'wb') as _file: + dill.dump(obj, _file)
+ + +
[docs]def load_obj(file): + """ + Load into memory an object stored on the filesystem. This function depends on the Dill package + + :param file: file name where the object is stored + :return: object + """ + with open(file, 'rb') as _file: + obj = dill.load(_file) + return obj
+ + +
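Typical usage, assuming `model` is any picklable pyFTS object; the path is illustrative:

    persist_obj(model, '/tmp/model.pkl')   # save to disk
    model = load_obj('/tmp/model.pkl')     # restore later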
[docs]def persist_env(file): + """ + Persist an entire environment on file. This function depends on the Dill package + + :param file: file name to store the environment + """ + dill.dump_session(file)
+ + +
[docs]def load_env(file): + dill.load_session(file)
+ + + +
[docs]def start_dispy_cluster(method, nodes): + import dispy, dispy.httpd, logging + + cluster = dispy.JobCluster(method, nodes=nodes, loglevel=logging.DEBUG, ping_interval=1000) + + http_server = dispy.httpd.DispyHTTPServer(cluster) + + return cluster, http_server
+ + + +
[docs]def stop_dispy_cluster(cluster, http_server): + cluster.wait() # wait for all jobs to finish + + cluster.print_status() + + http_server.shutdown() # this waits until browser gets all updates + cluster.close()
+ + + +
[docs]def simple_model_train(model, data, parameters): + model.train(data, **parameters) + return model
+ + + +
[docs]def distributed_train(model, train_method, nodes, fts_method, data, num_batches=10, + train_parameters={}, **kwargs): + import dispy, dispy.httpd, datetime + + batch_save = kwargs.get('batch_save', False) # save model between batches + + batch_save_interval = kwargs.get('batch_save_interval', 1) + + file_path = kwargs.get('file_path', None) + + cluster, http_server = start_dispy_cluster(train_method, nodes) + + print("[{0: %H:%M:%S}] Distributed Train Started".format(datetime.datetime.now())) + + jobs = [] + n = len(data) + batch_size = int(n / num_batches) + bcount = 1 + for ct in range(model.order, n, batch_size): + if model.is_multivariate: + ndata = data.iloc[ct - model.order:ct + batch_size] + else: + ndata = data[ct - model.order: ct + batch_size] + + tmp_model = fts_method(str(bcount)) + + tmp_model.clone_parameters(model) + + job = cluster.submit(tmp_model, ndata, train_parameters) + job.id = bcount # associate an ID to identify jobs (if needed later) + jobs.append(job) + + bcount += 1 + + for job in jobs: + print("[{0: %H:%M:%S}] Processing batch ".format(datetime.datetime.now()) + str(job.id)) + tmp = job() + if job.status == dispy.DispyJob.Finished and tmp is not None: + model.merge(tmp) + + if batch_save and (job.id % batch_save_interval) == 0: + persist_obj(model, file_path) + + else: + print(job.exception) + print(job.stdout) + + print("[{0: %H:%M:%S}] Finished batch ".format(datetime.datetime.now()) + str(job.id)) + + print("[{0: %H:%M:%S}] Distributed Train Finished".format(datetime.datetime.now())) + + stop_dispy_cluster(cluster, http_server) + + return model
+ + + +
[docs]def simple_model_predict(model, data, parameters): + return model.predict(data, **parameters)
+ + + +
[docs]def distributed_predict(model, parameters, nodes, data, num_batches): + import dispy, dispy.httpd + + cluster, http_server = start_dispy_cluster(simple_model_predict, nodes) + + jobs = [] + n = len(data) + batch_size = int(n / num_batches) + bcount = 1 + for ct in range(model.order, n, batch_size): + if model.is_multivariate: + ndata = data.iloc[ct - model.order:ct + batch_size] + else: + ndata = data[ct - model.order: ct + batch_size] + + job = cluster.submit(model, ndata, parameters) + job.id = bcount # associate an ID to identify jobs (if needed later) + jobs.append(job) + + bcount += 1 + + ret = [] + + for job in jobs: + tmp = job() + if job.status == dispy.DispyJob.Finished and tmp is not None: + if job.id < batch_size: + ret.extend(tmp[:-1]) + else: + ret.extend(tmp) + else: + print(job.exception) + print(job.stdout) + + stop_dispy_cluster(cluster, http_server) + + return ret
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/flrg.html b/docs/_build/html/_modules/pyFTS/common/flrg.html new file mode 100644 index 0000000..aef3721 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/flrg.html @@ -0,0 +1,188 @@ + + + + + + + + pyFTS.common.flrg — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.flrg

+import numpy as np
+
+
+
[docs]class FLRG(object): + """ + Fuzzy Logical Relationship Group + + Group a set of FLR's with the same LHS. Represents the temporal patterns for time t+1 (the RHS fuzzy sets) + when the LHS pattern is identified on time t. + """ + + def __init__(self, order, **kwargs): + self.LHS = None + """Left Hand Side of the rule""" + self.RHS = None + """Right Hand Side of the rule""" + self.order = order + """Number of lags on LHS""" + self.midpoint = None + self.lower = None + self.upper = None + self.key = None + +
[docs] def append_rhs(self, set, **kwargs): + pass
+ +
[docs] def get_key(self): + """Returns a unique identifier for this FLRG""" + if self.key is None: + if isinstance(self.LHS, (list, set)): + names = [c for c in self.LHS] + elif isinstance(self.LHS, dict): + names = [self.LHS[k] for k in self.LHS.keys()] + else: + names = [self.LHS] + + self.key = "" + + for n in names: + if len(self.key) > 0: + self.key += "," + self.key = self.key + n + return self.key
+ +
[docs] def get_membership(self, data, sets): + """ + Returns the membership value of the FLRG for the input data + + :param data: input data + :param sets: fuzzy sets + :return: the membership value + """ + ret = 0.0 + if isinstance(self.LHS, (list, set)): + if len(self.LHS) == len(data): + ret = np.nanmin([sets[self.LHS[ct]].membership(dat) for ct, dat in enumerate(data)]) + else: + ret = sets[self.LHS].membership(data) + return ret
+ +
[docs] def get_midpoint(self, sets): + """ + Returns the midpoint value for the RHS fuzzy sets + + :param sets: fuzzy sets + :return: the midpoint value + """ + if self.midpoint is None: + self.midpoint = np.nanmean(self.get_midpoints(sets)) + return self.midpoint
+ +
[docs] def get_midpoints(self, sets): + if isinstance(self.RHS, (list, set)): + return np.array([sets[s].centroid for s in self.RHS]) + elif isinstance(self.RHS, dict): + return np.array([sets[self.RHS[s]].centroid for s in self.RHS.keys()])
+ +
[docs] def get_lower(self, sets): + """ + Returns the lower bound value for the RHS fuzzy sets + + :param sets: fuzzy sets + :return: lower bound value + """ + if self.lower is None: + if isinstance(self.RHS, list): + self.lower = min([sets[rhs].lower for rhs in self.RHS]) + elif isinstance(self.RHS, dict): + self.lower = min([sets[self.RHS[s]].lower for s in self.RHS.keys()]) + return self.lower
+ +
[docs] def get_upper(self, sets): + """ + Returns the upper bound value for the RHS fuzzy sets + + :param sets: fuzzy sets + :return: upper bound value + """ + if self.upper is None: + if isinstance(self.RHS, list): + self.upper = max([sets[rhs].upper for rhs in self.RHS]) + elif isinstance(self.RHS, dict): + self.upper = max([sets[self.RHS[s]].upper for s in self.RHS.keys()]) + return self.upper
+ + def __len__(self): + return len(self.RHS)
+ + + +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/fts.html b/docs/_build/html/_modules/pyFTS/common/fts.html new file mode 100644 index 0000000..2b5b7df --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/fts.html @@ -0,0 +1,554 @@ + + + + + + + + pyFTS.common.fts — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.fts

+import numpy as np
+import pandas as pd
+from pyFTS.common import FuzzySet, SortedCollection, tree, Util
+
+
+
[docs]class FTS(object): + """ + Fuzzy Time Series object model + """ + def __init__(self, **kwargs): + """ + Create a Fuzzy Time Series model + """ + + self.sets = {} + """The list of fuzzy sets used on this model""" + self.flrgs = {} + """The list of Fuzzy Logical Relationship Groups - FLRG""" + self.order = kwargs.get('order',1) + """An integer with the model order (the number of past lags used in forecasting)""" + self.shortname = kwargs.get('name',"") + """A string with a short name or alias for the model""" + self.name = kwargs.get('name',"") + """A string with the model name""" + self.detail = kwargs.get('name',"") + """A string with the model detailed information""" + self.is_high_order = False + """A boolean value indicating if the model supports orders greater than 1, default: False""" + self.min_order = 1 + """In high order models, this integer value indicates the minimal order supported for the model, default: 1""" + self.has_seasonality = False + """A boolean value indicating if the model supports seasonal indexers, default: False""" + self.has_point_forecasting = True + """A boolean value indicating if the model supports point forecasting, default: True""" + self.has_interval_forecasting = False + """A boolean value indicating if the model supports interval forecasting, default: False""" + self.has_probability_forecasting = False + """A boolean value indicating if the model supports probabilistic forecasting, default: False""" + self.is_multivariate = False + """A boolean value indicating if the model supports multivariate time series (Pandas DataFrame), default: False""" + self.dump = False + self.transformations = [] + """A list with the data transformations (common.Transformations) applied on model pre and post processing, default: []""" + self.transformations_param = [] + """A list with the specific parameters for each data transformation""" + self.original_max = 0 + """A float with the upper limit of the Universe of Discourse, the maximal value found on training data""" + self.original_min = 0 + """A float with the lower limit of the Universe of Discourse, the minimal value found on training data""" + self.partitioner = kwargs.get("partitioner", None) + """A pyFTS.partitioners.Partitioner object with the Universe of Discourse partitioner used on the model. This is a mandatory dependency. """ + if self.partitioner is not None: + self.sets = self.partitioner.sets + self.auto_update = False + """A boolean value indicating that the model is incremental""" + self.benchmark_only = False + """A boolean value indicating a façade for external (non-FTS) model used on benchmarks or ensembles.""" + self.indexer = kwargs.get("indexer", None) + """A pyFTS.models.seasonal.Indexer object for indexing the time series data""" + self.uod_clip = kwargs.get("uod_clip", True) + """Flag indicating if the test data will be clipped inside the training Universe of Discourse""" + self.alpha_cut = kwargs.get("alpha_cut", 0.0) + """A float with the minimal membership to be considered in the fuzzification process""" + self.max_lag = self.order + """An integer indicating the largest lag used by the model. This value also indicates the minimum number of past lags + needed to forecast a single step ahead""" +
[docs] def fuzzy(self, data): + """ + Fuzzify a data point + + :param data: data point + :return: maximum membership fuzzy set + """ + best = {"fuzzyset": "", "membership": 0.0} + + for f in self.sets: + fset = self.sets[f] + if best["membership"] <= fset.membership(data): + best["fuzzyset"] = fset.name + best["membership"] = fset.membership(data) + + return best
+ +
[docs] def predict(self, data, **kwargs): + """ + Forecast using trained model + + :param data: time series with minimal length to the order of the model + + :keyword type: the forecasting type, one of these values: point(default), interval or distribution. + :keyword steps_ahead: The forecasting horizon, i. e., the number of steps ahead to forecast + :keyword start: in the multi step forecasting, the index of the data where to start forecasting + :keyword distributed: boolean, indicate if the forecasting procedure will be distributed in a dispy cluster + :keyword nodes: a list with the dispy cluster nodes addresses + + :return: a numpy array with the forecasted data + """ + + if self.is_multivariate: + ndata = data + else: + ndata = self.apply_transformations(data) + + if self.uod_clip: + ndata = np.clip(ndata, self.original_min, self.original_max) + + if 'distributed' in kwargs: + distributed = kwargs.pop('distributed') + else: + distributed = False + + if 'type' in kwargs: + type = kwargs.pop("type") + else: + type = 'point' + + if type not in ['point', 'interval', 'distribution']: + raise ValueError('The argument \'type\' has an unknown value.') + + if distributed is None or distributed == False: + + steps_ahead = kwargs.get("steps_ahead", None) + + if steps_ahead == None or steps_ahead == 1: + if type == 'point': + ret = self.forecast(ndata, **kwargs) + elif type == 'interval': + ret = self.forecast_interval(ndata, **kwargs) + elif type == 'distribution': + ret = self.forecast_distribution(ndata, **kwargs) + elif steps_ahead > 1: + if type == 'point': + ret = self.forecast_ahead(ndata, steps_ahead, **kwargs) + elif type == 'interval': + ret = self.forecast_ahead_interval(ndata, steps_ahead, **kwargs) + elif type == 'distribution': + ret = self.forecast_ahead_distribution(ndata, steps_ahead, **kwargs) + + else: + + nodes = kwargs.get("nodes", ['127.0.0.1']) + num_batches = kwargs.get('num_batches', 10) + + ret = Util.distributed_predict(self, kwargs, nodes, ndata, num_batches) + + if not self.is_multivariate: + kwargs['type'] = type + ret = self.apply_inverse_transformations(ret, params=[data[self.max_lag - 1:]], **kwargs) + + return ret
+ +
[docs] def forecast(self, data, **kwargs): + """ + Point forecast one step ahead + + :param data: time series data with the minimal length equal to the max_lag of the model + :param kwargs: model specific parameters + :return: a list with the forecasted values + """ + raise NotImplementedError('This model does not perform one step ahead point forecasts!')
+ +
[docs] def forecast_interval(self, data, **kwargs): + """ + Interval forecast one step ahead + + :param data: time series data with the minimal length equal to the max_lag of the model + :param kwargs: model specific parameters + :return: a list with the forecasted intervals + """ + raise NotImplementedError('This model does not perform one step ahead interval forecasts!')
+ +
[docs] def forecast_distribution(self, data, **kwargs): + """ + Probabilistic forecast one step ahead + + :param data: time series data with the minimal length equal to the max_lag of the model + :param kwargs: model specific parameters + :return: a list with the forecasted Probability Distributions + """ + raise NotImplementedError('This model does not perform one step ahead distribution forecasts!')
+ +
[docs] def forecast_ahead(self, data, steps, **kwargs): + """ + Point forecast n steps ahead + + :param data: time series data with the minimal length equal to the max_lag of the model + :param steps: the number of steps ahead to forecast + :param kwargs: model specific parameters + :return: a list with the forecasted values + """ + ret = [] + for k in np.arange(0,steps): + tmp = self.forecast(data[-self.order:], **kwargs) + + if isinstance(tmp,(list, np.ndarray)): + tmp = tmp[0] + + ret.append(tmp) + data.append(tmp) + + return ret
+ +
[docs] def forecast_ahead_interval(self, data, steps, **kwargs): + """ + Interval forecast n steps ahead + + :param data: time series data with the minimal length equal to the max_lag of the model + :param steps: the number of steps ahead to forecast + :param kwargs: model specific parameters + :return: a list with the forecasted intervals + """ + raise NotImplementedError('This model does not perform multi step ahead interval forecasts!')
+ +
[docs] def forecast_ahead_distribution(self, data, steps, **kwargs): + """ + Probabilistic forecast n steps ahead + + :param data: time series data with the minimal length equal to the max_lag of the model + :param steps: the number of steps ahead to forecast + :param kwargs: model specific parameters + :return: a list with the forecasted Probability Distributions + """ + raise NotImplementedError('This model does not perform multi step ahead distribution forecasts!')
+ +
[docs] def train(self, data, **kwargs): + """ + Model specific parameter fitting, to be implemented by each subclass + + :param data: training time series data + :param kwargs: method specific parameters + + """ + pass
+ +
[docs] def fit(self, ndata, **kwargs): + """ + Fit the model's parameters based on the training data. + + :param ndata: training time series data + :param kwargs: + + :keyword num_batches: split the training data into num_batches batches to save memory during the training process + :keyword save_model: save the final model on disk + :keyword batch_save: save the model after each batch + :keyword file_path: path to save the model + :keyword distributed: boolean, indicates whether the training procedure will be distributed in a dispy cluster + :keyword nodes: a list with the dispy cluster nodes addresses + + """ + + import datetime + + if self.is_multivariate: + data = ndata + else: + data = self.apply_transformations(ndata) + + self.original_min = np.nanmin(data) + self.original_max = np.nanmax(data) + + if 'sets' in kwargs: + self.sets = kwargs.pop('sets') + + if 'partitioner' in kwargs: + self.partitioner = kwargs.pop('partitioner') + + if (self.sets is None or len(self.sets) == 0) and not self.benchmark_only and not self.is_multivariate: + if self.partitioner is not None: + self.sets = self.partitioner.sets + else: + raise Exception("Fuzzy sets were not provided for the model. Use the 'sets' or the 'partitioner' parameter.") + + if 'order' in kwargs: + self.order = kwargs.pop('order') + + dump = kwargs.get('dump', None) + + num_batches = kwargs.get('num_batches', None) + + save = kwargs.get('save_model', False)  # save model on disk + + batch_save = kwargs.get('batch_save', False)  # save model between batches + + file_path = kwargs.get('file_path', None) + + distributed = kwargs.get('distributed', False) + + batch_save_interval = kwargs.get('batch_save_interval', 10) + + if distributed: + nodes = kwargs.get('nodes', False) + train_method = kwargs.get('train_method', Util.simple_model_train) + Util.distributed_train(self, train_method, nodes, type(self), data, num_batches, {}, + batch_save=batch_save, file_path=file_path, + batch_save_interval=batch_save_interval) + else: + + if dump == 'time': + print("[{0: %H:%M:%S}] Start training".format(datetime.datetime.now())) + + if num_batches is not None: + n = len(data) + batch_size = int(n / num_batches) + bcount = 1 + + rng = range(self.order, n, batch_size) + + if dump == 'tqdm': + from tqdm import tqdm + + rng = tqdm(rng) + + for ct in rng: + if dump == 'time': + print("[{0: %H:%M:%S}] Starting batch ".format(datetime.datetime.now()) + str(bcount)) + if self.is_multivariate: + mdata = data.iloc[ct - self.order:ct + batch_size] + else: + mdata = data[ct - self.order : ct + batch_size] + + self.train(mdata, **kwargs) + + if batch_save: + Util.persist_obj(self,file_path) + + if dump == 'time': + print("[{0: %H:%M:%S}] Finish batch ".format(datetime.datetime.now()) + str(bcount)) + + bcount += 1 + + else: + self.train(data, **kwargs) + + if dump == 'time': + print("[{0: %H:%M:%S}] Finish training".format(datetime.datetime.now())) + + if save: + Util.persist_obj(self, file_path)
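A sketch of the fit() training options; model and train are hypothetical placeholders for any fts.FTS subclass and a training series:

model.fit(train)                                           # plain in-memory training
model.fit(train, num_batches=10, dump='time')              # batch training with timing logs
model.fit(train, save_model=True, file_path='model.pkl')   # persist the trained model to disk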
+ +
[docs] def clone_parameters(self, model): + """ + Import the parameter values from another model + + :param model: source model + """ + + self.order = model.order + self.shortname = model.shortname + self.name = model.name + self.detail = model.detail + self.is_high_order = model.is_high_order + self.min_order = model.min_order + self.has_seasonality = model.has_seasonality + self.has_point_forecasting = model.has_point_forecasting + self.has_interval_forecasting = model.has_interval_forecasting + self.has_probability_forecasting = model.has_probability_forecasting + self.is_multivariate = model.is_multivariate + self.dump = model.dump + self.transformations = model.transformations + self.transformations_param = model.transformations_param + self.original_max = model.original_max + self.original_min = model.original_min + self.partitioner = model.partitioner + self.sets = model.sets + self.auto_update = model.auto_update + self.benchmark_only = model.benchmark_only + self.indexer = model.indexer
+ +
[docs] def merge(self, model): + """ + Merge the FLRG rules from other model + + :param model: source model + :return: + """ + + for key in model.flrgs.keys(): + flrg = model.flrgs[key] + if flrg.get_key() not in self.flrgs: + self.flrgs[flrg.get_key()] = flrg + else: + if isinstance(flrg.RHS, (list, set)): + for k in flrg.RHS: + self.flrgs[flrg.get_key()].append_rhs(k) + elif isinstance(flrg.RHS, dict): + for k in flrg.RHS.keys(): + self.flrgs[flrg.get_key()].append_rhs(flrg.RHS[k]) + else: + self.flrgs[flrg.get_key()].append_rhs(flrg.RHS)
+ +
[docs] def append_transformation(self, transformation): + if transformation is not None: + self.transformations.append(transformation)
+ +
[docs] def apply_transformations(self, data, params=None, updateUoD=False, **kwargs): + """ + Apply the data transformations for data preprocessing + + :param data: input data + :param params: transformation parameters + :param updateUoD: if True, updates the Universe of Discourse bounds (original_min/original_max) from the input data + :param kwargs: + :return: preprocessed data + """ + + ndata = data + if updateUoD: + if min(data) < 0: + self.original_min = min(data) * 1.1 + else: + self.original_min = min(data) * 0.9 + + if max(data) > 0: + self.original_max = max(data) * 1.1 + else: + self.original_max = max(data) * 0.9 + + if len(self.transformations) > 0: + if params is None: + params = [ None for k in self.transformations] + + for c, t in enumerate(self.transformations, start=0): + ndata = t.apply(ndata,params[c]) + + return ndata
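A sketch of the transformation pipeline, assuming the Differential transformation shipped in pyFTS.common.Transformations; model, train and test are hypothetical placeholders:

from pyFTS.common import Transformations

diff = Transformations.Differential(1)  # first-order differencing
model.append_transformation(diff)       # registered before training
model.fit(train)                        # fit() runs apply_transformations() internally
forecasts = model.predict(test)         # predict() runs apply_inverse_transformations()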
+ +
[docs] def apply_inverse_transformations(self, data, params=None, **kwargs): + """ + Apply the data transformations for data postprocessing + + :param data: input data + :param params: transformation parameters + :param kwargs: + :return: postprocessed data + """ + if len(self.transformations) > 0: + if params is None: + params = [None for k in self.transformations] + + ndata = data + for c, t in enumerate(reversed(self.transformations), start=0): + ndata = t.inverse(ndata, params[c], **kwargs) + + return ndata + else: + return data
+ +
[docs] def get_UoD(self): + return [self.original_min, self.original_max]
+ + def __str__(self): + """String representation of the model""" + + tmp = self.name + ":\n" + for r in sorted(self.flrgs, key=lambda key: self.flrgs[key].get_midpoint(self.sets)): + tmp = tmp + str(self.flrgs[r]) + "\n" + return tmp + + def __len__(self): + """ + The length (number of rules) of the model + + :return: number of rules + """ + return len(self.flrgs) + +
[docs] def len_total(self): + return sum([len(k) for k in self.flrgs])
+ + + + + + + +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/common/tree.html b/docs/_build/html/_modules/pyFTS/common/tree.html new file mode 100644 index 0000000..4ab91a7 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/common/tree.html @@ -0,0 +1,151 @@ + + + + + + + + pyFTS.common.tree — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.common.tree

+"""
+Tree data structure
+"""
+
+from pyFTS import *
+from functools import reduce
+import numpy as np
+
+
+
[docs]class FLRGTreeNode: + """ + Tree node for the FLRG tree + """ + + def __init__(self, value): + self.isRoot = False + self.children = [] + self.value = value +
[docs] def appendChild(self, child): + self.children.append(child)
+ +
[docs] def getChildren(self): + for child in self.children: + yield child
+ +
[docs] def paths(self, acc=[]): + if len(self.children) == 0: + yield [self.value] + acc + + for child in self.children: + for leaf_path in child.paths([self.value] + acc): + yield leaf_path
+ +
[docs] def getStr(self, k): + if self.isRoot: + tmp = str(self.value) + else: + tmp = "\\" + ("-" * k) + str(self.value) + for child in self.getChildren(): + tmp = tmp + "\n" + child.getStr(k + 1) + return tmp
+ + def __str__(self): + return self.getStr(0)
+ + +
[docs]class FLRGTree: + """Represents a FLRG set with a tree structure""" + def __init__(self): + self.root = FLRGTreeNode(None)
+ + +
[docs]def flat(dados): + for inst in dados: + if isinstance(inst, (list, tuple)): + x = flat(inst) + for k in x: + yield k + else: + yield inst
+ + +
[docs]def build_tree_without_order(node, lags, level): + + if level not in lags: + return + + for s in lags[level]: + node.appendChild(FLRGTreeNode(s)) + + for child in node.getChildren(): + build_tree_without_order(child, lags, level + 1)
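A short sketch of how the tree enumerates all lag combinations; the lags dict maps each level to its candidate fuzzy set keys (the names 'A1'..'A3' are illustrative):

root = FLRGTreeNode(None)
lags = {0: ['A1', 'A2'], 1: ['A2', 'A3']}  # level -> candidate sets
build_tree_without_order(root, lags, 0)

for p in root.paths():
    path = list(reversed(list(filter(None.__ne__, p))))
    print(path)  # ['A1', 'A2'], ['A1', 'A3'], ['A2', 'A2'], ['A2', 'A3']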
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/AirPassengers.html b/docs/_build/html/_modules/pyFTS/data/AirPassengers.html new file mode 100644 index 0000000..3b458d6 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/AirPassengers.html @@ -0,0 +1,104 @@ + + + + + + + + pyFTS.data.AirPassengers — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.AirPassengers

+from pyFTS.data import common
+import pandas as pd
+import numpy as np
+
+
+
[docs]def get_data(): + """ + Get the univariate time series data. + + :return: numpy array + """ + dat = get_dataframe() + dat = np.array(dat["Passengers"]) + return dat
+ +
[docs]def get_dataframe(): + """ + Get the complete multivariate time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('AirPassengers.csv', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/AirPassengers.csv', + sep=",") + return dat
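A quick usage sketch for the dataset helpers; the file is downloaded and cached locally on first use:

from pyFTS.data import AirPassengers

passengers = AirPassengers.get_data()  # univariate numpy array
df = AirPassengers.get_dataframe()     # full DataFrame
print(passengers[:5])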
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/Enrollments.html b/docs/_build/html/_modules/pyFTS/data/Enrollments.html new file mode 100644 index 0000000..9921474 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/Enrollments.html @@ -0,0 +1,99 @@ + + + + + + + + pyFTS.data.Enrollments — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.Enrollments

+from pyFTS.data import common
+import pandas as pd
+import numpy as np
+
+
+
[docs]def get_data(): + """ + Get the univariate time series data. + + :return: numpy array + """ + dat = get_dataframe() + dat = np.array(dat["Enrollments"]) + return dat
+ + +
[docs]def get_dataframe(): + """ + Get the complete time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('Enrollments.csv', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/Enrollments.csv', + sep=";") + return dat
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/INMET.html b/docs/_build/html/_modules/pyFTS/data/INMET.html new file mode 100644 index 0000000..1f224e0 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/INMET.html @@ -0,0 +1,101 @@ + + + + + + + + pyFTS.data.INMET — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.INMET

+"""
+INMET - Instituto Nacional de Meteorologia, Brasil
+
+Belo Horizonte station, from 2000-01-01 to 2012-12-31
+
+Source: http://www.inmet.gov.br
+"""
+
+from pyFTS.data import common
+import pandas as pd
+
+
+
[docs]def get_dataframe(): + """ + Get the complete multivariate time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('INMET.csv.bz2', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/INMET.csv.bz2', + sep=";", compression='bz2') + dat["DataHora"] = pd.to_datetime(dat["DataHora"], format='%d/%m/%Y %H:%M') + return dat
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/NASDAQ.html b/docs/_build/html/_modules/pyFTS/data/NASDAQ.html new file mode 100644 index 0000000..272d6c5 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/NASDAQ.html @@ -0,0 +1,106 @@ + + + + + + + + pyFTS.data.NASDAQ — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.NASDAQ

+from pyFTS.data import common
+import pandas as pd
+import numpy as np
+
+
+
[docs]def get_data(field="avg"): + """ + Get a simple univariate time series data. + + :param field: the dataset field name to extract + :return: numpy array + """ + dat = get_dataframe() + dat = np.array(dat[field]) + return dat
+ + +
[docs]def get_dataframe(): + """ + Get the complete multivariate time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('NASDAQ.csv.bz2', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/NASDAQ.csv.bz2', + sep=",", compression='bz2') + return dat
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/SONDA.html b/docs/_build/html/_modules/pyFTS/data/SONDA.html new file mode 100644 index 0000000..57e831b --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/SONDA.html @@ -0,0 +1,115 @@ + + + + + + + + pyFTS.data.SONDA — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.SONDA

+"""
+SONDA - Sistema de Organização Nacional de Dados Ambientais, from INPE - Instituto Nacional de Pesquisas Espaciais, Brasil.
+
+Brasilia station
+
+Source: http://sonda.ccst.inpe.br/
+
+"""
+from pyFTS.data import common
+import pandas as pd
+import numpy as np
+
+
+
[docs]def get_data(field): + """ + Get a univariate time series from the dataset. + + :param field: the dataset field name to extract + :return: numpy array + """ + dat = get_dataframe() + dat = np.array(dat[field]) + return dat
+ + +
[docs]def get_dataframe(): + """ + Get the complete multivariate time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('SONDA_BSB.csv.bz2', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/SONDA_BSB.csv.bz2', + sep=";", compression='bz2') + dat["datahora"] = pd.to_datetime(dat["datahora"], format='%Y-%m-%d %H:%M:%S') + return dat
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/SP500.html b/docs/_build/html/_modules/pyFTS/data/SP500.html new file mode 100644 index 0000000..3674df9 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/SP500.html @@ -0,0 +1,104 @@ + + + + + + + + pyFTS.data.SP500 — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.SP500

+from pyFTS.data import common
+import pandas as pd
+import numpy as np
+
+
+
[docs]def get_data(): + """ + Get the univariate time series data. + + :return: numpy array + """ + dat = get_dataframe() + return np.array(dat["Avg"])
+ + +
[docs]def get_dataframe(): + """ + Get the complete multivariate time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('SP500.csv.bz2', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/SP500.csv.bz2', + sep=",", compression='bz2') + return dat
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/TAIEX.html b/docs/_build/html/_modules/pyFTS/data/TAIEX.html new file mode 100644 index 0000000..d60abbc --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/TAIEX.html @@ -0,0 +1,106 @@ + + + + + + + + pyFTS.data.TAIEX — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.TAIEX

+from pyFTS.data import common
+import pandas as pd
+import numpy as np
+
+
+
[docs]def get_data(): + """ + Get the univariate time series data. + + :return: numpy array + """ + dat = get_dataframe() + dat = np.array(dat["avg"]) + return dat
+ + +
[docs]def get_dataframe(): + """ + Get the complete multivariate time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('TAIEX.csv.bz2', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/TAIEX.csv.bz2', + sep=",", compression='bz2') + dat["Date"] = pd.to_datetime(dat["Date"]) + return dat
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/artificial.html b/docs/_build/html/_modules/pyFTS/data/artificial.html new file mode 100644 index 0000000..b995bd2 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/artificial.html @@ -0,0 +1,158 @@ + + + + + + + + pyFTS.data.artificial — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.artificial

+"""
+Facilities to generate synthetic stochastic processes
+"""
+
+import numpy as np
+
+
+
[docs]def generate_gaussian_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=100, num=10, vmin=None, vmax=None): + """ + Generate data sampled from a Gaussian distribution, with constant or linearly changing parameters + + :param mu_ini: Initial mean + :param sigma_ini: Initial standard deviation + :param mu_inc: Mean increment after 'num' samples + :param sigma_inc: Standard deviation increment after 'num' samples + :param it: Number of iterations + :param num: Number of samples generated on each iteration + :param vmin: Lower bound value of generated data + :param vmax: Upper bound value of generated data + :return: A list of it*num float values + """ + mu = mu_ini + sigma = sigma_ini + ret = [] + for k in np.arange(0,it): + tmp = np.random.normal(mu, sigma, num) + if vmin is not None: + tmp = np.maximum(np.full(num, vmin), tmp) + if vmax is not None: + tmp = np.minimum(np.full(num, vmax), tmp) + ret.extend(tmp) + mu += mu_inc + sigma += sigma_inc + return ret
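A sketch of the generator for stationary and non-stationary series:

stationary = generate_gaussian_linear(0, 1, 0, 0)         # constant mean and scale
trending = generate_gaussian_linear(0, 1, 0.5, 0, it=50)  # mean drifts +0.5 every 10 samples
heteroskedastic = generate_gaussian_linear(0, 1, 0, 0.1)  # scale grows +0.1 every 10 samples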
+ + +
[docs]def generate_uniform_linear(min_ini, max_ini, min_inc, max_inc, it=100, num=10, vmin=None, vmax=None): + """ + Generate data sampled from a Uniform distribution, with constant or linearly changing bounds + + :param min_ini: Initial lower bound + :param max_ini: Initial upper bound + :param min_inc: Lower bound increment after 'num' samples + :param max_inc: Upper bound increment after 'num' samples + :param it: Number of iterations + :param num: Number of samples generated on each iteration + :param vmin: Lower bound value of generated data + :param vmax: Upper bound value of generated data + :return: A list of it*num float values + """ + _min = min_ini + _max = max_ini + ret = [] + for k in np.arange(0,it): + tmp = np.random.uniform(_min, _max, num) + if vmin is not None: + tmp = np.maximum(np.full(num, vmin), tmp) + if vmax is not None: + tmp = np.minimum(np.full(num, vmax), tmp) + ret.extend(tmp) + _min += min_inc + _max += max_inc + return ret
+ + +
[docs]def white_noise(n=500): + return np.random.normal(0, 1, n)
+ + +
[docs]def random_walk(n=500, type='gaussian'): + if type == 'gaussian': + tmp = generate_gaussian_linear(0, 1, 0, 0, it=1, num=n) + else: + tmp = generate_uniform_linear(-1, 1, 0, 0, it=1, num=n) + ret = [0] + for i in range(n): + ret.append(tmp[i] + ret[i]) + + return ret
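A usage sketch; note the returned series has n+1 values, starting at 0:

rw = random_walk(n=1000)                   # Gaussian increments
rwu = random_walk(n=1000, type='uniform')  # uniform increments in [-1, 1]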
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/common.html b/docs/_build/html/_modules/pyFTS/data/common.html new file mode 100644 index 0000000..217fca1 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/common.html @@ -0,0 +1,108 @@ + + + + + + + + pyFTS.data.common — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.common

+
+import pandas as pd
+import numpy as np
+import os
+import pkg_resources
+from pathlib import Path
+from urllib import request
+
+
+
[docs]def get_dataframe(filename, url, sep=";", compression='infer'): + """ + This method checks if the file named filename already exists; if so, it reads the file and returns its data. + If the file does not exist yet, it is downloaded first and then read. + + :param filename: dataset local filename + :param url: dataset internet URL + :param sep: CSV field separator + :param compression: type of compression + :return: Pandas dataset + """ + + tmp_file = Path(filename) + + if tmp_file.is_file(): + return pd.read_csv(filename, sep=sep, compression=compression) + else: + request.urlretrieve(url, filename) + return pd.read_csv(filename, sep=sep, compression=compression)
+ + +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/henon.html b/docs/_build/html/_modules/pyFTS/data/henon.html new file mode 100644 index 0000000..c3837fa --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/henon.html @@ -0,0 +1,108 @@ + + + + + + + + pyFTS.data.henon — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.henon

+import numpy as np
+import pandas as pd
+
+
+
[docs]def get_data(var, a=1.4, b=0.3, initial_values = [1, 1], iterations=1000): + return get_dataframe(a,b, initial_values, iterations)[var].values
+ + +
[docs]def get_dataframe(a=1.4, b=0.3, initial_values = [1, 1], iterations=1000): + ''' + M. Hénon. "A two-dimensional mapping with a strange attractor". Commun. Math. Phys. 50, 69-77 (1976) + + x(t+1) = a + b*y(t) - x(t)^2 + y(t+1) = x(t) + + :param a: Equation coefficient + :param b: Equation coefficient + :param initial_values: numpy array with the initial values of x and y. Default: [1, 1] + :param iterations: number of iterations. Default: 1000 + :return: Pandas DataFrame with the x and y values + ''' + + x = [initial_values[0]] + y = [initial_values[1]] + for t in np.arange(0, iterations): + xx = a + b * y[t] - x[t] ** 2 + y.append(x[t]) + x.append(xx) + + return pd.DataFrame({'x': x, 'y':y})
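A usage sketch for the map generators:

df = get_dataframe(a=1.4, b=0.3, iterations=5000)  # DataFrame with x and y columns
x = get_data('x')                                  # just the x component as a numpy array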
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/logistic_map.html b/docs/_build/html/_modules/pyFTS/data/logistic_map.html new file mode 100644 index 0000000..7da5bed --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/logistic_map.html @@ -0,0 +1,100 @@ + + + + + + + + pyFTS.data.logistic_map — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.logistic_map

+#
+
+import numpy as np
+
+
+
[docs]def get_data(r = 4, initial_value = 0.3, iterations=100): + ''' + May, Robert M. (1976). "Simple mathematical models with very complicated dynamics". + Nature. 261 (5560): 459–467. doi:10.1038/261459a0. + + x(t) = r * x(t-1) * (1 - x(t-1)) + + :param r: Equation coefficient + :param initial_value: Initial value of x. Default: 0.3 + :param iterations: number of iterations. Default: 100 + :return: A list with the x(t) time series + ''' + x = [initial_value] + for t in np.arange(0,iterations): + x.append(r * x[t]*(1 - x[t])) + + return x
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/lorentz.html b/docs/_build/html/_modules/pyFTS/data/lorentz.html new file mode 100644 index 0000000..07dff85 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/lorentz.html @@ -0,0 +1,119 @@ + + + + + + + + pyFTS.data.lorentz — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.lorentz

+import numpy as np
+import pandas as pd
+
+
+
[docs]def get_data(var, a = 10.0, b = 28.0, c = 8.0 / 3.0, dt = 0.01, + initial_values = [0.1, 0, 0], iterations=1000): + return get_dataframe(a, b, c, dt, initial_values, iterations)[var].values
+ + +
[docs]def get_dataframe(a = 10.0, b = 28.0, c = 8.0 / 3.0, dt = 0.01, + initial_values = [0.1, 0, 0], iterations=1000): + ''' + Lorenz, Edward Norton (1963). "Deterministic nonperiodic flow". Journal of the Atmospheric Sciences. 20 (2): 130–141. + https://doi.org/10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2 + + dx/dt = a(y - x) + dy/dt = x(b - z) - y + dz/dt = xy - cz + + :param a: Equation coefficient. Default value: 10 + :param b: Equation coefficient. Default value: 28 + :param c: Equation coefficient. Default value: 8.0/3.0 + :param dt: Time differential for continuous time integration. Default value: 0.01 + :param initial_values: numpy array with the initial values of x,y and z. Default: [0.1, 0, 0] + :param iterations: number of iterations. Default: 1000 + :return: Pandas DataFrame with the x, y and z values + ''' + + x = [initial_values[0]] + y = [initial_values[1]] + z = [initial_values[2]] + + for t in np.arange(0, iterations): + dxdt = a * (y[t] - x[t]) + dydt = x[t] * (b - z[t]) - y[t] + dzdt = x[t] * y[t] - c * z[t] + x.append(x[t] + dt * dxdt) + y.append(y[t] + dt * dydt) + z.append(z[t] + dt * dzdt) + + return pd.DataFrame({'x': x, 'y':y, 'z': z})
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/mackey_glass.html b/docs/_build/html/_modules/pyFTS/data/mackey_glass.html new file mode 100644 index 0000000..8775b2c --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/mackey_glass.html @@ -0,0 +1,101 @@ + + + + + + + + pyFTS.data.mackey_glass — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.mackey_glass

+import numpy as np
+
+
+
[docs]def get_data(b=0.1, c=0.2, tau=17, initial_values = np.linspace(0.5,1.5, 18), iterations=1000): + ''' + Mackey, M. C. and Glass, L. (1977). Oscillation and chaos in physiological control systems. + Science, 197(4300):287-289. + + dy/dt = -b*y(t) + c*y(t-tau) / (1 + y(t-tau)^10) + + :param b: Equation coefficient + :param c: Equation coefficient + :param tau: Lag parameter, default: 17 + :param initial_values: numpy array with the initial values of y. Default: np.linspace(0.5,1.5,18) + :param iterations: number of iterations. Default: 1000 + :return: A list with the y(t) time series, after discarding the first 100 samples as burn-in + ''' + y = initial_values.tolist() + + for n in np.arange(len(y)-1, iterations+100): + y.append(y[n] - b * y[n] + c * y[n - tau] / (1 + y[n - tau] ** 10)) + + return y[100:]
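A usage sketch; with the defaults the function returns roughly 1000 points after the 100-sample burn-in is discarded:

y = get_data(b=0.1, c=0.2, tau=17, iterations=1000)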
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/rossler.html b/docs/_build/html/_modules/pyFTS/data/rossler.html new file mode 100644 index 0000000..38c5c8d --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/rossler.html @@ -0,0 +1,118 @@ + + + + + + + + pyFTS.data.rossler — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.rossler

+import numpy as np
+import pandas as pd
+
+
+
[docs]def get_data(var, a = 0.2, b = 0.2, c = 5.7, dt = 0.01, + initial_values = [0.001, 0.001, 0.001], iterations=5000): + return get_dataframe(a, b, c, dt, initial_values, iterations)[var].values
+ + +
[docs]def get_dataframe(a = 0.2, b = 0.2, c = 5.7, dt = 0.01, + initial_values = [0.001, 0.001, 0.001], iterations=5000): + ''' + O. E. Rössler, Phys. Lett. 57A, 397 (1976). + + dx/dt = -z - y + dy/dt = x + ay + dz/dt = b + z( x - c ) + + :param a: Equation coefficient. Default value: 0.2 + :param b: Equation coefficient. Default value: 0.2 + :param c: Equation coefficient. Default value: 5.7 + :param dt: Time differential for continuous time integration. Default value: 0.01 + :param initial_values: numpy array with the initial values of x,y and z. Default: [0.001, 0.001, 0.001] + :param iterations: number of iterations. Default: 5000 + :return: Pandas DataFrame with the x, y and z values + ''' + + x = [initial_values[0]] + y = [initial_values[1]] + z = [initial_values[2]] + + for t in np.arange(0, iterations): + dxdt = - (y[t] + z[t]) + dydt = x[t] + a * y[t] + dzdt = b + z[t] * x[t] - z[t] * c + x.append(x[t] + dt * dxdt) + y.append(y[t] + dt * dydt) + z.append(z[t] + dt * dzdt) + + return pd.DataFrame({'x': x, 'y':y, 'z': z})
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/data/sunspots.html b/docs/_build/html/_modules/pyFTS/data/sunspots.html new file mode 100644 index 0000000..2bf57b8 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/data/sunspots.html @@ -0,0 +1,103 @@ + + + + + + + + pyFTS.data.sunspots — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.data.sunspots

+from pyFTS.data import common
+import pandas as pd
+import numpy as np
+
+
[docs]def get_data(): + """ + Get the univariate time series data. + + :return: numpy array + """ + dat = get_dataframe() + dat = np.array(dat["SUNACTIVITY"]) + return dat
+ +
[docs]def get_dataframe(): + """ + Get the complete multivariate time series data. + + :return: Pandas DataFrame + """ + dat = common.get_dataframe('sunspots.csv', + 'https://github.com/petroniocandido/pyFTS/raw/8f20f3634aa6a8f58083bdcd1bbf93795e6ed767/pyFTS/data/sunspots.csv', + sep=",") + return dat
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/chen.html b/docs/_build/html/_modules/pyFTS/models/chen.html new file mode 100644 index 0000000..b623631 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/chen.html @@ -0,0 +1,154 @@ + + + + + + + + pyFTS.models.chen — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.chen

+"""
+First Order Conventional Fuzzy Time Series by Chen (1996)
+
+S.-M. Chen, “Forecasting enrollments based on fuzzy time series,” Fuzzy Sets Syst., vol. 81, no. 3, pp. 311–319, 1996.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts, flrg
+
+
+
[docs]class ConventionalFLRG(flrg.FLRG): + """First Order Conventional Fuzzy Logical Relationship Group""" + def __init__(self, LHS, **kwargs): + super(ConventionalFLRG, self).__init__(1, **kwargs) + self.LHS = LHS + self.RHS = set() + +
[docs] def get_key(self, sets): + return sets[self.LHS].name
+ +
[docs] def append_rhs(self, c, **kwargs): + self.RHS.add(c)
+ + def __str__(self): + tmp = str(self.LHS) + " -> " + tmp2 = "" + for c in sorted(self.RHS, key=lambda s: s): + if len(tmp2) > 0: + tmp2 = tmp2 + "," + tmp2 = tmp2 + str(c) + return tmp + tmp2
+ + +
[docs]class ConventionalFTS(fts.FTS): + """Conventional Fuzzy Time Series""" + def __init__(self, **kwargs): + super(ConventionalFTS, self).__init__(order=1, **kwargs) + self.name = "Conventional FTS" + self.detail = "Chen" + self.shortname = "CFTS" + self.flrgs = {} + +
[docs] def generate_flrg(self, flrs): + for flr in flrs: + if flr.LHS in self.flrgs: + self.flrgs[flr.LHS].append_rhs(flr.RHS) + else: + self.flrgs[flr.LHS] = ConventionalFLRG(flr.LHS) + self.flrgs[flr.LHS].append_rhs(flr.RHS)
+ +
[docs] def train(self, data, **kwargs): + + tmpdata = FuzzySet.fuzzyfy_series(data, self.sets, method='maximum') + flrs = FLR.generate_non_recurrent_flrs(tmpdata) + self.generate_flrg(flrs)
+ +
[docs] def forecast(self, ndata, **kwargs): + + l = len(ndata) + + ret = [] + + for k in np.arange(0, l): + + actual = FuzzySet.get_maximum_membership_fuzzyset(ndata[k], self.sets) + + if actual.name not in self.flrgs: + ret.append(actual.centroid) + else: + _flrg = self.flrgs[actual.name] + + ret.append(_flrg.get_midpoint(self.sets)) + + return ret
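An end-to-end sketch for the Chen model, assuming the Grid partitioner from pyFTS.partitioners:

from pyFTS.data import Enrollments
from pyFTS.models import chen
from pyFTS.partitioners import Grid

data = Enrollments.get_data()
fs = Grid.GridPartitioner(data=data, npart=10)  # 10 evenly spaced fuzzy sets
model = chen.ConventionalFTS(partitioner=fs)
model.fit(data)
forecasts = model.predict(data)
print(model)  # prints the FLRG rule base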
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/cheng.html b/docs/_build/html/_modules/pyFTS/models/cheng.html new file mode 100644 index 0000000..55f71d3 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/cheng.html @@ -0,0 +1,139 @@ + + + + + + + + pyFTS.models.cheng — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.cheng

+"""
+Trend Weighted Fuzzy Time Series by Cheng, Chen and Wu (2009)
+
+C.-H. Cheng, Y.-S. Chen, and Y.-L. Wu, “Forecasting innovation diffusion of products using trend-weighted fuzzy time-series model,” 
+Expert Syst. Appl., vol. 36, no. 2, pp. 1826–1832, 2009.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts
+from pyFTS.models import yu
+
+
+
[docs]class TrendWeightedFLRG(yu.WeightedFLRG): + """ + First Order Trend Weighted Fuzzy Logical Relationship Group + """ + def __init__(self, LHS, **kwargs): + super(TrendWeightedFLRG, self).__init__(LHS, **kwargs) + self.w = None + +
[docs] def weights(self, sets): + if self.w is None: + count_nochange = 0.0 + count_up = 0.0 + count_down = 0.0 + weights = [] + + for c in self.RHS: + tmp = 0 + if sets[self.LHS].centroid == sets[c].centroid: + count_nochange += 1.0 + tmp = count_nochange + elif sets[self.LHS].centroid > sets[c].centroid: + count_down += 1.0 + tmp = count_down + else: + count_up += 1.0 + tmp = count_up + weights.append(tmp) + + tot = sum(weights) + self.w = np.array([k / tot for k in weights]) + return self.w
+ + +
[docs]class TrendWeightedFTS(yu.WeightedFTS): + """First Order Trend Weighted Fuzzy Time Series""" + def __init__(self, **kwargs): + super(TrendWeightedFTS, self).__init__(**kwargs) + self.shortname = "TWFTS" + self.name = "Trend Weighted FTS" + self.detail = "Cheng" + self.is_high_order = False + +
[docs] def generate_FLRG(self, flrs): + for flr in flrs: + if flr.LHS in self.flrgs: + self.flrgs[flr.LHS].append_rhs(flr.RHS) + else: + self.flrgs[flr.LHS] = TrendWeightedFLRG(flr.LHS) + self.flrgs[flr.LHS].append_rhs(flr.RHS)
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/ensemble/ensemble.html b/docs/_build/html/_modules/pyFTS/models/ensemble/ensemble.html new file mode 100644 index 0000000..8404b36 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/ensemble/ensemble.html @@ -0,0 +1,380 @@ + + + + + + + + pyFTS.models.ensemble.ensemble — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.ensemble.ensemble

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+import numpy as np
+import pandas as pd
+from pyFTS.common import SortedCollection, fts, tree
+from pyFTS.models import chen, cheng, hofts, hwang, ismailefendi, sadaei, song, yu
+from pyFTS.probabilistic import ProbabilityDistribution
+import scipy.stats as st
+
+
+
[docs]def sampler(data, quantiles): + ret = [] + for qt in quantiles: + ret.append(np.nanpercentile(data, q=qt * 100)) + return ret
+ + +
[docs]class EnsembleFTS(fts.FTS): + """ + Ensemble FTS + """ + def __init__(self, **kwargs): + super(EnsembleFTS, self).__init__(**kwargs) + self.shortname = "Ensemble FTS" + self.name = "Ensemble FTS" + self.flrgs = {} + self.has_point_forecasting = True + self.has_interval_forecasting = True + self.has_probability_forecasting = True + self.is_high_order = True + self.models = [] + """A list of FTS models, the ensemble components""" + self.parameters = [] + """A list with the parameters for each component model""" + self.alpha = kwargs.get("alpha", 0.05) + self.point_method = kwargs.get('point_method', 'mean') + self.interval_method = kwargs.get('interval_method', 'quantile') + self.order = 1 + +
[docs] def append_model(self, model): + """ + Append a new model to the ensemble + + :param model: FTS model + + """ + self.models.append(model) + if model.order > self.order: + self.order = model.order + + if model.is_multivariate: + self.is_multivariate = True + + if model.has_seasonality: + self.has_seasonality = True
+ + +
[docs] def train(self, data, **kwargs): + pass
+ +
[docs] def get_models_forecasts(self,data): + tmp = [] + for model in self.models: + if model.is_multivariate or model.has_seasonality: + forecast = model.forecast(data) + else: + + if isinstance(data, pd.DataFrame) and self.indexer is not None: + data = self.indexer.get_data(data) + + sample = data[-model.order:] + forecast = model.forecast(sample) + if isinstance(forecast, (list,np.ndarray)) and len(forecast) > 0: + forecast = int(forecast[-1]) + elif isinstance(forecast, (list,np.ndarray)) and len(forecast) == 0: + forecast = np.nan + if isinstance(forecast, list): + tmp.extend(forecast) + else: + tmp.append(forecast) + return tmp
+ +
[docs] def get_point(self,forecasts, **kwargs): + if self.point_method == 'mean': + ret = np.nanmean(forecasts) + elif self.point_method == 'median': + ret = np.nanpercentile(forecasts, 50) + elif self.point_method == 'quantile': + alpha = kwargs.get("alpha",0.05) + ret = np.percentile(forecasts, alpha*100) + + return ret
+ +
[docs] def get_interval(self, forecasts): + ret = [] + if self.interval_method == 'extremum': + ret.append([min(forecasts), max(forecasts)]) + elif self.interval_method == 'quantile': + qt_lo = np.nanpercentile(forecasts, q=self.alpha * 100) + qt_up = np.nanpercentile(forecasts, q=(1-self.alpha) * 100) + ret.append([qt_lo, qt_up]) + elif self.interval_method == 'normal': + mu = np.nanmean(forecasts) + sigma = np.sqrt(np.nanvar(forecasts)) + ret.append(mu + st.norm.ppf(self.alpha) * sigma) + ret.append(mu + st.norm.ppf(1 - self.alpha) * sigma) + + return ret
+ +
[docs] def get_distribution_interquantile(self,forecasts, alpha): + size = len(forecasts) + qt_lower = int(np.ceil(size * alpha)) - 1 + qt_upper = int(np.ceil(size * (1- alpha))) - 1 + + ret = sorted(forecasts)[qt_lower : qt_upper] + + return ret
+ +
[docs] def forecast(self, data, **kwargs): + + if "method" in kwargs: + self.point_method = kwargs.get('method','mean') + + l = len(data) + ret = [] + + for k in np.arange(self.order, l+1): + sample = data[k - self.order : k] + tmp = self.get_models_forecasts(sample) + point = self.get_point(tmp) + ret.append(point) + + return ret
+ +
[docs] def forecast_interval(self, data, **kwargs): + + if "method" in kwargs: + self.interval_method = kwargs.get('method','quantile') + + if 'alpha' in kwargs: + self.alpha = kwargs.get('alpha',0.05) + + l = len(data) + + ret = [] + + for k in np.arange(self.order, l+1): + sample = data[k - self.order : k] + tmp = self.get_models_forecasts(sample) + interval = self.get_interval(tmp) + if len(interval) == 1: + interval = interval[-1] + ret.append(interval) + + return ret
+ +
[docs] def forecast_ahead_interval(self, data, steps, **kwargs): + + if 'method' in kwargs: + self.interval_method = kwargs.get('method','quantile') + + if 'alpha' in kwargs: + self.alpha = kwargs.get('alpha', self.alpha) + + ret = [] + + samples = [[k] for k in data[-self.order:]] + + for k in np.arange(self.order, steps + self.order): + forecasts = [] + lags = {} + for i in np.arange(0, self.order): lags[i] = samples[k - self.order + i] + + # Build the tree with all possible paths + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + for p in root.paths(): + path = list(reversed(list(filter(None.__ne__, p)))) + + forecasts.extend(self.get_models_forecasts(path)) + + samples.append(sampler(forecasts, np.arange(0.1, 1, 0.2))) + interval = self.get_interval(forecasts) + + if len(interval) == 1: + interval = interval[0] + + ret.append(interval) + + return ret
+ +
[docs] def forecast_distribution(self, data, **kwargs): + ret = [] + + smooth = kwargs.get("smooth", "KDE") + alpha = kwargs.get("alpha", None) + + uod = self.get_UoD() + + for k in np.arange(self.order, len(data)): + + sample = data[k-self.order : k] + + forecasts = self.get_models_forecasts(sample) + + if alpha is None: + forecasts = np.ravel(forecasts).tolist() + else: + forecasts = self.get_distribution_interquantile(np.ravel(forecasts).tolist(), alpha) + + dist = ProbabilityDistribution.ProbabilityDistribution(smooth, uod=uod, data=forecasts, + name="", **kwargs) + + ret.append(dist) + + return ret
+ + +
[docs] def forecast_ahead_distribution(self, data, steps, **kwargs): + if 'method' in kwargs: + self.point_method = kwargs.get('method','mean') + + smooth = kwargs.get("smooth", "KDE") + alpha = kwargs.get("alpha", None) + + ret = [] + + start = kwargs.get('start', self.order) + + uod = self.get_UoD() + + sample = data[start - self.order: start] + + for k in np.arange(self.order, steps+self.order): + forecasts = [] + lags = {} + for i in np.arange(0, self.order): lags[i] = sample[k - self.order + i] + + # Build the tree with all possible paths + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + for p in root.paths(): + path = list(reversed(list(filter(None.__ne__, p)))) + + forecasts.extend(self.get_models_forecasts(path)) + + sample.append(sampler(forecasts, np.arange(0.1, 1, 0.1))) + + if alpha is None: + forecasts = np.ravel(forecasts).tolist() + else: + forecasts = self.get_distribution_interquantile(np.ravel(forecasts).tolist(), alpha) + + dist = ProbabilityDistribution.ProbabilityDistribution(smooth, uod=uod, data=forecasts, + name="", **kwargs) + + ret.append(dist) + + return ret
+ + +
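A sketch of composing an ensemble by hand; append_model() keeps the ensemble order equal to the largest component order. The names train and Grid.GridPartitioner below are assumptions (a hypothetical series and the Grid partitioner from pyFTS.partitioners):

from pyFTS.models import chen, hofts
from pyFTS.models.ensemble import ensemble
from pyFTS.partitioners import Grid

fs = Grid.GridPartitioner(data=train, npart=10)

emodel = ensemble.EnsembleFTS(point_method='median', interval_method='quantile', alpha=0.05)

m1 = chen.ConventionalFTS(partitioner=fs)
m1.fit(train)
emodel.append_model(m1)

m2 = hofts.HighOrderFTS(partitioner=fs, order=2)
m2.fit(train)
emodel.append_model(m2)

points = emodel.forecast(train[-emodel.order:])
intervals = emodel.forecast_interval(train[-emodel.order:])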
[docs]class AllMethodEnsembleFTS(EnsembleFTS): + def __init__(self, **kwargs): + super(AllMethodEnsembleFTS, self).__init__(**kwargs) + self.min_order = 3 + self.shortname ="Ensemble FTS" + +
[docs] def set_transformations(self, model): + for t in self.transformations: + model.append_transformation(t)
+ +
[docs] def train(self, data, **kwargs): + fo_methods = [song.ConventionalFTS, chen.ConventionalFTS, yu.WeightedFTS, cheng.TrendWeightedFTS, + sadaei.ExponentialyWeightedFTS, ismailefendi.ImprovedWeightedFTS] + + ho_methods = [hofts.HighOrderFTS, hwang.HighOrderFTS] + + for method in fo_methods: + model = method(partitioner=self.partitioner) + self.set_transformations(model) + model.fit(data, **kwargs) + self.append_model(model) + + for method in ho_methods: + for o in np.arange(1, self.order+1): + model = method(partitioner=self.partitioner) + if model.min_order >= o: + model.order = o + self.set_transformations(model) + model.fit(data, **kwargs) + self.append_model(model)
+ + + + +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/ensemble/multiseasonal.html b/docs/_build/html/_modules/pyFTS/models/ensemble/multiseasonal.html new file mode 100644 index 0000000..11df525 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/ensemble/multiseasonal.html @@ -0,0 +1,169 @@ + + + + + + + + pyFTS.models.ensemble.multiseasonal — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.ensemble.multiseasonal

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+import numpy as np
+from pyFTS.common import Util as cUtil
+from pyFTS.models.ensemble import ensemble
+from pyFTS.models.seasonal import cmsfts
+from pyFTS.probabilistic import ProbabilityDistribution
+from copy import deepcopy
+from joblib import Parallel, delayed
+import multiprocessing
+
+
+
[docs]def train_individual_model(partitioner, train_data, indexer): + pttr = str(partitioner.__module__).split('.')[-1] + diff = "_diff" if partitioner.transformation is not None else "" + _key = "msfts_" + pttr + str(partitioner.partitions) + diff + "_" + indexer.name + + print(_key) + + model = cmsfts.ContextualMultiSeasonalFTS(_key, indexer=indexer) + model.append_transformation(partitioner.transformation) + model.train(train_data, partitioner.sets, order=1) + + cUtil.persist_obj(model, "models/"+_key+".pkl") + + return model
+ + +
[docs]class SeasonalEnsembleFTS(ensemble.EnsembleFTS): + def __init__(self, name, **kwargs): + super(SeasonalEnsembleFTS, self).__init__(name="Seasonal Ensemble FTS", **kwargs) + self.min_order = 1 + self.indexers = [] + self.partitioners = [] + self.is_multivariate = True + self.has_seasonality = True + self.has_probability_forecasting = True + +
[docs] def update_uod(self, data): + self.original_max = max(self.indexer.get_data(data)) + self.original_min = min(self.indexer.get_data(data))
+ +
[docs] def train(self, data, **kwargs): + self.original_max = max(self.indexer.get_data(data)) + self.original_min = min(self.indexer.get_data(data)) + + num_cores = multiprocessing.cpu_count() + + pool = {} + count = 0 + for ix in self.indexers: + for pt in self.partitioners: + pool[count] = {'ix': ix, 'pt': pt} + count += 1 + + results = Parallel(n_jobs=num_cores)( + delayed(train_individual_model)(deepcopy(pool[m]['pt']), data, deepcopy(pool[m]['ix'])) + for m in pool.keys()) + + for tmp in results: + self.append_model(tmp) + + cUtil.persist_obj(self, "models/"+self.name+".pkl")
+ +
[docs] def forecast_distribution(self, data, **kwargs): + + ret = [] + + smooth = kwargs.get("smooth", "KDE") + alpha = kwargs.get("alpha", None) + + uod = self.get_UoD() + + for k in data.index: + + tmp = self.get_models_forecasts(data.ix[k]) + + if alpha is None: + tmp = np.ravel(tmp).tolist() + else: + tmp = self.get_distribution_interquantile( np.ravel(tmp).tolist(), alpha) + + name = str(self.indexer.get_index(data.ix[k])) + + dist = ProbabilityDistribution.ProbabilityDistribution(smooth, uod=uod, data=tmp, + name=name, **kwargs) + + ret.append(dist) + + return ret
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/hofts.html b/docs/_build/html/_modules/pyFTS/models/hofts.html new file mode 100644 index 0000000..e88faf1 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/hofts.html @@ -0,0 +1,216 @@ + + + + + + + + pyFTS.models.hofts — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.hofts

+"""
+High Order FTS
+
+Severiano, S. A. Jr; Silva, P. C. L.; Sadaei, H. J.; Guimarães, F. G. Very Short-term Solar Forecasting
+using Fuzzy Time Series. 2017 IEEE International Conference on Fuzzy Systems. DOI: 10.1109/FUZZ-IEEE.2017.8015732
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts, flrg, tree
+
+
[docs]class HighOrderFLRG(flrg.FLRG): + """Conventional High Order Fuzzy Logical Relationship Group""" + def __init__(self, order, **kwargs): + super(HighOrderFLRG, self).__init__(order, **kwargs) + self.LHS = [] + self.RHS = {} + self.strlhs = "" + +
[docs] def append_rhs(self, c, **kwargs): + if c not in self.RHS: + self.RHS[c] = c
+ +
[docs] def append_lhs(self, c): + self.LHS.append(c)
+ + def __str__(self): + tmp = "" + for c in sorted(self.RHS): + if len(tmp) > 0: + tmp = tmp + "," + tmp = tmp + c + return self.get_key() + " -> " + tmp + + + def __len__(self): + return len(self.RHS)
+ + +
[docs]class HighOrderFTS(fts.FTS): + """Conventional High Order Fuzzy Time Series""" + def __init__(self, **kwargs): + super(HighOrderFTS, self).__init__(**kwargs) + self.name = "High Order FTS" + self.shortname = "HOFTS" + self.detail = "Severiano, Silva, Sadaei and Guimarães" + self.is_high_order = True + self.min_order = 1 + self.order= kwargs.get("order", 2) + self.lags = kwargs.get("lags", None) + self.configure_lags(**kwargs) + +
[docs] def configure_lags(self, **kwargs): + if "order" in kwargs: + self.order = kwargs.get("order", 2) + + if "lags" in kwargs: + self.lags = kwargs.get("lags", None) + + if self.lags is not None: + self.max_lag = max(self.lags) + else: + self.max_lag = self.order + self.lags = np.arange(1, self.order+1)
+ +
[docs] def generate_lhs_flrg(self, sample): + lags = {} + + flrgs = [] + + for ct, o in enumerate(self.lags): + lhs = [key for key in self.partitioner.ordered_sets + if self.sets[key].membership(sample[o-1]) > self.alpha_cut] + lags[ct] = lhs + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + # Trace the possible paths + for p in root.paths(): + flrg = HighOrderFLRG(self.order) + path = list(reversed(list(filter(None.__ne__, p)))) + + for lhs in path: + flrg.append_lhs(lhs) + + flrgs.append(flrg) + + return flrgs
+ +
[docs] def generate_flrg(self, data): + l = len(data) + for k in np.arange(self.max_lag, l): + if self.dump: print("FLR: " + str(k)) + + sample = data[k - self.max_lag: k] + + rhs = [key for key in self.partitioner.ordered_sets + if self.sets[key].membership(data[k]) > self.alpha_cut] + + flrgs = self.generate_lhs_flrg(sample) + + for flrg in flrgs: + if flrg.get_key() not in self.flrgs: + self.flrgs[flrg.get_key()] = flrg; + + for st in rhs: + self.flrgs[flrg.get_key()].append_rhs(st)
+ +
[docs] def train(self, data, **kwargs): + self.configure_lags(**kwargs) + self.generate_flrg(data)
+ +
[docs] def forecast(self, ndata, **kwargs): + + ret = [] + + l = len(ndata) + + if l <= self.max_lag: + return ndata + + for k in np.arange(self.max_lag, l+1): + flrgs = self.generate_lhs_flrg(ndata[k - self.max_lag: k]) + + tmp = [] + for flrg in flrgs: + + if flrg.get_key() not in self.flrgs: + if len(flrg.LHS) > 0: + tmp.append(self.sets[flrg.LHS[-1]].centroid) + else: + flrg = self.flrgs[flrg.get_key()] + tmp.append(flrg.get_midpoint(self.sets)) + + ret.append(np.nanmean(tmp)) + + return ret
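A usage sketch for the high-order model, assuming the Grid partitioner from pyFTS.partitioners:

from pyFTS.data import TAIEX
from pyFTS.models import hofts
from pyFTS.partitioners import Grid

data = TAIEX.get_data()
train, test = data[:3000], data[3000:3200]

fs = Grid.GridPartitioner(data=train, npart=35)
model = hofts.HighOrderFTS(partitioner=fs, order=3)  # uses lags 1..3
model.fit(train)
forecasts = model.predict(test)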
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/hwang.html b/docs/_build/html/_modules/pyFTS/models/hwang.html new file mode 100644 index 0000000..63ebb0a --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/hwang.html @@ -0,0 +1,143 @@ + + + + + + + + pyFTS.models.hwang — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.hwang

+"""
+High Order Fuzzy Time Series by Hwang, Chen and Lee (1998)
+
+Jeng-Ren Hwang, Shyi-Ming Chen, and Chia-Hoang Lee, “Handling forecasting problems using fuzzy time series,” 
+Fuzzy Sets Syst., no. 100, pp. 217–228, 1998.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, Transformations, fts
+
+
+
[docs]class HighOrderFTS(fts.FTS): + def __init__(self, **kwargs): + super(HighOrderFTS, self).__init__(**kwargs) + self.is_high_order = True + self.min_order = 2 + self.name = "Hwang High Order FTS" + self.shortname = "Hwang" + self.detail = "Hwang" + self.configure_lags(**kwargs) + +
[docs] def configure_lags(self, **kwargs): + if "order" in kwargs: + self.order = kwargs.get("order", 2) + + self.max_lag = self.order
+ +
[docs] def forecast(self, ndata, **kwargs): + + l = len(self.sets) + + cn = np.array([0.0 for k in range(l)]) + ow = np.array([[0.0 for k in range(l)] for z in range(self.order - 1)]) + rn = np.array([[0.0 for k in range(l)] for z in range(self.order - 1)]) + ft = np.array([0.0 for k in range(l)]) + + ret = [] + + for t in np.arange(self.order-1, len(ndata)): + + for ix in range(l): + s = self.partitioner.ordered_sets[ix] + cn[ix] = self.sets[s].membership( FuzzySet.grant_bounds(ndata[t], self.sets, self.partitioner.ordered_sets)) + for w in np.arange(self.order-1): + ow[w, ix] = self.sets[s].membership(FuzzySet.grant_bounds(ndata[t - w], self.sets, self.partitioner.ordered_sets)) + rn[w, ix] = ow[w, ix] * cn[ix] + ft[ix] = max(ft[ix], rn[w, ix]) + mft = max(ft) + out = 0.0 + count = 0.0 + for ix in range(l): + s = self.partitioner.ordered_sets[ix] + if ft[ix] == mft: + out = out + self.sets[s].centroid + count += 1.0 + ret.append(out / count) + + return ret
+ +
[docs] def train(self, data, **kwargs): + + if self.sets is None: + self.sets = self.partitioner.sets + + self.configure_lags(**kwargs)
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/ifts.html b/docs/_build/html/_modules/pyFTS/models/ifts.html new file mode 100644 index 0000000..a1bdfdd --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/ifts.html @@ -0,0 +1,167 @@ + + + + + + + + pyFTS.models.ifts — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.ifts

+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+"""
+High Order Interval Fuzzy Time Series
+
+SILVA, Petrônio CL; SADAEI, Hossein Javedani; GUIMARÃES, Frederico Gadelha. Interval Forecasting with Fuzzy Time Series.
+In: Computational Intelligence (SSCI), 2016 IEEE Symposium Series on. IEEE, 2016. p. 1-8.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts, tree
+from pyFTS.models import hofts
+
+
+
[docs]class IntervalFTS(hofts.HighOrderFTS): + """ + High Order Interval Fuzzy Time Series + """ + def __init__(self, **kwargs): + super(IntervalFTS, self).__init__(**kwargs) + self.shortname = "IFTS" + self.name = "Interval FTS" + self.detail = "Silva, P.; Guimarães, F.; Sadaei, H. (2016)" + self.flrgs = {} + self.has_point_forecasting = False + self.has_interval_forecasting = True + self.is_high_order = True + self.min_order = 1 + +
[docs] def get_upper(self, flrg): + ret = np.nan + if len(flrg.LHS) > 0: + if flrg.get_key() in self.flrgs: + tmp = self.flrgs[flrg.get_key()] + ret = tmp.get_upper(self.sets) + else: + ret = self.sets[flrg.LHS[-1]].upper + return ret
+ +
[docs] def get_lower(self, flrg): + ret = np.nan + if len(flrg.LHS) > 0: + if flrg.get_key() in self.flrgs: + tmp = self.flrgs[flrg.get_key()] + ret = tmp.get_lower(self.sets) + else: + ret = self.sets[flrg.LHS[-1]].lower + return ret
+ +
[docs] def get_sequence_membership(self, data, fuzzySets): + mb = [fuzzySets[k].membership(data[k]) for k in np.arange(0, len(data))] + return mb
+ + +
[docs] def forecast_interval(self, ndata, **kwargs): + + ret = [] + + l = len(ndata) + + if l <= self.order: + return ndata + + for k in np.arange(self.max_lag, l+1): + + sample = ndata[k - self.max_lag: k] + + flrgs = self.generate_lhs_flrg(sample) + + up = [] + lo = [] + affected_flrgs_memberships = [] + + for flrg in flrgs: + if len(flrg.LHS) > 0: + # find the bounds of each FLRG, weighted by the membership values + mv = flrg.get_membership(sample, self.sets) + up.append(mv * self.get_upper(flrg)) + lo.append(mv * self.get_lower(flrg)) + affected_flrgs_memberships.append(mv) + + # generate the interval + norm = sum(affected_flrgs_memberships) + lo_ = sum(lo) / norm + up_ = sum(up) / norm + ret.append([lo_, up_]) + + return ret
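A usage sketch for interval forecasting; train and test are hypothetical series, and Grid.GridPartitioner is the assumed partitioner from pyFTS.partitioners:

from pyFTS.models import ifts
from pyFTS.partitioners import Grid

fs = Grid.GridPartitioner(data=train, npart=20)
model = ifts.IntervalFTS(partitioner=fs, order=2)
model.fit(train)
intervals = model.predict(test, type='interval')  # list of [lower, upper] pairs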
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/ismailefendi.html b/docs/_build/html/_modules/pyFTS/models/ismailefendi.html new file mode 100644 index 0000000..c7bda9b --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/ismailefendi.html @@ -0,0 +1,172 @@ + + + + + + + + pyFTS.models.ismailefendi — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.models.ismailefendi

+"""
+First Order Improved Weighted Fuzzy Time Series by Efendi, Ismail and Deris (2013)
+
+R. Efendi, Z. Ismail, and M. M. Deris, “Improved weight Fuzzy Time Series as used in the exchange rates forecasting of 
+US Dollar to Ringgit Malaysia,” Int. J. Comput. Intell. Appl., vol. 12, no. 1, p. 1350005, 2013.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts, flrg
+
+
+
[docs]class ImprovedWeightedFLRG(flrg.FLRG): + """First Order Improved Weighted Fuzzy Logical Relationship Group""" + def __init__(self, LHS, **kwargs): + super(ImprovedWeightedFLRG, self).__init__(1, **kwargs) + self.LHS = LHS + self.RHS = {} + self.rhs_counts = {} + self.count = 0.0 + self.w = None + +
[docs] def append_rhs(self, c, **kwargs): + if c not in self.RHS: + self.RHS[c] = c + self.rhs_counts[c] = 1.0 + else: + self.rhs_counts[c] += 1.0 + self.count += 1.0
+ +
[docs] def weights(self): + if self.w is None: + self.w = np.array([self.rhs_counts[c] / self.count for c in self.RHS.keys()]) + return self.w
+ + def __str__(self): + tmp = self.LHS + " -> " + tmp2 = "" + for c in sorted(self.RHS.keys()): + if len(tmp2) > 0: + tmp2 = tmp2 + "," + tmp2 = tmp2 + c + "(" + str(round(self.rhs_counts[c] / self.count, 3)) + ")" + return tmp + tmp2 + + def __len__(self): + return len(self.RHS)
+ + +
[docs]class ImprovedWeightedFTS(fts.FTS): + """First Order Improved Weighted Fuzzy Time Series""" + def __init__(self, **kwargs): + super(ImprovedWeightedFTS, self).__init__(order=1, name="IWFTS", **kwargs) + self.name = "Improved Weighted FTS" + self.detail = "Ismail & Efendi" + +
[docs] def generate_flrg(self, flrs): + for flr in flrs: + if flr.LHS in self.flrgs: + self.flrgs[flr.LHS].append_rhs(flr.RHS) + else: + self.flrgs[flr.LHS] = ImprovedWeightedFLRG(flr.LHS); + self.flrgs[flr.LHS].append_rhs(flr.RHS)
+ +
[docs] def train(self, ndata, **kwargs): + + tmpdata = FuzzySet.fuzzyfy_series(ndata, self.sets, method='maximum') + flrs = FLR.generate_recurrent_flrs(tmpdata) + self.generate_flrg(flrs)
+ +
[docs] def forecast(self, ndata, **kwargs): + l = 1 + + if self.partitioner is not None: + ordered_sets = self.partitioner.ordered_sets + else: + ordered_sets = FuzzySet.set_ordered(self.sets) + + ndata = np.array(ndata) + l = len(ndata) + + ret = [] + + for k in np.arange(0, l): + + actual = FuzzySet.get_maximum_membership_fuzzyset(ndata[k], self.sets, ordered_sets) + + if actual.name not in self.flrgs: + ret.append(actual.centroid) + else: + flrg = self.flrgs[actual.name] + mp = flrg.get_midpoints(self.sets) + + ret.append(mp.dot(flrg.weights())) + + return ret
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/multivariate/FLR.html b/docs/_build/html/_modules/pyFTS/models/multivariate/FLR.html new file mode 100644 index 0000000..0deb0fd --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/multivariate/FLR.html @@ -0,0 +1,103 @@

Source code for pyFTS.models.multivariate.FLR
+
+
+
[docs]class FLR(object): + """Multivariate Fuzzy Logical Relationship""" + + def __init__(self): + """ + Creates an empty multivariate Fuzzy Logical Relationship, where the LHS is a dict of + fuzzy sets indexed by variable name and the RHS is the target fuzzy set + """ + self.LHS = {} + self.RHS = None
[docs] def set_lhs(self, var, set): + self.LHS[var] = set
+ +
[docs] def set_rhs(self, set): + self.RHS = set
+ + def __str__(self): + return str([self.LHS[k] for k in self.LHS.keys()]) + " -> " + self.RHS
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/multivariate/common.html b/docs/_build/html/_modules/pyFTS/models/multivariate/common.html new file mode 100644 index 0000000..1f70571 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/multivariate/common.html @@ -0,0 +1,89 @@

Source code for pyFTS.models.multivariate.common
+import numpy as np
+import pandas as pd
+
+
+
[docs]def fuzzyfy_instance(data_point, var): + mv = np.array([var.partitioner.sets[key].membership(data_point) for key in var.partitioner.ordered_sets]) + ix = np.ravel(np.argwhere(mv > var.alpha_cut)) + sets = [(var.name, var.partitioner.ordered_sets[i]) for i in ix] + return sets
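fuzzyfy_instance keeps every set whose membership in the variable's partitioner exceeds its alpha-cut. A self-contained sketch of that selection with a toy triangular membership function (set names, parameters and the alpha-cut are illustrative):

import numpy as np

def trimf(x, p):
    # triangular membership with parameters [a, b, c]
    a, b, c = p
    return max(min((x - a) / (b - a), (c - x) / (c - b)), 0)

params = {'A0': [0, 5, 10], 'A1': [5, 10, 15], 'A2': [10, 15, 20]}
ordered_sets = ['A0', 'A1', 'A2']
alpha_cut = 0.25

data_point = 11.0
mv = np.array([trimf(data_point, params[k]) for k in ordered_sets])  # [0.0, 0.8, 0.2]
ix = np.ravel(np.argwhere(mv > alpha_cut))
print([('x', ordered_sets[i]) for i in ix])   # -> [('x', 'A1')]: only A1 survives the cut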
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/multivariate/flrg.html b/docs/_build/html/_modules/pyFTS/models/multivariate/flrg.html new file mode 100644 index 0000000..8882433 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/multivariate/flrg.html @@ -0,0 +1,121 @@

Source code for pyFTS.models.multivariate.flrg
+
+import numpy as np
+from pyFTS.common import flrg as flg
+
+
+
[docs]class FLRG(flg.FLRG): + """ + Multivariate Fuzzy Logical Rule Group + """ + + def __init__(self, **kwargs): + super(FLRG,self).__init__(0,**kwargs) + self.order = kwargs.get('order', 1) + self.LHS = kwargs.get('lhs', {}) + self.RHS = set() + +
[docs] def set_lhs(self, var, fset): + if self.order == 1: + self.LHS[var] = fset + else: + if var not in self.LHS: + self.LHS[var] = [] + self.LHS[var].append(fset)
+ + +
[docs] def append_rhs(self, fset, **kwargs): + self.RHS.add(fset)
+ +
[docs] def get_membership(self, data, variables): + mvs = [] + for var in variables: + s = self.LHS[var.name] + mvs.append(var.partitioner.sets[s].membership(data[var.name])) + + return np.nanmin(mvs)
+ + def __str__(self): + _str = "" + for k in self.RHS: + _str += "," if len(_str) > 0 else "" + _str += k + + return self.get_key() + " -> " + _str
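get_membership above combines the per-variable memberships with the minimum t-norm, so a rule only fires as strongly as its weakest antecedent. A tiny numeric sketch with hypothetical grades:

import numpy as np

mvs = [0.9,   # hypothetical membership in the LHS set of the first variable
       0.4]   # hypothetical membership in the LHS set of the second variable

print(np.nanmin(mvs))   # -> 0.4, the firing strength of the rule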
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/multivariate/mvfts.html b/docs/_build/html/_modules/pyFTS/models/multivariate/mvfts.html new file mode 100644 index 0000000..bb13673 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/multivariate/mvfts.html @@ -0,0 +1,225 @@

Source code for pyFTS.models.multivariate.mvfts
+from pyFTS.common import fts, FuzzySet, FLR, Membership, tree
+from pyFTS.partitioners import Grid
+from pyFTS.models.multivariate import FLR as MVFLR, common, flrg as mvflrg
+
+import numpy as np
+import pandas as pd
+
+
+
[docs]class MVFTS(fts.FTS): + """ + Multivariate extension of Chen's ConventionalFTS method + """ + def __init__(self, **kwargs): + super(MVFTS, self).__init__(order=1, **kwargs) + self.explanatory_variables = [] + self.target_variable = None + self.flrgs = {} + self.is_multivariate = True + self.shortname = "MVFTS" + self.name = "Multivariate FTS" + +
[docs] def append_variable(self, var): + """ + Append a new explanatory (exogenous) variable to the model + + :param var: variable object + :return: + """ + self.explanatory_variables.append(var)
+ +
[docs] def format_data(self, data): + ndata = {} + for var in self.explanatory_variables: + ndata[var.name] = data[var.data_label] + + return ndata
+ +
[docs] def apply_transformations(self, data, params=None, updateUoD=False, **kwargs): + ndata = data.copy(deep=True) + for var in self.explanatory_variables: + ndata[var.data_label] = var.apply_transformations(data[var.data_label].values) + + return ndata
+ +
[docs] def generate_lhs_flrs(self, data): + flrs = [] + lags = {} + for vc, var in enumerate(self.explanatory_variables): + data_point = data[var.data_label] + lags[vc] = common.fuzzyfy_instance(data_point, var) + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + for p in root.paths(): + path = list(reversed(list(filter(None.__ne__, p)))) + + flr = MVFLR.FLR() + + for v, s in path: + flr.set_lhs(v, s) + + if len(flr.LHS.keys()) == len(self.explanatory_variables): + flrs.append(flr) + + return flrs
+ +
[docs] def generate_flrs(self, data): + flrs = [] + for ct in range(1, len(data.index)): + ix = data.index[ct-1] + data_point = data.loc[ix] + + tmp_flrs = self.generate_lhs_flrs(data_point) + + target_ix = data.index[ct] + target_point = data[self.target_variable.data_label][target_ix] + target = common.fuzzyfy_instance(target_point, self.target_variable) + + for flr in tmp_flrs: + for v, s in target: + flr.set_rhs(s) + flrs.append(flr) + + return flrs
+ +
[docs] def generate_flrg(self, flrs): + for flr in flrs: + flrg = mvflrg.FLRG(lhs=flr.LHS) + + if flrg.get_key() not in self.flrgs: + self.flrgs[flrg.get_key()] = flrg + + self.flrgs[flrg.get_key()].append_rhs(flr.RHS)
+ + +
[docs] def train(self, data, **kwargs): + + ndata = self.apply_transformations(data) + + flrs = self.generate_flrs(ndata) + self.generate_flrg(flrs)
+ +
[docs] def forecast(self, data, **kwargs): + ret = [] + ndata = self.apply_transformations(data) + for ix in ndata.index: + data_point = ndata.loc[ix] + flrs = self.generate_lhs_flrs(data_point) + mvs = [] + mps = [] + for flr in flrs: + flrg = mvflrg.FLRG(lhs=flr.LHS) + if flrg.get_key() not in self.flrgs: + #print('hit') + mvs.append(0.) + mps.append(0.) + else: + mvs.append(self.flrgs[flrg.get_key()].get_membership(self.format_data(data_point), self.explanatory_variables)) + mps.append(self.flrgs[flrg.get_key()].get_midpoint(self.target_variable.partitioner.sets)) + + #print('mv', mvs) + #print('mp', mps) + mv = np.array(mvs) + mp = np.array(mps) + + ret.append(np.dot(mv,mp.T)/np.sum(mv)) + + ret = self.target_variable.apply_inverse_transformations(ret, + params=data[self.target_variable.data_label].values) + return ret
+ +
[docs] def clone_parameters(self, model): + super(MVFTS, self).clone_parameters(model) + + self.explanatory_variables = model.explanatory_variables + self.target_variable = model.target_variable
+ + def __str__(self): + _str = self.name + ":\n" + for k in self.flrgs.keys(): + _str += str(self.flrgs[k]) + "\n" + + return _str
+ + +
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/multivariate/variable.html b/docs/_build/html/_modules/pyFTS/models/multivariate/variable.html new file mode 100644 index 0000000..1edd6c0 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/multivariate/variable.html @@ -0,0 +1,151 @@

Source code for pyFTS.models.multivariate.variable
+from pyFTS.common import fts, FuzzySet, FLR, Membership, tree
+from pyFTS.partitioners import Grid
+from pyFTS.models.multivariate import FLR as MVFLR
+
+
+
[docs]class Variable: + """ + A variable of a fuzzy time series multivariate model. Each variable contains its own + transformations and partitioners. + """ + def __init__(self, name, **kwargs): + """ + + :param name: + :param \**kwargs: See below + + :Keyword Arguments: + * *alias* -- Alternative name for the variable + """ + self.name = name + """A string with the name of the variable""" + self.alias = kwargs.get('alias', self.name) + """A string with the alias of the variable""" + self.data_label = kwargs.get('data_label', self.name) + """A string with the column name on DataFrame""" + self.type = kwargs.get('type', 'common') + self.transformation = kwargs.get('transformation', None) + self.transformation_params = kwargs.get('transformation_params', None) + self.partitioner = None + self.alpha_cut = kwargs.get('alpha_cut', 0.0) + + if kwargs.get('data', None) is not None: + self.build(**kwargs) + +
[docs] def build(self, **kwargs): + """ + + :param kwargs: + :return: + """ + fs = kwargs.get('partitioner', Grid.GridPartitioner) + mf = kwargs.get('func', Membership.trimf) + np = kwargs.get('npart', 10) + data = kwargs.get('data', None) + kw = kwargs.get('partitioner_specific', {}) + self.partitioner = fs(data=data[self.data_label].values, npart=np, func=mf, + transformation=self.transformation, prefix=self.alias, + variable=self.name, **kw) + + self.partitioner.name = self.name + " " + self.partitioner.name
+ +
[docs] def apply_transformations(self, data, **kwargs): + + if kwargs.get('params', None) is not None: + self.transformation_params = kwargs.get('params', None) + + if self.transformation is not None: + return self.transformation.apply(data, self.transformation_params) + + return data
+ +
[docs] def apply_inverse_transformations(self, data, **kwargs): + + if kwargs.get('params', None) is not None: + self.transformation_params = kwargs.get('params', None) + + if self.transformation is not None: + return self.transformation.inverse(data, self.transformation_params) + + return data
+ + def __str__(self): + return self.name
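A minimal construction sketch for Variable, assuming the pyFTS 1.2.3 API shown above; the column name and partition count are illustrative. Passing data in the constructor triggers build(), which fits the Grid partitioner on the named DataFrame column.

import numpy as np
import pandas as pd
from pyFTS.partitioners import Grid
from pyFTS.models.multivariate import variable

df = pd.DataFrame({'temperature': np.random.uniform(15, 35, 100)})  # toy data

vtemp = variable.Variable('Temperature',
                          data_label='temperature',        # DataFrame column to read
                          partitioner=Grid.GridPartitioner,
                          npart=10,                         # number of fuzzy sets
                          data=df)                          # triggers build()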
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/common.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/common.html new file mode 100644 index 0000000..ffc34a8 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/common.html @@ -0,0 +1,321 @@

Source code for pyFTS.models.nonstationary.common
+"""
+Non Stationary Fuzzy Sets
+
+GARIBALDI, Jonathan M.; JAROSZEWSKI, Marcin; MUSIKASUWAN, Salang. Nonstationary fuzzy sets.
+IEEE Transactions on Fuzzy Systems, v. 16, n. 4, p. 1072-1086, 2008.
+"""
+
+import numpy as np
+from pyFTS import *
+from pyFTS.common import FuzzySet as FS, Membership, FLR
+from pyFTS.partitioners import partitioner
+from pyFTS.models.nonstationary import perturbation
+
+
+
[docs]class FuzzySet(FS.FuzzySet): + """ + Non Stationary Fuzzy Sets + """ + + def __init__(self, name, mf, parameters, **kwargs): + """ + Constructor + """ + super(FuzzySet, self).__init__(name=name, mf=mf, parameters=parameters, centroid=None, alpha=1.0, **kwargs) + + self.location = kwargs.get("location", None) + """Perturbation function that affects the location of the membership function""" + self.location_params = kwargs.get("location_params", None) + """Parameters for the location perturbation function""" + self.location_roots = kwargs.get("location_roots", 0) + self.width = kwargs.get("width", None) + """Perturbation function that affects the width of the membership function""" + self.width_params = kwargs.get("width_params", None) + """Parameters for the width perturbation function""" + self.width_roots = kwargs.get("width_roots", 0) + self.noise = kwargs.get("noise", None) + """Perturbation function that adds noise to the membership function""" + self.noise_params = kwargs.get("noise_params", None) + """Parameters for the noise perturbation function""" + self.perturbated_parameters = {} + self.type = 'nonstationary' + + if self.location is not None and not isinstance(self.location, (list, set)): + self.location = [self.location] + self.location_params = [self.location_params] + self.location_roots = [self.location_roots] + + if self.width is not None and not isinstance(self.width, (list, set)): + self.width = [self.width] + self.width_params = [self.width_params] + self.width_roots = [self.width_roots]
[docs] def perform_location(self, t, param): + if self.location is None: + inc = t + else: + l = len(self.location) + + inc = sum([self.location[k](t + self.location_roots[k], self.location_params[k]) for k in np.arange(0, l)]) + + if self.mf == Membership.gaussmf: + # changes only the mean parameter + return [param[0] + inc, param[1]] + elif self.mf == Membership.sigmf: + # changes only the midpoint parameter + return [param[0], param[1] + inc] + elif self.mf == Membership.bellmf: + return [param[0], param[1], param[2] + inc] + else: + # translate all parameters + return [k + inc for k in param]
+ +
[docs] def perform_width(self, t, param): + if self.width is None: + inc = t + else: + + l = len(self.width) + + inc = sum([self.width[k](t + self.width_roots[k], self.width_params[k]) for k in np.arange(0, l)]) + + if self.mf == Membership.gaussmf: + # changes only the variance parameter + return [param[0], param[1] + inc] + elif self.mf == Membership.sigmf: + # changes only the smooth parameter + return [param[0] + inc, param[1]] + elif self.mf == Membership.trimf: + tmp = inc / 2 + return [param[0] - tmp, param[1], param[2] + tmp] + elif self.mf == Membership.trapmf: + l = (param[3] - param[0]) + rab = (param[1] - param[0]) / l + rcd = (param[3] - param[2]) / l + return [param[0] - inc, param[1] - inc * rab, param[2] + inc * rcd, param[3] + inc] + else: + return param
+ +
[docs] def membership(self, x, t): + """ + Calculate the membership value of a given input + + :param x: input value + :param t: time displacement or perturbation parameters + :return: membership value of x at this fuzzy set + """ + + self.perturbate_parameters(t) + + tmp = self.mf(x, self.perturbated_parameters[str(t)]) + + if self.noise is not None: + tmp += self.noise(t, self.noise_params) + + return tmp
+ +
[docs] def perturbate_parameters(self, t): + if str(t) not in self.perturbated_parameters: + param = self.parameters + if isinstance(t, (list, set)): + param = self.perform_location(t[0], param) + param = self.perform_width(t[1], param) + else: + param = self.perform_location(t, param) + param = self.perform_width(t, param) + self.perturbated_parameters[str(t)] = param
+ +
[docs] def get_midpoint(self, t): + + self.perturbate_parameters(t) + param = self.perturbated_parameters[str(t)] + + if self.mf == Membership.gaussmf: + return param[0] + elif self.mf == Membership.sigmf: + return param[1] + elif self.mf == Membership.trimf: + return param[1] + elif self.mf == Membership.trapmf: + return (param[2] - param[1]) / 2 + else: + return param
+ +
[docs] def get_lower(self, t): + + self.perturbate_parameters(t) + param = self.perturbated_parameters[str(t)] + + if self.mf == Membership.gaussmf: + return param[0] - 3*param[1] + elif self.mf == Membership.sigmf: + return param[0] - param[1] + elif self.mf == Membership.trimf: + return param[0] + elif self.mf == Membership.trapmf: + return param[0] + else: + return param
+ +
[docs] def get_upper(self, t): + + self.perturbate_parameters(t) + param = self.perturbated_parameters[str(t)] + + if self.mf == Membership.gaussmf: + return param[0] + 3*param[1] + elif self.mf == Membership.sigmf: + return param[0] + param[1] + elif self.mf == Membership.trimf: + return param[2] + elif self.mf == Membership.trapmf: + return param[3] + else: + return param
+ + def __str__(self): + tmp = "" + if self.location is not None: + tmp += "Location: " + for ct, f in enumerate(self.location): + tmp += str(f.__name__) + "(" + str(["{0:.2f}".format(p) for p in self.location_params[ct]]) + ") " + if self.width is not None: + tmp += "Width: " + for ct, f in enumerate(self.width): + tmp += str(f.__name__) + "(" + str(["{0:.2f}".format(p) for p in self.width_params[ct]]) + ") " + tmp = "(" + str(["{0:.2f}".format(p) for p in self.parameters]) + ") " + tmp + return self.name + ": " + str(self.mf.__name__) + tmp
+ + +
[docs]def fuzzify(inst, t, fuzzySets): + """ + Calculate the membership values for a data point given nonstationary fuzzy sets + + :param inst: data point or list of data points + :param t: time displacement of the instance + :param fuzzySets: list of fuzzy sets + :return: array of membership values + """ + ret = [] + if not isinstance(inst, list): + inst = [inst] + for i in inst: + # use the supplied displacement t instead of shadowing it with the loop index + mv = np.array([fs.membership(i, t) for fs in fuzzySets]) + ret.append(mv) + return ret
+ + +
[docs]def fuzzySeries(data, fuzzySets, ordered_sets, window_size=1, method='fuzzy', const_t= None): + fts = [] + for t, i in enumerate(data): + tdisp = window_index(t, window_size) if const_t is None else const_t + mv = np.array([fuzzySets[fs].membership(i, tdisp) for fs in ordered_sets]) + if len(mv) == 0: + sets = [check_bounds(i, fuzzySets, tdisp)] + else: + if method == 'fuzzy': + ix = np.ravel(np.argwhere(mv > 0.0)) + elif method == 'maximum': + mx = max(mv) + ix = np.ravel(np.argwhere(mv == mx)) + sets = [fuzzySets[ordered_sets[i]] for i in ix] + fts.append(sets) + return fts
+ + +
[docs]def window_index(t, window_size): + if isinstance(t, (list, set)): + return t + return t - (t % window_size)
+ + +
[docs]def check_bounds(data, partitioner, t): + if data < partitioner.lower_set().get_lower(t): + return partitioner.lower_set() + elif data > partitioner.upper_set().get_upper(t): + return partitioner.upper_set()
+ + +
[docs]def check_bounds_index(data, partitioner, t): + if data < partitioner.lower_set().get_lower(t): + return 0 + elif data > partitioner.upper_set().get_upper(t): + return len(partitioner.sets) -1
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/cvfts.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/cvfts.html new file mode 100644 index 0000000..788e7d6 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/cvfts.html @@ -0,0 +1,349 @@

Source code for pyFTS.models.nonstationary.cvfts
+import numpy as np
+from pyFTS.models import hofts
+from pyFTS.models.nonstationary import common,nsfts
+from pyFTS.common import FLR, flrg, tree
+
+
+
[docs]class HighOrderNonstationaryFLRG(hofts.HighOrderFTS): + """Conventional High Order Fuzzy Logical Relationship Group""" + def __init__(self, order, **kwargs): + super(HighOrderNonstationaryFLRG, self).__init__(order, **kwargs) + self.LHS = [] + self.RHS = {} + self.strlhs = "" + +
[docs] def append_rhs(self, c, **kwargs): + if c not in self.RHS: + self.RHS[c] = c
+ +
[docs] def append_lhs(self, c): + self.LHS.append(c)
+ + def __str__(self): + tmp = "" + for c in sorted(self.RHS): + if len(tmp) > 0: + tmp = tmp + "," + tmp = tmp + c + return self.get_key() + " -> " + tmp + + + def __len__(self): + return len(self.RHS)
+ + +
[docs]class ConditionalVarianceFTS(hofts.HighOrderFTS): + def __init__(self, **kwargs): + super(ConditionalVarianceFTS, self).__init__(**kwargs) + self.name = "Conditional Variance FTS" + self.shortname = "CVFTS " + self.detail = "" + self.flrgs = {} + self.is_high_order = False + if self.partitioner is not None: + self.append_transformation(self.partitioner.transformation) + + self.min_stack = [0,0,0] + self.max_stack = [0,0,0] + self.uod_clip = False + self.order = 1 + self.min_order = 1 + self.max_lag = 1 + self.inputs = [] + self.forecasts = [] + self.residuals = [] + self.variance_residual = 0. + self.mean_residual = 0. + self.memory_window = kwargs.get("memory_window",5) + +
[docs] def train(self, ndata, **kwargs): + + tmpdata = common.fuzzySeries(ndata, self.sets, + self.partitioner.ordered_sets, + method='fuzzy', const_t=0) + flrs = FLR.generate_non_recurrent_flrs(tmpdata) + self.generate_flrg(flrs) + + self.forecasts = self.forecast(ndata, no_update=True) + self.residuals = np.array(ndata[1:]) - np.array(self.forecasts[:-1]) + + self.variance_residual = np.var(self.residuals) # np.max(self.residuals + self.mean_residual = np.mean(self.residuals) + + self.residuals = self.residuals[-self.memory_window:].tolist() + self.forecasts = self.forecasts[-self.memory_window:] + self.inputs = np.array(ndata[-self.memory_window:]).tolist()
+ + +
[docs] def generate_flrg(self, flrs, **kwargs): + for flr in flrs: + if flr.LHS.name in self.flrgs: + self.flrgs[flr.LHS.name].append_rhs(flr.RHS.name) + else: + self.flrgs[flr.LHS.name] = nsfts.ConventionalNonStationaryFLRG(flr.LHS.name) + self.flrgs[flr.LHS.name].append_rhs(flr.RHS.name)
+ + + def _smooth(self, a): + return .1 * a[0] + .3 * a[1] + .6 * a[2] + +
[docs] def perturbation_factors(self, data, **kwargs): + npart = len(self.partitioner.sets) + _max = 0 + _min = 0 + if data < self.original_min: + _min = data - self.original_min if data < 0 else self.original_min - data + elif data > self.original_max: + _max = data - self.original_max if data > 0 else self.original_max - data + self.min_stack.pop(2) + self.min_stack.insert(0, _min) + _min = min(self.min_stack) + self.max_stack.pop(2) + self.max_stack.insert(0, _max) + _max = max(self.max_stack) + + _range = (_max - _min)/2 + + translate = np.linspace(_min, _max, npart) + + var = np.std(self.residuals) + + var = 0 if var < 1 else var + + loc = (self.mean_residual + np.mean(self.residuals)) + + location = [_range + w + loc + k for k in np.linspace(-var,var, npart) for w in translate] + + scale = [abs(location[0] - location[2])] + scale.extend([abs(location[k - 1] - location[k + 1]) for k in np.arange(1, npart)]) + scale.append(abs(location[-1] - location[-3])) + + perturb = [[location[k], scale[k]] for k in np.arange(npart)] + + return perturb
+ +
[docs] def perturbation_factors__old(self, data): + npart = len(self.partitioner.sets) + _max = 0 + _min = 0 + if data < self.original_min: + _min = data - self.original_min if data < 0 else self.original_min - data + elif data > self.original_max: + _max = data - self.original_max if data > 0 else self.original_max - data + self.min_stack.pop(2) + self.min_stack.insert(0,_min) + _min = min(self.min_stack) + self.max_stack.pop(2) + self.max_stack.insert(0, _max) + _max = max(self.max_stack) + + location = np.linspace(_min, _max, npart) + scale = [abs(location[0] - location[2])] + scale.extend([abs(location[k-1] - location[k+1]) for k in np.arange(1, npart)]) + scale.append(abs(location[-1] - location[-3])) + + perturb = [[location[k], scale[k]] for k in np.arange(0, npart)] + + return perturb
+ + def _fsset_key(self, ix): + return self.partitioner.ordered_sets[ix] + + def _affected_sets(self, sample, perturb): + + affected_sets = [[ct, self.sets[self._fsset_key(ct)].membership(sample, perturb[ct])] + for ct in np.arange(len(self.partitioner.sets)) + if self.sets[self._fsset_key(ct)].membership(sample, perturb[ct]) > 0.0] + + if len(affected_sets) == 0: + + if sample < self.partitioner.lower_set().get_lower(perturb[0]): + affected_sets.append([0, 1]) + elif sample > self.partitioner.upper_set().get_upper(perturb[-1]): + affected_sets.append([len(self.sets) - 1, 1]) + + return affected_sets + +
[docs] def forecast(self, ndata, **kwargs): + l = len(ndata) + + ret = [] + + no_update = kwargs.get("no_update",False) + + for k in np.arange(0, l): + + sample = ndata[k] + + if not no_update: + perturb = self.perturbation_factors(sample) + else: + perturb = [[0, 1] for k in np.arange(len(self.partitioner.sets))] + + affected_sets = self._affected_sets(sample, perturb) + + numerator = [] + denominator = [] + + if len(affected_sets) == 1: + ix = affected_sets[0][0] + aset = self.partitioner.ordered_sets[ix] + if aset in self.flrgs: + numerator.append(self.flrgs[aset].get_midpoint(self.sets, perturb[ix])) + else: + fuzzy_set = self.sets[aset] + numerator.append(fuzzy_set.get_midpoint(perturb[ix])) + denominator.append(1) + else: + for aset in affected_sets: + ix = aset[0] + fs = self.partitioner.ordered_sets[ix] + tdisp = perturb[ix] + if fs in self.flrgs: + numerator.append(self.flrgs[fs].get_midpoint(self.sets, tdisp) * aset[1]) + else: + fuzzy_set = self.sets[fs] + numerator.append(fuzzy_set.get_midpoint(tdisp) * aset[1]) + denominator.append(aset[1]) + + if sum(denominator) > 0: + pto = sum(numerator) /sum(denominator) + else: + pto = sum(numerator) + + ret.append(pto) + + if not no_update: + self.forecasts.append(pto) + self.residuals.append(self.inputs[-1] - self.forecasts[-1]) + self.inputs.append(sample) + + self.inputs.pop(0) + self.forecasts.pop(0) + self.residuals.pop(0) + + return ret
+ + +
[docs] def forecast_interval(self, ndata, **kwargs): + l = len(ndata) + + ret = [] + + for k in np.arange(0, l): + + sample = ndata[k] + + perturb = self.perturbation_factors(sample) + + affected_sets = self._affected_sets(sample, perturb) + + upper = [] + lower = [] + + if len(affected_sets) == 1: + ix = affected_sets[0][0] + aset = self.partitioner.ordered_sets[ix] + if aset in self.flrgs: + lower.append(self.flrgs[aset].get_lower(perturb[ix])) + upper.append(self.flrgs[aset].get_upper(perturb[ix])) + else: + fuzzy_set = self.sets[aset] + lower.append(fuzzy_set.get_lower(perturb[ix])) + upper.append(fuzzy_set.get_upper(perturb[ix])) + else: + for aset in affected_sets: + ix = aset[0] + fs = self.partitioner.ordered_sets[ix] + tdisp = perturb[ix] + if fs in self.flrgs: + lower.append(self.flrgs[fs].get_lower(tdisp) * aset[1]) + upper.append(self.flrgs[fs].get_upper(tdisp) * aset[1]) + else: + fuzzy_set = self.sets[fs] + lower.append(fuzzy_set.get_lower(tdisp) * aset[1]) + upper.append(fuzzy_set.get_upper(tdisp) * aset[1]) + + itvl = [sum(lower), sum(upper)] + + ret.append(itvl) + + return ret
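A minimal usage sketch for ConditionalVarianceFTS on synthetic data. It assumes the base fts.FTS wires the partitioner's sets into the model (as the train method above relies on), and it reuses the simplenonstationary_gridpartitioner_builder defined further below, since the model needs nonstationary fuzzy sets.

import numpy as np
from pyFTS.models.nonstationary import cvfts, partitioners as nspart

y = np.cumsum(np.random.normal(0, 1, 500))   # synthetic nonstationary series

fs = nspart.simplenonstationary_gridpartitioner_builder(data=y[:400], npart=15, transformation=None)
model = cvfts.ConditionalVarianceFTS(partitioner=fs)
model.train(y[:400])

point = model.forecast(y[400:])               # point forecasts
intervals = model.forecast_interval(y[400:])  # [lower, upper] per input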
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/flrg.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/flrg.html new file mode 100644 index 0000000..dcc9a5a --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/flrg.html @@ -0,0 +1,152 @@

Source code for pyFTS.models.nonstationary.flrg
+
+from pyFTS.common import flrg
+from pyFTS.models.nonstationary import common
+import numpy as np
+
+
+
[docs]class NonStationaryFLRG(flrg.FLRG): +
[docs] def unpack_args(self, *args): + l = len(args) + tmp = args + sets, t, w = None, 0, 1 + if l > 0 and isinstance(tmp[0], dict): + sets = tmp[0] + if l > 1 and isinstance(tmp[1], (int, list, tuple)): + t = tmp[1] + if l > 2 and isinstance(tmp[2], int): + w = tmp[2] + + return (sets, t, w)
+ + + def __init__(self, LHS, **kwargs): + super(NonStationaryFLRG, self).__init__(1, **kwargs) + self.LHS = LHS + self.RHS = set() + +
[docs] def get_key(self): + if isinstance(self.LHS, list): + return str([k for k in self.LHS]) + elif isinstance(self.LHS, dict): + return str(self.LHS.keys()) + else: + return self.LHS
+ +
[docs] def get_membership(self, data, *args): + sets, t, window_size = self.unpack_args(*args) + ret = 0.0 + if isinstance(self.LHS, (list, set)): + ret = min([sets[self.LHS[ct]].membership(dat, common.window_index(t - (self.order - ct), window_size)) + for ct, dat in enumerate(data)]) + else: + ret = self.LHS.membership(data, common.window_index(t, window_size)) + return ret
+ +
[docs] def get_midpoint(self, *args): + sets, t, window_size = self.unpack_args(*args) + if len(self.RHS) > 0: + if isinstance(self.RHS, (list, set)): + tmp = [sets[r].get_midpoint(common.window_index(t, window_size)) for r in self.RHS] + elif isinstance(self.RHS, dict): + tmp = [sets[r].get_midpoint(common.window_index(t, window_size)) for r in self.RHS.keys()] + return sum(tmp) / len(tmp) + else: + return sets[self.LHS[-1]].get_midpoint(common.window_index(t, window_size))
+ +
[docs] def get_lower(self, *args): + sets, t, window_size = self.unpack_args(*args) + if len(self.RHS) > 0: + if isinstance(self.RHS, (list, set)): + return min([sets[r].get_lower(common.window_index(t, window_size)) for r in self.RHS]) + elif isinstance(self.RHS, dict): + return min([sets[r].get_lower(common.window_index(t, window_size)) for r in self.RHS.keys()]) + else: + return sets[self.LHS[-1]].get_lower(common.window_index(t, window_size))
+ +
[docs] def get_upper(self, *args): + sets, t, window_size = self.unpack_args(*args) + if len(self.RHS) > 0: + if isinstance(self.RHS, (list, set)): + return max([sets[r].get_upper(common.window_index(t, window_size)) for r in self.RHS]) + elif isinstance(self.RHS, dict): + return max([sets[r].get_upper(common.window_index(t, window_size)) for r in self.RHS.keys()]) + else: + return sets[self.LHS[-1]].get_upper(common.window_index(t, window_size))
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/honsfts.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/honsfts.html new file mode 100644 index 0000000..3c5744a --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/honsfts.html @@ -0,0 +1,327 @@

Source code for pyFTS.models.nonstationary.honsfts
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts, tree
+from pyFTS.models import hofts
+from pyFTS.models.nonstationary import common, flrg
+
+
+
[docs]class HighOrderNonStationaryFLRG(flrg.NonStationaryFLRG): + """High Order NonStationary Fuzzy Logical Relationship Group""" + def __init__(self, order, **kwargs): + super(HighOrderNonStationaryFLRG, self).__init__(order, **kwargs) + + self.LHS = [] + self.RHS = {}
[docs] def append_rhs(self, c, **kwargs): + if c.name not in self.RHS: + self.RHS[c.name] = c
+ +
[docs] def append_lhs(self, c): + self.LHS.append(c)
+ + def __str__(self): + tmp = "" + for c in sorted(self.RHS): + if len(tmp) > 0: + tmp = tmp + "," + tmp = tmp + c + return self.get_key() + " -> " + tmp
+ + +
[docs]class HighOrderNonStationaryFTS(hofts.HighOrderFTS): + """High Order NonStationary Fuzzy Time Series""" + def __init__(self, name, **kwargs): + super(HighOrderNonStationaryFTS, self).__init__("HONSFTS " + name, **kwargs) + self.name = "High Order Non Stationary FTS" + self.detail = "" + self.flrgs = {}
[docs] def generate_flrg(self, data, **kwargs): + l = len(data) + window_size = kwargs.get("window_size", 1) + for k in np.arange(self.order, l): + if self.dump: print("FLR: " + str(k)) + + sample = data[k - self.order: k] + + disp = common.window_index(k, window_size) + + rhs = [self.sets[key] for key in self.partitioner.ordered_sets + if self.sets[key].membership(data[k], disp) > 0.0] + + if len(rhs) == 0: + rhs = [common.check_bounds(data[k], self.partitioner, disp)] + + lags = {} + + for o in np.arange(0, self.order): + tdisp = common.window_index(k - (self.order - o), window_size) + lhs = [self.sets[key] for key in self.partitioner.ordered_sets + if self.sets[key].membership(sample[o], tdisp) > 0.0] + + if len(lhs) == 0: + lhs = [common.check_bounds(sample[o], self.partitioner, tdisp)] + + lags[o] = lhs + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + # Trace the possible paths + for p in root.paths(): + flrg = HighOrderNonStationaryFLRG(self.order) + path = list(reversed(list(filter(None.__ne__, p)))) + + for c, e in enumerate(path, start=0): + flrg.append_lhs(e) + + if flrg.get_key() not in self.flrgs: + self.flrgs[flrg.get_key()] = flrg; + + for st in rhs: + self.flrgs[flrg.get_key()].append_rhs(st)
+ + # flrgs = sorted(flrgs, key=lambda flrg: flrg.get_midpoint(0, window_size=1)) + +
[docs] def train(self, data, **kwargs): + + if kwargs.get('order', None) is not None: + self.order = kwargs.get('order', 1) + + if kwargs.get('sets', None) is not None: + self.sets = kwargs.get('sets', None) + + window_size = kwargs.get('parameters', 1) + self.generate_flrg(data, window_size=window_size)
+ + def _affected_flrgs(self, sample, k, time_displacement, window_size): + # print("input: " + str(ndata[k])) + + affected_flrgs = [] + affected_flrgs_memberships = [] + + lags = {} + + for ct, dat in enumerate(sample): + tdisp = common.window_index((k + time_displacement) - (self.order - ct), window_size) + + sel = [ct for ct, key in enumerate(self.partitioner.ordered_sets) + if self.sets[key].membership(dat, tdisp) > 0.0] + + if len(sel) == 0: + sel.append(common.check_bounds_index(dat, self.partitioner, tdisp)) + + lags[ct] = sel + + # Build the tree with all possible paths + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + # Trace the possible paths and build the PFLRG's + + for p in root.paths(): + path = list(reversed(list(filter(None.__ne__, p)))) + flrg = HighOrderNonStationaryFLRG(self.order) + + for kk in path: + flrg.append_lhs(self.sets[self.partitioner.ordered_sets[kk]]) + + affected_flrgs.append(flrg) + # affected_flrgs_memberships.append_rhs(flrg.get_membership(sample, disp)) + + # print(flrg.get_key()) + + # the FLRG is here because of the bounds verification + mv = [] + for ct, dat in enumerate(sample): + td = common.window_index((k + time_displacement) - (self.order - ct), window_size) + tmp = flrg.LHS[ct].membership(dat, td) + + + mv.append(tmp) + # print(mv) + + affected_flrgs_memberships.append(np.prod(mv)) + + return [affected_flrgs, affected_flrgs_memberships] + +
[docs] def forecast(self, ndata, **kwargs): + + time_displacement = kwargs.get("time_displacement",0) + + window_size = kwargs.get("window_size", 1) + + l = len(ndata) + + ret = [] + + for k in np.arange(self.order, l+1): + + sample = ndata[k - self.order: k] + + affected_flrgs, affected_flrgs_memberships = self._affected_flrgs(sample, k, + time_displacement, window_size) + + #print([str(k) for k in affected_flrgs]) + #print(affected_flrgs_memberships) + + tmp = [] + tdisp = common.window_index(k + time_displacement, window_size) + if len(affected_flrgs) == 0: + tmp.append(common.check_bounds(sample[-1], self.sets, tdisp)) + elif len(affected_flrgs) == 1: + flrg = affected_flrgs[0] + if flrg.get_key() in self.flrgs: + tmp.append(self.flrgs[flrg.get_key()].get_midpoint(tdisp)) + else: + tmp.append(flrg.LHS[-1].get_midpoint(tdisp)) + else: + for ct, aset in enumerate(affected_flrgs): + if aset.get_key() in self.flrgs: + tmp.append(self.flrgs[aset.get_key()].get_midpoint(tdisp) * + affected_flrgs_memberships[ct]) + else: + tmp.append(aset.LHS[-1].get_midpoint(tdisp)* + affected_flrgs_memberships[ct]) + pto = sum(tmp) + + #print(pto) + + ret.append(pto) + + return ret
+ +
[docs] def forecast_interval(self, ndata, **kwargs): + + time_displacement = kwargs.get("time_displacement", 0) + + window_size = kwargs.get("window_size", 1) + + l = len(ndata) + + ret = [] + + for k in np.arange(self.order, l + 1): + + sample = ndata[k - self.order: k] + + affected_flrgs, affected_flrgs_memberships = self._affected_flrgs(sample, k, + time_displacement, window_size) + + # print([str(k) for k in affected_flrgs]) + # print(affected_flrgs_memberships) + + upper = [] + lower = [] + + tdisp = common.window_index(k + time_displacement, window_size) + if len(affected_flrgs) == 0: + aset = common.check_bounds(sample[-1], self.sets, tdisp) + lower.append(aset.get_lower(tdisp)) + upper.append(aset.get_upper(tdisp)) + elif len(affected_flrgs) == 1: + _flrg = affected_flrgs[0] + if _flrg.get_key() in self.flrgs: + lower.append(self.flrgs[_flrg.get_key()].get_lower(tdisp)) + upper.append(self.flrgs[_flrg.get_key()].get_upper(tdisp)) + else: + lower.append(_flrg.LHS[-1].get_lower(tdisp)) + upper.append(_flrg.LHS[-1].get_upper(tdisp)) + else: + for ct, aset in enumerate(affected_flrgs): + if aset.get_key() in self.flrgs: + lower.append(self.flrgs[aset.get_key()].get_lower(tdisp) * + affected_flrgs_memberships[ct]) + upper.append(self.flrgs[aset.get_key()].get_upper(tdisp) * + affected_flrgs_memberships[ct]) + else: + lower.append(aset.LHS[-1].get_lower(tdisp) * + affected_flrgs_memberships[ct]) + upper.append(aset.LHS[-1].get_upper(tdisp) * + affected_flrgs_memberships[ct]) + + ret.append([sum(lower), sum(upper)]) + + + return ret
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/nsfts.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/nsfts.html new file mode 100644 index 0000000..0374920 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/nsfts.html @@ -0,0 +1,352 @@

Source code for pyFTS.models.nonstationary.nsfts
+import numpy as np
+from pyFTS.common import FLR, fts
+from pyFTS.models.nonstationary import common, flrg
+
+
+
[docs]class ConventionalNonStationaryFLRG(flrg.NonStationaryFLRG): + """First Order NonStationary Fuzzy Logical Relationship Group""" + + def __init__(self, LHS, **kwargs): + super(ConventionalNonStationaryFLRG, self).__init__(1, **kwargs) + self.LHS = LHS + self.RHS = set() + +
[docs] def get_key(self): + return self.LHS
+ +
[docs] def append_rhs(self, c, **kwargs): + self.RHS.add(c)
+ + def __str__(self): + tmp = self.LHS + " -> " + tmp2 = "" + for c in sorted(self.RHS): + if len(tmp2) > 0: + tmp2 = tmp2 + "," + tmp2 = tmp2 + c + return tmp + tmp2
+ + +
[docs]class NonStationaryFTS(fts.FTS): + """NonStationaryFTS Fuzzy Time Series""" + def __init__(self, **kwargs): + super(NonStationaryFTS, self).__init__(**kwargs) + self.name = "Non Stationary FTS" + self.shortname = "NSFTS" + self.detail = "" + self.flrgs = {} + self.method = kwargs.get('method','conditional') + self.is_high_order = False + if self.partitioner is not None: + self.append_transformation(self.partitioner.transformation) + + if self.method == 'conditional': + self.min_stack = [0, 0, 0] + self.max_stack = [0, 0, 0] + self.uod_clip = False + self.order = 1 + self.min_order = 1 + self.max_lag = 1 + self.inputs = [] + self.forecasts = [] + self.residuals = [] + self.variance_residual = 0. + self.mean_residual = 0. + self.memory_window = kwargs.get("memory_window", 5) + +
[docs] def generate_flrg(self, flrs, **kwargs): + for flr in flrs: + if flr.LHS.name in self.flrgs: + self.flrgs[flr.LHS.name].append_rhs(flr.RHS.name) + else: + self.flrgs[flr.LHS.name] = ConventionalNonStationaryFLRG(flr.LHS.name) + self.flrgs[flr.LHS.name].append_rhs(flr.RHS.name)
+ + def _smooth(self, a): + return .1 * a[0] + .3 * a[1] + .6 * a[2] + +
[docs] def train(self, data, **kwargs): + + if self.method == 'unconditional': + window_size = kwargs.get('parameters', 1) + tmpdata = common.fuzzySeries(data, self.sets, + self.partitioner.ordered_sets, + window_size, method='fuzzy') + else: + tmpdata = common.fuzzySeries(data, self.sets, + self.partitioner.ordered_sets, + method='fuzzy', const_t=0) + + flrs = FLR.generate_non_recurrent_flrs(tmpdata) + self.generate_flrg(flrs) + + if self.method == 'conditional': + self.forecasts = self.forecast(data, no_update=True) + self.residuals = np.array(data[1:]) - np.array(self.forecasts[:-1]) + + self.variance_residual = np.var(self.residuals) # np.max(self.residuals + self.mean_residual = np.mean(self.residuals) + + self.residuals = self.residuals[-self.memory_window:].tolist() + self.forecasts = self.forecasts[-self.memory_window:] + self.inputs = np.array(data[-self.memory_window:]).tolist()
+ +
[docs] def conditional_perturbation_factors(self, data, **kwargs): + npart = len(self.partitioner.sets) + _max = 0 + _min = 0 + if data < self.original_min: + _min = data - self.original_min if data < 0 else self.original_min - data + elif data > self.original_max: + _max = data - self.original_max if data > 0 else self.original_max - data + self.min_stack.pop(2) + self.min_stack.insert(0, _min) + _min = min(self.min_stack) + self.max_stack.pop(2) + self.max_stack.insert(0, _max) + _max = max(self.max_stack) + + _range = (_max - _min)/2 + + translate = np.linspace(_min, _max, npart) + + var = np.std(self.residuals) + + var = 0 if var < 1 else var + + loc = (self.mean_residual + np.mean(self.residuals)) + + location = [_range + w + loc + k for k in np.linspace(-var,var, npart) for w in translate] + + scale = [abs(location[0] - location[2])] + scale.extend([abs(location[k - 1] - location[k + 1]) for k in np.arange(1, npart)]) + scale.append(abs(location[-1] - location[-3])) + + perturb = [[location[k], scale[k]] for k in np.arange(npart)] + + return perturb
+ + def _fsset_key(self, ix): + return self.partitioner.ordered_sets[ix] + + def _affected_sets(self, sample, perturb): + + if self.method == 'conditional': + + affected_sets = [[ct, self.sets[self._fsset_key(ct)].membership(sample, perturb[ct])] + for ct in np.arange(len(self.partitioner.sets)) + if self.sets[self._fsset_key(ct)].membership(sample, perturb[ct]) > 0.0] + if len(affected_sets) == 0: + + if sample < self.partitioner.lower_set().get_lower(perturb[0]): + affected_sets.append([0, 1]) + elif sample > self.partitioner.upper_set().get_upper(perturb[-1]): + affected_sets.append([len(self.sets) - 1, 1]) + + else: + affected_sets = [[ct, self.sets[self._fsset_key(ct)].membership(sample, perturb)] + for ct in np.arange(len(self.partitioner.sets)) + if self.sets[self._fsset_key(ct)].membership(sample, perturb) > 0.0] + + if len(affected_sets) == 0: + + if sample < self.partitioner.lower_set().get_lower(perturb): + affected_sets.append([0, 1]) + elif sample > self.partitioner.upper_set().get_upper(perturb): + affected_sets.append([len(self.sets) - 1, 1]) + + return affected_sets + +
[docs] def forecast(self, ndata, **kwargs): + + time_displacement = kwargs.get("time_displacement",0) + + window_size = kwargs.get("window_size", 1) + + no_update = kwargs.get("no_update", False) + + l = len(ndata) + + ret = [] + + for k in np.arange(0, l): + + sample = ndata[k] + + if self.method == 'unconditional': + perturb = common.window_index(k + time_displacement, window_size) + elif self.method == 'conditional': + if not no_update: + perturb = self.conditional_perturbation_factors(sample) + else: + perturb = [[0, 1] for k in np.arange(len(self.partitioner.sets))] + + affected_sets = self._affected_sets(sample, perturb) + + numerator = [] + denominator = [] + + if len(affected_sets) == 1: + ix = affected_sets[0][0] + aset = self.partitioner.ordered_sets[ix] + if aset in self.flrgs: + numerator.append(self.flrgs[aset].get_midpoint(self.sets, perturb[ix])) + else: + fuzzy_set = self.sets[aset] + numerator.append(fuzzy_set.get_midpoint(perturb[ix])) + denominator.append(1) + else: + for aset in affected_sets: + ix = aset[0] + fs = self.partitioner.ordered_sets[ix] + tdisp = perturb[ix] + if fs in self.flrgs: + numerator.append(self.flrgs[fs].get_midpoint(self.sets, tdisp) * aset[1]) + else: + fuzzy_set = self.sets[fs] + numerator.append(fuzzy_set.get_midpoint(tdisp) * aset[1]) + denominator.append(aset[1]) + + if sum(denominator) > 0: + pto = sum(numerator) / sum(denominator) + else: + pto = sum(numerator) + + ret.append(pto) + + if self.method == 'conditional' and not no_update: + self.forecasts.append(pto) + self.residuals.append(self.inputs[-1] - self.forecasts[-1]) + self.inputs.append(sample) + + self.inputs.pop(0) + self.forecasts.pop(0) + self.residuals.pop(0) + + return ret
+ +
[docs] def forecast_interval(self, ndata, **kwargs): + + time_displacement = kwargs.get("time_displacement", 0) + + window_size = kwargs.get("window_size", 1) + + l = len(ndata) + + ret = [] + + for k in np.arange(0, l): + + # print("input: " + str(ndata[k])) + + tdisp = common.window_index(k + time_displacement, window_size) + + affected_sets = [[self.sets[key], self.sets[key].membership(ndata[k], tdisp)] + for key in self.partitioner.ordered_sets + if self.sets[key].membership(ndata[k], tdisp) > 0.0] + + if len(affected_sets) == 0: + affected_sets.append([common.check_bounds(ndata[k], self.partitioner, tdisp), 1.0]) + + upper = [] + lower = [] + + if len(affected_sets) == 1: + aset = affected_sets[0][0] + if aset.name in self.flrgs: + lower.append(self.flrgs[aset.name].get_lower(tdisp)) + upper.append(self.flrgs[aset.name].get_upper(tdisp)) + else: + lower.append(aset.get_lower(tdisp)) + upper.append(aset.get_upper(tdisp)) + else: + for aset in affected_sets: + if aset[0].name in self.flrgs: + lower.append(self.flrgs[aset[0].name].get_lower(tdisp) * aset[1]) + upper.append(self.flrgs[aset[0].name].get_upper(tdisp) * aset[1]) + else: + lower.append(aset[0].get_lower(tdisp) * aset[1]) + upper.append(aset[0].get_upper(tdisp) * aset[1]) + + + ret.append([sum(lower), sum(upper)]) + + return ret
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/partitioners.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/partitioners.html new file mode 100644 index 0000000..2bb9e6e --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/partitioners.html @@ -0,0 +1,227 @@

Source code for pyFTS.models.nonstationary.partitioners
+import numpy as np
+from pyFTS.partitioners import partitioner
+from pyFTS.models.nonstationary import common, perturbation
+from pyFTS.common import FuzzySet as stationary_fs
+
+
+
[docs]class PolynomialNonStationaryPartitioner(partitioner.Partitioner): + """ + Non Stationary Universe of Discourse Partitioner + """ + + def __init__(self, data, part, **kwargs): + """""" + super(PolynomialNonStationaryPartitioner, self).__init__(name=part.name, data=data, npart=part.partitions, + func=part.membership_function, names=part.setnames, + prefix=part.prefix, transformation=part.transformation, + indexer=part.indexer, preprocess=False) + + self.sets = {} + + loc_params, wid_params = self.get_polynomial_perturbations(data, **kwargs) + + if self.ordered_sets is None and self.setnames is not None: + self.ordered_sets = part.setnames + else: + self.ordered_sets = stationary_fs.set_ordered(part.sets) + + for ct, key in enumerate(self.ordered_sets): + set = part.sets[key] + loc_roots = np.roots(loc_params[ct])[0] + wid_roots = np.roots(wid_params[ct])[0] + tmp = common.FuzzySet(set.name, set.mf, set.parameters, + location=perturbation.polynomial, + location_params=loc_params[ct], + location_roots=loc_roots, #**kwargs) + width=perturbation.polynomial, + width_params=wid_params[ct], + width_roots=wid_roots, **kwargs) + + self.sets[set.name] = tmp + +
[docs] def poly_width(self, par1, par2, rng, deg): + a = np.polyval(par1, rng) + b = np.polyval(par2, rng) + diff = [b[k] - a[k] for k in rng] + tmp = np.polyfit(rng, diff, deg=deg) + return tmp
+ +
[docs] def scale_up(self,x,pct): + if x > 0: return x*(1+pct) + else: return x*pct
+ +
[docs] def scale_down(self,x,pct): + if x > 0: return x*pct + else: return x*(1+pct)
+ +
[docs] def get_polynomial_perturbations(self, data, **kwargs): + w = kwargs.get("window_size", int(len(data) / 5)) + degree = kwargs.get("degree", 2) + xmax = [data[0]] + tmax = [0] + xmin = [data[0]] + tmin = [0] + + l = len(data) + + for i in np.arange(0, l, w): + sample = data[i:i + w] + tx = max(sample) + xmax.append(tx) + tmax.append(np.ravel(np.argwhere(data == tx)).tolist()[0]) + tn = min(sample) + xmin.append(tn) + tmin.append(np.ravel(np.argwhere(data == tn)).tolist()[0]) + + cmax = np.polyfit(tmax, xmax, deg=degree) + cmin = np.polyfit(tmin, xmin, deg=degree) + + cmed = [] + + for d in np.arange(0, degree + 1): + cmed.append(np.linspace(cmin[d], cmax[d], self.partitions)[1:self.partitions - 1]) + + loc_params = [cmin.tolist()] + for i in np.arange(0, self.partitions - 2): + tmp = [cmed[k][i] for k in np.arange(0, degree + 1)] + loc_params.append(tmp) + loc_params.append(cmax.tolist()) + + rng = np.arange(0, l) + + clen = [] + + for i in np.arange(1, self.partitions-1): + tmp = self.poly_width(loc_params[i - 1], loc_params[i + 1], rng, degree) + clen.append(tmp) + + tmp = self.poly_width(loc_params[0], loc_params[1], rng, degree) + clen.insert(0, tmp) + + tmp = self.poly_width(loc_params[self.partitions-2], loc_params[self.partitions-1], rng, degree) + clen.append(tmp) + + tmp = (loc_params, clen) + + return tmp
+ +
[docs] def build(self, data): + pass
+ + +
[docs]class SimpleNonStationaryPartitioner(partitioner.Partitioner): + """ + Non Stationary Universe of Discourse Partitioner + """ + + def __init__(self, data, part, **kwargs): + """""" + super(SimpleNonStationaryPartitioner, self).__init__(name=part.name, data=data, npart=part.partitions, + func=part.membership_function, names=part.setnames, + prefix=part.prefix, transformation=part.transformation, + indexer=part.indexer)#, preprocess=False) + + for key in part.sets.keys(): + set = part.sets[key] + tmp = common.FuzzySet(set.name, set.mf, set.parameters, **kwargs) + tmp.centroid = set.centroid + + self.sets[key] =tmp + + self.ordered_sets = stationary_fs.set_ordered(self.sets) + +
[docs] def build(self, data): + return {}
+ + +
[docs]def simplenonstationary_gridpartitioner_builder(data, npart, transformation): + from pyFTS.partitioners import Grid + from pyFTS.models.nonstationary import perturbation, partitioners + + tmp_fs = Grid.GridPartitioner(data=data, npart=npart, transformation=transformation) + fs = partitioners.SimpleNonStationaryPartitioner(data, tmp_fs, + location=perturbation.polynomial, + location_params=[1, 0], + location_roots=0, + width=perturbation.polynomial, + width_params=[1, 0], + width_roots=0) + return fs
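A usage sketch tying the builder above to the NSFTS model from the previous module, on synthetic data and under the same assumption that the base class wires the partitioner into the model; method='conditional' is the model's default, as shown earlier.

import numpy as np
from pyFTS.models.nonstationary import nsfts, partitioners as nspart

y = np.cumsum(np.random.normal(0, 1, 500))   # synthetic series

fs = nspart.simplenonstationary_gridpartitioner_builder(data=y[:400], npart=15, transformation=None)
model = nsfts.NonStationaryFTS(partitioner=fs)
model.train(y[:400])
print(model.forecast(y[400:410]))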
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/perturbation.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/perturbation.html new file mode 100644 index 0000000..9334c89 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/perturbation.html @@ -0,0 +1,101 @@

Source code for pyFTS.models.nonstationary.perturbation
+"""
+Perturbation functions for Non Stationary Fuzzy Sets
+"""
+
+import numpy as np
+from pyFTS import *
+from pyFTS.common import FuzzySet, Membership
+
+
+
[docs]def linear(x, parameters): + return np.polyval(parameters, x)
+ + +
[docs]def polynomial(x, parameters): + return np.polyval(parameters, x)
+ + +
[docs]def exponential(x, parameters): + return np.exp(x*parameters[0])
+ + +
[docs]def periodic(x, parameters): + return parameters[0] * np.sin(x * parameters[1])
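These are thin wrappers over NumPy, so each one can be checked directly from its formula (the arguments below are arbitrary):

import numpy as np
from pyFTS.models.nonstationary import perturbation

print(perturbation.linear(2, [3, 1]))            # 3*2 + 1 = 7
print(perturbation.polynomial(2, [1, 0, 0]))     # 2**2 = 4
print(perturbation.exponential(1, [0.5]))        # e**0.5 ~ 1.6487
print(perturbation.periodic(np.pi / 2, [2, 1]))  # 2*sin(pi/2) = 2.0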
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/nonstationary/util.html b/docs/_build/html/_modules/pyFTS/models/nonstationary/util.html new file mode 100644 index 0000000..6d60fbe --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/nonstationary/util.html @@ -0,0 +1,172 @@

Source code for pyFTS.models.nonstationary.util
+import numpy as np
+import pandas as pd
+import matplotlib
+import matplotlib.colors as pltcolors
+import matplotlib.pyplot as plt
+from pyFTS.common import Membership, Util
+
+
+
[docs]def plot_sets(partitioner, start=0, end=10, step=1, tam=[5, 5], colors=None, + save=False, file=None, axes=None, data=None, window_size = 1, only_lines=False): + + range = np.arange(start,end,step) + ticks = [] + if axes is None: + fig, axes = plt.subplots(nrows=1, ncols=1, figsize=tam) + + for ct, key in enumerate(partitioner.ordered_sets): + fset = partitioner.sets[key] + if not only_lines: + for t in range: + tdisp = t - (t % window_size) + fset.membership(0, tdisp) + param = fset.perturbated_parameters[str(tdisp)] + + if fset.mf == Membership.trimf: + if t == start: + line = axes.plot([t, t+1, t], param, label=fset.name) + fset.metadata['color'] = line[0].get_color() + else: + axes.plot([t, t + 1, t], param,c=fset.metadata['color']) + + ticks.extend(["t+"+str(t),""]) + else: + tmp = [] + for t in range: + tdisp = t - (t % window_size) + fset.membership(0, tdisp) + param = fset.perturbated_parameters[str(tdisp)] + tmp.append(np.polyval(param, tdisp)) + axes.plot(range, tmp, ls="--", c="blue") + + axes.set_ylabel("Universe of Discourse") + axes.set_xlabel("Time") + plt.xticks([k for k in range], ticks, rotation='vertical') + + handles0, labels0 = axes.get_legend_handles_labels() + lgd = axes.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1)) + + if data is not None: + axes.plot(np.arange(start, start + len(data), 1), data,c="black") + + plt.tight_layout() + + Util.show_and_save_image(fig, file, save)
+ + +
[docs]def plot_sets_conditional(model, data, step=1, size=[5, 5], colors=None, + save=False, file=None, axes=None): + range = np.arange(0, len(data), step) + ticks = [] + if axes is None: + fig, axes = plt.subplots(nrows=1, ncols=1, figsize=size) + + for t in range: + model.forecast([data[t]]) + perturb = model.conditional_perturbation_factors(data[t]) + + for ct, key in enumerate(model.partitioner.ordered_sets): + set = model.partitioner.sets[key] + set.perturbate_parameters(perturb[ct]) + param = set.perturbated_parameters[str(perturb[ct])] + + if set.mf == Membership.trimf: + if t == 0: + line = axes.plot([t, t+1, t], param, label=set.name) + set.metadata['color'] = line[0].get_color() + else: + axes.plot([t, t + 1, t], param,c=set.metadata['color']) + + ticks.extend(["t+"+str(t),""]) + + axes.set_ylabel("Universe of Discourse") + axes.set_xlabel("Time") + plt.xticks([k for k in range], ticks, rotation='vertical') + + handles0, labels0 = axes.get_legend_handles_labels() + lgd = axes.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1)) + + if data is not None: + axes.plot(np.arange(0, len(data), 1), data,c="black") + + plt.tight_layout() + + Util.show_and_save_image(fig, file, save)
\ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/models/pwfts.html b/docs/_build/html/_modules/pyFTS/models/pwfts.html new file mode 100644 index 0000000..337bae5 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/models/pwfts.html @@ -0,0 +1,635 @@

Source code for pyFTS.models.pwfts
+#!/usr/bin/python
+# -*- coding: utf8 -*-
+
+import numpy as np
+import pandas as pd
+import math
+from operator import itemgetter
+from pyFTS.common import FLR, FuzzySet, tree
+from pyFTS.models import hofts, ifts
+from pyFTS.probabilistic import ProbabilityDistribution
+
+
+
[docs]class ProbabilisticWeightedFLRG(hofts.HighOrderFLRG): + """High Order Probabilistic Weighted Fuzzy Logical Relationship Group""" + def __init__(self, order): + super(ProbabilisticWeightedFLRG, self).__init__(order) + self.RHS = {} + self.rhs_count = {} + self.frequency_count = 0.0 + self.Z = None + +
[docs] def get_membership(self, data, sets): + if isinstance(data, (np.ndarray, list)): + return np.nanprod([sets[key].membership(data[count]) + for count, key in enumerate(self.LHS, start=0)]) + else: + return sets[self.LHS[0]].membership(data)
+ +
[docs] def append_rhs(self, c, **kwargs): + mv = kwargs.get('mv', 1.0) + self.frequency_count += mv + if c in self.RHS: + self.rhs_count[c] += mv + else: + self.RHS[c] = c + self.rhs_count[c] = mv
+ +
[docs] def lhs_conditional_probability(self, x, sets, norm, uod, nbins): + pk = self.frequency_count / norm + + tmp = pk * (self.get_membership(x, sets) / self.partition_function(sets, uod, nbins=nbins)) + + return tmp
+ +
[docs] def rhs_unconditional_probability(self, c): + return self.rhs_count[c] / self.frequency_count
+ +
[docs] def rhs_conditional_probability(self, x, sets, uod, nbins): + total = 0.0 + for rhs in self.RHS: + set = sets[rhs] + wi = self.rhs_unconditional_probability(rhs) + mv = set.membership(x) / set.partition_function(uod, nbins=nbins) + total += wi * mv + + return total
+ +
[docs] def partition_function(self, sets, uod, nbins=100): + if self.Z is None: + self.Z = 0.0 + for k in np.linspace(uod[0], uod[1], nbins): + for key in self.LHS: + self.Z += sets[key].membership(k) + + return self.Z
+ +
[docs] def get_midpoint(self, sets): + """Return the expectation of the PWFLRG: the probability-weighted sum of the RHS midpoints""" + if self.midpoint is None: + self.midpoint = np.sum(np.array([self.rhs_unconditional_probability(s) * sets[s].centroid + for s in self.RHS])) + + return self.midpoint
+ +
[docs] def get_upper(self, sets): + if self.upper is None: + self.upper = np.sum(np.array([self.rhs_unconditional_probability(s) * sets[s].upper for s in self.RHS])) + + return self.upper
+ +
[docs] def get_lower(self, sets): + if self.lower is None: + self.lower = np.sum(np.array([self.rhs_unconditional_probability(s) * sets[s].lower for s in self.RHS])) + + return self.lower
+ + def __str__(self): + tmp2 = "" + for c in sorted(self.RHS): + if len(tmp2) > 0: + tmp2 = tmp2 + ", " + tmp2 = tmp2 + "(" + str(round(self.rhs_count[c] / self.frequency_count, 3)) + ")" + c + return self.get_key() + " -> " + tmp2
+ + +
[docs]class ProbabilisticWeightedFTS(ifts.IntervalFTS): + """High Order Probabilistic Weighted Fuzzy Time Series""" + def __init__(self, **kwargs): + super(ProbabilisticWeightedFTS, self).__init__(**kwargs) + self.shortname = "PWFTS" + self.name = "Probabilistic FTS" + self.detail = "Silva, P.; Guimarães, F.; Sadaei, H." + self.flrgs = {} + self.global_frequency_count = 0 + self.has_point_forecasting = True + self.has_interval_forecasting = True + self.has_probability_forecasting = True + self.is_high_order = True + self.min_order = 1 + self.auto_update = kwargs.get('update',False) + self.configure_lags(**kwargs) + +
[docs] def train(self, data, **kwargs): + + self.configure_lags(**kwargs) + parameters = kwargs.get('parameters','fuzzy') + + if parameters == 'monotonic': + tmpdata = FuzzySet.fuzzyfy_series_old(data, self.sets) + flrs = FLR.generate_recurrent_flrs(tmpdata) + self.generateFLRG(flrs) + else: + self.generate_flrg(data)
+ +
[docs] def generate_lhs_flrg(self, sample): + lags = {} + + flrgs = [] + + for ct, o in enumerate(self.lags): + lhs = [key for key in self.partitioner.ordered_sets + if self.sets[key].membership(sample[o-1]) > self.alpha_cut] + lags[ct] = lhs + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + # Trace the possible paths + for p in root.paths(): + flrg = ProbabilisticWeightedFLRG(self.order) + path = list(reversed(list(filter(None.__ne__, p)))) + + for lhs in path: + flrg.append_lhs(lhs) + + flrgs.append(flrg) + + return flrgs
+ +
[docs]    def generate_flrg(self, data):
        l = len(data)
        for k in np.arange(self.max_lag, l):
            if self.dump: print("FLR: " + str(k))

            sample = data[k - self.max_lag: k]

            flrgs = self.generate_lhs_flrg(sample)

            for flrg in flrgs:

                lhs_mv = flrg.get_membership(sample, self.sets)

                if flrg.get_key() not in self.flrgs:
                    self.flrgs[flrg.get_key()] = flrg

                fuzzyfied = [(s, self.sets[s].membership(data[k]))
                             for s in self.sets.keys()
                             if self.sets[s].membership(data[k]) > self.alpha_cut]

                mvs = []
                for fset, mv in fuzzyfied:
                    self.flrgs[flrg.get_key()].append_rhs(fset, mv=lhs_mv * mv)
                    mvs.append(mv)

                tmp_fq = sum([lhs_mv * kk for kk in mvs if kk > 0])

                self.global_frequency_count += tmp_fq
+ + +
[docs] def update_model(self,data): + pass
+ + +
[docs]    def add_new_PWFLGR(self, flrg):
        if flrg.get_key() not in self.flrgs:
            tmp = ProbabilisticWeightedFLRG(self.order)
            for fs in flrg.LHS: tmp.append_lhs(fs)
            tmp.append_rhs(flrg.LHS[-1])
            self.flrgs[tmp.get_key()] = tmp
            self.global_frequency_count += 1
+ +
[docs] def flrg_lhs_unconditional_probability(self, flrg): + if flrg.get_key() in self.flrgs: + return self.flrgs[flrg.get_key()].frequency_count / self.global_frequency_count + else: + return 0.0
+ #self.add_new_PWFLGR(flrg) + #return self.flrg_lhs_unconditional_probability(flrg) + +
[docs] def flrg_lhs_conditional_probability(self, x, flrg): + mv = flrg.get_membership(x, self.sets) + pb = self.flrg_lhs_unconditional_probability(flrg) + return mv * pb
+ +
[docs] def get_midpoint(self, flrg): + if flrg.get_key() in self.flrgs: + tmp = self.flrgs[flrg.get_key()] + ret = tmp.get_midpoint(self.sets) #sum(np.array([tmp.rhs_unconditional_probability(s) * self.setsDict[s].centroid for s in tmp.RHS])) + else: + if len(flrg.LHS) > 0: + pi = 1 / len(flrg.LHS) + ret = sum(np.array([pi * self.sets[s].centroid for s in flrg.LHS])) + else: + ret = np.nan + return ret
+ +
[docs] def flrg_rhs_conditional_probability(self, x, flrg): + + if flrg.get_key() in self.flrgs: + _flrg = self.flrgs[flrg.get_key()] + cond = [] + for s in _flrg.RHS.keys(): + _set = self.sets[s] + tmp = _flrg.rhs_unconditional_probability(s) * (_set.membership(x) / _set.partition_function(uod=self.get_UoD())) + cond.append(tmp) + ret = sum(np.array(cond)) + else: + ########################################## + # this may be the problem! TEST IT!!! + ########################################## + pi = 1 / len(flrg.LHS) + ret = sum(np.array([pi * self.sets[s].membership(x) for s in flrg.LHS])) + return ret
+ +
[docs] def get_upper(self, flrg): + if flrg.get_key() in self.flrgs: + tmp = self.flrgs[flrg.get_key()] + ret = tmp.get_upper(self.sets) + else: + ret = 0 + return ret
+ +
[docs] def get_lower(self, flrg): + if flrg.get_key() in self.flrgs: + tmp = self.flrgs[flrg.get_key()] + ret = tmp.get_lower(self.sets) + else: + ret = 0 + return ret
+ +
[docs] def forecast(self, data, **kwargs): + method = kwargs.get('method','heuristic') + + l = len(data) + + ret = [] + + for k in np.arange(self.max_lag - 1, l): + sample = data[k - (self.max_lag - 1): k + 1] + + if method == 'heuristic': + ret.append(self.point_heuristic(sample, **kwargs)) + elif method == 'expected_value': + ret.append(self.point_expected_value(sample, **kwargs)) + else: + raise ValueError("Unknown point forecasting method!") + + if self.auto_update and k > self.order+1: self.update_model(data[k - self.order - 1 : k]) + + return ret
+ +
[docs] def point_heuristic(self, sample, **kwargs): + + flrgs = self.generate_lhs_flrg(sample) + + mp = [] + norms = [] + for flrg in flrgs: + norm = self.flrg_lhs_conditional_probability(sample, flrg) + if norm == 0: + norm = self.flrg_lhs_unconditional_probability(flrg) + mp.append(norm * self.get_midpoint(flrg)) + norms.append(norm) + + norm = sum(norms) + if norm == 0: + return 0 + else: + return sum(mp) / norm
+ + +
[docs] def point_expected_value(self, sample, **kwargs): + return self.forecast_distribution(sample)[0].expected_value()
+ + +
[docs] def forecast_interval(self, ndata, **kwargs): + + method = kwargs.get('method','heuristic') + alpha = kwargs.get('alpha', 0.05) + + l = len(ndata) + + ret = [] + + for k in np.arange(self.max_lag - 1, l): + + sample = ndata[k - (self.max_lag - 1): k + 1] + + if method == 'heuristic': + ret.append(self.interval_heuristic(sample)) + elif method == 'quantile': + ret.append(self.interval_quantile(sample, alpha)) + else: + raise ValueError("Unknown interval forecasting method!") + + return ret
+ +
[docs] def interval_quantile(self, ndata, alpha): + dist = self.forecast_distribution(ndata) + itvl = dist[0].quantile([alpha, 1.0 - alpha]) + return itvl
+ +
[docs] def interval_heuristic(self, sample): + + flrgs = self.generate_lhs_flrg(sample) + + up = [] + lo = [] + norms = [] + for flrg in flrgs: + norm = self.flrg_lhs_conditional_probability(sample, flrg) + if norm == 0: + norm = self.flrg_lhs_unconditional_probability(flrg) + up.append(norm * self.get_upper(flrg)) + lo.append(norm * self.get_lower(flrg)) + norms.append(norm) + + # gerar o intervalo + norm = sum(norms) + if norm == 0: + return [0, 0] + else: + lo_ = sum(lo) / norm + up_ = sum(up) / norm + return [lo_, up_]
+ +
[docs] def forecast_distribution(self, ndata, **kwargs): + + smooth = kwargs.get("smooth", "none") + + l = len(ndata) + uod = self.get_UoD() + + if 'bins' in kwargs: + _bins = kwargs.pop('bins') + nbins = len(_bins) + else: + nbins = kwargs.get("num_bins", 100) + _bins = np.linspace(uod[0], uod[1], nbins) + + ret = [] + + for k in np.arange(self.max_lag - 1, l): + sample = ndata[k - (self.max_lag - 1): k + 1] + + flrgs = self.generate_lhs_flrg(sample) + + if 'type' in kwargs: + kwargs.pop('type') + + dist = ProbabilityDistribution.ProbabilityDistribution(smooth, uod=uod, bins=_bins, **kwargs) + + for bin in _bins: + num = [] + den = [] + for s in flrgs: + if s.get_key() in self.flrgs: + flrg = self.flrgs[s.get_key()] + pk = flrg.lhs_conditional_probability(sample, self.sets, self.global_frequency_count, uod, nbins) + wi = flrg.rhs_conditional_probability(bin, self.sets, uod, nbins) + num.append(wi * pk) + den.append(pk) + else: + num.append(0.0) + den.append(0.000000001) + pf = sum(num) / sum(den) + + dist.set(bin, pf) + + ret.append(dist) + + return ret
+ + def __check_point_bounds(self, point): + lower_set = self.partitioner.lower_set() + upper_set = self.partitioner.upper_set() + return point <= lower_set.lower or point >= upper_set.upper + +
[docs] def forecast_ahead(self, data, steps, **kwargs): + + l = len(data) + + start = kwargs.get('start', self.max_lag) + + ret = data[start - self.max_lag: start].tolist() + + for k in np.arange(self.max_lag, steps+self.max_lag): + + if self.__check_point_bounds(ret[-1]) : + ret.append(ret[-1]) + else: + mp = self.forecast(ret[k - self.max_lag: k], **kwargs) + ret.append(mp[0]) + + return ret[self.max_lag:]
+ + def __check_interval_bounds(self, interval): + if len(self.transformations) > 0: + lower_set = self.partitioner.lower_set() + upper_set = self.partitioner.upper_set() + return interval[0] <= lower_set.lower and interval[1] >= upper_set.upper + elif len(self.transformations) == 0: + return interval[0] <= self.original_min and interval[1] >= self.original_max + +
[docs] def forecast_ahead_interval(self, data, steps, **kwargs): + + l = len(data) + + start = kwargs.get('start', self.max_lag) + + sample = data[start - self.max_lag: start] + + ret = [[k, k] for k in sample] + + ret.append(self.forecast_interval(sample)[0]) + + for k in np.arange(self.max_lag+1, steps+self.max_lag): + + if len(ret) > 0 and self.__check_interval_bounds(ret[-1]): + ret.append(ret[-1]) + else: + lower = self.forecast_interval([ret[x][0] for x in np.arange(k - self.max_lag, k)], **kwargs) + upper = self.forecast_interval([ret[x][1] for x in np.arange(k - self.max_lag, k)], **kwargs) + + ret.append([np.min(lower), np.max(upper)]) + + return ret[self.order:]
+ +
[docs] def forecast_ahead_distribution(self, ndata, steps, **kwargs): + + ret = [] + + smooth = kwargs.get("smooth", "none") + + uod = self.get_UoD() + + if 'bins' in kwargs: + _bins = kwargs.pop('bins') + nbins = len(_bins) + else: + nbins = kwargs.get("num_bins", 100) + _bins = np.linspace(uod[0], uod[1], nbins) + + start = kwargs.get('start', self.max_lag) + + sample = ndata[start - self.max_lag: start] + + for dat in sample: + if 'type' in kwargs: + kwargs.pop('type') + tmp = ProbabilityDistribution.ProbabilityDistribution(smooth, uod=uod, bins=_bins, **kwargs) + tmp.set(dat, 1.0) + ret.append(tmp) + + dist = self.forecast_distribution(sample, bins=_bins)[0] + + ret.append(dist) + + for k in np.arange(self.max_lag+1, steps+self.max_lag+1): + dist = ProbabilityDistribution.ProbabilityDistribution(smooth, uod=uod, bins=_bins, **kwargs) + + lags = {} + + # Find all bins of past distributions with probability greater than zero + + for ct, d in enumerate(self.lags): + dd = ret[k - d] + vals = [float(v) for v in dd.bins if round(dd.density(v), 4) > 0] + lags[ct] = sorted(vals) + + root = tree.FLRGTreeNode(None) + + tree.build_tree_without_order(root, lags, 0) + + # Trace all possible combinations between the bins of past distributions + + for p in root.paths(): + path = list(reversed(list(filter(None.__ne__, p)))) + + # get the combined probabilities for this path + + pk = np.prod([ret[k - self.max_lag + o].density(path[ct]) + for ct, o in enumerate(self.lags)]) + + + d = self.forecast_distribution(path)[0] + + for bin in _bins: + dist.set(bin, dist.density(bin) + pk * d.density(bin)) + + ret.append(dist) + + return ret[self.order:]
+ + def __str__(self): + tmp = self.name + ":\n" + for r in sorted(self.flrgs): + p = round(self.flrgs[r].frequency_count / self.global_frequency_count, 3) + tmp = tmp + "(" + str(p) + ") " + str(self.flrgs[r]) + "\n" + return tmp
+ + +
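A minimal end-to-end sketch of the class above (the series and hyperparameters are illustrative, and it is assumed, as elsewhere in pyFTS, that the partitioner's fuzzy sets are propagated to the model):

    import numpy as np
    from pyFTS.partitioners import Grid
    from pyFTS.models import pwfts

    # synthetic series, used here only for illustration
    data = np.sin(np.linspace(0, 20 * np.pi, 1000)) + np.random.normal(0, 0.1, 1000)

    part = Grid.GridPartitioner(data=data[:800], npart=15)
    model = pwfts.ProbabilisticWeightedFTS(partitioner=part, order=2)
    model.train(data[:800])

    points = model.forecast(data[800:], method='heuristic')
    intervals = model.forecast_interval(data[800:], method='quantile', alpha=0.05)
    distributions = model.forecast_distribution(data[800:], num_bins=100)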
[docs]def visualize_distributions(model, **kwargs): + import matplotlib.pyplot as plt + from matplotlib import gridspec + import seaborn as sns + + ordered_sets = model.partitioner.ordered_sets + ftpg_keys = sorted(model.flrgs.keys(), key=lambda x: model.flrgs[x].get_midpoint(model.sets)) + + lhs_probs = [model.flrg_lhs_unconditional_probability(model.flrgs[k]) + for k in ftpg_keys] + + mat = np.zeros((len(ftpg_keys), len(ordered_sets))) + for row, w in enumerate(ftpg_keys): + for col, k in enumerate(ordered_sets): + if k in model.flrgs[w].RHS: + mat[row, col] = model.flrgs[w].rhs_unconditional_probability(k) + + size = kwargs.get('size', (5,10)) + + fig = plt.figure(figsize=size) + + gs = gridspec.GridSpec(1, 2, width_ratios=[1, 4]) + ax1 = plt.subplot(gs[0]) + sns.barplot(x='y', y='x', color='darkblue', data={'x': ftpg_keys, 'y': lhs_probs}, ax=ax1) + ax1.set_ylabel("LHS Probabilities") + + ind_sets = range(len(ordered_sets)) + ax = plt.subplot(gs[1]) + sns.heatmap(mat, cmap='Blues', ax=ax, yticklabels=False) + ax.set_title("RHS probabilities") + ax.set_xticks(ind_sets) + ax.set_xticklabels(ordered_sets) + ax.grid(True) + ax.xaxis.set_tick_params(rotation=90)
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/sadaei.html b/docs/_build/html/_modules/pyFTS/models/sadaei.html
new file mode 100644
index 0000000..65be79c
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/sadaei.html
@@ -0,0 +1,177 @@
pyFTS.models.sadaei — pyFTS 1.2.3 documentation
Source code for pyFTS.models.sadaei

+"""
+First Order Exponentially Weighted Fuzzy Time Series by Sadaei et al. (2014)
+
+H. J. Sadaei, R. Enayatifar, A. H. Abdullah, and A. Gani, “Short-term load forecasting using a hybrid model with a
+refined exponentially weighted fuzzy time series and an improved harmony search,” Int. J. Electr. Power Energy Syst., vol. 62, pp. 118–129, 2014.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet,FLR,fts, flrg
+
+default_c = 1.1
+
+
+
[docs]class ExponentialyWeightedFLRG(flrg.FLRG):
    """First Order Exponentially Weighted Fuzzy Logical Relationship Group"""
    def __init__(self, LHS, **kwargs):
        super(ExponentialyWeightedFLRG, self).__init__(1, **kwargs)
        self.LHS = LHS
        self.RHS = []
        self.count = 0.0
        self.c = kwargs.get("c", default_c)
        self.w = None
[docs] def append_rhs(self, c, **kwargs): + self.RHS.append(c) + self.count = self.count + 1.0
+ +
[docs] def weights(self): + if self.w is None: + wei = [self.c ** k for k in np.arange(0.0, self.count, 1.0)] + tot = sum(wei) + self.w = np.array([k / tot for k in wei]) + return self.w
+ + def __str__(self): + tmp = self.LHS + " -> " + tmp2 = "" + cc = 0 + wei = [self.c ** k for k in np.arange(0.0, self.count, 1.0)] + tot = sum(wei) + for c in sorted(self.RHS): + if len(tmp2) > 0: + tmp2 = tmp2 + "," + tmp2 = tmp2 + c + "(" + str(wei[cc] / tot) + ")" + cc = cc + 1 + return tmp + tmp2 + + def __len__(self): + return len(self.RHS)
+ + +
[docs]class ExponentialyWeightedFTS(fts.FTS):
    """First Order Exponentially Weighted Fuzzy Time Series"""
    def __init__(self, **kwargs):
        super(ExponentialyWeightedFTS, self).__init__(order=1, name="EWFTS", **kwargs)
        self.name = "Exponentially Weighted FTS"
        self.detail = "Sadaei"
        self.c = kwargs.get('c', default_c)
[docs]    def generate_flrg(self, flrs, c):
        for flr in flrs:
            if flr.LHS in self.flrgs:
                self.flrgs[flr.LHS].append_rhs(flr.RHS)
            else:
                self.flrgs[flr.LHS] = ExponentialyWeightedFLRG(flr.LHS, c=c)
                self.flrgs[flr.LHS].append_rhs(flr.RHS)
+ +
[docs] def train(self, data, **kwargs): + tmpdata = FuzzySet.fuzzyfy_series(data, self.sets, method='maximum') + flrs = FLR.generate_recurrent_flrs(tmpdata) + self.generate_flrg(flrs, self.c)
+ +
[docs]    def forecast(self, ndata, **kwargs):
        if self.partitioner is not None:
            ordered_sets = self.partitioner.ordered_sets
        else:
            ordered_sets = FuzzySet.set_ordered(self.sets)

        l = len(ndata)

        ret = []

        for k in np.arange(0, l):

            actual = FuzzySet.get_maximum_membership_fuzzyset(ndata[k], self.sets, ordered_sets)

            if actual.name not in self.flrgs:
                ret.append(actual.centroid)
            else:
                flrg = self.flrgs[actual.name]
                mp = flrg.get_midpoints(self.sets)

                ret.append(mp.dot(flrg.weights()))

        return ret
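A minimal usage sketch (illustrative data; with c > 1 the weights() scheme above gives exponentially more weight to the most recent recurrences):

    import numpy as np
    from pyFTS.partitioners import Grid
    from pyFTS.models import sadaei

    data = np.random.normal(10, 2, 500)
    part = Grid.GridPartitioner(data=data, npart=10)
    model = sadaei.ExponentialyWeightedFTS(partitioner=part, c=1.5)
    model.train(data)
    forecasts = model.forecast(data[-10:])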
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/seasonal/SeasonalIndexer.html b/docs/_build/html/_modules/pyFTS/models/seasonal/SeasonalIndexer.html
new file mode 100644
index 0000000..1242263
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/seasonal/SeasonalIndexer.html
@@ -0,0 +1,280 @@
pyFTS.models.seasonal.SeasonalIndexer — pyFTS 1.2.3 documentation
Source code for pyFTS.models.seasonal.SeasonalIndexer

+import numpy as np
+import pandas as pd
+from pyFTS.models.seasonal import common
+
+
+
[docs]class SeasonalIndexer(object):
    """
    Seasonal Indexer. Responsible for finding the seasonal index of a data point within its data set
    """
    def __init__(self, num_seasons, **kwargs):
        self.num_seasons = num_seasons
        self.name = kwargs.get("name", "")
[docs] def get_season_of_data(self,data): + pass
+ +
[docs]    def get_season_by_index(self, index):
        pass
+ +
[docs] def get_data_by_season(self, data, indexes): + pass
+ +
[docs] def get_index_by_season(self, indexes): + pass
+ +
[docs] def get_data(self, data): + pass
+ +
[docs] def get_index(self, data): + pass
+ + +
[docs]class LinearSeasonalIndexer(SeasonalIndexer):
    """Use the data array/list position to index the seasonality """
    def __init__(self, seasons, units, ignore=None, **kwargs):
        """
        Indexer for array/list position
        :param seasons: A list with the season group (i.e.: 7 for week, 30 for month, etc.)
        :param units: A list with the units used for each season group, the default is 1 for each
        :param ignore: A list of booleans, one per season group, indicating which groups to omit from the returned index
        :param kwargs:
        """
        super(LinearSeasonalIndexer, self).__init__(len(seasons), **kwargs)
        self.seasons = seasons
        self.units = units
        self.ignore = ignore
[docs] def get_season_of_data(self,data): + return self.get_season_by_index(np.arange(0, len(data)).tolist())
+ +
[docs] def get_season_by_index(self, index): + ret = [] + if not isinstance(index, (list, np.ndarray)): + if self.num_seasons == 1: + season = (index // self.units[0]) % self.seasons[0] + else: + season = [] + for ct, seasonality in enumerate(self.seasons, start=0): + tmp = (index // self.units[ct]) % self.seasons[ct] + if not self.ignore[ct]: + season.append(tmp) + ret.append(season) + else: + for ix in index: + if self.num_seasons == 1: + season = (ix // self.units[0]) % self.seasons[0] + else: + season = [] + for ct, seasonality in enumerate(self.seasons, start=0): + tmp = (ix // self.units[ct]) % self.seasons[ct] + if not self.ignore[ct]: + season.append(tmp) + ret.append(season) + + return ret
+ +
[docs]    def get_index_by_season(self, indexes):
        ix = 0

        for count, season in enumerate(self.seasons):
            ix += season * (indexes[count])

        #ix += indexes[-1]

        return ix
+ +
[docs] def get_data(self, data): + return data
+ + +
[docs]class DataFrameSeasonalIndexer(SeasonalIndexer):
    """Use the Pandas.DataFrame index position to index the seasonality """
    def __init__(self, index_fields, index_seasons, data_field, **kwargs):
        """

        :param index_fields: DataFrame fields to use as index
        :param index_seasons: A list with the season group, i.e., multiples of positions that are considered a season (i.e.: 7 for week, 30 for month, etc.)
        :param data_field: DataFrame field to use as data
        :param kwargs:
        """
        super(DataFrameSeasonalIndexer, self).__init__(len(index_seasons), **kwargs)
        self.fields = index_fields
        self.seasons = index_seasons
        self.data_field = data_field
[docs] def get_season_of_data(self,data): + #data = data.copy() + ret = [] + for ix in data.index: + season = [] + for c, f in enumerate(self.fields, start=0): + if self.seasons[c] is None: + season.append(data[f][ix]) + else: + a = data[f][ix] + season.append(a // self.seasons[c]) + ret.append(season) + return ret
+ +
[docs] def get_season_by_index(self,index): + raise Exception("Operation not available!")
+ +
[docs] def get_data_by_season(self, data, indexes): + for season in indexes: + for c, f in enumerate(self.fields, start=0): + if self.seasons[c] is None: + data = data[data[f]== season[c]] + else: + data = data[(data[f] // self.seasons[c]) == season[c]] + return data[self.data_field]
+ +
[docs] def get_index_by_season(self, indexes): + raise Exception("Operation not available!")
+ +
[docs] def get_data(self, data): + return data[self.data_field].tolist()
+ +
[docs] def set_data(self, data, value): + data.loc[:,self.data_field] = value + return data
+ + +
[docs]class DateTimeSeasonalIndexer(SeasonalIndexer):
    """Use a Pandas.DataFrame date field to index the seasonality """
    def __init__(self, date_field, index_fields, index_seasons, data_field, **kwargs):
        """

        :param date_field: DataFrame field that contains the datetime field used on index
        :param index_fields: List with common.DateTime fields
        :param index_seasons: Multiples of index_fields, the default is 1
        :param data_field: DataFrame field with the time series data
        :param kwargs:
        """
        super(DateTimeSeasonalIndexer, self).__init__(len(index_seasons), **kwargs)
        self.fields = index_fields
        self.seasons = index_seasons
        self.data_field = data_field
        self.date_field = date_field
[docs]    def get_season_of_data(self, data):

        ret = []

        if isinstance(data, pd.DataFrame):
            for ix in data.index:
                date = data[self.date_field][ix]
                season = []
                for c, f in enumerate(self.fields, start=0):
                    tmp = common.strip_datepart(date, f)
                    if self.seasons[c] is not None:
                        tmp = tmp // self.seasons[c]
                    season.append(tmp)
                ret.append(season)

        elif isinstance(data, pd.Series):
            date = data[self.date_field]
            season = []
            for c, f in enumerate(self.fields, start=0):
                # mirror the DataFrame branch: strip_datepart takes two arguments
                tmp = common.strip_datepart(date, f)
                if self.seasons[c] is not None:
                    tmp = tmp // self.seasons[c]
                season.append(tmp)
            ret.append(season)

        return ret
+ +
[docs] def get_season_by_index(self, index): + raise Exception("Operation not available!")
+ +
[docs] def get_data_by_season(self, data, indexes): + raise Exception("Operation not available!")
+ +
[docs] def get_index_by_season(self, indexes): + raise Exception("Operation not available!")
+ +
[docs] def get_data(self, data): + return data[self.data_field].tolist()
+ +
[docs] def get_index(self, data): + return data[self.date_field].tolist() if isinstance(data, pd.DataFrame) else data[self.date_field]
+ +
[docs] def set_data(self, data, value): + raise Exception("Operation not available!")
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/seasonal/cmsfts.html b/docs/_build/html/_modules/pyFTS/models/seasonal/cmsfts.html
new file mode 100644
index 0000000..f185d95
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/seasonal/cmsfts.html
@@ -0,0 +1,188 @@
pyFTS.models.seasonal.cmsfts — pyFTS 1.2.3 documentation
Source code for pyFTS.models.seasonal.cmsfts

+import numpy as np
+from pyFTS.common import FuzzySet, FLR
+from pyFTS.models.seasonal import sfts
+from pyFTS.models import chen
+
+
+
[docs]class ContextualSeasonalFLRG(sfts.SeasonalFLRG): + """ + Contextual Seasonal Fuzzy Logical Relationship Group + """ + def __init__(self, seasonality): + super(ContextualSeasonalFLRG, self).__init__(seasonality) + self.RHS = {} + +
[docs] def append_rhs(self, flr, **kwargs): + if flr.LHS in self.RHS: + self.RHS[flr.LHS].append_rhs(flr.RHS) + else: + self.RHS[flr.LHS] = chen.ConventionalFLRG(flr.LHS) + self.RHS[flr.LHS].append_rhs(flr.RHS)
+ + def __str__(self): + tmp = str(self.LHS) + ": \n " + tmp2 = "\t" + for r in sorted(self.RHS): + tmp2 += str(self.RHS[r]) + "\n\t" + return tmp + tmp2 + "\n"
+ + +
[docs]class ContextualMultiSeasonalFTS(sfts.SeasonalFTS): + """ + Contextual Multi-Seasonal Fuzzy Time Series + """ + def __init__(self, **kwargs): + super(ContextualMultiSeasonalFTS, self).__init__(**kwargs) + self.name = "Contextual Multi Seasonal FTS" + self.shortname = "CMSFTS " + self.detail = "" + self.seasonality = 1 + self.has_seasonality = True + self.has_point_forecasting = True + self.is_high_order = True + self.is_multivariate = True + self.order = 1 + self.flrgs = {} + +
[docs] def generate_flrg(self, flrs): + for flr in flrs: + + if str(flr.index) not in self.flrgs: + self.flrgs[str(flr.index)] = ContextualSeasonalFLRG(flr.index) + + self.flrgs[str(flr.index)].append_rhs(flr)
+ +
[docs] def train(self, data, **kwargs): + if kwargs.get('sets', None) is not None: + self.sets = kwargs.get('sets', None) + if kwargs.get('parameters', None) is not None: + self.seasonality = kwargs.get('parameters', None) + flrs = FLR.generate_indexed_flrs(self.sets, self.indexer, data, + transformation=self.partitioner.transformation, + alpha_cut=self.alpha_cut) + self.generate_flrg(flrs)
+ +
[docs] def get_midpoints(self, flrg, data): + ret = [] + for d in data: + if d in flrg.RHS: + ret.extend([self.sets[s].centroid for s in flrg.RHS[d].RHS]) + else: + ret.extend([self.sets[d].centroid]) + + return np.array(ret)
+ +
[docs] def forecast(self, data, **kwargs): + ordered_sets = FuzzySet.set_ordered(self.sets) + + ret = [] + + index = self.indexer.get_season_of_data(data) + ndata = self.indexer.get_data(data) + + for k in np.arange(0, len(data)): + + if str(index[k]) in self.flrgs: + + flrg = self.flrgs[str(index[k])] + + d = FuzzySet.get_fuzzysets(ndata[k], self.sets, ordered_sets, alpha_cut=self.alpha_cut) + + mp = self.get_midpoints(flrg, d) + + ret.append(sum(mp) / len(mp)) + else: + ret.append(np.nan) + + return ret
+ +
[docs]    def forecast_ahead(self, data, steps, **kwargs):
        ret = []
        # here `steps` is expected to be an iterable of season indexes; without new
        # observations, all RHS contexts of the seasonal FLRG are averaged
        for i in steps:
            flrg = self.flrgs[str(i)]

            mp = self.get_midpoints(flrg, flrg.RHS.keys())

            ret.append(sum(mp) / len(mp))

        return ret
+ + +
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/seasonal/common.html b/docs/_build/html/_modules/pyFTS/models/seasonal/common.html
new file mode 100644
index 0000000..1bd039b
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/seasonal/common.html
@@ -0,0 +1,175 @@
pyFTS.models.seasonal.common — pyFTS 1.2.3 documentation
Source code for pyFTS.models.seasonal.common

+import numpy as np
+import pandas as pd
+from enum import Enum
+from pyFTS.common import FuzzySet, Membership
+from pyFTS.partitioners import partitioner, Grid
+
+
+
[docs]class DateTime(Enum): + year = 1 + month = 12 + day_of_month = 30 + day_of_year = 364 + day_of_week = 7 + hour = 6 + minute = 7 + second = 8 + hour_of_day = 24 + hour_of_week = 168 + hour_of_month = 744 + hour_of_year = 8736 + minute_of_hour = 60 + minute_of_day = 1440 + minute_of_week = 10080 + minute_of_month = 44640 + minute_of_year = 524160 + second_of_minute = 60.00001 + second_of_hour = 3600 + second_of_day = 86400
+ + +
[docs]def strip_datepart(date, date_part): + if date_part == DateTime.year: + tmp = date.year + elif date_part == DateTime.month: + tmp = date.month + elif date_part == DateTime.day_of_year: + tmp = date.timetuple().tm_yday + elif date_part == DateTime.day_of_month: + tmp = date.day + elif date_part == DateTime.day_of_week: + tmp = date.weekday() + elif date_part == DateTime.hour or date_part == DateTime.hour_of_day: + tmp = date.hour + elif date_part == DateTime.hour_of_week: + wk = (date.weekday()-1) * 24 + tmp = date.hour + wk + elif date_part == DateTime.hour_of_month: + wk = (date.day-1) * 24 + tmp = date.hour + wk + elif date_part == DateTime.hour_of_year: + wk = (date.timetuple().tm_yday-1) * 24 + tmp = date.hour + wk + elif date_part == DateTime.minute or date_part == DateTime.minute_of_hour: + tmp = date.minute + elif date_part == DateTime.minute_of_day: + wk = date.hour * 60 + tmp = date.minute + wk + elif date_part == DateTime.minute_of_week: + wk1 = (date.weekday()-1) * 1440 #24 * 60 + wk2 = date.hour * 60 + tmp = date.minute + wk1 + wk2 + elif date_part == DateTime.minute_of_month: + wk1 = (date.day - 1) * 1440 #24 * 60 + wk2 = date.hour * 60 + tmp = date.minute + wk1 + wk2 + elif date_part == DateTime.minute_of_year: + wk1 = (date.timetuple().tm_yday - 1) * 1440 #24 * 60 + wk2 = date.hour * 60 + tmp = date.minute + wk1 + wk2 + elif date_part == DateTime.second or date_part == DateTime.second_of_minute: + tmp = date.second + elif date_part == DateTime.second_of_hour: + wk1 = date.minute * 60 + tmp = date.second + wk1 + elif date_part == DateTime.second_of_day: + wk1 = date.hour * 3600 #60 * 60 + wk2 = date.minute * 60 + tmp = date.second + wk1 + wk2 + else: + raise Exception("Unknown DateTime value!") + + return tmp
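Two quick worked examples of strip_datepart (the date was chosen to make the arithmetic easy to verify):

    import datetime
    from pyFTS.models.seasonal import common

    d = datetime.datetime(2018, 5, 14, 10, 30)  # a Monday
    common.strip_datepart(d, common.DateTime.day_of_week)    # -> 0 (weekday())
    common.strip_datepart(d, common.DateTime.minute_of_day)  # -> 10*60 + 30 = 630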
+ + +
[docs]class FuzzySet(FuzzySet.FuzzySet): + """ + Temporal/Seasonal Fuzzy Set + """ + + def __init__(self, datepart, name, mf, parameters, centroid, alpha=1.0, **kwargs): + super(FuzzySet, self).__init__(name, mf, parameters, centroid, alpha, type = 'datetime', **kwargs) + self.datepart = datepart + self.type = 'seasonal' + +
[docs] def membership(self, x): + dp = strip_datepart(x, self.datepart) + return self.mf(dp, self.parameters) * self.alpha
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/seasonal/msfts.html b/docs/_build/html/_modules/pyFTS/models/seasonal/msfts.html
new file mode 100644
index 0000000..2f72914
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/seasonal/msfts.html
@@ -0,0 +1,144 @@
pyFTS.models.seasonal.msfts — pyFTS 1.2.3 documentation
Source code for pyFTS.models.seasonal.msfts

+import numpy as np
+from pyFTS.common import FLR
+from pyFTS.models.seasonal import sfts
+
+
+
[docs]class MultiSeasonalFTS(sfts.SeasonalFTS):
    """
    Multi-Seasonal Fuzzy Time Series
    """
    def __init__(self, name, indexer, **kwargs):
        # sfts.SeasonalFTS.__init__ accepts keyword arguments only
        super(MultiSeasonalFTS, self).__init__(**kwargs)
        self.name = "Multi Seasonal FTS"
        self.shortname = "MSFTS " + name
        self.detail = ""
        self.seasonality = 1
        self.has_seasonality = True
        self.has_point_forecasting = True
        self.is_high_order = False
        self.is_multivariate = True
        self.indexer = indexer
        self.flrgs = {}
[docs] def generate_flrg(self, flrs): + for flr in flrs: + + if str(flr.index) not in self.flrgs: + self.flrgs[str(flr.index)] = sfts.SeasonalFLRG(flr.index) + + self.flrgs[str(flr.index)].append_rhs(flr.RHS)
+ +
[docs] def train(self, data, **kwargs): + if kwargs.get('sets', None) is not None: + self.sets = kwargs.get('sets', None) + if kwargs.get('parameters', None) is not None: + self.seasonality = kwargs.get('parameters', None) + #ndata = self.indexer.set_data(data,self.doTransformations(self.indexer.get_data(data))) + flrs = FLR.generate_indexed_flrs(self.sets, self.indexer, data) + self.generate_flrg(flrs)
+ +
[docs]    def forecast(self, data, **kwargs):

        ret = []

        index = self.indexer.get_season_of_data(data)
        ndata = self.indexer.get_data(data)

        for k in np.arange(0, len(index)):

            flrg = self.flrgs[str(index[k])]

            mp = self.get_midpoints(flrg)

            ret.append(sum(mp) / len(mp))

        return ret
+ +
[docs]    def forecast_ahead(self, data, steps, **kwargs):
        ret = []
        for i in steps:
            flrg = self.flrgs[str(i)]

            mp = self.get_midpoints(flrg)

            ret.append(sum(mp) / len(mp))

        return ret
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/seasonal/partitioner.html b/docs/_build/html/_modules/pyFTS/models/seasonal/partitioner.html
new file mode 100644
index 0000000..b3311eb
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/seasonal/partitioner.html
@@ -0,0 +1,197 @@
pyFTS.models.seasonal.partitioner — pyFTS 1.2.3 documentation
Source code for pyFTS.models.seasonal.partitioner

+from pyFTS.common import Membership, FuzzySet as FS
+from pyFTS.common.Composite import FuzzySet as Composite
+from pyFTS.partitioners import partitioner, Grid
+from pyFTS.models.seasonal.common import DateTime, FuzzySet, strip_datepart
+import numpy as np
+import matplotlib.pylab as plt
+
+
+
[docs]class TimeGridPartitioner(partitioner.Partitioner): + """Even Length DateTime Grid Partitioner""" + + def __init__(self, **kwargs): + """ + Even Length Grid Partitioner + :param seasonality: Time granularity, from pyFTS.models.seasonal.common.DateTime + :param data: Training data of which the universe of discourse will be extracted. The universe of discourse is the open interval between the minimum and maximum values of the training data. + :param npart: The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created + :param func: Fuzzy membership function (pyFTS.common.Membership) + """ + super(TimeGridPartitioner, self).__init__(name="TimeGrid", preprocess=False, **kwargs) + + self.season = kwargs.get('seasonality', DateTime.day_of_year) + data = kwargs.get('data', None) + if self.season == DateTime.year: + ndata = [strip_datepart(k, self.season) for k in data] + self.min = min(ndata) + self.max = max(ndata) + else: + tmp = (self.season.value / self.partitions) / 2 + self.min = tmp + self.max = self.season.value + tmp + + self.sets = self.build(None) + + if self.ordered_sets is None and self.setnames is not None: + self.ordered_sets = self.setnames + else: + self.ordered_sets = FS.set_ordered(self.sets) + +
[docs] def build(self, data): + sets = {} + + kwargs = {'variable': self.variable} + + if self.season == DateTime.year: + dlen = (self.max - self.min) + partlen = dlen / self.partitions + else: + partlen = self.season.value / self.partitions + pl2 = partlen / 2 + + count = 0 + for c in np.arange(self.min, self.max, partlen): + set_name = self.get_name(count) + if self.membership_function == Membership.trimf: + if c == self.min: + tmp = Composite(set_name, superset=True) + tmp.append_set(FuzzySet(self.season, set_name, Membership.trimf, + [self.season.value - pl2, self.season.value, + self.season.value + 0.0000001], self.season.value, alpha=.5, + **kwargs)) + tmp.append_set(FuzzySet(self.season, set_name, Membership.trimf, + [c - partlen, c, c + partlen], c, + **kwargs)) + tmp.centroid = c + sets[set_name] = tmp + else: + sets[set_name] = FuzzySet(self.season, set_name, Membership.trimf, + [c - partlen, c, c + partlen], c, + **kwargs) + elif self.membership_function == Membership.gaussmf: + sets[set_name] = FuzzySet(self.season, set_name, Membership.gaussmf, [c, partlen / 3], c, + **kwargs) + elif self.membership_function == Membership.trapmf: + q = partlen / 4 + if c == self.min: + tmp = Composite(set_name, superset=True) + tmp.append_set(FuzzySet(self.season, set_name, Membership.trimf, + [self.season.value - pl2, self.season.value, + self.season.value + 0.0000001], 0, + **kwargs)) + tmp.append_set(FuzzySet(self.season, set_name, Membership.trapmf, + [c - partlen, c - q, c + q, c + partlen], c, + **kwargs)) + tmp.centroid = c + sets[set_name] = tmp + else: + sets[set_name] = FuzzySet(self.season, set_name, Membership.trapmf, + [c - partlen, c - q, c + q, c + partlen], c, + **kwargs) + count += 1 + + self.min = 0 + + return sets
+ + +
[docs]    def plot(self, ax):
        """
        Plot the fuzzy sets of the partitioner on a matplotlib axis
        :param ax: matplotlib axis
        """
        ax.set_title(self.name)
        ax.set_ylim([0, 1])
        ax.set_xlim([0, self.season.value])
        ticks = []
        x = []
        for key in self.sets.keys():
            s = self.sets[key]
            if s.type == 'composite':
                for ss in s.sets:
                    self.plot_set(ax, ss)
            else:
                self.plot_set(ax, s)
            ticks.append(str(round(s.centroid, 0)) + '\n' + s.name)
            x.append(s.centroid)
        ax.xaxis.set_ticklabels(ticks)
        ax.xaxis.set_ticks(x)
+
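A minimal sketch of how this partitioner is typically instantiated (assuming the base Partitioner reads the npart keyword, as elsewhere in pyFTS; for granularities other than DateTime.year the universe of discourse comes from the seasonality itself, so no training data is required):

    from pyFTS.models.seasonal import partitioner as seasonal
    from pyFTS.models.seasonal.common import DateTime

    fs = seasonal.TimeGridPartitioner(seasonality=DateTime.day_of_week, npart=7)
    # fs.sets now holds seven fuzzy sets covering the weekly cycle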
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/seasonal/sfts.html b/docs/_build/html/_modules/pyFTS/models/seasonal/sfts.html
new file mode 100644
index 0000000..7ebbaeb
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/seasonal/sfts.html
@@ -0,0 +1,171 @@
pyFTS.models.seasonal.sfts — pyFTS 1.2.3 documentation
Source code for pyFTS.models.seasonal.sfts

+"""
+Simple First Order Seasonal Fuzzy Time Series implementation of Song (1999), based on the Conventional FTS of Chen (1996)
+
+Q. Song, “Seasonal forecasting in fuzzy time series,” Fuzzy sets Syst., vol. 107, pp. 235–236, 1999.
+
+S.-M. Chen, “Forecasting enrollments based on fuzzy time series,” Fuzzy Sets Syst., vol. 81, no. 3, pp. 311–319, 1996.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, flrg, fts
+
+
+
[docs]class SeasonalFLRG(flrg.FLRG): + """First Order Seasonal Fuzzy Logical Relationship Group""" + def __init__(self, seasonality): + super(SeasonalFLRG, self).__init__(1) + self.LHS = seasonality + self.RHS = [] + +
[docs] def get_key(self): + return self.LHS
+ +
[docs] def append_rhs(self, c, **kwargs): + self.RHS.append(c)
+ + def __str__(self): + tmp = str(self.LHS) + " -> " + tmp2 = "" + for c in sorted(self.RHS, key=lambda s: str(s)): + if len(tmp2) > 0: + tmp2 = tmp2 + "," + tmp2 = tmp2 + str(c) + return tmp + tmp2 + + def __len__(self): + return len(self.RHS)
+ + +
[docs]class SeasonalFTS(fts.FTS): + """First Order Seasonal Fuzzy Time Series""" + def __init__(self, **kwargs): + super(SeasonalFTS, self).__init__(**kwargs) + self.name = "Seasonal FTS" + self.shortname = "SFTS" + self.order = 1 + self.seasonality = 1 + self.has_seasonality = True + self.has_point_forecasting = True + self.is_high_order = False + self.flrgs = {} + +
[docs] def generate_flrg(self, flrs): + + for ct, flr in enumerate(flrs, start=1): + + season = self.indexer.get_season_by_index(ct)[0] + + ss = str(season) + + if ss not in self.flrgs: + self.flrgs[ss] = SeasonalFLRG(season) + + #print(season) + self.flrgs[ss].append_rhs(flr.RHS)
+ +
[docs] def get_midpoints(self, flrg): + ret = np.array([self.sets[s].centroid for s in flrg.RHS]) + return ret
+ +
[docs] def train(self, data, **kwargs): + if kwargs.get('sets', None) is not None: + self.sets = kwargs.get('sets', None) + tmpdata = FuzzySet.fuzzyfy_series_old(data, self.sets) + flrs = FLR.generate_non_recurrent_flrs(tmpdata) + self.generate_flrg(flrs)
+ +
[docs] def forecast(self, data, **kwargs): + + l = len(data) + + ret = [] + + for k in np.arange(0, l): + + season = self.indexer.get_season_by_index(k)[0] + + flrg = self.flrgs[str(season)] + + mp = self.get_midpoints(flrg) + + ret.append(np.percentile(mp, 50)) + + return ret
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/song.html b/docs/_build/html/_modules/pyFTS/models/song.html
new file mode 100644
index 0000000..793a310
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/song.html
@@ -0,0 +1,165 @@
pyFTS.models.song — pyFTS 1.2.3 documentation
Source code for pyFTS.models.song

+"""
+First Order Traditional Fuzzy Time Series method by Song & Chissom (1993)
+
+Q. Song and B. S. Chissom, “Fuzzy time series and its models,” Fuzzy Sets Syst., vol. 54, no. 3, pp. 269–277, 1993.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts
+
+
+
[docs]class ConventionalFTS(fts.FTS):
    """Traditional Fuzzy Time Series"""
    def __init__(self, **kwargs):
        super(ConventionalFTS, self).__init__(order=1, name="FTS", **kwargs)
        self.name = "Traditional FTS"
        self.detail = "Song & Chissom"
        # take the fuzzy sets from the partitioner when they were not given explicitly
        if self.sets is None and self.partitioner is not None:
            self.sets = self.partitioner.sets

        self.R = None

        if self.sets is not None:
            l = len(self.sets)
            self.R = np.zeros((l, l))
[docs]    def flr_membership_matrix(self, flr):
        ordered_set = FuzzySet.set_ordered(self.sets)
        centroids = [self.sets[k].centroid for k in ordered_set]
        lm = [self.sets[flr.LHS].membership(k) for k in centroids]
        rm = [self.sets[flr.RHS].membership(k) for k in centroids]

        l = len(ordered_set)
        r = np.zeros((l, l))
        for i in range(0, l):
            for j in range(0, l):
                r[i][j] = min(lm[i], rm[j])

        return r
+ +
[docs]    def operation_matrix(self, flrs):
        l = len(self.sets)
        if self.R is None or len(self.R) == 0:
            self.R = np.zeros((l, l))
        for flr in flrs:
            mm = self.flr_membership_matrix(flr)
            for i in range(0, l):
                for j in range(0, l):
                    self.R[i][j] = max(self.R[i][j], mm[i][j])
+ + +
[docs] def train(self, data, **kwargs): + + tmpdata = FuzzySet.fuzzyfy_series(data, self.sets, method='maximum') + flrs = FLR.generate_non_recurrent_flrs(tmpdata) + self.operation_matrix(flrs)
+ +
[docs] def forecast(self, ndata, **kwargs): + + if self.partitioner is not None: + ordered_sets = self.partitioner.ordered_sets + else: + ordered_sets = FuzzySet.set_ordered(self.sets) + + l = len(ndata) + npart = len(self.sets) + + ret = [] + + for k in np.arange(0, l): + mv = FuzzySet.fuzzyfy_instance(ndata[k], self.sets) + + r = [max([ min(self.R[i][j], mv[j]) for j in np.arange(0,npart) ]) for i in np.arange(0,npart)] + + fs = np.ravel(np.argwhere(r == max(r))) + + if len(fs) == 1: + ret.append(self.sets[ordered_sets[fs[0]]].centroid) + else: + mp = [self.sets[ordered_sets[s]].centroid for s in fs] + + ret.append( sum(mp)/len(mp)) + + return ret
+ + def __str__(self): + tmp = self.name + ":\n" + return tmp + str(self.R)
+
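A minimal usage sketch (illustrative data; the partitioner supplies the fuzzy sets from which the relation matrix R is sized):

    import numpy as np
    from pyFTS.partitioners import Grid
    from pyFTS.models import song

    data = np.random.normal(20, 3, 300)
    part = Grid.GridPartitioner(data=data, npart=7)
    model = song.ConventionalFTS(partitioner=part)
    model.train(data)
    print(model)                      # prints the max-min relation matrix R
    forecasts = model.forecast(data[-5:])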
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/models/yu.html b/docs/_build/html/_modules/pyFTS/models/yu.html
new file mode 100644
index 0000000..b01f98f
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/models/yu.html
@@ -0,0 +1,168 @@
pyFTS.models.yu — pyFTS 1.2.3 documentation
Source code for pyFTS.models.yu

+"""
+First Order Weighted Fuzzy Time Series by Yu (2005)
+
+H.-K. Yu, “Weighted fuzzy time series models for TAIEX forecasting,” 
+Phys. A Stat. Mech. its Appl., vol. 349, no. 3, pp. 609–624, 2005.
+"""
+
+import numpy as np
+from pyFTS.common import FuzzySet, FLR, fts, flrg
+from pyFTS.models import chen
+
+
+
[docs]class WeightedFLRG(flrg.FLRG): + """First Order Weighted Fuzzy Logical Relationship Group""" + def __init__(self, LHS, **kwargs): + super(WeightedFLRG, self).__init__(1, **kwargs) + self.LHS = LHS + self.RHS = [] + self.count = 1.0 + self.w = None + +
[docs] def append_rhs(self, c, **kwargs): + self.RHS.append(c) + self.count = self.count + 1.0
+ +
[docs] def weights(self, sets): + if self.w is None: + tot = sum(np.arange(1.0, self.count, 1.0)) + self.w = np.array([k / tot for k in np.arange(1.0, self.count, 1.0)]) + return self.w
+ + def __str__(self): + tmp = self.LHS + " -> " + tmp2 = "" + cc = 1.0 + tot = sum(np.arange(1.0, self.count, 1.0)) + for c in sorted(self.RHS): + if len(tmp2) > 0: + tmp2 = tmp2 + "," + tmp2 = tmp2 + c + "(" + str(round(cc / tot, 3)) + ")" + cc = cc + 1.0 + return tmp + tmp2
+ + +
[docs]class WeightedFTS(fts.FTS): + """First Order Weighted Fuzzy Time Series""" + def __init__(self, **kwargs): + super(WeightedFTS, self).__init__(order=1, name="WFTS", **kwargs) + self.name = "Weighted FTS" + self.detail = "Yu" + +
[docs]    def generate_FLRG(self, flrs):
        for flr in flrs:
            if flr.LHS in self.flrgs:
                self.flrgs[flr.LHS].append_rhs(flr.RHS)
            else:
                self.flrgs[flr.LHS] = WeightedFLRG(flr.LHS)
                self.flrgs[flr.LHS].append_rhs(flr.RHS)
+ +
[docs] def train(self, ndata, **kwargs): + tmpdata = FuzzySet.fuzzyfy_series(ndata, self.sets, method='maximum') + flrs = FLR.generate_recurrent_flrs(tmpdata) + self.generate_FLRG(flrs)
+ +
[docs] def forecast(self, ndata, **kwargs): + + if self.partitioner is not None: + ordered_sets = self.partitioner.ordered_sets + else: + ordered_sets = FuzzySet.set_ordered(self.sets) + + ndata = np.array(ndata) + + l = len(ndata) + + ret = [] + + for k in np.arange(0, l): + + actual = FuzzySet.get_maximum_membership_fuzzyset(ndata[k], self.sets, ordered_sets) + + if actual.name not in self.flrgs: + ret.append(actual.centroid) + else: + flrg = self.flrgs[actual.name] + mp = flrg.get_midpoints(self.sets) + + ret.append(mp.dot(flrg.weights(self.sets))) + + return ret
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/partitioners/CMeans.html b/docs/_build/html/_modules/pyFTS/partitioners/CMeans.html
new file mode 100644
index 0000000..558c5e8
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/partitioners/CMeans.html
@@ -0,0 +1,175 @@
pyFTS.partitioners.CMeans — pyFTS 1.2.3 documentation
Source code for pyFTS.partitioners.CMeans

+import numpy as np
+import math
+import random as rnd
+import functools, operator
+from pyFTS.common import FuzzySet, Membership
+from pyFTS.partitioners import partitioner
+
+
+
[docs]def distance(x, y): + if isinstance(x, list): + tmp = functools.reduce(operator.add, [(x[k] - y[k]) ** 2 for k in range(0, len(x))]) + else: + tmp = (x - y) ** 2 + return math.sqrt(tmp)
+ + +
[docs]def c_means(k, dados, tam):
    # Initialize the centroids by picking random elements from the data set
    centroides = [dados[rnd.randint(0, len(dados)-1)] for kk in range(0, k)]

    grupos = [-1 for x in range(0, len(dados))]

    it_semmodificacao = 0

    # for each instance
    iteracoes = 0
    while iteracoes < 1000 and it_semmodificacao < 10:
        inst_count = 0

        modificacao = False

        for instancia in dados:

            # check the distance to each centroid
            grupo_count = 0
            dist = 10000

            grupotmp = grupos[inst_count]

            for grupo in centroides:
                tmp = distance(instancia, grupo)
                if tmp < dist:
                    dist = tmp
                    # assign the instance to the nearest centroid
                    grupos[inst_count] = grupo_count
                grupo_count = grupo_count + 1

            if grupotmp != grupos[inst_count]:
                modificacao = True

            inst_count = inst_count + 1

        if not modificacao:
            it_semmodificacao = it_semmodificacao + 1
        else:
            it_semmodificacao = 0

        # update each centroid with the mean of all instances assigned to it
        grupo_count = 0
        for grupo in centroides:
            total_inst = functools.reduce(operator.add, [1 for xx in grupos if xx == grupo_count], 0)
            if total_inst > 0:
                if tam > 1:
                    for count in range(0, tam):
                        soma = functools.reduce(operator.add,
                                                [dados[kk][count] for kk in range(0, len(dados)) if
                                                 grupos[kk] == grupo_count])
                        centroides[grupo_count][count] = soma / total_inst
                else:
                    soma = functools.reduce(operator.add,
                                            [dados[kk] for kk in range(0, len(dados)) if grupos[kk] == grupo_count])
                    centroides[grupo_count] = soma / total_inst
            grupo_count = grupo_count + 1

        iteracoes = iteracoes + 1

    return centroides
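A tiny illustrative run of c_means on scalar data (tam=1); the random initialization makes the exact output vary:

    data = [1.0, 1.2, 0.9, 5.0, 5.1, 4.8, 9.9, 10.2, 10.0]
    centers = c_means(3, data, 1)
    # typically one centroid near 1, one near 5 and one near 10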
+ + +
[docs]class CMeansPartitioner(partitioner.Partitioner): + def __init__(self, **kwargs): + super(CMeansPartitioner, self).__init__(name="CMeans", **kwargs) + +
[docs] def build(self, data): + sets = {} + centroides = c_means(self.partitions, data, 1) + centroides.append(self.max) + centroides.append(self.min) + centroides = list(set(centroides)) + centroides.sort() + for c in np.arange(1, len(centroides) - 1): + _name = self.get_name(c) + sets[_name] = FuzzySet.FuzzySet(_name, Membership.trimf, + [round(centroides[c - 1], 3), round(centroides[c], 3), round(centroides[c + 1], 3)], + round(centroides[c], 3)) + + return sets
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/partitioners/Entropy.html b/docs/_build/html/_modules/pyFTS/partitioners/Entropy.html
new file mode 100644
index 0000000..6ea78ef
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/partitioners/Entropy.html
@@ -0,0 +1,186 @@
pyFTS.partitioners.Entropy — pyFTS 1.2.3 documentation
Source code for pyFTS.partitioners.Entropy

+"""
+C. H. Cheng, R. J. Chang, and C. A. Yeh, “Entropy-based and trapezoidal fuzzification-based fuzzy time series approach for forecasting IT project cost,”
+Technol. Forecast. Social Change, vol. 73, no. 5, pp. 524–542, Jun. 2006.
+"""
+
+import numpy as np
+import math
+import random as rnd
+import functools, operator
+from pyFTS.common import FuzzySet, Membership
+from pyFTS.partitioners import partitioner
+
+
+
[docs]def splitBelow(data,threshold): + return [k for k in data if k <= threshold]
+ + +
[docs]def splitAbove(data,threshold): + return [k for k in data if k > threshold]
+ + +
[docs]def PMF(data, threshold): + a = sum([1.0 for k in splitBelow(data,threshold)]) + b = sum([1.0 for k in splitAbove(data, threshold)]) + l = len(data) + return [a / l, b / l]
+ + +
[docs]def entropy(data, threshold): + pmf = PMF(data, threshold) + if pmf[0] == 0 or pmf[1] == 0: + return 1 + else: + return - sum([pmf[0] * math.log(pmf[0]), pmf[1] * math.log(pmf[1])])
+ + +
[docs]def informationGain(data, thres1, thres2): + return entropy(data, thres1) - entropy(data, thres2)
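A worked example of the split statistics above: with a single outlier, nearly all probability mass falls below the threshold and the split entropy is low:

    data = [1, 1, 2, 2, 3, 3, 10]
    PMF(data, 3)      # -> [6/7, 1/7]
    entropy(data, 3)  # -> -(6/7*log(6/7) + 1/7*log(1/7)) ≈ 0.41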
+ + +
[docs]def bestSplit(data, npart): + if len(data) < 2: + return None + count = 1 + ndata = list(set(data)) + ndata.sort() + l = len(ndata) + threshold = 0 + try: + while count < l and informationGain(data, ndata[count - 1], ndata[count]) <= 0: + threshold = ndata[count] + count += 1 + except IndexError: + print(threshold) + print (ndata) + print (count) + + rem = npart % 2 + + if (npart - rem)/2 > 1: + p1 = splitBelow(data,threshold) + p2 = splitAbove(data,threshold) + + if len(p1) > len(p2): + np1 = (npart - rem)/2 + rem + np2 = (npart - rem)/2 + else: + np1 = (npart - rem) / 2 + np2 = (npart - rem) / 2 + rem + + tmp = [threshold] + + for k in bestSplit(p1, np1 ): tmp.append(k) + for k in bestSplit(p2, np2 ): tmp.append(k) + + return tmp + + else: + return [threshold]
+ + +
[docs]class EntropyPartitioner(partitioner.Partitioner): + """Huarng Entropy Partitioner""" + def __init__(self, **kwargs): + super(EntropyPartitioner, self).__init__(name="Entropy", **kwargs) + +
[docs] def build(self, data): + sets = {} + + partitions = bestSplit(data, self.partitions) + partitions.append(self.min) + partitions.append(self.max) + partitions = list(set(partitions)) + partitions.sort() + for c in np.arange(1, len(partitions) - 1): + _name = self.get_name(c) + if self.membership_function == Membership.trimf: + sets[_name] = FuzzySet.FuzzySet(_name, Membership.trimf, + [partitions[c - 1], partitions[c], partitions[c + 1]],partitions[c]) + elif self.membership_function == Membership.trapmf: + b1 = (partitions[c] - partitions[c - 1])/2 + b2 = (partitions[c + 1] - partitions[c]) / 2 + sets[_name] = FuzzySet.FuzzySet(_name, Membership.trapmf, + [partitions[c - 1], partitions[c] - b1, + partitions[c] + b2, partitions[c + 1]], + partitions[c]) + + return sets
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/partitioners/FCM.html b/docs/_build/html/_modules/pyFTS/partitioners/FCM.html
new file mode 100644
index 0000000..344e4d2
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/partitioners/FCM.html
@@ -0,0 +1,214 @@
pyFTS.partitioners.FCM — pyFTS 1.2.3 documentation
Source code for pyFTS.partitioners.FCM

+"""
+S. T. Li, Y. C. Cheng, and S. Y. Lin, “A FCM-based deterministic forecasting model for fuzzy time series,”
+Comput. Math. Appl., vol. 56, no. 12, pp. 3052–3063, Dec. 2008. DOI: 10.1016/j.camwa.2008.07.033.
+"""
+import numpy as np
+import math
+import random as rnd
+import functools, operator
+from pyFTS.common import FuzzySet, Membership
+from pyFTS.partitioners import partitioner
+
+
+
[docs]def fuzzy_distance(x, y): + if isinstance(x, list): + tmp = functools.reduce(operator.add, [(x[k] - y[k]) ** 2 for k in range(0, len(x))]) + else: + tmp = (x - y) ** 2 + return math.sqrt(tmp)
+ + +
[docs]def membership(val, vals): + soma = 0 + for k in vals: + if k == 0: + k = 1 + soma = soma + (val / k) ** 2 + + return soma
+ + +
[docs]def fuzzy_cmeans(k, dados, tam, m, deltadist=0.001):
    tam_dados = len(dados)

    # Initialize the centroids by picking random elements from the data set
    centroides = [dados[rnd.randint(0, tam_dados - 1)] for kk in range(0, k)]

    # Membership table of the instances to the groups
    grupos = [[0 for kk in range(0, k)] for xx in range(0, tam_dados)]

    alteracaomedia = 1000

    m_exp = 1 / (m - 1)

    # for each instance
    iteracoes = 0

    while iteracoes < 1000 and alteracaomedia > deltadist:

        alteracaomedia = 0

        # compute the distance to each centroid and
        # update the membership of the instance to each of the groups

        inst_count = 0
        for instancia in dados:

            dist_grupos = [0 for xx in range(0, k)]

            grupo_count = 0
            for grupo in centroides:
                dist_grupos[grupo_count] = fuzzy_distance(grupo, instancia)
                grupo_count = grupo_count + 1

            dist_grupos_total = functools.reduce(operator.add, [xk for xk in dist_grupos])

            for grp in range(0, k):
                if dist_grupos[grp] == 0:
                    grupos[inst_count][grp] = 1
                else:
                    grupos[inst_count][grp] = 1 / membership(dist_grupos[grp], dist_grupos)
                    # grupos[inst_count][grp] = 1/(dist_grupos[grp] / dist_grupos_total)
                    # grupos[inst_count][grp] = (1/(dist_grupos[grp]**2))**m_exp / (1/(dist_grupos_total**2))**m_exp

            inst_count = inst_count + 1

        # update each centroid with the mean of all instances, weighted by their membership degrees
        grupo_count = 0
        for grupo in centroides:
            if tam > 1:
                oldgrp = [xx for xx in grupo]
                for atr in range(0, tam):
                    soma = functools.reduce(operator.add,
                                            [grupos[xk][grupo_count] * dados[xk][atr] for xk in range(0, tam_dados)])
                    norm = functools.reduce(operator.add, [grupos[xk][grupo_count] for xk in range(0, tam_dados)])
                    centroides[grupo_count][atr] = soma / norm
            else:
                oldgrp = grupo
                soma = functools.reduce(operator.add,
                                        [grupos[xk][grupo_count] * dados[xk] for xk in range(0, tam_dados)])
                norm = functools.reduce(operator.add, [grupos[xk][grupo_count] for xk in range(0, tam_dados)])
                centroides[grupo_count] = soma / norm

            alteracaomedia = alteracaomedia + fuzzy_distance(oldgrp, grupo)
            grupo_count = grupo_count + 1

        alteracaomedia = alteracaomedia / k
        iteracoes = iteracoes + 1

    return centroides
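A small illustrative run on scalar data with fuzzification exponent m=2 (the random start makes results vary slightly):

    data = [1.0, 1.1, 0.9, 5.0, 5.2, 4.9]
    centers = fuzzy_cmeans(2, data, 1, 2)
    # typically one centroid near 1 and one near 5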
+ + +
[docs]class FCMPartitioner(partitioner.Partitioner): + """ + + """ + + def __init__(self, **kwargs): + super(FCMPartitioner, self).__init__(name="FCM", **kwargs) + +
[docs]    def build(self, data):
        sets = {}

        centroids = fuzzy_cmeans(self.partitions, data, 1, 2)
        centroids.append(self.max)
        centroids.append(self.min)
        centroids = list(set(centroids))
        centroids.sort()
        for c in np.arange(1, len(centroids) - 1):
            _name = self.get_name(c)
            if self.membership_function == Membership.trimf:
                sets[_name] = FuzzySet.FuzzySet(_name, Membership.trimf,
                                                [round(centroids[c - 1], 3), round(centroids[c], 3),
                                                 round(centroids[c + 1], 3)],
                                                round(centroids[c], 3))
            elif self.membership_function == Membership.trapmf:
                q1 = (round(centroids[c], 3) - round(centroids[c - 1], 3)) / 2
                q2 = (round(centroids[c + 1], 3) - round(centroids[c], 3)) / 2
                sets[_name] = FuzzySet.FuzzySet(_name, Membership.trapmf,
                                                [round(centroids[c - 1], 3), round(centroids[c], 3) - q1,
                                                 round(centroids[c], 3) + q2, round(centroids[c + 1], 3)],
                                                round(centroids[c], 3))

        return sets
+
\ No newline at end of file
diff --git a/docs/_build/html/_modules/pyFTS/partitioners/Grid.html b/docs/_build/html/_modules/pyFTS/partitioners/Grid.html
new file mode 100644
index 0000000..7072458
--- /dev/null
+++ b/docs/_build/html/_modules/pyFTS/partitioners/Grid.html
@@ -0,0 +1,120 @@
pyFTS.partitioners.Grid — pyFTS 1.2.3 documentation
Source code for pyFTS.partitioners.Grid

+"""Even Length Grid Partitioner"""
+
+import numpy as np
+import math
+import random as rnd
+import functools, operator
+from pyFTS.common import FuzzySet, Membership
+from pyFTS.partitioners import partitioner
+
+
+
[docs]class GridPartitioner(partitioner.Partitioner): + """Even Length Grid Partitioner""" + + def __init__(self, **kwargs): + """ + Even Length Grid Partitioner + """ + super(GridPartitioner, self).__init__(name="Grid", **kwargs) + +
[docs] def build(self, data): + sets = {} + + kwargs = {'type': self.type, 'variable': self.variable} + + dlen = self.max - self.min + partlen = dlen / self.partitions + + count = 0 + for c in np.arange(self.min, self.max, partlen): + _name = self.get_name(count) + if self.membership_function == Membership.trimf: + sets[_name] = FuzzySet.FuzzySet(_name, Membership.trimf, [c - partlen, c, c + partlen],c,**kwargs) + elif self.membership_function == Membership.gaussmf: + sets[_name] = FuzzySet.FuzzySet(_name, Membership.gaussmf, [c, partlen / 3], c,**kwargs) + elif self.membership_function == Membership.trapmf: + q = partlen / 2 + sets[_name] = FuzzySet.FuzzySet(_name, Membership.trapmf, [c - partlen, c - q, c + q, c + partlen], c,**kwargs) + count += 1 + + self.min = self.min - partlen + + return sets
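A minimal usage sketch (assuming, as usual in pyFTS, that the base Partitioner extracts the universe of discourse from the data keyword):

    import numpy as np
    from pyFTS.partitioners import Grid

    data = np.random.normal(0, 1, 1000)
    part = Grid.GridPartitioner(data=data, npart=10)
    print(len(part.sets))  # 10 evenly spaced fuzzy sets over the universe of discourse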
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/partitioners/Huarng.html b/docs/_build/html/_modules/pyFTS/partitioners/Huarng.html new file mode 100644 index 0000000..f0f1917 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/partitioners/Huarng.html @@ -0,0 +1,131 @@ + + + + + + + + pyFTS.partitioners.Huarng — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.partitioners.Huarng

+"""
+K. H. Huarng, “Effective lengths of intervals to improve forecasting in fuzzy time series,”
+Fuzzy Sets Syst., vol. 123, no. 3, pp. 387–394, Nov. 2001.
+"""
+
+import numpy as np
+import math
+import random as rnd
+import functools, operator
+from pyFTS.common import FuzzySet, Membership, Transformations
+
+from pyFTS.partitioners import partitioner
+
+
[docs]class HuarngPartitioner(partitioner.Partitioner): + """Huarng Empirical Partitioner""" + def __init__(self, **kwargs): + super(HuarngPartitioner, self).__init__(name="Huarng", **kwargs) + +
[docs] def build(self, data): + diff = Transformations.Differential(1) + data2 = diff.apply(data) + davg = np.abs( np.mean(data2) / 2 ) + + if davg <= 1.0: + base = 0.1 + elif 1 < davg <= 10: + base = 1.0 + elif 10 < davg <= 100: + base = 10 + else: + base = 100 + + sets = {} + + dlen = self.max - self.min + npart = math.ceil(dlen / base) + partition = math.ceil(self.min) + for c in range(npart): + _name = self.get_name(c) + if self.membership_function == Membership.trimf: + sets[_name] = FuzzySet.FuzzySet(_name, Membership.trimf, + [partition - base, partition, partition + base], partition) + elif self.membership_function == Membership.gaussmf: + sets[_name] = FuzzySet.FuzzySet(_name, Membership.gaussmf, + [partition, base/2], partition) + elif self.membership_function == Membership.trapmf: + sets[_name] = FuzzySet.FuzzySet(_name, Membership.trapmf, + [partition - base, partition - (base/2), + partition + (base / 2), partition + base], partition) + + partition += base + + return sets
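A usage sketch with a hypothetical sample. Note that HuarngPartitioner derives the interval base, and hence the number of partitions, from the average first difference of the data, so the npart argument is effectively ignored by build:

import numpy as np
from pyFTS.partitioners import Huarng

data = np.random.normal(50, 10, 300).tolist()  # hypothetical sample
part = Huarng.HuarngPartitioner(data=data)
print(len(part.sets))  # number of sets chosen by the base heuristic, not by npart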
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/partitioners/Util.html b/docs/_build/html/_modules/pyFTS/partitioners/Util.html new file mode 100644 index 0000000..2122805 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/partitioners/Util.html @@ -0,0 +1,167 @@ + + + + + + + + pyFTS.partitioners.Util — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.partitioners.Util

+"""
+Facility methods for pyFTS partitioners module
+"""
+
+import numpy as np
+import pandas as pd
+import matplotlib
+import matplotlib.colors as pltcolors
+import matplotlib.pyplot as plt
+#from mpl_toolkits.mplot3d import Axes3D
+
+from pyFTS.benchmarks import Measures
+from pyFTS.common import Membership, Util
+from pyFTS.partitioners import Grid,Huarng,FCM,Entropy
+
+all_methods = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner, Huarng.HuarngPartitioner]
+
+mfs = [Membership.trimf, Membership.gaussmf, Membership.trapmf]
+
+
+
[docs]def plot_sets(data, sets, titles, size=[12, 10], save=False, file=None, axis=None):
+    num = len(sets)
+
+    fig = None
+    if axis is None:
+        fig, axes = plt.subplots(nrows=num, ncols=1, figsize=size)
+    for k in np.arange(0, num):
+        ticks = []
+        x = []
+        ax = axes[k] if axis is None else axis
+        ax.set_title(titles[k])
+        ax.set_ylim([0, 1.1])
+        for key in sets[k].keys():
+            s = sets[k][key]
+            if s.mf == Membership.trimf:
+                ax.plot(s.parameters, [0, 1, 0])
+            elif s.mf == Membership.gaussmf:
+                tmpx = [kk for kk in np.arange(s.lower, s.upper)]
+                tmpy = [s.membership(kk) for kk in np.arange(s.lower, s.upper)]
+                ax.plot(tmpx, tmpy)
+            elif s.mf == Membership.trapmf:
+                ax.plot(s.parameters, [0, 1, 1, 0])
+            ticks.append(str(round(s.centroid, 0)) + '\n' + s.name)
+            x.append(s.centroid)
+        ax.xaxis.set_ticklabels(ticks)
+        ax.xaxis.set_ticks(x)
+
+    if axis is None:
+        plt.tight_layout()
+        # only save/show the figure created here; when an external axis is
+        # given, the caller owns the enclosing figure
+        Util.show_and_save_image(fig, file, save)
+ + +
[docs]def plot_partitioners(data, objs, tam=[12, 10], save=False, file=None, axis=None): + sets = [k.sets for k in objs] + titles = [k.name for k in objs] + plot_sets(data, sets, titles, tam, save, file, axis)
+ + +
[docs]def explore_partitioners(data, npart, methods=None, mf=None, transformation=None,
+                         size=[12, 10], save=False, file=None):
+    """
+    Create partitioners for the mf membership functions and npart partitions and show the partitioning images.
+
+    :param data: time series data
+    :param npart: maximum number of partitions of the universe of discourse
+    :param methods: a list with the partitioning methods to be used
+    :param mf: a list with the membership functions to be used
+    :param transformation: a transformation to be applied by the partitioners
+    :param size: list, the size of the output image [width, height]
+    :param save: boolean, if True the image will be saved on disk
+    :param file: string, the file path to save the image
+    :return: the list of the built partitioners
+    """
+    if methods is None:
+        methods = all_methods
+
+    if mf is None:
+        mf = mfs
+
+    objs = []
+
+    for p in methods:
+        for m in mf:
+            obj = p(data=data, npart=npart, func=m, transformation=transformation)
+            obj.name = obj.name + " - " + obj.membership_function.__name__
+            objs.append(obj)
+
+    plot_partitioners(data, objs, size, save, file)
+
+    return objs
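A sketch of a typical call, using the Enrollments dataset bundled with pyFTS.data (documented elsewhere in this diff); the output file name is arbitrary:

from pyFTS.data import Enrollments
from pyFTS.partitioners import Util as pUtil

data = Enrollments.get_data()
objs = pUtil.explore_partitioners(data, 10, save=True, file='partitioners.png')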
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/partitioners/parallel_util.html b/docs/_build/html/_modules/pyFTS/partitioners/parallel_util.html new file mode 100644 index 0000000..ce3fb63 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/partitioners/parallel_util.html @@ -0,0 +1,110 @@ + + + + + + + + pyFTS.partitioners.parallel_util — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.partitioners.parallel_util

+from copy import deepcopy
+from joblib import Parallel, delayed
+import multiprocessing
+import numpy as np
+
+from pyFTS.common import Membership
+from pyFTS.partitioners import Grid,Huarng,FCM,Entropy
+from pyFTS.partitioners import Util
+
+
+
[docs]def explore_partitioners(data, npart, methods=None, mf=None, tam=[12, 10], save=False, file=None):
+    all_methods = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]
+    mfs = [Membership.trimf, Membership.gaussmf, Membership.trapmf]
+
+    if methods is None:
+        methods = all_methods
+
+    if mf is None:
+        mf = mfs
+
+    num_cores = multiprocessing.cpu_count()
+
+    objs = []
+    for method in methods:
+        print(str(method))
+        # the partitioners accept keyword arguments only, so pass them by name
+        tmp = Parallel(n_jobs=num_cores)(delayed(method)(data=deepcopy(data), npart=npart, func=m) for m in mf)
+        objs.append(tmp)
+
+    objs = np.ravel(objs).tolist()
+
+    Util.plot_partitioners(data, objs, tam, save, file)
+
+    return objs
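Usage mirrors the serial version in pyFTS.partitioners.Util; a sketch with a hypothetical sample:

import numpy as np
from pyFTS.partitioners import parallel_util

data = np.random.normal(50, 10, 300).tolist()  # hypothetical sample
objs = parallel_util.explore_partitioners(data, 10)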
+ +
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/partitioners/partitioner.html b/docs/_build/html/_modules/pyFTS/partitioners/partitioner.html new file mode 100644 index 0000000..f127f6c --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/partitioners/partitioner.html @@ -0,0 +1,203 @@ + + + + + + + + pyFTS.partitioners.partitioner — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.partitioners.partitioner

+from pyFTS.common import FuzzySet, Membership
+import numpy as np
+import matplotlib.pylab as plt
+
+
+
[docs]class Partitioner(object):
+    """
+    Universe of Discourse partitioner. Splits the data into several fuzzy sets
+    """
+
+    def __init__(self, **kwargs):
+        """
+        Universe of Discourse partitioner scheme. Splits the data into several fuzzy sets
+        """
+        self.name = kwargs.get('name', "")
+        """partitioner name"""
+        self.partitions = kwargs.get('npart', 10)
+        """The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created"""
+        self.sets = {}
+        self.membership_function = kwargs.get('func', Membership.trimf)
+        """Fuzzy membership function (pyFTS.common.Membership)"""
+        self.setnames = kwargs.get('names', None)
+        """list of partition names. If None is given the partitions will be auto named with prefix"""
+        self.prefix = kwargs.get('prefix', 'A')
+        """prefix of auto generated partition names"""
+        self.transformation = kwargs.get('transformation', None)
+        """data transformation to be applied on data"""
+        self.indexer = kwargs.get('indexer', None)
+        self.variable = kwargs.get('variable', None)
+        self.type = kwargs.get('type', 'common')
+        self.ordered_sets = None
+
+        if kwargs.get('preprocess', True):
+
+            data = kwargs.get('data', [None])
+
+            # extract the raw values with the indexer, if any, then transform them
+            if self.indexer is not None:
+                ndata = self.indexer.get_data(data)
+            else:
+                ndata = data
+
+            if self.transformation is not None:
+                ndata = self.transformation.apply(ndata)
+
+            _min = np.nanmin(ndata)
+            if _min == -np.inf:
+                ndata[ndata == -np.inf] = 0
+                _min = np.nanmin(ndata)
+
+            self.min = float(_min * 1.1 if _min < 0 else _min * 0.9)
+
+            _max = np.nanmax(ndata)
+            self.max = float(_max * 1.1 if _max > 0 else _max * 0.9)
+
+            self.sets = self.build(ndata)
+
+            if self.ordered_sets is None and self.setnames is not None:
+                self.ordered_sets = self.setnames
+            else:
+                self.ordered_sets = FuzzySet.set_ordered(self.sets)
+
+            del(ndata)
[docs]    def build(self, data):
+        """
+        Perform the partitioning of the Universe of Discourse
+
+        :param data: training data
+        :return: a dictionary of the built fuzzy sets, indexed by their names
+        """
+        pass
+ +
[docs] def get_name(self, counter): + return self.prefix + str(counter) if self.setnames is None else self.setnames[counter]
+ +
[docs] def lower_set(self): + return self.sets[self.ordered_sets[0]]
+ +
[docs] def upper_set(self): + return self.sets[self.ordered_sets[-1]]
+ +
[docs]    def plot(self, ax):
+        """
+        Plot the partitioning on the given matplotlib axis
+
+        :param ax: a matplotlib axis
+        """
+        ax.set_title(self.name)
+        ax.set_ylim([0, 1])
+        ax.set_xlim([self.min, self.max])
+        ticks = []
+        x = []
+        for key in self.sets.keys():
+            s = self.sets[key]
+            if s.type == 'common':
+                self.plot_set(ax, s)
+            elif s.type == 'composite':
+                for ss in s.sets:
+                    self.plot_set(ax, ss)
+            ticks.append(str(round(s.centroid, 0)) + '\n' + s.name)
+            x.append(s.centroid)
+        ax.xaxis.set_ticklabels(ticks)
+        ax.xaxis.set_ticks(x)
+ +
[docs] def plot_set(self, ax, s): + if s.mf == Membership.trimf: + ax.plot([s.parameters[0], s.parameters[1], s.parameters[2]], [0, s.alpha, 0]) + elif s.mf == Membership.gaussmf: + tmpx = [kk for kk in np.arange(s.lower, s.upper)] + tmpy = [s.membership(kk) for kk in np.arange(s.lower, s.upper)] + ax.plot(tmpx, tmpy) + elif s.mf == Membership.trapmf: + ax.plot(s.parameters, [0, s.alpha, s.alpha, 0])
+ + + def __str__(self): + tmp = self.name + ":\n" + for key in self.sets.keys(): + tmp += str(self.sets[key])+ "\n" + return tmp
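Since build is the only abstract piece, a custom partitioner reduces to one method. A hypothetical quantile-based subclass, given only as a sketch of the extension contract:

import numpy as np
from pyFTS.common import FuzzySet, Membership
from pyFTS.partitioners import partitioner

class QuantilePartitioner(partitioner.Partitioner):
    """Hypothetical partitioner: triangular sets centered on the data quantiles."""
    def __init__(self, **kwargs):
        super(QuantilePartitioner, self).__init__(name="Quantile", **kwargs)

    def build(self, data):
        sets = {}
        # npart + 2 quantiles give npart inner centers plus the two extremes
        points = np.percentile(data, np.linspace(0, 100, self.partitions + 2))
        for c in range(1, len(points) - 1):
            _name = self.get_name(c - 1)
            sets[_name] = FuzzySet.FuzzySet(_name, Membership.trimf,
                                            [points[c - 1], points[c], points[c + 1]],
                                            points[c])
        return sets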
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/probabilistic/ProbabilityDistribution.html b/docs/_build/html/_modules/pyFTS/probabilistic/ProbabilityDistribution.html new file mode 100644 index 0000000..e5f4a71 --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/probabilistic/ProbabilityDistribution.html @@ -0,0 +1,330 @@ + + + + + + + + pyFTS.probabilistic.ProbabilityDistribution — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.probabilistic.ProbabilityDistribution

+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+from pyFTS.common import FuzzySet,SortedCollection,tree
+from pyFTS.probabilistic import kde
+
+
+
[docs]class ProbabilityDistribution(object):
+    """
+    Represents a discrete or continuous probability distribution.
+    If type is histogram, the PDF is discrete
+    If type is KDE the PDF is continuous
+    """
+    def __init__(self, type="KDE", **kwargs):
+        self.uod = kwargs.get("uod", None)
+        """Universe of discourse"""
+
+        self.data = []
+
+        self.type = type
+        """
+        If type is histogram, the PDF is discrete
+        If type is KDE the PDF is continuous
+        """
+
+        self.bins = kwargs.get("bins", None)
+        """Number of bins on a discrete PDF"""
+        self.labels = kwargs.get("bins_labels", None)
+        """Bins labels on a discrete PDF"""
+
+        data = kwargs.get("data", None)
+
+        if self.type == "KDE":
+            self.kde = kde.KernelSmoothing(kwargs.get("h", 0.5), kwargs.get("kernel", "epanechnikov"))
+            _min = np.nanmin(data)
+            _min = _min * .7 if _min > 0 else _min * 1.3
+            _max = np.nanmax(data)
+            _max = _max * 1.3 if _max > 0 else _max * .7
+            self.uod = [_min, _max]
+
+        self.nbins = kwargs.get("num_bins", 100)
+
+        if self.bins is None:
+            self.bins = np.linspace(int(self.uod[0]), int(self.uod[1]), int(self.nbins)).tolist()
+            self.labels = [str(k) for k in self.bins]
+
+        if self.uod is not None:
+            self.resolution = (self.uod[1] - self.uod[0]) / self.nbins
+
+        self.bin_index = SortedCollection.SortedCollection(iterable=sorted(self.bins))
+        self.quantile_index = None
+        self.distribution = {}
+        self.cdf = None
+        self.qtl = None
+        self.count = 0
+        for k in self.bins:
+            self.distribution[k] = 0
+
+        if data is not None:
+            self.append(data)
+
+        self.name = kwargs.get("name", "")
[docs] def set(self, value, density): + k = self.bin_index.find_ge(value) + self.distribution[k] = density
+ +
[docs] def append(self, values): + if self.type == "histogram": + for k in values: + v = self.bin_index.find_ge(k) + self.distribution[v] += 1 + self.count += 1 + else: + self.data.extend(values) + self.distribution = {} + dens = self.density(self.bins) + for v,d in enumerate(dens): + self.distribution[self.bins[v]] = d
+ +
[docs] def append_interval(self, intervals): + if self.type == "histogram": + for interval in intervals: + for k in self.bin_index.inside(interval[0], interval[1]): + self.distribution[k] += 1 + self.count += 1
+ +
[docs] def density(self, values): + ret = [] + scalar = False + + if not isinstance(values, list): + values = [values] + scalar = True + + for k in values: + if self.type == "histogram": + v = self.bin_index.find_ge(k) + ret.append(self.distribution[v] / (self.count + 1e-5)) + elif self.type == "KDE": + v = self.kde.probability(k, self.data) + ret.append(v) + else: + v = self.bin_index.find_ge(k) + ret.append(self.distribution[v]) + + if scalar: + return ret[0] + + return ret
+ +
[docs] def differential_offset(self, value): + nbins = [] + dist = {} + + for k in self.bins: + nk = k+value + nbins.append(nk) + dist[nk] = self.distribution[k] + + self.bins = nbins + self.distribution = dist + self.labels = [str(k) for k in self.bins] + + self.bin_index = SortedCollection.SortedCollection(iterable=sorted(self.bins)) + self.quantile_index = None + self.cdf = None + self.qtl = None
+ +
[docs] def expected_value(self): + return np.nansum([v * self.distribution[v] for v in self.bins])
+ +
[docs] def build_cdf_qtl(self): + ret = 0.0 + self.cdf = {} + self.qtl = {} + for k in sorted(self.bins): + ret += self.density(k) + if k not in self.cdf: + self.cdf[k] = ret + + if str(ret) not in self.qtl: + self.qtl[str(ret)] = [] + + self.qtl[str(ret)].append(k) + + _keys = [float(k) for k in sorted(self.qtl.keys())] + + self.quantile_index = SortedCollection.SortedCollection(iterable=_keys)
+ +
[docs]    def cummulative(self, values):
+        if self.cdf is None:
+            self.build_cdf_qtl()
+
+        if isinstance(values, list):
+            ret = []
+            for val in values:
+                k = self.bin_index.find_ge(val)
+                ret.append(self.cdf[k])
+            return ret
+        else:
+            k = self.bin_index.find_ge(values)
+            return self.cdf[k]
+ +
[docs] def quantile(self, values): + if self.qtl is None: + self.build_cdf_qtl() + + if isinstance(values, list): + ret = [] + for val in values: + k = self.quantile_index.find_ge(val) + ret.append(self.qtl[str(k)][0]) + else: + k = self.quantile_index.find_ge(values) + ret = self.qtl[str(k)] + + return ret
+ +
[docs] def entropy(self): + h = -sum([self.distribution[k] * np.log(self.distribution[k]) if self.distribution[k] > 0 else 0 + for k in self.bins]) + return h
+ +
[docs] def crossentropy(self,q): + h = -sum([self.distribution[k] * np.log(q.distribution[k]) if self.distribution[k] > 0 else 0 + for k in self.bins]) + return h
+ +
[docs] def kullbackleiblerdivergence(self,q): + h = sum([self.distribution[k] * np.log(self.distribution[k]/q.distribution[k]) if self.distribution[k] > 0 else 0 + for k in self.bins]) + return h
+ +
[docs] def empiricalloglikelihood(self): + _s = 0 + for k in self.bins: + if self.distribution[k] > 0: + _s += np.log(self.distribution[k]) + return _s
+ +
[docs] def pseudologlikelihood(self, data): + + densities = self.density(data) + + _s = 0 + for k in densities: + if k > 0: + _s += np.log(k) + return _s
+ +
[docs] def averageloglikelihood(self, data): + + densities = self.density(data) + + _s = 0 + for k in densities: + if k > 0: + _s += np.log(k) + return _s / len(data)
+ +
[docs] def plot(self,axis=None,color="black",tam=[10, 6], title = None): + + if axis is None: + fig = plt.figure(figsize=tam) + axis = fig.add_subplot(111) + + if self.type == "histogram": + ys = [self.distribution[k]/self.count for k in self.bins] + else: + ys = [self.distribution[k] for k in self.bins] + yp = [0 for k in self.data] + axis.plot(self.data, yp, c="red") + + if title is None: + title = self.name + axis.plot(self.bins, ys, c=color) + axis.set_title(title) + + axis.set_xlabel('Universe of Discourse') + axis.set_ylabel('Probability')
+ + def __str__(self): + ret = "" + for k in sorted(self.bins): + ret += str(round(k,2)) + ':\t' + if self.type == "histogram": + ret += str(round(self.distribution[k] / self.count,3)) + elif self.type == "KDE": + ret += str(round(self.density(k),3)) + else: + ret += str(round(self.distribution[k], 6)) + ret += '\n' + return ret
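A sketch contrasting the two distribution types; the sample data is invented and the keyword names follow the constructor above:

import numpy as np
from pyFTS.probabilistic import ProbabilityDistribution

data = np.random.normal(0, 1, 500).tolist()

# continuous estimate via KDE; the universe of discourse is inferred from the data
pd_kde = ProbabilityDistribution.ProbabilityDistribution(type="KDE", data=data)
print(pd_kde.density(0.0))

# discrete estimate via histogram; here uod and the bin count must be supplied
pd_hist = ProbabilityDistribution.ProbabilityDistribution(type="histogram", data=data,
                                                          uod=[-5, 5], num_bins=50)
print(pd_hist.cummulative(0.0))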
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_modules/pyFTS/probabilistic/kde.html b/docs/_build/html/_modules/pyFTS/probabilistic/kde.html new file mode 100644 index 0000000..ad2aa1b --- /dev/null +++ b/docs/_build/html/_modules/pyFTS/probabilistic/kde.html @@ -0,0 +1,134 @@ + + + + + + + + pyFTS.probabilistic.kde — pyFTS 1.2.3 documentation + + + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for pyFTS.probabilistic.kde

+# -*- coding: utf8 -*-
+
+"""
+Kernel Density Estimation 
+"""
+
+from pyFTS.common import Transformations
+import numpy as np
+
+
+
[docs]class KernelSmoothing(object): + """Kernel Density Estimation""" + def __init__(self,h, kernel="epanechnikov"): + self.h = h + """Width parameter""" + self.kernel = kernel + """Kernel function""" + self.transf = Transformations.Scale(min=0,max=1) + +
[docs] def kernel_function(self, u): + if self.kernel == "epanechnikov": + tmp = (3/4)*(1.0 - u**2) + return tmp if tmp > 0 else 0 + elif self.kernel == "gaussian": + return (1.0/np.sqrt(2*np.pi))*np.exp(-0.5*u**2) + elif self.kernel == "uniform": + return 0.5 + elif self.kernel == "triangular": + tmp = 1.0 - np.abs(u) + return tmp if tmp > 0 else 0 + elif self.kernel == "logistic": + return 1.0/(np.exp(u)+2+np.exp(-u)) + elif self.kernel == "cosine": + return (np.pi/4.0)*np.cos((np.pi/2.0)*u) + elif self.kernel == "sigmoid": + return (2.0/np.pi)*(1.0/(np.exp(u)+np.exp(-u))) + elif self.kernel == "tophat": + return 1 if np.abs(u) < 0.5 else 0 + elif self.kernel == "exponential": + return 0.5 * np.exp(-np.abs(u))
+ +
[docs]    def probability(self, x, data):
+        """
+        Kernel density estimate at the point x, given the data
+
+        :param x: the point whose density will be estimated
+        :param data: the sample used by the estimator
+        :return: the estimated density at x
+        """
+        l = len(data)
+
+        ndata = self.transf.apply(data)
+        nx = self.transf.apply(x)
+        # the kernel sum is normalized by n * h
+        p = sum([self.kernel_function((nx - k) / self.h) for k in ndata]) / (l * self.h)
+
+        return p
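A minimal sketch of the estimator on its own; the data is a hypothetical sample, and the h value matches the default used by ProbabilityDistribution:

import numpy as np
from pyFTS.probabilistic import kde

data = np.random.normal(0, 1, 200).tolist()  # hypothetical sample
smoother = kde.KernelSmoothing(0.5, kernel="gaussian")
print(smoother.probability(0.0, data))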
+
+ +
+
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/_build/html/_sources/index.rst.txt b/docs/_build/html/_sources/index.rst.txt new file mode 100644 index 0000000..5558b62 --- /dev/null +++ b/docs/_build/html/_sources/index.rst.txt @@ -0,0 +1,20 @@ +.. pyFTS documentation master file, created by + sphinx-quickstart on Wed Aug 29 13:47:28 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to pyFTS's documentation! +================================= + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/_build/html/_sources/modules.rst.txt b/docs/_build/html/_sources/modules.rst.txt new file mode 100644 index 0000000..9ae9899 --- /dev/null +++ b/docs/_build/html/_sources/modules.rst.txt @@ -0,0 +1,7 @@ +pyFTS +===== + +.. toctree:: + :maxdepth: 4 + + pyFTS diff --git a/docs/_build/html/_sources/pyFTS.benchmarks.rst.txt b/docs/_build/html/_sources/pyFTS.benchmarks.rst.txt new file mode 100644 index 0000000..ff8abee --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.benchmarks.rst.txt @@ -0,0 +1,94 @@ +pyFTS.benchmarks package +======================== + +Submodules +---------- + +pyFTS.benchmarks.Measures module +-------------------------------- + +.. automodule:: pyFTS.benchmarks.Measures + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.ResidualAnalysis module +---------------------------------------- + +.. automodule:: pyFTS.benchmarks.ResidualAnalysis + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.Util module +---------------------------- + +.. automodule:: pyFTS.benchmarks.Util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.arima module +----------------------------- + +.. automodule:: pyFTS.benchmarks.arima + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.benchmarks module +---------------------------------- + +.. automodule:: pyFTS.benchmarks.benchmarks + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.distributed\_benchmarks module +----------------------------------------------- + +.. automodule:: pyFTS.benchmarks.distributed_benchmarks + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.knn module +--------------------------- + +.. automodule:: pyFTS.benchmarks.knn + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.naive module +----------------------------- + +.. automodule:: pyFTS.benchmarks.naive + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.parallel\_benchmarks module +-------------------------------------------- + +.. automodule:: pyFTS.benchmarks.parallel_benchmarks + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.quantreg module +-------------------------------- + +.. automodule:: pyFTS.benchmarks.quantreg + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.benchmarks + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.common.rst.txt b/docs/_build/html/_sources/pyFTS.common.rst.txt new file mode 100644 index 0000000..69c5903 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.common.rst.txt @@ -0,0 +1,94 @@ +pyFTS.common package +==================== + +Submodules +---------- + +pyFTS.common.Composite module +----------------------------- + +.. 
automodule:: pyFTS.common.Composite + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.FLR module +----------------------- + +.. automodule:: pyFTS.common.FLR + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.FuzzySet module +---------------------------- + +.. automodule:: pyFTS.common.FuzzySet + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.Membership module +------------------------------ + +.. automodule:: pyFTS.common.Membership + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.SortedCollection module +------------------------------------ + +.. automodule:: pyFTS.common.SortedCollection + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.Transformations module +----------------------------------- + +.. automodule:: pyFTS.common.Transformations + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.Util module +------------------------ + +.. automodule:: pyFTS.common.Util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.flrg module +------------------------ + +.. automodule:: pyFTS.common.flrg + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.fts module +----------------------- + +.. automodule:: pyFTS.common.fts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.tree module +------------------------ + +.. automodule:: pyFTS.common.tree + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.common + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.data.rst.txt b/docs/_build/html/_sources/pyFTS.data.rst.txt new file mode 100644 index 0000000..f451c32 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.data.rst.txt @@ -0,0 +1,134 @@ +pyFTS.data package +================== + +Submodules +---------- + +pyFTS.data.AirPassengers module +------------------------------- + +.. automodule:: pyFTS.data.AirPassengers + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.Enrollments module +----------------------------- + +.. automodule:: pyFTS.data.Enrollments + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.INMET module +----------------------- + +.. automodule:: pyFTS.data.INMET + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.NASDAQ module +------------------------ + +.. automodule:: pyFTS.data.NASDAQ + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.SONDA module +----------------------- + +.. automodule:: pyFTS.data.SONDA + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.SP500 module +----------------------- + +.. automodule:: pyFTS.data.SP500 + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.TAIEX module +----------------------- + +.. automodule:: pyFTS.data.TAIEX + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.artificial module +---------------------------- + +.. automodule:: pyFTS.data.artificial + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.common module +------------------------ + +.. automodule:: pyFTS.data.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.henon module +----------------------- + +.. automodule:: pyFTS.data.henon + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.logistic\_map module +------------------------------- + +.. automodule:: pyFTS.data.logistic_map + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.lorentz module +------------------------- + +.. 
automodule:: pyFTS.data.lorentz + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.mackey\_glass module +------------------------------- + +.. automodule:: pyFTS.data.mackey_glass + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.rossler module +------------------------- + +.. automodule:: pyFTS.data.rossler + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.sunspots module +-------------------------- + +.. automodule:: pyFTS.data.sunspots + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.data + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.models.ensemble.rst.txt b/docs/_build/html/_sources/pyFTS.models.ensemble.rst.txt new file mode 100644 index 0000000..93822f8 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.models.ensemble.rst.txt @@ -0,0 +1,30 @@ +pyFTS.models.ensemble package +============================= + +Submodules +---------- + +pyFTS.models.ensemble.ensemble module +------------------------------------- + +.. automodule:: pyFTS.models.ensemble.ensemble + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.ensemble.multiseasonal module +------------------------------------------ + +.. automodule:: pyFTS.models.ensemble.multiseasonal + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models.ensemble + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.models.multivariate.rst.txt b/docs/_build/html/_sources/pyFTS.models.multivariate.rst.txt new file mode 100644 index 0000000..10ac3ef --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.models.multivariate.rst.txt @@ -0,0 +1,54 @@ +pyFTS.models.multivariate package +================================= + +Submodules +---------- + +pyFTS.models.multivariate.FLR module +------------------------------------ + +.. automodule:: pyFTS.models.multivariate.FLR + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.common module +--------------------------------------- + +.. automodule:: pyFTS.models.multivariate.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.flrg module +------------------------------------- + +.. automodule:: pyFTS.models.multivariate.flrg + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.mvfts module +-------------------------------------- + +.. automodule:: pyFTS.models.multivariate.mvfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.variable module +----------------------------------------- + +.. automodule:: pyFTS.models.multivariate.variable + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models.multivariate + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.models.nonstationary.rst.txt b/docs/_build/html/_sources/pyFTS.models.nonstationary.rst.txt new file mode 100644 index 0000000..351df32 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.models.nonstationary.rst.txt @@ -0,0 +1,78 @@ +pyFTS.models.nonstationary package +================================== + +Submodules +---------- + +pyFTS.models.nonstationary.common module +---------------------------------------- + +.. 
automodule:: pyFTS.models.nonstationary.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.cvfts module +--------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.cvfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.flrg module +-------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.flrg + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.honsfts module +----------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.honsfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.nsfts module +--------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.nsfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.partitioners module +---------------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.partitioners + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.perturbation module +---------------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.perturbation + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.util module +-------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.util + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models.nonstationary + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.models.rst.txt b/docs/_build/html/_sources/pyFTS.models.rst.txt new file mode 100644 index 0000000..91a0bd7 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.models.rst.txt @@ -0,0 +1,104 @@ +pyFTS.models package +==================== + +Subpackages +----------- + +.. toctree:: + + pyFTS.models.ensemble + pyFTS.models.multivariate + pyFTS.models.nonstationary + pyFTS.models.seasonal + +Submodules +---------- + +pyFTS.models.chen module +------------------------ + +.. automodule:: pyFTS.models.chen + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.cheng module +------------------------- + +.. automodule:: pyFTS.models.cheng + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.hofts module +------------------------- + +.. automodule:: pyFTS.models.hofts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.hwang module +------------------------- + +.. automodule:: pyFTS.models.hwang + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.ifts module +------------------------ + +.. automodule:: pyFTS.models.ifts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.ismailefendi module +-------------------------------- + +.. automodule:: pyFTS.models.ismailefendi + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.pwfts module +------------------------- + +.. automodule:: pyFTS.models.pwfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.sadaei module +-------------------------- + +.. automodule:: pyFTS.models.sadaei + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.song module +------------------------ + +.. automodule:: pyFTS.models.song + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.yu module +---------------------- + +.. automodule:: pyFTS.models.yu + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: pyFTS.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.models.seasonal.rst.txt b/docs/_build/html/_sources/pyFTS.models.seasonal.rst.txt new file mode 100644 index 0000000..9c3cfba --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.models.seasonal.rst.txt @@ -0,0 +1,62 @@ +pyFTS.models.seasonal package +============================= + +Submodules +---------- + +pyFTS.models.seasonal.SeasonalIndexer module +-------------------------------------------- + +.. automodule:: pyFTS.models.seasonal.SeasonalIndexer + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.cmsfts module +----------------------------------- + +.. automodule:: pyFTS.models.seasonal.cmsfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.common module +----------------------------------- + +.. automodule:: pyFTS.models.seasonal.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.msfts module +---------------------------------- + +.. automodule:: pyFTS.models.seasonal.msfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.partitioner module +---------------------------------------- + +.. automodule:: pyFTS.models.seasonal.partitioner + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.sfts module +--------------------------------- + +.. automodule:: pyFTS.models.seasonal.sfts + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models.seasonal + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.partitioners.rst.txt b/docs/_build/html/_sources/pyFTS.partitioners.rst.txt new file mode 100644 index 0000000..b6ba008 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.partitioners.rst.txt @@ -0,0 +1,78 @@ +pyFTS.partitioners package +========================== + +Submodules +---------- + +pyFTS.partitioners.CMeans module +-------------------------------- + +.. automodule:: pyFTS.partitioners.CMeans + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Entropy module +--------------------------------- + +.. automodule:: pyFTS.partitioners.Entropy + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.FCM module +----------------------------- + +.. automodule:: pyFTS.partitioners.FCM + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Grid module +------------------------------ + +.. automodule:: pyFTS.partitioners.Grid + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Huarng module +-------------------------------- + +.. automodule:: pyFTS.partitioners.Huarng + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Util module +------------------------------ + +.. automodule:: pyFTS.partitioners.Util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.parallel\_util module +---------------------------------------- + +.. automodule:: pyFTS.partitioners.parallel_util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.partitioner module +------------------------------------- + +.. automodule:: pyFTS.partitioners.partitioner + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: pyFTS.partitioners + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.probabilistic.rst.txt b/docs/_build/html/_sources/pyFTS.probabilistic.rst.txt new file mode 100644 index 0000000..097a5a2 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.probabilistic.rst.txt @@ -0,0 +1,30 @@ +pyFTS.probabilistic package +=========================== + +Submodules +---------- + +pyFTS.probabilistic.ProbabilityDistribution module +-------------------------------------------------- + +.. automodule:: pyFTS.probabilistic.ProbabilityDistribution + :members: + :undoc-members: + :show-inheritance: + +pyFTS.probabilistic.kde module +------------------------------ + +.. automodule:: pyFTS.probabilistic.kde + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.probabilistic + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_sources/pyFTS.rst.txt b/docs/_build/html/_sources/pyFTS.rst.txt new file mode 100644 index 0000000..707f7f7 --- /dev/null +++ b/docs/_build/html/_sources/pyFTS.rst.txt @@ -0,0 +1,34 @@ +pyFTS package +============= + +Subpackages +----------- + +.. toctree:: + + pyFTS.benchmarks + pyFTS.common + pyFTS.data + pyFTS.models + pyFTS.partitioners + pyFTS.probabilistic + +Submodules +---------- + +pyFTS.conf module +----------------- + +.. automodule:: pyFTS.conf + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/_build/html/_static/ajax-loader.gif b/docs/_build/html/_static/ajax-loader.gif new file mode 100644 index 0000000..61faf8c Binary files /dev/null and b/docs/_build/html/_static/ajax-loader.gif differ diff --git a/docs/_build/html/_static/alabaster.css b/docs/_build/html/_static/alabaster.css new file mode 100644 index 0000000..be65b13 --- /dev/null +++ b/docs/_build/html/_static/alabaster.css @@ -0,0 +1,693 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; +} + + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 
5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: 'Garamond', 'Georgia', serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + +div.sphinxsidebar input { + border: 1px solid #CCC; + font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif; + font-size: 1em; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fafafa; +} + +div.admonition p.admonition-title { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: #fff; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.seealso { + background-color: 
#EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #EEE; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + + +@media screen and (max-width: 870px) { + + div.sphinxsidebar { + display: none; + } + + div.document { + width: 100%; + + } + + div.documentwrapper { + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.bodywrapper { + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + margin-left: 0; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .bodywrapper { + margin: 0; + } + 
+ .footer { + width: auto; + } + + .github { + display: none; + } + + + +} + + + +@media screen and (max-width: 875px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + } + + div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } + + div.body { + min-height: 0; + padding: 0; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Make nested-list/multi-paragraph items look better in Releases changelog + * pages. Without this, docutils' magical list fuckery causes inconsistent + * formatting between different release sub-lists. + */ +div#changelog > div.section > ul > li > p:only-child { + margin-bottom: 0; +} + +/* Hide fugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} \ No newline at end of file diff --git a/docs/_build/html/_static/basic.css b/docs/_build/html/_static/basic.css new file mode 100644 index 0000000..19ced10 --- /dev/null +++ b/docs/_build/html/_static/basic.css @@ -0,0 +1,665 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 450px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + 
-ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + 
list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: relative; + left: 0px; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/_build/html/_static/comment-bright.png b/docs/_build/html/_static/comment-bright.png new file mode 100644 index 0000000..15e27ed Binary 
files /dev/null and b/docs/_build/html/_static/comment-bright.png differ diff --git a/docs/_build/html/_static/comment-close.png b/docs/_build/html/_static/comment-close.png new file mode 100644 index 0000000..4d91bcf Binary files /dev/null and b/docs/_build/html/_static/comment-close.png differ diff --git a/docs/_build/html/_static/comment.png b/docs/_build/html/_static/comment.png new file mode 100644 index 0000000..dfbc0cb Binary files /dev/null and b/docs/_build/html/_static/comment.png differ diff --git a/docs/_build/html/_static/custom.css b/docs/_build/html/_static/custom.css new file mode 100644 index 0000000..2a924f1 --- /dev/null +++ b/docs/_build/html/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/docs/_build/html/_static/doctools.js b/docs/_build/html/_static/doctools.js new file mode 100644 index 0000000..0c15c00 --- /dev/null +++ b/docs/_build/html/_static/doctools.js @@ -0,0 +1,311 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
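+ *
+ * Illustrative call only (assumes a rendered page with a div.body), mirroring
+ * how Documentation.highlightSearchWords uses this helper further below:
+ *   $('div.body').highlightText('fuzzy', 'highlighted');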
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var bbox = span.getBBox(); + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + var parentOfText = node.parentNode.parentNode; + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated === 'undefined') + return string; + return (typeof translated === 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated === 'undefined') + return (n == 1) ? 
singular : plural; + return translated[Documentation.PLURAL_EXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + + /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash && $.browser.mozilla) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('<p class="highlight-link"><a href="javascript:Documentation.' + + 'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) === 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this === '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + break; + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + break; + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); \ No newline at end of file diff --git 
a/docs/_build/html/_static/documentation_options.js b/docs/_build/html/_static/documentation_options.js new file mode 100644 index 0000000..0c676dc --- /dev/null +++ b/docs/_build/html/_static/documentation_options.js @@ -0,0 +1,9 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: '', + VERSION: '1.2.3', + LANGUAGE: 'None', + COLLAPSE_INDEX: false, + FILE_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt' +}; \ No newline at end of file diff --git a/docs/_build/html/_static/down-pressed.png b/docs/_build/html/_static/down-pressed.png new file mode 100644 index 0000000..5756c8c Binary files /dev/null and b/docs/_build/html/_static/down-pressed.png differ diff --git a/docs/_build/html/_static/down.png b/docs/_build/html/_static/down.png new file mode 100644 index 0000000..1b3bdad Binary files /dev/null and b/docs/_build/html/_static/down.png differ diff --git a/docs/_build/html/_static/file.png b/docs/_build/html/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/docs/_build/html/_static/file.png differ diff --git a/docs/_build/html/_static/jquery-3.2.1.js b/docs/_build/html/_static/jquery-3.2.1.js new file mode 100644 index 0000000..d2d8ca4 --- /dev/null +++ b/docs/_build/html/_static/jquery-3.2.1.js @@ -0,0 +1,10253 @@ +/*! + * jQuery JavaScript Library v3.2.1 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2017-03-20T18:59Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. 
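+// Illustrative consumption sketch (not part of the upstream source): loading
+// this file in a browser defines the window.jQuery / window.$ globals, while
+// CommonJS consumers without a document must supply a window explicitly:
+//   var jQuery = require("jquery")(window);  // see ticket #14549 above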
+"use strict"; + +var arr = []; + +var document = window.document; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var concat = arr.concat; + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + + + + function DOMEval( code, doc ) { + doc = doc || document; + + var script = doc.createElement( "script" ); + + script.text = code; + doc.head.appendChild( script ).parentNode.removeChild( script ); + } +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.2.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android <=4.0 only + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
+ push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + + if ( copyIsArray ) { + copyIsArray = false; + clone = src && Array.isArray( src ) ? src : []; + + } else { + clone = src && jQuery.isPlainObject( src ) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isFunction: function( obj ) { + return jQuery.type( obj ) === "function"; + }, + + isWindow: function( obj ) { + return obj != null && obj === obj.window; + }, + + isNumeric: function( obj ) { + + // As of jQuery 3.0, isNumeric is limited to + // strings and numbers (primitives or objects) + // that can be coerced to finite numbers (gh-2662) + var type = jQuery.type( obj ); + return ( type === "number" || type === "string" ) && + + // parseFloat NaNs numeric-cast false positives ("") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + !isNaN( obj - parseFloat( obj ) ); + }, + + isPlainObject: function( obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + + /* eslint-disable no-unused-vars */ + // See https://github.com/eslint/eslint/issues/6125 + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + 
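+ // (Illustrative: class2type is populated further below, so
+ // toString.call( [] ) yields "[object Array]" and jQuery.type( [] )
+ // returns "array", while null and undefined stringify to "null" and
+ // "undefined" in the early return above.)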
return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + globalEval: function( code ) { + DOMEval( code ); + }, + + // Convert dashed to camelCase; used by the css and data modules + // Support: IE <=9 - 11, Edge 12 - 13 + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // Support: Android <=4.0 only + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. 
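+ // (Illustrative: a typical call is jQuery.proxy( obj.onClick, obj ), which
+ // returns a wrapper whose `this` is always obj, however it is invoked.)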
+ if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: Date.now, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), +function( i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +} ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.3 + * https://sizzlejs.com/ + * + * Copyright jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2016-08-08 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = "(?:\\\\.|[\\w-]|[^\0-\\xa0])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + // To reduce the number of selectors 
needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? 
+ // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + disabledAncestor = addCombinator( + function( elem ) { + return elem.disabled === true && ("form" in elem || "label" in elem); + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && (match = rquickExpr.exec( selector )) ) { + + // ID selector + if ( (m = match[1]) ) { + + // Document context + if ( nodeType === 9 ) { + if ( (elem = context.getElementById( m )) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && (elem = newContext.getElementById( m )) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( (m = match[3]) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !compilerCache[ selector + " " ] && + (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + + if ( nodeType !== 1 ) { + newContext = context; + newSelector = selector; + + // qSA looks outside Element context, which is not what we want + // Thanks to Andrew Dupont for this workaround technique + // Support: IE <=8 + // Exclude object elements + } else if ( context.nodeName.toLowerCase() !== "object" ) { + + // Capture the context ID, setting it first if necessary + if ( (nid = context.getAttribute( "id" )) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", (nid = expando) ); + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[i] = "#" + nid + " " + toSelector( groups[i] ); + } + newSelector = groups.join( "," ); + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ 
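+// (Illustrative: Sizzle later tests fn[ expando ], see Expr.filter.PSEUDO,
+// to tell marked, createPseudo-built functions apart from plain filters.)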
+function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement("fieldset"); + + try { + return !!fn( el ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. 
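+ // (Illustrative: an <input> inside a <fieldset disabled> reports
+ // elem.disabled === false, yet should still match :disabled; that is
+ // exactly what the ancestor checks below handle.)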
+ if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + disabledAncestor( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. + } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? 
node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9-11, Edge + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + if ( preferredDoc !== document && + (subWindow = document.defaultView) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert(function( el ) { + el.className = "i"; + return !el.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( el ) { + el.appendChild( document.createComment("") ); + return !el.getElementsByTagName("*").length; + }); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + }); + + // ID filter and find + if ( support.getById ) { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( (elem = elems[i++]) ) { + node = elem.getAttributeNode("id"); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== "undefined" ) { + return context.getElementsByTagName( tag ); + + // DocumentFragment nodes don't have gEBTN + } else if ( support.qsa ) { + return context.querySelectorAll( tag ); + } + } : + + function( tag, context ) { + var elem, + tmp = [], + i = 0, + // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See https://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( document.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( el ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // https://bugs.jquery.com/ticket/12359 + docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" + + "<select id='" + expando + "-\r\\' msallowcapture=''>" + + "<option selected=''></option></select>"; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( el.querySelectorAll("[msallowcapture^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !el.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+ + if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) { + rbuggyQSA.push("~="); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !el.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + + // Support: Safari 8+, iOS 8+ + // https://bugs.webkit.org/show_bug.cgi?id=136851 + // In-page `selector#id sibling-combinator selector` fails + if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) { + rbuggyQSA.push(".#.+[+~]"); + } + }); + + assert(function( el ) { + el.innerHTML = "<a href='' disabled='disabled'></a>" + + "<select disabled='disabled'><option/></select>"; + + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = document.createElement("input"); + input.setAttribute( "type", "hidden" ); + el.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + 
// Enforce case-sensitivity of name attribute + if ( el.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( el.querySelectorAll(":enabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Support: IE9-11+ + // IE's :disabled selector does not pick up the children of disabled fieldsets + docElem.appendChild( el ).disabled = true; + if ( el.querySelectorAll(":disabled").length !== 2 ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + el.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( el ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( el, "*" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( el, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully self-exclusive + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === document || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === document || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? 
-1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === document ? -1 : + b === document ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + !compilerCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch (e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return (sel + "").replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== "undefined" && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + // Use previously-cached element index if available + if ( useCache ) { + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || (node[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + (outerCache[ node.uniqueID ] = {}); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
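+
+	/* A hedged usage sketch for this dispatch (the pseudo names here are
+	   hypothetical; jQuery.expr.pseudos and createPseudo are the public hooks):
+
+	   // argumentless pseudo: a plain filter function
+	   jQuery.expr.pseudos.inline = function( elem ) {
+	       return window.getComputedStyle( elem ).display === "inline";
+	   };
+
+	   // argumented pseudo: createPseudo marks the function so the
+	   // fn[ expando ] branch above hands it the raw argument
+	   jQuery.expr.pseudos.dataFlag = jQuery.expr.createPseudo(function( name ) {
+	       return function( elem ) {
+	           return elem.hasAttribute && elem.hasAttribute( "data-" + name );
+	       };
+	   });
+
+	   // usage: $( "span:inline" ), $( "div:dataFlag(role)" ) */
+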
+ markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + // Don't keep the element (issue #299) + input[0] = null; + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? 
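+
+	/* Semantics of the positional pseudos above (jQuery extensions, not CSS):
+	   with five <li> elements, $( "li:eq(1)" ) keeps index 1, $( "li:lt(2)" )
+	   keeps indexes 0 and 1, and $( "li:gt(-3)" ) resolves the negative
+	   argument to 5 - 3 = 2 and keeps indexes 3 and 4, because the loop below
+	   advances past the resolved index before pushing. */
+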
argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
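+
+	/* A sketch of tokenize's output (Sizzle.tokenize is exported above, so this
+	   can be tried directly; the token shapes shown are approximate):
+
+	   var groups = Sizzle.tokenize( "div > a.link, span" );
+	   // groups.length === 2, one token list per comma-separated selector:
+	   // groups[0] ~ [ { type: "TAG", value: "div" },
+	   //               { type: ">", value: " > " },
+	   //               { type: "TAG", value: "a" },
+	   //               { type: "CLASS", value: ".link" } ]
+
+	   addCombinator, begun below, then turns each relative token (">", " ",
+	   "+", "~") into a function that walks elem[ dir ] and applies the matcher
+	   compiled from the tokens before it. */
+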
+ // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || (outerCache[ elem.uniqueID ] = {}); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( (oldCache = uniqueCache[ key ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? 
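+
+	/* A worked example of this split (illustrative): compiling "div:first span"
+	   reaches the positional branch at ":first", where j = ++i makes i = 2, so
+	   setMatcher receives the "div" element matcher as preFilter, "div" as the
+	   seed selector, the positional :first matcher itself, and a matcher
+	   compiled from " span" as postFinder. The effect is that :first is applied
+	   to the full set of "div" matches before descendants are sought. */
+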
"*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context === document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + if ( !context && elem.ownerDocument !== document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context || document, xml) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
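+
+	/* Usage sketch for the compile/select pair (both are exported on Sizzle, so
+	   this runs as written wherever Sizzle is loaded): precompiling skips
+	   repeated tokenization and matcher construction, and select() accepts the
+	   compiled function in place of a selector string.
+
+	   var compiled = Sizzle.compile( "section > h2" );
+	   var out = [];
+	   Sizzle.select( compiled, document, out ); // cached matcher, fresh results
+	*/
+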
0 : tokens.length;
+		while ( i-- ) {
+			token = tokens[i];
+
+			// Abort if we hit a combinator
+			if ( Expr.relative[ (type = token.type) ] ) {
+				break;
+			}
+			if ( (find = Expr.find[ type ]) ) {
+				// Search, expanding context for leading sibling combinators
+				if ( (seed = find(
+					token.matches[0].replace( runescape, funescape ),
+					rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
+				)) ) {
+
+					// If seed is empty or no tokens remain, we can return early
+					tokens.splice( i, 1 );
+					selector = seed.length && toSelector( tokens );
+					if ( !selector ) {
+						push.apply( results, seed );
+						return results;
+					}
+
+					break;
+				}
+			}
+		}
+	}
+
+	// Compile and execute a filtering function if one is not provided
+	// Provide `match` to avoid retokenization if we modified the selector above
+	( compiled || compile( selector, match ) )(
+		seed,
+		context,
+		!documentIsHTML,
+		results,
+		!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+	);
+	return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert(function( el ) {
+	// Should return 1, but returns 4 (following)
+	return el.compareDocumentPosition( document.createElement("fieldset") ) & 1;
+});
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert(function( el ) {
+	el.innerHTML = "<a href='#'></a>";
+	return el.firstChild.getAttribute("href") === "#" ;
+}) ) {
+	addHandle( "type|href|height|width", function( elem, name, isXML ) {
+		if ( !isXML ) {
+			return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+		}
+	});
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert(function( el ) {
+	el.innerHTML = "<input/>";
+	el.firstChild.setAttribute( "value", "" );
+	return el.firstChild.getAttribute( "value" ) === "";
+}) ) {
+	addHandle( "value", function( elem, name, isXML ) {
+		if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+			return elem.defaultValue;
+		}
+	});
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert(function( el ) {
+	return el.getAttribute("disabled") == null;
+}) ) {
+	addHandle( booleans, function( elem, name, isXML ) {
+		var val;
+		if ( !isXML ) {
+			return elem[ name ] === true ? name.toLowerCase() :
+				(val = elem.getAttributeNode( name )) && val.specified ?
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +}; +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Simple selector that can be filtered directly, removing non-Elements + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + // Complex selector, compare the two sets, removing non-Elements + qualifier = jQuery.filter( qualifier, elements ); + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not && elem.nodeType === 1; + } ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? 
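+
+	/* How winnow above surfaces in the public API (standard jQuery usage; the
+	   class names are hypothetical):
+
+	   $( "li" ).filter( ".active" );       // string qualifier
+	   $( "li" ).not( ".active" );          // same path, `not` flag inverted
+	   $( "li" ).filter(function( i, el ) { // function qualifier
+	       return el.children.length > 0;
+	   });
+	   $( "li" ).is( ":visible" );          // boolean: does any element survive?
+	*/
+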
jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? + jQuery( selector ) : + selector || [], + false + ).length; + } +} ); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + // Shortcut simple #id case for speed + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/, + + init = jQuery.fn.init = function( selector, context, root ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Method init() accepts an alternate rootjQuery + // so migrate can support jQuery.sub (gh-2101) + root = root || rootjQuery; + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector[ 0 ] === "<" && + selector[ selector.length - 1 ] === ">" && + selector.length >= 3 ) { + + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && ( match[ 1 ] || !context ) ) { + + // HANDLE: $(html) -> $(array) + if ( match[ 1 ] ) { + context = context instanceof jQuery ? context[ 0 ] : context; + + // Option to run scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[ 1 ], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + + // Properties of context are called as methods if possible + if ( jQuery.isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[ 2 ] ); + + if ( elem ) { + + // Inject the element directly into the jQuery object + this[ 0 ] = elem; + this.length = 1; + } + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || root ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this[ 0 ] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return root.ready !== undefined ? 
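+
+	/* The branches of init, as call sites (standard jQuery; the names ctx,
+	   someFunction and x.png are placeholders):
+
+	   $( "<p>hi</p>" );                          // HTML string -> parseHTML
+	   $( "<img/>", { src: "x.png", alt: "x" } ); // single tag + props object
+	   $( "#main" );                              // fast getElementById branch
+	   $( "li", ctx );                            // -> $( ctx ).find( "li" )
+	   $( someFunction );                         // -> the root.ready branch here
+	*/
+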
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
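+
+	/* Traversal sketch (standard jQuery API): dir() walks a single pointer
+	   repeatedly, which is what the parents()/nextAll() family defined below
+	   builds on, while closest() above stops at the first match:
+
+	   $( "a" ).closest( "nav" );  // nearest matching ancestor (or the a itself)
+	   $( "a" ).parents( "div" );  // all div ancestors, closest first
+	   $( "li" ).index();          // position among element siblings
+	*/
+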
parent : null; + }, + parents: function( elem ) { + return dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return siblings( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return siblings( elem.firstChild ); + }, + contents: function( elem ) { + if ( nodeName( elem, "iframe" ) ) { + return elem.contentDocument; + } + + // Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only + // Treat the template element as a regular one in browsers that + // don't support it. + if ( nodeName( elem, "template" ) ) { + elem = elem.content || elem; + } + + return jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var matched = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + matched = jQuery.filter( selector, matched ); + } + + if ( this.length > 1 ) { + + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + jQuery.uniqueSort( matched ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + matched.reverse(); + } + } + + return this.pushStack( matched ); + }; +} ); +var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g ); + + + +// Convert String-formatted options into Object-formatted ones +function createOptions( options ) { + var object = {}; + jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) { + object[ flag ] = true; + } ); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
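+
+	/* The option flags documented above, exercised (standard jQuery.Callbacks):
+
+	   var cb = jQuery.Callbacks( "once memory" );
+	   cb.add(function( v ) { console.log( "first", v ); });
+	   cb.fire( 1 );  // logs "first 1"
+	   cb.fire( 2 );  // ignored: "once" locks the list after the first fire
+	   cb.add(function( v ) { console.log( "late", v ); }); // logs "late 1"
+	   // the late add fires immediately with the memorized value ("memory")
+	*/
+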
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( jQuery.isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && jQuery.type( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && jQuery.isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && jQuery.isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = jQuery.isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
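+
+	/* Chaining sketch for the machinery that follows (standard jQuery 3.x
+	   Deferred, whose then() is Promises/A+ compliant and asynchronous):
+
+	   var d = jQuery.Deferred();
+	   d.then(function( v ) { return v + 1; })
+	    .done(function( v ) { console.log( v ); }); // logs 2 after resolution
+	   d.resolve( 1 );
+
+	   A handler returning a thenable is adopted, which is what the nested
+	   resolve() helper below implements. */
+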
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( jQuery.isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + jQuery.isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the master Deferred + master = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + master.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( master.state() === "pending" || + jQuery.isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return master.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), master.reject ); + } + + return master.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
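+
+/* Aggregation sketch for jQuery.when above (standard API; the URLs are
+   placeholders):
+
+   jQuery.when( jQuery.get( "/a" ), jQuery.get( "/b" ) ).done(function( a, b ) {
+       // a and b are each a [ data, textStatus, jqXHR ] argument list
+   });
+
+   With no arguments the master resolves immediately; with a single
+   non-thenable value it resolves with that value, mirroring Promise.resolve. */
+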
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ jQuery.camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ jQuery.camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ jQuery.camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( jQuery.camelCase ); + } else { + key = jQuery.camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? 
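+
+	/* Internal usage sketch for Data (the two instances created below,
+	   dataPriv and dataUser, keep jQuery's own bookkeeping separate from user
+	   .data() values; both key their caches through a per-instance expando).
+	   Given some element elem:
+
+	   dataUser.set( elem, "role", "tab" );
+	   dataUser.get( elem, "role" );     // "tab"
+	   dataUser.access( elem, "role" );  // read path when no value is given
+	   dataUser.remove( elem, "role" );  // also drops the expando when empty
+	*/
+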
+ [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
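+	// Usage sketch (illustrative, not part of the original source): the two
+	// stores keep user-facing data and internal bookkeeping on separate
+	// expandos, e.g.
+	//
+	//     jQuery.data( elem, "role", "admin" ); // public, backed by dataUser
+	//     jQuery._data( elem, "events" );       // internal, backed by dataPriv
+	//
+	// so user code cannot clobber jQuery's own per-element state.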
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + jQuery.contains( elem.ownerDocument, elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + +var swap = function( elem, options, callback, args ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.apply( elem, args || [] ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, + scale = 1, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + do { + + // If previous iteration zeroed out, double until we get *something*. 
+		// Use string for doubling so we don't accidentally see scale as unchanged below
+		scale = scale || ".5";
+
+		// Adjust and apply
+		initialInUnit = initialInUnit / scale;
+		jQuery.style( elem, prop, initialInUnit + unit );
+
+	// Update scale, tolerating zero or NaN from tween.cur()
+	// Break the loop if scale is unchanged or perfect, or if we've just had enough.
+	} while (
+		scale !== ( scale = currentValue() / initial ) && scale !== 1 && --maxIterations
+	);
+	}
+
+	if ( valueParts ) {
+		initialInUnit = +initialInUnit || +initial || 0;
+
+		// Apply relative offset (+=/-=) if specified
+		adjusted = valueParts[ 1 ] ?
+			initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
+			+valueParts[ 2 ];
+		if ( tween ) {
+			tween.unit = unit;
+			tween.start = initialInUnit;
+			tween.end = adjusted;
+		}
+	}
+	return adjusted;
+}
+
+
+var defaultDisplayMap = {};
+
+function getDefaultDisplay( elem ) {
+	var temp,
+		doc = elem.ownerDocument,
+		nodeName = elem.nodeName,
+		display = defaultDisplayMap[ nodeName ];
+
+	if ( display ) {
+		return display;
+	}
+
+	temp = doc.body.appendChild( doc.createElement( nodeName ) );
+	display = jQuery.css( temp, "display" );
+
+	temp.parentNode.removeChild( temp );
+
+	if ( display === "none" ) {
+		display = "block";
+	}
+	defaultDisplayMap[ nodeName ] = display;
+
+	return display;
+}
+
+function showHide( elements, show ) {
+	var display, elem,
+		values = [],
+		index = 0,
+		length = elements.length;
+
+	// Determine new display value for elements that need to change
+	for ( ; index < length; index++ ) {
+		elem = elements[ index ];
+		if ( !elem.style ) {
+			continue;
+		}
+
+		display = elem.style.display;
+		if ( show ) {
+
+			// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
+			// check is required in this first loop unless we have a nonempty display value (either
+			// inline or about-to-be-restored)
+			if ( display === "none" ) {
+				values[ index ] = dataPriv.get( elem, "display" ) || null;
+				if ( !values[ index ] ) {
+					elem.style.display = "";
+				}
+			}
+			if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
+				values[ index ] = getDefaultDisplay( elem );
+			}
+		} else {
+			if ( display !== "none" ) {
+				values[ index ] = "none";
+
+				// Remember what we're overwriting
+				dataPriv.set( elem, "display", display );
+			}
+		}
+	}
+
+	// Set the display of the elements in a second loop to avoid constant reflow
+	for ( index = 0; index < length; index++ ) {
+		if ( values[ index ] != null ) {
+			elements[ index ].style.display = values[ index ];
+		}
+	}
+
+	return elements;
+}
+
+jQuery.fn.extend( {
+	show: function() {
+		return showHide( this, true );
+	},
+	hide: function() {
+		return showHide( this );
+	},
+	toggle: function( state ) {
+		if ( typeof state === "boolean" ) {
+			return state ? this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]+)/i );
+
+var rscriptType = ( /^$|\/(?:java|ecma)script/i );
+
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// Support: IE <=9 only
+	option: [ 1, "<select multiple='multiple'>", "</select>" ],
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+	_default: [ 0, "", "" ]
+};
+
+// Support: IE <=9 only
+wrapMap.optgroup = wrapMap.option;
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+
+function getAll( context, tag ) {
+
+	// Support: IE <=9 - 11 only
+	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
+	var ret;
+
+	if ( typeof context.getElementsByTagName !== "undefined" ) {
+		ret = context.getElementsByTagName( tag || "*" );
+
+	} else if ( typeof context.querySelectorAll !== "undefined" ) {
+		ret = context.querySelectorAll( tag || "*" );
+
+	} else {
+		ret = [];
+	}
+
+	if ( tag === undefined || tag && nodeName( context, tag ) ) {
+		return jQuery.merge( [ context ], ret );
+	}
+
+	return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+	var i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		dataPriv.set(
+			elems[ i ],
+			"globalEval",
+			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
+		);
+	}
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+	var elem, tmp, tag, wrap, contains, j,
+		fragment = context.createDocumentFragment(),
+		nodes = [],
+		i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		elem = elems[ i ];
+
+		if ( elem || elem === 0 ) {
+
+			// Add nodes directly
+			if ( jQuery.type( elem ) === "object" ) {
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+			// Convert non-html into a text node
+			} else if ( !rhtml.test( elem ) ) {
+				nodes.push( context.createTextNode( elem ) );
+
+			// Convert html into DOM nodes
+			} else {
+				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+				// Deserialize a standard representation
+				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+				wrap = wrapMap[ tag ] || wrapMap._default;
+				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+				// Descend through wrappers to the right content
+				j = wrap[ 0 ];
+				while ( j-- ) {
+					tmp = tmp.lastChild;
+				}
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, tmp.childNodes );
+
+				// Remember the top-level container
+				tmp = fragment.firstChild;
+
+				// Ensure the created nodes are orphaned (#12392)
+				tmp.textContent = "";
+			}
+		}
+	}
+
+	// Remove wrapper from fragment
+	fragment.textContent = "";
+
+	i = 0;
+	while ( ( elem = nodes[ i++ ] ) ) {
+
+		// Skip elements already in the context collection (trac-4087)
+		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+			if ( ignored ) {
+				ignored.push( elem );
+			}
+			continue;
+		}
+
+		contains = jQuery.contains( elem.ownerDocument, elem );
+
+		// Append to fragment
+		tmp = getAll( fragment.appendChild( elem ), "script" );
+
+		// Preserve script evaluation history
+		if ( contains ) {
+			setGlobalEval( tmp );
+		}
+
+		// Capture executables
+		if ( scripts ) {
+			j = 0;
+			while ( ( elem = tmp[ j++ ] ) ) {
+				if ( rscriptType.test( elem.type || "" ) ) {
+					scripts.push( elem );
+				}
+			}
+		}
+	}
+
+	return fragment;
+}
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+} )();
+var documentElement = document.documentElement;
+
+
+
+var
+	rkeyEvent = /^key/,
+	rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
+	rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// Support: IE <=9 only
+// See #13393 for more info
+function safeActiveElement() {
+	try {
+		return document.activeElement;
+	} catch ( err ) { }
+}
+
+function on( elem, types, selector, data, fn, one ) {
+	var origFn, type;
+
+	// Types can be a map of types/handlers
+	if ( typeof types === "object" ) {
+
+		// ( types-Object, selector, data )
+		if ( typeof selector !== "string" ) {
+
+			// ( types-Object, data )
+			data = data || selector;
+			selector = undefined;
+		}
+		for ( type in types ) {
+			on( elem, type, selector, data, types[ type ], one );
+		}
+		return elem;
+	}
+
+	if ( data == null && fn == null ) {
+
+		// ( types, fn )
+		fn = selector;
+		data = selector = undefined;
+	} else if ( fn == null ) {
+		if ( typeof selector === "string" ) {
+
+			// ( types, selector, fn )
+			fn = data;
+			data = undefined;
+		} else {
+
+			// ( types, data, fn )
+			fn = data;
+			data = selector;
+			selector = undefined;
+		}
+	}
+	if ( fn === false ) {
+		fn = returnFalse;
+	} else if ( !fn ) {
+		return elem;
+	}
+
+	if ( one === 1 ) {
+		origFn = fn;
+		fn = function( event ) {
+
+			// Can use an empty set, since event contains the info
+			jQuery().off( event );
+			return origFn.apply( this, arguments );
+		};
+
+		// Use same guid so caller can remove using origFn
+		fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+	}
+	return elem.each( function() {
+		jQuery.event.add( this, types, fn, data, selector );
+	} );
+}
+
+/*
+ * Helper functions for managing events -- not part of the public interface.
+ * Props to Dean Edwards' addEvent library for many of the ideas.
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = {}; + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? + jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." 
) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + // Make a writable jQuery.Event from the native event object + var event = jQuery.event.fix( nativeEvent ); + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + handlers = ( dataPriv.get( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or 2) have namespace(s) + // a subset or equal to those in the bound event (both can have no namespace). 
+ if ( !event.rnamespace || event.rnamespace.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle || + handleObj.handler ).apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( ( event.result = ret ) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var i, handleObj, sel, matchedHandlers, matchedSelectors, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + if ( delegateCount && + + // Support: IE <=9 + // Black-hole SVG instance trees (trac-13180) + cur.nodeType && + + // Support: Firefox <=42 + // Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861) + // https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click + // Support: IE 11 only + // ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343) + !( event.type === "click" && event.button >= 1 ) ) { + + for ( ; cur !== this; cur = cur.parentNode || this ) { + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) { + matchedHandlers = []; + matchedSelectors = {}; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matchedSelectors[ sel ] === undefined ) { + matchedSelectors[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) > -1 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matchedSelectors[ sel ] ) { + matchedHandlers.push( handleObj ); + } + } + if ( matchedHandlers.length ) { + handlerQueue.push( { elem: cur, handlers: matchedHandlers } ); + } + } + } + } + + // Add the remaining (directly-bound) handlers + cur = this; + if ( delegateCount < handlers.length ) { + handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } ); + } + + return handlerQueue; + }, + + addProp: function( name, hook ) { + Object.defineProperty( jQuery.Event.prototype, name, { + enumerable: true, + configurable: true, + + get: jQuery.isFunction( hook ) ? + function() { + if ( this.originalEvent ) { + return hook( this.originalEvent ); + } + } : + function() { + if ( this.originalEvent ) { + return this.originalEvent[ name ]; + } + }, + + set: function( value ) { + Object.defineProperty( this, name, { + enumerable: true, + configurable: true, + writable: true, + value: value + } ); + } + } ); + }, + + fix: function( originalEvent ) { + return originalEvent[ jQuery.expando ] ? 
+ originalEvent : + new jQuery.Event( originalEvent ); + }, + + special: { + load: { + + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + this.focus(); + return false; + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( this.type === "checkbox" && this.click && nodeName( this, "input" ) ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + + which: function( event ) { + var button = event.button; + + // Add which for key events + if ( event.which == null && rkeyEvent.test( event.type ) ) { + return event.charCode != null ? event.charCode : event.keyCode; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) { + if ( button & 1 ) { + return 1; + } + + if ( button & 2 ) { + return 3; + } + + if ( button & 4 ) { + return 2; + } + + return 0; + } + + return event.which; + } +}, jQuery.event.addProp ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). +jQuery.each( { + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mouseenter/leave call the handler if related is outside the target. 
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event ) dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	/* eslint-disable max-len */
+
+	// See https://github.com/eslint/eslint/issues/3229
+	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,
+
+	/* eslint-enable */
+
+	// Support: IE <=10 - 11, Edge 12 - 13
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+	rscriptTypeMasked = /^true\/(.*)/,
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( ">tbody", elem )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	var match = rscriptTypeMasked.exec( elem.type );
+
+	if ( match ) {
+		elem.type = match[ 1 ];
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, pdataCur, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.access( src );
+		pdataCur = dataPriv.set( dest, pdataOld );
+		events = pdataOld.events;
+
+		if ( events ) {
+			delete pdataCur.handle;
+			pdataCur.events = {};
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2. Copy user data
+	if ( dataUser.hasData( src ) ) {
+		udataOld = dataUser.access( src );
+		udataCur = jQuery.extend( {}, udataOld );
+
+		dataUser.set( dest, udataCur );
+	}
+}
+
+// Fix IE bugs, see support tests
+function fixInput( src, dest ) {
+	var nodeName = dest.nodeName.toLowerCase();
+
+	// Fails to persist the checked state of a cloned checkbox or radio button.
+ if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = concat.apply( [], args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + isFunction = jQuery.isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( isFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( isFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
		jQuery.filter( selector, elem ) : elem,
+		i = 0;
+
+	for ( ; ( node = nodes[ i ] ) != null; i++ ) {
+		if ( !keepData && node.nodeType === 1 ) {
+			jQuery.cleanData( getAll( node ) );
+		}
+
+		if ( node.parentNode ) {
+			if ( keepData && jQuery.contains( node.ownerDocument, node ) ) {
+				setGlobalEval( getAll( node, "script" ) );
+			}
+			node.parentNode.removeChild( node );
+		}
+	}
+
+	return elem;
+}
+
+jQuery.extend( {
+	htmlPrefilter: function( html ) {
+		return html.replace( rxhtmlTag, "<$1></$2>" );
+	},
+
+	clone: function( elem, dataAndEvents, deepDataAndEvents ) {
+		var i, l, srcElements, destElements,
+			clone = elem.cloneNode( true ),
+			inPage = jQuery.contains( elem.ownerDocument, elem );
+
+		// Fix IE cloning issues
+		if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
+				!jQuery.isXMLDoc( elem ) ) {
+
+			// We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
+			destElements = getAll( clone );
+			srcElements = getAll( elem );
+
+			for ( i = 0, l = srcElements.length; i < l; i++ ) {
+				fixInput( srcElements[ i ], destElements[ i ] );
+			}
+		}
+
+		// Copy the events from the original to the clone
+		if ( dataAndEvents ) {
+			if ( deepDataAndEvents ) {
+				srcElements = srcElements || getAll( elem );
+				destElements = destElements || getAll( clone );
+
+				for ( i = 0, l = srcElements.length; i < l; i++ ) {
+					cloneCopyEvent( srcElements[ i ], destElements[ i ] );
+				}
+			} else {
+				cloneCopyEvent( elem, clone );
+			}
+		}
+
+		// Preserve script evaluation history
+		destElements = getAll( clone, "script" );
+		if ( destElements.length > 0 ) {
+			setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
+		}
+
+		// Return the cloned set
+		return clone;
+	},
+
+	cleanData: function( elems ) {
+		var data, elem, type,
+			special = jQuery.event.special,
+			i = 0;
+
+		for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
+			if ( acceptData( elem ) ) {
+				if ( ( data = elem[ dataPriv.expando ] ) ) {
+					if ( data.events ) {
+						for ( type in data.events ) {
+							if ( special[ type ] ) {
+								jQuery.event.remove( elem, type );
+
+							// This is a shortcut to avoid jQuery.event.remove's overhead
+							} else {
+								jQuery.removeEvent( elem, type, data.handle );
+							}
+						}
+					}
+
+					// Support: Chrome <=35 - 45+
+					// Assign undefined instead of using delete, see Data#remove
+					elem[ dataPriv.expando ] = undefined;
+				}
+				if ( elem[ dataUser.expando ] ) {
+
+					// Support: Chrome <=35 - 45+
+					// Assign undefined instead of using delete, see Data#remove
+					elem[ dataUser.expando ] = undefined;
+				}
+			}
+		}
+	}
+} );
+
+jQuery.fn.extend( {
+	detach: function( selector ) {
+		return remove( this, selector, true );
+	},
+
+	remove: function( selector ) {
+		return remove( this, selector );
+	},
+
+	text: function( value ) {
+		return access( this, function( value ) {
+			return value === undefined ?
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rmargin = ( /^margin/ ); + +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + div.style.cssText = + "box-sizing:border-box;" + + "position:relative;display:block;" + + "margin:auto;border:1px;padding:1px;" + + "top:1%;width:50%"; + div.innerHTML = ""; + documentElement.appendChild( container ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = divStyle.marginLeft === "2px"; + boxSizingReliableVal = divStyle.width === "4px"; + + // Support: Android 4.0 - 4.3 only + // Some styles come back with percentage values, even though they shouldn't + div.style.marginRight = "50%"; + pixelMarginRightVal = divStyle.marginRight === "4px"; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + var pixelPositionVal, boxSizingReliableVal, pixelMarginRightVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + container.style.cssText = "border:0;width:8px;height:0;top:0;left:-9999px;" + + "padding:0;margin-top:1px;position:absolute"; + container.appendChild( div ); + + jQuery.extend( support, { + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelMarginRight: function() { + computeStyleTests(); + return pixelMarginRightVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( 
ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. + // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelMarginRight() && rnumnonpx.test( ret ) && rmargin.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }, + + cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style; + +// Return a css property mapped to a potentially vendor prefixed property +function vendorPropName( name ) { + + // Shortcut for names that are not vendor prefixed + if ( name in emptyStyle ) { + return name; + } + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a property mapped along what jQuery.cssProps suggests or to +// a vendor prefixed property. +function finalPropName( name ) { + var ret = jQuery.cssProps[ name ]; + if ( !ret ) { + ret = jQuery.cssProps[ name ] = vendorPropName( name ) || name; + } + return ret; +} + +function setPositiveNumber( elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function augmentWidthOrHeight( elem, name, extra, isBorderBox, styles ) { + var i, + val = 0; + + // If we already have the right measurement, avoid augmentation + if ( extra === ( isBorderBox ? "border" : "content" ) ) { + i = 4; + + // Otherwise initialize for horizontal or vertical properties + } else { + i = name === "width" ? 
1 : 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + val += jQuery.css( elem, extra + cssExpand[ i ], true, styles ); + } + + if ( isBorderBox ) { + + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // At this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } else { + + // At this point, extra isn't content, so add padding + val += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // At this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with computed style + var valueIsBorderBox, + styles = getStyles( elem ), + val = curCSS( elem, name, styles ), + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Computed unit is not pixels. Stop here and return. + if ( rnumnonpx.test( val ) ) { + return val; + } + + // Check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && + ( support.boxSizingReliable() || val === elem.style[ name ] ); + + // Fall back to offsetWidth/Height when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + if ( val === "auto" ) { + val = elem[ "offset" + name[ 0 ].toUpperCase() + name.slice( 1 ) ]; + } + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + + // Use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + "float": "cssFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + if ( type === "number" ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = jQuery.camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. + ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? 
+ swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + } ) : + getWidthOrHeight( elem, name, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = extra && getStyles( elem ), + subtract = extra && augmentWidthOrHeight( + elem, + name, + extra, + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + styles + ); + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ name ] = value; + value = jQuery.css( elem, name ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && + ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || + jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = jQuery.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 13 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( 
restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( jQuery.isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + jQuery.proxy( result.stop, result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://html.spec.whatwg.org/multipage/infrastructure.html#strip-and-collapse-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( jQuery.isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + if ( typeof value === "string" && value ) { + classes = value.match( rnothtmlwhite ) || []; + + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value; + + if ( typeof stateVal === "boolean" && type === "string" ) { + return stateVal ? 
this.addClass( value ) : this.removeClass( value ); + } + + if ( jQuery.isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( type === "string" ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = value.match( rnothtmlwhite ) || []; + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, isFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 
2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || {} )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && jQuery.isFunction( elem[ type ] ) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup contextmenu" ).split( " " ), + function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; +} ); + +jQuery.fn.extend( { + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +} ); + + + + +support.focusin = "onfocusin" in window; + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = jQuery.now(); + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. 
+ try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) { + xml = undefined; + } + + if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = jQuery.isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
jQuery.makeArray( elements ) : this; + } ) + .filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ) + .map( function( i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( jQuery.isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport 
); + } + } ); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + 
// If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match == null ? null : match; + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 13 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available, append data to url + if ( s.data ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce++ ) + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + + +jQuery._evalUrl = function( url ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + "throws": true + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( jQuery.isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + 
contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( isFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. + if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? 
+ { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( "<script>" ).prop( { + charset: s.scriptCharset, + src: s.url + } ).on( "load error", callback = function( evt ) { + script.remove(); + callback = undefined; + if ( evt ) { + complete( evt.type === "error" ? 404 : 200, evt.type ); + } + } ); + + // Use native DOM manipulation to avoid our domManip AJAX trickery + document.head.appendChild( script[ 0 ] ); + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} );
[docs/_build/html/genindex.html (new file): the generated alphabetical index page, sections A–Z; entries omitted]
\ No newline at end of file diff --git a/docs/_build/html/index.html b/docs/_build/html/index.html new file mode 100644 index 0000000..59e617c --- /dev/null +++ b/docs/_build/html/index.html @@ -0,0 +1,104 @@ + Welcome to pyFTS’s documentation! — pyFTS 1.2.3 documentation
+
+
+
+ +
+

Welcome to pyFTS’s documentation!

+
+
+
+
+

Indices and tables

+ +
+ + +
+
+
+ +
+
\ No newline at end of file diff --git a/docs/_build/html/modules.html b/docs/_build/html/modules.html new file mode 100644 index 0000000..b3b622d --- /dev/null +++ b/docs/_build/html/modules.html @@ -0,0 +1,187 @@ + pyFTS — pyFTS 1.2.3 documentation
+
+
+
+ +
+

pyFTS

+
+ +
+
+ + +
+
+
+ +
+
\ No newline at end of file diff --git a/docs/_build/html/objects.inv b/docs/_build/html/objects.inv new file mode 100644 index 0000000..97d751f Binary files /dev/null and b/docs/_build/html/objects.inv differ diff --git a/docs/_build/html/py-modindex.html b/docs/_build/html/py-modindex.html new file mode 100644 index 0000000..1326b2b --- /dev/null +++ b/docs/_build/html/py-modindex.html @@ -0,0 +1,526 @@ + Python Module Index — pyFTS 1.2.3 documentation
+
+
+
+ + +

Python Module Index

+ +
+ p +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 
+ p
+ pyFTS +
    + pyFTS.benchmarks +
    + pyFTS.benchmarks.arima +
    + pyFTS.benchmarks.benchmarks +
    + pyFTS.benchmarks.knn +
    + pyFTS.benchmarks.Measures +
    + pyFTS.benchmarks.naive +
    + pyFTS.benchmarks.parallel_benchmarks +
    + pyFTS.benchmarks.quantreg +
    + pyFTS.benchmarks.ResidualAnalysis +
    + pyFTS.benchmarks.Util +
    + pyFTS.common +
    + pyFTS.common.Composite +
    + pyFTS.common.FLR +
    + pyFTS.common.flrg +
    + pyFTS.common.fts +
    + pyFTS.common.FuzzySet +
    + pyFTS.common.Membership +
    + pyFTS.common.SortedCollection +
    + pyFTS.common.Transformations +
    + pyFTS.common.tree +
    + pyFTS.common.Util +
    + pyFTS.conf +
    + pyFTS.data +
    + pyFTS.data.AirPassengers +
    + pyFTS.data.artificial +
    + pyFTS.data.common +
    + pyFTS.data.Enrollments +
    + pyFTS.data.henon +
    + pyFTS.data.INMET +
    + pyFTS.data.logistic_map +
    + pyFTS.data.lorentz +
    + pyFTS.data.mackey_glass +
    + pyFTS.data.NASDAQ +
    + pyFTS.data.rossler +
    + pyFTS.data.SONDA +
    + pyFTS.data.SP500 +
    + pyFTS.data.sunspots +
    + pyFTS.data.TAIEX +
    + pyFTS.models +
    + pyFTS.models.chen +
    + pyFTS.models.cheng +
    + pyFTS.models.ensemble +
    + pyFTS.models.ensemble.ensemble +
    + pyFTS.models.ensemble.multiseasonal +
    + pyFTS.models.hofts +
    + pyFTS.models.hwang +
    + pyFTS.models.ifts +
    + pyFTS.models.ismailefendi +
    + pyFTS.models.multivariate +
    + pyFTS.models.multivariate.common +
    + pyFTS.models.multivariate.FLR +
    + pyFTS.models.multivariate.flrg +
    + pyFTS.models.multivariate.mvfts +
    + pyFTS.models.multivariate.variable +
    + pyFTS.models.nonstationary +
    + pyFTS.models.nonstationary.common +
    + pyFTS.models.nonstationary.cvfts +
    + pyFTS.models.nonstationary.flrg +
    + pyFTS.models.nonstationary.honsfts +
    + pyFTS.models.nonstationary.nsfts +
    + pyFTS.models.nonstationary.partitioners +
    + pyFTS.models.nonstationary.perturbation +
    + pyFTS.models.nonstationary.util +
    + pyFTS.models.pwfts +
    + pyFTS.models.sadaei +
    + pyFTS.models.seasonal +
    + pyFTS.models.seasonal.cmsfts +
    + pyFTS.models.seasonal.common +
    + pyFTS.models.seasonal.msfts +
    + pyFTS.models.seasonal.partitioner +
    + pyFTS.models.seasonal.SeasonalIndexer +
    + pyFTS.models.seasonal.sfts +
    + pyFTS.models.song +
    + pyFTS.models.yu +
    + pyFTS.partitioners +
    + pyFTS.partitioners.CMeans +
    + pyFTS.partitioners.Entropy +
    + pyFTS.partitioners.FCM +
    + pyFTS.partitioners.Grid +
    + pyFTS.partitioners.Huarng +
    + pyFTS.partitioners.parallel_util +
    + pyFTS.partitioners.partitioner +
    + pyFTS.partitioners.Util +
    + pyFTS.probabilistic +
    + pyFTS.probabilistic.kde +
    + pyFTS.probabilistic.ProbabilityDistribution +
+ + +
+
+
+ +
+
\ No newline at end of file diff --git a/docs/_build/html/pyFTS.benchmarks.html b/docs/_build/html/pyFTS.benchmarks.html new file mode 100644 index 0000000..aec567f --- /dev/null +++ b/docs/_build/html/pyFTS.benchmarks.html @@ -0,0 +1,1633 @@ + pyFTS.benchmarks package — pyFTS 1.2.3 documentation
+
+
+
+ +
+

pyFTS.benchmarks package

+
+

Submodules

+
+
+

pyFTS.benchmarks.Measures module

+

pyFTS module for common benchmark metrics

+
+
+pyFTS.benchmarks.Measures.BoxLjungStatistic(data, h)[source]
+

Q Statistic for Ljung–Box test

+ +++ + + + + + +
Parameters:
    +
  • data
  • +
  • h
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.Measures.BoxPierceStatistic(data, h)[source]
+

Q Statistic for Box-Pierce test

+ +++ + + + + + +
Parameters:
    +
  • data
  • +
  • h
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.Measures.TheilsInequality(targets, forecasts)[source]
+

Theil’s Inequality Coefficient

+ +++ + + + + + +
Parameters:
    +
  • targets
  • +
  • forecasts
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.Measures.UStatistic(targets, forecasts)[source]
+

Theil’s U Statistic

+ +++ + + + + + +
Parameters:
    +
  • targets
  • +
  • forecasts
  • +
+
Returns:

+
+
+ +
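Both Theil measures documented above have standard closed forms. The following is a minimal NumPy sketch of the usual definitions (the U1 inequality coefficient and the U2 statistic); it illustrates the formulas rather than reproducing the library's exact code:

import numpy as np

def theil_u1(targets, forecasts):
    # Theil's inequality coefficient U1 in [0, 1]; 0 is a perfect forecast.
    t = np.asarray(targets, dtype=float)
    f = np.asarray(forecasts, dtype=float)
    rmse = np.sqrt(np.mean((t - f) ** 2))
    return rmse / (np.sqrt(np.mean(t ** 2)) + np.sqrt(np.mean(f ** 2)))

def theil_u2(targets, forecasts):
    # Theil's U2: relative errors of the forecast against the naive
    # no-change forecast; U2 < 1 means better than the naive forecast.
    t = np.asarray(targets, dtype=float)
    f = np.asarray(forecasts, dtype=float)
    num = np.sum(((f[1:] - t[1:]) / t[:-1]) ** 2)
    den = np.sum(((t[1:] - t[:-1]) / t[:-1]) ** 2)
    return np.sqrt(num / den)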
+
+pyFTS.benchmarks.Measures.acf(data, k)[source]
+

Autocorrelation function estimative

+ +++ + + + + + +
Parameters:
    +
  • data
  • +
  • k
  • +
+
Returns:

+
+
+ +
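The acf estimate just documented is the building block of the Box-Pierce and Ljung–Box statistics above: both sum squared sample autocorrelations of the residuals up to lag h. A sketch of the textbook formulas, not the library's exact code:

import numpy as np

def acf(data, k):
    # Sample autocorrelation of `data` at lag k.
    x = np.asarray(data, dtype=float)
    d = x - x.mean()
    num = np.sum(d[k:] * d[:len(x) - k]) if k > 0 else np.sum(d * d)
    return num / np.sum(d * d)

def box_pierce(data, h):
    # Q = n * sum_{k=1..h} acf_k^2
    n = len(data)
    return n * sum(acf(data, k) ** 2 for k in range(1, h + 1))

def ljung_box(data, h):
    # Q = n(n+2) * sum_{k=1..h} acf_k^2 / (n - k)
    n = len(data)
    return n * (n + 2) * sum(acf(data, k) ** 2 / (n - k) for k in range(1, h + 1))

Under the white-noise null hypothesis both statistics are approximately chi-squared distributed with h degrees of freedom, which is how they are compared against the chi-squared distribution in the ResidualAnalysis module below.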
+
+pyFTS.benchmarks.Measures.brier_score(targets, densities)[source]
+

Brier (1950). “Verification of Forecasts Expressed in Terms of Probability”. Monthly Weather Review. 78: 1–3.

+
+ +
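Following Brier (1950), the score averages, over the test set, the squared difference between the forecast probability of each bin and the 0/1 indicator of the target falling in that bin. A sketch assuming each density is a plain dict mapping equally spaced bin lower-edges to probabilities (the real API uses ProbabilityDistribution objects; the dict form is an illustrative stand-in):

import numpy as np

def brier_score(targets, densities):
    # densities[i]: dict {bin_lower_edge: probability} for target i.
    scores = []
    for y, dist in zip(targets, densities):
        bins = sorted(dist)
        width = bins[1] - bins[0]          # assumes equally spaced bins
        score = sum((dist[b] - (1.0 if b <= y < b + width else 0.0)) ** 2
                    for b in bins)
        scores.append(score)
    return float(np.mean(scores))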
+
+pyFTS.benchmarks.Measures.coverage(targets, forecasts)[source]
+

Percent of the target values that fall inside the forecasted intervals

+
+ +
+
+pyFTS.benchmarks.Measures.crps(targets, densities)[source]
+

Continuous Ranked Probability Score

+ +++ + + + + + +
Parameters:
    +
  • targets – a list with the target values
  • +
  • densities – a list with pyFTS.probabilistic.ProbabilityDistribution objects
  • +
+
Returns:

float

+
+
+ +
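Conceptually, CRPS integrates the squared difference between the forecast CDF and the Heaviside step CDF of the observed value (the role of the heavyside_cdf helper below). A discretized sketch over a shared grid, taking plain CDF arrays instead of ProbabilityDistribution objects:

import numpy as np

def crps(targets, cdfs, bins):
    # cdfs[i] is the forecast CDF for target i evaluated on the grid `bins`.
    bins = np.asarray(bins, dtype=float)
    dx = bins[1] - bins[0]                      # assumes an equally spaced grid
    total = 0.0
    for y, cdf in zip(targets, cdfs):
        step = (bins >= y).astype(float)        # Heaviside CDF of the observation
        total += np.sum((np.asarray(cdf) - step) ** 2) * dx
    return total / len(targets)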
+
+pyFTS.benchmarks.Measures.get_distribution_statistics(data, model, **kwargs)[source]
+

Get CRPS statistic and time for a forecasting model

+ +++ + + + + + +
Parameters:
    +
  • data – test data
  • +
  • model – FTS model with probabilistic forecasting capability
  • +
  • kwargs
  • +
+
Returns:

a list with the CRPS and execution time

+
+
+ +
+
+pyFTS.benchmarks.Measures.get_interval_statistics(data, model, **kwargs)[source]
+

Condense all measures for interval forecasters

+ +++ + + + + + +
Parameters:
    +
  • data – test data
  • +
  • model – FTS model with interval forecasting capability
  • +
  • kwargs
  • +
+
Returns:

a list with the sharpness, resolution, coverage, .05 pinball mean,

+
+

.25 pinball mean, .75 pinball mean and .95 pinball mean.

+
+ +
+
+pyFTS.benchmarks.Measures.get_point_statistics(data, model, **kwargs)[source]
+

Condense all measures for point forecasters

+ +++ + + + + + +
Parameters:
    +
  • data – test data
  • +
  • model – FTS model with point forecasting capability
  • +
  • kwargs
  • +
+
Returns:

a list with the RMSE, SMAPE and U Statistic

+
+
+ +
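A typical call site, sketched with placeholder names for a fitted model and a held-out test series:

from pyFTS.benchmarks import Measures

# `model` is any trained pyFTS model with point forecasting capability,
# `test` is the held-out part of the time series.
rmse, smape, u = Measures.get_point_statistics(test, model)
print("RMSE={:.4f} SMAPE={:.4f} U={:.4f}".format(rmse, smape, u))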
+
+pyFTS.benchmarks.Measures.heavyside(bin, target)[source]
+
+ +
+
+pyFTS.benchmarks.Measures.heavyside_cdf(bins, targets)[source]
+
+ +
+
+pyFTS.benchmarks.Measures.mape(targets, forecasts)[source]
+

Mean Absolute Percentage Error (MAPE)

+ +++ + + + + + +
Parameters:
    +
  • targets
  • +
  • forecasts
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.Measures.mape_interval(targets, forecasts)[source]
+
+ +
+
+pyFTS.benchmarks.Measures.pinball(tau, target, forecast)[source]
+

Pinball loss function. Measures the distance of the forecast to the tau-quantile of the target

+ +++ + + + + + +
Parameters:
    +
  • tau – quantile value in the range (0,1)
  • +
  • target
  • +
  • forecast
  • +
+
Returns:

float, distance of forecast to the tau-quantile of the target

+
+
+ +
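The pinball (quantile) loss penalizes deviations asymmetrically around the tau-quantile; pinball_mean below simply averages it over a series. A direct sketch of the standard definition:

def pinball(tau, target, forecast):
    # tau-weighted when the forecast undershoots, (1 - tau)-weighted otherwise.
    if target >= forecast:
        return (target - forecast) * tau
    return (forecast - target) * (1 - tau)

# For tau = 0.05, forecasting above the target is cheap, below it is expensive:
print(pinball(0.05, 10.0, 12.0))   # overshoot by 2  -> 2 * 0.95 = 1.9
print(pinball(0.05, 10.0, 8.0))    # undershoot by 2 -> 2 * 0.05 = 0.1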
+
+pyFTS.benchmarks.Measures.pinball_mean(tau, targets, forecasts)[source]
+

Mean pinball loss value of the forecast for a given tau-quantile of the targets

+ +++ + + + + + +
Parameters:
    +
  • tau – quantile value in the range (0,1)
  • +
  • targets – list of target values
  • +
  • forecasts – list of prediction intervals
  • +
+
Returns:

float, the pinball loss mean for tau quantile

+
+
+ +
+
+pyFTS.benchmarks.Measures.pmf_to_cdf(density)[source]
+
+ +
+
+pyFTS.benchmarks.Measures.resolution(forecasts)[source]
+

Resolution - Standard deviation of the interval sizes

+
+ +
+
+pyFTS.benchmarks.Measures.rmse(targets, forecasts)[source]
+

Root Mean Squared Error

+ +++ + + + + + +
Parameters:
    +
  • targets
  • +
  • forecasts
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.Measures.rmse_interval(targets, forecasts)[source]
+

Root Mean Squared Error

+ +++ + + + + + +
Parameters:
    +
  • targets
  • +
  • forecasts
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.Measures.sharpness(forecasts)[source]
+

Sharpness - Mean size of the intervals

+
+ +
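Sharpness, resolution and coverage (all documented above) jointly summarize interval forecasts: the goal is narrow intervals that still contain the targets. A sketch following the docstring definitions, with intervals as (lower, upper) pairs:

import numpy as np

def sharpness(forecasts):
    # Mean width of the forecast intervals.
    return float(np.mean([hi - lo for lo, hi in forecasts]))

def resolution(forecasts):
    # Spread (standard deviation) of the interval widths.
    return float(np.std([hi - lo for lo, hi in forecasts]))

def coverage(targets, forecasts):
    # Fraction of targets falling inside their forecast interval.
    hits = [lo <= y <= hi for y, (lo, hi) in zip(targets, forecasts)]
    return float(np.mean(hits))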
+
+pyFTS.benchmarks.Measures.smape(targets, forecasts, type=2)[source]
+

Symmetric Mean Absolute Percentage Error (SMAPE)

+ +++ + + + + + +
Parameters:
    +
  • targets
  • +
  • forecasts
  • +
  • type
  • +
+
Returns:

+
+
+ +
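The point-error measures above share one vectorized shape. A sketch of the common textbook definitions (for smape, the variant that divides by the mean of |target| and |forecast|, matching type=2):

import numpy as np

def rmse(targets, forecasts):
    t, f = np.asarray(targets, float), np.asarray(forecasts, float)
    return np.sqrt(np.mean((t - f) ** 2))

def mape(targets, forecasts):
    t, f = np.asarray(targets, float), np.asarray(forecasts, float)
    return np.mean(np.abs((t - f) / t)) * 100

def smape(targets, forecasts):
    t, f = np.asarray(targets, float), np.asarray(forecasts, float)
    return np.mean(np.abs(t - f) / ((np.abs(t) + np.abs(f)) / 2)) * 100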
+
+pyFTS.benchmarks.Measures.winkler_mean(tau, targets, forecasts)[source]
+

Mean Winkler score value of the forecast for a given tau-quantile of the targets

+ +++ + + + + + +
Parameters:
    +
  • tau – quantile value in the range (0,1)
  • +
  • targets – list of target values
  • +
  • forecasts – list of prediction intervals
  • +
+
Returns:

float, the Winkler score mean for tau quantile

+
+
+ +
+
+pyFTS.benchmarks.Measures.winkler_score(tau, target, forecast)[source]
+
R. L. Winkler, A Decision-Theoretic Approach to Interval Estimation, J. Am. Stat. Assoc. 67 (337) (1972) 187–191. doi:10.2307/2284720.
+
+ +
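The Winkler (1972) score referenced above rewards narrow intervals but adds a penalty, scaled by 2/tau, whenever the target escapes the interval. A sketch of the standard definition for a central (1 - tau) interval:

def winkler_score(tau, target, forecast):
    # forecast is a (lower, upper) prediction interval at nominal level 1 - tau.
    lower, upper = forecast
    width = upper - lower
    if target < lower:
        return width + (2.0 / tau) * (lower - target)   # below-interval penalty
    if target > upper:
        return width + (2.0 / tau) * (target - upper)   # above-interval penalty
    return width                                        # target inside: width only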
+
+

pyFTS.benchmarks.ResidualAnalysis module

+

Residual Analysis methods

+
+
+pyFTS.benchmarks.ResidualAnalysis.chi_squared(q, h)[source]
+

Chi-Squared value

+ +++ + + + + + +
Parameters:
    +
  • q
  • +
  • h
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.ResidualAnalysis.compare_residuals(data, models)[source]
+

Compare the residual statistics of several models

+ +++ + + + + + +
Parameters:
    +
  • data – test data
  • +
  • models
  • +
+
Returns:

a Pandas dataframe with the Box-Ljung statistic for each model

+
+
+ +
+
+pyFTS.benchmarks.ResidualAnalysis.plotResiduals(targets, models, tam=[8, 8], save=False, file=None)[source]
+

Plot residuals and statistics

+ +++ + + + + + +
Parameters:
    +
  • targets
  • +
  • models
  • +
  • tam
  • +
  • save
  • +
  • file
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.ResidualAnalysis.plot_residuals(targets, models, tam=[8, 8], save=False, file=None)[source]
+
+ +
+
+pyFTS.benchmarks.ResidualAnalysis.residuals(targets, forecasts, order=1)[source]
+

First order residuals

+
+ +
+
+pyFTS.benchmarks.ResidualAnalysis.single_plot_residuals(targets, forecasts, order, tam=[8, 8], save=False, file=None)[source]
+
+ +
+
+

pyFTS.benchmarks.Util module

+

Facilities for pyFTS Benchmark module

+
+
+pyFTS.benchmarks.Util.analytic_tabular_dataframe(dataframe)[source]
+
+ +
+
+pyFTS.benchmarks.Util.analytical_data_columns(experiments)[source]
+
+ +
+
+pyFTS.benchmarks.Util.base_dataframe_columns()[source]
+
+ +
+
+pyFTS.benchmarks.Util.cast_dataframe_to_synthetic(infile, outfile, experiments, type)[source]
+
+ +
+
+pyFTS.benchmarks.Util.cast_dataframe_to_synthetic_interval(df, data_columns)[source]
+
+ +
+
+pyFTS.benchmarks.Util.cast_dataframe_to_synthetic_point(df, data_columns)[source]
+
+ +
+
+pyFTS.benchmarks.Util.cast_dataframe_to_synthetic_probabilistic(df, data_columns)[source]
+
+ +
+
+pyFTS.benchmarks.Util.check_ignore_list(b, ignore)[source]
+
+ +
+
+pyFTS.benchmarks.Util.check_replace_list(m, replace)[source]
+
+ +
+
+pyFTS.benchmarks.Util.create_benchmark_tables(conn)[source]
+
+ +
+
+pyFTS.benchmarks.Util.extract_measure(dataframe, measure, data_columns)[source]
+
+ +
+
+pyFTS.benchmarks.Util.find_best(dataframe, criteria, ascending)[source]
+
+ +
+
+pyFTS.benchmarks.Util.get_dataframe_from_bd(file, filter)[source]
+
+ +
+
+pyFTS.benchmarks.Util.insert_benchmark(data, conn)[source]
+
+ +
+
+pyFTS.benchmarks.Util.interval_dataframe_analytic_columns(experiments)[source]
+
+ +
+
+pyFTS.benchmarks.Util.interval_dataframe_synthetic_columns()[source]
+
+ +
+
+pyFTS.benchmarks.Util.open_benchmark_db(name)[source]
+
+ +
+
+pyFTS.benchmarks.Util.plot_dataframe_interval(file_synthetic, file_analytic, experiments, tam, save=False, file=None, sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'], sort_ascend=[True, False, True, True], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+pyFTS.benchmarks.Util.plot_dataframe_interval_pinball(file_synthetic, file_analytic, experiments, tam, save=False, file=None, sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'], sort_ascend=[True, False, True, True], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+pyFTS.benchmarks.Util.plot_dataframe_point(file_synthetic, file_analytic, experiments, tam, save=False, file=None, sort_columns=['UAVG', 'RMSEAVG', 'USTD', 'RMSESTD'], sort_ascend=[1, 1, 1, 1], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+pyFTS.benchmarks.Util.plot_dataframe_probabilistic(file_synthetic, file_analytic, experiments, tam, save=False, file=None, sort_columns=['CRPS1AVG', 'CRPS2AVG', 'CRPS1STD', 'CRPS2STD'], sort_ascend=[True, True, True, True], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+pyFTS.benchmarks.Util.point_dataframe_analytic_columns(experiments)[source]
+
+ +
+
+pyFTS.benchmarks.Util.point_dataframe_synthetic_columns()[source]
+
+ +
+
+pyFTS.benchmarks.Util.probabilistic_dataframe_analytic_columns(experiments)[source]
+
+ +
+
+pyFTS.benchmarks.Util.probabilistic_dataframe_synthetic_columns()[source]
+
+ +
+
+pyFTS.benchmarks.Util.process_common_data(dataset, tag, type, job)[source]
+
+ +
+
+pyFTS.benchmarks.Util.save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, synthetic, times, q05, q25, q75, q95, steps, method)[source]
+
+ +
+
+pyFTS.benchmarks.Util.save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u, steps, method)[source]
+

Create a dataframe to store the benchmark results

+ +++ + + + + + +
Parameters:
    +
  • experiments – dictionary with the execution results
  • +
  • file
  • +
  • objs
  • +
  • rmse
  • +
  • save
  • +
  • synthetic
  • +
  • smape
  • +
  • times
  • +
  • u
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.Util.save_dataframe_probabilistic(experiments, file, objs, crps, times, save, synthetic, steps, method)[source]
+

Save benchmark results for m-step ahead probabilistic forecasters +:param experiments: +:param file: +:param objs: +:param crps: +:param times: +:param save: +:param synthetic: +:param steps: +:param method: +:return:

+
+ +
+
+pyFTS.benchmarks.Util.scale(data, params)[source]
+
+ +
+
+pyFTS.benchmarks.Util.scale_params(data)[source]
+
+ +
+
+pyFTS.benchmarks.Util.stats(measure, data)[source]
+
+ +
+
+pyFTS.benchmarks.Util.tabular_dataframe_columns()[source]
+
+ +
+
+pyFTS.benchmarks.Util.unified_scaled_interval(experiments, tam, save=False, file=None, sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'], sort_ascend=[True, False, True, True], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+pyFTS.benchmarks.Util.unified_scaled_interval_pinball(experiments, tam, save=False, file=None, sort_columns=['COVAVG', 'SHARPAVG', 'COVSTD', 'SHARPSTD'], sort_ascend=[True, False, True, True], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+pyFTS.benchmarks.Util.unified_scaled_point(experiments, tam, save=False, file=None, sort_columns=['UAVG', 'RMSEAVG', 'USTD', 'RMSESTD'], sort_ascend=[1, 1, 1, 1], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+pyFTS.benchmarks.Util.unified_scaled_probabilistic(experiments, tam, save=False, file=None, sort_columns=['CRPSAVG', 'CRPSSTD'], sort_ascend=[True, True], save_best=False, ignore=None, replace=None)[source]
+
+ +
+
+

pyFTS.benchmarks.arima module

+
+
+class pyFTS.benchmarks.arima.ARIMA(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

Façade for statsmodels.tsa.arima_model

+
+
+ar(data)[source]
+
+ +
+
+forecast(ndata, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+forecast_ahead_distribution(data, steps, **kwargs)[source]
+

Probabilistic forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_ahead_interval(ndata, steps, **kwargs)[source]
+

Interval forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+forecast_distribution(data, **kwargs)[source]
+

Probabilistic forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_interval(data, **kwargs)[source]
+

Interval forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+ma(data)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

pyFTS.benchmarks.benchmarks module

+

Benchmarks methods for FTS methods

+
+
+pyFTS.benchmarks.benchmarks.SelecaoSimples_MenorRMSE(original, parameters, modelo)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.compareModelsPlot(original, models_fo, models_ho)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.compareModelsTable(original, models_fo, models_ho)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.get_benchmark_interval_methods()[source]
+

Return all non-FTS methods for point_to_interval forecasting

+
+ +
+
+pyFTS.benchmarks.benchmarks.get_benchmark_point_methods()[source]
+

Return all non-FTS methods for point forecasting

+
+ +
+
+pyFTS.benchmarks.benchmarks.get_benchmark_probabilistic_methods()[source]
+

Return all non-FTS methods for probabilistic forecasting

+
+ +
+
+pyFTS.benchmarks.benchmarks.get_interval_methods()[source]
+

Return all FTS methods for point_to_interval forecasting

+
+ +
+
+pyFTS.benchmarks.benchmarks.get_point_methods()[source]
+

Return all FTS methods for point forecasting

+
+ +
+
+pyFTS.benchmarks.benchmarks.get_probabilistic_methods()[source]
+

Return all FTS methods for probabilistic forecasting

+
+ +
+
+pyFTS.benchmarks.benchmarks.pftsExploreOrderAndPartitions(data, save=False, file=None)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.plotCompared(original, forecasts, labels, title)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.plot_compared_intervals_ahead(original, models, colors, distributions, time_from, time_to, intervals=True, save=False, file=None, tam=[20, 5], resolution=None, cmap='Blues', linewidth=1.5)[source]
+

Plot the forecasts of several one step ahead models, by point or by interval

+ +++ + + + + + +
Parameters:
    +
  • original – Original time series data (list)
  • +
  • models – List of models to compare
  • +
  • colors – List of models colors
  • +
  • distributions – True to plot a distribution
  • +
  • time_from – index of the data point where the ahead forecasting starts
  • +
  • time_to – number of steps ahead to forecast
  • +
  • interpol – Fill space between distribution plots
  • +
  • save – Save the picture on file
  • +
  • file – Filename to save the picture
  • +
  • tam – Size of the picture
  • +
  • resolution
  • +
  • cmap – Color map to be used on distribution plot
  • +
  • option – Distribution type to be passed for models
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.benchmarks.plot_compared_series(original, models, colors, typeonlegend=False, save=False, file=None, tam=[20, 5], points=True, intervals=True, linewidth=1.5)[source]
+

Plot the forecasts of several one step ahead models, by point or by interval

+ +++ + + + + + +
Parameters:
    +
  • original – Original time series data (list)
  • +
  • models – List of models to compare
  • +
  • colors – List of models colors
  • +
  • typeonlegend – Add the type of forecast (point / interval) on legend
  • +
  • save – Save the picture on file
  • +
  • file – Filename to save the picture
  • +
  • tam – Size of the picture
  • +
  • points – True to plot the point forecasts, False otherwise
  • +
  • intervals – True to plot the interval forecasts, False otherwise
  • +
  • linewidth
  • +
+
Returns:

+
+
+ +
+
+pyFTS.benchmarks.benchmarks.plot_density_rectange(ax, cmap, density, fig, resolution, time_from, time_to)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.plot_distribution(ax, cmap, probabilitydist, fig, time_from, reference_data=None)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.plot_interval(axis, intervals, order, label, color='red', typeonlegend=False, ls='-', linewidth=1)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.plot_point(axis, points, order, label, color='red', ls='-', linewidth=1)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.plot_probability_distributions(pmfs, lcolors, tam=[15, 7])[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.print_distribution_statistics(original, models, steps, resolution)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.print_interval_statistics(original, models)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.print_point_statistics(data, models, externalmodels=None, externalforecasts=None, indexers=None)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.process_interval_jobs(dataset, tag, job, conn)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.process_point_jobs(dataset, tag, job, conn)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.process_probabilistic_jobs(dataset, tag, job, conn)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kwargs)[source]
+

Interval forecast benchmark function to be executed on cluster nodes

+ +++ + + + + + +
Parameters:
    +
  • mfts – FTS model
  • +
  • partitioner – Universe of Discourse partitioner
  • +
  • train_data – data used to train the model
  • +
  • test_data – data used to test the model
  • +
  • window_key – id of the sliding window
  • +
  • transformation – data transformation
  • +
  • indexer – seasonal indexer
  • +
+
Returns:

a dictionary with the benchmark results

+
+
+ +
+
+pyFTS.benchmarks.benchmarks.run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwargs)[source]
+

Point forecast benchmark function to be executed on cluster nodes

+ +++ + + + + + +
Parameters:
    +
  • mfts – FTS model
  • +
  • partitioner – Universe of Discourse partitioner
  • +
  • train_data – data used to train the model
  • +
  • test_data – data used to test the model
  • +
  • window_key – id of the sliding window
  • +
  • transformation – data transformation
  • +
  • indexer – seasonal indexer
  • +
+
Returns:

a dictionary with the benchmark results

+
+
+ +
+
+pyFTS.benchmarks.benchmarks.run_probabilistic(mfts, partitioner, train_data, test_data, window_key=None, **kwargs)[source]
+

Probabilistic forecast benchmark function to be executed on cluster nodes

+ +++ + + + + + +
Parameters:
    +
  • mfts – FTS model
  • +
  • partitioner – Universe of Discourse partitioner
  • +
  • train_data – data used to train the model
  • +
  • test_data – data used to test the model
  • +
  • steps
  • +
  • resolution
  • +
  • window_key – id of the sliding window
  • +
  • transformation – data transformation
  • +
  • indexer – seasonal indexer
  • +
+
Returns:

a dictionary with the benchmark results

+
+
+ +
+
+pyFTS.benchmarks.benchmarks.simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=None, tam=[10, 15], plotforecasts=False, elev=30, azim=144, intervals=False, parameters=None, partitioner=<class 'pyFTS.partitioners.Grid.GridPartitioner'>, transformation=None, indexer=None)[source]
+
+ +
+
+pyFTS.benchmarks.benchmarks.sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs)[source]
+

Sliding window benchmarks for FTS forecasters.

+

Each data window is split into train and test datasets. For each train split, a partitioner model is created for each combination of partitioning method and number of partitions. Then, for each partitioner, order, steps-ahead horizon and FTS method, a forecasting model is trained.

+

All trained models are then benchmarked on the test data and the metrics are stored in a sqlite3 database (identified by the ‘file’ parameter) for later analysis.

+

This whole process can be distributed over a dispy cluster by setting the ‘distributed’ attribute to true and passing the list of dispy nodes in the ‘nodes’ parameter.

+

The number of experiments is determined by the ‘windowsize’ and ‘inc’ parameters. A usage sketch is given after the parameter list below.

+ +++ + + + +
Parameters:
    +
  • data – test data
  • +
  • windowsize – size of sliding window
  • +
  • train – percentage of the sliding window data used to train the models
  • +
  • kwargs – dict, optional arguments
  • +
  • benchmark_methods – a list with non-FTS models to benchmark. The default is None.
  • +
  • benchmark_methods_parameters – a list with non-FTS model parameters. The default is None.
  • +
  • benchmark_models – A boolean value indicating if external FTS methods will be used in the benchmark. The default is False.
  • +
  • build_methods – A boolean value indicating if the default FTS methods will be used in the benchmark. The default is True.
  • +
  • dataset – the dataset name to identify the current set of benchmarks results on database.
  • +
  • distributed – A boolean value indicating if the forecasting procedure will be distributed in a dispy cluster. The default is False.
  • +
  • file – file path to save the results. The default is benchmarks.db.
  • +
  • inc – a float in the interval [0,1]: the percentage of windowsize by which to slide the window
  • +
  • methods – a list with FTS class names. The default depends on the forecasting type and contains the list of all FTS methods.
  • +
  • models – a list with prebuilt FTS objects. The default is None.
  • +
  • nodes – a list with the dispy cluster node addresses. The default is [127.0.0.1].
  • +
  • orders – a list with orders of the models (for high order models). The default is [1,2,3].
  • +
  • partitions – a list with the numbers of partitions on the Universe of Discourse. The default is [10].
  • +
  • partitioners_models – a list with prebuilt Universe of Discourse partitioners objects. The default is None.
  • +
  • partitioners_methods – a list with Universe of Discourse partitioners class names. The default is [partitioners.Grid.GridPartitioner].
  • +
  • progress – If true a progress bar will be displayed during the benchmarks. The default is False.
  • +
  • start – in multi-step forecasting, the index of the data point where forecasting starts. The default is 0.
  • +
  • steps_ahead – a list with the forecasting horizons, i. e., the number of steps ahead to forecast. The default is 1.
  • +
  • tag – a name to identify the current set of benchmarks results on database.
  • +
  • type – the forecasting type, one of these values: point (default), interval or distribution.
  • +
  • transformations – a list with data transformations to apply. The default is [None].
  • +
+
+
+ +
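The usage sketch referenced above. The dataset helper and partitioner are the ones documented elsewhere in this manual; the window size, orders, partitions and tags are illustrative choices only:

from pyFTS.benchmarks import benchmarks as bchmk
from pyFTS.data import TAIEX
from pyFTS.partitioners import Grid

data = TAIEX.get_data()

bchmk.sliding_window_benchmarks(
    data,
    windowsize=1000, train=0.8, inc=0.2,  # 1000-point windows, 80% train, slide by 20%
    orders=[1, 2, 3],                     # model orders to evaluate
    partitions=[10, 20],                  # Universe of Discourse partition counts
    partitioners_methods=[Grid.GridPartitioner],
    type='point',                         # or 'interval' / 'distribution'
    tag='demo', dataset='TAIEX',
    file='benchmarks.db')                 # sqlite3 file for later analysis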
+
+

pyFTS.benchmarks.distributed_benchmarks module

+
+
+

pyFTS.benchmarks.knn module

+
+
+class pyFTS.benchmarks.knn.KNearestNeighbors(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

K-Nearest Neighbors

+
+
+forecast_distribution(data, **kwargs)[source]
+

Probabilistic forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+knn(sample)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

pyFTS.benchmarks.naive module

+
+
+class pyFTS.benchmarks.naive.Naive(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

Naïve Forecasting method

+
+
+forecast(data, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+ +
+
+

pyFTS.benchmarks.parallel_benchmarks module

+

joblib-parallelized benchmarks for FTS methods

+
+
+pyFTS.benchmarks.parallel_benchmarks.ahead_sliding_window(data, windowsize, train, steps, resolution, models=None, partitioners=[<class 'pyFTS.partitioners.Grid.GridPartitioner'>], partitions=[10], max_order=3, transformation=None, indexer=None, dump=False, save=False, file=None, sintetic=False)[source]
+

Parallel sliding window benchmarks for FTS probabilistic forecasters +:param data: +:param windowsize: size of the sliding window +:param train: percentage of the sliding window data used to train the models +:param steps: +:param resolution: +:param models: FTS point forecasters +:param partitioners: Universe of Discourse partitioner +:param partitions: the max number of partitions on the Universe of Discourse +:param max_order: the max order of the models (for high order models) +:param transformation: data transformation +:param indexer: seasonal indexer +:param dump: +:param save: save results +:param file: file path to save the results +:param sintetic: if true, return only the average and standard deviation of the results +:return: DataFrame with the results

+
+ +
+
+pyFTS.benchmarks.parallel_benchmarks.interval_sliding_window(data, windowsize, train=0.8, models=None, partitioners=[<class 'pyFTS.partitioners.Grid.GridPartitioner'>], partitions=[10], max_order=3, transformation=None, indexer=None, dump=False, save=False, file=None, sintetic=False)[source]
+

Parallel sliding window benchmarks for FTS point_to_interval forecasters +:param data: +:param windowsize: size of the sliding window +:param train: percentage of the sliding window data used to train the models +:param models: FTS point forecasters +:param partitioners: Universe of Discourse partitioner +:param partitions: the max number of partitions on the Universe of Discourse +:param max_order: the max order of the models (for high order models) +:param transformation: data transformation +:param indexer: seasonal indexer +:param dump: +:param save: save results +:param file: file path to save the results +:param sintetic: if true, return only the average and standard deviation of the results +:return: DataFrame with the results

+
+ +
+
+pyFTS.benchmarks.parallel_benchmarks.point_sliding_window(data, windowsize, train=0.8, models=None, partitioners=[<class 'pyFTS.partitioners.Grid.GridPartitioner'>], partitions=[10], max_order=3, transformation=None, indexer=None, dump=False, save=False, file=None, sintetic=False)[source]
+

Parallel sliding window benchmarks for FTS point forecasters +:param data: +:param windowsize: size of the sliding window +:param train: percentage of the sliding window data used to train the models +:param models: FTS point forecasters +:param partitioners: Universe of Discourse partitioner +:param partitions: the max number of partitions on the Universe of Discourse +:param max_order: the max order of the models (for high order models) +:param transformation: data transformation +:param indexer: seasonal indexer +:param dump: +:param save: save results +:param file: file path to save the results +:param sintetic: if true, return only the average and standard deviation of the results +:return: DataFrame with the results

+
+ +
+
+pyFTS.benchmarks.parallel_benchmarks.run_ahead(mfts, partitioner, train_data, test_data, steps, resolution, transformation=None, indexer=None)[source]
+

Probabilistic m-step ahead forecast benchmark function to be executed on threads +:param mfts: FTS model +:param partitioner: Universe of Discourse partitioner +:param train_data: data used to train the model +:param test_data: data used to test the model +:param steps: +:param resolution: +:param transformation: data transformation +:param indexer: seasonal indexer +:return: a dictionary with the benchmark results

+
+ +
+
+pyFTS.benchmarks.parallel_benchmarks.run_interval(mfts, partitioner, train_data, test_data, transformation=None, indexer=None)[source]
+

Interval forecast benchmark function to be executed on threads +:param mfts: FTS model +:param partitioner: Universe of Discourse partitioner +:param train_data: data used to train the model +:param test_data: data used to test the model +:param window_key: id of the sliding window +:param transformation: data transformation +:param indexer: seasonal indexer +:return: a dictionary with the benchmark results

+
+ +
+
+pyFTS.benchmarks.parallel_benchmarks.run_point(mfts, partitioner, train_data, test_data, transformation=None, indexer=None)[source]
+

Point forecast benchmark function to be executed on threads

:param mfts: FTS model
:param partitioner: Universe of Discourse partitioner
:param train_data: data used to train the model
:param test_data: data used to test the model
:param transformation: data transformation
:param indexer: seasonal indexer
:return: a dictionary with the benchmark results

+
+ +
+
+

pyFTS.benchmarks.quantreg module

+
+
+class pyFTS.benchmarks.quantreg.QuantileRegression(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

Façade for statsmodels.regression.quantile_regression

+
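A minimal usage sketch, assuming the order and alpha keyword arguments control the autoregressive order and the quantile pair (these kwargs are assumptions, not confirmed by this page):

from pyFTS.data import TAIEX
from pyFTS.benchmarks import quantreg

data = TAIEX.get_data()

model = quantreg.QuantileRegression(order=2, alpha=0.25)  # assumed kwargs
model.fit(data[:800])

# Interval forecasts through the common FTS predict() facade
intervals = model.predict(data[800:900], type='interval')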
+
+forecast(ndata, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+forecast_ahead_distribution(ndata, steps, **kwargs)[source]
+

Probabilistic forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_ahead_interval(ndata, steps, **kwargs)[source]
+

Interval forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+forecast_distribution(ndata, **kwargs)[source]
+

Probabilistic forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_interval(ndata, **kwargs)[source]
+

Interval forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+interval_to_interval(data, lo_params, up_params)[source]
+
+ +
+
+linearmodel(data, params)[source]
+
+ +
+
+point_to_interval(data, lo_params, up_params)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

Module contents

+

pyFTS module for benchmarking the FTS models

+
+
+ + +
+
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.common.html b/docs/_build/html/pyFTS.common.html
new file mode 100644
index 0000000..91847f8
--- /dev/null
+++ b/docs/_build/html/pyFTS.common.html
@@ -0,0 +1,1864 @@
+
+
+
+ +
+

pyFTS.common package

+
+

Submodules

+
+
+

pyFTS.common.Composite module

+

Composite Fuzzy Sets

+
+
+class pyFTS.common.Composite.FuzzySet(name, superset=False)[source]
+

Bases: pyFTS.common.FuzzySet.FuzzySet

+

Composite Fuzzy Set

+
+
+append(mf, parameters)[source]
+

Adds a new membership function to the composition

+ +++ + + + + + +
Parameters:
    +
  • mf
  • +
  • parameters
  • +
+
Returns:

+
+
+ +
+
+append_set(set)[source]
+

Adds a fuzzy set to the composition

+ +++ + + + + + +
Parameters:
    +
  • set – the fuzzy set to be appended
  • +
+
Returns:

+
+
+ +
+
+membership(x)[source]
+

Calculate the membership value of a given input

+ +++ + + + + + +
Parameters:x – input value
Returns:membership value of x at this fuzzy set
+
+ +
+ +
+
+

pyFTS.common.FLR module

+

This module implements functions for Fuzzy Logical Relationship generation

+
+
+class pyFTS.common.FLR.FLR(LHS, RHS)[source]
+

Bases: object

+

Fuzzy Logical Relationship

+

Represents a temporal transition from the fuzzy set LHS at time t to the fuzzy set RHS at time t+1.

+
+
+LHS = None
+

Left Hand Side fuzzy set

+
+ +
+
+RHS = None
+

Right Hand Side fuzzy set

+
+ +
+ +
+
+class pyFTS.common.FLR.IndexedFLR(index, LHS, RHS)[source]
+

Bases: pyFTS.common.FLR.FLR

+

Season Indexed Fuzzy Logical Relationship

+
+
+index = None
+

seasonal index

+
+ +
+ +
+
+pyFTS.common.FLR.generate_high_order_recurrent_flr(fuzzyData)[source]
+

Create an ordered FLR set from a list of fuzzy sets with recurrence

+ +++ + + + + + +
Parameters:fuzzyData – ordered list of fuzzy sets
Returns:ordered list of FLR
+
+ +
+
+pyFTS.common.FLR.generate_indexed_flrs(sets, indexer, data, transformation=None, alpha_cut=0.0)[source]
+

Create a season-indexed ordered FLR set from a list of fuzzy sets with recurrence

+ +++ + + + + + +
Parameters:
    +
  • sets – fuzzy sets
  • +
  • indexer – seasonality indexer
  • +
  • data – original data
  • +
+
Returns:

ordered list of FLR

+
+
+ +
+
+pyFTS.common.FLR.generate_non_recurrent_flrs(fuzzyData)[source]
+

Create an ordered FLR set from a list of fuzzy sets without recurrence

+ +++ + + + + + +
Parameters:fuzzyData – ordered list of fuzzy sets
Returns:ordered list of FLR
+
+ +
+
+pyFTS.common.FLR.generate_recurrent_flrs(fuzzyData)[source]
+

Create an ordered FLR set from a list of fuzzy sets with recurrence

+ +++ + + + + + +
Parameters:fuzzyData – ordered list of fuzzy sets
Returns:ordered list of FLR
+
+ +
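Taken together, these helpers implement the classic fuzzyfication-then-FLR pipeline. A minimal sketch, assuming a Grid partitioner whose sets attribute can be handed to the fuzzyfication helpers of pyFTS.common.FuzzySet:

from pyFTS.data import Enrollments
from pyFTS.partitioners import Grid
from pyFTS.common import FuzzySet, FLR

data = Enrollments.get_data()
part = Grid.GridPartitioner(data=data, npart=10)

# Map each observation to its maximum-membership fuzzy set...
fuzzyfied = FuzzySet.fuzzyfy_series_old(data, part.sets)

# ...and derive the ordered A(t) -> A(t+1) relationships, with recurrence
flrs = FLR.generate_recurrent_flrs(fuzzyfied)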
+
+

pyFTS.common.FuzzySet module

+
+
+class pyFTS.common.FuzzySet.FuzzySet(name, mf, parameters, centroid, alpha=1.0, **kwargs)[source]
+

Bases: object

+

Fuzzy Set

+
+
+Z = None
+

Partition function with respect to the membership function

+
+ +
+
+alpha = None
+

The alpha cut value

+
+ +
+
+centroid = None
+

The fuzzy set center of mass (or midpoint)

+
+ +
+
+membership(x)[source]
+

Calculate the membership value of a given input

+ +++ + + + + + +
Parameters:x – input value
Returns:membership value of x at this fuzzy set
+
+ +
+
+mf = None
+

The membership function

+
+ +
+
+name = None
+

The fuzzy set name

+
+ +
+
+parameters = None
+

The parameters of the membership function

+
+ +
+
+partition_function(uod=None, nbins=100)[source]
+

Calculate the partition function over the membership function.

+ +++ + + + + + +
Parameters:
    +
  • uod
  • +
  • nbins
  • +
+
Returns:

+
+
+ +
+
+type = None
+

The fuzzy set type (common, composite, nonstationary, etc)

+
+ +
+
+variable = None
+

In multivariate time series, indicates to which variable this fuzzy set belongs

+
+ +
+ +
+
+pyFTS.common.FuzzySet.check_bounds(data, sets, ordered_sets)[source]
+
+ +
+
+pyFTS.common.FuzzySet.check_bounds_index(data, sets, ordered_sets)[source]
+
+ +
+
+pyFTS.common.FuzzySet.fuzzyfy_instance(inst, fuzzySets, ordered_sets=None)[source]
+

Calculate the membership values for a data point given fuzzy sets

+ +++ + + + + + +
Parameters:
    +
  • inst – data point
  • +
  • fuzzySets – dict of fuzzy sets
  • +
+
Returns:

array of membership values

+
+
+ +
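For instance, fuzzifying a single observation against a partitioner's fuzzy sets (assuming the partitioner's sets attribute is the dict of fuzzy sets these helpers expect):

from pyFTS.data import Enrollments
from pyFTS.partitioners import Grid
from pyFTS.common import FuzzySet

data = Enrollments.get_data()
part = Grid.GridPartitioner(data=data, npart=10)

mv = FuzzySet.fuzzyfy_instance(data[0], part.sets)                   # membership values
best = FuzzySet.get_maximum_membership_fuzzyset(data[0], part.sets)  # winning set
print(best.name)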
+
+pyFTS.common.FuzzySet.fuzzyfy_instances(data, fuzzySets, ordered_sets=None)[source]
+

Calculate the membership values for a list of data points given fuzzy sets

+ +++ + + + + + +
Parameters:
    +
  • inst – data point
  • +
  • fuzzySets – dict of fuzzy sets
  • +
+
Returns:

array of membership values

+
+
+ +
+
+pyFTS.common.FuzzySet.fuzzyfy_series(data, fuzzySets, method='maximum', alpha_cut=0.0)[source]
+
+ +
+
+pyFTS.common.FuzzySet.fuzzyfy_series_old(data, fuzzySets, method='maximum')[source]
+
+ +
+
+pyFTS.common.FuzzySet.get_fuzzysets(inst, fuzzySets, ordered_sets=None, alpha_cut=0.0)[source]
+

Return the fuzzy sets whose membership value for inst is greater than the alpha_cut

+ +++ + + + + + +
Parameters:
    +
  • inst – data point
  • +
  • fuzzySets – dict of fuzzy sets
  • +
  • alpha_cut – Minimal membership to be considered on fuzzyfication process
  • +
+
Returns:

array of membership values

+
+
+ +
+
+pyFTS.common.FuzzySet.get_maximum_membership_fuzzyset(inst, fuzzySets, ordered_sets=None)[source]
+

Fuzzify a data point, returning the fuzzy set with maximum membership value

+ +++ + + + + + +
Parameters:
    +
  • inst – data point
  • +
  • fuzzySets – dict of fuzzy sets
  • +
+
Returns:

fuzzy set with maximum membership

+
+
+ +
+
+pyFTS.common.FuzzySet.get_maximum_membership_fuzzyset_index(inst, fuzzySets)[source]
+

Fuzzify a data point, returning the fuzzy set with maximum membership value

+ +++ + + + + + +
Parameters:
    +
  • inst – data point
  • +
  • fuzzySets – dict of fuzzy sets
  • +
+
Returns:

index of the fuzzy set with maximum membership

+
+
+ +
+
+pyFTS.common.FuzzySet.grant_bounds(data, sets, ordered_sets)[source]
+
+ +
+
+pyFTS.common.FuzzySet.set_ordered(fuzzySets)[source]
+

Order a fuzzy set list by their centroids

+
+ +
+
+

pyFTS.common.Membership module

+

Membership functions for Fuzzy Sets

+
+
+pyFTS.common.Membership.bellmf(x, parameters)[source]
+

Bell shaped membership function

+ +++ + + + + + +
Parameters:
    +
  • x
  • +
  • parameters
  • +
+
Returns:

+
+
+ +
+
+pyFTS.common.Membership.gaussmf(x, parameters)[source]
+

Gaussian fuzzy membership function

+ +++ + + + + + +
Parameters:
    +
  • x – data point
  • +
  • parameters – a list with 2 real values (mean and variance)
  • +
+
Returns:

the membership value of x given the parameters

+
+
+ +
+
+pyFTS.common.Membership.sigmf(x, parameters)[source]
+

Sigmoid / Logistic membership function

+ +++ + + + + + +
Parameters:
    +
  • x
  • +
  • parameters – a list with 2 real values (smoothness and midpoint)
  • +
+
Returns:

+
+
+ +
+
+pyFTS.common.Membership.trapmf(x, parameters)[source]
+

Trapezoidal fuzzy membership function

+ +++ + + + + + +
Parameters:
    +
  • x – data point
  • +
  • parameters – a list with 4 real values
  • +
+
Returns:

the membership value of x given the parameters

+
+
+ +
+
+pyFTS.common.Membership.trimf(x, parameters)[source]
+

Triangular fuzzy membership function

+ +++ + + + + + +
Parameters:
    +
  • x – data point
  • +
  • parameters – a list with 3 real values
  • +
+
Returns:

the membership value of x given the parameters

+
+
+ +
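The membership functions can also be evaluated directly, which is handy for checking parameter conventions (trimf takes 3 values, trapmf 4, gaussmf mean and variance, as documented above):

from pyFTS.common import Membership

print(Membership.trimf(5.0, [0, 5, 10]))    # 1.0 at the triangle peak
print(Membership.trimf(2.5, [0, 5, 10]))    # 0.5 halfway up the left slope
print(Membership.gaussmf(0.0, [0.0, 1.0]))  # 1.0 at the mean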
+
+

pyFTS.common.SortedCollection module

+
+
+class pyFTS.common.SortedCollection.SortedCollection(iterable=(), key=None)[source]
+

Bases: object

+

Sequence sorted by a key function.

+

SortedCollection() is much easier to work with than using bisect() directly. It supports key functions like those used in sorted(), min(), and max(). The result of the key function call is saved so that keys can be searched efficiently.

+

Instead of returning an insertion-point which can be hard to interpret, the five find-methods return a specific item in the sequence. They can scan for exact matches, the last item less-than-or-equal to a key, or the first item greater-than-or-equal to a key.

Once found, an item’s ordinal position can be located with the index() method. New items can be added with the insert() and insert_right() methods. Old items can be deleted with the remove() method.

The usual sequence methods are provided to support indexing, slicing, length lookup, clearing, copying, forward and reverse iteration, contains checking, item counts, item removal, and a nice looking repr.

Finding and indexing are O(log n) operations while iteration and insertion are O(n). The initial sort is O(n log n).

The key function is stored in the ‘key’ attribute for easy introspection or so that you can assign a new key function (triggering an automatic re-sort).

In short, the class was designed to handle all of the common use cases for bisect but with a simpler API and support for key functions.

+
>>> from pprint import pprint
+>>> from operator import itemgetter
+
+
+
>>> s = SortedCollection(key=itemgetter(2))
+>>> for record in [
+...         ('roger', 'young', 30),
+...         ('angela', 'jones', 28),
+...         ('bill', 'smith', 22),
+...         ('david', 'thomas', 32)]:
+...     s.insert(record)
+
+
+
>>> pprint(list(s))         # show records sorted by age
+[('bill', 'smith', 22),
+ ('angela', 'jones', 28),
+ ('roger', 'young', 30),
+ ('david', 'thomas', 32)]
+
+
+
>>> s.find_le(29)           # find oldest person aged 29 or younger
+('angela', 'jones', 28)
+>>> s.find_lt(28)           # find oldest person under 28
+('bill', 'smith', 22)
+>>> s.find_gt(28)           # find youngest person over 28
+('roger', 'young', 30)
+
+
+
>>> r = s.find_ge(32)       # find youngest person aged 32 or older
+>>> s.index(r)              # get the index of their record
+3
+>>> s[3]                    # fetch the record at that index
+('david', 'thomas', 32)
+
+
+
>>> s.key = itemgetter(0)   # now sort by first name
+>>> pprint(list(s))
+[('angela', 'jones', 28),
+ ('bill', 'smith', 22),
+ ('david', 'thomas', 32),
+ ('roger', 'young', 30)]
+
+
+
+
+around(k)[source]
+
+ +
+
+between(ge, le)[source]
+
+ +
+
+clear()[source]
+
+ +
+
+copy()[source]
+
+ +
+
+count(item)[source]
+

Return number of occurrences of item

+
+ +
+
+find(k)[source]
+

Return first item with a key == k. Raise ValueError if not found.

+
+ +
+
+find_ge(k)[source]
+

Return first item with a key >= k. Raise ValueError if not found.

+
+ +
+
+find_gt(k)[source]
+

Return first item with a key > k. Raise ValueError if not found

+
+ +
+
+find_le(k)[source]
+

Return last item with a key <= k. Raise ValueError if not found.

+
+ +
+
+find_lt(k)[source]
+

Return last item with a key < k. Raise ValueError if not found.

+
+ +
+
+index(item)[source]
+

Find the position of an item. Raise ValueError if not found.

+
+ +
+
+insert(item)[source]
+

Insert a new item. If equal keys are found, add to the left

+
+ +
+
+insert_right(item)[source]
+

Insert a new item. If equal keys are found, add to the right

+
+ +
+
+inside(ge, le)[source]
+
+ +
+
+key
+

key function

+
+ +
+
+remove(item)[source]
+

Remove first occurrence of item. Raise ValueError if not found.

+
+ +
+ +
+
+

pyFTS.common.Transformations module

+

Common data transformation used on pre and post processing of the FTS

+
+
+class pyFTS.common.Transformations.AdaptiveExpectation(parameters)[source]
+

Bases: pyFTS.common.Transformations.Transformation

+

Adaptive Expectation post processing

+
+
+apply(data, param=None, **kwargs)[source]
+

Apply the transformation on input data

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with transformed data

+
+
+ +
+
+inverse(data, param, **kwargs)[source]
+
+++ + + + + + +
Parameters:
    +
  • data – transformed data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with inverse transformed data

+
+
+ +
+
+parameters
+
+ +
+ +
+
+class pyFTS.common.Transformations.BoxCox(plambda)[source]
+

Bases: pyFTS.common.Transformations.Transformation

+

Box-Cox power transformation

+
+
+apply(data, param=None, **kwargs)[source]
+

Apply the transformation on input data

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with transformed data

+
+
+ +
+
+inverse(data, param=None, **kwargs)[source]
+
+++ + + + + + +
Parameters:
    +
  • data – transformed data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with inverse transformed data

+
+
+ +
+
+parameters
+
+ +
+ +
+
+class pyFTS.common.Transformations.Differential(lag)[source]
+

Bases: pyFTS.common.Transformations.Transformation

+

Differentiation data transform

+
+
+apply(data, param=None, **kwargs)[source]
+

Apply the transformation on input data

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with transformed data

+
+
+ +
+
+inverse(data, param, **kwargs)[source]
+
+++ + + + + + +
Parameters:
    +
  • data – transformed data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with inverse transformed data

+
+
+ +
+
+parameters
+
+ +
+ +
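A common pre-processing pattern is to attach a first-order differencing transformation to both the partitioner and the model, so that fit/predict transparently apply and invert it. A sketch, assuming the transformation keyword of the Grid partitioner:

from pyFTS.data import TAIEX
from pyFTS.partitioners import Grid
from pyFTS.common import Transformations
from pyFTS.models import hofts

data = TAIEX.get_data()
tdiff = Transformations.Differential(1)

# Partition the differenced Universe of Discourse
part = Grid.GridPartitioner(data=data, npart=15, transformation=tdiff)

model = hofts.HighOrderFTS(partitioner=part, order=2)
model.append_transformation(tdiff)   # see pyFTS.common.fts.FTS
model.fit(data[:1000])
forecasts = model.predict(data[1000:1100])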
+
+class pyFTS.common.Transformations.Scale(min=0, max=1)[source]
+

Bases: pyFTS.common.Transformations.Transformation

+

Scale data inside an interval [min, max]

+
+
+apply(data, param=None, **kwargs)[source]
+

Apply the transformation on input data

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with transformed data

+
+
+ +
+
+inverse(data, param, **kwargs)[source]
+
+++ + + + + + +
Parameters:
    +
  • data – transformed data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with inverse transformed data

+
+
+ +
+
+parameters
+
+ +
+ +
+
+class pyFTS.common.Transformations.Transformation(**kwargs)[source]
+

Bases: object

+

Data transformation used on pre and post processing of the FTS

+
+
+apply(data, param, **kwargs)[source]
+

Apply the transformation on input data

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with transformed data

+
+
+ +
+
+inverse(data, param, **kwargs)[source]
+
+++ + + + + + +
Parameters:
    +
  • data – transformed data
  • +
  • param
  • +
  • kwargs
  • +
+
Returns:

numpy array with inverse transformed data

+
+
+ +
+ +
+
+pyFTS.common.Transformations.Z(original)[source]
+
+ +
+
+pyFTS.common.Transformations.aggregate(original, operation)[source]
+
+ +
+
+pyFTS.common.Transformations.roi(original)[source]
+
+ +
+
+pyFTS.common.Transformations.smoothing(original, lags)[source]
+
+ +
+
+

pyFTS.common.Util module

+

Common facilities for pyFTS

+
+
+pyFTS.common.Util.current_milli_time()
+
+ +
+
+pyFTS.common.Util.distributed_predict(model, parameters, nodes, data, num_batches)[source]
+
+ +
+
+pyFTS.common.Util.distributed_train(model, train_method, nodes, fts_method, data, num_batches=10, train_parameters={}, **kwargs)[source]
+
+ +
+
+pyFTS.common.Util.draw_sets_on_axis(axis, model, size)[source]
+
+ +
+
+pyFTS.common.Util.enumerate2(xs, start=0, step=1)[source]
+
+ +
+
+pyFTS.common.Util.load_env(file)[source]
+
+ +
+
+pyFTS.common.Util.load_obj(file)[source]
+

Load into memory an object stored on the filesystem. This function depends on the Dill package

+ +++ + + + + + +
Parameters:file – file name where the object is stored
Returns:object
+
+ +
+
+pyFTS.common.Util.persist_env(file)[source]
+

Persist an entire environment to a file. This function depends on the Dill package

+ +++ + + + +
Parameters:file – file name to store the environment
+
+ +
+
+pyFTS.common.Util.persist_obj(obj, file)[source]
+

Persist an object on the filesystem. This function depends on the Dill package

+ +++ + + + +
Parameters:
    +
  • obj – object on memory
  • +
  • file – file name to store the object
  • +
+
+
+ +
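For example, persisting a trained model and restoring it later (both helpers rely on the dill package, as noted above):

from pyFTS.data import Enrollments
from pyFTS.partitioners import Grid
from pyFTS.models import chen
from pyFTS.common import Util

data = Enrollments.get_data()
model = chen.ConventionalFTS(partitioner=Grid.GridPartitioner(data=data, npart=10))
model.fit(data)

Util.persist_obj(model, 'chen_enrollments.pkl')   # serialize to disk
model2 = Util.load_obj('chen_enrollments.pkl')    # restore the same model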
+
+pyFTS.common.Util.plot_rules(model, size=[5, 5], axis=None, rules_by_axis=None, columns=1)[source]
+
+ +
+
+pyFTS.common.Util.show_and_save_image(fig, file, flag, lgd=None)[source]
+

Show an image and save it to a file

+ +++ + + + +
Parameters:
    +
  • fig – Matplotlib Figure object
  • +
  • file – filename to save the picture
  • +
  • flag – if True the image will be saved
  • +
  • lgd – legend
  • +
+
+
+ +
+
+pyFTS.common.Util.simple_model_predict(model, data, parameters)[source]
+
+ +
+
+pyFTS.common.Util.simple_model_train(model, data, parameters)[source]
+
+ +
+
+pyFTS.common.Util.sliding_window(data, windowsize, train=0.8, inc=0.1, **kwargs)[source]
+

Sliding window method of cross validation for time series

+ +++ + + + + + +
Parameters:
    +
  • data – the entire dataset
  • +
  • windowsize – window size
  • +
  • train – percentage of the window used for training the models
  • +
  • inc – percentage of the window size used to slide the window forward
  • +
+
Returns:

window count, training set, test set

+
+
+ +
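A sketch of iterating the generator, assuming it yields (window index, training set, test set) tuples as described above:

from pyFTS.data import TAIEX
from pyFTS.common import Util

data = TAIEX.get_data()

# 1000-point windows, 80% for training, sliding forward by 50% of the window
for ct, train, test in Util.sliding_window(data, 1000, train=0.8, inc=0.5):
    print(ct, len(train), len(test))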
+
+pyFTS.common.Util.start_dispy_cluster(method, nodes)[source]
+
+ +
+
+pyFTS.common.Util.stop_dispy_cluster(cluster, http_server)[source]
+
+ +
+
+pyFTS.common.Util.uniquefilename(name)[source]
+
+ +
+
+

pyFTS.common.flrg module

+
+
+class pyFTS.common.flrg.FLRG(order, **kwargs)[source]
+

Bases: object

+

Fuzzy Logical Relationship Group

+

Group a set of FLRs with the same LHS. Represents the temporal patterns for time t+1 (the RHS fuzzy sets) when the LHS pattern is identified at time t.

+
+
+LHS = None
+

Left Hand Side of the rule

+
+ +
+
+RHS = None
+

Right Hand Side of the rule

+
+ +
+
+append_rhs(set, **kwargs)[source]
+
+ +
+
+get_key()[source]
+

Returns a unique identifier for this FLRG

+
+ +
+
+get_lower(sets)[source]
+

Returns the lower bound value for the RHS fuzzy sets

+ +++ + + + + + +
Parameters:sets – fuzzy sets
Returns:lower bound value
+
+ +
+
+get_membership(data, sets)[source]
+

Returns the membership value of the FLRG for the input data

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • sets – fuzzy sets
  • +
+
Returns:

the membership value

+
+
+ +
+
+get_midpoint(sets)[source]
+

Returns the midpoint value for the RHS fuzzy sets

+ +++ + + + + + +
Parameters:sets – fuzzy sets
Returns:the midpoint value
+
+ +
+
+get_midpoints(sets)[source]
+
+ +
+
+get_upper(sets)[source]
+

Returns the upper bound value for the RHS fuzzy sets

+ +++ + + + + + +
Parameters:sets – fuzzy sets
Returns:upper bound value
+
+ +
+
+order = None
+

Number of lags on LHS

+
+ +
+ +
+
+

pyFTS.common.fts module

+
+
+class pyFTS.common.fts.FTS(**kwargs)[source]
+

Bases: object

+

Fuzzy Time Series object model

+
+
+alpha_cut = None
+

A float with the minimal membership to be considered in the fuzzyfication process

+
+ +
+
+append_transformation(transformation)[source]
+
+ +
+
+apply_inverse_transformations(data, params=None, **kwargs)[source]
+

Apply the data transformations for data postprocessing

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • params – transformation parameters
  • +
  • updateUoD
  • +
  • kwargs
  • +
+
Returns:

postprocessed data

+
+
+ +
+
+apply_transformations(data, params=None, updateUoD=False, **kwargs)[source]
+

Apply the data transformations for data preprocessing

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • params – transformation parameters
  • +
  • updateUoD
  • +
  • kwargs
  • +
+
Returns:

preprocessed data

+
+
+ +
+
+auto_update = None
+

A boolean value indicating whether the model is incremental

+
+ +
+
+benchmark_only = None
+

A boolean value indicating whether the model is a façade for an external (non-FTS) model used in benchmarks or ensembles.

+
+ +
+
+clone_parameters(model)[source]
+

Import the parameters values from other model

+ +++ + + + +
Parameters:model
+
+ +
+
+detail = None
+

A string with the model detailed information

+
+ +
+
+fit(ndata, **kwargs)[source]
+

Fit the model’s parameters based on the training data.

+ +++ + + + +
Parameters:
    +
  • ndata – training time series data
  • +
  • kwargs
  • +
  • num_batches – split the training data in num_batches to save memory during the training process
  • +
  • save_model – save final model on disk
  • +
  • batch_save – save the model between each batch
  • +
  • file_path – path to save the model
  • +
  • distributed – boolean, indicate if the training procedure will be distributed in a dispy cluster
  • +
  • nodes – a list with the dispy cluster nodes addresses
  • +
+
+
+ +
+
+flrgs = None
+

The list of Fuzzy Logical Relationship Groups - FLRG

+
+ +
+
+forecast(data, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+forecast_ahead(data, steps, **kwargs)[source]
+

Point forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+forecast_ahead_distribution(data, steps, **kwargs)[source]
+

Probabilistic forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_ahead_interval(data, steps, **kwargs)[source]
+

Interval forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+forecast_distribution(data, **kwargs)[source]
+

Probabilistic forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_interval(data, **kwargs)[source]
+

Interval forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+fuzzy(data)[source]
+

Fuzzify a data point

+ +++ + + + + + +
Parameters:data – data point
Returns:maximum membership fuzzy set
+
+ +
+
+get_UoD()[source]
+
+ +
+
+has_interval_forecasting = None
+

A boolean value indicating if the model supports interval forecasting, default: False

+
+ +
+
+has_point_forecasting = None
+

A boolean value indicating if the model supports point forecasting, default: True

+
+ +
+
+has_probability_forecasting = None
+

A boolean value indicating if the model support probabilistic forecasting, default: False

+
+ +
+
+has_seasonality = None
+

A boolean value indicating if the model supports seasonal indexers, default: False

+
+ +
+
+indexer = None
+

A pyFTS.models.seasonal.Indexer object for indexing the time series data

+
+ +
+
+is_high_order = None
+

A boolean value indicating if the model support orders greater than 1, default: False

+
+ +
+
+is_multivariate = None
+

A boolean value indicating if the model support multivariate time series (Pandas DataFrame), default: False

+
+ +
+
+len_total()[source]
+
+ +
+
+max_lag = None
+

An integer indicating the largest lag used by the model. This value also indicates the minimum number of past lags needed to forecast a single step ahead

+
+ +
+
+merge(model)[source]
+

Merge the FLRG rules from other model

+ +++ + + + + + +
Parameters:model – source model
Returns:
+
+ +
+
+min_order = None
+

In high order models, this integer value indicates the minimal order supported for the model, default: 1

+
+ +
+
+name = None
+

A string with the model name

+
+ +
+
+order = None
+

An integer with the model order (the number of past lags used in forecasting)

+
+ +
+
+original_max = None
+

A float with the upper limit of the Universe of Discourse, the maximal value found on training data

+
+ +
+
+original_min = None
+

A float with the lower limit of the Universe of Discourse, the minimal value found on training data

+
+ +
+
+partitioner = None
+

A pyFTS.partitioners.Partitioner object with the Universe of Discourse partitioner used by the model. This is a mandatory dependency.

+
+ +
+
+predict(data, **kwargs)[source]
+

Forecast using trained model

+ +++ + + + + + +
Parameters:
    +
  • data – time series with minimal length to the order of the model
  • +
  • type – the forecasting type, one of these values: point (default), interval or distribution.
  • +
  • steps_ahead – the forecasting horizon, i.e., the number of steps ahead to forecast
  • +
  • start – in the multi step forecasting, the index of the data where to start forecasting
  • +
  • distributed – boolean, indicate if the forecasting procedure will be distributed in a dispy cluster
  • +
  • nodes – a list with the dispy cluster nodes addresses
  • +
+
Returns:

a numpy array with the forecasted data

+
+
+ +
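The methods above define the workflow shared by every model in the library: build a partitioner, fit, then predict. A minimal end-to-end example with Chen's conventional FTS:

from pyFTS.data import Enrollments
from pyFTS.partitioners import Grid
from pyFTS.models import chen

data = Enrollments.get_data()
part = Grid.GridPartitioner(data=data, npart=10)

model = chen.ConventionalFTS(partitioner=part)
model.fit(data)

forecasts = model.predict(data, type='point')   # one-step-ahead forecasts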
+
+sets = None
+

The list of fuzzy sets used on this model

+
+ +
+
+shortname = None
+

A string with a short name or alias for the model

+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+
+transformations = None
+

A list with the data transformations (common.Transformations) applied on model pre and post processing, default: []

+
+ +
+
+transformations_param = None
+

A list with the specific parameters for each data transformation

+
+ +
+
+uod_clip = None
+

Flag indicating if the test data will be clipped inside the training Universe of Discourse

+
+ +
+ +
+
+

pyFTS.common.tree module

+

Tree data structure

+
+
+class pyFTS.common.tree.FLRGTree[source]
+

Bases: object

+

Represents a FLRG set with a tree structure

+
+ +
+
+class pyFTS.common.tree.FLRGTreeNode(value)[source]
+

Bases: object

+

Tree node for the FLRG tree structure

+
+
+appendChild(child)[source]
+
+ +
+
+getChildren()[source]
+
+ +
+
+getStr(k)[source]
+
+ +
+
+paths(acc=[])[source]
+
+ +
+ +
+
+pyFTS.common.tree.build_tree_without_order(node, lags, level)[source]
+
+ +
+
+pyFTS.common.tree.flat(dados)[source]
+
+ +
+
+

Module contents

+
+
+ + +
+
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.data.html b/docs/_build/html/pyFTS.data.html
new file mode 100644
index 0000000..9332887
--- /dev/null
+++ b/docs/_build/html/pyFTS.data.html
@@ -0,0 +1,618 @@
+
+
+
+ +
+

pyFTS.data package

+
+

Submodules

+
+
+

pyFTS.data.AirPassengers module

+
+
+pyFTS.data.AirPassengers.get_data()[source]
+

Get a simple univariate time series.

+ +++ + + + +
Returns:numpy array
+
+ +
+
+pyFTS.data.AirPassengers.get_dataframe()[source]
+

Get the complete multivariate time series data.

+ +++ + + + +
Returns:Pandas DataFrame
+
+ +
+
+

pyFTS.data.Enrollments module

+
+
+pyFTS.data.Enrollments.get_data()[source]
+

Get a simple univariate time series.

+ +++ + + + +
Returns:numpy array
+
+ +
+
+pyFTS.data.Enrollments.get_dataframe()[source]
+
+ +
+
+

pyFTS.data.INMET module

+

INMET - Instituto Nacional Meteorologia / Brasil

+

Belo Horizonte station, from 2000-01-01 to 2012-12-31

+

Source: http://www.inmet.gov.br

+
+
+pyFTS.data.INMET.get_dataframe()[source]
+

Get the complete multivariate time series data.

+ +++ + + + +
Returns:Pandas DataFrame
+
+ +
+
+

pyFTS.data.NASDAQ module

+
+
+pyFTS.data.NASDAQ.get_data(field='avg')[source]
+

Get a simple univariate time series.

+ +++ + + + + + +
Parameters:field – the dataset field name to extract
Returns:numpy array
+
+ +
+
+pyFTS.data.NASDAQ.get_dataframe()[source]
+

Get the complete multivariate time series data.

+ +++ + + + +
Returns:Pandas DataFrame
+
+ +
+
+

pyFTS.data.SONDA module

+

SONDA - Sistema de Organização Nacional de Dados Ambientais, from INPE - Instituto Nacional de Pesquisas Espaciais, Brasil.

+

Brasilia station

+

Source: http://sonda.ccst.inpe.br/

+
+
+pyFTS.data.SONDA.get_data(field)[source]
+

Get a simple univariate time series.

+ +++ + + + + + +
Parameters:field – the dataset field name to extract
Returns:numpy array
+
+ +
+
+pyFTS.data.SONDA.get_dataframe()[source]
+

Get the complete multivariate time series data.

+ +++ + + + +
Returns:Pandas DataFrame
+
+ +
+
+

pyFTS.data.SP500 module

+
+
+pyFTS.data.SP500.get_data()[source]
+

Get the univariate time series data.

+ +++ + + + +
Returns:numpy array
+
+ +
+
+pyFTS.data.SP500.get_dataframe()[source]
+

Get the complete multivariate time series data.

+ +++ + + + +
Returns:Pandas DataFrame
+
+ +
+
+

pyFTS.data.TAIEX module

+
+
+pyFTS.data.TAIEX.get_data()[source]
+

Get the univariate time series data.

+ +++ + + + +
Returns:numpy array
+
+ +
+
+pyFTS.data.TAIEX.get_dataframe()[source]
+

Get the complete multivariate time series data.

+ +++ + + + +
Returns:Pandas DataFrame
+
+ +
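All dataset modules follow the same get_data/get_dataframe pattern; on first use the files are downloaded and cached locally (see pyFTS.data.common.get_dataframe below). For example:

from pyFTS.data import TAIEX, NASDAQ

taiex = TAIEX.get_data()           # numpy array with the index values
nasdaq = NASDAQ.get_data('avg')    # field selection, per the docstring above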
+
+

pyFTS.data.artificial module

+

Facilities to generate synthetic stochastic processes

+
+
+pyFTS.data.artificial.generate_gaussian_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=100, num=10, vmin=None, vmax=None)[source]
+

Generate data sampled from Gaussian distribution, with constant or linear changing parameters

+ +++ + + + + + +
Parameters:
    +
  • mu_ini – Initial mean
  • +
  • sigma_ini – Initial variance
  • +
  • mu_inc – Mean increment after ‘num’ samples
  • +
  • sigma_inc – Variance increment after ‘num’ samples
  • +
  • it – Number of iterations
  • +
  • num – Number of samples generated on each iteration
  • +
  • vmin – Lower bound value of generated data
  • +
  • vmax – Upper bound value of generated data
  • +
+
Returns:

A list of it*num float values

+
+
+ +
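For example, a non-stationary series whose mean drifts by 0.5 and variance by 0.02 every num=10 samples, over it=100 iterations (1000 points in total):

from pyFTS.data import artificial

data = artificial.generate_gaussian_linear(10, 1, 0.5, 0.02)
print(len(data))   # 1000 = it * num with the default arguments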
+
+pyFTS.data.artificial.generate_uniform_linear(min_ini, max_ini, min_inc, max_inc, it=100, num=10, vmin=None, vmax=None)[source]
+

Generate data sampled from Uniform distribution, with constant or linear changing bounds

+ +++ + + + + + +
Parameters:
    +
  • min_ini – Initial lower bound
  • +
  • max_ini – Initial upper bound
  • +
  • min_inc – Lower bound increment after ‘num’ samples
  • +
  • max_inc – Upper bound increment after ‘num’ samples
  • +
  • it – Number of iterations
  • +
  • num – Number of samples generated on each iteration
  • +
  • vmin – Lower bound value of generated data
  • +
  • vmax – Upper bound value of generated data
  • +
+
Returns:

A list of it*num float values

+
+
+ +
+
+pyFTS.data.artificial.random_walk(n=500, type='gaussian')[source]
+
+ +
+
+pyFTS.data.artificial.white_noise(n=500)[source]
+
+ +
+
+

pyFTS.data.common module

+
+
+pyFTS.data.common.get_dataframe(filename, url, sep=';', compression='infer')[source]
+

This method checks if filename already exists; if so, it reads the file and returns its data. If the file does not exist, it will be downloaded and decompressed.

+ +++ + + + + + +
Parameters:
    +
  • filename – dataset local filename
  • +
  • url – dataset internet URL
  • +
  • sep – CSV field separator
  • +
  • compression – type of compression
  • +
+
Returns:

Pandas dataset

+
+
+ +
+
+

pyFTS.data.henon module

+
+
+pyFTS.data.henon.get_data(var, a=1.4, b=0.3, initial_values=[1, 1], iterations=1000)[source]
+
+ +
+
+pyFTS.data.henon.get_dataframe(a=1.4, b=0.3, initial_values=[1, 1], iterations=1000)[source]
+
M. Hénon. “A two-dimensional mapping with a strange attractor”. Commun. Math. Phys. 50, 69-77 (1976)

dx/dt = a + by(t-1) - x(t-1)^2
dy/dt = x

+ +++ + + + + + +
Parameters:
    +
  • a – Equation coefficient
  • +
  • b – Equation coefficient
  • +
  • initial_values – numpy array with the initial values of x and y. Default: [1, 1]
  • +
  • iterations – number of iterations. Default: 1000
  • +
+
Returns:

Pandas DataFrame with the x and y values

+
+
+ +
+
+

pyFTS.data.logistic_map module

+
+
+pyFTS.data.logistic_map.get_data(r=4, initial_value=0.3, iterations=100)[source]
+

May, Robert M. (1976). “Simple mathematical models with very complicated dynamics”. +Nature. 261 (5560): 459–467. doi:10.1038/261459a0.

+

x(t) = r * x(t-1) * (1 - x(t-1))

+ +++ + + + + + +
Parameters:
    +
  • r – Equation coefficient
  • +
  • initial_value – Initial value of x. Default: 0.3
  • +
  • iterations – number of iterations. Default: 100
  • +
+
Returns:

+
+
+ +
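For instance, generating a chaotic series with the documented defaults:

from pyFTS.data import logistic_map

x = logistic_map.get_data(r=4, initial_value=0.3, iterations=100)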
+
+

pyFTS.data.lorentz module

+
+
+pyFTS.data.lorentz.get_data(var, a=10.0, b=28.0, c=2.6666666666666665, dt=0.01, initial_values=[0.1, 0, 0], iterations=1000)[source]
+
+ +
+
+pyFTS.data.lorentz.get_dataframe(a=10.0, b=28.0, c=2.6666666666666665, dt=0.01, initial_values=[0.1, 0, 0], iterations=1000)[source]
+

Lorenz, Edward Norton (1963). “Deterministic nonperiodic flow”. Journal of the Atmospheric Sciences. 20 (2): 130–141. +https://doi.org/10.1175/1520-0469(1963)020<0130:DNF>2.0.CO;2

+

dx/dt = a(y - x)
dy/dt = x(b - z) - y
dz/dt = xy - cz

+ +++ + + + + + +
Parameters:
    +
  • a – Equation coefficient. Default value: 10
  • +
  • b – Equation coefficient. Default value: 28
  • +
  • c – Equation coefficient. Default value: 8.0/3.0
  • +
  • dt – Time differential for continuous time integration. Default value: 0.01
  • +
  • initial_values – numpy array with the initial values of x,y and z. Default: [0.1, 0, 0]
  • +
  • iterations – number of iterations. Default: 1000
  • +
+
Returns:

Pandas DataFrame with the x, y and z values

+
+
+ +
+
+

pyFTS.data.mackey_glass module

+
+
+pyFTS.data.mackey_glass.get_data(b=0.1, c=0.2, tau=17, initial_values=array([0.5, 0.55882353, 0.61764706, 0.67647059, 0.73529412, 0.79411765, 0.85294118, 0.91176471, 0.97058824, 1.02941176, 1.08823529, 1.14705882, 1.20588235, 1.26470588, 1.32352941, 1.38235294, 1.44117647, 1.5 ]), iterations=1000)[source]
+

Mackey, M. C. and Glass, L. (1977). Oscillation and chaos in physiological control systems. +Science, 197(4300):287-289.

+

dy/dt = -by(t) + cy(t - tau) / (1 + y(t - tau)^10)

+ +++ + + + + + +
Parameters:
    +
  • b – Equation coefficient
  • +
  • c – Equation coefficient
  • +
  • tau – Lag parameter, default: 17
  • +
  • initial_values – numpy array with the initial values of y. Default: np.linspace(0.5,1.5,18)
  • +
  • iterations – number of iterations. Default: 1000
  • +
+
Returns:

+
+
+ +
+
+

pyFTS.data.rossler module

+
+
+pyFTS.data.rossler.get_data(var, a=0.2, b=0.2, c=5.7, dt=0.01, initial_values=[0.001, 0.001, 0.001], iterations=5000)[source]
+
+ +
+
+pyFTS.data.rossler.get_dataframe(a=0.2, b=0.2, c=5.7, dt=0.01, initial_values=[0.001, 0.001, 0.001], iterations=5000)[source]
+
O. E. Rössler, Phys. Lett. 57A, 397 (1976).

dx/dt = -z - y
dy/dt = x + ay
dz/dt = b + z(x - c)

+ +++ + + + + + +
Parameters:
    +
  • a – Equation coefficient. Default value: 0.2
  • +
  • b – Equation coefficient. Default value: 0.2
  • +
  • c – Equation coefficient. Default value: 5.7
  • +
  • dt – Time differential for continuous time integration. Default value: 0.01
  • +
  • initial_values – numpy array with the initial values of x,y and z. Default: [0.001, 0.001, 0.001]
  • +
  • iterations – number of iterations. Default: 5000
  • +
+
Returns:

Pandas DataFrame with the x, y and z values

+
+
+ +
+
+

pyFTS.data.sunspots module

+
+
+pyFTS.data.sunspots.get_data()[source]
+

Get a simple univariate time series.

+ +++ + + + +
Returns:numpy array
+
+ +
+
+pyFTS.data.sunspots.get_dataframe()[source]
+

Get the complete multivariate time series data.

+ +++ + + + +
Returns:Pandas DataFrame
+
+ +
+
+

Module contents

+

Module for pyFTS standard datasets facilities

+
+
+ + +
+
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.html b/docs/_build/html/pyFTS.html
new file mode 100644
index 0000000..0d18828
--- /dev/null
+++ b/docs/_build/html/pyFTS.html
@@ -0,0 +1,245 @@
+
+
+
+ +
+

pyFTS package

+
+

Subpackages

+
+ +
+
+
+

Submodules

+
+
+

pyFTS.conf module

+
+
+

Module contents

+

pyFTS - A Python library for Fuzzy Time Series models

+
+
+ + +
+
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.models.ensemble.html b/docs/_build/html/pyFTS.models.ensemble.html
new file mode 100644
index 0000000..27088be
--- /dev/null
+++ b/docs/_build/html/pyFTS.models.ensemble.html
@@ -0,0 +1,379 @@
+
+
+
+ +
+

pyFTS.models.ensemble package

+
+

Submodules

+
+
+

pyFTS.models.ensemble.ensemble module

+
+
+class pyFTS.models.ensemble.ensemble.AllMethodEnsembleFTS(**kwargs)[source]
+

Bases: pyFTS.models.ensemble.ensemble.EnsembleFTS

+
+
+set_transformations(model)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+class pyFTS.models.ensemble.ensemble.EnsembleFTS(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

Ensemble FTS

+
+
+append_model(model)[source]
+

Append a new model to the ensemble

+ +++ + + + +
Parameters:model – FTS model
+
+ +
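A sketch of composing an ensemble from individually trained models via append_model; the default point aggregation performed by forecast() is an assumption here (see get_point below):

from pyFTS.data import TAIEX
from pyFTS.partitioners import Grid
from pyFTS.models import chen, yu
from pyFTS.models.ensemble import ensemble

data = TAIEX.get_data()[:2000]
part = Grid.GridPartitioner(data=data, npart=15)

emodel = ensemble.EnsembleFTS()
for method in [chen.ConventionalFTS, yu.WeightedFTS]:
    m = method(partitioner=part)
    m.fit(data)
    emodel.append_model(m)

point = emodel.forecast(data[-10:])   # aggregates the component forecasts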
+
+forecast(data, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+forecast_ahead_distribution(data, steps, **kwargs)[source]
+

Probabilistic forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_ahead_interval(data, steps, **kwargs)[source]
+

Interval forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+forecast_distribution(data, **kwargs)[source]
+

Probabilistic forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_interval(data, **kwargs)[source]
+

Interval forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+get_distribution_interquantile(forecasts, alpha)[source]
+
+ +
+
+get_interval(forecasts)[source]
+
+ +
+
+get_models_forecasts(data)[source]
+
+ +
+
+get_point(forecasts, **kwargs)[source]
+
+ +
+
+models = None
+

A list of FTS models, the ensemble components

+
+ +
+
+parameters = None
+

A list with the parameters for each component model

+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+pyFTS.models.ensemble.ensemble.sampler(data, quantiles)[source]
+
+ +
+
+

pyFTS.models.ensemble.multiseasonal module

+
+
+class pyFTS.models.ensemble.multiseasonal.SeasonalEnsembleFTS(name, **kwargs)[source]
+

Bases: pyFTS.models.ensemble.ensemble.EnsembleFTS

+
+
+forecast_distribution(data, **kwargs)[source]
+

Probabilistic forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+
+update_uod(data)[source]
+
+ +
+ +
+
+pyFTS.models.ensemble.multiseasonal.train_individual_model(partitioner, train_data, indexer)[source]
+
+ +
+
+

Module contents

+

Meta FTS that aggregates other FTS methods

+
+
+ + +
+
+
+ +
+
\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.models.html b/docs/_build/html/pyFTS.models.html
new file mode 100644
index 0000000..325ccfe
--- /dev/null
+++ b/docs/_build/html/pyFTS.models.html
@@ -0,0 +1,1064 @@
+
+
+
+ +
+

pyFTS.models package

+ +
+

Submodules

+
+
+

pyFTS.models.chen module

+

First Order Conventional Fuzzy Time Series by Chen (1996)

+

S.-M. Chen, “Forecasting enrollments based on fuzzy time series,” Fuzzy Sets Syst., vol. 81, no. 3, pp. 311–319, 1996.

+
+
+class pyFTS.models.chen.ConventionalFLRG(LHS, **kwargs)[source]
+

Bases: pyFTS.common.flrg.FLRG

+

First Order Conventional Fuzzy Logical Relationship Group

+
+
+append_rhs(c, **kwargs)[source]
+
+ +
+
+get_key(sets)[source]
+

Returns a unique identifier for this FLRG

+
+ +
+ +
+
+class pyFTS.models.chen.ConventionalFTS(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

Conventional Fuzzy Time Series

+
+
+forecast(ndata, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+generate_flrg(flrs)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

pyFTS.models.cheng module

+

Trend Weighted Fuzzy Time Series by Cheng, Chen and Wu (2009)

+

C.-H. Cheng, Y.-S. Chen, and Y.-L. Wu, “Forecasting innovation diffusion of products using trend-weighted fuzzy time-series model,” +Expert Syst. Appl., vol. 36, no. 2, pp. 1826–1832, 2009.

+
+
+class pyFTS.models.cheng.TrendWeightedFLRG(LHS, **kwargs)[source]
+

Bases: pyFTS.models.yu.WeightedFLRG

+

First Order Trend Weighted Fuzzy Logical Relationship Group

+
+
+weights(sets)[source]
+
+ +
+ +
+
+class pyFTS.models.cheng.TrendWeightedFTS(**kwargs)[source]
+

Bases: pyFTS.models.yu.WeightedFTS

+

First Order Trend Weighted Fuzzy Time Series

+
+
+generate_FLRG(flrs)[source]
+
+ +
+ +
+
+

pyFTS.models.hofts module

+

High Order FTS

+

Severiano, S. A. Jr; Silva, P. C. L.; Sadaei, H. J.; Guimarães, F. G. Very Short-term Solar Forecasting +using Fuzzy Time Series. 2017 IEEE International Conference on Fuzzy Systems. DOI10.1109/FUZZ-IEEE.2017.8015732

+
+
+class pyFTS.models.hofts.HighOrderFLRG(order, **kwargs)[source]
+

Bases: pyFTS.common.flrg.FLRG

+

Conventional High Order Fuzzy Logical Relationship Group

+
+
+append_lhs(c)[source]
+
+ +
+
+append_rhs(c, **kwargs)[source]
+
+ +
+ +
+
+class pyFTS.models.hofts.HighOrderFTS(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

Conventional High Order Fuzzy Time Series

+
+
+configure_lags(**kwargs)[source]
+
+ +
+
+forecast(ndata, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+generate_flrg(data)[source]
+
+ +
+
+generate_lhs_flrg(sample)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

pyFTS.models.hwang module

+

High Order Fuzzy Time Series by Hwang, Chen and Lee (1998)

+

Jeng-Ren Hwang, Shyi-Ming Chen, and Chia-Hoang Lee, “Handling forecasting problems using fuzzy time series,” +Fuzzy Sets Syst., no. 100, pp. 217–228, 1998.

+
+
+class pyFTS.models.hwang.HighOrderFTS(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+
+
+configure_lags(**kwargs)[source]
+
+ +
+
+forecast(ndata, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

pyFTS.models.ifts module

+

High Order Interval Fuzzy Time Series

+

SILVA, Petrônio CL; SADAEI, Hossein Javedani; GUIMARÃES, Frederico Gadelha. Interval Forecasting with Fuzzy Time Series. +In: Computational Intelligence (SSCI), 2016 IEEE Symposium Series on. IEEE, 2016. p. 1-8.

+
+
+class pyFTS.models.ifts.IntervalFTS(**kwargs)[source]
+

Bases: pyFTS.models.hofts.HighOrderFTS

+

High Order Interval Fuzzy Time Series

+
+
+forecast_interval(ndata, **kwargs)[source]
+

Interval forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+get_lower(flrg)[source]
+
+ +
+
+get_sequence_membership(data, fuzzySets)[source]
+
+ +
+
+get_upper(flrg)[source]
+
+ +
+ +
+
+

pyFTS.models.ismailefendi module

+

First Order Improved Weighted Fuzzy Time Series by Efendi, Ismail and Deris (2013)

+

R. Efendi, Z. Ismail, and M. M. Deris, “Improved weight Fuzzy Time Series as used in the exchange rates forecasting of +US Dollar to Ringgit Malaysia,” Int. J. Comput. Intell. Appl., vol. 12, no. 1, p. 1350005, 2013.

+
+
+class pyFTS.models.ismailefendi.ImprovedWeightedFLRG(LHS, **kwargs)[source]
+

Bases: pyFTS.common.flrg.FLRG

+

First Order Improved Weighted Fuzzy Logical Relationship Group

+
+
+append_rhs(c, **kwargs)[source]
+
+ +
+
+weights()[source]
+
+ +
+ +
+
+class pyFTS.models.ismailefendi.ImprovedWeightedFTS(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

First Order Improved Weighted Fuzzy Time Series

+
+
+forecast(ndata, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+generate_flrg(flrs)[source]
+
+ +
+
+train(ndata, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

pyFTS.models.pwfts module

+
+
+class pyFTS.models.pwfts.ProbabilisticWeightedFLRG(order)[source]
+

Bases: pyFTS.models.hofts.HighOrderFLRG

+

High Order Probabilistic Weighted Fuzzy Logical Relationship Group

+
+
+append_rhs(c, **kwargs)[source]
+
+ +
+
+get_lower(sets)[source]
+

Returns the lower bound value for the RHS fuzzy sets

+ +++ + + + + + +
Parameters:sets – fuzzy sets
Returns:lower bound value
+
+ +
+
+get_membership(data, sets)[source]
+

Returns the membership value of the FLRG for the input data

+ +++ + + + + + +
Parameters:
    +
  • data – input data
  • +
  • sets – fuzzy sets
  • +
+
Returns:

the membership value

+
+
+ +
+
+get_midpoint(sets)[source]
+

Return the expectation of the PWFLRG, the weighted sum

+
+ +
+
+get_upper(sets)[source]
+

Returns the upper bound value for the RHS fuzzy sets

+ +++ + + + + + +
Parameters:sets – fuzzy sets
Returns:upper bound value
+
+ +
+
+lhs_conditional_probability(x, sets, norm, uod, nbins)[source]
+
+ +
+
+partition_function(sets, uod, nbins=100)[source]
+
+ +
+
+rhs_conditional_probability(x, sets, uod, nbins)[source]
+
+ +
+
+rhs_unconditional_probability(c)[source]
+
+ +
+ +
+
+class pyFTS.models.pwfts.ProbabilisticWeightedFTS(**kwargs)[source]
+

Bases: pyFTS.models.ifts.IntervalFTS

+

High Order Probabilistic Weighted Fuzzy Time Series

+
+
+add_new_PWFLGR(flrg)[source]
+
+ +
+
+flrg_lhs_conditional_probability(x, flrg)[source]
+
+ +
+
+flrg_lhs_unconditional_probability(flrg)[source]
+
+ +
+
+flrg_rhs_conditional_probability(x, flrg)[source]
+
+ +
+
+forecast(data, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+forecast_ahead(data, steps, **kwargs)[source]
+

Point forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+forecast_ahead_distribution(ndata, steps, **kwargs)[source]
+

Probabilistic forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_ahead_interval(data, steps, **kwargs)[source]
+

Interval forecast n steps ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • steps – the number of steps ahead to forecast
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+forecast_distribution(ndata, **kwargs)[source]
+

Probabilistic forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted Probability Distributions

+
+
+ +
+
+forecast_interval(ndata, **kwargs)[source]
+

Interval forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted intervals

+
+
+ +
+
+generate_flrg(data)[source]
+
+ +
+
+generate_lhs_flrg(sample)[source]
+
+ +
+
+get_lower(flrg)[source]
+
+ +
+
+get_midpoint(flrg)[source]
+
+ +
+
+get_upper(flrg)[source]
+
+ +
+
+interval_heuristic(sample)[source]
+
+ +
+
+interval_quantile(ndata, alpha)[source]
+
+ +
+
+point_expected_value(sample, **kwargs)[source]
+
+ +
+
+point_heuristic(sample, **kwargs)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+
+update_model(data)[source]
+
+ +
+ +
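PWFTS is the main probabilistic model of the library; all three forecast types are reachable through the predict() façade of pyFTS.common.fts.FTS. A sketch:

from pyFTS.data import TAIEX
from pyFTS.partitioners import Grid
from pyFTS.models import pwfts

data = TAIEX.get_data()
part = Grid.GridPartitioner(data=data, npart=20)

model = pwfts.ProbabilisticWeightedFTS(partitioner=part, order=1)
model.fit(data[:1000])

dists = model.predict(data[1000:1010], type='distribution')
intervals = model.predict(data[1000:1010], type='interval')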
+
+pyFTS.models.pwfts.visualize_distributions(model, **kwargs)[source]
+
+ +
+
+

pyFTS.models.sadaei module

+

First Order Exponentially Weighted Fuzzy Time Series by Sadaei et al. (2013)

+

H. J. Sadaei, R. Enayatifar, A. H. Abdullah, and A. Gani, “Short-term load forecasting using a hybrid model with a refined exponentially weighted fuzzy time series and an improved harmony search,” Int. J. Electr. Power Energy Syst., vol. 62, pp. 118–129, 2014.

+
+
+class pyFTS.models.sadaei.ExponentialyWeightedFLRG(LHS, **kwargs)[source]
+

Bases: pyFTS.common.flrg.FLRG

+

First Order Exponentially Weighted Fuzzy Logical Relationship Group

+
+
+append_rhs(c, **kwargs)[source]
+
+ +
+
+weights()[source]
+
+ +
+ +
+
+class pyFTS.models.sadaei.ExponentialyWeightedFTS(**kwargs)[source]
+

Bases: pyFTS.common.fts.FTS

+

First Order Exponentially Weighted Fuzzy Time Series

+
+
+forecast(ndata, **kwargs)[source]
+

Point forecast one step ahead

+ +++ + + + + + +
Parameters:
    +
  • data – time series data with the minimal length equal to the max_lag of the model
  • +
  • kwargs – model specific parameters
  • +
+
Returns:

a list with the forecasted values

+
+
+ +
+
+generate_flrg(flrs, c)[source]
+
+ +
+
+train(data, **kwargs)[source]
+

Method specific parameter fitting

+ +++ + + + +
Parameters:
    +
  • data – training time series data
  • +
  • kwargs – Method specific parameters
  • +
+
+
+ +
+ +
+
+

pyFTS.models.song module

+

First Order Traditional Fuzzy Time Series method by Song & Chissom (1993)

+
Q. Song and B. S. Chissom, “Fuzzy time series and its models,” Fuzzy Sets Syst., vol. 54, no. 3, pp. 269–277, 1993.
+
+
class pyFTS.models.song.ConventionalFTS(**kwargs)

Bases: pyFTS.common.fts.FTS

Traditional Fuzzy Time Series

flr_membership_matrix(flr)

forecast(ndata, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

operation_matrix(flrs)

train(data, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters
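The operation_matrix and flr_membership_matrix methods reflect Song & Chissom's relational view of forecasting: the fuzzified previous observation is composed with a learned fuzzy relationship matrix R via max-min composition, F(t) = F(t-1) ∘ R. A small illustrative sketch of that composition (the data and names are assumptions, not the library's code):

    import numpy as np

    def max_min_composition(a, R):
        # a: membership vector of the previous observation (length n)
        # R: n x n fuzzy relationship matrix
        # Returns the forecast membership vector, one entry per fuzzy set.
        return np.array([np.minimum(a, R[:, j]).max() for j in range(R.shape[1])])

    a = np.array([0.0, 0.5, 1.0, 0.5])   # fuzzified last observation
    R = np.random.rand(4, 4)             # stands in for the learned operation matrix
    print(max_min_composition(a, R))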

pyFTS.models.yu module

First Order Weighted Fuzzy Time Series by Yu (2005)

H.-K. Yu, “Weighted fuzzy time series models for TAIEX forecasting,” Phys. A Stat. Mech. its Appl., vol. 349, no. 3, pp. 609–624, 2005.

class pyFTS.models.yu.WeightedFLRG(LHS, **kwargs)

Bases: pyFTS.common.flrg.FLRG

First Order Weighted Fuzzy Logical Relationship Group

append_rhs(c, **kwargs)

weights(sets)
class pyFTS.models.yu.WeightedFTS(**kwargs)

Bases: pyFTS.common.fts.FTS

First Order Weighted Fuzzy Time Series

forecast(ndata, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

generate_FLRG(flrs)

train(ndata, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters
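The sadaei, song, and yu models above differ mainly in how their relationship groups are weighted; all share the train/forecast workflow inherited from pyFTS.common.fts.FTS. A minimal comparative sketch, assuming the Enrollments sample data and a 10-partition grid:

    from pyFTS.data import Enrollments
    from pyFTS.partitioners import Grid
    from pyFTS.models import sadaei, song, yu

    data = Enrollments.get_data()
    part = Grid.GridPartitioner(data=data, npart=10)

    for cls in (song.ConventionalFTS, yu.WeightedFTS, sadaei.ExponentialyWeightedFTS):
        model = cls(partitioner=part)
        model.fit(data)               # fit() dispatches to the model's train()
        print(cls.__name__, model.forecast(data[-1:]))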

Module contents


Fuzzy Time Series methods

\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.models.multivariate.html b/docs/_build/html/pyFTS.models.multivariate.html
new file mode 100644
index 0000000..8628537
--- /dev/null
+++ b/docs/_build/html/pyFTS.models.multivariate.html
@@ -0,0 +1,354 @@
pyFTS.models.multivariate package — pyFTS 1.2.3 documentation

pyFTS.models.multivariate package

Submodules

pyFTS.models.multivariate.FLR module

class pyFTS.models.multivariate.FLR.FLR

Bases: object

Multivariate Fuzzy Logical Relationship

set_lhs(var, set)

set_rhs(set)

pyFTS.models.multivariate.common module

pyFTS.models.multivariate.common.fuzzyfy_instance(data_point, var)

pyFTS.models.multivariate.flrg module

class pyFTS.models.multivariate.flrg.FLRG(**kwargs)

Bases: pyFTS.common.flrg.FLRG

Multivariate Fuzzy Logical Rule Group

append_rhs(fset, **kwargs)
get_membership(data, variables)

Returns the membership value of the FLRG for the input data

Parameters:
  • data – input data
  • sets – fuzzy sets

Returns:
  the membership value

set_lhs(var, fset)
pyFTS.models.multivariate.mvfts module

class pyFTS.models.multivariate.mvfts.MVFTS(**kwargs)

Bases: pyFTS.common.fts.FTS

Multivariate extension of Chen’s ConventionalFTS method

append_variable(var)

Append a new endogenous variable to the model

Parameters:
  • var – variable object
apply_transformations(data, params=None, updateUoD=False, **kwargs)

Apply the data transformations for data preprocessing

Parameters:
  • data – input data
  • params – transformation parameters
  • updateUoD
  • kwargs

Returns:
  preprocessed data

clone_parameters(model)

Import the parameter values from another model

Parameters:
  • model
forecast(data, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

format_data(data)

generate_flrg(flrs)

generate_flrs(data)

generate_lhs_flrs(data)

train(data, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters
pyFTS.models.multivariate.variable module

class pyFTS.models.multivariate.variable.Variable(name, **kwargs)

Bases: object

A variable of a fuzzy time series multivariate model. Each variable contains its own transformations and partitioners.

alias = None
  A string with the alias of the variable

apply_inverse_transformations(data, **kwargs)

apply_transformations(data, **kwargs)

build(**kwargs)

Parameters:
  • kwargs

data_label = None
  A string with the column name on the DataFrame

name = None
  A string with the name of the variable
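A minimal end-to-end sketch of the multivariate workflow: build one Variable per DataFrame column, register the variables on an MVFTS, and train. The synthetic DataFrame, the Variable keyword arguments (data_label, partitioner, npart, data), and the target_variable attribute are assumptions drawn from the API above and common pyFTS usage, not guarantees of this exact version.

    import numpy as np
    import pandas as pd
    from pyFTS.partitioners import Grid
    from pyFTS.models.multivariate import mvfts, variable

    df = pd.DataFrame({
        'temperature': np.random.normal(25, 3, 200),
        'load': np.random.normal(500, 50, 200),
    })

    temp = variable.Variable('Temperature', data_label='temperature', data=df,
                             partitioner=Grid.GridPartitioner, npart=10)
    load = variable.Variable('Load', data_label='load', data=df,
                             partitioner=Grid.GridPartitioner, npart=10)

    model = mvfts.MVFTS()
    model.append_variable(temp)
    model.append_variable(load)
    model.target_variable = load   # assumption: selects the forecast target
    model.fit(df)
    print(model.forecast(df.iloc[-1:]))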

Module contents


Multivariate Fuzzy Time Series methods

\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.models.nonstationary.html b/docs/_build/html/pyFTS.models.nonstationary.html
new file mode 100644
index 0000000..f7945c3
--- /dev/null
+++ b/docs/_build/html/pyFTS.models.nonstationary.html
@@ -0,0 +1,745 @@
pyFTS.models.nonstationary package — pyFTS 1.2.3 documentation

pyFTS.models.nonstationary package

Submodules

pyFTS.models.nonstationary.common module

Non Stationary Fuzzy Sets

J. M. Garibaldi, M. Jaroszewski, and S. Musikasuwan, “Nonstationary fuzzy sets,” IEEE Trans. Fuzzy Syst., vol. 16, no. 4, pp. 1072–1086, 2008.

class pyFTS.models.nonstationary.common.FuzzySet(name, mf, parameters, **kwargs)

Bases: pyFTS.common.FuzzySet.FuzzySet

Non Stationary Fuzzy Sets

get_lower(t)

get_midpoint(t)

get_upper(t)

location = None
  Perturbation function that affects the location of the membership function

location_params = None
  Parameters for the location perturbation function

membership(x, t)

Calculate the membership value of a given input

Parameters:
  • x – input value
  • t – time displacement or perturbation parameters

Returns:
  membership value of x at this fuzzy set

noise = None
  Perturbation function that adds noise to the membership function

noise_params = None
  Parameters for the noise perturbation function

perform_location(t, param)

perform_width(t, param)

perturbate_parameters(t)

width = None
  Perturbation function that affects the width of the membership function

width_params = None
  Parameters for the width perturbation function
pyFTS.models.nonstationary.common.check_bounds(data, partitioner, t)

pyFTS.models.nonstationary.common.check_bounds_index(data, partitioner, t)

pyFTS.models.nonstationary.common.fuzzify(inst, t, fuzzySets)

Calculate the membership values for a data point given nonstationary fuzzy sets

Parameters:
  • inst – data points
  • t – time displacement of the instance
  • fuzzySets – list of fuzzy sets

Returns:
  array of membership values

pyFTS.models.nonstationary.common.fuzzySeries(data, fuzzySets, ordered_sets, window_size=1, method='fuzzy', const_t=None)

pyFTS.models.nonstationary.common.window_index(t, window_size)
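The essential idea of a nonstationary fuzzy set is that its membership function parameters are re-evaluated at each time t through perturbation functions. A sketch of a Gaussian set whose center drifts linearly in time, mirroring the location/location_params mechanism above (function and parameter names are illustrative, not the library's):

    import numpy as np

    def gaussian_mf(x, center, sigma):
        return np.exp(-0.5 * ((x - center) / sigma) ** 2)

    def linear_location(t, params):
        a, b = params            # drift rate and offset
        return a * t + b

    center0, sigma = 10.0, 2.0
    for t in range(3):
        # perturb the location before evaluating the membership, as in
        # perturbate_parameters(t) followed by membership(x, t)
        center_t = center0 + linear_location(t, (0.5, 0.0))
        print(t, round(gaussian_mf(9.0, center_t, sigma), 4))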

pyFTS.models.nonstationary.cvfts module

class pyFTS.models.nonstationary.cvfts.ConditionalVarianceFTS(**kwargs)

Bases: pyFTS.models.hofts.HighOrderFTS

forecast(ndata, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

forecast_interval(ndata, **kwargs)

Interval forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted intervals

generate_flrg(flrs, **kwargs)

perturbation_factors(data, **kwargs)

perturbation_factors__old(data)

train(ndata, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters
class pyFTS.models.nonstationary.cvfts.HighOrderNonstationaryFLRG(order, **kwargs)

Bases: pyFTS.models.hofts.HighOrderFTS

Conventional High Order Fuzzy Logical Relationship Group

append_lhs(c)

append_rhs(c, **kwargs)

pyFTS.models.nonstationary.flrg module

class pyFTS.models.nonstationary.flrg.NonStationaryFLRG(LHS, **kwargs)

Bases: pyFTS.common.flrg.FLRG

get_key()

Returns a unique identifier for this FLRG

get_lower(*args)

Returns the lower bound value for the RHS fuzzy sets

Parameters:
  • sets – fuzzy sets

Returns:
  lower bound value

get_membership(data, *args)

Returns the membership value of the FLRG for the input data

Parameters:
  • data – input data
  • sets – fuzzy sets

Returns:
  the membership value

get_midpoint(*args)

Returns the midpoint value for the RHS fuzzy sets

Parameters:
  • sets – fuzzy sets

Returns:
  the midpoint value

get_upper(*args)

Returns the upper bound value for the RHS fuzzy sets

Parameters:
  • sets – fuzzy sets

Returns:
  upper bound value

unpack_args(*args)

pyFTS.models.nonstationary.honsfts module

class pyFTS.models.nonstationary.honsfts.HighOrderNonStationaryFLRG(order, **kwargs)

Bases: pyFTS.models.nonstationary.flrg.NonStationaryFLRG

First Order NonStationary Fuzzy Logical Relationship Group

append_lhs(c)

append_rhs(c, **kwargs)
class pyFTS.models.nonstationary.honsfts.HighOrderNonStationaryFTS(name, **kwargs)

Bases: pyFTS.models.hofts.HighOrderFTS

NonStationaryFTS Fuzzy Time Series

forecast(ndata, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

forecast_interval(ndata, **kwargs)

Interval forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted intervals

generate_flrg(data, **kwargs)

train(data, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters

pyFTS.models.nonstationary.nsfts module

class pyFTS.models.nonstationary.nsfts.ConventionalNonStationaryFLRG(LHS, **kwargs)

Bases: pyFTS.models.nonstationary.flrg.NonStationaryFLRG

First Order NonStationary Fuzzy Logical Relationship Group

append_rhs(c, **kwargs)

get_key()

Returns a unique identifier for this FLRG
class pyFTS.models.nonstationary.nsfts.NonStationaryFTS(**kwargs)

Bases: pyFTS.common.fts.FTS

NonStationaryFTS Fuzzy Time Series

conditional_perturbation_factors(data, **kwargs)

forecast(ndata, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

forecast_interval(ndata, **kwargs)

Interval forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted intervals

generate_flrg(flrs, **kwargs)

train(data, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters

pyFTS.models.nonstationary.partitioners module

class pyFTS.models.nonstationary.partitioners.PolynomialNonStationaryPartitioner(data, part, **kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

Non Stationary Universe of Discourse Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

get_polynomial_perturbations(data, **kwargs)

poly_width(par1, par2, rng, deg)

scale_down(x, pct)

scale_up(x, pct)
class pyFTS.models.nonstationary.partitioners.SimpleNonStationaryPartitioner(data, part, **kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

Non Stationary Universe of Discourse Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

pyFTS.models.nonstationary.partitioners.simplenonstationary_gridpartitioner_builder(data, npart, transformation)

pyFTS.models.nonstationary.perturbation module

Perturbation functions for Non Stationary Fuzzy Sets

pyFTS.models.nonstationary.perturbation.exponential(x, parameters)

pyFTS.models.nonstationary.perturbation.linear(x, parameters)

pyFTS.models.nonstationary.perturbation.periodic(x, parameters)

pyFTS.models.nonstationary.perturbation.polynomial(x, parameters)

pyFTS.models.nonstationary.util module

pyFTS.models.nonstationary.util.plot_sets(partitioner, start=0, end=10, step=1, tam=[5, 5], colors=None, save=False, file=None, axes=None, data=None, window_size=1, only_lines=False)

pyFTS.models.nonstationary.util.plot_sets_conditional(model, data, step=1, size=[5, 5], colors=None, save=False, file=None, axes=None)

Module contents


Fuzzy time series with nonstationary fuzzy sets, for heteroskedastic data

\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.models.seasonal.html b/docs/_build/html/pyFTS.models.seasonal.html
new file mode 100644
index 0000000..5a875b7
--- /dev/null
+++ b/docs/_build/html/pyFTS.models.seasonal.html
@@ -0,0 +1,673 @@
pyFTS.models.seasonal package — pyFTS 1.2.3 documentation

pyFTS.models.seasonal package

Submodules

pyFTS.models.seasonal.SeasonalIndexer module

class pyFTS.models.seasonal.SeasonalIndexer.DataFrameSeasonalIndexer(index_fields, index_seasons, data_field, **kwargs)

Bases: pyFTS.models.seasonal.SeasonalIndexer.SeasonalIndexer

Use the Pandas.DataFrame index position to index the seasonality

get_data(data)

get_data_by_season(data, indexes)

get_index_by_season(indexes)

get_season_by_index(index)

get_season_of_data(data)

set_data(data, value)
class pyFTS.models.seasonal.SeasonalIndexer.DateTimeSeasonalIndexer(date_field, index_fields, index_seasons, data_field, **kwargs)

Bases: pyFTS.models.seasonal.SeasonalIndexer.SeasonalIndexer

Use a Pandas.DataFrame date field to index the seasonality

get_data(data)

get_data_by_season(data, indexes)

get_index(data)

get_index_by_season(indexes)

get_season_by_index(index)

get_season_of_data(data)

set_data(data, value)
class pyFTS.models.seasonal.SeasonalIndexer.LinearSeasonalIndexer(seasons, units, ignore=None, **kwargs)

Bases: pyFTS.models.seasonal.SeasonalIndexer.SeasonalIndexer

Use the data array/list position to index the seasonality

get_data(data)

get_index_by_season(indexes)

get_season_by_index(index)

get_season_of_data(data)
class pyFTS.models.seasonal.SeasonalIndexer.SeasonalIndexer(num_seasons, **kwargs)

Bases: object

Seasonal Indexer. Responsible for finding the seasonal index of a data point inside its data set

get_data(data)

get_data_by_season(data, indexes)

get_index(data)

get_index_by_season(indexes)

get_season_by_index(inde)

get_season_of_data(data)

pyFTS.models.seasonal.cmsfts module

class pyFTS.models.seasonal.cmsfts.ContextualMultiSeasonalFTS(**kwargs)

Bases: pyFTS.models.seasonal.sfts.SeasonalFTS

Contextual Multi-Seasonal Fuzzy Time Series

forecast(data, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

forecast_ahead(data, steps, **kwargs)

Point forecast n steps ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • steps – the number of steps ahead to forecast
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

generate_flrg(flrs)

get_midpoints(flrg, data)

train(data, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters

class pyFTS.models.seasonal.cmsfts.ContextualSeasonalFLRG(seasonality)

Bases: pyFTS.models.seasonal.sfts.SeasonalFLRG

Contextual Seasonal Fuzzy Logical Relationship Group

append_rhs(flr, **kwargs)

pyFTS.models.seasonal.common module

class pyFTS.models.seasonal.common.DateTime

Bases: enum.Enum

An enumeration.

day_of_month = 30
day_of_week = 7
day_of_year = 364
hour = 6
hour_of_day = 24
hour_of_month = 744
hour_of_week = 168
hour_of_year = 8736
minute_of_day = 1440
minute_of_hour = 60
minute_of_month = 44640
minute_of_week = 10080
minute_of_year = 524160
month = 12
second = 8
second_of_day = 86400
second_of_hour = 3600
second_of_minute = 60.00001
year = 1
class pyFTS.models.seasonal.common.FuzzySet(datepart, name, mf, parameters, centroid, alpha=1.0, **kwargs)

Bases: pyFTS.common.FuzzySet.FuzzySet

Temporal/Seasonal Fuzzy Set

membership(x)

Calculate the membership value of a given input

Parameters:
  • x – input value

Returns:
  membership value of x at this fuzzy set

pyFTS.models.seasonal.common.strip_datepart(date, date_part)

pyFTS.models.seasonal.msfts module

class pyFTS.models.seasonal.msfts.MultiSeasonalFTS(name, indexer, **kwargs)

Bases: pyFTS.models.seasonal.sfts.SeasonalFTS

Multi-Seasonal Fuzzy Time Series

forecast(data, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

forecast_ahead(data, steps, **kwargs)

Point forecast n steps ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • steps – the number of steps ahead to forecast
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

generate_flrg(flrs)

train(data, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters

pyFTS.models.seasonal.partitioner module

class pyFTS.models.seasonal.partitioner.TimeGridPartitioner(**kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

Even Length DateTime Grid Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

plot(ax)

Plot the fuzzy sets on the given axis

Parameters:
  • ax – matplotlib axis
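A short usage sketch for TimeGridPartitioner: partition the hour-of-day season of a datetime index into 24 fuzzy sets. The seasonality and data keyword arguments are assumptions suggested by the DateTime enumeration above and common pyFTS usage.

    import pandas as pd
    from pyFTS.models.seasonal import partitioner
    from pyFTS.models.seasonal.common import DateTime

    dates = pd.date_range('2018-01-01', periods=240, freq='H')

    # builds 24 seasonal fuzzy sets over the hour-of-day season
    fs = partitioner.TimeGridPartitioner(data=dates, npart=24,
                                         seasonality=DateTime.hour_of_day)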

pyFTS.models.seasonal.sfts module

Simple First Order Seasonal Fuzzy Time Series implementation of Song (1999), based on the Conventional FTS of Chen (1996)

Q. Song, “Seasonal forecasting in fuzzy time series,” Fuzzy Sets Syst., vol. 107, pp. 235–236, 1999.

S.-M. Chen, “Forecasting enrollments based on fuzzy time series,” Fuzzy Sets Syst., vol. 81, no. 3, pp. 311–319, 1996.
class pyFTS.models.seasonal.sfts.SeasonalFLRG(seasonality)

Bases: pyFTS.common.flrg.FLRG

First Order Seasonal Fuzzy Logical Relationship Group

append_rhs(c, **kwargs)

get_key()

Returns a unique identifier for this FLRG

class pyFTS.models.seasonal.sfts.SeasonalFTS(**kwargs)

Bases: pyFTS.common.fts.FTS

First Order Seasonal Fuzzy Time Series

forecast(data, **kwargs)

Point forecast one step ahead

Parameters:
  • data – time series data with the minimal length equal to the max_lag of the model
  • kwargs – model specific parameters

Returns:
  a list with the forecasted values

generate_flrg(flrs)

get_midpoints(flrg)

train(data, **kwargs)

Method specific parameter fitting

Parameters:
  • data – training time series data
  • kwargs – Method specific parameters

Module contents

\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.partitioners.html b/docs/_build/html/pyFTS.partitioners.html
new file mode 100644
index 0000000..2e5178d
--- /dev/null
+++ b/docs/_build/html/pyFTS.partitioners.html
@@ -0,0 +1,429 @@
pyFTS.partitioners package — pyFTS 1.2.3 documentation

pyFTS.partitioners package

Submodules

pyFTS.partitioners.CMeans module

class pyFTS.partitioners.CMeans.CMeansPartitioner(**kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

pyFTS.partitioners.CMeans.c_means(k, dados, tam)

pyFTS.partitioners.CMeans.distance(x, y)

pyFTS.partitioners.Entropy module

C. H. Cheng, R. J. Chang, and C. A. Yeh, “Entropy-based and trapezoidal fuzzification-based fuzzy time series approach for forecasting IT project cost,” Technol. Forecast. Social Change, vol. 73, no. 5, pp. 524–542, Jun. 2006.

class pyFTS.partitioners.Entropy.EntropyPartitioner(**kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

Huarng Entropy Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

pyFTS.partitioners.Entropy.PMF(data, threshold)

pyFTS.partitioners.Entropy.bestSplit(data, npart)

pyFTS.partitioners.Entropy.entropy(data, threshold)

pyFTS.partitioners.Entropy.informationGain(data, thres1, thres2)

pyFTS.partitioners.Entropy.splitAbove(data, threshold)

pyFTS.partitioners.Entropy.splitBelow(data, threshold)

pyFTS.partitioners.FCM module

S. T. Li, Y. C. Cheng, and S. Y. Lin, “A FCM-based deterministic forecasting model for fuzzy time series,” Comput. Math. Appl., vol. 56, no. 12, pp. 3052–3063, Dec. 2008. DOI: 10.1016/j.camwa.2008.07.033.

class pyFTS.partitioners.FCM.FCMPartitioner(**kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

pyFTS.partitioners.FCM.fuzzy_cmeans(k, dados, tam, m, deltadist=0.001)

pyFTS.partitioners.FCM.fuzzy_distance(x, y)

pyFTS.partitioners.FCM.membership(val, vals)

pyFTS.partitioners.Grid module

Even Length Grid Partitioner

class pyFTS.partitioners.Grid.GridPartitioner(**kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

Even Length Grid Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data
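Conceptually, the even-length grid splits the Universe of Discourse [min(data), max(data)] into npart overlapping triangular fuzzy sets with evenly spaced centers. A sketch of the underlying arithmetic (an illustration of the idea, not the library's exact code):

    import numpy as np

    def grid_sets(data, npart):
        lo, hi = float(np.min(data)), float(np.max(data))
        centers = np.linspace(lo, hi, npart)
        width = (hi - lo) / (npart - 1)
        # each set is a triangle (center - width, center, center + width)
        return [(c - width, c, c + width) for c in centers]

    data = np.random.normal(0, 1, 500)
    for tri in grid_sets(data, npart=5):
        print(tuple(round(v, 2) for v in tri))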

pyFTS.partitioners.Huarng module

K. H. Huarng, “Effective lengths of intervals to improve forecasting in fuzzy time series,” Fuzzy Sets Syst., vol. 123, no. 3, pp. 387–394, Nov. 2001.

class pyFTS.partitioners.Huarng.HuarngPartitioner(**kwargs)

Bases: pyFTS.partitioners.partitioner.Partitioner

Huarng Empirical Partitioner

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

pyFTS.partitioners.Util module

Facility methods for the pyFTS partitioners module

pyFTS.partitioners.Util.explore_partitioners(data, npart, methods=None, mf=None, transformation=None, size=[12, 10], save=False, file=None)

Create partitioners for the mf membership functions and npart partitions and show the partitioning images.

Parameters:
  • data – time series data
  • npart – maximum number of partitions of the universe of discourse
  • methods – a list with the partitioning methods to be used
  • mf – a list with the membership functions to be used
  • transformation – a transformation to be used in the partitioner
  • size – list, the size of the output image [width, height]
  • save – boolean, if the image will be saved on disk
  • file – string, the file path to save the image

Returns:
  the list of the built partitioners

pyFTS.partitioners.Util.plot_partitioners(data, objs, tam=[12, 10], save=False, file=None, axis=None)

pyFTS.partitioners.Util.plot_sets(data, sets, titles, size=[12, 10], save=False, file=None, axis=None)
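A quick usage sketch for explore_partitioners, comparing the grid and entropy schemes on the same series (the dataset and method choices are illustrative assumptions):

    import numpy as np
    from pyFTS.partitioners import Grid, Entropy, Util

    data = np.random.normal(0, 1, 500)

    # Build and plot both partitioners with up to 10 partitions each
    objs = Util.explore_partitioners(data, 10,
                                     methods=[Grid.GridPartitioner,
                                              Entropy.EntropyPartitioner])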

pyFTS.partitioners.parallel_util module

pyFTS.partitioners.parallel_util.explore_partitioners(data, npart, methods=None, mf=None, tam=[12, 10], save=False, file=None)

pyFTS.partitioners.partitioner module

class pyFTS.partitioners.partitioner.Partitioner(**kwargs)

Bases: object

Universe of Discourse partitioner. Splits the data into several fuzzy sets

build(data)

Perform the partitioning of the Universe of Discourse

Parameters:
  • data – training data

get_name(counter)

lower_set()

membership_function = None
  Fuzzy membership function (pyFTS.common.Membership)

name = None
  partitioner name

partitions = None
  The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created

plot(ax)

Plot the fuzzy sets on the given axis

Parameters:
  • ax – matplotlib axis

plot_set(ax, s)

prefix = None
  prefix of auto generated partition names

setnames = None
  list of partition names. If None is given, the partitions will be auto named with prefix

transformation = None
  data transformation to be applied on data

upper_set()

Module contents


Module for pyFTS Universe of Discourse partitioners.

\ No newline at end of file
diff --git a/docs/_build/html/pyFTS.probabilistic.html b/docs/_build/html/pyFTS.probabilistic.html
new file mode 100644
index 0000000..fcb4e39
--- /dev/null
+++ b/docs/_build/html/pyFTS.probabilistic.html
@@ -0,0 +1,272 @@
pyFTS.probabilistic package — pyFTS 1.2.3 documentation

pyFTS.probabilistic package

Submodules

pyFTS.probabilistic.ProbabilityDistribution module

class pyFTS.probabilistic.ProbabilityDistribution.ProbabilityDistribution(type='KDE', **kwargs)

Bases: object

Represents a discrete or continuous probability distribution. If type is histogram, the PDF is discrete; if type is KDE, the PDF is continuous.
append(values)

append_interval(intervals)

averageloglikelihood(data)

bins = None
  Number of bins on a discrete PDF

build_cdf_qtl()

crossentropy(q)

cummulative(values)

density(values)

differential_offset(value)

empiricalloglikelihood()

entropy()

expected_value()

kullbackleiblerdivergence(q)

labels = None
  Bin labels on a discrete PDF

plot(axis=None, color='black', tam=[10, 6], title=None)

pseudologlikelihood(data)

quantile(values)

set(value, density)

type = None
  If type is histogram, the PDF is discrete; if type is KDE, the PDF is continuous

uod = None
  Universe of discourse
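A minimal sketch of a discrete (histogram) distribution built from raw samples and then queried. The constructor keywords uod and bins are assumptions suggested by the attributes documented above.

    import numpy as np
    from pyFTS.probabilistic import ProbabilityDistribution

    samples = np.random.normal(50, 10, 1000)

    dist = ProbabilityDistribution.ProbabilityDistribution(
        type='histogram', uod=[0, 100], bins=50)
    dist.append(samples)                 # accumulate observations into the bins

    print(dist.density([40, 50, 60]))    # estimated densities at those points
    print(dist.entropy())                # Shannon entropy of the distribution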

pyFTS.probabilistic.kde module

Kernel Density Estimation

class pyFTS.probabilistic.kde.KernelSmoothing(h, kernel='epanechnikov')

Bases: object

Kernel Density Estimation

h = None
  Width parameter

kernel = None
  Kernel function

kernel_function(u)

probability(x, data)

Probability of the point x on data

Parameters:
  • x
  • data
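KernelSmoothing follows the standard kernel density estimate: each sample contributes a kernel bump of bandwidth h, and the density at x is the average bump height. A self-contained sketch with the default Epanechnikov kernel (illustrative code, not the library's):

    import numpy as np

    def epanechnikov(u):
        u = np.asarray(u, dtype=float)
        return np.where(np.abs(u) <= 1.0, 0.75 * (1.0 - u ** 2), 0.0)

    def kde(x, data, h):
        # f(x) = (1 / (n * h)) * sum_i K((x - x_i) / h)
        data = np.asarray(data, dtype=float)
        return epanechnikov((x - data) / h).sum() / (len(data) * h)

    sample = np.random.normal(0, 1, 1000)
    print(kde(0.0, sample, h=0.5))   # ~0.40 near the mode of N(0, 1)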

Module contents


Probability Distribution objects

\ No newline at end of file
diff --git a/docs/_build/html/search.html b/docs/_build/html/search.html
new file mode 100644
index 0000000..a20ed71
--- /dev/null
+++ b/docs/_build/html/search.html
@@ -0,0 +1,93 @@
Search — pyFTS 1.2.3 documentation

\ No newline at end of file
diff --git a/docs/_build/html/searchindex.js b/docs/_build/html/searchindex.js
new file mode 100644
index 0000000..677ba24
--- /dev/null
+++ b/docs/_build/html/searchindex.js
@@ -0,0 +1 @@
(machine-generated Sphinx search index for the pyFTS documentation; content omitted)
t:[6,8],conventionalnonstationaryflrg:9,copi:4,cost:11,count:4,counter:11,covavg:3,coverag:3,covstd:3,cox:4,creat:[3,4,11],create_benchmark_t:3,criteria:3,cross:4,crossentropi:12,crp:3,crps1avg:3,crps1std:3,crps2avg:3,crps2std:3,crps_distr:3,crps_interv:3,crpsavg:3,crpsstd:3,csv:5,cummul:12,current:3,current_milli_tim:4,cut:4,cvft:[2,6],dado:[4,5,11],data:[1,2,3,4,6,7,8,9,10,11,12],data_column:3,data_field:10,data_label:8,data_point:8,databas:3,datafram:[3,4,5,8,10],dataframeseasonalindex:10,dataset:[3,4,5],date:10,date_field:10,date_part:10,datepart:10,datetim:10,datetimeseasonalindex:10,david:4,day_of_month:10,day_of_week:10,day_of_year:10,dec:11,decis:3,decompress:5,deg:9,delet:4,deltadist:11,densiti:[3,12],depend:[3,4],dependeci:4,deri:6,design:4,detail:4,determin:3,determinist:[5,11],deviat:3,dict:[3,4],dictionari:3,differenti:[4,5],differential_offset:12,diffus:6,dill:4,dimension:5,directli:4,discours:[3,4,9,10,11,12],discret:12,disk:[4,11],dispi:[3,4],displac:9,displai:3,distanc:[3,11],distribut:[3,4,5,6,7,12],distributed_benchmark:[1,2],distributed_predict:4,distributed_train:4,dnf:5,doi10:6,doi:[3,5,11],dollar:6,don:5,download:5,draw_sets_on_axi:4,dump:3,dure:[3,4],dynam:5,each:[3,4,5,7,8],easi:4,easier:4,edward:5,efendi:6,effect:11,effici:4,electr:6,elev:3,empir:11,empiricalloglikelihood:12,enayatifar:6,end:9,endogen:8,energi:6,enrol:[1,2,6,10],ensembl:[2,4,6],ensembleft:7,entir:4,entropi:[1,2,12],entropypartition:11,enumer:10,enumerate2:4,environ:4,epanechnikov:12,equal:[3,4,6,7,8,9,10],equat:5,error:3,espaciai:5,estim:[3,12],etc:4,even:[10,11],exact:4,exchang:6,execut:3,exist:5,expect:[4,6],expected_valu:12,experi:3,expert:6,explore_partition:11,exponenti:[6,9],exponentiali:6,exponentialyweightedflrg:6,exponentialyweightedft:6,express:3,extens:8,extern:[3,4],externalforecast:3,externalmodel:3,extract:5,extract_measur:3,facil:[3,4,5,11],fall:3,fals:[3,4,8,9,11],fcm:[1,2],fcmpartition:11,fetch:4,field:[5,10],fig:[3,4],figur:4,file:[3,4,5,9,11],file_analyt:3,file_path:4,file_synthet:3,filenam:[3,4,5],filesystem:4,fill:3,filter:3,find:[4,10],find_best:3,find_g:4,find_gt:4,find_l:4,find_lt:4,first:[3,4,6,9,10],fit:[3,4,6,7,8,9,10],five:4,flag:4,flat:4,flow:5,flr:[1,2,6,9,10],flr_membership_matrix:6,flrg:[1,2,6,10],flrg_lhs_conditional_prob:6,flrg_lhs_unconditional_prob:6,flrg_rhs_conditional_prob:6,flrgtree:4,flrgtreenod:4,foreast:3,forecast:[3,4,6,7,8,9,10,11],forecast_ahead:[4,6,10],forecast_ahead_distribut:[3,4,6,7],forecast_ahead_interv:[3,4,6,7],forecast_distribut:[3,4,6,7],forecast_interv:[3,4,6,7,9],format_data:8,forward:4,found:4,frederico:6,from:[4,5,6,8],fset:8,fts:[1,2,3,6,7,8,9,10],fts_method:4,fuzz:6,fuzzi:[2,4,6,8,9,10,11],fuzzif:11,fuzzifi:[4,9],fuzzy_cmean:11,fuzzy_dist:11,fuzzydata:4,fuzzyf:4,fuzzyfy_inst:[4,8],fuzzyfy_seri:4,fuzzyfy_series_old:4,fuzzyseri:9,fuzzyset:[1,2,6,9,10],gadelha:6,gani:6,garibaldi:9,gaussian:[4,5],gaussmf:4,gener:[4,5,11],generate_flr:8,generate_flrg:[6,8,9,10],generate_gaussian_linear:5,generate_high_order_recurrent_flr:4,generate_indexed_flr:4,generate_lhs_flr:8,generate_lhs_flrg:6,generate_non_recurrent_flr:4,generate_recurrent_flr:4,generate_uniform_linear:5,get:[3,4,5],get_benchmark_interval_method:3,get_benchmark_point_method:3,get_benchmark_probabilistic_method:3,get_data:[5,10],get_data_by_season:10,get_datafram:5,get_dataframe_from_bd:3,get_distribution_interquantil:7,get_distribution_statist:3,get_fuzzyset:4,get_index:10,get_index_by_season:10,get_interv:7,get_interval_method:3,get_interval_statist:3,get_kei:[4,6,9,10],get_low:[4,6
,9],get_maximum_membership_fuzzyset:4,get_maximum_membership_fuzzyset_index:4,get_membership:[4,6,8,9],get_midpoint:[4,6,9,10],get_models_forecast:7,get_nam:11,get_point:7,get_point_method:3,get_point_statist:3,get_polynomial_perturb:9,get_probabilistic_method:3,get_season_by_index:10,get_season_of_data:10,get_sequence_membership:6,get_uod:4,get_upp:[4,6,9],getchildren:4,getstr:4,given:[3,4,9,10,11],glass:5,gov:5,grant_bound:4,greater:4,grid:[1,2,3,10],gridpartition:[3,11],group:[4,6,8,9,10],hand:4,handl:[4,6],hard:4,harmoni:6,has_interval_forecast:4,has_point_forecast:4,has_probability_forecast:4,has_season:4,heavysid:3,heavyside_cdf:3,height:11,henon:[1,2],heteroskedast:9,high:[3,4,6,9],highorderflrg:6,highorderft:[6,9],highordernonstationaryflrg:9,highordernonstationaryft:9,histogram:12,hoang:6,hoft:[1,2,9],honsft:[2,6],horizon:[3,4],horizont:5,hossein:6,hour:10,hour_of_dai:10,hour_of_month:10,hour_of_week:10,hour_of_year:10,http:5,http_server:4,huarng:[1,2],huarngpartition:11,hwang:[1,2],hybrid:6,identifi:[3,4,6,9,10],ieee:[6,9],ift:[1,2],ignor:[3,10],imag:[4,11],implement:[4,10],improv:[6,11],improvedweightedflrg:6,improvedweightedft:6,inc:[3,4],increment:[4,5],ind:10,index:[0,3,4,7,10],index_field:10,index_season:10,indexedflr:4,indic:[3,4],inequ:3,infer:5,infil:3,inform:[3,4],informationgain:11,initi:[4,5],initial_valu:5,inmet:[1,2],innov:6,inp:5,input:[4,6,8,9,10],insert:4,insert_benchmark:3,insert_right:4,insid:[3,4,10],inst:[4,9],instanc:9,instead:4,instituto:5,integ:4,integr:5,intel:6,intellig:6,intern:6,internet:5,interpol:3,interpret:4,interv:[3,4,6,7,9,11,12],interval_dataframe_analytic_column:3,interval_dataframe_synthetic_column:3,interval_heurist:6,interval_quantil:6,interval_sliding_window:3,interval_to_interv:3,intervalft:6,introspect:4,invers:4,is_high_ord:4,is_multivari:4,ismail:6,ismailefendi:[1,2],item:4,itemgett:4,iter:[4,5],its:[5,6,8,10],jaroszewski:9,javedani:6,jeng:6,job:3,joblib:3,jonathan:9,jone:4,journal:5,jun:11,kde:[1,2],kei:4,kernel:12,kernel_funct:12,kernelsmooth:12,knearestneighbor:3,knn:[1,2],kullbackleiblerdiverg:12,kwarg:[3,4,6,7,8,9,10,11,12],label:[3,12],lag:[4,5],largest:4,last:4,lcolor:3,lee:6,left:4,legend:[3,4],len_tot:4,length:[3,4,6,7,8,9,10,11],less:4,lett:5,level:4,lgd:4,lhs_conditional_prob:6,librari:2,like:4,limit:4,lin:11,linear:[5,9],linearmodel:3,linearseasonalindex:10,linewidth:3,linspac:5,list:[3,4,5,6,7,8,9,10,11],ljung:3,lo_param:3,load:[4,6],load_env:4,load_obj:4,local:5,locat:[4,9],location_param:9,log:4,logic:[4,6,8,9,10],logist:4,logistic_map:[1,2],look:4,lookup:4,lorentz:[1,2],lorenz:5,loss:3,lower:[4,5,6,9],lower_set:11,mackei:5,mackey_glass:[1,2],mai:5,malaysia:6,mandatori:4,map:[3,5],mape:3,mape_interv:3,marcin:9,mass:4,match:4,math:[5,11],mathemat:5,matplotlib:4,max:[3,4],max_inc:5,max_ini:5,max_lag:[3,4,6,7,8,9,10],max_ord:3,maxim:4,maximum:[4,11],mean:[3,4,5],measur:[1,2],mech:6,membership:[1,2,6,8,9,10,11],membership_funct:11,memori:4,merg:4,meta:7,meteorologia:5,method:[3,4,5,6,7,8,9,10,11],metric:3,mft:3,midpoint:[4,9],min:4,min_inc:5,min_ini:5,min_ord:4,ming:6,minim:[3,4,6,7,8,9,10],minimum:4,minute_of_dai:10,minute_of_hour:10,minute_of_month:10,minute_of_week:10,minute_of_year:10,model:[1,2,3,4,5,11],modelo:3,models_fo:3,models_ho:3,modul:[0,1],month:10,monthli:3,move:3,msft:[2,6],mu_inc:5,mu_ini:5,much:4,multi:[3,4,10],multiseason:[2,6],multiseasonalft:10,multivari:[2,4,5,6],musikasuwan:9,mvft:[2,6],nacion:5,naiv:[1,2],name:[3,4,5,7,8,9,10,11],nasdaq:[1,2],natur:5,nbin:[4,6],ndata:[3,4,6,9],nearest:3,need:4,neighbor:
3,nice:4,node:[3,4],nois:9,noise_param:9,non:[3,4,9],none:[3,4,5,7,8,9,10,11,12],nonperiod:5,nonstationari:[2,4,6],nonstationaryflrg:9,nonstationaryft:9,norm:6,norton:5,nov:11,now:4,npart:[9,11],nsft:[2,6],num:5,num_batch:4,num_season:10,number:[3,4,5,6,7,10,11,12],numpi:[4,5],obj:[3,4,11],object:[3,4,8,10,11,12],objectsist:3,occur:4,occurr:4,old:4,older:4,oldest:4,onc:4,one:[3,4,6,7,8,9,10],onli:3,only_lin:9,open_benchmark_db:3,oper:4,operation_matrix:6,option:3,order:[3,4,6,9,10],ordered_set:[4,9],ordin:4,org:5,origin:[3,4],original_max:4,original_min:4,oscil:5,other:[4,7,8],otherwis:3,outfil:3,output:11,over:4,own:8,packag:1,page:0,panda:[3,4,5,10],par1:9,par2:9,parallel:3,parallel_benchmark:[1,2],parallel_util:[1,2],param:[3,4,8,9,10,11],paramet:[3,4,5,6,7,8,9,10,11,12],part:9,partit:[3,4,9,10,11],partition:[1,2,3,4,6,7,8],partition_funct:[4,6],partitioners_method:3,partitioners_model:3,pass:3,past:4,path:[3,4,11],pattern:4,pct:9,pdf:12,percent:3,percentag:3,percentu:[3,4],perform:[9,10,11],perform_loc:9,perform_width:9,period:9,persist:4,persist_env:4,persist_obj:4,person:4,pertub:9,perturb:[2,6],perturbate_paramet:9,perturbation_factor:9,perturbation_factors__old:9,pesquisa:5,pftsexploreorderandpartit:3,phy:[5,6],physiolog:5,pictur:[3,4],pierc:3,pinbal:3,pinball_mean:3,plambda:4,plot:[3,10,11,12],plot_compared_intervals_ahead:3,plot_compared_seri:3,plot_dataframe_interv:3,plot_dataframe_interval_pinbal:3,plot_dataframe_point:3,plot_dataframe_probabilist:3,plot_density_rectang:3,plot_distribut:3,plot_interv:3,plot_partition:11,plot_point:3,plot_probability_distribut:3,plot_residu:3,plot_rul:4,plot_set:[9,11],plot_sets_condit:9,plotcompar:3,plotforecast:3,plotresidu:3,pmf:[3,11],pmf_to_cdf:3,point:[3,4,6,7,8,9,10,12],point_dataframe_analytic_column:3,point_dataframe_synthetic_column:3,point_expected_valu:6,point_heurist:6,point_sliding_window:3,point_to_interv:3,poit:3,poly_width:9,polynomi:9,polynomialnonstationarypartition:9,posit:[4,10],post:4,posterior:3,postprocess:4,power:[4,6],pprint:4,pre:4,prebuilt:3,predict:[3,4],prefix:11,preprocess:[4,8],print_distribution_statist:3,print_interval_statist:3,print_point_statist:3,probabil:3,probabilist:[1,2,3,4,6,7],probabilistic_dataframe_analytic_column:3,probabilistic_dataframe_synthetic_column:3,probabilisticweightedflrg:6,probabilisticweightedft:6,probabilitydist:3,probabilitydistribut:[1,2,3],probabl:[3,4,6,7,12],problem:6,procedur:[3,4],process:[3,4,5],process_common_data:3,process_interval_job:3,process_point_job:3,process_probabilistic_job:3,product:6,progress:3,project:11,provid:4,pseudologlikelihood:12,pwflrg:6,pwft:[1,2],python:2,q05:3,q25:3,q75:3,q95:3,quantil:[3,7,12],quantile_regress:3,quantileregress:3,quantreg:[1,2],rais:4,random_walk:5,rang:3,rank:3,rate:6,read:5,real:4,record:4,recurr:4,red:3,reference_data:3,refin:6,regress:3,relationship:[4,6,8,9,10],remov:4,ren:6,replac:3,repr:4,repres:[4,12],residu:3,residualanalysi:[1,2],resolut:3,respect:4,respons:10,result:[3,4],revers:4,review:3,rhs_conditional_prob:6,rhs_unconditional_prob:6,right:4,ringgit:6,rmse:3,rmse_interv:3,rmseavg:3,rmsestd:3,rng:9,robert:5,roger:4,roi:4,root:3,rossler:[1,2],rule:[4,8],rules_by_axi:4,run_ahead:3,run_interv:3,run_point:3,run_probabilist:3,sadaei:[1,2],salang:9,same:4,sampl:[3,5,6],sampler:7,save:[3,4,9,11],save_best:3,save_dataframe_interv:3,save_dataframe_point:3,save_dataframe_probabilist:3,save_model:4,scale:[3,4],scale_down:9,scale_param:3,scale_up:9,scan:4,scienc:5,score:3,search:[0,4,6],season:[2,3,4,6],seasonalensembleft:7,seasonalflrg
:10,seasonalft:10,seasonalindex:[2,6],second:10,second_of_dai:10,second_of_hour:10,second_of_minut:10,selecaosimples_menorrms:3,sep:5,separ:5,sequenc:4,seri:[2,3,4,5,6,7,8,9,10,11],set:[3,4,6,8,9,10,11,12],set_data:10,set_lh:8,set_ord:4,set_rh:8,set_transform:7,setnam:11,sever:[3,11],severiano:6,sft:[2,6],shape:4,sharp:3,sharpavg:3,sharpstd:3,shortnam:4,show:[4,11],show_and_save_imag:4,shyi:6,side:4,sigma_inc:5,sigma_ini:5,sigmf:4,sigmoid:4,silva:6,simpl:[5,10],simple_model_predict:4,simple_model_train:4,simplenonstationary_gridpartitioner_build:9,simplenonstationarypartition:9,simpler:4,simplesearch_rms:3,singl:4,single_plot_residu:3,sintet:3,sistema:5,size:[3,4,9,11],slice:4,slide:[3,4],sliding_window:4,sliding_window_benchmark:3,smape:3,smith:4,smooth:4,social:11,solar:6,sonda:[1,2],song:[1,2,10],sort:4,sort_ascend:3,sort_column:3,sortedcollect:[1,2],sourc:[3,4,5,6,7,8,9,10,11,12],sp500:[1,2],space:3,specif:[3,4,6,7,8,9,10],split:[3,4,11],splitabov:11,splitbelow:11,sqlite3:3,squar:3,ssci:6,standard:[3,5],start:[3,4,9],start_dispy_clust:4,stat:[3,6],station:5,stationari:9,statist:3,statsmodel:3,step:[3,4,6,7,8,9,10],steps_ahead:[3,4],stochast:5,stop_dispy_clust:4,store:[3,4],strang:5,string:[4,8,11],strip_datepart:10,structur:4,submodul:1,subpackag:1,sum:6,sunspot:[1,2],superset:4,support:4,symmetr:3,symposium:6,synthet:[3,5],syst:[6,10,11],system:[5,6,9],tabular_dataframe_column:3,tag:3,taiex:[1,2,6],tam:[3,9,11,12],target:3,tau:[3,5],technol:11,tempor:[4,10],term:[3,6],test:[3,4],test_data:3,than:4,thei:4,theil:3,theilsinequ:3,theoret:3,thi:[4,5,6,9,10],thoma:4,those:4,thread:3,thres1:11,thres2:11,threshold:11,time:[2,3,4,5,6,7,8,9,10,11],time_from:3,time_to:3,timegridpartition:10,times2:3,titl:[3,11,12],tradit:6,train:[3,4,6,7,8,9,10,11],train_data:[3,7],train_individual_model:7,train_method:4,train_paramet:4,transact:9,transform:[1,2,3,8,9,11],transformations_param:4,transit:4,trapezoid:[4,11],trapmf:4,tree:[1,2],trend:6,trendweightedflrg:6,trendweightedft:6,triangular:4,trigger:4,trimf:4,tsa:3,two:5,type:[3,4,5,12],typeonlegend:3,uavg:3,under:4,unified_scaled_interv:3,unified_scaled_interval_pinbal:3,unified_scaled_point:3,unified_scaled_probabilist:3,uniform:5,uniqu:[4,6,9,10],uniquefilenam:4,unit:10,univari:5,univers:[3,4,9,10,11,12],unpack_arg:9,uod:[4,6,12],uod_clip:4,up_param:3,update_model:6,update_uod:7,updateuod:[4,8],upper:[4,5,6,9],upper_set:11,url:5,use:4,used:[3,4,6,11],using:[4,6],ustatist:3,ustd:3,usual:4,util:[1,2,6],val:11,valid:4,valu:[3,4,5,6,7,8,9,10,12],valueerror:4,variabl:[2,4,6],varianc:[4,5],veri:[5,6],verif:3,visualize_distribut:6,vmax:5,vmin:5,vol:[6,10,11],weather:3,weight:6,weightedflrg:6,weightedft:6,when:4,where:[3,4],which:4,white_nois:5,width:[9,11,12],width_param:9,window:[3,4],window_index:9,window_kei:3,window_s:9,windows:[3,4],winkler:3,winkler_mean:3,winkler_scor:3,without:4,work:4,www:5,year:10,yeh:11,you:4,young:4,younger:4,youngest:4},titles:["Welcome to pyFTS\u2019s documentation!","pyFTS","pyFTS package","pyFTS.benchmarks package","pyFTS.common package","pyFTS.data package","pyFTS.models package","pyFTS.models.ensemble package","pyFTS.models.multivariate package","pyFTS.models.nonstationary package","pyFTS.models.seasonal package","pyFTS.partitioners package","pyFTS.probabilistic 
package"],titleterms:{airpasseng:5,arima:3,artifici:5,benchmark:3,chen:6,cheng:6,cmean:11,cmsft:10,common:[4,5,8,9,10],composit:4,conf:2,content:[2,3,4,5,6,7,8,9,10,11,12],cvft:9,data:5,distributed_benchmark:3,document:0,enrol:5,ensembl:7,entropi:11,fcm:11,flr:[4,8],flrg:[4,8,9],fts:4,fuzzyset:4,grid:11,henon:5,hoft:6,honsft:9,huarng:11,hwang:6,ift:6,indic:0,inmet:5,ismailefendi:6,kde:12,knn:3,logistic_map:5,lorentz:5,mackey_glass:5,measur:3,membership:4,model:[6,7,8,9,10],modul:[2,3,4,5,6,7,8,9,10,11,12],msft:10,multiseason:7,multivari:8,mvft:8,naiv:3,nasdaq:5,nonstationari:9,nsft:9,packag:[2,3,4,5,6,7,8,9,10,11,12],parallel_benchmark:3,parallel_util:11,partition:[9,10,11],perturb:9,probabilist:12,probabilitydistribut:12,pwft:6,pyft:[0,1,2,3,4,5,6,7,8,9,10,11,12],quantreg:3,residualanalysi:3,rossler:5,sadaei:6,season:10,seasonalindex:10,sft:10,sonda:5,song:6,sortedcollect:4,sp500:5,submodul:[2,3,4,5,6,7,8,9,10,11,12],subpackag:[2,6],sunspot:5,tabl:0,taiex:5,transform:4,tree:4,util:[3,4,9,11],variabl:8,welcom:0}}) \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000..82665c4 --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,170 @@ +# -*- coding: utf-8 -*- +# +# Configuration file for the Sphinx documentation builder. +# +# This file does only contain a selection of the most common options. For a +# full list see the documentation: +# http://www.sphinx-doc.org/en/stable/config + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +sys.path.insert(0, os.path.abspath('../pyFTS')) + + +# -- Project information ----------------------------------------------------- + +project = 'pyFTS' +copyright = '2018, Machine Intelligence and Data Science Laboratory - UFMG - Brazil' +author = 'Machine Intelligence and Data Science Laboratory - UFMG - Brazil' + +# The short X.Y version +version = '' +# The full version, including alpha/beta/rc tags +release = '1.2.3' + + +# -- General configuration --------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.intersphinx', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.ifconfig', + 'sphinx.ext.viewcode', + 'sphinx.ext.githubpages', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. 
+# This pattern also affects html_static_path and html_extra_path . +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'alabaster' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = {} + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Custom sidebar templates, must be a dictionary that maps document names +# to template names. +# +# The default sidebars (for documents that don't match any pattern) are +# defined by theme itself. Builtin themes are using these templates by +# default: ``['localtoc.html', 'relations.html', 'sourcelink.html', +# 'searchbox.html']``. +# +# html_sidebars = {} + + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'pyFTSdoc' + + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'pyFTS.tex', 'pyFTS Documentation', + 'Machine Intelligence and Data Science Laboratory - UFMG - Brazil', 'manual'), +] + + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'pyfts', 'pyFTS Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'pyFTS', 'pyFTS Documentation', + author, 'pyFTS', 'One line description of project.', + 'Miscellaneous'), +] + + +# -- Extension configuration ------------------------------------------------- + +# -- Options for intersphinx extension --------------------------------------- + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'https://docs.python.org/': None} diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000..5558b62 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,20 @@ +.. pyFTS documentation master file, created by + sphinx-quickstart on Wed Aug 29 13:47:28 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to pyFTS's documentation! 
+================================= + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..33cbc6a --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,36 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build +set SPHINXPROJ=pyFTS + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/docs/modules.rst b/docs/modules.rst new file mode 100644 index 0000000..9ae9899 --- /dev/null +++ b/docs/modules.rst @@ -0,0 +1,7 @@ +pyFTS +===== + +.. toctree:: + :maxdepth: 4 + + pyFTS diff --git a/docs/pyFTS.benchmarks.rst b/docs/pyFTS.benchmarks.rst new file mode 100644 index 0000000..ff8abee --- /dev/null +++ b/docs/pyFTS.benchmarks.rst @@ -0,0 +1,94 @@ +pyFTS.benchmarks package +======================== + +Submodules +---------- + +pyFTS.benchmarks.Measures module +-------------------------------- + +.. automodule:: pyFTS.benchmarks.Measures + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.ResidualAnalysis module +---------------------------------------- + +.. automodule:: pyFTS.benchmarks.ResidualAnalysis + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.Util module +---------------------------- + +.. automodule:: pyFTS.benchmarks.Util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.arima module +----------------------------- + +.. automodule:: pyFTS.benchmarks.arima + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.benchmarks module +---------------------------------- + +.. automodule:: pyFTS.benchmarks.benchmarks + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.distributed\_benchmarks module +----------------------------------------------- + +.. automodule:: pyFTS.benchmarks.distributed_benchmarks + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.knn module +--------------------------- + +.. automodule:: pyFTS.benchmarks.knn + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.naive module +----------------------------- + +.. automodule:: pyFTS.benchmarks.naive + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.parallel\_benchmarks module +-------------------------------------------- + +.. automodule:: pyFTS.benchmarks.parallel_benchmarks + :members: + :undoc-members: + :show-inheritance: + +pyFTS.benchmarks.quantreg module +-------------------------------- + +.. automodule:: pyFTS.benchmarks.quantreg + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: pyFTS.benchmarks + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.common.rst b/docs/pyFTS.common.rst new file mode 100644 index 0000000..69c5903 --- /dev/null +++ b/docs/pyFTS.common.rst @@ -0,0 +1,94 @@ +pyFTS.common package +==================== + +Submodules +---------- + +pyFTS.common.Composite module +----------------------------- + +.. automodule:: pyFTS.common.Composite + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.FLR module +----------------------- + +.. automodule:: pyFTS.common.FLR + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.FuzzySet module +---------------------------- + +.. automodule:: pyFTS.common.FuzzySet + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.Membership module +------------------------------ + +.. automodule:: pyFTS.common.Membership + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.SortedCollection module +------------------------------------ + +.. automodule:: pyFTS.common.SortedCollection + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.Transformations module +----------------------------------- + +.. automodule:: pyFTS.common.Transformations + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.Util module +------------------------ + +.. automodule:: pyFTS.common.Util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.flrg module +------------------------ + +.. automodule:: pyFTS.common.flrg + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.fts module +----------------------- + +.. automodule:: pyFTS.common.fts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.common.tree module +------------------------ + +.. automodule:: pyFTS.common.tree + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.common + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.data.rst b/docs/pyFTS.data.rst new file mode 100644 index 0000000..f451c32 --- /dev/null +++ b/docs/pyFTS.data.rst @@ -0,0 +1,134 @@ +pyFTS.data package +================== + +Submodules +---------- + +pyFTS.data.AirPassengers module +------------------------------- + +.. automodule:: pyFTS.data.AirPassengers + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.Enrollments module +----------------------------- + +.. automodule:: pyFTS.data.Enrollments + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.INMET module +----------------------- + +.. automodule:: pyFTS.data.INMET + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.NASDAQ module +------------------------ + +.. automodule:: pyFTS.data.NASDAQ + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.SONDA module +----------------------- + +.. automodule:: pyFTS.data.SONDA + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.SP500 module +----------------------- + +.. automodule:: pyFTS.data.SP500 + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.TAIEX module +----------------------- + +.. automodule:: pyFTS.data.TAIEX + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.artificial module +---------------------------- + +.. automodule:: pyFTS.data.artificial + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.common module +------------------------ + +.. 
automodule:: pyFTS.data.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.henon module +----------------------- + +.. automodule:: pyFTS.data.henon + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.logistic\_map module +------------------------------- + +.. automodule:: pyFTS.data.logistic_map + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.lorentz module +------------------------- + +.. automodule:: pyFTS.data.lorentz + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.mackey\_glass module +------------------------------- + +.. automodule:: pyFTS.data.mackey_glass + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.rossler module +------------------------- + +.. automodule:: pyFTS.data.rossler + :members: + :undoc-members: + :show-inheritance: + +pyFTS.data.sunspots module +-------------------------- + +.. automodule:: pyFTS.data.sunspots + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.data + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.models.ensemble.rst b/docs/pyFTS.models.ensemble.rst new file mode 100644 index 0000000..93822f8 --- /dev/null +++ b/docs/pyFTS.models.ensemble.rst @@ -0,0 +1,30 @@ +pyFTS.models.ensemble package +============================= + +Submodules +---------- + +pyFTS.models.ensemble.ensemble module +------------------------------------- + +.. automodule:: pyFTS.models.ensemble.ensemble + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.ensemble.multiseasonal module +------------------------------------------ + +.. automodule:: pyFTS.models.ensemble.multiseasonal + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models.ensemble + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.models.multivariate.rst b/docs/pyFTS.models.multivariate.rst new file mode 100644 index 0000000..10ac3ef --- /dev/null +++ b/docs/pyFTS.models.multivariate.rst @@ -0,0 +1,54 @@ +pyFTS.models.multivariate package +================================= + +Submodules +---------- + +pyFTS.models.multivariate.FLR module +------------------------------------ + +.. automodule:: pyFTS.models.multivariate.FLR + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.common module +--------------------------------------- + +.. automodule:: pyFTS.models.multivariate.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.flrg module +------------------------------------- + +.. automodule:: pyFTS.models.multivariate.flrg + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.mvfts module +-------------------------------------- + +.. automodule:: pyFTS.models.multivariate.mvfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.multivariate.variable module +----------------------------------------- + +.. automodule:: pyFTS.models.multivariate.variable + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. 
automodule:: pyFTS.models.multivariate + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.models.nonstationary.rst b/docs/pyFTS.models.nonstationary.rst new file mode 100644 index 0000000..351df32 --- /dev/null +++ b/docs/pyFTS.models.nonstationary.rst @@ -0,0 +1,78 @@ +pyFTS.models.nonstationary package +================================== + +Submodules +---------- + +pyFTS.models.nonstationary.common module +---------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.cvfts module +--------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.cvfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.flrg module +-------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.flrg + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.honsfts module +----------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.honsfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.nsfts module +--------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.nsfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.partitioners module +---------------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.partitioners + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.perturbation module +---------------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.perturbation + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.nonstationary.util module +-------------------------------------- + +.. automodule:: pyFTS.models.nonstationary.util + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models.nonstationary + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.models.rst b/docs/pyFTS.models.rst new file mode 100644 index 0000000..91a0bd7 --- /dev/null +++ b/docs/pyFTS.models.rst @@ -0,0 +1,104 @@ +pyFTS.models package +==================== + +Subpackages +----------- + +.. toctree:: + + pyFTS.models.ensemble + pyFTS.models.multivariate + pyFTS.models.nonstationary + pyFTS.models.seasonal + +Submodules +---------- + +pyFTS.models.chen module +------------------------ + +.. automodule:: pyFTS.models.chen + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.cheng module +------------------------- + +.. automodule:: pyFTS.models.cheng + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.hofts module +------------------------- + +.. automodule:: pyFTS.models.hofts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.hwang module +------------------------- + +.. automodule:: pyFTS.models.hwang + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.ifts module +------------------------ + +.. automodule:: pyFTS.models.ifts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.ismailefendi module +-------------------------------- + +.. automodule:: pyFTS.models.ismailefendi + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.pwfts module +------------------------- + +.. automodule:: pyFTS.models.pwfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.sadaei module +-------------------------- + +.. 
automodule:: pyFTS.models.sadaei + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.song module +------------------------ + +.. automodule:: pyFTS.models.song + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.yu module +---------------------- + +.. automodule:: pyFTS.models.yu + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.models.seasonal.rst b/docs/pyFTS.models.seasonal.rst new file mode 100644 index 0000000..9c3cfba --- /dev/null +++ b/docs/pyFTS.models.seasonal.rst @@ -0,0 +1,62 @@ +pyFTS.models.seasonal package +============================= + +Submodules +---------- + +pyFTS.models.seasonal.SeasonalIndexer module +-------------------------------------------- + +.. automodule:: pyFTS.models.seasonal.SeasonalIndexer + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.cmsfts module +----------------------------------- + +.. automodule:: pyFTS.models.seasonal.cmsfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.common module +----------------------------------- + +.. automodule:: pyFTS.models.seasonal.common + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.msfts module +---------------------------------- + +.. automodule:: pyFTS.models.seasonal.msfts + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.partitioner module +---------------------------------------- + +.. automodule:: pyFTS.models.seasonal.partitioner + :members: + :undoc-members: + :show-inheritance: + +pyFTS.models.seasonal.sfts module +--------------------------------- + +.. automodule:: pyFTS.models.seasonal.sfts + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.models.seasonal + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.partitioners.rst b/docs/pyFTS.partitioners.rst new file mode 100644 index 0000000..b6ba008 --- /dev/null +++ b/docs/pyFTS.partitioners.rst @@ -0,0 +1,78 @@ +pyFTS.partitioners package +========================== + +Submodules +---------- + +pyFTS.partitioners.CMeans module +-------------------------------- + +.. automodule:: pyFTS.partitioners.CMeans + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Entropy module +--------------------------------- + +.. automodule:: pyFTS.partitioners.Entropy + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.FCM module +----------------------------- + +.. automodule:: pyFTS.partitioners.FCM + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Grid module +------------------------------ + +.. automodule:: pyFTS.partitioners.Grid + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Huarng module +-------------------------------- + +.. automodule:: pyFTS.partitioners.Huarng + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.Util module +------------------------------ + +.. automodule:: pyFTS.partitioners.Util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.parallel\_util module +---------------------------------------- + +.. automodule:: pyFTS.partitioners.parallel_util + :members: + :undoc-members: + :show-inheritance: + +pyFTS.partitioners.partitioner module +------------------------------------- + +.. 
automodule:: pyFTS.partitioners.partitioner + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.partitioners + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.probabilistic.rst b/docs/pyFTS.probabilistic.rst new file mode 100644 index 0000000..097a5a2 --- /dev/null +++ b/docs/pyFTS.probabilistic.rst @@ -0,0 +1,30 @@ +pyFTS.probabilistic package +=========================== + +Submodules +---------- + +pyFTS.probabilistic.ProbabilityDistribution module +-------------------------------------------------- + +.. automodule:: pyFTS.probabilistic.ProbabilityDistribution + :members: + :undoc-members: + :show-inheritance: + +pyFTS.probabilistic.kde module +------------------------------ + +.. automodule:: pyFTS.probabilistic.kde + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS.probabilistic + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/pyFTS.rst b/docs/pyFTS.rst new file mode 100644 index 0000000..707f7f7 --- /dev/null +++ b/docs/pyFTS.rst @@ -0,0 +1,34 @@ +pyFTS package +============= + +Subpackages +----------- + +.. toctree:: + + pyFTS.benchmarks + pyFTS.common + pyFTS.data + pyFTS.models + pyFTS.partitioners + pyFTS.probabilistic + +Submodules +---------- + +pyFTS.conf module +----------------- + +.. automodule:: pyFTS.conf + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: pyFTS + :members: + :undoc-members: + :show-inheritance: diff --git a/pyFTS/benchmarks/Measures.py b/pyFTS/benchmarks/Measures.py index 39c6fa9..2cae1a6 100644 --- a/pyFTS/benchmarks/Measures.py +++ b/pyFTS/benchmarks/Measures.py @@ -14,6 +14,7 @@ from pyFTS.probabilistic import ProbabilityDistribution def acf(data, k): """ Autocorrelation function estimate + :param data: :param k: :return: @@ -31,6 +32,7 @@ def acf(data, k): def rmse(targets, forecasts): """ Root Mean Squared Error + :param targets: :param forecasts: :return: @@ -45,6 +47,7 @@ def rmse(targets, forecasts): def rmse_interval(targets, forecasts): """ Root Mean Squared Error for interval forecasts + :param targets: :param forecasts: :return: @@ -56,6 +59,7 @@ def rmse_interval(targets, forecasts): def mape(targets, forecasts): """ Mean Absolute Percentage Error + :param targets: :param forecasts: :return: @@ -70,6 +74,7 @@ def mape(targets, forecasts): def smape(targets, forecasts, type=2): """ Symmetric Mean Absolute Percentage Error + :param targets: :param forecasts: :param type: @@ -95,6 +100,7 @@ def mape_interval(targets, forecasts): def UStatistic(targets, forecasts): """ Theil's U Statistic + :param targets: :param forecasts: :return: @@ -116,6 +122,7 @@ def UStatistic(targets, forecasts): def TheilsInequality(targets, forecasts): """ Theil’s Inequality Coefficient + :param targets: :param forecasts: :return: @@ -132,6 +139,7 @@ def TheilsInequality(targets, forecasts): def BoxPierceStatistic(data, h): """ Q Statistic for Box-Pierce test + :param data: :param h: :return: @@ -147,6 +155,7 @@ def BoxPierceStatistic(data, h): def BoxLjungStatistic(data, h): """ Q Statistic for Ljung–Box test + :param data: :param h: :return:
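The point accuracy measures above all have standard closed forms. As a reference, a minimal numpy sketch of three of them (illustrative only; the '_example' names are hypothetical and this is not the pyFTS implementation):

import numpy as np

def rmse_example(targets, forecasts):
    # Root Mean Squared Error: sqrt(mean((target - forecast)^2))
    t, f = np.asarray(targets), np.asarray(forecasts)
    return np.sqrt(np.mean((t - f) ** 2))

def mape_example(targets, forecasts):
    # Mean Absolute Percentage Error: mean(|target - forecast| / |target|) * 100
    t, f = np.asarray(targets), np.asarray(forecasts)
    return np.mean(np.abs(t - f) / np.abs(t)) * 100.0

def smape_example(targets, forecasts):
    # Symmetric MAPE: mean(|forecast - target| / (|target| + |forecast|)) * 100
    t, f = np.asarray(targets), np.asarray(forecasts)
    return np.mean(np.abs(f - t) / (np.abs(t) + np.abs(f))) * 100.0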
@@ -186,7 +195,8 @@ def coverage(targets, forecasts): def pinball(tau, target, forecast): """ - Pinball loss function. Measure the distance of forecast to the tau-quantile of the target + Pinball loss function. Measures the distance of the forecast to the tau-quantile of the target + :param tau: quantile value in the range (0,1) :param target: :param forecast: @@ -201,6 +211,7 @@ def pinball(tau, target, forecast): def pinball_mean(tau, targets, forecasts): """ Mean pinball loss value of the forecast for a given tau-quantile of the targets + :param tau: quantile value in the range (0,1) :param targets: list of target values :param forecasts: list of prediction intervals @@ -227,6 +238,7 @@ def winkler_score(tau, target, forecast): def winkler_mean(tau, targets, forecasts): """ Mean Winkler score value of the forecast for a given tau-quantile of the targets + :param tau: quantile value in the range (0,1) :param targets: list of target values :param forecasts: list of prediction intervals @@ -280,6 +292,7 @@ def heavyside_cdf(bins, targets): def crps(targets, densities): ''' Continuous Ranked Probability Score + :param targets: a list with the target values :param densities: a list with pyFTS.probabilistic.ProbabilityDistribution objects :return: float @@ -299,6 +312,7 @@ def crps(targets, densities): def get_point_statistics(data, model, **kwargs): ''' Consolidate all measures for point forecasters + :param data: test data :param model: FTS model with point forecasting capability :param kwargs: @@ -356,6 +370,7 @@ def get_point_statistics(data, model, **kwargs): def get_interval_statistics(data, model, **kwargs): ''' Consolidate all measures for interval forecasters + :param data: test data :param model: FTS model with interval forecasting capability :param kwargs: @@ -402,6 +417,7 @@ def get_interval_statistics(data, model, **kwargs): def get_distribution_statistics(data, model, **kwargs): ''' Get CRPS statistic and time for a forecasting model + :param data: test data :param model: FTS model with probabilistic forecasting capability :param kwargs: diff --git a/pyFTS/benchmarks/ResidualAnalysis.py b/pyFTS/benchmarks/ResidualAnalysis.py index 9f63089..ae55eb5 100644 --- a/pyFTS/benchmarks/ResidualAnalysis.py +++ b/pyFTS/benchmarks/ResidualAnalysis.py @@ -20,6 +20,7 @@ def residuals(targets, forecasts, order=1): def chi_squared(q, h): """ Chi-Squared value + :param q: :param h: :return: @@ -31,6 +32,7 @@ def chi_squared(q, h): def compare_residuals(data, models): """ Compare the residual statistics of several models + :param data: test data :param models: :return: a Pandas dataframe with the Box-Ljung statistic for each model @@ -53,6 +55,7 @@ def compare_residuals(data, models): def plotResiduals(targets, models, tam=[8, 8], save=False, file=None): """ Plot residuals and statistics + :param targets: :param models: :param tam: diff --git a/pyFTS/benchmarks/Util.py b/pyFTS/benchmarks/Util.py index 7a68d9c..6dd39ca 100644 --- a/pyFTS/benchmarks/Util.py +++ b/pyFTS/benchmarks/Util.py @@ -164,6 +164,7 @@ def point_dataframe_analytic_columns(experiments): def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u, steps, method): """ Create a dataframe to store the benchmark results + :param experiments: dictionary with the execution results :param file: :param objs: diff --git a/pyFTS/benchmarks/benchmarks.py b/pyFTS/benchmarks/benchmarks.py index ef0859b..85d39b7 100644 --- a/pyFTS/benchmarks/benchmarks.py +++ b/pyFTS/benchmarks/benchmarks.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf8 -*- -"""Benchmarks to FTS methods""" +"""Benchmark methods for FTS methods""" import datetime
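The pinball and Winkler scores documented in Measures.py above both reward well-calibrated quantiles and intervals. For intuition, a minimal sketch of the standard pinball (quantile) loss for a single observation (hypothetical 'pinball_example' name; not the library's code):

def pinball_example(tau, target, forecast):
    # Undershooting the tau-quantile costs tau per unit; overshooting costs (1 - tau).
    if target >= forecast:
        return tau * (target - forecast)
    return (1.0 - tau) * (forecast - target)

# pinball_example(0.95, 10.0, 8.0) -> 1.9: falling below a high quantile is penalized heavily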
@@ -102,28 +102,27 @@ def sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs): :param train: percentage of the sliding window data used to train the models :param kwargs: dict, optional arguments - :keyword - benchmark_methods: a list with Non FTS models to benchmark. The default is None. - benchmark_methods_parameters: a list with Non FTS models parameters. The default is None. - benchmark_models: A boolean value indicating if external FTS methods will be used on benchmark. The default is False. - build_methods: A boolean value indicating if the default FTS methods will be used on benchmark. The default is True. - dataset: the dataset name to identify the current set of benchmarks results on database. - distributed: A boolean value indicating if the forecasting procedure will be distributed in a dispy cluster. . The default is False - file: file path to save the results. The default is benchmarks.db. - inc: a float on interval [0,1] indicating the percentage of the windowsize to move the window - methods: a list with FTS class names. The default depends on the forecasting type and contains the list of all FTS methods. - models: a list with prebuilt FTS objects. The default is None. - nodes: a list with the dispy cluster nodes addresses. The default is [127.0.0.1]. - orders: a list with orders of the models (for high order models). The default is [1,2,3]. - partitions: a list with the numbers of partitions on the Universe of Discourse. The default is [10]. - partitioners_models: a list with prebuilt Universe of Discourse partitioners objects. The default is None. - partitioners_methods: a list with Universe of Discourse partitioners class names. The default is [partitioners.Grid.GridPartitioner]. - progress: If true a progress bar will be displayed during the benchmarks. The default is False. - start: in the multi step forecasting, the index of the data where to start forecasting. The default is 0. - steps_ahead: a list with the forecasting horizons, i. e., the number of steps ahead to forecast. The default is 1. - tag: a name to identify the current set of benchmarks results on database. - type: the forecasting type, one of these values: point(default), interval or distribution. The default is point. - transformations: a list with data transformations do apply . The default is [None]. + :keyword benchmark_methods: a list with non-FTS models to benchmark. The default is None. + :keyword benchmark_methods_parameters: a list with non-FTS model parameters. The default is None. + :keyword benchmark_models: A boolean value indicating if external FTS methods will be used on the benchmark. The default is False. + :keyword build_methods: A boolean value indicating if the default FTS methods will be used on the benchmark. The default is True. + :keyword dataset: the dataset name to identify the current set of benchmark results in the database. + :keyword distributed: A boolean value indicating if the forecasting procedure will be distributed in a dispy cluster. The default is False. + :keyword file: file path to save the results. The default is benchmarks.db. + :keyword inc: a float on the interval [0,1] indicating the percentage of the windowsize to move the window + :keyword methods: a list with FTS class names. The default depends on the forecasting type and contains the list of all FTS methods. + :keyword models: a list with prebuilt FTS objects. The default is None. + :keyword nodes: a list with the dispy cluster node addresses. The default is [127.0.0.1]. + :keyword orders: a list with orders of the models (for high order models). The default is [1,2,3]. 
+ :keyword partitions: a list with the numbers of partitions on the Universe of Discourse. The default is [10]. + :keyword partitioners_models: a list with prebuilt Universe of Discourse partitioners objects. The default is None. + :keyword partitioners_methods: a list with Universe of Discourse partitioners class names. The default is [partitioners.Grid.GridPartitioner]. + :keyword progress: If True a progress bar will be displayed during the benchmarks. The default is False. + :keyword start: in the multi step forecasting, the index of the data where to start forecasting. The default is 0. + :keyword steps_ahead: a list with the forecasting horizons, i.e., the number of steps ahead to forecast. The default is 1. + :keyword tag: a name to identify the current set of benchmark results in the database. + :keyword type: the forecasting type, one of these values: point (default), interval or distribution. The default is point. + :keyword transformations: a list with data transformations to apply. The default is [None]. """ tag = __pop('tag', None, kwargs) @@ -321,6 +320,7 @@ def sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs): def run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwargs): """ Point forecast benchmark function to be executed on cluster nodes + :param mfts: FTS model :param partitioner: Universe of Discourse partitioner :param train_data: data used to train the model @@ -384,6 +384,7 @@ def run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwarg def run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kwargs): """ Interval forecast benchmark function to be executed on cluster nodes + :param mfts: FTS model :param partitioner: Universe of Discourse partitioner :param train_data: data used to train the model @@ -442,6 +443,7 @@ def run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kw def run_probabilistic(mfts, partitioner, train_data, test_data, window_key=None, **kwargs): """ Probabilistic forecast benchmark function to be executed on cluster nodes + :param mfts: FTS model :param partitioner: Universe of Discourse partitioner :param train_data: data used to train the model @@ -625,7 +627,8 @@ def plot_compared_intervals_ahead(original, models, colors, distributions, time_ save=False, file=None, tam=[20, 5], resolution=None, cmap='Blues', linewidth=1.5): """ - Plot the forecasts of several one step ahead models, by point or by interval + Plot the forecasts of several one step ahead models, by point or by interval + :param original: Original time series data (list) :param models: List of models to compare :param colors: List of models colors @@ -771,6 +774,7 @@ def plot_compared_series(original, models, colors, typeonlegend=False, save=Fals points=True, intervals=True, linewidth=1.5): """ Plot the forecasts of several one step ahead models, by point or by interval + :param original: Original time series data (list) :param models: List of models to compare :param colors: List of models colors diff --git a/pyFTS/common/Composite.py b/pyFTS/common/Composite.py index f182143..777739e 100644 --- a/pyFTS/common/Composite.py +++ b/pyFTS/common/Composite.py @@ -28,6 +28,7 @@ class FuzzySet(FuzzySet.FuzzySet): def membership(self, x): """ Calculate the membership value of a given input + :param x: input value :return: membership value of x at this fuzzy set """
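To make the sliding_window_benchmarks keyword list above concrete, a usage sketch follows; the dataset, window size and parameter values are arbitrary assumptions for illustration, not part of this changeset:

from pyFTS.benchmarks import benchmarks as bchmk
from pyFTS.partitioners import Grid
from pyFTS.data import TAIEX

data = TAIEX.get_data()  # univariate series to benchmark on

# Point forecasting over sliding windows of 1000 observations, moving 20% per step;
# 80% of each window trains the models, testing orders 1 to 3 and two partition counts.
bchmk.sliding_window_benchmarks(data, 1000, train=0.8, inc=0.2,
                                orders=[1, 2, 3], partitions=[10, 20],
                                partitioners_methods=[Grid.GridPartitioner],
                                type='point', tag='example', dataset='TAIEX',
                                file='benchmarks.db')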
@@ -39,6 +40,7 @@ class FuzzySet(FuzzySet.FuzzySet): def append(self, mf, parameters): """ Adds a new membership function to the composition + :param mf: :param parameters: :return: @@ -49,6 +51,7 @@ class FuzzySet(FuzzySet.FuzzySet): def append_set(self, set): """ Adds a new fuzzy set to the composition + :param set: the fuzzy set to append :return: diff --git a/pyFTS/common/FLR.py b/pyFTS/common/FLR.py index 2b585c1..5046b34 100644 --- a/pyFTS/common/FLR.py +++ b/pyFTS/common/FLR.py @@ -15,11 +15,11 @@ class FLR(object): def __init__(self, LHS, RHS): """ Creates a Fuzzy Logical Relationship - :param LHS: Left Hand Side fuzzy set - :param RHS: Right Hand Side fuzzy set """ self.LHS = LHS + """Left Hand Side fuzzy set""" self.RHS = RHS + """Right Hand Side fuzzy set""" def __str__(self): return str(self.LHS) + " -> " + str(self.RHS) @@ -30,12 +30,10 @@ class IndexedFLR(FLR): def __init__(self, index, LHS, RHS): """ Create a Season Indexed Fuzzy Logical Relationship - :param index: seasonal index - :param LHS: Left Hand Side fuzzy set - :param RHS: Right Hand Side fuzzy set """ super(IndexedFLR, self).__init__(LHS, RHS) self.index = index + """seasonal index""" def __str__(self): return str(self.index) + ": "+ str(self.LHS) + " -> " + str(self.RHS) @@ -44,6 +42,7 @@ def generate_high_order_recurrent_flr(fuzzyData): """ Create an ordered FLR set from a list of fuzzy sets with recurrence + :param fuzzyData: ordered list of fuzzy sets :return: ordered list of FLR """ @@ -65,6 +64,7 @@ def generate_high_order_recurrent_flr(fuzzyData): def generate_recurrent_flrs(fuzzyData): """ Create an ordered FLR set from a list of fuzzy sets with recurrence + :param fuzzyData: ordered list of fuzzy sets :return: ordered list of FLR """ @@ -82,6 +82,7 @@ def generate_recurrent_flrs(fuzzyData): def generate_non_recurrent_flrs(fuzzyData): """ Create an ordered FLR set from a list of fuzzy sets without recurrence + :param fuzzyData: ordered list of fuzzy sets :return: ordered list of FLR """ @@ -95,6 +96,7 @@ def generate_non_recurrent_flrs(fuzzyData): def generate_indexed_flrs(sets, indexer, data, transformation=None, alpha_cut=0.0): """ Create a season-indexed ordered FLR set from a list of fuzzy sets with recurrence + :param sets: fuzzy sets :param indexer: seasonality indexer :param data: original data diff --git a/pyFTS/common/FuzzySet.py b/pyFTS/common/FuzzySet.py index ec54ca6..69168d2 100644 --- a/pyFTS/common/FuzzySet.py +++ b/pyFTS/common/FuzzySet.py @@ -9,21 +9,24 @@ class FuzzySet(object): """ def __init__(self, name, mf, parameters, centroid, alpha=1.0, **kwargs): """ - Create a Fuzzy Set - :param name: fuzzy set name - :param mf: membership function - :param parameters: parameters of the membership function - :param centroid: fuzzy set center of mass + Create a Fuzzy Set """ self.name = name + """The fuzzy set name""" self.mf = mf + """The membership function""" self.parameters = parameters + """The parameters of the membership function""" self.centroid = centroid + """The fuzzy set center of mass (or midpoint)""" self.alpha = alpha + """The alpha cut value""" self.type = kwargs.get('type', 'common') + """The fuzzy set type (common, composite, nonstationary, etc)""" self.variable = kwargs.get('variable',None) - ":param Z: Partition function in respect to the membership function" + """In multivariate time series, indicates to which variable this fuzzy set belongs""" self.Z = None + """Partition function with respect to the membership function""" if self.mf == Membership.trimf: self.lower = min(parameters) self.upper = max(parameters)
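An FLR is simply an ordered pair of fuzzy sets, and the generators above differ only in how they treat repetitions. A tiny plain-Python sketch of the idea (illustrative, not the library's code):

# With recurrence, every consecutive pair of the fuzzified series becomes an FLR:
fuzzified = ['A1', 'A2', 'A2', 'A3', 'A2']
flrs = [(lhs, rhs) for lhs, rhs in zip(fuzzified, fuzzified[1:])]
# -> [('A1', 'A2'), ('A2', 'A2'), ('A2', 'A3'), ('A3', 'A2')]
# The non-recurrent variant keeps each distinct pair only once.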
@@ -43,6 +47,7 @@ class FuzzySet(object):
def partition_function(self,uod=None, nbins=100): """ Calculate the partition function over the membership function. + :param uod: :param nbins: :return:
@@ -59,6 +64,7 @@ class FuzzySet(object):
def set_ordered(fuzzySets): + """Order a fuzzy set list by their centroids""" if len(fuzzySets) > 0: tmp1 = [fuzzySets[k] for k in fuzzySets.keys()] return [k.name for k in sorted(tmp1, key=lambda x: x.centroid)]
@@ -67,6 +73,7 @@ def set_ordered(fuzzySets):
def fuzzyfy_instance(inst, fuzzySets, ordered_sets=None): """ Calculate the membership values for a data point given fuzzy sets + :param inst: data point :param fuzzySets: dict of fuzzy sets :return: array of membership values
@@ -84,6 +91,7 @@ def fuzzyfy_instance(inst, fuzzySets, ordered_sets=None):
def fuzzyfy_instances(data, fuzzySets, ordered_sets=None): """ Calculate the membership values for a list of data points given fuzzy sets + :param data: list of data points :param fuzzySets: dict of fuzzy sets :return: array of membership values
@@ -100,6 +108,7 @@ def fuzzyfy_instances(data, fuzzySets, ordered_sets=None):
def get_fuzzysets(inst, fuzzySets, ordered_sets=None, alpha_cut=0.0): """ Return the fuzzy sets whose membership value for inst is greater than the alpha_cut + :param inst: data point :param fuzzySets: dict of fuzzy sets :param alpha_cut: Minimal membership to be considered on the fuzzification process
@@ -115,6 +124,7 @@ def get_fuzzysets(inst, fuzzySets, ordered_sets=None, alpha_cut=0.0):
def get_maximum_membership_fuzzyset(inst, fuzzySets, ordered_sets=None): """ Fuzzify a data point, returning the fuzzy set with maximum membership value + :param inst: data point :param fuzzySets: dict of fuzzy sets :return: fuzzy set with maximum membership
@@ -129,6 +139,7 @@ def get_maximum_membership_fuzzyset(inst, fuzzySets, ordered_sets=None):
def get_maximum_membership_fuzzyset_index(inst, fuzzySets): """ Fuzzify a data point, returning the index of the fuzzy set with maximum membership value + :param inst: data point :param fuzzySets: dict of fuzzy sets :return: index of the fuzzy set with maximum membership
diff --git a/pyFTS/common/Membership.py b/pyFTS/common/Membership.py
index 79c00a3..dbf44cb 100644
--- a/pyFTS/common/Membership.py
+++ b/pyFTS/common/Membership.py
@@ -9,7 +9,8 @@ from pyFTS import *
def trimf(x, parameters): """ - Triangular fuzzy membership function + Triangular fuzzy membership function + :param x: data point :param parameters: a list with 3 real values :return: the membership value of x given the parameters
@@ -27,7 +28,8 @@ def trimf(x, parameters):
def trapmf(x, parameters): """ - Trapezoidal fuzzy membership function + Trapezoidal fuzzy membership function + :param x: data point :param parameters: a list with 4 real values :return: the membership value of x given the parameters
@@ -46,7 +48,8 @@ def trapmf(x, parameters):
def gaussmf(x, parameters): """ - Gaussian fuzzy membership function + Gaussian fuzzy membership function + :param x: data point :param parameters: a list with 2 real values (mean and variance) :return: the membership value of x given the parameters
@@ -57,6 +60,7 @@ def gaussmf(x, parameters):
def bellmf(x, parameters): """ Bell shaped membership function + :param x: :param parameters: :return:
@@ -67,6 +71,7 @@ def bellmf(x, parameters):
def sigmf(x, parameters): """ Sigmoid / Logistic membership function + :param x: :param parameters: a list with 2 real values (smoothness and midpoint) :return:
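The membership functions above can be checked numerically; the parameter lists follow the conventions stated in their docstrings:

from pyFTS.common import Membership

print(Membership.trimf(7.5, [5, 10, 15]))      # 0.5, halfway up the left slope
print(Membership.trapmf(12, [5, 10, 15, 20]))  # 1.0, inside the plateau
print(Membership.gaussmf(0.0, [0.0, 1.0]))     # 1.0, exactly at the mean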
diff --git a/pyFTS/common/Transformations.py b/pyFTS/common/Transformations.py
index a14eacd..c04617a 100644
--- a/pyFTS/common/Transformations.py
+++ b/pyFTS/common/Transformations.py
@@ -19,6 +19,7 @@ class Transformation(object):
def apply(self, data, param, **kwargs): """ Apply the transformation on input data + :param data: input data :param param: :param kwargs:
diff --git a/pyFTS/common/Util.py b/pyFTS/common/Util.py
index a0ab700..352d7bc 100644
--- a/pyFTS/common/Util.py
+++ b/pyFTS/common/Util.py
@@ -119,6 +119,7 @@ def uniquefilename(name):
def show_and_save_image(fig, file, flag, lgd=None): """ Show an image and save it on a file + :param fig: Matplotlib Figure object :param file: filename to save the picture :param flag: if True the image will be saved
@@ -142,6 +143,7 @@ def enumerate2(xs, start=0, step=1):
def sliding_window(data, windowsize, train=0.8, inc=0.1, **kwargs): """ Sliding window method of cross validation for time series + :param data: the entire dataset :param windowsize: window size :param train: percentage of the window size to be used for training the models
@@ -171,6 +173,7 @@ def sliding_window(data, windowsize, train=0.8, inc=0.1, **kwargs):
def persist_obj(obj, file): """ Persist an object on the filesystem. This function depends on the Dill package + :param obj: object in memory :param file: file name to store the object """
@@ -181,6 +184,7 @@ def persist_obj(obj, file):
def load_obj(file): """ Load to memory an object stored on the filesystem. This function depends on the Dill package + :param file: file name where the object is stored :return: object """
@@ -192,6 +196,7 @@ def load_obj(file):
def persist_env(file): """ Persist an entire environment on a file. This function depends on the Dill package + :param file: file name to store the environment """ dill.dump_session(file)
diff --git a/pyFTS/common/flrg.py b/pyFTS/common/flrg.py
index 7e8f990..3cc4d2e 100644
--- a/pyFTS/common/flrg.py
+++ b/pyFTS/common/flrg.py
@@ -11,8 +11,11 @@ class FLRG(object):
def __init__(self, order, **kwargs): self.LHS = None + """Left Hand Side of the rule""" self.RHS = None + """Right Hand Side of the rule""" self.order = order + """Number of lags on the LHS""" self.midpoint = None self.lower = None self.upper = None
@@ -22,6 +25,7 @@ class FLRG(object):
pass def get_key(self): + """Returns a unique identifier for this FLRG""" if self.key is None: if isinstance(self.LHS, (list, set)): names = [c for c in self.LHS]
@@ -39,6 +43,13 @@ class FLRG(object):
return self.key def get_membership(self, data, sets): + """ + Returns the membership value of the FLRG for the input data + + :param data: input data + :param sets: fuzzy sets + :return: the membership value + """ ret = 0.0 if isinstance(self.LHS, (list, set)): if len(self.LHS) == len(data):
@@ -48,6 +59,12 @@ class FLRG(object):
return ret def get_midpoint(self, sets): + """ + Returns the midpoint value for the RHS fuzzy sets + + :param sets: fuzzy sets + :return: the midpoint value + """ if self.midpoint is None: self.midpoint = np.nanmean(self.get_midpoints(sets)) return self.midpoint
@@ -59,6 +76,12 @@ class FLRG(object):
return np.array([sets[self.RHS[s]].centroid for s in self.RHS.keys()]) def get_lower(self, sets): + """ + Returns the lower bound value for the RHS fuzzy sets + + :param sets: fuzzy sets + :return: lower bound value + """ if self.lower is None: if isinstance(self.RHS, list): self.lower = min([sets[rhs].lower for rhs in self.RHS])
@@ -67,6 +90,12 @@ class FLRG(object):
return self.lower def get_upper(self, sets): + """ + Returns the upper bound value for the RHS fuzzy sets + + :param sets: fuzzy sets + :return: upper bound value + """ if self.upper is None: if isinstance(self.RHS, list): self.upper = max([sets[rhs].upper for rhs in self.RHS])
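A minimal sketch of the Util persistence helpers documented above; the Chen model import and the filename are assumptions, not part of this diff:

from pyFTS.common import Util
from pyFTS.data import Enrollments
from pyFTS.partitioners import Grid
from pyFTS.models import chen  # assumed model, not part of this diff

data = Enrollments.get_data()
part = Grid.GridPartitioner(data=data, npart=10)
model = chen.ConventionalFTS(partitioner=part)
model.fit(data)

Util.persist_obj(model, 'chen_enrollments.pkl')     # serialized with dill; filename illustrative
same_model = Util.load_obj('chen_enrollments.pkl')  # round-trip back into memory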
diff --git a/pyFTS/common/fts.py b/pyFTS/common/fts.py
index 5551113..17cb934 100644
--- a/pyFTS/common/fts.py
+++ b/pyFTS/common/fts.py
@@ -10,63 +10,65 @@ class FTS(object):
def __init__(self, **kwargs): """ Create a Fuzzy Time Series model - :param kwargs: model specific parameters - - alpha_cut: Minimal membership to be considered on fuzzyfication process - auto_update: Boolean, indicate that model is incremental - benchmark_only: Boolean, indicates a façade for external (non-FTS) model used on benchmarks or ensembles. - indexer: SeasonalIndexer used for SeasonalModels, default: None - is_high_order: Boolean, if the model support orders greater than 1, default: False - is_multivariate = False - has_seasonality: Boolean, if the model support seasonal indexers, default: False - has_point_forecasting: Boolean, if the model support point forecasting, default: True - has_interval_forecasting: Boolean, if the model support interval forecasting, default: False - has_probability_forecasting: Boolean, if the model support probabilistic forecasting, default: False - max_lag: Integer, maximum lag index used by the model, default: 1 - min_order: Integer, minimal order supported for the model, default: 1 - name: Model name - order: model order (number of past lags are used on forecasting) - original_max: Real, the upper limit of the Universe of Discourse, the maximal value found on training data - original_min: Real, the lower limit of the Universe of Discourse, the minimal value found on training data - partitioner: partitioner object - sets: List, fuzzy sets used on this model - shortname: Acronymn for the model - transformations: List, data transformations (common.Transformations) applied on model pre and post processing, default: [] - transformations_param:List, specific parameters for each data transformation - uod_clip: If the test data will be clipped inside the training Universe of Discourse """ self.sets = {} + """The list of fuzzy sets used on this model""" self.flrgs = {} + """The list of Fuzzy Logical Relationship Groups - FLRG""" self.order = kwargs.get('order',1) + """An integer with the model order (the number of past lags used on forecasting)""" self.shortname = kwargs.get('name',"") + """A string with a short name or alias for the model""" self.name = kwargs.get('name',"") + """A string with the model name""" self.detail = kwargs.get('name',"") + """A string with the model detailed information""" self.is_high_order = False + """A boolean value indicating if the model supports orders greater than 1, default: False""" self.min_order = 1 + """In high order models, this integer value indicates the minimal order supported for the model, default: 1""" self.has_seasonality = False + """A boolean value indicating if the model supports seasonal indexers, default: False""" self.has_point_forecasting = True + """A boolean value indicating if the model supports point forecasting, default: True""" self.has_interval_forecasting = False + """A boolean value indicating if the model supports interval forecasting, default: False""" self.has_probability_forecasting = False + """A boolean value indicating if the model supports probabilistic forecasting, default: False""" self.is_multivariate = False + """A boolean value indicating if the model supports multivariate time series
(Pandas DataFrame), default: False""" self.dump = False self.transformations = [] + """A list with the data transformations (common.Transformations) applied on model pre and post processing, default: []""" self.transformations_param = [] + """A list with the specific parameters for each data transformation""" self.original_max = 0 + """A float with the upper limit of the Universe of Discourse, the maximal value found on training data""" self.original_min = 0 + """A float with the lower limit of the Universe of Discourse, the minimal value found on training data""" self.partitioner = kwargs.get("partitioner", None) + """A pyFTS.partitioners.Partitioner object with the Universe of Discourse partitioner used on the model. This is a mandatory dependency. """ if self.partitioner != None: self.sets = self.partitioner.sets self.auto_update = False + """A boolean value indicating that the model is incremental""" self.benchmark_only = False + """A boolean value indicating a façade for an external (non-FTS) model used on benchmarks or ensembles.""" self.indexer = kwargs.get("indexer", None) + """A pyFTS.models.seasonal.Indexer object for indexing the time series data""" self.uod_clip = kwargs.get("uod_clip", True) + """Flag indicating if the test data will be clipped inside the training Universe of Discourse""" self.alpha_cut = kwargs.get("alpha_cut", 0.0) + """A float with the minimal membership to be considered on the fuzzification process""" self.max_lag = self.order + """An integer indicating the largest lag used by the model. This value also indicates the minimum number of past lags + needed to forecast a single step ahead""" def fuzzy(self, data): """ Fuzzify a data point + :param data: data point :return: maximum membership fuzzy set """
@@ -83,15 +85,15 @@ class FTS(object):
def predict(self, data, **kwargs): """ Forecast using trained model - :param data: time series with minimal length to the order of the model - :param kwargs: - :keyword - type: the forecasting type, one of these values: point(default), interval or distribution. - steps_ahead: The forecasting horizon, i. e., the number of steps ahead to forecast - start: in the multi step forecasting, the index of the data where to start forecasting - distributed: boolean, indicate if the forecasting procedure will be distributed in a dispy cluster - nodes: a list with the dispy cluster nodes addresses + :param data: time series with the minimal length equal to the order of the model + + :keyword type: the forecasting type, one of these values: point (default), interval or distribution. + :keyword steps_ahead: The forecasting horizon, i.e.,
the number of steps ahead to forecast + :keyword start: in multi step forecasting, the index of the data where forecasting starts + :keyword distributed: boolean, indicating if the forecasting procedure will be distributed in a dispy cluster + :keyword nodes: a list with the dispy cluster nodes addresses + :return: a numpy array with the forecasted data """
@@ -150,38 +152,42 @@ class FTS(object):
def forecast(self, data, **kwargs): """ - Point forecast one step ahead - :param data: time series with minimal length to the order of the model - :param kwargs: - :return: + Point forecast one step ahead + + :param data: time series data with the minimal length equal to the max_lag of the model + :param kwargs: model specific parameters + :return: a list with the forecasted values """ raise NotImplementedError('This model does not perform one step ahead point forecasts!') def forecast_interval(self, data, **kwargs): """ Interval forecast one step ahead - :param data: - :param kwargs: - :return: + + :param data: time series data with the minimal length equal to the max_lag of the model + :param kwargs: model specific parameters + :return: a list with the forecasted intervals """ raise NotImplementedError('This model does not perform one step ahead interval forecasts!') def forecast_distribution(self, data, **kwargs): """ Probabilistic forecast one step ahead - :param data: - :param kwargs: - :return: + + :param data: time series data with the minimal length equal to the max_lag of the model + :param kwargs: model specific parameters + :return: a list with the forecasted Probability Distributions """ raise NotImplementedError('This model does not perform one step ahead distribution forecasts!') def forecast_ahead(self, data, steps, **kwargs): """ Point forecast n steps ahead - :param data: - :param steps: - :param kwargs: - :return: + + :param data: time series data with the minimal length equal to the max_lag of the model + :param steps: the number of steps ahead to forecast + :param kwargs: model specific parameters + :return: a list with the forecasted values """ ret = [] for k in np.arange(0,steps):
@@ -198,49 +204,49 @@ class FTS(object):
def forecast_ahead_interval(self, data, steps, **kwargs): """ Interval forecast n steps ahead - :param data: - :param steps: - :param kwargs: - :return: + + :param data: time series data with the minimal length equal to the max_lag of the model + :param steps: the number of steps ahead to forecast + :param kwargs: model specific parameters + :return: a list with the forecasted intervals """ raise NotImplementedError('This model does not perform multi step ahead interval forecasts!') def forecast_ahead_distribution(self, data, steps, **kwargs): """ Probabilistic forecast n steps ahead - :param data: - :param steps: - :param kwargs: - :return: + + :param data: time series data with the minimal length equal to the max_lag of the model + :param steps: the number of steps ahead to forecast + :param kwargs: model specific parameters + :return: a list with the forecasted Probability Distributions """ raise NotImplementedError('This model does not perform multi step ahead distribution forecasts!') def train(self, data, **kwargs): """ - - :param data: - :param sets: - :param order: - :param parameters: - :return: + Method-specific parameter fitting + + :param data: training time series data + :param kwargs: method-specific parameters + """ pass def fit(self, ndata, **kwargs): """ + Fit the model's parameters based on the training data.
- :param data: the training time series + :param ndata: training time series data :param kwargs: - :keyword - num_batches: split the training data in num_batches to save memory during the training process - save_model: save final model on disk - batch_save: save the model between each batch - file_path: path to save the model - distributed: boolean, indicate if the training procedure will be distributed in a dispy cluster - nodes: a list with the dispy cluster nodes addresses + :keyword num_batches: split the training data in num_batches to save memory during the training process + :keyword save_model: save the final model on disk + :keyword batch_save: save the model between each batch + :keyword file_path: path to save the model + :keyword distributed: boolean, indicating if the training procedure will be distributed in a dispy cluster + :keyword nodes: a list with the dispy cluster nodes addresses - :return: """ import datetime
@@ -333,6 +339,12 @@ class FTS(object):
Util.persist_obj(self, file_path) def clone_parameters(self, model): + """ + Import the parameter values from another model + + :param model: source model + """ + self.order = model.order self.shortname = model.shortname self.name = model.name
@@ -356,6 +368,13 @@ class FTS(object):
self.indexer = model.indexer def merge(self, model): + """ + Merge the FLRG rules from another model + + :param model: source model + :return: + """ + for key in model.flrgs.keys(): flrg = model.flrgs[key] if flrg.get_key() not in self.flrgs:
@@ -375,6 +394,16 @@ class FTS(object):
self.transformations.append(transformation) def apply_transformations(self, data, params=None, updateUoD=False, **kwargs): + """ + Apply the data transformations for preprocessing + + :param data: input data + :param params: transformation parameters + :param updateUoD: + :param kwargs: + :return: preprocessed data + """ + ndata = data if updateUoD: if min(data) < 0:
@@ -397,6 +426,15 @@ class FTS(object):
return ndata def apply_inverse_transformations(self, data, params=None, **kwargs): + """ + Apply the data transformations for postprocessing + + :param data: input data + :param params: transformation parameters + :param kwargs: + :return: postprocessed data + """ if len(self.transformations) > 0: if params is None: params = [None for k in self.transformations]
@@ -412,13 +450,20 @@ class FTS(object):
return [self.original_min, self.original_max] def __str__(self): + """String representation of the model""" + tmp = self.name + ":\n" for r in sorted(self.flrgs, key=lambda key: self.flrgs[key].get_midpoint(self.sets)): tmp = tmp + str(self.flrgs[r]) + "\n" return tmp def __len__(self): - return len(self.flrgs) + """ + The length (number of rules) of the model + + :return: number of rules + """ + return len(self.flrgs) def len_total(self): return sum([len(k) for k in self.flrgs])
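A sketch of the fit/predict workflow that the FTS docstrings above describe; the high order model (hofts) is an assumption, and only keywords shown in the docstrings are used:

from pyFTS.data import TAIEX
from pyFTS.partitioners import Grid
from pyFTS.models import hofts  # assumed model, not part of this diff

data = TAIEX.get_data()
train, test = data[:3000], data[3000:3200]

part = Grid.GridPartitioner(data=train, npart=20)
model = hofts.HighOrderFTS(partitioner=part, order=2)
model.fit(train)

point = model.predict(test)                                 # point forecasts (default type)
ahead = model.predict(test[:model.max_lag], steps_ahead=5)  # multi step ahead forecasting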
diff --git a/pyFTS/data/AirPassengers.py b/pyFTS/data/AirPassengers.py
index e10c9f3..3f2e42b 100644
--- a/pyFTS/data/AirPassengers.py
+++ b/pyFTS/data/AirPassengers.py
@@ -6,6 +6,7 @@ import numpy as np
def get_data(): """ Get a simple univariate time series data. + :return: numpy array """ dat = get_dataframe()
@@ -15,6 +16,7 @@ def get_data():
def get_dataframe(): """ Get the complete multivariate time series data. + :return: Pandas DataFrame """ dat = common.get_dataframe('AirPassengers.csv',
diff --git a/pyFTS/data/Enrollments.py b/pyFTS/data/Enrollments.py
index 808d27a..1fed76f 100644
--- a/pyFTS/data/Enrollments.py
+++ b/pyFTS/data/Enrollments.py
@@ -6,6 +6,7 @@ import numpy as np
def get_data(): """ Get a simple univariate time series data. + :return: numpy array """ dat = get_dataframe()
diff --git a/pyFTS/data/INMET.py b/pyFTS/data/INMET.py
index c8aace0..a3e5ccc 100644
--- a/pyFTS/data/INMET.py
+++ b/pyFTS/data/INMET.py
@@ -1,17 +1,10 @@
-#-------------------- -#BDMEP - INMET -#-------------------- -#Estação : BELO HORIZONTE - MG (OMM: 83587) -#Latitude (graus) : -19.93 -#Longitude (graus) : -43.93 -#Altitude (metros): 915.00 -#Estação Operante -#Inicio de operação: 03/03/1910 -#Periodo solicitado dos dados: 01/01/2000 a 31/12/2012 -#Os dados listados abaixo são os que encontram-se digitados no BDMEP -#Hora em UTC +""" +INMET - Instituto Nacional de Meteorologia / Brasil -# http://www.inmet.gov.br +Belo Horizonte station, from 2000-01-01 to 2012-12-31 + +Source: http://www.inmet.gov.br +""" from pyFTS.data import common import pandas as pd
@@ -20,6 +13,7 @@ import pandas as pd
def get_dataframe(): """ Get the complete multivariate time series data. + :return: Pandas DataFrame """ dat = common.get_dataframe('INMET.csv.bz2',
diff --git a/pyFTS/data/NASDAQ.py b/pyFTS/data/NASDAQ.py
index c61e513..18b24cf 100644
--- a/pyFTS/data/NASDAQ.py
+++ b/pyFTS/data/NASDAQ.py
@@ -6,6 +6,7 @@ import numpy as np
def get_data(field="avg"): """ Get a simple univariate time series data. + :param field: the dataset field name to extract :return: numpy array """
@@ -17,6 +18,7 @@ def get_data(field="avg"):
def get_dataframe(): """ Get the complete multivariate time series data. + :return: Pandas DataFrame """ dat = common.get_dataframe('NASDAQ.csv.bz2',
diff --git a/pyFTS/data/SONDA.py b/pyFTS/data/SONDA.py
index 8c6dc47..4f4887f 100644
--- a/pyFTS/data/SONDA.py
+++ b/pyFTS/data/SONDA.py
@@ -1,3 +1,11 @@
+""" +SONDA - Sistema de Organização Nacional de Dados Ambientais, from INPE - Instituto Nacional de Pesquisas Espaciais, Brasil. + +Brasília station + +Source: http://sonda.ccst.inpe.br/ + +""" from pyFTS.data import common import pandas as pd import numpy as np
@@ -6,6 +14,7 @@ import numpy as np
def get_data(field): """ Get a simple univariate time series data. + :param field: the dataset field name to extract :return: numpy array """
@@ -17,6 +26,7 @@ def get_data(field):
def get_dataframe(): """ Get the complete multivariate time series data. + :return: Pandas DataFrame """ dat = common.get_dataframe('SONDA_BSB.csv.bz2',
diff --git a/pyFTS/data/SP500.py b/pyFTS/data/SP500.py
index 2b105a6..0fa2471 100644
--- a/pyFTS/data/SP500.py
+++ b/pyFTS/data/SP500.py
@@ -6,6 +6,7 @@ import numpy as np
def get_data(): """ Get the univariate time series data. + :return: numpy array """ dat = get_dataframe()
@@ -15,6 +16,7 @@ def get_data():
def get_dataframe(): """ Get the complete multivariate time series data. + :return: Pandas DataFrame """ dat = common.get_dataframe('SP500.csv.bz2',
diff --git a/pyFTS/data/TAIEX.py b/pyFTS/data/TAIEX.py
index ff2099e..65723c4 100644
--- a/pyFTS/data/TAIEX.py
+++ b/pyFTS/data/TAIEX.py
@@ -6,6 +6,7 @@ import numpy as np
def get_data(): """ Get the univariate time series data. + :return: numpy array """ dat = get_dataframe()
@@ -16,6 +17,7 @@ def get_data():
def get_dataframe(): """ Get the complete multivariate time series data. + :return: Pandas DataFrame """ dat = common.get_dataframe('TAIEX.csv.bz2',
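The dataset modules above all share the same get_data/get_dataframe contract, e.g.:

from pyFTS.data import TAIEX, NASDAQ

taiex = TAIEX.get_data()               # numpy array with the univariate series
nasdaq = NASDAQ.get_data(field='avg')  # pick the DataFrame column to extract
df = TAIEX.get_dataframe()             # complete multivariate Pandas DataFrame
print(df.head())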
diff --git a/pyFTS/data/artificial.py b/pyFTS/data/artificial.py
index 6f5b702..fb943e3 100644
--- a/pyFTS/data/artificial.py
+++ b/pyFTS/data/artificial.py
@@ -8,6 +8,7 @@ import numpy as np
def generate_gaussian_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=100, num=10, vmin=None, vmax=None): """ Generate data sampled from a Gaussian distribution, with constant or linearly changing parameters + :param mu_ini: Initial mean :param sigma_ini: Initial variance :param mu_inc: Mean increment after 'num' samples
@@ -36,6 +37,7 @@ def generate_gaussian_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=100, num=1
def generate_uniform_linear(min_ini, max_ini, min_inc, max_inc, it=100, num=10, vmin=None, vmax=None): """ Generate data sampled from a Uniform distribution, with constant or linearly changing bounds + :param min_ini: Initial minimum :param max_ini: Initial maximum :param min_inc: Minimum increment after 'num' samples
diff --git a/pyFTS/data/common.py b/pyFTS/data/common.py
index 8897fb8..3d7746b 100644
--- a/pyFTS/data/common.py
+++ b/pyFTS/data/common.py
@@ -11,6 +11,7 @@ def get_dataframe(filename, url, sep=";", compression='infer'):
""" This method checks if the file already exists, reads it and returns its data. If the file doesn't exist yet, it will be downloaded and decompressed. + :param filename: dataset local filename :param url: dataset internet URL :param sep: CSV field separator
diff --git a/pyFTS/data/sunspots.py b/pyFTS/data/sunspots.py
index f7c9a89..d429721 100644
--- a/pyFTS/data/sunspots.py
+++ b/pyFTS/data/sunspots.py
@@ -5,6 +5,7 @@ import numpy as np
def get_data(): """ Get a simple univariate time series data. + :return: numpy array """ dat = get_dataframe()
@@ -14,6 +15,7 @@ def get_data():
def get_dataframe(): """ Get the complete multivariate time series data. + :return: Pandas DataFrame """ dat = common.get_dataframe('sunspots.csv',
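A quick sketch of generate_gaussian_linear using the signature above; the expected length assumes num samples are drawn per iteration:

from pyFTS.data import artificial

# start at N(0, 1) and increment the mean by 0.5 after every 10 samples
data = artificial.generate_gaussian_linear(0, 1, 0.5, 0, it=100, num=10)
print(len(data))  # expected 1000 points, assuming num samples per iteration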
diff --git a/pyFTS/models/ensemble/ensemble.py b/pyFTS/models/ensemble/ensemble.py
index 2bfd0ff..9b23979 100644
--- a/pyFTS/models/ensemble/ensemble.py
+++ b/pyFTS/models/ensemble/ensemble.py
@@ -17,6 +17,9 @@ def sampler(data, quantiles):
class EnsembleFTS(fts.FTS): + """ + Ensemble FTS + """ def __init__(self, **kwargs): super(EnsembleFTS, self).__init__(**kwargs) self.shortname = "Ensemble FTS"
@@ -27,13 +30,21 @@ class EnsembleFTS(fts.FTS):
self.has_probability_forecasting = True self.is_high_order = True self.models = [] + """A list of FTS models, the ensemble components""" self.parameters = [] + """A list with the parameters for each component model""" self.alpha = kwargs.get("alpha", 0.05) self.point_method = kwargs.get('point_method', 'mean') self.interval_method = kwargs.get('interval_method', 'quantile') self.order = 1 def append_model(self, model): + """ + Append a new model to the ensemble + + :param model: FTS model + + """ self.models.append(model) if model.order > self.order: self.order = model.order
diff --git a/pyFTS/models/multivariate/mvfts.py b/pyFTS/models/multivariate/mvfts.py
index 0ea8c44..ca03e25 100644
--- a/pyFTS/models/multivariate/mvfts.py
+++ b/pyFTS/models/multivariate/mvfts.py
@@ -20,6 +20,12 @@ class MVFTS(fts.FTS):
self.name = "Multivariate FTS" def append_variable(self, var): + """ + Append a new explanatory variable to the model + + :param var: variable object + :return: + """ self.explanatory_variables.append(var) def format_data(self, data):
diff --git a/pyFTS/models/multivariate/variable.py b/pyFTS/models/multivariate/variable.py
index 08f5214..92e4a79 100644
--- a/pyFTS/models/multivariate/variable.py
+++ b/pyFTS/models/multivariate/variable.py
@@ -11,15 +11,18 @@ class Variable:
def __init__(self, name, **kwargs): """ - :param name: Name of the variable + :param name: name of the variable :param \**kwargs: See below :Keyword Arguments: * *alias* -- Alternative name for the variable """ self.name = name + """A string with the name of the variable""" self.alias = kwargs.get('alias', self.name) + """A string with the alias of the variable""" self.data_label = kwargs.get('data_label', self.name) + """A string with the column name on the DataFrame""" self.type = kwargs.get('type', 'common') self.transformation = kwargs.get('transformation', None) self.transformation_params = kwargs.get('transformation_params', None)
@@ -30,6 +33,11 @@ class Variable:
self.build(**kwargs) def build(self, **kwargs): + """ + Build the Universe of Discourse partitioner for this variable + + :param kwargs: + :return: + """ fs = kwargs.get('partitioner', Grid.GridPartitioner) mf = kwargs.get('func', Membership.trimf) np = kwargs.get('npart', 10)
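A hedged sketch of the Variable attributes documented above; the DataFrame, its column name, and the data keyword used to build the partitioner are assumptions:

import numpy as np
import pandas as pd
from pyFTS.models.multivariate import variable
from pyFTS.partitioners import Grid
from pyFTS.common import Membership

# illustrative DataFrame; the 'temperature' column name is an assumption
df = pd.DataFrame({'temperature': np.random.normal(25, 5, 1000)})

vtemp = variable.Variable('Temperature',
                          alias='temp',              # alternative name, per the docstring above
                          data_label='temperature',  # column name on the DataFrame
                          partitioner=Grid.GridPartitioner,
                          npart=10,                  # build() keywords shown in this diff
                          func=Membership.trimf,
                          data=df)                   # assumed keyword: training data for the partitioner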
diff --git a/pyFTS/models/nonstationary/common.py b/pyFTS/models/nonstationary/common.py
index 20b10d2..408a6a4 100644
--- a/pyFTS/models/nonstationary/common.py
+++ b/pyFTS/models/nonstationary/common.py
@@ -20,27 +20,23 @@ class FuzzySet(FS.FuzzySet):
def __init__(self, name, mf, parameters, **kwargs): """ Constructor - :param name: - :param mf: Fuzzy Membership Function - :param parameters: - :param kwargs: - - location: Pertubation function that affects the location of the membership function - - location_params: Parameters for location pertubation function - - width: Pertubation function that affects the width of the membership function - - width_params: Parameters for width pertubation function - - noise: Pertubation function that adds noise on the membership function - - noise_params: Parameters for noise pertubation function """ super(FuzzySet, self).__init__(name=name, mf=mf, parameters=parameters, centroid=None, alpha=1.0, **kwargs) self.location = kwargs.get("location", None) + """Perturbation function that affects the location of the membership function""" self.location_params = kwargs.get("location_params", None) + """Parameters for the location perturbation function""" self.location_roots = kwargs.get("location_roots", 0) self.width = kwargs.get("width", None) + """Perturbation function that affects the width of the membership function""" self.width_params = kwargs.get("width_params", None) + """Parameters for the width perturbation function""" self.width_roots = kwargs.get("width_roots", 0) self.noise = kwargs.get("noise", None) + """Perturbation function that adds noise on the membership function""" self.noise_params = kwargs.get("noise_params", None) + """Parameters for the noise perturbation function""" self.perturbated_parameters = {} self.type = 'nonstationary'
@@ -103,7 +99,9 @@ class FuzzySet(FS.FuzzySet):
def membership(self, x, t): """ Calculate the membership value of a given input + :param x: input value + :param t: time displacement or perturbation parameters :return: membership value of x at this fuzzy set """
@@ -192,6 +190,7 @@ class FuzzySet(FS.FuzzySet):
def fuzzify(inst, t, fuzzySets): """ Calculate the membership values for a data point given nonstationary fuzzy sets + :param inst: data point :param t: time displacement of the instance :param fuzzySets: list of fuzzy sets
diff --git a/pyFTS/partitioners/Grid.py b/pyFTS/partitioners/Grid.py
index 74f53c4..dc93ca0 100644
--- a/pyFTS/partitioners/Grid.py
+++ b/pyFTS/partitioners/Grid.py
@@ -14,11 +14,6 @@ class GridPartitioner(partitioner.Partitioner):
def __init__(self, **kwargs): """ Even Length Grid Partitioner - :param data: Training data of which the universe of discourse will be extracted. The universe of discourse is the open interval between the minimum and maximum values of the training data. - :param npart: The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created - :param func: Fuzzy membership function (pyFTS.common.Membership) - :param transformation: data transformation to be applied on data - :param indexer: """ super(GridPartitioner, self).__init__(name="Grid", **kwargs)
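A minimal sketch of the GridPartitioner parameters that the removed docstring above described (data, npart, func):

import numpy as np
from pyFTS.partitioners import Grid
from pyFTS.common import Membership

data = np.random.normal(10, 2, 1000)
part = Grid.GridPartitioner(data=data, npart=7, func=Membership.trimf)
print(len(part.sets))  # 7 evenly spaced fuzzy sets over the Universe of Discourse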
diff --git a/pyFTS/partitioners/partitioner.py b/pyFTS/partitioners/partitioner.py
index 08060f3..e1ffbb1 100644
--- a/pyFTS/partitioners/partitioner.py
+++ b/pyFTS/partitioners/partitioner.py
@@ -11,21 +11,20 @@ class Partitioner(object):
def __init__(self, **kwargs): """ Universe of Discourse partitioner scheme. Split data into several fuzzy sets - :param name: partitioner name - :param data: Training data of which the universe of discourse will be extracted. The universe of discourse is the open interval between the minimum and maximum values of the training data. - :param npart: The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created - :param func: Fuzzy membership function (pyFTS.common.Membership) - :param names: list of partitions names. If None is given the partitions will be auto named with prefix - :param prefix: prefix of auto generated partition names - :param transformation: data transformation to be applied on data """ self.name = kwargs.get('name',"") + """partitioner name""" self.partitions = kwargs.get('npart', 10) + """The number of Universe of Discourse partitions, i.e., the number of fuzzy sets that will be created""" self.sets = {} self.membership_function = kwargs.get('func', Membership.trimf) + """Fuzzy membership function (pyFTS.common.Membership)""" self.setnames = kwargs.get('names', None) + """list of partition names. If None is given, the partitions will be auto named with the prefix""" self.prefix = kwargs.get('prefix', 'A') + """prefix of the auto generated partition names""" self.transformation = kwargs.get('transformation', None) + """data transformation to be applied on the data""" self.indexer = kwargs.get('indexer', None) self.variable = kwargs.get('variable', None) self.type = kwargs.get('type', 'common')
@@ -70,7 +69,8 @@ class Partitioner(object):
def build(self, data): """ Perform the partitioning of the Universe of Discourse - :param data: + + :param data: training data :return: """ pass
diff --git a/pyFTS/probabilistic/ProbabilityDistribution.py b/pyFTS/probabilistic/ProbabilityDistribution.py
index f1a31f8..5613975 100644
--- a/pyFTS/probabilistic/ProbabilityDistribution.py
+++ b/pyFTS/probabilistic/ProbabilityDistribution.py
@@ -13,13 +13,20 @@ class ProbabilityDistribution(object):
""" def __init__(self, type = "KDE", **kwargs): self.uod = kwargs.get("uod", None) + """Universe of discourse""" self.data = [] self.type = type + """ + If type is 'histogram', the PDF is discrete; + if type is 'KDE', the PDF is continuous + """ self.bins = kwargs.get("bins", None) + """Number of bins on a discrete PDF""" self.labels = kwargs.get("bins_labels", None) + """Bin labels on a discrete PDF""" data = kwargs.get("data", None)
diff --git a/pyFTS/probabilistic/kde.py b/pyFTS/probabilistic/kde.py
index 37b3a8e..9eb95b5 100644
--- a/pyFTS/probabilistic/kde.py
+++ b/pyFTS/probabilistic/kde.py
@@ -12,7 +12,9 @@ class KernelSmoothing(object):
"""Kernel Density Estimation""" def __init__(self,h, kernel="epanechnikov"): self.h = h + """Bandwidth (width) parameter""" self.kernel = kernel + """Kernel function""" self.transf = Transformations.Scale(min=0,max=1) def kernel_function(self, u):
@@ -38,6 +40,13 @@ class KernelSmoothing(object):
return 0.5 * np.exp(-np.abs(u)) def probability(self, x, data): + """ + Estimate the probability of the point x given the data + + :param x: data point + :param data: data sample + :return: the estimated probability + """ l = len(data) ndata = self.transf.apply(data)
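Finally, a quick sketch of the KernelSmoothing estimator documented above; the bandwidth value is arbitrary:

import numpy as np
from pyFTS.probabilistic import kde

sample = np.random.normal(0, 1, 500).tolist()
ks = kde.KernelSmoothing(h=0.5, kernel="epanechnikov")  # h: bandwidth (arbitrary here)
print(ks.probability(0.0, sample))  # density estimate at the sample center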