- More data files and artificial data generators

This commit is contained in:
Petrônio Cândido 2018-02-27 12:56:05 -03:00
parent fc2e118576
commit 37862c661d
34 changed files with 26634 additions and 83 deletions

View File

@ -1,2 +1,6 @@
include data/Enrollments.csv
include data/AirPassengers.csv
include data/AirPassengers.csv
include data/NASDAQ.csv
include data/SP500.csv
include data/sunspots.csv
include data/TAIEX.csv

View File

@ -17,7 +17,7 @@ def residuals(targets, forecasts, order=1):
return np.array(targets[order:]) - np.array(forecasts[:-1])
def ChiSquared(q,h):
def chi_squared(q, h):
"""
Chi-Squared value
:param q:
@ -28,7 +28,7 @@ def ChiSquared(q,h):
return p
def compareResiduals(data, models):
def compare_residuals(data, models):
"""
Compare residual's statistics of several models
:param data:
@ -49,7 +49,7 @@ def compareResiduals(data, models):
ret += str(round(q1,2)) + " & "
q2 = Measures.BoxLjungStatistic(res, 10)
ret += str(round(q2,2)) + " & "
ret += str(ChiSquared(q2, 10))
ret += str(chi_squared(q2, 10))
ret += " \\\\ \n"
return ret
@ -131,9 +131,8 @@ def plot_residuals(targets, models, tam=[8, 8], save=False, file=None):
def single_plot_residuals(targets, forecasts, order, tam=[8, 8], save=False, file=None):
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=tam)
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=tam)
ax = axes
res = residuals(targets, forecasts, order)
ax[0].set_title("Residuals", size='large')

View File

@ -13,7 +13,7 @@ import matplotlib.cm as cmx
import matplotlib.colors as pltcolors
import matplotlib.pyplot as plt
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
from pyFTS.probabilistic import ProbabilityDistribution
from pyFTS.models import song, chen, yu, ismailefendi, sadaei, hofts, pwfts, ifts, cheng, hwang
@ -30,8 +30,8 @@ rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
colors = ['grey', 'rosybrown', 'maroon', 'red','orange', 'yellow', 'olive', 'green',
'cyan', 'blue', 'darkblue', 'purple', 'darkviolet']
colors = ['grey', 'darkgrey', 'rosybrown', 'maroon', 'red','orange', 'gold', 'yellow', 'olive', 'green',
'darkgreen', 'cyan', 'lightblue','blue', 'darkblue', 'purple', 'darkviolet' ]
ncol = len(colors)
@ -281,7 +281,7 @@ def all_point_forecasters(data_train, data_test, partitions, max_order=3, statis
print_point_statistics(data_test, objs)
if residuals:
print(ResidualAnalysis.compareResiduals(data_test, objs))
print(ResidualAnalysis.compare_residuals(data_test, objs))
ResidualAnalysis.plot_residuals(data_test, objs, save=save, file=file, tam=tam)
if series:
@ -303,6 +303,8 @@ def all_point_forecasters(data_train, data_test, partitions, max_order=3, statis
plot_probability_distributions(pmfs, lcolors, tam=tam)
return models
def print_point_statistics(data, models, externalmodels = None, externalforecasts = None, indexers=None):
ret = "Model & Order & RMSE & SMAPE & Theil's U \\\\ \n"
@ -539,29 +541,32 @@ def plot_compared_series(original, models, colors, typeonlegend=False, save=Fals
ax.plot(original, color='black', label="Original", linewidth=linewidth*1.5)
for count, fts in enumerate(models, start=0):
if fts.has_point_forecasting and points:
forecasts = fts.forecast(original)
if isinstance(forecasts, np.ndarray):
forecasts = forecasts.tolist()
mi.append(min(forecasts) * 0.95)
ma.append(max(forecasts) * 1.05)
for k in np.arange(0, fts.order):
forecasts.insert(0, None)
lbl = fts.shortname + str(fts.order if fts.is_high_order and not fts.benchmark_only else "")
if typeonlegend: lbl += " (Point)"
ax.plot(forecasts, color=colors[count], label=lbl, ls="-",linewidth=linewidth)
try:
if fts.has_point_forecasting and points:
forecasts = fts.forecast(original)
if isinstance(forecasts, np.ndarray):
forecasts = forecasts.tolist()
mi.append(min(forecasts) * 0.95)
ma.append(max(forecasts) * 1.05)
for k in np.arange(0, fts.order):
forecasts.insert(0, None)
lbl = fts.shortname + str(fts.order if fts.is_high_order and not fts.benchmark_only else "")
if typeonlegend: lbl += " (Point)"
ax.plot(forecasts, color=colors[count], label=lbl, ls="-",linewidth=linewidth)
if fts.has_interval_forecasting and intervals:
forecasts = fts.forecast_interval(original)
lbl = fts.shortname + " " + str(fts.order if fts.is_high_order and not fts.benchmark_only else "")
if not points and intervals:
ls = "-"
else:
ls = "--"
tmpmi, tmpma = plot_interval(ax, forecasts, fts.order, label=lbl, typeonlegend=typeonlegend,
color=colors[count], ls=ls, linewidth=linewidth)
mi.append(tmpmi)
ma.append(tmpma)
if fts.has_interval_forecasting and intervals:
forecasts = fts.forecast_interval(original)
lbl = fts.shortname + " " + str(fts.order if fts.is_high_order and not fts.benchmark_only else "")
if not points and intervals:
ls = "-"
else:
ls = "--"
tmpmi, tmpma = plot_interval(ax, forecasts, fts.order, label=lbl, typeonlegend=typeonlegend,
color=colors[count], ls=ls, linewidth=linewidth)
mi.append(tmpmi)
ma.append(tmpma)
except ValueError as ex:
print(fts.shortname)
handles0, labels0 = ax.get_legend_handles_labels()
lgd = ax.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1))
@ -573,7 +578,7 @@ def plot_compared_series(original, models, colors, typeonlegend=False, save=Fals
ax.set_xlabel('T')
ax.set_xlim([0, len(original)])
Util.show_and_save_image(fig, file, save, lgd=legends)
#Util.show_and_save_image(fig, file, save, lgd=legends)
def plot_probability_distributions(pmfs, lcolors, tam=[15, 7]):
@ -1022,7 +1027,10 @@ def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=N
partitioner=Grid.GridPartitioner,transformation=None,indexer=None):
_3d = len(orders) > 1
ret = []
errors = np.array([[0 for k in range(len(partitions))] for kk in range(len(orders))])
if _3d:
errors = np.array([[0 for k in range(len(partitions))] for kk in range(len(orders))])
else:
errors = []
forecasted_best = []
fig = plt.figure(figsize=tam)
# fig.suptitle("Comparação de modelos ")
@ -1055,7 +1063,10 @@ def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=N
else:
forecasted = fts.forecast_interval(test)
error = 1.0 - Measures.rmse_interval(np.array(test[o:]), np.array(forecasted[:-1]))
errors[oc, pc] = error
if _3d:
errors[oc, pc] = error
else:
errors.append( error )
if error < min_rmse:
min_rmse = error
best = fts
@ -1067,9 +1078,8 @@ def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=N
# ax0.legend(handles0, labels0)
ax0.plot(test, label="Original", linewidth=3.0, color="black")
if _3d: ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
if not plotforecasts: ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
# ax1 = fig.add_axes([0.6, 0.5, 0.45, 0.45], projection='3d')
if _3d:
if _3d and not plotforecasts:
ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
ax1.set_title('Error Surface')
ax1.set_ylabel('Model order')
ax1.set_xlabel('Number of partitions')
@ -1079,9 +1089,9 @@ def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=N
else:
ax1 = fig.add_axes([0, 1, 0.9, 0.9])
ax1.set_title('Error Curve')
ax1.set_ylabel('Number of partitions')
ax1.set_xlabel('RMSE')
ax0.plot(errors,partitions)
ax1.set_xlabel('Number of partitions')
ax1.set_ylabel('RMSE')
ax1.plot(partitions, errors)
ret.append(best)
ret.append(forecasted_best)
ret.append(min_rmse)

View File

@ -31,8 +31,7 @@ def run_point(mfts, partitioner, train_data, test_data, window_key=None, transfo
:return: a dictionary with the benchmark results
"""
import time
from pyFTS import yu, hofts, pwfts,ismailefendi,sadaei, song, cheng, hwang
from pyFTS.models import chen
from pyFTS.models import yu, chen, hofts, pwfts,ismailefendi,sadaei, song, cheng, hwang
from pyFTS.partitioners import Grid, Entropy, FCM
from pyFTS.benchmarks import Measures, naive, arima, quantreg
from pyFTS.common import Transformations
@ -223,7 +222,7 @@ def run_interval(mfts, partitioner, train_data, test_data, window_key=None, tran
:return: a dictionary with the benchmark results
"""
import time
from pyFTS import hofts,ifts,pwfts
from pyFTS.models import hofts,ifts,pwfts
from pyFTS.partitioners import Grid, Entropy, FCM
from pyFTS.benchmarks import Measures, arima, quantreg
@ -424,8 +423,8 @@ def run_ahead(mfts, partitioner, train_data, test_data, steps, resolution, windo
"""
import time
import numpy as np
from pyFTS import hofts, ifts, pwfts
from pyFTS.models import ensemble
from pyFTS.models import hofts, ifts, pwfts
from pyFTS.models.ensemble import ensemble
from pyFTS.partitioners import Grid, Entropy, FCM
from pyFTS.benchmarks import Measures, arima
from pyFTS.models.seasonal import SeasonalIndexer

View File

@ -10,7 +10,7 @@ from copy import deepcopy
import numpy as np
from joblib import Parallel, delayed
from pyFTS.benchmarks import benchmarks, Util
from pyFTS.benchmarks import benchmarks, Util as bUtil
from pyFTS.common import Util
from pyFTS.partitioners import Grid

View File

@ -35,6 +35,7 @@ class IndexedFLR(FLR):
def __str__(self):
return str(self.index) + ": "+ self.LHS.name + " -> " + self.RHS.name
def generate_high_order_recurrent_flr(fuzzyData):
"""
Create an ordered FLR set from a list of fuzzy sets with recurrence
@ -55,7 +56,8 @@ def generate_high_order_recurrent_flr(fuzzyData):
flrs.append(tmp)
return flrs
def generateRecurrentFLRs(fuzzyData):
def generate_recurrent_flrs(fuzzyData):
"""
Create an ordered FLR set from a list of fuzzy sets with recurrence
:param fuzzyData: ordered list of fuzzy sets
@ -84,20 +86,20 @@ def generateRecurrentFLRs(fuzzyData):
return flrs
def generateNonRecurrentFLRs(fuzzyData):
def generate_non_recurrent_flrs(fuzzyData):
"""
Create an ordered FLR set from a list of fuzzy sets without recurrence
:param fuzzyData: ordered list of fuzzy sets
:return: ordered list of FLR
"""
flrs = generateRecurrentFLRs(fuzzyData)
flrs = generate_recurrent_flrs(fuzzyData)
tmp = {}
for flr in flrs: tmp[str(flr)] = flr
ret = [value for key, value in tmp.items()]
return ret
def generateIndexedFLRs(sets, indexer, data, transformation=None):
def generate_indexed_flrs(sets, indexer, data, transformation=None):
"""
Create a season-indexed ordered FLR set from a list of fuzzy sets with recurrence
:param sets: fuzzy sets

View File

@ -46,7 +46,7 @@ class Differential(Transformation):
for t in np.arange(0, self.lag): diff.insert(0, 0)
return diff
def inverse(self,data, param, **kwargs):
def inverse(self, data, param, **kwargs):
interval = kwargs.get("point_to_interval",False)
@ -56,11 +56,11 @@ class Differential(Transformation):
if not isinstance(data, list):
data = [data]
if not isinstance(param, list):
param = [param]
n = len(data)
# print(n)
# print(len(param))
if not interval:
inc = [data[t] + param[t] for t in np.arange(0, n)]
else:

View File

@ -1,7 +1,6 @@
import numpy as np
import pandas as pd
from pyFTS import tree
from pyFTS.common import FuzzySet, SortedCollection
from pyFTS.common import FuzzySet, SortedCollection, tree
class FTS(object):
@ -34,6 +33,8 @@ class FTS(object):
self.original_max = 0
self.original_min = 0
self.partitioner = kwargs.get("partitioner", None)
if self.partitioner != None:
self.sets = self.partitioner.sets
self.auto_update = False
self.benchmark_only = False
self.indexer = None
@ -199,7 +200,6 @@ class FTS(object):
params = [None for k in self.transformations]
for c, t in enumerate(reversed(self.transformations), start=0):
print(c)
ndata = t.inverse(data, params[c], **kwargs)
return ndata

View File

@ -1,8 +1,10 @@
import pandas as pd
import numpy as np
import pkg_resources
def get_data():
passengers = pd.read_csv("DataSets/AirPassengers.csv", sep=",")
filename = pkg_resources.resource_filename('pyFTS', 'data/AirPassengers.csv')
passengers = pd.read_csv(filename, sep=",")
passengers = np.array(passengers["Passengers"])
return passengers

View File

@ -3,9 +3,8 @@ import numpy as np
import os
import pkg_resources
def get_data():
#data_path = os.path.dirname(__file__)
#filename = os.path.join(data_path,"Enrollments.csv")
filename = pkg_resources.resource_filename('pyFTS', 'data/Enrollments.csv')
enrollments = pd.read_csv(filename, sep=";")
enrollments = np.array(enrollments["Enrollments"])

3927
pyFTS/data/NASDAQ.csv Normal file

File diff suppressed because it is too large Load Diff

11
pyFTS/data/NASDAQ.py Normal file
View File

@ -0,0 +1,11 @@
import pandas as pd
import numpy as np
import os
import pkg_resources
def get_data():
    """
    Load the bundled NASDAQ dataset and return its 'avg' column.

    :return: numpy array with the daily average values
    """
    path = pkg_resources.resource_filename('pyFTS', 'data/NASDAQ.csv')
    frame = pd.read_csv(path, sep=";")
    return np.array(frame["avg"])

16924
pyFTS/data/SP500.csv Normal file

File diff suppressed because it is too large Load Diff

11
pyFTS/data/SP500.py Normal file
View File

@ -0,0 +1,11 @@
import pandas as pd
import numpy as np
import os
import pkg_resources
def get_data():
    """
    Load the bundled S&P 500 dataset and return its 'Avg' column.

    :return: numpy array with the average values
    """
    path = pkg_resources.resource_filename('pyFTS', 'data/SP500.csv')
    frame = pd.read_csv(path, sep=",")
    return np.array(frame["Avg"])

5261
pyFTS/data/TAIEX.csv Normal file

File diff suppressed because it is too large Load Diff

11
pyFTS/data/TAIEX.py Normal file
View File

@ -0,0 +1,11 @@
import pandas as pd
import numpy as np
import os
import pkg_resources
def get_data():
    """
    Load the bundled TAIEX dataset and return its 'avg' column.

    :return: numpy array with the daily average values
    """
    path = pkg_resources.resource_filename('pyFTS', 'data/TAIEX.csv')
    frame = pd.read_csv(path, sep=";")
    return np.array(frame["avg"])

56
pyFTS/data/artificial.py Normal file
View File

@ -0,0 +1,56 @@
import numpy as np
def generate_gaussian_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=100, num=10, vmin=None, vmax=None):
    """
    Generate data sampled from a Gaussian distribution whose parameters stay
    constant or change linearly over time.

    :param mu_ini: Initial mean
    :param sigma_ini: Initial standard deviation (scale passed to np.random.normal)
    :param mu_inc: Mean increment applied after each batch of 'num' samples
    :param sigma_inc: Standard deviation increment applied after each batch
    :param it: Number of iterations (batches)
    :param num: Number of samples generated on each iteration
    :param vmin: Lower bound value of generated data (clipped when not None)
    :param vmax: Upper bound value of generated data (clipped when not None)
    :return: A list of it*num float values
    """
    mean = mu_ini
    stddev = sigma_ini
    samples = []
    for _ in range(it):
        batch = np.random.normal(mean, stddev, num)
        # Clip the batch to the requested bounds, when given.
        if vmin is not None:
            batch = np.maximum(np.full(num, vmin), batch)
        if vmax is not None:
            batch = np.minimum(np.full(num, vmax), batch)
        samples.extend(batch)
        mean += mu_inc
        stddev += sigma_inc
    return samples
def generate_uniform_linear(min_ini, max_ini, min_inc, max_inc, it=100, num=10, vmin=None, vmax=None):
    """
    Generate data sampled from a Uniform distribution, with constant or
    linearly changing bounds.

    :param min_ini: Initial lower bound of the uniform distribution
    :param max_ini: Initial upper bound of the uniform distribution
    :param min_inc: Lower bound increment applied after each batch of 'num' samples
    :param max_inc: Upper bound increment applied after each batch of 'num' samples
    :param it: Number of iterations (batches)
    :param num: Number of samples generated on each iteration
    :param vmin: Lower bound value of generated data (clipped when not None)
    :param vmax: Upper bound value of generated data (clipped when not None)
    :return: A list of it*num float values
    """
    _min = min_ini
    _max = max_ini
    ret = []
    for k in np.arange(0, it):
        tmp = np.random.uniform(_min, _max, num)
        # Clip the batch to the requested bounds, when given.
        if vmin is not None:
            tmp = np.maximum(np.full(num, vmin), tmp)
        if vmax is not None:
            tmp = np.minimum(np.full(num, vmax), tmp)
        ret.extend(tmp)
        _min += min_inc
        _max += max_inc
    return ret

310
pyFTS/data/sunspots.csv Normal file
View File

@ -0,0 +1,310 @@
YEAR,SUNACTIVITY
1700,5
1701,11
1702,16
1703,23
1704,36
1705,58
1706,29
1707,20
1708,10
1709,8
1710,3
1711,0
1712,0
1713,2
1714,11
1715,27
1716,47
1717,63
1718,60
1719,39
1720,28
1721,26
1722,22
1723,11
1724,21
1725,40
1726,78
1727,122
1728,103
1729,73
1730,47
1731,35
1732,11
1733,5
1734,16
1735,34
1736,70
1737,81
1738,111
1739,101
1740,73
1741,40
1742,20
1743,16
1744,5
1745,11
1746,22
1747,40
1748,60
1749,80.9
1750,83.4
1751,47.7
1752,47.8
1753,30.7
1754,12.2
1755,9.6
1756,10.2
1757,32.4
1758,47.6
1759,54
1760,62.9
1761,85.9
1762,61.2
1763,45.1
1764,36.4
1765,20.9
1766,11.4
1767,37.8
1768,69.8
1769,106.1
1770,100.8
1771,81.6
1772,66.5
1773,34.8
1774,30.6
1775,7
1776,19.8
1777,92.5
1778,154.4
1779,125.9
1780,84.8
1781,68.1
1782,38.5
1783,22.8
1784,10.2
1785,24.1
1786,82.9
1787,132
1788,130.9
1789,118.1
1790,89.9
1791,66.6
1792,60
1793,46.9
1794,41
1795,21.3
1796,16
1797,6.4
1798,4.1
1799,6.8
1800,14.5
1801,34
1802,45
1803,43.1
1804,47.5
1805,42.2
1806,28.1
1807,10.1
1808,8.1
1809,2.5
1810,0
1811,1.4
1812,5
1813,12.2
1814,13.9
1815,35.4
1816,45.8
1817,41.1
1818,30.1
1819,23.9
1820,15.6
1821,6.6
1822,4
1823,1.8
1824,8.5
1825,16.6
1826,36.3
1827,49.6
1828,64.2
1829,67
1830,70.9
1831,47.8
1832,27.5
1833,8.5
1834,13.2
1835,56.9
1836,121.5
1837,138.3
1838,103.2
1839,85.7
1840,64.6
1841,36.7
1842,24.2
1843,10.7
1844,15
1845,40.1
1846,61.5
1847,98.5
1848,124.7
1849,96.3
1850,66.6
1851,64.5
1852,54.1
1853,39
1854,20.6
1855,6.7
1856,4.3
1857,22.7
1858,54.8
1859,93.8
1860,95.8
1861,77.2
1862,59.1
1863,44
1864,47
1865,30.5
1866,16.3
1867,7.3
1868,37.6
1869,74
1870,139
1871,111.2
1872,101.6
1873,66.2
1874,44.7
1875,17
1876,11.3
1877,12.4
1878,3.4
1879,6
1880,32.3
1881,54.3
1882,59.7
1883,63.7
1884,63.5
1885,52.2
1886,25.4
1887,13.1
1888,6.8
1889,6.3
1890,7.1
1891,35.6
1892,73
1893,85.1
1894,78
1895,64
1896,41.8
1897,26.2
1898,26.7
1899,12.1
1900,9.5
1901,2.7
1902,5
1903,24.4
1904,42
1905,63.5
1906,53.8
1907,62
1908,48.5
1909,43.9
1910,18.6
1911,5.7
1912,3.6
1913,1.4
1914,9.6
1915,47.4
1916,57.1
1917,103.9
1918,80.6
1919,63.6
1920,37.6
1921,26.1
1922,14.2
1923,5.8
1924,16.7
1925,44.3
1926,63.9
1927,69
1928,77.8
1929,64.9
1930,35.7
1931,21.2
1932,11.1
1933,5.7
1934,8.7
1935,36.1
1936,79.7
1937,114.4
1938,109.6
1939,88.8
1940,67.8
1941,47.5
1942,30.6
1943,16.3
1944,9.6
1945,33.2
1946,92.6
1947,151.6
1948,136.3
1949,134.7
1950,83.9
1951,69.4
1952,31.5
1953,13.9
1954,4.4
1955,38
1956,141.7
1957,190.2
1958,184.8
1959,159
1960,112.3
1961,53.9
1962,37.6
1963,27.9
1964,10.2
1965,15.1
1966,47
1967,93.8
1968,105.9
1969,105.5
1970,104.5
1971,66.6
1972,68.9
1973,38
1974,34.5
1975,15.5
1976,12.6
1977,27.5
1978,92.5
1979,155.4
1980,154.6
1981,140.4
1982,115.9
1983,66.6
1984,45.9
1985,17.9
1986,13.4
1987,29.4
1988,100.2
1989,157.6
1990,142.6
1991,145.7
1992,94.3
1993,54.6
1994,29.9
1995,17.5
1996,8.6
1997,21.5
1998,64.3
1999,93.3
2000,119.6
2001,111
2002,104
2003,63.7
2004,40.4
2005,29.8
2006,15.2
2007,7.5
2008,2.9
1 YEAR SUNACTIVITY
2 1700 5
3 1701 11
4 1702 16
5 1703 23
6 1704 36
7 1705 58
8 1706 29
9 1707 20
10 1708 10
11 1709 8
12 1710 3
13 1711 0
14 1712 0
15 1713 2
16 1714 11
17 1715 27
18 1716 47
19 1717 63
20 1718 60
21 1719 39
22 1720 28
23 1721 26
24 1722 22
25 1723 11
26 1724 21
27 1725 40
28 1726 78
29 1727 122
30 1728 103
31 1729 73
32 1730 47
33 1731 35
34 1732 11
35 1733 5
36 1734 16
37 1735 34
38 1736 70
39 1737 81
40 1738 111
41 1739 101
42 1740 73
43 1741 40
44 1742 20
45 1743 16
46 1744 5
47 1745 11
48 1746 22
49 1747 40
50 1748 60
51 1749 80.9
52 1750 83.4
53 1751 47.7
54 1752 47.8
55 1753 30.7
56 1754 12.2
57 1755 9.6
58 1756 10.2
59 1757 32.4
60 1758 47.6
61 1759 54
62 1760 62.9
63 1761 85.9
64 1762 61.2
65 1763 45.1
66 1764 36.4
67 1765 20.9
68 1766 11.4
69 1767 37.8
70 1768 69.8
71 1769 106.1
72 1770 100.8
73 1771 81.6
74 1772 66.5
75 1773 34.8
76 1774 30.6
77 1775 7
78 1776 19.8
79 1777 92.5
80 1778 154.4
81 1779 125.9
82 1780 84.8
83 1781 68.1
84 1782 38.5
85 1783 22.8
86 1784 10.2
87 1785 24.1
88 1786 82.9
89 1787 132
90 1788 130.9
91 1789 118.1
92 1790 89.9
93 1791 66.6
94 1792 60
95 1793 46.9
96 1794 41
97 1795 21.3
98 1796 16
99 1797 6.4
100 1798 4.1
101 1799 6.8
102 1800 14.5
103 1801 34
104 1802 45
105 1803 43.1
106 1804 47.5
107 1805 42.2
108 1806 28.1
109 1807 10.1
110 1808 8.1
111 1809 2.5
112 1810 0
113 1811 1.4
114 1812 5
115 1813 12.2
116 1814 13.9
117 1815 35.4
118 1816 45.8
119 1817 41.1
120 1818 30.1
121 1819 23.9
122 1820 15.6
123 1821 6.6
124 1822 4
125 1823 1.8
126 1824 8.5
127 1825 16.6
128 1826 36.3
129 1827 49.6
130 1828 64.2
131 1829 67
132 1830 70.9
133 1831 47.8
134 1832 27.5
135 1833 8.5
136 1834 13.2
137 1835 56.9
138 1836 121.5
139 1837 138.3
140 1838 103.2
141 1839 85.7
142 1840 64.6
143 1841 36.7
144 1842 24.2
145 1843 10.7
146 1844 15
147 1845 40.1
148 1846 61.5
149 1847 98.5
150 1848 124.7
151 1849 96.3
152 1850 66.6
153 1851 64.5
154 1852 54.1
155 1853 39
156 1854 20.6
157 1855 6.7
158 1856 4.3
159 1857 22.7
160 1858 54.8
161 1859 93.8
162 1860 95.8
163 1861 77.2
164 1862 59.1
165 1863 44
166 1864 47
167 1865 30.5
168 1866 16.3
169 1867 7.3
170 1868 37.6
171 1869 74
172 1870 139
173 1871 111.2
174 1872 101.6
175 1873 66.2
176 1874 44.7
177 1875 17
178 1876 11.3
179 1877 12.4
180 1878 3.4
181 1879 6
182 1880 32.3
183 1881 54.3
184 1882 59.7
185 1883 63.7
186 1884 63.5
187 1885 52.2
188 1886 25.4
189 1887 13.1
190 1888 6.8
191 1889 6.3
192 1890 7.1
193 1891 35.6
194 1892 73
195 1893 85.1
196 1894 78
197 1895 64
198 1896 41.8
199 1897 26.2
200 1898 26.7
201 1899 12.1
202 1900 9.5
203 1901 2.7
204 1902 5
205 1903 24.4
206 1904 42
207 1905 63.5
208 1906 53.8
209 1907 62
210 1908 48.5
211 1909 43.9
212 1910 18.6
213 1911 5.7
214 1912 3.6
215 1913 1.4
216 1914 9.6
217 1915 47.4
218 1916 57.1
219 1917 103.9
220 1918 80.6
221 1919 63.6
222 1920 37.6
223 1921 26.1
224 1922 14.2
225 1923 5.8
226 1924 16.7
227 1925 44.3
228 1926 63.9
229 1927 69
230 1928 77.8
231 1929 64.9
232 1930 35.7
233 1931 21.2
234 1932 11.1
235 1933 5.7
236 1934 8.7
237 1935 36.1
238 1936 79.7
239 1937 114.4
240 1938 109.6
241 1939 88.8
242 1940 67.8
243 1941 47.5
244 1942 30.6
245 1943 16.3
246 1944 9.6
247 1945 33.2
248 1946 92.6
249 1947 151.6
250 1948 136.3
251 1949 134.7
252 1950 83.9
253 1951 69.4
254 1952 31.5
255 1953 13.9
256 1954 4.4
257 1955 38
258 1956 141.7
259 1957 190.2
260 1958 184.8
261 1959 159
262 1960 112.3
263 1961 53.9
264 1962 37.6
265 1963 27.9
266 1964 10.2
267 1965 15.1
268 1966 47
269 1967 93.8
270 1968 105.9
271 1969 105.5
272 1970 104.5
273 1971 66.6
274 1972 68.9
275 1973 38
276 1974 34.5
277 1975 15.5
278 1976 12.6
279 1977 27.5
280 1978 92.5
281 1979 155.4
282 1980 154.6
283 1981 140.4
284 1982 115.9
285 1983 66.6
286 1984 45.9
287 1985 17.9
288 1986 13.4
289 1987 29.4
290 1988 100.2
291 1989 157.6
292 1990 142.6
293 1991 145.7
294 1992 94.3
295 1993 54.6
296 1994 29.9
297 1995 17.5
298 1996 8.6
299 1997 21.5
300 1998 64.3
301 1999 93.3
302 2000 119.6
303 2001 111
304 2002 104
305 2003 63.7
306 2004 40.4
307 2005 29.8
308 2006 15.2
309 2007 7.5
310 2008 2.9

11
pyFTS/data/sunspots.py Normal file
View File

@ -0,0 +1,11 @@
import pandas as pd
import numpy as np
import os
import pkg_resources
def get_data():
    """
    Load the bundled yearly sunspots dataset and return its 'SUNACTIVITY' column.

    :return: numpy array with the yearly sunspot activity values
    """
    path = pkg_resources.resource_filename('pyFTS', 'data/sunspots.csv')
    frame = pd.read_csv(path, sep=",")
    return np.array(frame["SUNACTIVITY"])

View File

@ -50,7 +50,7 @@ class ConventionalFTS(fts.FTS):
self.sets = sets
ndata = self.apply_transformations(data)
tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
flrs = FLR.generateNonRecurrentFLRs(tmpdata)
flrs = FLR.generate_non_recurrent_flrs(tmpdata)
self.flrgs = self.generateFLRG(flrs)
def forecast(self, data, **kwargs):

View File

@ -68,7 +68,7 @@ class ImprovedWeightedFTS(fts.FTS):
ndata = self.apply_transformations(data)
tmpdata = FuzzySet.fuzzyfy_series_old(ndata, self.sets)
flrs = FLR.generateRecurrentFLRs(tmpdata)
flrs = FLR.generate_recurrent_flrs(tmpdata)
self.flrgs = self.generateFLRG(flrs)
def forecast(self, data, **kwargs):

View File

@ -34,7 +34,7 @@ class ConditionalVarianceFTS(chen.ConventionalFTS):
self.max_tx = max(ndata)
tmpdata = common.fuzzySeries(ndata, self.sets, method='fuzzy', const_t=0)
flrs = FLR.generateNonRecurrentFLRs(tmpdata)
flrs = FLR.generate_non_recurrent_flrs(tmpdata)
self.flrgs = self.generate_flrg(flrs)
def generate_flrg(self, flrs, **kwargs):

View File

@ -105,7 +105,7 @@ class HighOrderNonStationaryFTS(hofts.HighOrderFTS):
ndata = self.apply_transformations(data)
#tmpdata = common.fuzzyfy_series_old(ndata, self.sets)
#flrs = FLR.generateRecurrentFLRs(ndata)
#flrs = FLR.generate_recurrent_flrs(ndata)
window_size = parameters if parameters is not None else 1
self.flrgs = self.generate_flrg(ndata, window_size=window_size)

View File

@ -54,7 +54,7 @@ class NonStationaryFTS(fts.FTS):
window_size = parameters if parameters is not None else 1
tmpdata = common.fuzzySeries(ndata, self.sets, window_size, method=self.method)
#print([k[0].name for k in tmpdata])
flrs = FLR.generateRecurrentFLRs(tmpdata)
flrs = FLR.generate_recurrent_flrs(tmpdata)
#print([str(k) for k in flrs])
self.flrgs = self.generate_flrg(flrs)

View File

@ -125,7 +125,7 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
for s in self.sets: self.setsDict[s.name] = s
if parameters == 'Monotonic':
tmpdata = FuzzySet.fuzzyfy_series_old(data, sets)
flrs = FLR.generateRecurrentFLRs(tmpdata)
flrs = FLR.generate_recurrent_flrs(tmpdata)
self.flrgs = self.generateFLRG(flrs)
else:
self.flrgs = self.generate_flrg(data)

View File

@ -67,7 +67,7 @@ class ExponentialyWeightedFTS(fts.FTS):
self.sets = sets
ndata = self.apply_transformations(data)
tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
flrs = FLR.generateRecurrentFLRs(tmpdata)
flrs = FLR.generate_recurrent_flrs(tmpdata)
self.flrgs = self.generateFLRG(flrs, self.c)
def forecast(self, data, **kwargs):

View File

@ -59,7 +59,7 @@ class ContextualMultiSeasonalFTS(sfts.SeasonalFTS):
def train(self, data, sets, order=1, parameters=None):
self.sets = sets
self.seasonality = parameters
flrs = FLR.generateIndexedFLRs(self.sets, self.indexer, data)
flrs = FLR.generate_indexed_flrs(self.sets, self.indexer, data)
self.flrgs = self.generateFLRG(flrs)
def getMidpoints(self, flrg, data):

View File

@ -36,7 +36,7 @@ class MultiSeasonalFTS(sfts.SeasonalFTS):
self.sets = sets
self.seasonality = parameters
#ndata = self.indexer.set_data(data,self.doTransformations(self.indexer.get_data(data)))
flrs = FLR.generateIndexedFLRs(self.sets, self.indexer, data)
flrs = FLR.generate_indexed_flrs(self.sets, self.indexer, data)
self.flrgs = self.generateFLRG(flrs)
def forecast(self, data, **kwargs):

View File

@ -64,7 +64,7 @@ class SeasonalFTS(fts.FTS):
self.sets = sets
ndata = self.apply_transformations(data)
tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
flrs = FLR.generateRecurrentFLRs(tmpdata)
flrs = FLR.generate_recurrent_flrs(tmpdata)
self.flrgs = self.generateFLRG(flrs)
def forecast(self, data, **kwargs):

View File

@ -38,10 +38,11 @@ class ConventionalFTS(fts.FTS):
return r
def train(self, data, sets,order=1,parameters=None):
self.sets = sets
if sets != None:
self.sets = sets
ndata = self.apply_transformations(data)
tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
flrs = FLR.generateNonRecurrentFLRs(tmpdata)
tmpdata = FuzzySet.fuzzyfy_series_old(ndata, self.sets)
flrs = FLR.generate_non_recurrent_flrs(tmpdata)
self.R = self.operation_matrix(flrs)
def forecast(self, data, **kwargs):
@ -67,6 +68,6 @@ class ConventionalFTS(fts.FTS):
ret.append( sum(mp)/len(mp))
ret = self.apply_inverse_transformations(ret, params=[data[self.order - 1:]])
ret = self.apply_inverse_transformations(ret, params=[data])
return ret

View File

@ -60,7 +60,7 @@ class WeightedFTS(fts.FTS):
self.sets = sets
ndata = self.apply_transformations(data)
tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
flrs = FLR.generateRecurrentFLRs(tmpdata)
flrs = FLR.generate_recurrent_flrs(tmpdata)
self.flrgs = self.generate_FLRG(flrs)
def forecast(self, data, **kwargs):

View File

@ -21,11 +21,11 @@ def plot_sets(data, sets, titles, tam=[12, 10], save=False, file=None):
#print(h)
fig, axes = plt.subplots(nrows=num, ncols=1,figsize=tam)
for k in np.arange(0,num):
#ax = fig.add_axes([0.05, 1-(k*h), 0.9, h*0.7]) # left, bottom, width, height
ticks = []
x = []
ax = axes[k]
ax.set_title(titles[k])
ax.set_ylim([0, 1.1])
ax.set_xlim([minx, maxx])
for s in sets[k]:
if s.mf == Membership.trimf:
ax.plot(s.parameters,[0,1,0])
@ -35,6 +35,10 @@ def plot_sets(data, sets, titles, tam=[12, 10], save=False, file=None):
ax.plot(tmpx, tmpy)
elif s.mf == Membership.trapmf:
ax.plot(s.parameters, [0, 1, 1, 0])
ticks.append(str(round(s.centroid, 0)) + '\n' + s.name)
x.append(s.centroid)
ax.xaxis.set_ticklabels(ticks)
ax.xaxis.set_ticks(x)
plt.tight_layout()
@ -44,7 +48,7 @@ def plot_sets(data, sets, titles, tam=[12, 10], save=False, file=None):
def plot_partitioners(data, objs, tam=[12, 10], save=False, file=None):
sets = [k.sets for k in objs]
titles = [k.name for k in objs]
plot_sets(data,sets,titles,tam,save,file)
plot_sets(data, sets, titles, tam, save, file)
def explore_partitioners(data, npart, methods=None, mf=None, tam=[12, 10], save=False, file=None):
@ -59,6 +63,9 @@ def explore_partitioners(data, npart, methods=None, mf=None, tam=[12, 10], save=
for p in methods:
for m in mf:
obj = p(data, npart,m)
obj.name = obj.name + " - " + obj.membership_function.__name__
objs.append(obj)
plot_partitioners(data, objs, tam, save, file)
plot_partitioners(data, objs, tam, save, file)
return objs

View File

@ -1,5 +1,6 @@
from pyFTS.common import FuzzySet, Membership
import numpy as np
import matplotlib.pylab as plt
class Partitioner(object):
@ -70,6 +71,8 @@ class Partitioner(object):
ax.set_title(self.name)
ax.set_ylim([0, 1])
ax.set_xlim([self.min, self.max])
ticks = []
x = []
for s in self.sets:
if s.mf == Membership.trimf:
ax.plot([s.parameters[0], s.parameters[1], s.parameters[2]], [0, 1, 0])
@ -77,6 +80,10 @@ class Partitioner(object):
tmpx = [kk for kk in np.arange(s.lower, s.upper)]
tmpy = [s.membership(kk) for kk in np.arange(s.lower, s.upper)]
ax.plot(tmpx, tmpy)
ticks.append(str(round(s.centroid,0))+'\n'+s.name)
x.append(s.centroid)
plt.xticks(x,ticks)
def __str__(self):
tmp = self.name + ":\n"

View File

@ -1,10 +1,9 @@
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pyFTS.common import FuzzySet,SortedCollection
from pyFTS.common import FuzzySet,SortedCollection,tree
from pyFTS.probabilistic import kde
from pyFTS import tree
from pyFTS.common import SortedCollection
class ProbabilityDistribution(object):
"""