Several bugfixes and refactorings
parent b672cdf08a
commit 0764d249ea
@@ -267,17 +267,17 @@ def get_point_statistics(data, model, indexer=None):
     try:
         ret.append(np.round(rmse(ndata, nforecasts), 2))
     except Exception as ex:
-        print(ex)
+        print('Error in RMSE: {}'.format(ex))
         ret.append(np.nan)
     try:
         ret.append(np.round(smape(ndata, nforecasts), 2))
     except Exception as ex:
-        print(ex)
+        print('Error in SMAPE: {}'.format(ex))
         ret.append(np.nan)
     try:
         ret.append(np.round(UStatistic(ndata, nforecasts), 2))
     except Exception as ex:
-        print(ex)
+        print('Error in U: {}'.format(ex))
         ret.append(np.nan)

     return ret
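Note: for reference, a minimal self-contained sketch of the three error measures wrapped by the try/except blocks above, assuming the standard textbook definitions (these are illustrative stand-ins for pyFTS's own rmse, smape and UStatistic):

    import numpy as np

    def rmse(targets, forecasts):
        # Root Mean Squared Error
        t, f = np.array(targets), np.array(forecasts)
        return np.sqrt(np.mean((t - f) ** 2))

    def smape(targets, forecasts):
        # Symmetric Mean Absolute Percentage Error (0..100 variant)
        t, f = np.array(targets), np.array(forecasts)
        return np.mean(np.abs(f - t) / ((np.abs(t) + np.abs(f)) / 2)) * 100

    def u_statistic(targets, forecasts):
        # Theil's U: forecast error relative to the naive random-walk forecast
        t, f = np.array(targets), np.array(forecasts)
        num = np.sum(((f[1:] - t[1:]) / t[:-1]) ** 2)
        den = np.sum(((t[1:] - t[:-1]) / t[:-1]) ** 2)
        return np.sqrt(num / den)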
@@ -34,7 +34,7 @@ class ConventionalFLRG(object):
 class ConventionalFTS(fts.FTS):
     """Conventional Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(ConventionalFTS, self).__init__(1, "CFTS " + name)
+        super(ConventionalFTS, self).__init__(1, "CFTS " + name, **kwargs)
         self.name = "Conventional FTS"
         self.detail = "Chen"
         self.flrgs = {}
@@ -43,7 +43,7 @@ class TrendWeightedFLRG(yu.WeightedFLRG):
 class TrendWeightedFTS(yu.WeightedFTS):
     """First Order Trend Weighted Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(TrendWeightedFTS, self).__init__("")
+        super(TrendWeightedFTS, self).__init__("", **kwargs)
         self.shortname = "TWFTS " + name
         self.name = "Trend Weighted FTS"
         self.detail = "Cheng"
@@ -37,11 +37,17 @@ class FuzzySet:
         return self.mf(x, self.parameters)

     def partition_function(self,uod=None, nbins=100):
+        """
+        Calculate the partition function over the membership function.
+        :param uod:
+        :param nbins:
+        :return:
+        """
         if self.Z is None and uod is not None:
             self.Z = 0.0
             for k in np.linspace(uod[0], uod[1], nbins):
                 self.Z += self.membership(k)
-        else:
+
         return self.Z

     def __str__(self):
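Note: partition_function simply accumulates the set's membership over nbins points of the Universe of Discourse, yielding the normalizer Z used later to turn memberships into densities. A sketch of the idea with a hypothetical triangular set:

    import numpy as np

    def trimf(x, a, b, c):
        # triangular membership function with vertices a <= b <= c
        return max(min((x - a) / (b - a), (c - x) / (c - b)), 0.0)

    uod, nbins = [0, 10], 100
    Z = sum(trimf(k, 0.0, 5.0, 10.0) for k in np.linspace(uod[0], uod[1], nbins))
    print(Z)  # ~ nbins * mean membership over the UoD (about 50 here)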
@@ -10,7 +10,7 @@ def trimf(x, parameters):
     :param parameters: a list with 3 real values
     :return: the membership value of x given the parameters
     """
-    xx = round(x, 3)
+    xx = np.round(x, 3)
     if xx < parameters[0]:
         return 0
     elif parameters[0] <= xx < parameters[1]:
@@ -92,6 +92,15 @@ class EnsembleFTS(fts.FTS):

         return ret

+    def get_distribution_interquantile(self,forecasts, alpha):
+        size = len(forecasts)
+        qt_lower = int(np.ceil(size * alpha)) - 1
+        qt_upper = int(np.ceil(size * (1- alpha))) - 1
+
+        ret = sorted(forecasts)[qt_lower : qt_upper]
+
+        return ret
+
     def forecast(self, data, **kwargs):

         if "method" in kwargs:
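Note: get_distribution_interquantile trims the tails of the ensemble forecasts, keeping only the values between the alpha and (1 - alpha) empirical quantiles. A quick illustration:

    import numpy as np

    forecasts = [9.8, 10.1, 10.4, 10.2, 30.0, 9.9, 10.0, -5.0]  # two outliers
    alpha = 0.25

    size = len(forecasts)
    qt_lower = int(np.ceil(size * alpha)) - 1        # lower cut index
    qt_upper = int(np.ceil(size * (1 - alpha))) - 1  # upper cut index

    print(sorted(forecasts)[qt_lower:qt_upper])  # [9.8, 9.9, 10.0, 10.1]; tails dropped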
@@ -11,7 +11,7 @@ from pyFTS.benchmarks import arima, quantreg
 from pyFTS.common import Transformations, Util as cUtil
 import scipy.stats as st
 from pyFTS.ensemble import ensemble
-from pyFTS.models import msfts
+from pyFTS.models import msfts, cmsfts
 from pyFTS.probabilistic import ProbabilityDistribution, kde
 from copy import deepcopy
 from joblib import Parallel, delayed
@@ -25,7 +25,7 @@ def train_individual_model(partitioner, train_data, indexer):

     print(_key)

-    model = msfts.MultiSeasonalFTS(_key, indexer=indexer)
+    model = cmsfts.ContextualMultiSeasonalFTS(_key, indexer=indexer)
     model.appendTransformation(partitioner.transformation)
     model.train(train_data, partitioner.sets, order=1)

@@ -74,17 +74,22 @@ class SeasonalEnsembleFTS(ensemble.EnsembleFTS):

         ret = []

-        h = kwargs.get("h",10)
+        smooth = kwargs.get("smooth", "KDE")
+        alpha = kwargs.get("alpha", None)

         for k in data.index:

             tmp = self.get_models_forecasts(data.ix[k])

+            if alpha is None:
                 tmp = np.ravel(tmp).tolist()
+            else:
+                tmp = self.get_distribution_interquantile( np.ravel(tmp).tolist(), alpha)

             name = str(self.indexer.get_index(data.ix[k]))

-            dist = ProbabilityDistribution.ProbabilityDistribution("KDE", uod=[self.original_min, self.original_max],
+            dist = ProbabilityDistribution.ProbabilityDistribution(smooth,
+                                                                   uod=[self.original_min, self.original_max],
                                                                    data=tmp, name=name, **kwargs)

             ret.append(dist)
@@ -33,7 +33,7 @@ class FTS(object):
         self.transformations_param = []
         self.original_max = 0
         self.original_min = 0
-        self.partitioner = None
+        self.partitioner = kwargs.get("partitioner", None)
         self.auto_update = False
         self.benchmark_only = False
         self.indexer = None
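Note: with the partitioner now accepted as a keyword argument, a model can be built directly from a partitioner and trained with sets=None, as the test script later in this commit does. A sketch of the pattern, assuming the usual pyFTS import paths and that enrollments is already loaded:

    from pyFTS.partitioners import Grid
    from pyFTS import pwfts

    part = Grid.GridPartitioner(enrollments, 6)
    model = pwfts.ProbabilisticWeightedFTS("1", partitioner=part)
    model.train(enrollments, None, 1)  # sets are taken from the partitioner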
@@ -219,6 +219,9 @@ class FTS(object):
             grid[k] += 1
         return grid

+    def get_UoD(self):
+        return [self.original_min, self.original_max]
+


@@ -49,7 +49,7 @@ class HighOrderFLRG(object):
 class HighOrderFTS(fts.FTS):
     """Conventional High Order Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(HighOrderFTS, self).__init__(1, "HOFTS" + name)
+        super(HighOrderFTS, self).__init__(1, "HOFTS" + name, **kwargs)
         self.name = "High Order FTS"
         self.shortname = "HOFTS" + name
         self.detail = "Chen"
@@ -12,7 +12,7 @@ from pyFTS import fts

 class HighOrderFTS(fts.FTS):
     def __init__(self, name, **kwargs):
-        super(HighOrderFTS, self).__init__(1, name)
+        super(HighOrderFTS, self).__init__(1, name, **kwargs)
         self.is_high_order = True
         self.min_order = 2
         self.name = "Hwang High Order FTS"
@@ -9,7 +9,7 @@ from pyFTS import hofts, fts, tree
 class IntervalFTS(hofts.HighOrderFTS):
     """High Order Interval Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(IntervalFTS, self).__init__(order=1, name="IFTS " + name)
+        super(IntervalFTS, self).__init__(order=1, name="IFTS " + name, **kwargs)
         self.shortname = "IFTS " + name
         self.name = "Interval FTS"
         self.detail = "Silva, P.; Guimarães, F.; Sadaei, H. (2016)"
@@ -43,7 +43,7 @@ class ImprovedWeightedFLRG(object):
 class ImprovedWeightedFTS(fts.FTS):
     """First Order Improved Weighted Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(ImprovedWeightedFTS, self).__init__(1, "IWFTS " + name)
+        super(ImprovedWeightedFTS, self).__init__(1, "IWFTS " + name, **kwargs)
         self.name = "Improved Weighted FTS"
         self.detail = "Ismail & Efendi"
         self.setsDict = {}
@@ -30,7 +30,7 @@ class ContextualMultiSeasonalFTS(sfts.SeasonalFTS):
     """
     Contextual Multi-Seasonal Fuzzy Time Series
     """
-    def __init__(self, order, name, indexer, **kwargs):
+    def __init__(self, name, indexer, **kwargs):
         super(ContextualMultiSeasonalFTS, self).__init__("CMSFTS")
         self.name = "Contextual Multi Seasonal FTS"
         self.shortname = "CMSFTS " + name
@@ -75,7 +75,7 @@ class ContextualMultiSeasonalFTS(sfts.SeasonalFTS):
         index = self.indexer.get_season_of_data(data)
         ndata = self.indexer.get_data(data)

-        for k in np.arange(1, len(data)):
+        for k in np.arange(0, len(data)):

             flrg = self.flrgs[str(index[k])]

@@ -85,7 +85,7 @@ class ContextualMultiSeasonalFTS(sfts.SeasonalFTS):

             ret.append(sum(mp) / len(mp))

-        ret = self.doInverseTransformations(ret, params=[ndata[self.order - 1:]])
+        ret = self.doInverseTransformations(ret, params=[ndata])

         return ret

@@ -200,7 +200,7 @@ class DateTimeSeasonalIndexer(SeasonalIndexer):
         return data[self.data_fields].tolist()

     def get_index(self, data):
-        return data[self.date_field]
+        return data[self.date_field].tolist() if isinstance(data, pd.DataFrame) else data[self.date_field]

     def set_data(self, data, value):
         raise Exception("Operation not available!")
@@ -44,7 +44,7 @@ class ProbabilityDistribution(object):
         self.name = kwargs.get("name", "")

     def set(self, value, density):
-        k = self.index.find_ge(value)
+        k = self.index.find_ge(np.round(value,3))
         self.distribution[k] = density

     def append(self, values):
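Note: find_ge follows the classic SortedCollection recipe (smallest item >= key), so rounding the value first keeps floating-point noise from selecting the wrong bin. A bisect-based equivalent:

    import bisect
    import numpy as np

    bins = [0.0, 0.5, 1.0, 1.5, 2.0]

    def find_ge(a, key):
        # smallest item of the sorted list a that is >= key
        i = bisect.bisect_left(a, key)
        if i != len(a):
            return a[i]
        raise ValueError('no item >= key')

    value = 0.5000000001                      # float noise just above a bin edge
    print(find_ge(bins, value))               # 1.0, skips the intended bin
    print(find_ge(bins, np.round(value, 3)))  # 0.5, rounding stabilizes the key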
@@ -95,7 +95,7 @@ class ProbabilityDistribution(object):
         ret = 0
         for k in self.bins:
             if k < value:
-                ret += self.distribution[k]
+                ret += self.density(k)
             else:
                 return ret

@@ -156,7 +156,7 @@ class ProbabilityDistribution(object):
             _s += np.log(k)
         return _s / len(data)

-    def plot(self,axis=None,color="black",tam=[10, 6]):
+    def plot(self,axis=None,color="black",tam=[10, 6], title = None):
         if axis is None:
             fig = plt.figure(figsize=tam)
             axis = fig.add_subplot(111)
@@ -168,9 +168,10 @@ class ProbabilityDistribution(object):
         yp = [0 for k in self.data]
         axis.plot(self.data, yp, c="red")

+        if title is None:
+            title = self.name
         axis.plot(self.bins, ys, c=color)
-        axis.set_title(self.name)
+        axis.set_title(title)

         axis.set_xlabel('Universe of Discourse')
         axis.set_ylabel('Probability')
@@ -15,11 +15,26 @@ class KernelSmoothing(object):

     def kernel(self, u):
         if self.method == "epanechnikov":
-            return (3/4)*(1 - u**2)
+            tmp = (3/4)*(1.0 - u**2)
+            return tmp if tmp > 0 else 0
         elif self.method == "gaussian":
-            return (1/np.sqrt(2*np.pi))*np.exp(-0.5*u**2)
+            return (1.0/np.sqrt(2*np.pi))*np.exp(-0.5*u**2)
         elif self.method == "uniform":
             return 0.5
+        elif self.method == "triangular":
+            tmp = 1.0 - np.abs(u)
+            return tmp if tmp > 0 else 0
+        elif self.method == "logistic":
+            return 1.0/(np.exp(u)+2+np.exp(-u))
+        elif self.method == "cosine":
+            return (np.pi/4.0)*np.cos((np.pi/2.0)*u)
+        elif self.method == "sigmoid":
+            return (2.0/np.pi)*(1.0/(np.exp(u)+np.exp(-u)))
+        elif self.method == "tophat":
+            return 1 if np.abs(u) < 0.5 else 0
+        elif self.method == "exponential":
+            return 0.5 * np.exp(-np.abs(u))


     def probability(self, x, data):
         l = len(data)
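Note: these kernels feed the usual kernel density estimate p(x) ~ 1/(n*h) * sum(K((x - xi)/h)). A self-contained sketch with the truncated Epanechnikov kernel added above (the bandwidth h is arbitrary here):

    import numpy as np

    def epanechnikov(u):
        # zero outside |u| <= 1, as in the truncated form above
        tmp = (3 / 4) * (1.0 - u ** 2)
        return tmp if tmp > 0 else 0

    def kde(x, data, h=1.0):
        # kernel density estimate at point x
        return sum(epanechnikov((x - xi) / h) for xi in data) / (len(data) * h)

    data = [9.8, 10.1, 10.4, 10.2, 9.9, 10.0]
    print(kde(10.0, data, h=0.5))  # estimated density near the sample mean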
pyFTS/pwfts.py (211 changed lines)
@@ -7,6 +7,7 @@ import math
 from operator import itemgetter
 from pyFTS.common import FLR, FuzzySet, SortedCollection
 from pyFTS import hofts, ifts, tree
+from pyFTS.probabilistic import ProbabilityDistribution


 class ProbabilisticWeightedFLRG(hofts.HighOrderFLRG):
@@ -15,6 +16,7 @@ class ProbabilisticWeightedFLRG(hofts.HighOrderFLRG):
         super(ProbabilisticWeightedFLRG, self).__init__(order)
         self.RHS = {}
         self.frequency_count = 0.0
+        self.Z = None

     def appendRHS(self, c):
         self.frequency_count += 1.0
@@ -30,9 +32,29 @@ class ProbabilisticWeightedFLRG(hofts.HighOrderFLRG):
         else:
             self.RHS[c.name] = mv

-    def get_probability(self, c):
+    def get_RHSprobability(self, c):
         return self.RHS[c] / self.frequency_count

+    def get_LHSprobability(self, x, norm, uod, nbins):
+        pk = self.frequency_count / norm
+        mv = []
+        for set in self.LHS:
+            mv.append( set.membership(x) )
+
+        min_mv = np.prod(mv)
+        tmp = pk * (min_mv / self.partition_function(uod, nbins=nbins))
+
+        return tmp
+
+    def partition_function(self, uod, nbins=100):
+        if self.Z is None:
+            self.Z = 0.0
+            for k in np.linspace(uod[0], uod[1], nbins):
+                for set in self.LHS:
+                    self.Z += set.membership(k)
+
+        return self.Z
+
     def __str__(self):
         tmp2 = ""
         for c in sorted(self.RHS):
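Note: get_LHSprobability approximates P(rule) * P(x | LHS): pk is the rule's empirical frequency, and the product of LHS memberships is normalized by the partition function Z so memberships behave like a density. A toy computation with hypothetical numbers:

    import numpy as np

    pk = 12 / 100    # rule frequency / global frequency count
    mv = [0.8, 0.5]  # memberships of x in each LHS set
    Z = 37.5         # partition function of the LHS over the UoD

    print(pk * (np.prod(mv) / Z))  # prior mass this rule assigns at x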
@@ -45,7 +67,7 @@ class ProbabilisticWeightedFLRG(hofts.HighOrderFLRG):
 class ProbabilisticWeightedFTS(ifts.IntervalFTS):
     """High Order Probabilistic Weighted Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(ProbabilisticWeightedFTS, self).__init__(order=1, name=name)
+        super(ProbabilisticWeightedFTS, self).__init__(order=1, name=name, **kwargs)
         self.shortname = "PWFTS " + name
         self.name = "Probabilistic FTS"
         self.detail = "Silva, P.; Guimarães, F.; Sadaei, H."
@@ -57,30 +79,40 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
         self.is_high_order = True
         self.auto_update = kwargs.get('update',False)

-    def train(self, data, sets, order=1,parameters=None):
+    def train(self, data, sets, order=1,parameters='Fuzzy'):

         data = self.doTransformations(data, updateUoD=True)

         self.order = order
+        if sets is None and self.partitioner is not None:
+            self.sets = self.partitioner.sets
+            self.original_min = self.partitioner.min
+            self.original_max = self.partitioner.max
+        else:
             self.sets = sets
         for s in self.sets: self.setsDict[s.name] = s
+        if parameters == 'Monotonic':
             tmpdata = FuzzySet.fuzzySeries(data, sets)
             flrs = FLR.generateRecurrentFLRs(tmpdata)
             self.flrgs = self.generateFLRG(flrs)
-        #self.flrgs = self.generateFLRG2(data)
+        else:
+            self.flrgs = self.generateFLRGfuzzy(data)

-    def generateFLRG2(self, data):
+    def generateFLRGfuzzy(self, data):
         flrgs = {}
         l = len(data)
         for k in np.arange(self.order, l):
             if self.dump: print("FLR: " + str(k))
-            flrg = ProbabilisticWeightedFLRG(self.order)

             sample = data[k - self.order: k]

             mvs = FuzzySet.fuzzyInstances(sample, self.sets)
             lags = {}

+            mv = FuzzySet.fuzzyInstance(data[k], self.sets)
+            tmp = np.argwhere(mv)
+            idx = np.ravel(tmp)  # flatten the array
+
             for o in np.arange(0, self.order):
                 _sets = [self.sets[kk] for kk in np.arange(0, len(self.sets)) if mvs[o][kk] > 0]

@@ -92,25 +124,25 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):

             # Trace the possible paths
             for p in root.paths():
+                flrg = ProbabilisticWeightedFLRG(self.order)
                 path = list(reversed(list(filter(None.__ne__, p))))

-                lhs_mv = []
+                tmp_path = []
                 for c, e in enumerate(path, start=0):
-                    lhs_mv.append( e.membership( sample[c] ) )
+                    tmp_path.append( e.membership( sample[c] ) )
                     flrg.appendLHS(e)

+                lhs_mv = np.prod(tmp_path)
+
                 if flrg.strLHS() not in flrgs:
                     flrgs[flrg.strLHS()] = flrg;

-                mv = FuzzySet.fuzzyInstance(data[k], self.sets)
+                for st in idx:
+                    flrgs[flrg.strLHS()].appendRHSFuzzy(self.sets[st], lhs_mv*mv[st])

-                rhs_mv = [mv[kk] for kk in np.arange(0, len(self.sets)) if mv[kk] > 0]
-                _sets = [self.sets[kk] for kk in np.arange(0, len(self.sets)) if mv[kk] > 0]
+                tmp_fq = sum([lhs_mv*kk for kk in mv if kk > 0])

-                for c, e in enumerate(_sets, start=0):
-                    flrgs[flrg.strLHS()].appendRHSFuzzy(e,rhs_mv[c]*max(lhs_mv))
+                self.global_frequency_count = self.global_frequency_count + tmp_fq

-                self.global_frequency_count += max(lhs_mv)

         return (flrgs)

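Note: the fuzzyfied generation relies on a numpy idiom used twice in this commit: fuzzyInstance yields the membership of a crisp value in every set, and argwhere/ravel extract the indices of the sets actually activated. An illustration with made-up triangular sets:

    import numpy as np

    def memberships(x, sets):
        # membership of x in each triangular set (a, b, c)
        return [max(min((x - a) / (b - a), (c - x) / (c - b)), 0.0)
                for a, b, c in sets]

    sets = [(0, 5, 10), (5, 10, 15), (10, 15, 20)]
    mv = memberships(7.0, sets)      # [0.6, 0.4, 0.0]
    idx = np.ravel(np.argwhere(mv))  # indices of activated sets: [0 1]
    print(mv, idx)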
@@ -159,26 +191,44 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
             self.flrgs[tmp.strLHS()] = tmp;
             self.global_frequency_count += 1

-    def get_probability(self, flrg):
+    def get_flrg_global_probability(self, flrg):
         if flrg.strLHS() in self.flrgs:
             return self.flrgs[flrg.strLHS()].frequency_count / self.global_frequency_count
         else:
             self.add_new_PWFLGR(flrg)
-            return self.get_probability(flrg)
+            return self.get_flrg_global_probability(flrg)

     def getMidpoints(self, flrg):
         if flrg.strLHS() in self.flrgs:
             tmp = self.flrgs[flrg.strLHS()]
-            ret = sum(np.array([tmp.get_probability(s) * self.setsDict[s].centroid for s in tmp.RHS]))
+            ret = sum(np.array([tmp.get_RHSprobability(s) * self.setsDict[s].centroid for s in tmp.RHS]))
         else:
             pi = 1 / len(flrg.LHS)
             ret = sum(np.array([pi * s.centroid for s in flrg.LHS]))
         return ret

+    def get_conditional_probability(self, x, flrg):
+
+        if flrg.strLHS() in self.flrgs:
+            _flrg = self.flrgs[flrg.strLHS()]
+            cond = []
+            for s in _flrg.RHS:
+                _set = self.setsDict[s]
+                tmp = _flrg.get_RHSprobability(s) * (_set.membership(x) / _set.partition_function(uod=self.get_UoD()))
+                cond.append(tmp)
+            ret = sum(np.array(cond))
+        else:
+            ##########################################
+            # this may be the problem! TEST IT!!!
+            ##########################################
+            pi = 1 / len(flrg.LHS)
+            ret = sum(np.array([pi * self.setsDict[s].membership(x) for s in flrg.LHS]))
+        return ret
+
     def getUpper(self, flrg):
         if flrg.strLHS() in self.flrgs:
             tmp = self.flrgs[flrg.strLHS()]
-            ret = sum(np.array([tmp.get_probability(s) * self.setsDict[s].upper for s in tmp.RHS]))
+            ret = sum(np.array([tmp.get_RHSprobability(s) * self.setsDict[s].upper for s in tmp.RHS]))
         else:
             pi = 1 / len(flrg.LHS)
             ret = sum(np.array([pi * s.upper for s in flrg.LHS]))
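Note: get_conditional_probability treats the rule as a mixture: each RHS weight P(s | LHS) scales that set's membership at x, normalized by the set's partition function so it integrates like a density. A toy computation with hypothetical numbers:

    rhs_weights = [0.7, 0.3]      # P(s | LHS) for two RHS sets
    memberships = [0.4, 0.9]      # mu_s(x) for the same sets at point x
    partition_fns = [25.0, 25.0]  # Z_s of each set over the UoD

    cond = sum(w * (m / z) for w, m, z in zip(rhs_weights, memberships, partition_fns))
    print(cond)  # P(x | rule) under the mixture approximation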
@@ -187,7 +237,7 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
     def getLower(self, flrg):
         if flrg.strLHS() in self.flrgs:
             tmp = self.flrgs[flrg.strLHS()]
-            ret = sum(np.array([tmp.get_probability(s) * self.setsDict[s].lower for s in tmp.RHS]))
+            ret = sum(np.array([tmp.get_RHSprobability(s) * self.setsDict[s].lower for s in tmp.RHS]))
         else:
             pi = 1 / len(flrg.LHS)
             ret = sum(np.array([pi * s.lower for s in flrg.LHS]))
@@ -287,15 +337,13 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
                 affected_flrgs.append(flrg)
                 affected_flrgs_memberships.append(mv[kk])

-        count = 0
-        for flrg in affected_flrgs:
+        for count, flrg in enumerate(affected_flrgs):
             # find the bounds of each FLRG, weighted by probability and membership
-            norm = self.get_probability(flrg) * affected_flrgs_memberships[count]
+            norm = self.get_flrg_global_probability(flrg) * affected_flrgs_memberships[count]
             if norm == 0:
-                norm = self.get_probability(flrg)  # * 0.001
+                norm = self.get_flrg_global_probability(flrg)  # * 0.001
             mp.append(norm * self.getMidpoints(flrg))
             norms.append(norm)
-            count = count + 1

         # generate the interval
         norm = sum(norms)
@@ -394,16 +442,14 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
                 affected_flrgs.append(flrg)
                 affected_flrgs_memberships.append(mv[kk])

-        count = 0
-        for flrg in affected_flrgs:
+        for count, flrg in enumerate(affected_flrgs):
             # find the bounds of each FLRG, weighted by probability and membership
-            norm = self.get_probability(flrg) * affected_flrgs_memberships[count]
+            norm = self.get_flrg_global_probability(flrg) * affected_flrgs_memberships[count]
             if norm == 0:
-                norm = self.get_probability(flrg)  # * 0.001
+                norm = self.get_flrg_global_probability(flrg)  # * 0.001
             up.append(norm * self.getUpper(flrg))
             lo.append(norm * self.getLower(flrg))
             norms.append(norm)
-            count = count + 1

         # generate the interval
         norm = sum(norms)
@@ -418,6 +464,32 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):

         return ret

+    def forecastDistribution(self, data, **kwargs):
+
+        ret = []
+
+        smooth = kwargs.get("smooth", "KDE")
+        alpha = kwargs.get("alpha", None)
+
+        for k in data.index:
+
+            tmp = self.get_models_forecasts(data.ix[k])
+
+            if alpha is None:
+                tmp = np.ravel(tmp).tolist()
+            else:
+                tmp = self.get_distribution_interquantile( np.ravel(tmp).tolist(), alpha)
+
+            name = str(self.indexer.get_index(data.ix[k]))
+
+            dist = ProbabilityDistribution.ProbabilityDistribution(smooth,
+                                                                   uod=[self.original_min, self.original_max],
+                                                                   data=tmp, name=name, **kwargs)
+
+            ret.append(dist)
+
+        return ret
+
     def forecastAhead(self, data, steps, **kwargs):
         ret = [data[k] for k in np.arange(len(data) - self.order, len(data))]

@@ -558,10 +630,87 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
         df = pd.DataFrame(ret, columns=sorted(grid))
         return df

+    def density(self, x, num_bins=100):
+        affected_flrgs = []
+        affected_flrgs_memberships = []
+        mv = FuzzySet.fuzzyInstance(x, self.sets)
+        tmp = np.argwhere(mv)
+        idx = np.ravel(tmp)
+
+        if idx.size == 0:  # the element is out of the bounds of the Universe of Discourse
+            if x <= self.sets[0].lower:
+                idx = [0]
+            elif x >= self.sets[-1].upper:
+                idx = [len(self.sets) - 1]
+            else:
+                raise Exception(x)
+
+        for kk in idx:
+            flrg = ProbabilisticWeightedFLRG(self.order)
+            flrg.appendLHS(self.sets[kk])
+            affected_flrgs.append(flrg)
+            affected_flrgs_memberships.append(mv[kk])
+
+        total_prob = 0.0
+
+        for count, flrg in enumerate(affected_flrgs):
+            _flrg = self.flrgs[flrg.strLHS()]
+            pk = _flrg.frequency_count / self.global_frequency_count
+            priori = pk * (affected_flrgs_memberships[count])  # / _flrg.partition_function(uod=self.get_UoD(), nbins=num_bins))
+            #print(flrg.strLHS() + ": PK=" + str(pk) + " Priori=" + str(priori))
+            #posteriori = self.get_conditional_probability(k, flrg) * priori
+            total_prob += priori
+
+        return total_prob
+
+    def AprioriPDF(self, **kwargs):
+        nbins = kwargs.get('num_bins', 100)
+        pdf = ProbabilityDistribution.ProbabilityDistribution(uod=[self.original_min, self.original_max], num_bins=nbins)
+        t = 0.0
+
+        for k in pdf.bins:
+            #print("BIN: " + str(k) )
+            affected_flrgs = []
+            affected_flrgs_memberships = []
+
+            mv = FuzzySet.fuzzyInstance(k, self.sets)
+            tmp = np.argwhere(mv)
+            idx = np.ravel(tmp)
+
+            if idx.size == 0:  # the element is out of the bounds of the Universe of Discourse
+                if k <= self.sets[0].lower:
+                    idx = [0]
+                elif k >= self.sets[-1].upper:
+                    idx = [len(self.sets) - 1]
+                else:
+                    raise Exception(k)
+
+            for kk in idx:
+                flrg = ProbabilisticWeightedFLRG(self.order)
+                flrg.appendLHS(self.sets[kk])
+                affected_flrgs.append(flrg)
+                affected_flrgs_memberships.append(mv[kk])
+
+            total_prob = 0.0
+
+            for count, flrg in enumerate(affected_flrgs):
+                _flrg = self.flrgs[flrg.strLHS()]
+                pk = _flrg.frequency_count / self.global_frequency_count
+                priori = pk * (affected_flrgs_memberships[count] / _flrg.partition_function(uod=self.get_UoD()))
+                #print(flrg.strLHS() + ": PK=" + str(pk) + " Priori=" + str(priori))
+                posteriori = self.get_conditional_probability(k, flrg) * priori
+                total_prob += posteriori
+
+            t += total_prob
+            pdf.set(k, total_prob)
+
+        print(t)
+
+        return pdf
+
     def __str__(self):
         tmp = self.name + ":\n"
         for r in sorted(self.flrgs):
-            p = round(self.flrgs[r].frequencyCount / self.global_frequency_count, 3)
+            p = round(self.flrgs[r].frequency_count / self.global_frequency_count, 3)
             tmp = tmp + "(" + str(p) + ") " + str(self.flrgs[r]) + "\n"
         return tmp
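Note: AprioriPDF fills the distribution bin by bin through the law of total probability: for each bin value k it sums posteriori = P(k | rule) * P(rule) over the rules activated at k, and the running total t printed at the end is a sanity check that the mass sums to roughly 1. Schematically, for one bin with hypothetical numbers:

    # (prior P(rule), conditional density P(k | rule)) for the rules active at k
    rules_at_k = [(0.12, 0.020), (0.08, 0.015)]

    total_prob = sum(prior * conditional for prior, conditional in rules_at_k)
    print(total_prob)  # mass assigned to bin k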
@@ -47,7 +47,7 @@ class ExponentialyWeightedFLRG(object):
 class ExponentialyWeightedFTS(fts.FTS):
     """First Order Exponentialy Weighted Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(ExponentialyWeightedFTS, self).__init__(1, "EWFTS")
+        super(ExponentialyWeightedFTS, self).__init__(1, "EWFTS", **kwargs)
         self.name = "Exponentialy Weighted FTS"
         self.detail = "Sadaei"
         self.c = 1
@@ -37,7 +37,7 @@ class SeasonalFLRG(FLR.FLR):
 class SeasonalFTS(fts.FTS):
     """First Order Seasonal Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(SeasonalFTS, self).__init__(1, "SFTS")
+        super(SeasonalFTS, self).__init__(1, "SFTS", **kwargs)
         self.name = "Seasonal FTS"
         self.detail = "Chen"
         self.seasonality = 1
@@ -12,7 +12,7 @@ from pyFTS import fts
 class ConventionalFTS(fts.FTS):
     """Traditional Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(ConventionalFTS, self).__init__(1, "FTS " + name)
+        super(ConventionalFTS, self).__init__(1, "FTS " + name, **kwargs)
         self.name = "Traditional FTS"
         self.detail = "Song & Chissom"
         self.R = None
@@ -35,13 +35,28 @@ from pyFTS import pwfts
 from pyFTS import tree
 from pyFTS.benchmarks import benchmarks as bchmk

-enrollments_fs1 = Grid.GridPartitioner(enrollments, 6).sets
-for s in enrollments_fs1:
-    print(s)
+uod = [10162, 21271]

-pfts1_enrollments = pwfts.ProbabilisticWeightedFTS("1")
-pfts1_enrollments.train(enrollments, enrollments_fs1, 1)
+enrollments_fs1 = Grid.GridPartitioner(enrollments, 6)
+for s in enrollments_fs1.sets:
+    print(s.partition_function(uod, 100))
+
+pfts1_enrollments = pwfts.ProbabilisticWeightedFTS("1", partitioner=enrollments_fs1)
+pfts1_enrollments.train(enrollments, None, 1)
 pfts1_enrollments.shortname = "1st Order"
+
+print(pfts1_enrollments)
+
+#pfts1_enrollments.AprioriPDF
+norm = pfts1_enrollments.global_frequency_count
+uod = pfts1_enrollments.get_UoD()
+print(uod)
+for k in sorted(pfts1_enrollments.flrgs.keys()):
+    flrg = pfts1_enrollments.flrgs[k]
+    #tmp = flrg.get_LHSprobability(15000, norm, uod, 100)
+    print(flrg.partition_function(uod,100))
+
+'''
 pfts2_enrollments = pwfts.ProbabilisticWeightedFTS("2")
 pfts2_enrollments.dump = False
 pfts2_enrollments.shortname = "2nd Order"
@@ -55,7 +70,7 @@ bchmk.plot_compared_series(enrollments,[pfts1_enrollments,pfts2_enrollments, pft
                            ["red","blue","green"], linewidth=2,
                            typeonlegend=True,save=False,file="pictures/pwfts_enrollments_interval.png",
                            tam=[20,7],points=False, intervals=True)
+'''


@@ -41,7 +41,7 @@ class WeightedFLRG(object):
 class WeightedFTS(fts.FTS):
     """First Order Weighted Fuzzy Time Series"""
     def __init__(self, name, **kwargs):
-        super(WeightedFTS, self).__init__(1, "WFTS " + name)
+        super(WeightedFTS, self).__init__(1, "WFTS " + name, **kwargs)
         self.name = "Weighted FTS"
         self.detail = "Yu"