Notebooks, method bugfixes, and refactoring
parent 37862c661d, commit 7a2dd7e54c
@@ -90,7 +90,7 @@ def run_point(mfts, partitioner, train_data, test_data, window_key=None, transfo
         mfts.append_transformation(transformation)
 
     _start = time.time()
-    mfts.train(train_data, partitioner.sets, order=mfts.order)
+    mfts.train(train_data, sets=partitioner.sets, order=mfts.order)
     _end = time.time()
     times = _end - _start
 
@@ -273,7 +273,7 @@ def all_point_forecasters(data_train, data_test, partitions, max_order=3, statis
         #print(model)
         if transformation is not None:
             model.append_transformation(transformation)
-        model.train(data_train, data_train_fs.sets, order=model.order)
+        model.train(data_train, sets=data_train_fs.sets, order=model.order)
         objs.append(model)
         lcolors.append( colors[count % ncol] )
 
@@ -385,7 +385,7 @@ def interval_sliding_window(data, windowsize, train=0.8, models=None, partitione
                     mfts.append_transformation(transformation)
 
                 _start = time.time()
-                mfts.train(training, data_train_fs.sets)
+                mfts.train(training, sets=data_train_fs.sets)
                 _end = time.time()
                 _tdiff = _end - _start
 
@@ -419,7 +419,7 @@ def interval_sliding_window(data, windowsize, train=0.8, models=None, partitione
                         mfts.append_transformation(transformation)
 
                     _start = time.time()
-                    mfts.train(training, data_train_fs.sets, order=order)
+                    mfts.train(training, sets=data_train_fs.sets, order=order)
                     _end = time.time()
 
                    _tdiff = _end - _start
@@ -476,7 +476,7 @@ def all_interval_forecasters(data_train, data_test, partitions, max_order=3,save
     for count, model in Util.enumerate2(models, start=0, step=2):
         if transformation is not None:
             model.append_transformation(transformation)
-        model.train(data_train, data_train_fs, order=model.order)
+        model.train(data_train, sets=data_train_fs, order=model.order)
         objs.append(model)
         lcolors.append( colors[count % ncol] )
 
@@ -635,7 +635,7 @@ def ahead_sliding_window(data, windowsize, train, steps, models=None, resolution
                     mfts.append_transformation(transformation)
 
                 _start = time.time()
-                mfts.train(train, data_train_fs.sets)
+                mfts.train(train, sets=data_train_fs.sets)
                 _end = time.time()
 
                 _tdiff = _end - _start
@@ -670,7 +670,7 @@ def ahead_sliding_window(data, windowsize, train, steps, models=None, resolution
                         mfts.append_transformation(transformation)
 
                     _start = time.time()
-                    mfts.train(train, data_train_fs.sets, order=order)
+                    mfts.train(train, sets=data_train_fs.sets, order=order)
                     _end = time.time()
 
                     _tdiff = _end - _start
@@ -705,7 +705,7 @@ def all_ahead_forecasters(data_train, data_test, partitions, start, steps, resol
         if not mfts.is_high_order:
             if transformation is not None:
                 mfts.append_transformation(transformation)
-            mfts.train(data_train, data_train_fs)
+            mfts.train(data_train, sets=data_train_fs.sets)
            objs.append(mfts)
            lcolors.append( colors[count % ncol] )
         else:
@@ -714,7 +714,7 @@ def all_ahead_forecasters(data_train, data_test, partitions, start, steps, resol
                 mfts = model(" n = " + str(order))
                 if transformation is not None:
                     mfts.append_transformation(transformation)
-                mfts.train(data_train, data_train_fs, order=order)
+                mfts.train(data_train, sets=data_train_fs.sets, order=order)
                 objs.append(mfts)
                 lcolors.append(colors[count % ncol])
 
@@ -896,7 +896,7 @@ def SelecaoSimples_MenorRMSE(original, parameters, modelo):
     for p in parameters:
         sets = Grid.GridPartitioner(original, p).sets
         fts = modelo(str(p) + " particoes")
-        fts.train(original, sets)
+        fts.train(original, sets=sets)
         # print(original)
         forecasted = fts.forecast(original)
         forecasted.insert(0, original[0])
@@ -936,7 +936,7 @@ def SelecaoSimples_MenorRMSE(original, parameters, modelo):
     for p in parameters:
         sets = Grid.GridPartitionerTrimf(difffts, p)
         fts = modelo(str(p) + " particoes")
-        fts.train(difffts, sets)
+        fts.train(difffts, sets=sets)
         forecasted = fts.forecast(difffts)
         forecasted.insert(0, difffts[0])
         ax2.plot(forecasted, label=fts.name)
@@ -1050,7 +1050,7 @@ def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=N
         for oc, o in enumerate(orders, start=0):
             fts = model("q = " + str(p) + " n = " + str(o))
             fts.append_transformation(transformation)
-            fts.train(train, sets, o, parameters=parameters)
+            fts.train(train, sets=sets, order=o, parameters=parameters)
             if not intervals:
                 forecasted = fts.forecast(test)
                 if not fts.has_seasonality:
@@ -1128,7 +1128,7 @@ def sliding_window_simple_search(data, windowsize, model, partitions, orders, sa
             _error = []
             for ct, train, test in Util.sliding_window(data, windowsize, 0.8):
                 fts = model("q = " + str(p) + " n = " + str(o))
-                fts.train(data, sets, o, parameters=parameters)
+                fts.train(data, sets=sets, order=o, parameters=parameters)
                 if not intervals:
                     forecasted = fts.forecast(test)
                     if not fts.has_seasonality:
@@ -1191,7 +1191,7 @@ def pftsExploreOrderAndPartitions(data,save=False, file=None):
     for order in np.arange(1, 6):
         fts = pwfts.ProbabilisticWeightedFTS("")
         fts.shortname = "n = " + str(order)
-        fts.train(data, data_fs1, order=order)
+        fts.train(data, sets=data_fs1.sets, order=order)
         point_forecasts = fts.forecast(data)
         interval_forecasts = fts.forecast_interval(data)
         lower = [kk[0] for kk in interval_forecasts]
@@ -1213,7 +1213,7 @@ def pftsExploreOrderAndPartitions(data,save=False, file=None):
     data_fs = Grid.GridPartitioner(data, partitions).sets
     fts = pwfts.ProbabilisticWeightedFTS("")
     fts.shortname = "q = " + str(partitions)
-    fts.train(data, data_fs, 1)
+    fts.train(data, sets=data_fs.sets, order=1)
     point_forecasts = fts.forecast(data)
     interval_forecasts = fts.forecast_interval(data)
     lower = [kk[0] for kk in interval_forecasts]
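Every hunk above makes the same API change: train() now takes its options as keyword arguments (sets=, order=, parameters=) instead of positionals. A minimal sketch of the new calling convention on a toy series; the series and variable names are illustrative, not from this commit:

from pyFTS.partitioners import Grid
from pyFTS.models import hofts

data = [i % 25 for i in range(200)]          # toy periodic series
part = Grid.GridPartitioner(data, 10)        # 10 evenly spaced fuzzy sets

model = hofts.HighOrderFTS("")
# old call (removed): model.train(data, part.sets, 2)
model.train(data, sets=part.sets, order=2)   # new call: options passed by name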
@@ -148,7 +148,7 @@ class FTS(object):
         """
         raise NotImplementedError('This model do not perform multi step ahead distribution forecasts!')
 
-    def train(self, data, sets, order=1, parameters=None):
+    def train(self, data, **kwargs):
         """
 
         :param data:
@@ -166,7 +166,7 @@ class FTS(object):
         :param kwargs:
         :return:
         """
-        self.train(data, sets=None)
+        self.train(data, **kwargs)
 
     def append_transformation(self, transformation):
         if transformation is not None:
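This base-class change is what drives all the per-model edits in this commit: train() loses its fixed (sets, order, parameters) signature and reads options from **kwargs, and the delegating method (apparently fit(), judging from the new test script at the bottom of this commit) now forwards **kwargs instead of hard-coding sets=None. An illustrative, self-contained sketch of the pattern, not the verbatim pyFTS code:

class SketchFTS:
    def __init__(self):
        self.sets = None
        self.order = 1

    def train(self, data, **kwargs):
        if kwargs.get('sets', None) is not None:   # keep previous sets when omitted
            self.sets = kwargs.get('sets', None)
        self.order = kwargs.get('order', 1)        # named option with a default

    def fit(self, data, **kwargs):
        self.train(data, **kwargs)                 # forward every option unchanged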
@@ -28,6 +28,7 @@ def generate_gaussian_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=100, num=1
         sigma += sigma_inc
     return ret
 
 
+
 def generate_uniform_linear(min_ini, max_ini, min_inc, max_inc, it=100, num=10, vmin=None, vmax=None):
     """
     Generate data sampled from Uniform distribution, with constant or linear changing bounds
@@ -53,4 +54,21 @@ def generate_uniform_linear(min_ini, max_ini, min_inc, max_inc, it=100, num=10,
         ret.extend(tmp)
         _min += min_inc
         _max += max_inc
     return ret
+
+
+def white_noise(n=500):
+    return np.random.normal(0, 1, n)
+
+
+def random_walk(n=500, type='gaussian'):
+    if type == 'gaussian':
+        tmp = generate_gaussian_linear(0, 1, 0, 0, it=1, num=n)
+    else:
+        tmp = generate_uniform_linear(-1, 1, 0, 0, it=1, num=n)
+    ret = [0]
+    for i in range(n):
+        ret.append(tmp[i] + ret[i])
+
+    return ret
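A usage sketch for the two new generators; the module path is an assumption, since this diff view does not show file names:

import numpy as np
from pyFTS.data import artificial     # assumed module path for the file above

noise = artificial.white_noise(200)   # 200 independent draws from N(0, 1)
walk = artificial.random_walk(200)    # gaussian increments; note: 201 points,
                                      # because ret is seeded with the value 0
print(np.mean(noise), walk[:5])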
@@ -36,7 +36,7 @@ class ConventionalFTS(fts.FTS):
         self.detail = "Chen"
         self.flrgs = {}
 
-    def generateFLRG(self, flrs):
+    def generate_flrg(self, flrs):
         flrgs = {}
         for flr in flrs:
             if flr.LHS.name in flrgs:
@@ -46,12 +46,13 @@ class ConventionalFTS(fts.FTS):
                 flrgs[flr.LHS.name].append(flr.RHS)
         return (flrgs)
 
-    def train(self, data, sets,order=1,parameters=None):
-        self.sets = sets
+    def train(self, data, **kwargs):
+        if kwargs.get('sets', None) is not None:
+            self.sets = kwargs.get('sets', None)
         ndata = self.apply_transformations(data)
-        tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
+        tmpdata = FuzzySet.fuzzyfy_series_old(ndata, self.sets)
         flrs = FLR.generate_non_recurrent_flrs(tmpdata)
-        self.flrgs = self.generateFLRG(flrs)
+        self.flrgs = self.generate_flrg(flrs)
 
     def forecast(self, data, **kwargs):
 
@@ -74,6 +75,6 @@ class ConventionalFTS(fts.FTS):
 
             ret.append(_flrg.get_midpoint())
 
-        ret = self.apply_inverse_transformations(ret, params=[data[self.order - 1:]])
+        ret = self.apply_inverse_transformations(ret, params=[data])
 
         return ret
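Beyond the signature, the behavioral change in train() is the guard on sets: omitting the keyword no longer overwrites fuzzy sets already stored on the model. A hedged sketch of the calling pattern, on a toy series:

from pyFTS.partitioners import Grid
from pyFTS.models import chen      # import path as used by this commit's test script

data = [abs(30 - i % 60) for i in range(240)]   # toy triangular wave
part = Grid.GridPartitioner(data, 7)

model = chen.ConventionalFTS("")
model.train(data, sets=part.sets)  # explicit sets are stored on the model
model.train(data)                  # sets omitted: the guard keeps the old ones
print(model.forecast(data[-5:]))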
@@ -16,28 +16,31 @@ class TrendWeightedFLRG(yu.WeightedFLRG):
     """
     def __init__(self, LHS, **kwargs):
         super(TrendWeightedFLRG, self).__init__(LHS, **kwargs)
+        self.w = None
 
     def weights(self):
-        count_nochange = 0.0
-        count_up = 0.0
-        count_down = 0.0
-        weights = []
+        if self.w is None:
+            count_nochange = 0.0
+            count_up = 0.0
+            count_down = 0.0
+            weights = []
 
             for c in self.RHS:
                 tmp = 0
                 if self.LHS.centroid == c.centroid:
                     count_nochange += 1.0
                     tmp = count_nochange
                 elif self.LHS.centroid > c.centroid:
                     count_down += 1.0
                     tmp = count_down
                 else:
                     count_up += 1.0
                     tmp = count_up
                 weights.append(tmp)
 
             tot = sum(weights)
-        return np.array([k / tot for k in weights])
+            self.w = np.array([k / tot for k in weights])
+        return self.w
 
 
 class TrendWeightedFTS(yu.WeightedFTS):
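The weights() rewrite is plain memoization: the up/down/no-change counts are computed once and the normalized vector is cached in self.w, so repeated forecasts stop recounting the RHS. The cache assumes the FLRG is frozen after training; appending to the RHS after the first weights() call would return stale values. A generic sketch of the pattern, with hypothetical names:

import numpy as np

class CachedWeights:
    # generic memoization sketch mirroring the self.w cache above
    def __init__(self, counts):
        self.counts = counts
        self.w = None              # cache slot, filled on first use

    def weights(self):
        if self.w is None:         # compute once...
            tot = sum(self.counts)
            self.w = np.array([k / tot for k in self.counts])
        return self.w              # ...reuse on every later call

cw = CachedWeights([3.0, 1.0, 1.0])
print(cw.weights())                # [0.6 0.2 0.2]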
@@ -16,19 +16,19 @@ class HighOrderFLRG(flrg.FLRG):
         self.RHS = {}
         self.strlhs = ""
 
-    def appendRHS(self, c):
+    def append_rhs(self, c):
         if c.name not in self.RHS:
             self.RHS[c.name] = c
 
-    def strLHS(self):
+    def str_lhs(self):
         if len(self.strlhs) == 0:
             for c in self.LHS:
                 if len(self.strlhs) > 0:
                     self.strlhs += ", "
-                self.strlhs = self.strlhs + str(c)
+                self.strlhs = self.strlhs + str(c.name)
         return self.strlhs
 
-    def appendLHS(self, c):
+    def append_lhs(self, c):
         self.LHS.append(c)
 
     def __str__(self):
@@ -37,7 +37,7 @@ class HighOrderFLRG(flrg.FLRG):
             if len(tmp) > 0:
                 tmp = tmp + ","
             tmp = tmp + c
-        return self.strLHS() + " -> " + tmp
+        return self.str_lhs() + " -> " + tmp
 
 
     def __len__(self):
@@ -51,7 +51,7 @@ class HighOrderFTS(fts.FTS):
         self.name = "High Order FTS"
         self.shortname = "HOFTS" + name
         self.detail = "Chen"
-        self.order = 1
+        self.order = kwargs.get('order',1)
         self.setsDict = {}
         self.is_high_order = True
 
@@ -83,13 +83,13 @@ class HighOrderFTS(fts.FTS):
                 flrg = HighOrderFLRG(self.order)
 
                 for kk in np.arange(k - self.order, k):
-                    flrg.appendLHS(flrs[kk].LHS)
+                    flrg.append_lhs(flrs[kk].LHS)
 
-                if flrg.strLHS() in flrgs:
-                    flrgs[flrg.strLHS()].appendRHS(flrs[k].RHS)
+                if flrg.str_lhs() in flrgs:
+                    flrgs[flrg.str_lhs()].append_rhs(flrs[k].RHS)
                 else:
-                    flrgs[flrg.strLHS()] = flrg;
-                    flrgs[flrg.strLHS()].appendRHS(flrs[k].RHS)
+                    flrgs[flrg.str_lhs()] = flrg;
+                    flrgs[flrg.str_lhs()].append_rhs(flrs[k].RHS)
         return (flrgs)
 
     def generate_flrg(self, data):
@@ -118,23 +118,25 @@ class HighOrderFTS(fts.FTS):
                 flrg = HighOrderFLRG(self.order)
                 path = list(reversed(list(filter(None.__ne__, p))))
 
-                for lhs in enumerate(path, start=0):
-                    flrg.appendLHS(lhs)
+                for lhs in path:
+                    flrg.append_lhs(lhs)
 
-                if flrg.strLHS() not in flrgs:
-                    flrgs[flrg.strLHS()] = flrg;
+                if flrg.str_lhs() not in flrgs:
+                    flrgs[flrg.str_lhs()] = flrg;
 
                 for st in rhs:
-                    flrgs[flrg.strLHS()].appendRHS(st)
+                    flrgs[flrg.str_lhs()].append_rhs(st)
 
         return flrgs
 
-    def train(self, data, sets, order=1,parameters=None):
+    def train(self, data, **kwargs):
 
         data = self.apply_transformations(data, updateUoD=True)
 
-        self.order = order
-        self.sets = sets
+        self.order = kwargs.get('order',2)
+
+        if kwargs.get('sets', None) is not None:
+            self.sets = kwargs.get('sets', None)
         for s in self.sets: self.setsDict[s.name] = s
         self.flrgs = self.generate_flrg(data)
 
@@ -153,12 +155,12 @@ class HighOrderFTS(fts.FTS):
             tmpdata = FuzzySet.fuzzyfy_series_old(ndata[k - self.order: k], self.sets)
             tmpflrg = HighOrderFLRG(self.order)
 
-            for s in tmpdata: tmpflrg.appendLHS(s)
+            for s in tmpdata: tmpflrg.append_lhs(s)
 
-            if tmpflrg.strLHS() not in self.flrgs:
+            if tmpflrg.str_lhs() not in self.flrgs:
                 ret.append(tmpdata[-1].centroid)
             else:
-                flrg = self.flrgs[tmpflrg.strLHS()]
+                flrg = self.flrgs[tmpflrg.str_lhs()]
                 ret.append(flrg.get_midpoint())
 
         ret = self.apply_inverse_transformations(ret, params=[data[self.order - 1:]])
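Besides the snake_case renames (appendRHS to append_rhs, strLHS to str_lhs, appendLHS to append_lhs), str_lhs() now concatenates str(c.name) rather than str(c), so FLRG dictionary keys are built from fuzzy set names instead of whole object reprs. A self-contained sketch of the keying idiom that generate_flrg relies on; the names here are hypothetical:

class SketchFLRG:
    # minimal stand-in for HighOrderFLRG, keyed by its LHS string
    def __init__(self):
        self.LHS, self.RHS = [], {}

    def append_lhs(self, name):
        self.LHS.append(name)

    def append_rhs(self, name):
        self.RHS[name] = name          # dict-as-set: duplicates collapse

    def str_lhs(self):
        return ", ".join(self.LHS)

flrgs = {}
flrg = SketchFLRG()
for name in ["A1", "A2"]:
    flrg.append_lhs(name)
if flrg.str_lhs() not in flrgs:        # same lookup pattern as generate_flrg
    flrgs[flrg.str_lhs()] = flrg
flrgs[flrg.str_lhs()].append_rhs("A3")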
@@ -50,6 +50,7 @@ class HighOrderFTS(fts.FTS):
 
         return ret
 
-    def train(self, data, sets, order=1, parameters=None):
-        self.sets = sets
-        self.order = order
+    def train(self, data, **kwargs):
+        if kwargs.get('sets', None) is not None:
+            self.sets = kwargs.get('sets', None)
+        self.order = kwargs.get('order', 1)
@@ -24,16 +24,16 @@ class IntervalFTS(hofts.HighOrderFTS):
         self.is_high_order = True
 
     def get_upper(self, flrg):
-        if flrg.strLHS() in self.flrgs:
-            tmp = self.flrgs[flrg.strLHS()]
+        if flrg.str_lhs() in self.flrgs:
+            tmp = self.flrgs[flrg.str_lhs()]
             ret = tmp.get_upper()
         else:
             ret = flrg.LHS[-1].upper
         return ret
 
     def get_lower(self, flrg):
-        if flrg.strLHS() in self.flrgs:
-            tmp = self.flrgs[flrg.strLHS()]
+        if flrg.str_lhs() in self.flrgs:
+            tmp = self.flrgs[flrg.str_lhs()]
             ret = tmp.get_lower()
         else:
             ret = flrg.LHS[-1].lower
@@ -93,7 +93,7 @@ class IntervalFTS(hofts.HighOrderFTS):
                 for p in root.paths():
                     path = list(reversed(list(filter(None.__ne__, p))))
                     flrg = hofts.HighOrderFLRG(self.order)
-                    for kk in path: flrg.appendLHS(self.sets[kk])
+                    for kk in path: flrg.append_lhs(self.sets[kk])
 
                     affected_flrgs.append(flrg)
 
@@ -115,7 +115,7 @@ class IntervalFTS(hofts.HighOrderFTS):
 
                 for kk in idx:
                     flrg = hofts.HighOrderFLRG(self.order)
-                    flrg.appendLHS(self.sets[kk])
+                    flrg.append_lhs(self.sets[kk])
                     affected_flrgs.append(flrg)
                     affected_flrgs_memberships.append(mv[kk])
@@ -17,6 +17,7 @@ class ImprovedWeightedFLRG(flrg.FLRG):
         self.RHS = {}
         self.rhs_counts = {}
         self.count = 0.0
+        self.w = None
 
     def append(self, c):
         if c.name not in self.RHS:
@@ -27,7 +28,9 @@ class ImprovedWeightedFLRG(flrg.FLRG):
             self.count += 1.0
 
     def weights(self):
-        return np.array([self.rhs_counts[c] / self.count for c in self.RHS.keys()])
+        if self.w is None:
+            self.w = np.array([self.rhs_counts[c] / self.count for c in self.RHS.keys()])
+        return self.w
 
     def __str__(self):
         tmp = self.LHS.name + " -> "
@@ -50,7 +53,7 @@ class ImprovedWeightedFTS(fts.FTS):
         self.detail = "Ismail & Efendi"
         self.setsDict = {}
 
-    def generateFLRG(self, flrs):
+    def generate_flrg(self, flrs):
         flrgs = {}
         for flr in flrs:
             if flr.LHS.name in flrgs:
@@ -60,8 +63,9 @@ class ImprovedWeightedFTS(fts.FTS):
                 flrgs[flr.LHS.name].append(flr.RHS)
         return (flrgs)
 
-    def train(self, data, sets, order=1, parameters=None):
-        self.sets = sets
+    def train(self, data, **kwargs):
+        if kwargs.get('sets', None) is not None:
+            self.sets = kwargs.get('sets', None)
 
         for s in self.sets: self.setsDict[s.name] = s
 
@@ -69,7 +73,7 @@ class ImprovedWeightedFTS(fts.FTS):
 
         tmpdata = FuzzySet.fuzzyfy_series_old(ndata, self.sets)
         flrs = FLR.generate_recurrent_flrs(tmpdata)
-        self.flrgs = self.generateFLRG(flrs)
+        self.flrgs = self.generate_flrg(flrs)
 
     def forecast(self, data, **kwargs):
         l = 1
@@ -95,6 +99,6 @@ class ImprovedWeightedFTS(fts.FTS):
 
             ret.append(mp.dot(flrg.weights()))
 
-        ret = self.apply_inverse_transformations(ret, params=[data[self.order - 1:]])
+        ret = self.apply_inverse_transformations(ret, params=[data])
 
         return ret
@@ -88,7 +88,7 @@ class HighOrderNonStationaryFTS(hofts.HighOrderFTS):
                     flrgs[flrg.strLHS()] = flrg;
 
                 for st in rhs:
-                    flrgs[flrg.strLHS()].appendRHS(st)
+                    flrgs[flrg.strLHS()].append_rhs(st)
 
         # flrgs = sorted(flrgs, key=lambda flrg: flrg.get_midpoint(0, window_size=1))
 
@@ -144,7 +144,7 @@ class HighOrderNonStationaryFTS(hofts.HighOrderFTS):
                     affected_flrgs.append(flrg)
                     # affected_flrgs_memberships.append(flrg.get_membership(sample, disp))
 
                    # print(flrg.strLHS())
+                    # print(flrg.str_lhs())
 
                 # the FLRG is here because of the bounds verification
                 mv = []
@@ -196,14 +196,14 @@ class HighOrderNonStationaryFTS(hofts.HighOrderFTS):
                 tmp.append(common.check_bounds(sample[-1], self.sets, tdisp))
             elif len(affected_flrgs) == 1:
                 flrg = affected_flrgs[0]
-                if flrg.strLHS() in self.flrgs:
-                    tmp.append(self.flrgs[flrg.strLHS()].get_midpoint(tdisp))
+                if flrg.str_lhs() in self.flrgs:
+                    tmp.append(self.flrgs[flrg.str_lhs()].get_midpoint(tdisp))
                 else:
                     tmp.append(flrg.LHS[-1].get_midpoint(tdisp))
             else:
                 for ct, aset in enumerate(affected_flrgs):
-                    if aset.strLHS() in self.flrgs:
-                        tmp.append(self.flrgs[aset.strLHS()].get_midpoint(tdisp) *
+                    if aset.str_lhs() in self.flrgs:
+                        tmp.append(self.flrgs[aset.str_lhs()].get_midpoint(tdisp) *
                                    affected_flrgs_memberships[ct])
                     else:
                         tmp.append(aset.LHS[-1].get_midpoint(tdisp)*
@@ -250,19 +250,19 @@ class HighOrderNonStationaryFTS(hofts.HighOrderFTS):
                 upper.append(aset.get_upper(tdisp))
             elif len(affected_flrgs) == 1:
                 _flrg = affected_flrgs[0]
-                if _flrg.strLHS() in self.flrgs:
-                    lower.append(self.flrgs[_flrg.strLHS()].get_lower(tdisp))
-                    upper.append(self.flrgs[_flrg.strLHS()].get_upper(tdisp))
+                if _flrg.str_lhs() in self.flrgs:
+                    lower.append(self.flrgs[_flrg.str_lhs()].get_lower(tdisp))
+                    upper.append(self.flrgs[_flrg.str_lhs()].get_upper(tdisp))
                 else:
                     lower.append(_flrg.LHS[-1].get_lower(tdisp))
                     upper.append(_flrg.LHS[-1].get_upper(tdisp))
             else:
                 for ct, aset in enumerate(affected_flrgs):
-                    if aset.strLHS() in self.flrgs:
-                        lower.append(self.flrgs[aset.strLHS()].get_lower(tdisp) *
+                    if aset.str_lhs() in self.flrgs:
+                        lower.append(self.flrgs[aset.str_lhs()].get_lower(tdisp) *
                                      affected_flrgs_memberships[ct])
-                        upper.append(self.flrgs[aset.strLHS()].get_upper(tdisp) *
+                        upper.append(self.flrgs[aset.str_lhs()].get_upper(tdisp) *
                                      affected_flrgs_memberships[ct])
                     else:
                         lower.append(aset.LHS[-1].get_lower(tdisp) *
                                      affected_flrgs_memberships[ct])
@@ -19,7 +19,7 @@ class ProbabilisticWeightedFLRG(hofts.HighOrderFLRG):
         self.frequency_count = 0.0
         self.Z = None
 
-    def appendRHS(self, c):
+    def append_rhs(self, c):
         self.frequency_count += 1.0
         if c.name in self.RHS:
             self.rhs_count[c.name] += 1.0
@@ -91,7 +91,7 @@ class ProbabilisticWeightedFLRG(hofts.HighOrderFLRG):
             if len(tmp2) > 0:
                 tmp2 = tmp2 + ", "
             tmp2 = tmp2 + "(" + str(round(self.rhs_count[c] / self.frequency_count, 3)) + ")" + c
-        return self.strLHS() + " -> " + tmp2
+        return self.str_lhs() + " -> " + tmp2
 
 
 class ProbabilisticWeightedFTS(ifts.IntervalFTS):
@@ -111,20 +111,22 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
         self.interval_method = kwargs.get('interval_method','extremum')
         self.alpha = kwargs.get('alpha', 0.05)
 
-    def train(self, data, sets, order=1,parameters='Fuzzy'):
+    def train(self, data, **kwargs):
 
         data = self.apply_transformations(data, updateUoD=True)
 
-        self.order = order
-        if sets is None and self.partitioner is not None:
+        parameters = kwargs.get('parameters','Fuzzy')
+
+        self.order = kwargs.get('order',1)
+        if kwargs.get('sets',None) is None and self.partitioner is not None:
             self.sets = self.partitioner.sets
             self.original_min = self.partitioner.min
             self.original_max = self.partitioner.max
         else:
-            self.sets = sets
+            self.sets = kwargs.get('sets',None)
         for s in self.sets: self.setsDict[s.name] = s
         if parameters == 'Monotonic':
-            tmpdata = FuzzySet.fuzzyfy_series_old(data, sets)
+            tmpdata = FuzzySet.fuzzyfy_series_old(data, self.sets)
             flrs = FLR.generate_recurrent_flrs(tmpdata)
             self.flrgs = self.generateFLRG(flrs)
         else:
@@ -162,15 +164,15 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
                     tmp_path = []
                     for c, e in enumerate(path, start=0):
                         tmp_path.append( e.membership( sample[c] ) )
-                        flrg.appendLHS(e)
+                        flrg.append_lhs(e)
 
                     lhs_mv = np.prod(tmp_path)
 
-                    if flrg.strLHS() not in flrgs:
-                        flrgs[flrg.strLHS()] = flrg;
+                    if flrg.str_lhs() not in flrgs:
+                        flrgs[flrg.str_lhs()] = flrg;
 
                     for st in idx:
-                        flrgs[flrg.strLHS()].appendRHSFuzzy(self.sets[st], lhs_mv*mv[st])
+                        flrgs[flrg.str_lhs()].appendRHSFuzzy(self.sets[st], lhs_mv * mv[st])
 
                     tmp_fq = sum([lhs_mv*kk for kk in mv if kk > 0])
 
@@ -186,14 +188,14 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
             flrg = ProbabilisticWeightedFLRG(self.order)
 
             for kk in np.arange(k - self.order, k):
-                flrg.appendLHS(flrs[kk].LHS)
+                flrg.append_lhs(flrs[kk].LHS)
                 if self.dump: print("LHS: " + str(flrs[kk]))
 
-            if flrg.strLHS() in flrgs:
-                flrgs[flrg.strLHS()].appendRHS(flrs[k-1].RHS)
+            if flrg.str_lhs() in flrgs:
+                flrgs[flrg.str_lhs()].append_rhs(flrs[k - 1].RHS)
             else:
-                flrgs[flrg.strLHS()] = flrg
-                flrgs[flrg.strLHS()].appendRHS(flrs[k-1].RHS)
+                flrgs[flrg.str_lhs()] = flrg
+                flrgs[flrg.str_lhs()].append_rhs(flrs[k - 1].RHS)
             if self.dump: print("RHS: " + str(flrs[k-1]))
 
             self.global_frequency_count += 1
@@ -205,34 +207,34 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
 
             flrg = ProbabilisticWeightedFLRG(self.order)
 
-            for k in np.arange(0, self.order): flrg.appendLHS(fzzy[k])
+            for k in np.arange(0, self.order): flrg.append_lhs(fzzy[k])
 
-            if flrg.strLHS() in self.flrgs:
-                self.flrgs[flrg.strLHS()].appendRHS(fzzy[self.order])
+            if flrg.str_lhs() in self.flrgs:
+                self.flrgs[flrg.str_lhs()].append_rhs(fzzy[self.order])
             else:
-                self.flrgs[flrg.strLHS()] = flrg
-                self.flrgs[flrg.strLHS()].appendRHS(fzzy[self.order])
+                self.flrgs[flrg.str_lhs()] = flrg
+                self.flrgs[flrg.str_lhs()].append_rhs(fzzy[self.order])
 
             self.global_frequency_count += 1
 
     def add_new_PWFLGR(self, flrg):
-        if flrg.strLHS() not in self.flrgs:
+        if flrg.str_lhs() not in self.flrgs:
             tmp = ProbabilisticWeightedFLRG(self.order)
-            for fs in flrg.LHS: tmp.appendLHS(fs)
-            tmp.appendRHS(flrg.LHS[-1])
-            self.flrgs[tmp.strLHS()] = tmp;
+            for fs in flrg.LHS: tmp.append_lhs(fs)
+            tmp.append_rhs(flrg.LHS[-1])
+            self.flrgs[tmp.str_lhs()] = tmp;
             self.global_frequency_count += 1
 
     def get_flrg_global_probability(self, flrg):
-        if flrg.strLHS() in self.flrgs:
-            return self.flrgs[flrg.strLHS()].frequency_count / self.global_frequency_count
+        if flrg.str_lhs() in self.flrgs:
+            return self.flrgs[flrg.str_lhs()].frequency_count / self.global_frequency_count
         else:
            self.add_new_PWFLGR(flrg)
            return self.get_flrg_global_probability(flrg)
 
     def get_midpoint(self, flrg):
-        if flrg.strLHS() in self.flrgs:
-            tmp = self.flrgs[flrg.strLHS()]
+        if flrg.str_lhs() in self.flrgs:
+            tmp = self.flrgs[flrg.str_lhs()]
             ret = tmp.get_midpoint() #sum(np.array([tmp.get_RHSprobability(s) * self.setsDict[s].centroid for s in tmp.RHS]))
         else:
             pi = 1 / len(flrg.LHS)
@@ -241,8 +243,8 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
 
     def get_conditional_probability(self, x, flrg):
 
-        if flrg.strLHS() in self.flrgs:
-            _flrg = self.flrgs[flrg.strLHS()]
+        if flrg.str_lhs() in self.flrgs:
+            _flrg = self.flrgs[flrg.str_lhs()]
             cond = []
             for s in _flrg.RHS:
                 _set = self.setsDict[s]
@@ -258,8 +260,8 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
         return ret
 
     def get_upper(self, flrg):
-        if flrg.strLHS() in self.flrgs:
-            tmp = self.flrgs[flrg.strLHS()]
+        if flrg.str_lhs() in self.flrgs:
+            tmp = self.flrgs[flrg.str_lhs()]
             ret = tmp.get_upper()
         else:
             pi = 1 / len(flrg.LHS)
@@ -267,8 +269,8 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
         return ret
 
     def get_lower(self, flrg):
-        if flrg.strLHS() in self.flrgs:
-            tmp = self.flrgs[flrg.strLHS()]
+        if flrg.str_lhs() in self.flrgs:
+            tmp = self.flrgs[flrg.str_lhs()]
             ret = tmp.get_lower()
         else:
             pi = 1 / len(flrg.LHS)
@@ -324,7 +326,7 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
                 for p in root.paths():
                     path = list(reversed(list(filter(None.__ne__, p))))
                     flrg = hofts.HighOrderFLRG(self.order)
-                    for kk in path: flrg.appendLHS(self.sets[kk])
+                    for kk in path: flrg.append_lhs(self.sets[kk])
 
                     assert len(flrg.LHS) == subset.size, str(subset) + " -> " + str([s.name for s in flrg.LHS])
 
@@ -350,7 +352,7 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
 
                 for kk in idx:
                     flrg = hofts.HighOrderFLRG(self.order)
-                    flrg.appendLHS(self.sets[kk])
+                    flrg.append_lhs(self.sets[kk])
                     affected_flrgs.append(flrg)
                     affected_flrgs_memberships.append(mv[kk])
 
@@ -446,7 +448,7 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
                 for p in root.paths():
                     path = list(reversed(list(filter(None.__ne__, p))))
                     flrg = hofts.HighOrderFLRG(self.order)
-                    for kk in path: flrg.appendLHS(self.sets[kk])
+                    for kk in path: flrg.append_lhs(self.sets[kk])
 
                     assert len(flrg.LHS) == subset.size, str(subset) + " -> " + str([s.name for s in flrg.LHS])
 
@@ -473,7 +475,7 @@ class ProbabilisticWeightedFTS(ifts.IntervalFTS):
 
                 for kk in idx:
                     flrg = hofts.HighOrderFLRG(self.order)
-                    flrg.appendLHS(self.sets[kk])
+                    flrg.append_lhs(self.sets[kk])
                     affected_flrgs.append(flrg)
                     affected_flrgs_memberships.append(mv[kk])
                 for count, flrg in enumerate(affected_flrgs):
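The train() refactor in this file also adds a fallback: when no sets keyword is given and the model carries a partitioner, the fuzzy sets and the universe-of-discourse bounds are taken from that partitioner; an explicit sets keyword still wins. A sketch of the resolution order (illustrative, not the verbatim method body):

def resolve_sets(model, **kwargs):
    # mirrors the branch added to ProbabilisticWeightedFTS.train() above
    if kwargs.get('sets', None) is None and model.partitioner is not None:
        model.sets = model.partitioner.sets          # fall back to the partitioner
        model.original_min = model.partitioner.min   # and inherit its UoD bounds
        model.original_max = model.partitioner.max
    else:
        model.sets = kwargs.get('sets', None)        # explicit sets win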
@@ -8,6 +8,8 @@ refined exponentially weighted fuzzy time series and an improved harmony search,
 import numpy as np
 from pyFTS.common import FuzzySet,FLR,fts, flrg
 
+default_c = 1.1
+
 
 class ExponentialyWeightedFLRG(flrg.FLRG):
     """First Order Exponentialy Weighted Fuzzy Logical Relationship Group"""
@@ -16,16 +18,19 @@ class ExponentialyWeightedFLRG(flrg.FLRG):
         self.LHS = LHS
         self.RHS = []
         self.count = 0.0
-        self.c = kwargs.get("c",2.0)
+        self.c = kwargs.get("c",default_c)
+        self.w = None
 
     def append(self, c):
         self.RHS.append(c)
         self.count = self.count + 1.0
 
     def weights(self):
-        wei = [self.c ** k for k in np.arange(0.0, self.count, 1.0)]
-        tot = sum(wei)
-        return np.array([k / tot for k in wei])
+        if self.w is None:
+            wei = [self.c ** k for k in np.arange(0.0, self.count, 1.0)]
+            tot = sum(wei)
+            self.w = np.array([k / tot for k in wei])
+        return self.w
 
     def __str__(self):
         tmp = self.LHS.name + " -> "
@@ -50,9 +55,9 @@ class ExponentialyWeightedFTS(fts.FTS):
         super(ExponentialyWeightedFTS, self).__init__(1, "EWFTS", **kwargs)
         self.name = "Exponentialy Weighted FTS"
         self.detail = "Sadaei"
-        self.c = 1
+        self.c = kwargs.get('c', default_c)
 
-    def generateFLRG(self, flrs, c):
+    def generate_flrg(self, flrs, c):
         flrgs = {}
         for flr in flrs:
             if flr.LHS.name in flrgs:
@@ -62,13 +67,14 @@ class ExponentialyWeightedFTS(fts.FTS):
                 flrgs[flr.LHS.name].append(flr.RHS)
         return (flrgs)
 
-    def train(self, data, sets,order=1,parameters=1.05):
-        self.c = parameters
-        self.sets = sets
+    def train(self, data, **kwargs):
+        self.c = kwargs.get('parameters', default_c)
+        if kwargs.get('sets', None) is not None:
+            self.sets = kwargs.get('sets', None)
         ndata = self.apply_transformations(data)
-        tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
+        tmpdata = FuzzySet.fuzzyfy_series_old(ndata, self.sets)
         flrs = FLR.generate_recurrent_flrs(tmpdata)
-        self.flrgs = self.generateFLRG(flrs, self.c)
+        self.flrgs = self.generate_flrg(flrs, self.c)
 
     def forecast(self, data, **kwargs):
         l = 1
@@ -95,6 +101,6 @@ class ExponentialyWeightedFTS(fts.FTS):
 
             ret.append(mp.dot(flrg.weights()))
 
-        ret = self.apply_inverse_transformations(ret, params=[data[self.order - 1:]])
+        ret = self.apply_inverse_transformations(ret, params=[data])
 
         return ret
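The scattered defaults for the exponential weight (2.0 in the FLRG, 1 in the model, 1.05 in train) are unified into one module constant, default_c = 1.1. Note that train() re-reads the constant through the parameters keyword, so a c passed to the constructor is overridden at training time unless parameters= is also given. A usage sketch under those semantics:

from pyFTS.partitioners import Grid
from pyFTS.models import sadaei    # import path as used by this commit's test script

data = [abs(50 - i % 100) for i in range(300)]   # toy sawtooth series
part = Grid.GridPartitioner(data, 10)

model = sadaei.ExponentialyWeightedFTS("")
model.train(data, sets=part.sets, parameters=2.0)  # overrides default_c = 1.1
print(model.forecast(data[-10:]))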
@@ -37,9 +37,9 @@ class ConventionalFTS(fts.FTS):
 
         return r
 
-    def train(self, data, sets,order=1,parameters=None):
-        if sets != None:
-            self.sets = sets
+    def train(self, data, **kwargs):
+        if kwargs.get('sets', None) is not None:
+            self.sets = kwargs.get('sets', None)
         ndata = self.apply_transformations(data)
         tmpdata = FuzzySet.fuzzyfy_series_old(ndata, self.sets)
         flrs = FLR.generate_non_recurrent_flrs(tmpdata)
@@ -71,3 +71,7 @@ class ConventionalFTS(fts.FTS):
         ret = self.apply_inverse_transformations(ret, params=[data])
 
         return ret
+
+    def __str__(self):
+        tmp = self.name + ":\n"
+        return tmp + str(self.R)
@@ -56,10 +56,11 @@ class WeightedFTS(fts.FTS):
                 flrgs[flr.LHS.name].append(flr.RHS)
         return (flrgs)
 
-    def train(self, data, sets,order=1,parameters=None):
-        self.sets = sets
+    def train(self, data, **kwargs):
+        if kwargs.get('sets', None) is not None:
+            self.sets = kwargs.get('sets', None)
         ndata = self.apply_transformations(data)
-        tmpdata = FuzzySet.fuzzyfy_series_old(ndata, sets)
+        tmpdata = FuzzySet.fuzzyfy_series_old(ndata, self.sets)
         flrs = FLR.generate_recurrent_flrs(tmpdata)
         self.flrgs = self.generate_FLRG(flrs)
 
@@ -88,6 +89,6 @@ class WeightedFTS(fts.FTS):
 
             ret.append(mp.dot(flrg.weights()))
 
-        ret = self.apply_inverse_transformations(ret, params=[data[self.order - 1:]])
+        ret = self.apply_inverse_transformations(ret, params=[data])
 
         return ret
New notebooks added (diffs suppressed because one or more lines are too long):

  315  pyFTS/notebooks/Benchmarks.ipynb
  462  pyFTS/notebooks/Chen - ConventionalFTS.ipynb
  462  pyFTS/notebooks/Cheng - TrendWeightedFTS.ipynb
  601  pyFTS/notebooks/HighOrderFTS.ipynb
  447  pyFTS/notebooks/Hwang - HighOrderFTS.ipynb
  460  pyFTS/notebooks/Ismail & Efendi - ImprovedWeightedFTS.ipynb
  315  pyFTS/notebooks/Partitioners.ipynb
  508  pyFTS/notebooks/Sadaei et Al - ExponentialyWeightedFTS.ipynb
  477  pyFTS/notebooks/Song - ConventionalFTS.ipynb
  461  pyFTS/notebooks/Yu - WeightedFTS.ipynb
@@ -8,7 +8,16 @@ from pyFTS.partitioners import partitioner
 
 class GridPartitioner(partitioner.Partitioner):
     """Even Length Grid Partitioner"""
 
     def __init__(self, data, npart, func = Membership.trimf, transformation=None, indexer=None):
+        """
+        Even Length Grid Partitioner
+
+        :param data: Training data of which the universe of discourse will be extracted. The universe of discourse is the open interval between the minimum and maximum values of the training data.
+        :param npart: The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created
+        :param func: Fuzzy membership function (pyFTS.common.Membership)
+        :param transformation: data transformation to be applied on data
+        :param indexer:
+        """
         super(GridPartitioner, self).__init__("Grid", data, npart, func=func, transformation=transformation, indexer=indexer)
 
     def build(self, data):
@@ -18,7 +27,7 @@ class GridPartitioner(partitioner.Partitioner):
         partlen = dlen / self.partitions
 
         count = 0
-        for c in np.linspace(self.min, self.max, self.partitions):
+        for c in np.arange(self.min, self.max, partlen):
             if self.membership_function == Membership.trimf:
                 sets.append(
                     FuzzySet.FuzzySet(self.prefix + str(count), Membership.trimf, [c - partlen, c, c + partlen],c))
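The switch from np.linspace to np.arange changes how the set centers are spaced: linspace(min, max, n) spaces n points (max - min)/(n - 1) apart with the last center sitting on max, while arange(min, max, partlen) spaces the centers exactly partlen apart, so adjacent triangles [c - partlen, c, c + partlen] cross at 0.5 membership. A worked comparison:

import numpy as np

dmin, dmax, npart = 0.0, 10.0, 5
partlen = (dmax - dmin) / npart          # same formula as build() above

print(np.linspace(dmin, dmax, npart))    # [ 0.   2.5  5.   7.5 10. ]  step 2.5
print(np.arange(dmin, dmax, partlen))    # [0. 2. 4. 6. 8.]            step 2.0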
@@ -12,9 +12,9 @@ class Partitioner(object):
     """
     Universe of Discourse partitioner scheme. Split data on several fuzzy sets
     :param name: partitioner name
-    :param data: original data to be partitioned
-    :param npart: number of partitions
-    :param func: membership function
+    :param data: Training data of which the universe of discourse will be extracted. The universe of discourse is the open interval between the minimum and maximum values of the training data.
+    :param npart: The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created
+    :param func: Fuzzy membership function (pyFTS.common.Membership)
     :param names: list of partitions names. If None is given the partitions will be auto named with prefix
     :param prefix: prefix of auto generated partition names
     :param transformation: data transformation to be applied on data
@@ -9,9 +9,7 @@ import numpy as np
 import pandas as pd
 from pyFTS.common import Transformations
 
-#from pyFTS.benchmarks import benchmarks as bchmk
+from pyFTS.benchmarks import benchmarks as bchmk
 
-os.chdir("/home/petronio/dados/Dropbox/Doutorado/Codigos/")
-
 bc = Transformations.BoxCox(0)
 diff = Transformations.Differential(1)
@ -21,358 +19,21 @@ diff = Transformations.Differential(1)
|
|||||||
DATASETS
|
DATASETS
|
||||||
"""
|
"""
|
||||||
|
|
||||||
#enrollments = pd.read_csv("DataSets/Enrollments.csv", sep=";")
|
from pyFTS.data import Enrollments
|
||||||
#enrollments = np.array(enrollments["Enrollments"])
|
|
||||||
|
|
||||||
passengers = pd.read_csv("DataSets/AirPassengers.csv", sep=",")
|
data = Enrollments.get_data()
|
||||||
passengers = np.array(passengers["Passengers"])
|
|
||||||
|
|
||||||
#sunspots = pd.read_csv("DataSets/sunspots.csv", sep=",")
|
|
||||||
#sunspots = np.array(sunspots["SUNACTIVITY"])
|
|
||||||
|
|
||||||
#gauss = random.normal(0,1.0,5000)
|
|
||||||
#gauss_teste = random.normal(0,1.0,400)
|
|
||||||
|
|
||||||
#taiexpd = pd.read_csv("DataSets/TAIEX.csv", sep=",")
|
|
||||||
#taiex = np.array(taiexpd["avg"][:5000])
|
|
||||||
#del(taiexpd)
|
|
||||||
|
|
||||||
#nasdaqpd = pd.read_csv("DataSets/NASDAQ_IXIC.csv", sep=",")
|
|
||||||
#nasdaq = np.array(nasdaqpd["avg"][0:5000])
|
|
||||||
#del(nasdaqpd)
|
|
||||||
|
|
||||||
#sp500pd = pd.read_csv("DataSets/S&P500.csv", sep=",")
|
|
||||||
#sp500 = np.array(sp500pd["Avg"][11000:])
|
|
||||||
#del(sp500pd)
|
|
||||||
|
|
||||||
#sondapd = pd.read_csv("DataSets/SONDA_BSB_HOURLY_AVG.csv", sep=";")
|
|
||||||
#sondapd = sondapd.dropna(axis=0, how='any')
|
|
||||||
#sonda = np.array(sondapd["glo_avg"])
|
|
||||||
#del(sondapd)
|
|
||||||
|
|
||||||
#bestpd = pd.read_csv("DataSets/BEST_TAVG.csv", sep=";")
|
|
||||||
#best = np.array(bestpd["Anomaly"])
|
|
||||||
#del(bestpd)
|
|
||||||
|
|
||||||
#print(lag)
|
|
||||||
#print(a)
|
|
||||||
#'''
|
|
||||||
'''
|
|
||||||
sonda = pd.read_csv("DataSets/SONDA_BSB_15MIN_AVG.csv", sep=";")
|
|
||||||
|
|
||||||
sonda['data'] = pd.to_datetime(sonda['data'])
|
|
||||||
|
|
||||||
sonda = sonda[:][527041:].dropna()
|
|
||||||
|
|
||||||
sonda.index = np.arange(0,len(sonda.index))
|
|
||||||
|
|
||||||
sonda_treino = sonda[:105313].dropna()
|
|
||||||
sonda_teste = sonda[105314:].dropna()
|
|
||||||
'''
|
|
||||||
|
|
||||||
from pyFTS.partitioners import Grid
|
from pyFTS.partitioners import Grid
|
||||||
from pyFTS import song, chen, yu, sadaei, ismailefendi, cheng
|
from pyFTS.models import song, chen, yu, sadaei, ismailefendi, cheng, hofts
|
||||||
|
|
||||||
train = passengers[:100]
|
train = data
|
||||||
test = passengers[100:]
|
test = data
|
||||||
|
|
||||||
fs = Grid.GridPartitioner(train, 10, transformation=bc)
|
fs = Grid.GridPartitioner(train, 10) #, transformation=bc)
|
||||||
|
|
||||||
methods = [song.ConventionalFTS, chen.ConventionalFTS, yu.WeightedFTS, sadaei.ExponentialyWeightedFTS,
|
#tmp = bchmk.simpleSearch_RMSE(train, test, hofts.HighOrderFTS, range(4,12), [2], tam=[10, 5])
|
||||||
ismailefendi.ImprovedWeightedFTS, cheng.TrendWeightedFTS]
|
|
||||||
|
|
||||||
#fig, axes = plt.subplots(nrows=1, ncols=1, figsize=[15, 5])
|
model = hofts.HighOrderFTS("", partitioner=fs)
|
||||||
|
model.fit(train, order=3)
|
||||||
|
|
||||||
#axes.plot(test, label="Original")
|
print(model)
|
||||||
|
|
||||||
for method in methods:
|
|
||||||
model = method("")
|
|
||||||
model.append_transformation(bc)
|
|
||||||
model.train(train, sets=fs.sets)
|
|
||||||
|
|
||||||
forecasts = model.forecast(test)
|
|
||||||
|
|
||||||
print(forecasts)
|
|
||||||
|
|
||||||
#ix_m15 = SeasonalIndexer.DateTimeSeasonalIndexer('data',[SeasonalIndexer.DateTime.minute],[15],'glo_avg', name='m15')
|
|
||||||
|
|
||||||
#fs1 = Grid.GridPartitioner(sonda_treino, 50, transformation=diff, indexer=ix_m15)
|
|
||||||
|
|
||||||
#ix = cUtil.load_obj("models/sonda_ix_Mhm15.pkl")
|
|
||||||
|
|
||||||
#fs = cUtil.load_obj("models/sonda_fs_Entropy40_diff.pkl")
|
|
||||||
|
|
||||||
#from pyFTS.models import msfts
|
|
||||||
|
|
||||||
#obj = msfts.MultiSeasonalFTS("sonda_msfts_Entropy40_Mhm15", indexer=ix)
|
|
||||||
|
|
||||||
#obj.append_transformation(diff)
|
|
||||||
|
|
||||||
#obj.train(sonda_treino, fs.sets)
|
|
||||||
|
|
||||||
#cUtil.persist_obj(obj, "models/sonda_msfts_Entropy40_Mhm15.pkl")
|
|
||||||
|
|
||||||
#ftse = cUtil.load_obj("models/sonda_ensemble_msfts.pkl")
|
|
||||||
|
|
||||||
#tmp = ftse.forecast_distribution(sonda_teste[850:860], h=0.5, method="gaussian")
|
|
||||||
|
|
||||||
#print(tmp[0])
|
|
||||||
|
|
||||||
#'''
|
|
||||||
|
|
||||||
'''
|
|
||||||
from pyFTS.models.seasonal import SeasonalIndexer
|
|
||||||
|
|
||||||
indexers = []
|
|
||||||
|
|
||||||
for i in ["models/sonda_ix_Mhm15.pkl"]: #, "models/sonda_ix_m15.pkl", "models/sonda_ix_Mh.pkl", ]:
|
|
||||||
obj = cUtil.load_obj(i)
|
|
||||||
indexers.append( obj )
|
|
||||||
print(obj)
|
|
||||||
|
|
||||||
partitioners = []
|
|
||||||
|
|
||||||
transformations = [""] #, "_diff"]
|
|
||||||
for max_part in [30, 40, 50, 60, 70, 80, 90]:
|
|
||||||
for t in transformations:
|
|
||||||
obj = cUtil.load_obj("models/sonda_fs_grid_" + str(max_part) + t + ".pkl")
|
|
||||||
partitioners.append( obj )
|
|
||||||
print(obj)
|
|
||||||
|
|
||||||
|
|
||||||
from pyFTS.ensemble import ensemble, multiseasonal
|
|
||||||
|
|
||||||
fts = multiseasonal.SeasonalEnsembleFTS("sonda_msfts_Mhm15")
|
|
||||||
|
|
||||||
fts.indexers = indexers
|
|
||||||
fts.partitioners = partitioners
|
|
||||||
|
|
||||||
fts.indexer = indexers[0]
|
|
||||||
|
|
||||||
fts.train(sonda_treino, sets=None)
|
|
||||||
'''
|
|
||||||
#'''
|
|
||||||
|
|
||||||
#ix = cUtil.load_obj("models/sonda_ix_m15.pkl")
|
|
||||||
|
|
||||||
#ftse = cUtil.load_obj("models/msfts_Grid40_diff_Mhm15.pkl")
|
|
||||||
|
|
||||||
#ftse.indexer = ix
|
|
||||||
|
|
||||||
#ftse.update_uod(sonda_treino)
|
|
||||||
|
|
||||||
#tmp = ftse.forecast_distribution(sonda_teste,h=1)
|
|
||||||
|
|
||||||
#tmp = ftse.forecast(sonda_teste,h=1)
|
|
||||||
|
|
||||||
#tmp[5].plot()
|
|
||||||
#'''
|
|
||||||
|
|
||||||
'''
from pyFTS.benchmarks import benchmarks as bchmk
#from pyFTS.benchmarks import distributed_benchmarks as bchmk
#from pyFTS.benchmarks import parallel_benchmarks as bchmk
from pyFTS.benchmarks import Util
from pyFTS.benchmarks import arima, quantreg, Measures

#Util.cast_dataframe_to_synthetic_point("experiments/taiex_point_analitic.csv","experiments/taiex_point_sintetic.csv",11)

#Util.plot_dataframe_point("experiments/taiex_point_sintetic.csv","experiments/taiex_point_analitic.csv",11)

"""
|
|
||||||
arima100 = arima.ARIMA("", alpha=0.25)
|
|
||||||
#tmp.append_transformation(diff)
|
|
||||||
arima100.train(passengers, None, order=(1,0,0))
|
|
||||||
|
|
||||||
arima101 = arima.ARIMA("", alpha=0.25)
|
|
||||||
#tmp.append_transformation(diff)
|
|
||||||
arima101.train(passengers, None, order=(1,0,1))
|
|
||||||
|
|
||||||
arima200 = arima.ARIMA("", alpha=0.25)
|
|
||||||
#tmp.append_transformation(diff)
|
|
||||||
arima200.train(passengers, None, order=(2,0,0))
|
|
||||||
|
|
||||||
arima201 = arima.ARIMA("", alpha=0.25)
|
|
||||||
#tmp.append_transformation(diff)
|
|
||||||
arima201.train(passengers, None, order=(2,0,1))
|
|
||||||
|
|
||||||
|
|
||||||
#tmp = quantreg.QuantileRegression("", alpha=0.25, dist=True)
|
|
||||||
#tmp.append_transformation(diff)
|
|
||||||
#tmp.train(sunspots[:150], None, order=1)
|
|
||||||
#teste = tmp.forecast_ahead_interval(sunspots[150:155], 5)
|
|
||||||
#teste = tmp.forecast_ahead_distribution(nasdaq[1600:1604], steps=5, resolution=50)
|
|
||||||
|
|
||||||
bchmk.plot_compared_series(enrollments,[tmp], ['blue','red'], points=False, intervals=True)
|
|
||||||
|
|
||||||
#print(sunspots[150:155])
|
|
||||||
#print(teste)
|
|
||||||
|
|
||||||
#kk = Measures.get_interval_statistics(nasdaq[1600:1605], tmp)
|
|
||||||
|
|
||||||
#print(kk)
|
|
||||||
"""


"""
bchmk.point_sliding_window(sonda, 9000, train=0.8, inc=0.4, #models=[yu.WeightedFTS], # #
                           partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
                           partitions= np.arange(10,200,step=10), #transformation=diff,
                           dump=True, save=True, file="experiments/sondaws_point_analytic.csv",
                           nodes=['192.168.0.103', '192.168.0.106', '192.168.0.108', '192.168.0.109']) #, depends=[hofts, ifts])


bchmk.point_sliding_window(sonda, 9000, train=0.8, inc=0.4, #models=[yu.WeightedFTS], # #
                           partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
                           partitions= np.arange(3,20,step=2), transformation=diff,
                           dump=True, save=True, file="experiments/sondaws_point_analytic_diff.csv",
                           nodes=['192.168.0.103', '192.168.0.106', '192.168.0.108', '192.168.0.109']) #, depends=[hofts, ifts])


bchmk.interval_sliding_window(best, 5000, train=0.8, inc=0.8, #models=[yu.WeightedFTS], # #
                              partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
                              partitions= np.arange(10,200,step=10),
                              dump=True, save=True, file="experiments/best_interval_analytic.csv",
                              nodes=['192.168.0.103', '192.168.0.106', '192.168.0.108', '192.168.0.109']) #, depends=[hofts, ifts])


bchmk.interval_sliding_window(taiex, 2000, train=0.8, inc=0.1, #models=[yu.WeightedFTS], # #
                              partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
                              partitions= np.arange(3,20,step=2), transformation=diff,
                              dump=True, save=True, file="experiments/taiex_interval_analytic_diff.csv",
                              nodes=['192.168.0.103', '192.168.0.106', '192.168.0.108', '192.168.0.109']) #, depends=[hofts, ifts])


bchmk.ahead_sliding_window(sonda, 10000, steps=10, resolution=10, train=0.2, inc=0.2,
                           partitioners=[Grid.GridPartitioner],
                           partitions= np.arange(10,200,step=10), indexer=ix,
                           dump=True, save=True, file="experiments/sondawind_ahead_analytic.csv",
                           nodes=['192.168.0.106', '192.168.0.108', '192.168.0.109']) #, depends=[hofts, ifts])


bchmk.ahead_sliding_window(sonda, 10000, steps=10, resolution=10, train=0.2, inc=0.2,
                           partitioners=[Grid.GridPartitioner],
                           partitions= np.arange(3,20,step=2), transformation=diff, indexer=ix,
                           dump=True, save=True, file="experiments/sondawind_ahead_analytic_diff.csv",
                           nodes=['192.168.0.106', '192.168.0.108', '192.168.0.109']) #, depends=[hofts, ifts])
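
# Note: each sliding-window sweep above dumps its per-window results to an
# experiments/*.csv file; those CSVs are what the Util.unified_scaled_*
# plotting calls further below consume.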


from pyFTS import pwfts
from pyFTS.common import Transformations
from pyFTS.partitioners import Grid

#diff = Transformations.Differential(1)
#fs = Grid.GridPartitioner(best, 190) #, transformation=diff)

#model = pwfts.ProbabilisticWeightedFTS("FTS 1")
#model.append_transformation(diff)
#model.train(best[0:1600], sets=fs.sets, order=3)

#bchmk.plot_compared_intervals_ahead(best[1600:1700],[model], ['blue','red'],
#                                    distributions=[True], save=True, file="pictures/best_ahead_forecasts",
#                                    time_from=40, time_to=60, resolution=100)
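
# Hedged sketch: the probabilistic model above can also emit full ahead
# densities, mirroring the forecast_ahead_distribution call pattern used
# with quantreg earlier in this file:
#
# dist = model.forecast_ahead_distribution(best[1600:1610], steps=10, resolution=100)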

experiments = [
    ["experiments/taiex_point_synthetic_diff.csv","experiments/taiex_point_analytic_diff.csv",16],
    ["experiments/nasdaq_point_synthetic_diff.csv","experiments/nasdaq_point_analytic_diff.csv", 11],
    ["experiments/sp500_point_synthetic_diff.csv","experiments/sp500_point_analytic_diff.csv", 21],
    ["experiments/best_point_synthetic_diff.csv","experiments/best_point_analytic_diff.csv", 13],
    ["experiments/sondasun_point_synthetic_diff.csv","experiments/sondasun_point_analytic_diff.csv", 15],
    ["experiments/sondawind_point_synthetic_diff.csv","experiments/sondawind_point_analytic_diff.csv", 8],
    ["experiments/gauss_point_synthetic_diff.csv","experiments/gauss_point_analytic_diff.csv", 16]
]

Util.unified_scaled_point(experiments, tam=[15,8], save=True, file="pictures/unified_experiments_point.png",
                          ignore=['ARIMA(1,0,0)','ARIMA(2,0,0)','ARIMA(2,0,1)','ARIMA(2,0,2)','QAR(2)'],
                          replace=[['ARIMA','ARIMA'],['QAR','QAR']])
'''

'''
experiments = [
    ["experiments/taiex_interval_synthetic.csv","experiments/taiex_interval_analytic.csv",16],
    ["experiments/nasdaq_interval_synthetic_diff.csv","experiments/nasdaq_interval_analytic_diff.csv",11],
    ["experiments/sp500_interval_synthetic_diff.csv","experiments/sp500_interval_analytic_diff.csv", 11],
    ["experiments/best_interval_synthetic_diff.csv","experiments/best_interval_analytic_diff.csv",13],
    ["experiments/sondasun_interval_synthetic_diff.csv","experiments/sondasun_interval_analytic_diff.csv",8],
    ["experiments/sondawind_interval_synthetic_diff.csv","experiments/sondawind_interval_analytic_diff.csv",8],
    ["experiments/gauss_interval_synthetic_diff.csv","experiments/gauss_interval_analytic_diff.csv", 8]
]

Util.unified_scaled_interval(experiments, tam=[15,8], save=True, file="pictures/unified_experiments_interval.png",
                             ignore=['ARIMA(1,0,0)', 'ARIMA(2,0,0)', 'ARIMA(2,0,1)', 'ARIMA(2,0,2)', 'QAR(2)'],
                             replace=[['ARIMA(1,0,1) - 0.05', 'ARIMA 0.05'], ['ARIMA(1,0,1) - 0.25', 'ARIMA 0.25'],
                                      ['QAR(1) - 0.05', 'QAR 0.05'], ['QAR(1) - 0.25', 'QAR 0.25']])

Util.unified_scaled_interval_pinball(experiments, tam=[15,8], save=True, file="pictures/unified_experiments_interval_pinball.png",
                                     ignore=['ARIMA(1,0,0)', 'ARIMA(2,0,0)', 'ARIMA(2,0,1)', 'ARIMA(2,0,2)', 'QAR(2)'],
                                     replace=[['ARIMA(1,0,1) - 0.05', 'ARIMA 0.05'], ['ARIMA(1,0,1) - 0.25', 'ARIMA 0.25'],
                                              ['QAR(1) - 0.05', 'QAR 0.05'], ['QAR(1) - 0.25', 'QAR 0.25']])
'''

'''
experiments = [
    ["experiments/taiex_ahead_synthetic_diff.csv","experiments/taiex_ahead_analytic_diff.csv",16],
    ["experiments/nasdaq_ahead_synthetic_diff.csv","experiments/nasdaq_ahead_analytic_diff.csv",11],
    ["experiments/sp500_ahead_synthetic_diff.csv","experiments/sp500_ahead_analytic_diff.csv", 21],
    ["experiments/best_ahead_synthetic_diff.csv","experiments/best_ahead_analytic_diff.csv", 24],
    ["experiments/sondasun_ahead_synthetic_diff.csv","experiments/sondasun_ahead_analytic_diff.csv",13],
    ["experiments/sondawind_ahead_synthetic_diff.csv","experiments/sondawind_ahead_analytic_diff.csv", 13],
    ["experiments/gauss_ahead_synthetic_diff.csv","experiments/gauss_ahead_analytic_diff.csv",16]
]

Util.unified_scaled_ahead(experiments, tam=[15,8], save=True, file="pictures/unified_experiments_ahead.png",
                          ignore=['ARIMA(1,0,0)', 'ARIMA(0,0,1)', 'ARIMA(2,0,0)', 'ARIMA(2,0,1)',
                                  'ARIMA(2,0,2)', 'QAR(2)', 'ARIMA0.05'],
                          replace=[['ARIMA(1,0,1) - 0.05', 'ARIMA 0.05'], ['ARIMA(1,0,1) - 0.25', 'ARIMA 0.25'],
                                   ['QAR(1) - 0.05', 'QAR 0.05'], ['QAR(1) - 0.25', 'QAR 0.25']])
'''

'''
from pyFTS.partitioners import Grid
from pyFTS import sfts

#print(ix.get_season_of_data(best[:2000]))
#print(ix.get_season_by_index(45))

#ix = SeasonalIndexer.LinearSeasonalIndexer([720,24],[False,True,False])
#print(ix.get_season_of_data(sonda[6500:9000])[-20:])

diff = Transformations.Differential(1)

fs = Grid.GridPartitioner(sonda[:9000], 10, transformation=diff)

tmp = sfts.SeasonalFTS("")
tmp.indexer = ix
tmp.append_transformation(diff)

#tmp = pwfts.ProbabilisticWeightedFTS("")
#tmp.append_transformation(diff)

tmp.train(sonda[:9000], sets=fs.sets, order=1)

x = tmp.forecast(sonda[:1610])

#print(taiex[1600:1610])
print(x)
'''