Bugfix issue #9

Petrônio Cândido 2018-09-26 14:34:23 -03:00
parent a414c3d7b8
commit 9653b7c0f9
7 changed files with 25 additions and 64 deletions

View File

@@ -74,13 +74,15 @@ def __binary_search(x, fuzzy_sets, ordered_sets):
    """
    max_len = len(fuzzy_sets)
    first = 0
-   last = max_len
+   last = max_len-1
    while first <= last:
        midpoint = (first + last) // 2
        fs = ordered_sets[midpoint]
        fs1 = ordered_sets[midpoint - 1] if midpoint > 0 else ordered_sets[0]
        fs2 = ordered_sets[midpoint + 1] if midpoint < max_len else ordered_sets[max_len]
        if fuzzy_sets[fs1].centroid <= x <= fuzzy_sets[fs2].centroid:
            return (midpoint-1, midpoint, midpoint+1)
        else:
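Reviewer note on the hunk above: since `max_len = len(fuzzy_sets)`, the old `last = max_len` let `midpoint` itself reach `max_len`, so `ordered_sets[midpoint]` indexed one past the last element; `last = max_len-1` caps the search at the last valid index. A minimal standalone sketch of the corrected bound, assuming a plain sorted list of centroid values stands in for `fuzzy_sets`/`ordered_sets` (illustrative, not the library's exact code):

# Standalone sketch of the corrected search bound. Assumption: a sorted
# list of centroid values replaces fuzzy_sets/ordered_sets.
def binary_search_region(x, centroids):
    first = 0
    last = len(centroids) - 1           # the fix: last valid index, not the length
    while first <= last:
        midpoint = (first + last) // 2  # always stays in 0..len(centroids)-1
        if centroids[midpoint] < x:
            first = midpoint + 1
        elif centroids[midpoint] > x:
            last = midpoint - 1
        else:
            return midpoint
    # Loop ended: x falls between centroids[last] and centroids[first].
    return max(last, 0)                 # clamp for x below the smallest centroid

print(binary_search_region(7000, [1000, 3000, 5000, 7500, 9000]))  # -> 2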

View File

@@ -50,7 +50,7 @@ class ConventionalFTS(fts.FTS):
    def train(self, data, **kwargs):
-       tmpdata = FuzzySet.fuzzyfy_series(data, self.sets, method='maximum')
+       tmpdata = FuzzySet.fuzzyfy(data, partitioner=self.partitioner, method='maximum', mode='sets')
        flrs = FLR.generate_non_recurrent_flrs(tmpdata)
        self.generate_flrg(flrs)
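Reviewer note: this is the API migration of the commit; the same one-line swap from `FuzzySet.fuzzyfy_series(data, self.sets, method='maximum')` to `FuzzySet.fuzzyfy(data, partitioner=self.partitioner, method='maximum', mode='sets')` recurs in each of the remaining model classes below. A short usage sketch of the new call at module level, reusing only names that appear elsewhere in this commit (npart=30 is illustrative):

from pyFTS.common import FuzzySet
from pyFTS.partitioners import Grid
from pyFTS.data import TAIEX

dataset = TAIEX.get_data()
partitioner = Grid.GridPartitioner(data=dataset, npart=30)

# Old call: FuzzySet.fuzzyfy_series(dataset, partitioner.sets, method='maximum')
# New call: the partitioner object is passed directly; mode='sets' returns,
# for each data point, the fuzzy set chosen by method='maximum'.
tmpdata = FuzzySet.fuzzyfy(dataset, partitioner=partitioner, method='maximum', mode='sets')
print(tmpdata[:10])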

View File

@@ -62,7 +62,7 @@ class ImprovedWeightedFTS(fts.FTS):
    def train(self, ndata, **kwargs):
-       tmpdata = FuzzySet.fuzzyfy_series(ndata, self.sets, method='maximum')
+       tmpdata = FuzzySet.fuzzyfy(ndata, partitioner=self.partitioner, method='maximum', mode='sets')
        flrs = FLR.generate_recurrent_flrs(tmpdata)
        self.generate_flrg(flrs)

View File

@@ -66,7 +66,7 @@ class ExponentialyWeightedFTS(fts.FTS):
                self.flrgs[flr.LHS].append_rhs(flr.RHS)
    def train(self, data, **kwargs):
-       tmpdata = FuzzySet.fuzzyfy_series(data, self.sets, method='maximum')
+       tmpdata = FuzzySet.fuzzyfy(data, partitioner=self.partitioner, method='maximum', mode='sets')
        flrs = FLR.generate_recurrent_flrs(tmpdata)
        self.generate_flrg(flrs, self.c)

View File

@@ -50,7 +50,7 @@ class ConventionalFTS(fts.FTS):
    def train(self, data, **kwargs):
-       tmpdata = FuzzySet.fuzzyfy_series(data, self.sets, method='maximum')
+       tmpdata = FuzzySet.fuzzyfy(data, partitioner=self.partitioner, method='maximum', mode='sets')
        flrs = FLR.generate_non_recurrent_flrs(tmpdata)
        self.operation_matrix(flrs)

View File

@@ -58,7 +58,7 @@ class WeightedFTS(fts.FTS):
                self.flrgs[flr.LHS].append_rhs(flr.RHS)
    def train(self, ndata, **kwargs):
-       tmpdata = FuzzySet.fuzzyfy_series(ndata, self.sets, method='maximum')
+       tmpdata = FuzzySet.fuzzyfy(ndata, partitioner=self.partitioner, method='maximum', mode='sets')
        flrs = FLR.generate_recurrent_flrs(tmpdata)
        self.generate_FLRG(flrs)

View File

@@ -9,6 +9,9 @@ import matplotlib.pylab as plt
import pandas as pd
from pyFTS.common import Util as cUtil, FuzzySet
from pyFTS.partitioners import Grid, Util as pUtil
+from pyFTS.benchmarks import benchmarks as bchmk
+from pyFTS.models import chen
from pyFTS.common import Transformations
tdiff = Transformations.Differential(1)
@@ -16,72 +19,29 @@ tdiff = Transformations.Differential(1)
from pyFTS.data import TAIEX, SP500, NASDAQ
dataset = TAIEX.get_data()
partitioner = Grid.GridPartitioner(data=dataset, npart=30)
model = chen.ConventionalFTS(partitioner=partitioner)
model.fit(dataset)
print(model)
#dataset = SP500.get_data()[11500:16000]
#dataset = NASDAQ.get_data()
#print(len(dataset))
from pyFTS.partitioners import Grid, Util as pUtil
partitioner = Grid.GridPartitioner(data=dataset[:2000], npart=20) #, transformation=tdiff)
print(partitioner)
#print(FuzzySet.__binary_search(7000, partitioner.sets, partitioner.ordered_sets))
print(FuzzySet.fuzzyfy([5000, 7000, 8000], partitioner, mode='vector', method='fuzzy', alpha_cut=.5))
print(FuzzySet.fuzzyfy([5000, 7000, 8000], partitioner, mode='sets', method='fuzzy', alpha_cut=.5))
"""
from pyFTS.benchmarks import benchmarks as bchmk, Util as bUtil, Measures, knn, quantreg, arima, naive
from pyFTS.models import pwfts, song, chen, ifts, hofts
from pyFTS.models.ensemble import ensemble
print(partitioner)
#model = chen.ConventionalFTS(partitioner=partitioner)
model = hofts.HighOrderFTS(partitioner=partitioner,order=2)
#model.append_transformation(tdiff)
model.fit(dataset[:2000])
print(model)
print(model.predict([3500, 7000], steps_ahead=5))
#cUtil.plot_rules(model, size=[20,20], rules_by_axis=5, columns=1)
#
print("fim")
'''
model = knn.KNearestNeighbors(order=3)
#model = ensemble.AllMethodEnsembleFTS("", partitioner=partitioner)
#model = arima.ARIMA("", order=(2,0,2))
#model = quantreg.QuantileRegression("", order=2, dist=True)
#model.append_transformation(tdiff)
model.fit(dataset[:800])
print(Measures.get_distribution_statistics(dataset[800:1000], model))
#tmp = model.predict(dataset[800:1000], type='distribution')
#for tmp2 in tmp:
# print(tmp2)
#'''
'''
bchmk.sliding_window_benchmarks(dataset, 1000, train=0.8, inc=0.2,
-                               methods=[hofts.HighOrderFTS], #[pwfts.ProbabilisticWeightedFTS],
+                               methods=[chen.ConventionalFTS], #[pwfts.ProbabilisticWeightedFTS],
                                benchmark_models=False,
                                transformations=[None],
-                               orders=[1, 2, 3],
-                               partitions=np.arange(30, 80, 5),
+                               #orders=[1, 2, 3],
+                               partitions=np.arange(10, 100, 2),
                                progress=False, type="point",
                                #steps_ahead=[1,2,4,6,8,10],
-                               distributed=True, nodes=['192.168.0.110', '192.168.0.107', '192.168.0.106'],
+                               distributed=False, nodes=['192.168.0.110', '192.168.0.107', '192.168.0.106'],
                                file="benchmarks.db", dataset="TAIEX", tag="comparisons")
@@ -260,4 +220,3 @@ f, ax = plt.subplots(1, 1, figsize=[20,15])
bchmk.plot_distribution(ax, 'blue', tmp, f, 0, reference_data=dataset[train_split:train_split+200])
'''
"""