- Song & Chissom Conventional FTS
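For context, a sketch of the Song & Chissom model this commit implements (notation mine, not from the commit): each fuzzy logical relationship A_i -> A_j contributes a relation matrix built with the min operator, the model relation R is the element-wise max (union) over all relationships, and the point forecast is the max-min composition of the fuzzified input with R:

    R_k[s][t] = \min\big(\mu_{A_i}(c_s),\, \mu_{A_j}(c_t)\big), \qquad
    R = \max_k R_k, \qquad
    F(t) = F(t-1) \circ R

where c_s is the centroid of fuzzy set A_s and \circ denotes max-min composition.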

Petrônio Cândido de Lima e Silva 2017-05-06 17:04:37 -03:00
parent 3365fa72f1
commit fb9c3585be
4 changed files with 99 additions and 17 deletions

View File

@@ -17,7 +17,7 @@ from mpl_toolkits.mplot3d import Axes3D
from pyFTS.partitioners import partitioner, Grid, Huarng, Entropy, FCM
from pyFTS.benchmarks import Measures, naive, arima, ResidualAnalysis, ProbabilityDistribution, Util, quantreg
from pyFTS.common import Membership, FuzzySet, FLR, Transformations, Util
-from pyFTS import fts, chen, yu, ismailefendi, sadaei, hofts, hwang, pwfts, ifts
+from pyFTS import fts, chen, yu, ismailefendi, sadaei, hofts, hwang, pwfts, ifts, cheng
from copy import deepcopy
colors = ['grey', 'rosybrown', 'maroon', 'red','orange', 'yellow', 'olive', 'green',
@@ -31,11 +31,11 @@ nsty = len(styles)

def get_benchmark_point_methods():
    """Return all non FTS methods for point forecast"""
-    return [naive.Naive, arima.ARIMA]
+    return [naive.Naive, arima.ARIMA, quantreg.QuantileRegression]

def get_point_methods():
    """Return all FTS methods for point forecast"""
-    return [chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,
+    return [chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS, cheng.TrendWeightedFTS,
            sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, pwfts.ProbabilisticWeightedFTS]
def get_benchmark_interval_methods():

View File

@@ -16,6 +16,7 @@ class QuantileRegression(fts.FTS):
        self.is_high_order = True
        self.has_point_forecasting = True
        self.has_interval_forecasting = True
+        self.has_probability_forecasting = True
        self.benchmark_only = True
        self.minOrder = 1
        self.alpha = 0.5

song.py (new file, 67 lines)
View File

@@ -0,0 +1,67 @@
import numpy as np
from pyFTS.common import FuzzySet, FLR
from pyFTS import fts


class ConventionalFTS(fts.FTS):
    """Conventional Fuzzy Time Series (Song & Chissom, 1993)"""
    def __init__(self, name, **kwargs):
        super(ConventionalFTS, self).__init__(1, "CFTS " + name)
        self.name = "Conventional FTS"
        self.detail = "Song & Chissom"
        self.R = None

    def flr_membership_matrix(self, flr):
        # Relation matrix of a single FLR: r[k][l] = min(mu_LHS(c_k), mu_RHS(c_l))
        lm = [flr.LHS.membership(k.centroid) for k in self.sets]
        rm = [flr.RHS.membership(k.centroid) for k in self.sets]
        r = np.zeros((len(self.sets), len(self.sets)))
        for k in range(0, len(self.sets)):
            for l in range(0, len(self.sets)):
                r[k][l] = min(lm[k], rm[l])
        return r

    def operation_matrix(self, flrs):
        # Union (element-wise max) of the per-FLR relation matrices
        r = np.zeros((len(self.sets), len(self.sets)))
        for flr in flrs:
            mm = self.flr_membership_matrix(flr)
            for k in range(0, len(self.sets)):
                for l in range(0, len(self.sets)):
                    r[k][l] = max(r[k][l], mm[k][l])
        return r

    def train(self, data, sets, order=1, parameters=None):
        self.sets = sets
        ndata = self.doTransformations(data)
        tmpdata = FuzzySet.fuzzySeries(ndata, sets)
        flrs = FLR.generateNonRecurrentFLRs(tmpdata)
        self.R = self.operation_matrix(flrs)

    def forecast(self, data, **kwargs):
        ndata = np.array(self.doTransformations(data))
        l = len(ndata)
        npart = len(self.sets)
        ret = []
        for k in np.arange(0, l):
            mv = FuzzySet.fuzzyInstance(ndata[k], self.sets)
            # Max-min composition of the fuzzified input with the relation R
            r = np.array([max([min(self.R[i][j], mv[j]) for j in np.arange(0, npart)])
                          for i in np.arange(0, npart)])
            fs = np.ravel(np.argwhere(r == np.max(r)))
            if len(fs) == 1:
                ret.append(self.sets[fs[0]].centroid)
            else:
                # Tie between sets: defuzzify as the mean of their centroids
                mp = [self.sets[s].centroid for s in fs]
                ret.append(sum(mp) / len(mp))
        ret = self.doInverseTransformations(ret, params=[data[self.order - 1:]])
        return ret
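
The forecast above is plain max-min composition. A minimal standalone sketch of that single step, with a toy 3x3 relation matrix (illustrative values only, not produced by the model):

import numpy as np

# Toy relation matrix R over 3 fuzzy sets and a fuzzified input mv.
# Values are illustrative, not derived from any dataset.
R = np.array([[1.0, 0.5, 0.0],
              [0.5, 1.0, 0.5],
              [0.0, 0.5, 1.0]])
mv = np.array([0.2, 0.8, 0.1])

# Max-min composition: out[i] = max_j min(R[i][j], mv[j])
out = np.array([np.max(np.minimum(row, mv)) for row in R])
print(out)  # [0.5 0.8 0.5] -> the second set has the strongest activation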

View File

@@ -17,16 +17,30 @@ from pyFTS.benchmarks import naive, arima
from pyFTS.benchmarks import Measures
from numpy import random
os.chdir("/home/petronio/dados/Dropbox/Doutorado/Codigos/")
+enrollments = pd.read_csv("DataSets/Enrollments.csv", sep=";")
+enrollments = np.array(enrollments["Enrollments"])
+from pyFTS import song
+enrollments_fs = Grid.GridPartitioner(enrollments, 10).sets
+model = song.ConventionalFTS('')
+model.train(enrollments, enrollments_fs)
+teste = model.forecast(enrollments)
+print(teste)
#print(FCM.FCMPartitionerTrimf.__module__)
#gauss = random.normal(0,1.0,1000)
#gauss_teste = random.normal(0,1.0,400)
os.chdir("/home/petronio/dados/Dropbox/Doutorado/Codigos/")
-taiexpd = pd.read_csv("DataSets/TAIEX.csv", sep=",")
-taiex = np.array(taiexpd["avg"][:5000])
+#taiexpd = pd.read_csv("DataSets/TAIEX.csv", sep=",")
+#taiex = np.array(taiexpd["avg"][:5000])
#from statsmodels.tsa.arima_model import ARIMA as stats_arima
from statsmodels.tsa.tsatools import lagmat
@@ -38,7 +52,7 @@ from statsmodels.tsa.tsatools import lagmat
#print(lag)
#print(a)
-from pyFTS.benchmarks import distributed_benchmarks as bchmk
+#from pyFTS.benchmarks import distributed_benchmarks as bchmk
#from pyFTS.benchmarks import parallel_benchmarks as bchmk
#from pyFTS.benchmarks import benchmarks as bchmk
#from pyFTS.benchmarks import arima
@@ -52,11 +66,11 @@ from pyFTS.benchmarks import distributed_benchmarks as bchmk
#bchmk.teste(taiex,['192.168.0.109', '192.168.0.101'])
-bchmk.point_sliding_window(taiex,2000,train=0.8, #models=[yu.WeightedFTS], # #
-                           partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
-                           partitions= np.arange(10,200,step=5), #transformation=diff,
-                           dump=True, save=False, file="experiments/nasdaq_point_distributed.csv",
-                           nodes=['192.168.1.42']) #, depends=[hofts, ifts])
+#bchmk.point_sliding_window(taiex,2000,train=0.8, #models=[yu.WeightedFTS], # #
+#                           partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
+#                           partitions= np.arange(10,200,step=5), #transformation=diff,
+#                           dump=True, save=False, file="experiments/nasdaq_point_distributed.csv",
+#                           nodes=['192.168.1.42']) #, depends=[hofts, ifts])
#bchmk.testa(taiex,[10,20],partitioners=[Grid.GridPartitioner], nodes=['192.168.0.109', '192.168.0.101'])
@@ -83,10 +97,10 @@ bchmk.point_sliding_window(taiex,2000,train=0.8, #models=[yu.WeightedFTS], # #
# gauss,2000,train=0.8, dump=True, save=True, file="experiments/arima_gauss.csv")
-bchmk.interval_sliding_window(gauss,2000,train=0.8, #transformation=diff, #models=[pwfts.ProbabilisticWeightedFTS], # #
-                              partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
-                              partitions= np.arange(10,200,step=5), #
-                              dump=True, save=False, file="experiments/nasdaq_interval.csv")
+#bchmk.interval_sliding_window(gauss,2000,train=0.8, #transformation=diff, #models=[pwfts.ProbabilisticWeightedFTS], # #
+#                              partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
+#                              partitions= np.arange(10,200,step=5), #
+#                              dump=True, save=False, file="experiments/nasdaq_interval.csv")
#3bchmk.ahead_sliding_window(taiex,2000,train=0.8, steps=20, resolution=250, #transformation=diff, #models=[pwfts.ProbabilisticWeightedFTS], # #
# partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],