diff --git a/benchmarks.py b/benchmarks.py index c406b99..1036f55 100644 --- a/benchmarks.py +++ b/benchmarks.py @@ -5,7 +5,7 @@ import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn.cross_validation import KFold -from pyFTS import common +from pyFTS import * def Teste(par): x = np.arange(1,par) @@ -13,20 +13,20 @@ def Teste(par): plt.plot(x,y) # Erro quadrático médio -def rmse(predictions,targets): - return np.sqrt(np.mean((predictions-targets)**2)) +def rmse(forecastions,targets): + return np.sqrt(np.mean((forecastions-targets)**2)) # Erro Percentual médio -def mape(predictions,targets): - return np.mean(abs(predictions-targets)/predictions) +def mape(forecastions,targets): + return np.mean(abs(forecastions-targets)/forecastions) def plotComparedSeries(original,fts,title): fig = plt.figure(figsize=[20,6]) ax = fig.add_subplot(111) - predicted = [fts.predict(xx) for xx in original] - error = rmse(original,predicted) + forecasted = [fts.forecast(xx) for xx in original] + error = rmse(original,forecasted) ax.plot(original,color='b',label="Original") - ax.plot(predicted,color='r',label="Predicted") + ax.plot(forecasted,color='r',label="Predicted") handles0, labels0 = ax.get_legend_handles_labels() ax.legend(handles0,labels0) ax.set_title(title) @@ -35,12 +35,12 @@ def plotComparedSeries(original,fts,title): ax.set_xlim([0,len(original)]) ax.set_ylim([min(original),max(original)]) -def plotCompared(original,predicted,labels,title): +def plotCompared(original,forecasted,labels,title): fig = plt.figure(figsize=[13,6]) ax = fig.add_subplot(111) ax.plot(original,color='k',label="Original") - for c in range(0,len(predicted)): - ax.plot(predicted[c],label=labels[c]) + for c in range(0,len(forecasted)): + ax.plot(forecasted[c],label=labels[c]) handles0, labels0 = ax.get_legend_handles_labels() ax.legend(handles0,labels0) ax.set_title(title) @@ -53,7 +53,7 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo): nfolds = 5 ret = [] errors = 
np.array([[0 for k in parameters] for z in np.arange(0,nfolds)]) - predicted_best = [] + forecasted_best = [] print("Série Original") fig = plt.figure(figsize=[18,10]) fig.suptitle("Comparação de modelos ") @@ -73,29 +73,29 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo): test = original[test_ix] min_rmse = 100000.0 best_fold = None - predicted_best_fold = [] + forecasted_best_fold = [] errors_fold = [] pc = 0 #Parameter count for p in parameters: - sets = common.GridPartitionerTrimf(train,p) + sets = partitioner.GridPartitionerTrimf(train,p) fts = modelo(str(p)+ " particoes") - fts.learn(train,sets) - predicted = [fts.predict(xx) for xx in test] - error = rmse(np.array(predicted),np.array(test)) + fts.train(train,sets) + forecasted = [fts.forecast(xx) for xx in test] + error = rmse(np.array(forecasted),np.array(test)) errors_fold.append(error) print(fc, p, error) errors[fc,pc] = error if error < min_rmse: min_rmse = error best_fold = fts - predicted_best_fold = predicted + forecasted_best_fold = forecasted pc = pc + 1 - predicted_best_fold = [best_fold.predict(xx) for xx in original] - ax0.plot(predicted_best_fold,label=best_fold.name) + forecasted_best_fold = [best_fold.forecast(xx) for xx in original] + ax0.plot(forecasted_best_fold,label=best_fold.name) if np.mean(errors_fold) < min_rmse_fold: min_rmse_fold = np.mean(errors) best = best_fold - predicted_best = predicted_best_fold + forecasted_best = forecasted_best_fold fc = fc + 1 handles0, labels0 = ax0.get_legend_handles_labels() ax0.legend(handles0, labels0) @@ -108,12 +108,12 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo): X,Y = np.meshgrid(np.arange(0,nfolds),parameters) surf = ax1.plot_surface(X, Y, errors.T, rstride=1, cstride=1, antialiased=True) ret.append(best) - ret.append(predicted_best) + ret.append(forecasted_best) # Modelo diferencial print("\nSérie Diferencial") errors = np.array([[0 for k in parameters] for z in np.arange(0,nfolds)]) - predictedd_best = [] + 
forecastedd_best = [] ax2 = fig.add_axes([0, 0, 0.65, 0.45]) #left, bottom, width, height ax2.set_xlim([0,len(original)]) ax2.set_ylim([min(original),max(original)]) @@ -132,15 +132,15 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo): test = diff[test_ix] min_rmse = 100000.0 best_fold = None - predicted_best_fold = [] + forecasted_best_fold = [] errors_fold = [] pc = 0 for p in parameters: - sets = GridPartitionerTrimf(train,p) + sets = partitioner.GridPartitionerTrimf(train,p) fts = modelo(str(p)+ " particoes") - fts.learn(train,sets) - predicted = [fts.predictDiff(test,xx) for xx in np.arange(len(test))] - error = rmse(np.array(predicted),np.array(test)) + fts.train(train,sets) + forecasted = [fts.forecastDiff(test,xx) for xx in np.arange(len(test))] + error = rmse(np.array(forecasted),np.array(test)) print(fc, p,error) errors[fc,pc] = error errors_fold.append(error) @@ -148,12 +148,12 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo): min_rmse = error best_fold = fts pc = pc + 1 - predicted_best_fold = [best_fold.predictDiff(original, xx) for xx in np.arange(len(original))] - ax2.plot(predicted_best_fold,label=best_fold.name) + forecasted_best_fold = [best_fold.forecastDiff(original, xx) for xx in np.arange(len(original))] + ax2.plot(forecasted_best_fold,label=best_fold.name) if np.mean(errors_fold) < min_rmse_fold: min_rmse_fold = np.mean(errors) best = best_fold - predicted_best = predicted_best_fold + forecasted_best = forecasted_best_fold fc = fc + 1 handles0, labels0 = ax2.get_legend_handles_labels() ax2.legend(handles0, labels0) @@ -166,13 +166,13 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo): X,Y = np.meshgrid(np.arange(0,nfolds),parameters) surf = ax3.plot_surface(X, Y, errors.T, rstride=1, cstride=1, antialiased=True) ret.append(best) - ret.append(predicted_best) + ret.append(forecasted_best) return ret def SelecaoSimples_MenorRMSE(original,parameters,modelo): ret = [] errors = [] - predicted_best = [] + forecasted_best = [] 
print("Série Original") fig = plt.figure(figsize=[20,12]) fig.suptitle("Comparação de modelos ") @@ -186,18 +186,18 @@ def SelecaoSimples_MenorRMSE(original,parameters,modelo): min_rmse = 100000.0 best = None for p in parameters: - sets = common.GridPartitionerTrimf(original,p) + sets = partitioner.GridPartitionerTrimf(original,p) fts = modelo(str(p)+ " particoes") - fts.learn(original,sets) - predicted = [fts.predict(xx) for xx in original] - ax0.plot(predicted,label=fts.name) - error = rmse(np.array(predicted),np.array(original)) + fts.train(original,sets) + forecasted = [fts.forecast(xx) for xx in original] + ax0.plot(forecasted,label=fts.name) + error = rmse(np.array(forecasted),np.array(original)) print(p,error) errors.append(error) if error < min_rmse: min_rmse = error best = fts - predicted_best = predicted + forecasted_best = forecasted handles0, labels0 = ax0.get_legend_handles_labels() ax0.legend(handles0, labels0) ax1 = fig.add_axes([0.7, 0.5, 0.3, 0.45]) #left, bottom, width, height @@ -207,34 +207,35 @@ def SelecaoSimples_MenorRMSE(original,parameters,modelo): ax1.set_xlim([min(parameters),max(parameters)]) ax1.plot(parameters,errors) ret.append(best) - ret.append(predicted_best) + ret.append(forecasted_best) # Modelo diferencial print("\nSérie Diferencial") + difffts = common.differential(original) errors = [] - predictedd_best = [] + forecastedd_best = [] ax2 = fig.add_axes([0, 0, 0.65, 0.45]) #left, bottom, width, height - ax2.set_xlim([0,len(original)]) - ax2.set_ylim([min(original),max(original)]) + ax2.set_xlim([0,len(difffts)]) + ax2.set_ylim([min(difffts),max(difffts)]) ax2.set_title('Série Temporal') ax2.set_ylabel('F(T)') ax2.set_xlabel('T') - ax2.plot(original,label="Original") + ax2.plot(difffts,label="Original") min_rmse = 100000.0 bestd = None for p in parameters: - sets = common.GridPartitionerTrimf(common.differential(original),p) + sets = partitioner.GridPartitionerTrimf(difffts,p) fts = modelo(str(p)+ " particoes") - 
fts.learn(common.differential(original),sets) - predicted = [fts.predictDiff(original, xx) for xx in range(1,len(original))] - predicted.insert(0,original[0]) - ax2.plot(predicted,label=fts.name) - error = rmse(np.array(predicted),np.array(original)) + fts.train(difffts,sets) + forecasted = [fts.forecast(xx) for xx in difffts] + #forecasted.insert(0,difffts[0]) + ax2.plot(forecasted,label=fts.name) + error = rmse(np.array(forecasted),np.array(difffts)) print(p,error) errors.append(error) if error < min_rmse: min_rmse = error bestd = fts - predictedd_best = predicted + forecastedd_best = forecasted handles0, labels0 = ax2.get_legend_handles_labels() ax2.legend(handles0, labels0) ax3 = fig.add_axes([0.7, 0, 0.3, 0.45]) #left, bottom, width, height @@ -244,7 +245,7 @@ def SelecaoSimples_MenorRMSE(original,parameters,modelo): ax3.set_xlim([min(parameters),max(parameters)]) ax3.plot(parameters,errors) ret.append(bestd) - ret.append(predictedd_best) + ret.append(forecastedd_best) return ret def compareModelsPlot(original,models_fo,models_ho): @@ -254,10 +255,10 @@ def compareModelsPlot(original,models_fo,models_ho): rows = [] for model in models_fo: fts = model["model"] - ax0.plot(model["predicted"], label=model["name"]) + ax0.plot(model["forecasted"], label=model["name"]) for model in models_ho: fts = model["model"] - ax0.plot(model["predicted"], label=model["name"]) + ax0.plot(model["forecasted"], label=model["name"]) handles0, labels0 = ax0.get_legend_handles_labels() ax0.legend(handles0, labels0) @@ -268,13 +269,13 @@ def compareModelsTable(original,models_fo,models_ho): rows = [] for model in models_fo: fts = model["model"] - error_r = rmse(model["predicted"],original) - error_m = round(mape(model["predicted"],original)*100,2) + error_r = rmse(model["forecasted"],original) + error_m = round(mape(model["forecasted"],original)*100,2) rows.append([model["name"],fts.order,len(fts.sets),error_r,error_m]) for model in models_ho: fts = model["model"] - error_r = 
rmse(model["predicted"][fts.order:],original[fts.order:]) - error_m = round(mape(model["predicted"][fts.order:],original[fts.order:])*100,2) + error_r = rmse(model["forecasted"][fts.order:],original[fts.order:]) + error_m = round(mape(model["forecasted"][fts.order:],original[fts.order:])*100,2) rows.append([model["name"],fts.order,len(fts.sets),error_r,error_m]) ax1 = fig.add_axes([0, 0, 1, 1]) #left, bottom, width, height ax1.set_xticks([]) @@ -312,7 +313,7 @@ from pyFTS import hwang def HOSelecaoSimples_MenorRMSE(original,parameters,orders): ret = [] errors = np.array([[0 for k in range(len(parameters))] for kk in range(len(orders))]) - predicted_best = [] + forecasted_best = [] print("Série Original") fig = plt.figure(figsize=[20,12]) fig.suptitle("Comparação de modelos ") @@ -329,20 +330,20 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders): for p in parameters: oc = 0 for o in orders: - sets = common.GridPartitionerTrimf(original,p) + sets = partitioner.GridPartitionerTrimf(original,p) fts = hwang.HighOrderFTS(o,"k = " + str(p)+ " w = " + str(o)) - fts.learn(original,sets) - predicted = [fts.predict(original, xx) for xx in range(o,len(original))] - error = rmse(np.array(predicted),np.array(original[o:])) + fts.train(original,sets) + forecasted = [fts.forecast(original, xx) for xx in range(o,len(original))] + error = rmse(np.array(forecasted),np.array(original[o:])) for kk in range(o): - predicted.insert(0,None) - ax0.plot(predicted,label=fts.name) + forecasted.insert(0,None) + ax0.plot(forecasted,label=fts.name) print(o,p,error) errors[oc,pc] = error if error < min_rmse: min_rmse = error best = fts - predicted_best = predicted + forecasted_best = forecasted oc = oc + 1 pc = pc + 1 handles0, labels0 = ax0.get_legend_handles_labels() @@ -356,12 +357,12 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders): X,Y = np.meshgrid(parameters,orders) surf = ax1.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True) ret.append(best) - 
ret.append(predicted_best) + ret.append(forecasted_best) # Modelo diferencial print("\nSérie Diferencial") errors = np.array([[0 for k in range(len(parameters))] for kk in range(len(orders))]) - predictedd_best = [] + forecastedd_best = [] ax2 = fig.add_axes([0, 0, 0.6, 0.45]) #left, bottom, width, height ax2.set_xlim([0,len(original)]) ax2.set_ylim([min(original),max(original)]) @@ -375,20 +376,20 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders): for p in parameters: oc = 0 for o in orders: - sets = common.GridPartitionerTrimf(common.differential(original),p) + sets = partitioner.GridPartitionerTrimf(common.differential(original),p) fts = hwang.HighOrderFTS(o,"k = " + str(p)+ " w = " + str(o)) - fts.learn(original,sets) - predicted = [fts.predictDiff(original, xx) for xx in range(o,len(original))] - error = rmse(np.array(predicted),np.array(original[o:])) + fts.train(original,sets) + forecasted = [fts.forecastDiff(original, xx) for xx in range(o,len(original))] + error = rmse(np.array(forecasted),np.array(original[o:])) for kk in range(o): - predicted.insert(0,None) - ax2.plot(predicted,label=fts.name) + forecasted.insert(0,None) + ax2.plot(forecasted,label=fts.name) print(o,p,error) errors[oc,pc] = error if error < min_rmse: min_rmse = error bestd = fts - predictedd_best = predicted + forecastedd_best = forecasted oc = oc + 1 pc = pc + 1 handles0, labels0 = ax2.get_legend_handles_labels() @@ -402,5 +403,5 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders): X,Y = np.meshgrid(parameters,orders) surf = ax3.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True) ret.append(bestd) - ret.append(predictedd_best) + ret.append(forecastedd_best) return ret diff --git a/chen.py b/chen.py index 0efe40b..2e35651 100644 --- a/chen.py +++ b/chen.py @@ -1,3 +1,4 @@ +import numpy as np from pyFTS import * class ConventionalFLRG: @@ -21,15 +22,18 @@ class ConventionalFLRG: class ConventionalFTS(fts.FTS): def __init__(self,name): 
super(ConventionalFTS, self).__init__(1,name) + self.flrgs = {} def forecast(self,data): + + mv = common.fuzzyInstance(data, self.sets) + + actual = self.sets[ np.argwhere( mv == max(mv) )[0,0] ] - actual = self.fuzzy(data) - - if actual["fuzzyset"] not in self.flrgs: - return self.sets[actual["fuzzyset"]].centroid + if actual.name not in self.flrgs: + return actual.centroid - flrg = self.flrgs[actual["fuzzyset"]] + flrg = self.flrgs[actual.name] count = 0.0 denom = 0.0 @@ -39,24 +43,20 @@ class ConventionalFTS(fts.FTS): count = count + 1.0 return denom/count - + + def generateFLRG(self, flrs): + flrgs = {} + for flr in flrs: + if flr.LHS.name in flrgs: + flrgs[flr.LHS.name].append(flr.RHS) + else: + flrgs[flr.LHS.name] = ConventionalFLRG(flr.LHS); + flrgs[flr.LHS.name].append(flr.RHS) + return (flrgs) + def train(self, data, sets): - last = {"fuzzyset":"", "membership":0.0} - actual = {"fuzzyset":"", "membership":0.0} - - for s in sets: - self.sets[s.name] = s - - self.flrgs = {} - count = 1 - for inst in data: - actual = self.fuzzy(inst) - - if count > self.order: - if last["fuzzyset"] not in self.flrgs: - self.flrgs[last["fuzzyset"]] = ConventionalFLRG(last["fuzzyset"]) - - self.flrgs[last["fuzzyset"]].append(actual["fuzzyset"]) - count = count + 1 - last = actual - + self.sets = sets + tmpdata = common.fuzzySeries(data,sets) + flrs = common.generateNonRecurrentFLRs(tmpdata) + self.flrgs = self.generateFLRG(flrs) + diff --git a/common.py b/common.py index b99703b..a552abd 100644 --- a/common.py +++ b/common.py @@ -42,12 +42,13 @@ def sigmf(x,parameters): class FuzzySet: - def __init__(self,name,mf,parameters,centroid): self.name = name self.mf = mf self.parameters = parameters self.centroid = centroid + self.lower = min(parameters) + self.upper = max(parameters) def membership(self,x): return self.mf(x,self.parameters) @@ -55,16 +56,37 @@ class FuzzySet: def __str__(self): return self.name + ": " + str(self.mf) + "(" + str(self.parameters) + ")" +class FLR: + def 
__init__(self,LHS,RHS): + self.LHS = LHS + self.RHS = RHS + + def __str__(self): + return str(self.LHS) + " -> " + str(self.RHS) -def GridPartitionerTrimf(data,npart,names = None,prefix = "A"): - sets = [] - dmax = max(data) - dmin = min(data) - dlen = dmax - dmin - partlen = dlen / npart - partition = dmin - for c in range(npart): - sets.append( FuzzySet(prefix+str(c),trimf,[partition-partlen, partition, partition+partlen], partition ) ) - partition = partition + partlen - - return sets +def fuzzyInstance(inst, fuzzySets): + mv = np.array([ fs.membership(inst) for fs in fuzzySets]) + return mv + + +def fuzzySeries(data,fuzzySets): + fts = [] + for item in data: + mv = fuzzyInstance(item,fuzzySets) + fts.append(fuzzySets[ np.argwhere(mv == max(mv) )[0,0] ]) + return fts + + +def generateNonRecurrentFLRs(fuzzyData): + flrs = {} + for i in range(2,len(fuzzyData)): + tmp = FLR(fuzzyData[i-1],fuzzyData[i]) + flrs[str(tmp)] = tmp + ret = [value for key, value in flrs.items()] + return ret + +def generateRecurrentFLRs(fuzzyData): + flrs = [] + for i in range(2,len(fuzzyData)): + flrs.append(FLR(fuzzyData[i-1],fuzzyData[i])) + return flrs diff --git a/fts.py b/fts.py index 7def69a..f1cf7b1 100644 --- a/fts.py +++ b/fts.py @@ -24,12 +24,6 @@ class FTS: def train(self, data, sets): pass - def predict(self,data): - return self.forecast(data) - - def predictDiff(self,data,t): - return data[t] + self.forecast(data[t-1]-data[t]) - def __str__(self): tmp = self.name + ":\n" for r in self.flrgs.keys(): diff --git a/partitioner.py b/partitioner.py new file mode 100644 index 0000000..305c31e --- /dev/null +++ b/partitioner.py @@ -0,0 +1,17 @@ +import numpy as np +from pyFTS import * + +#print(common.__dict__) + +def GridPartitionerTrimf(data,npart,names = None,prefix = "A"): + sets = [] + dmax = max(data) + dmin = min(data) + dlen = dmax - dmin + partlen = dlen / npart + partition = dmin + for c in range(npart): + 
sets.append(common.FuzzySet(prefix+str(c),common.trimf,[partition-partlen, partition, partition+partlen], partition ) ) + partition = partition + partlen + + return sets