Refactoring of the code to standardize with rfts - Common and Chen

Petrônio Cândido de Lima e Silva 2016-10-18 15:45:07 -02:00
parent 9ad5af49a4
commit 9ad28b07f2
5 changed files with 154 additions and 120 deletions


@@ -5,7 +5,7 @@ import matplotlib.pyplot as plt
 from mpl_toolkits.mplot3d import Axes3D
 from sklearn.cross_validation import KFold
-from pyFTS import common
+from pyFTS import *
 def Teste(par):
     x = np.arange(1,par)
@@ -13,20 +13,20 @@ def Teste(par):
     plt.plot(x,y)
 # Erro quadrático médio
-def rmse(predictions,targets):
-    return np.sqrt(np.mean((predictions-targets)**2))
+def rmse(forecastions,targets):
+    return np.sqrt(np.mean((forecastions-targets)**2))
 # Erro Percentual médio
-def mape(predictions,targets):
-    return np.mean(abs(predictions-targets)/predictions)
+def mape(forecastions,targets):
+    return np.mean(abs(forecastions-targets)/forecastions)
 def plotComparedSeries(original,fts,title):
     fig = plt.figure(figsize=[20,6])
     ax = fig.add_subplot(111)
-    predicted = [fts.predict(xx) for xx in original]
-    error = rmse(original,predicted)
+    forecasted = [fts.forecast(xx) for xx in original]
+    error = rmse(original,forecasted)
     ax.plot(original,color='b',label="Original")
-    ax.plot(predicted,color='r',label="Predicted")
+    ax.plot(forecasted,color='r',label="Predicted")
     handles0, labels0 = ax.get_legend_handles_labels()
     ax.legend(handles0,labels0)
     ax.set_title(title)
@@ -35,12 +35,12 @@ def plotComparedSeries(original,fts,title):
     ax.set_xlim([0,len(original)])
     ax.set_ylim([min(original),max(original)])
-def plotCompared(original,predicted,labels,title):
+def plotCompared(original,forecasted,labels,title):
     fig = plt.figure(figsize=[13,6])
     ax = fig.add_subplot(111)
     ax.plot(original,color='k',label="Original")
-    for c in range(0,len(predicted)):
-        ax.plot(predicted[c],label=labels[c])
+    for c in range(0,len(forecasted)):
+        ax.plot(forecasted[c],label=labels[c])
     handles0, labels0 = ax.get_legend_handles_labels()
     ax.legend(handles0,labels0)
     ax.set_title(title)
@@ -53,7 +53,7 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo):
     nfolds = 5
     ret = []
     errors = np.array([[0 for k in parameters] for z in np.arange(0,nfolds)])
-    predicted_best = []
+    forecasted_best = []
     print("Série Original")
     fig = plt.figure(figsize=[18,10])
     fig.suptitle("Comparação de modelos ")
@@ -73,29 +73,29 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo):
         test = original[test_ix]
         min_rmse = 100000.0
         best_fold = None
-        predicted_best_fold = []
+        forecasted_best_fold = []
         errors_fold = []
         pc = 0 #Parameter count
         for p in parameters:
-            sets = common.GridPartitionerTrimf(train,p)
+            sets = partitioner.GridPartitionerTrimf(train,p)
             fts = modelo(str(p)+ " particoes")
-            fts.learn(train,sets)
-            predicted = [fts.predict(xx) for xx in test]
-            error = rmse(np.array(predicted),np.array(test))
+            fts.train(train,sets)
+            forecasted = [fts.forecast(xx) for xx in test]
+            error = rmse(np.array(forecasted),np.array(test))
             errors_fold.append(error)
             print(fc, p, error)
             errors[fc,pc] = error
             if error < min_rmse:
                 min_rmse = error
                 best_fold = fts
-                predicted_best_fold = predicted
+                forecasted_best_fold = forecasted
             pc = pc + 1
-        predicted_best_fold = [best_fold.predict(xx) for xx in original]
-        ax0.plot(predicted_best_fold,label=best_fold.name)
+        forecasted_best_fold = [best_fold.forecast(xx) for xx in original]
+        ax0.plot(forecasted_best_fold,label=best_fold.name)
         if np.mean(errors_fold) < min_rmse_fold:
             min_rmse_fold = np.mean(errors)
             best = best_fold
-            predicted_best = predicted_best_fold
+            forecasted_best = forecasted_best_fold
         fc = fc + 1
     handles0, labels0 = ax0.get_legend_handles_labels()
     ax0.legend(handles0, labels0)
@@ -108,12 +108,12 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo):
     X,Y = np.meshgrid(np.arange(0,nfolds),parameters)
     surf = ax1.plot_surface(X, Y, errors.T, rstride=1, cstride=1, antialiased=True)
     ret.append(best)
-    ret.append(predicted_best)
+    ret.append(forecasted_best)
     # Modelo diferencial
     print("\nSérie Diferencial")
     errors = np.array([[0 for k in parameters] for z in np.arange(0,nfolds)])
-    predictedd_best = []
+    forecastedd_best = []
     ax2 = fig.add_axes([0, 0, 0.65, 0.45]) #left, bottom, width, height
     ax2.set_xlim([0,len(original)])
     ax2.set_ylim([min(original),max(original)])
@@ -132,15 +132,15 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo):
         test = diff[test_ix]
         min_rmse = 100000.0
         best_fold = None
-        predicted_best_fold = []
+        forecasted_best_fold = []
         errors_fold = []
         pc = 0
         for p in parameters:
-            sets = GridPartitionerTrimf(train,p)
+            sets = partitioner.GridPartitionerTrimf(train,p)
             fts = modelo(str(p)+ " particoes")
-            fts.learn(train,sets)
-            predicted = [fts.predictDiff(test,xx) for xx in np.arange(len(test))]
-            error = rmse(np.array(predicted),np.array(test))
+            fts.train(train,sets)
+            forecasted = [fts.forecastDiff(test,xx) for xx in np.arange(len(test))]
+            error = rmse(np.array(forecasted),np.array(test))
             print(fc, p,error)
             errors[fc,pc] = error
             errors_fold.append(error)
@@ -148,12 +148,12 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo):
                 min_rmse = error
                 best_fold = fts
             pc = pc + 1
-        predicted_best_fold = [best_fold.predictDiff(original, xx) for xx in np.arange(len(original))]
-        ax2.plot(predicted_best_fold,label=best_fold.name)
+        forecasted_best_fold = [best_fold.forecastDiff(original, xx) for xx in np.arange(len(original))]
+        ax2.plot(forecasted_best_fold,label=best_fold.name)
         if np.mean(errors_fold) < min_rmse_fold:
             min_rmse_fold = np.mean(errors)
             best = best_fold
-            predicted_best = predicted_best_fold
+            forecasted_best = forecasted_best_fold
         fc = fc + 1
     handles0, labels0 = ax2.get_legend_handles_labels()
     ax2.legend(handles0, labels0)
@@ -166,13 +166,13 @@ def SelecaoKFold_MenorRMSE(original,parameters,modelo):
     X,Y = np.meshgrid(np.arange(0,nfolds),parameters)
     surf = ax3.plot_surface(X, Y, errors.T, rstride=1, cstride=1, antialiased=True)
     ret.append(best)
-    ret.append(predicted_best)
+    ret.append(forecasted_best)
     return ret
 def SelecaoSimples_MenorRMSE(original,parameters,modelo):
     ret = []
     errors = []
-    predicted_best = []
+    forecasted_best = []
     print("Série Original")
     fig = plt.figure(figsize=[20,12])
     fig.suptitle("Comparação de modelos ")
@@ -186,18 +186,18 @@ def SelecaoSimples_MenorRMSE(original,parameters,modelo):
     min_rmse = 100000.0
     best = None
     for p in parameters:
-        sets = common.GridPartitionerTrimf(original,p)
+        sets = partitioner.GridPartitionerTrimf(original,p)
         fts = modelo(str(p)+ " particoes")
-        fts.learn(original,sets)
-        predicted = [fts.predict(xx) for xx in original]
-        ax0.plot(predicted,label=fts.name)
-        error = rmse(np.array(predicted),np.array(original))
+        fts.train(original,sets)
+        forecasted = [fts.forecast(xx) for xx in original]
+        ax0.plot(forecasted,label=fts.name)
+        error = rmse(np.array(forecasted),np.array(original))
         print(p,error)
         errors.append(error)
         if error < min_rmse:
             min_rmse = error
             best = fts
-            predicted_best = predicted
+            forecasted_best = forecasted
     handles0, labels0 = ax0.get_legend_handles_labels()
     ax0.legend(handles0, labels0)
     ax1 = fig.add_axes([0.7, 0.5, 0.3, 0.45]) #left, bottom, width, height
@@ -207,34 +207,35 @@ def SelecaoSimples_MenorRMSE(original,parameters,modelo):
     ax1.set_xlim([min(parameters),max(parameters)])
     ax1.plot(parameters,errors)
     ret.append(best)
-    ret.append(predicted_best)
+    ret.append(forecasted_best)
     # Modelo diferencial
     print("\nSérie Diferencial")
+    difffts = common.differential(original)
     errors = []
-    predictedd_best = []
+    forecastedd_best = []
     ax2 = fig.add_axes([0, 0, 0.65, 0.45]) #left, bottom, width, height
-    ax2.set_xlim([0,len(original)])
-    ax2.set_ylim([min(original),max(original)])
+    ax2.set_xlim([0,len(difffts)])
+    ax2.set_ylim([min(difffts),max(difffts)])
     ax2.set_title('Série Temporal')
     ax2.set_ylabel('F(T)')
     ax2.set_xlabel('T')
-    ax2.plot(original,label="Original")
+    ax2.plot(difffts,label="Original")
     min_rmse = 100000.0
     bestd = None
     for p in parameters:
-        sets = common.GridPartitionerTrimf(common.differential(original),p)
+        sets = partitioner.GridPartitionerTrimf(difffts,p)
         fts = modelo(str(p)+ " particoes")
-        fts.learn(common.differential(original),sets)
-        predicted = [fts.predictDiff(original, xx) for xx in range(1,len(original))]
-        predicted.insert(0,original[0])
-        ax2.plot(predicted,label=fts.name)
-        error = rmse(np.array(predicted),np.array(original))
+        fts.train(difffts,sets)
+        forecasted = [fts.forecast(xx) for xx in difffts]
+        #forecasted.insert(0,difffts[0])
+        ax2.plot(forecasted,label=fts.name)
+        error = rmse(np.array(forecasted),np.array(difffts))
         print(p,error)
         errors.append(error)
         if error < min_rmse:
             min_rmse = error
             bestd = fts
-            predictedd_best = predicted
+            forecastedd_best = forecasted
     handles0, labels0 = ax2.get_legend_handles_labels()
     ax2.legend(handles0, labels0)
     ax3 = fig.add_axes([0.7, 0, 0.3, 0.45]) #left, bottom, width, height
@@ -244,7 +245,7 @@ def SelecaoSimples_MenorRMSE(original,parameters,modelo):
     ax3.set_xlim([min(parameters),max(parameters)])
     ax3.plot(parameters,errors)
     ret.append(bestd)
-    ret.append(predictedd_best)
+    ret.append(forecastedd_best)
     return ret
 def compareModelsPlot(original,models_fo,models_ho):
@@ -254,10 +255,10 @@ def compareModelsPlot(original,models_fo,models_ho):
     rows = []
     for model in models_fo:
         fts = model["model"]
-        ax0.plot(model["predicted"], label=model["name"])
+        ax0.plot(model["forecasted"], label=model["name"])
     for model in models_ho:
         fts = model["model"]
-        ax0.plot(model["predicted"], label=model["name"])
+        ax0.plot(model["forecasted"], label=model["name"])
     handles0, labels0 = ax0.get_legend_handles_labels()
     ax0.legend(handles0, labels0)
@@ -268,13 +269,13 @@ def compareModelsTable(original,models_fo,models_ho):
     rows = []
     for model in models_fo:
         fts = model["model"]
-        error_r = rmse(model["predicted"],original)
-        error_m = round(mape(model["predicted"],original)*100,2)
+        error_r = rmse(model["forecasted"],original)
+        error_m = round(mape(model["forecasted"],original)*100,2)
         rows.append([model["name"],fts.order,len(fts.sets),error_r,error_m])
     for model in models_ho:
         fts = model["model"]
-        error_r = rmse(model["predicted"][fts.order:],original[fts.order:])
-        error_m = round(mape(model["predicted"][fts.order:],original[fts.order:])*100,2)
+        error_r = rmse(model["forecasted"][fts.order:],original[fts.order:])
+        error_m = round(mape(model["forecasted"][fts.order:],original[fts.order:])*100,2)
         rows.append([model["name"],fts.order,len(fts.sets),error_r,error_m])
     ax1 = fig.add_axes([0, 0, 1, 1]) #left, bottom, width, height
     ax1.set_xticks([])
@@ -312,7 +313,7 @@ from pyFTS import hwang
 def HOSelecaoSimples_MenorRMSE(original,parameters,orders):
     ret = []
     errors = np.array([[0 for k in range(len(parameters))] for kk in range(len(orders))])
-    predicted_best = []
+    forecasted_best = []
     print("Série Original")
     fig = plt.figure(figsize=[20,12])
     fig.suptitle("Comparação de modelos ")
@@ -329,20 +330,20 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders):
     for p in parameters:
         oc = 0
         for o in orders:
-            sets = common.GridPartitionerTrimf(original,p)
+            sets = partitioner.GridPartitionerTrimf(original,p)
             fts = hwang.HighOrderFTS(o,"k = " + str(p)+ " w = " + str(o))
-            fts.learn(original,sets)
-            predicted = [fts.predict(original, xx) for xx in range(o,len(original))]
-            error = rmse(np.array(predicted),np.array(original[o:]))
+            fts.train(original,sets)
+            forecasted = [fts.forecast(original, xx) for xx in range(o,len(original))]
+            error = rmse(np.array(forecasted),np.array(original[o:]))
             for kk in range(o):
-                predicted.insert(0,None)
-            ax0.plot(predicted,label=fts.name)
+                forecasted.insert(0,None)
+            ax0.plot(forecasted,label=fts.name)
             print(o,p,error)
             errors[oc,pc] = error
             if error < min_rmse:
                 min_rmse = error
                 best = fts
-                predicted_best = predicted
+                forecasted_best = forecasted
             oc = oc + 1
         pc = pc + 1
     handles0, labels0 = ax0.get_legend_handles_labels()
@@ -356,12 +357,12 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders):
     X,Y = np.meshgrid(parameters,orders)
     surf = ax1.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True)
     ret.append(best)
-    ret.append(predicted_best)
+    ret.append(forecasted_best)
     # Modelo diferencial
     print("\nSérie Diferencial")
     errors = np.array([[0 for k in range(len(parameters))] for kk in range(len(orders))])
-    predictedd_best = []
+    forecastedd_best = []
     ax2 = fig.add_axes([0, 0, 0.6, 0.45]) #left, bottom, width, height
     ax2.set_xlim([0,len(original)])
     ax2.set_ylim([min(original),max(original)])
@@ -375,20 +376,20 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders):
     for p in parameters:
         oc = 0
         for o in orders:
-            sets = common.GridPartitionerTrimf(common.differential(original),p)
+            sets = partitioner.GridPartitionerTrimf(common.differential(original),p)
             fts = hwang.HighOrderFTS(o,"k = " + str(p)+ " w = " + str(o))
-            fts.learn(original,sets)
-            predicted = [fts.predictDiff(original, xx) for xx in range(o,len(original))]
-            error = rmse(np.array(predicted),np.array(original[o:]))
+            fts.train(original,sets)
+            forecasted = [fts.forecastDiff(original, xx) for xx in range(o,len(original))]
+            error = rmse(np.array(forecasted),np.array(original[o:]))
             for kk in range(o):
-                predicted.insert(0,None)
-            ax2.plot(predicted,label=fts.name)
+                forecasted.insert(0,None)
+            ax2.plot(forecasted,label=fts.name)
             print(o,p,error)
             errors[oc,pc] = error
             if error < min_rmse:
                 min_rmse = error
                 bestd = fts
-                predictedd_best = predicted
+                forecastedd_best = forecasted
             oc = oc + 1
         pc = pc + 1
     handles0, labels0 = ax2.get_legend_handles_labels()
@@ -402,5 +403,5 @@ def HOSelecaoSimples_MenorRMSE(original,parameters,orders):
     X,Y = np.meshgrid(parameters,orders)
     surf = ax3.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True)
     ret.append(bestd)
-    ret.append(predictedd_best)
+    ret.append(forecastedd_best)
     return ret

chen.py

@@ -1,3 +1,4 @@
+import numpy as np
 from pyFTS import *
 class ConventionalFLRG:
@@ -21,15 +22,18 @@ class ConventionalFLRG:
 class ConventionalFTS(fts.FTS):
     def __init__(self,name):
         super(ConventionalFTS, self).__init__(1,name)
+        self.flrgs = {}
     def forecast(self,data):
-        actual = self.fuzzy(data)
-        if actual["fuzzyset"] not in self.flrgs:
-            return self.sets[actual["fuzzyset"]].centroid
-        flrg = self.flrgs[actual["fuzzyset"]]
+        mv = common.fuzzyInstance(data, self.sets)
+        actual = self.sets[ np.argwhere( mv == max(mv) )[0,0] ]
+        if actual.name not in self.flrgs:
+            return actual.centroid
+        flrg = self.flrgs[actual.name]
         count = 0.0
         denom = 0.0
@@ -39,24 +43,20 @@ class ConventionalFTS(fts.FTS):
             count = count + 1.0
         return denom/count
+    def generateFLRG(self, flrs):
+        flrgs = {}
+        for flr in flrs:
+            if flr.LHS in flrgs:
+                flrgs[flr.LHS].append(flr.RHS)
+            else:
+                flrgs[flr.LHS] = ConventionalFLRG(flr.LHS);
+                flrgs[flr.LHS].append(flr.RHS)
+        return (flrgs)
     def train(self, data, sets):
-        last = {"fuzzyset":"", "membership":0.0}
-        actual = {"fuzzyset":"", "membership":0.0}
-        for s in sets:
-            self.sets[s.name] = s
-        self.flrgs = {}
-        count = 1
-        for inst in data:
-            actual = self.fuzzy(inst)
-            if count > self.order:
-                if last["fuzzyset"] not in self.flrgs:
-                    self.flrgs[last["fuzzyset"]] = ConventionalFLRG(last["fuzzyset"])
-                self.flrgs[last["fuzzyset"]].append(actual["fuzzyset"])
-            count = count + 1
-            last = actual
+        self.sets = sets
+        tmpdata = common.fuzzySeries(data,sets)
+        flrs = common.generateNonRecurrentFLRs(tmpdata)
+        self.flrgs = self.generateFLRG(flrs)
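A minimal sketch of how the refactored Chen model is driven after this change, assuming the modules touched by this commit; the toy series and partition count are illustrative:

import numpy as np
from pyFTS import partitioner, chen

data = 50 + 10 * np.sin(np.arange(0, 10, 0.1))     # toy series, not from the commit
sets = partitioner.GridPartitionerTrimf(data, 10)  # triangular fuzzy sets over the data range
model = chen.ConventionalFTS("Chen 10 particoes")
model.train(data, sets)                            # fuzzify, extract FLRs, group them into FLRGs
forecasts = [model.forecast(x) for x in data]      # one defuzzified value per observation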

common.py

@@ -42,12 +42,13 @@ def sigmf(x,parameters):
 class FuzzySet:
     def __init__(self,name,mf,parameters,centroid):
         self.name = name
         self.mf = mf
         self.parameters = parameters
         self.centroid = centroid
+        self.lower = min(parameters)
+        self.upper = max(parameters)
     def membership(self,x):
         return self.mf(x,self.parameters)
@@ -55,16 +56,37 @@ class FuzzySet:
     def __str__(self):
         return self.name + ": " + str(self.mf) + "(" + str(self.parameters) + ")"
+class FLR:
+    def __init__(self,LHS,RHS):
+        self.LHS = LHS
+        self.RHS = RHS
+    def __str__(self):
+        return str(self.LHS) + " -> " + str(self.RHS)
-def GridPartitionerTrimf(data,npart,names = None,prefix = "A"):
-    sets = []
-    dmax = max(data)
-    dmin = min(data)
-    dlen = dmax - dmin
-    partlen = dlen / npart
-    partition = dmin
-    for c in range(npart):
-        sets.append( FuzzySet(prefix+str(c),trimf,[partition-partlen, partition, partition+partlen], partition ) )
-        partition = partition + partlen
-    return sets
+def fuzzyInstance(inst, fuzzySets):
+    mv = np.array([ fs.membership(inst) for fs in fuzzySets])
+    return mv
+def fuzzySeries(data,fuzzySets):
+    fts = []
+    for item in data:
+        mv = fuzzyInstance(item,fuzzySets)
+        fts.append(fuzzySets[ np.argwhere(mv == max(mv) )[0,0] ])
+    return fts
+def generateNonRecurrentFLRs(fuzzyData):
+    flrs = {}
+    for i in range(2,len(fuzzyData)):
+        tmp = FLR(fuzzyData[i-1],fuzzyData[i])
+        flrs[str(tmp)] = tmp
+    ret = [value for key, value in flrs.items()]
+    return ret
+def generateRecurrentFLRs(fuzzyData):
+    flrs = []
+    for i in range(2,len(fuzzyData)):
+        flrs[i-1] = FLR(fuzzyData[i-1],fuzzyData[i])
+    return flrs
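The helpers added here are what chen.py now calls during training; a hypothetical round trip over a toy list (values and partition count assumed) would be:

from pyFTS import common, partitioner

data = [1.0, 2.5, 3.7, 2.1, 4.9, 3.3]             # toy values
sets = partitioner.GridPartitionerTrimf(data, 4)  # fuzzy sets A0..A3
fuzzified = common.fuzzySeries(data, sets)        # best-matching FuzzySet per value
flrs = common.generateNonRecurrentFLRs(fuzzified) # de-duplicated A_i -> A_j transitions
for flr in flrs:
    print(flr)                                    # rendered via FLR.__str__ and FuzzySet.__str__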

fts.py

@@ -24,12 +24,6 @@ class FTS:
     def train(self, data, sets):
         pass
-    def predict(self,data):
-        return self.forecast(data)
-    def predictDiff(self,data,t):
-        return data[t] + self.forecast(data[t-1]-data[t])
     def __str__(self):
         tmp = self.name + ":\n"
         for r in self.flrgs.keys():

partitioner.py (new file)

@@ -0,0 +1,17 @@
+import numpy as np
+from pyFTS import *
+#print(common.__dict__)
+def GridPartitionerTrimf(data,npart,names = None,prefix = "A"):
+    sets = []
+    dmax = max(data)
+    dmin = min(data)
+    dlen = dmax - dmin
+    partlen = dlen / npart
+    partition = dmin
+    for c in range(npart):
+        sets.append(common.FuzzySet(prefix+str(c),common.trimf,[partition-partlen, partition, partition+partlen], partition ) )
+        partition = partition + partlen
+    return sets
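Finally, a short sketch of what the new partitioner module produces on its own, again with assumed toy data:

from pyFTS import partitioner

data = [10, 12, 15, 18, 20, 25, 30]  # toy series spanning 10..30
sets = partitioner.GridPartitionerTrimf(data, 5, prefix="A")
for s in sets:
    # Each entry is a common.FuzzySet with a triangular membership function
    # centred on its partition point, which is also used as the centroid.
    print(s.name, s.parameters, s.centroid)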