Métricas: sharpness, resolution e coverage

This commit is contained in:
Petrônio Cândido de Lima e Silva 2016-10-27 17:14:17 -02:00
parent 4160b7d8dd
commit 3f757fb977
4 changed files with 101 additions and 49 deletions

View File

@ -13,37 +13,87 @@ def Teste(par):
plt.plot(x,y) plt.plot(x,y)
# Root Mean Squared Error
def rmse(targets, forecasts):
    """Return the RMSE between *targets* and *forecasts*, ignoring NaNs.

    Both arguments are expected to support elementwise subtraction
    (e.g. numpy arrays).
    """
    squared_errors = (forecasts - targets) ** 2
    return np.sqrt(np.nanmean(squared_errors))
# Root Mean Squared Error for interval forecasts: the error of each
# [lower, upper] forecast is measured against the interval's mean point.
def rmse_interval(targets, forecasts):
    """Return the RMSE between *targets* and the mean of each forecast interval.

    Fix: wrap both operands in np.asarray so plain Python lists work —
    the original ``fmean - targets`` raised TypeError when *targets*
    was a list, because ``fmean`` is always a list.
    """
    fmean = np.asarray([np.mean(i) for i in forecasts])
    return np.sqrt(np.nanmean((fmean - np.asarray(targets)) ** 2))
# Mean Absolute Percentage Error
def mape(targets, forecasts):
    """Return the MAPE (as a percentage) of *forecasts* against *targets*.

    NOTE(review): the denominator is *forecasts*, not *targets*,
    mirroring the original implementation exactly.
    """
    relative_errors = abs(forecasts - targets) / forecasts
    return np.mean(relative_errors) * 100
# Mean Absolute Percentage Error for interval forecasts: the error of each
# [lower, upper] forecast is measured against the interval's mean point.
def mape_interval(targets, forecasts):
    """Return the MAPE (as a percentage) between *targets* and the mean of
    each forecast interval.

    Fix: wrap both operands in np.asarray so plain Python lists work —
    the original ``fmean - targets`` raised TypeError for list inputs.
    The denominator is the forecast mean, as in the original.
    """
    fmean = np.asarray([np.mean(i) for i in forecasts])
    return np.mean(abs(fmean - np.asarray(targets)) / fmean) * 100
# Sharpness: mean width of the forecast intervals (smaller = sharper).
def sharpness(forecasts):
    """Return the average size of the [lower, upper] forecast intervals."""
    widths = [interval[1] - interval[0] for interval in forecasts]
    return np.mean(widths)
# Resolution: mean absolute deviation of the interval widths from the
# average width (the sharpness).
def resolution(forecasts):
    """Return how much the [lower, upper] interval widths vary around
    their mean width.
    """
    widths = [interval[1] - interval[0] for interval in forecasts]
    mean_width = np.mean(widths)  # same value sharpness() computes
    return np.mean([abs(w - mean_width) for w in widths])
# Coverage: fraction of target values that fall inside their forecast interval.
def coverage(targets, forecasts):
    """Return the proportion of *targets* contained in the corresponding
    [lower, upper] forecast intervals.

    Bug fix: the original appended 1 in BOTH branches, so coverage was
    always 1.0 regardless of the forecasts; a miss now counts as 0.
    """
    preds = []
    for i in np.arange(0, len(forecasts)):
        if forecasts[i][0] <= targets[i] <= forecasts[i][1]:
            preds.append(1)
        else:
            preds.append(0)
    return np.mean(preds)
def getIntervalStatistics(original, models):
    """Build a LaTeX-style table (returned as a string) with the
    interval-forecast metrics — RMSE, MAPE, sharpness, resolution and
    coverage — of each model in *models*, evaluated on *original*.

    Each model must expose forecast(), shortname and order; the first
    ``order`` points of *original* have no forecast and are skipped.
    """
    ret = "Model & RMSE & MAPE & Sharpness & Resolution & Coverage \\ \n"
    for fts in models:
        forecasts = fts.forecast(original)
        targets = original[fts.order :]
        columns = [
            fts.shortname,
            str(round(rmse_interval(targets, forecasts), 2)),
            str(round(mape_interval(targets, forecasts), 2)),
            str(round(sharpness(forecasts), 2)),
            str(round(resolution(forecasts), 2)),
            str(round(coverage(targets, forecasts), 2)),
        ]
        ret = ret + " & ".join(columns) + " \\ \n"
    return ret
def plotComparedSeries(original, models, colors):
    """Plot the series *original* (black) together with the forecasts of
    each model in *models*, one entry of *colors* per model.

    Reconstructed from a garbled side-by-side diff: this is the new-side
    implementation. Interval models (fts.isInterval) are drawn as a
    lower/upper pair of lines; point models as a single line. Forecasts
    are left-padded with None so they align with the targets.
    """
    fig = plt.figure(figsize=[25, 10])
    ax = fig.add_subplot(111)
    mi = []  # per-model forecast minima, used for the shared y-axis limits
    ma = []  # per-model forecast maxima
    ax.plot(original, color='black', label="Original")
    count = 0
    for fts in models:
        forecasted = fts.forecast(original)
        if fts.isInterval:
            lower = [kk[0] for kk in forecasted]
            upper = [kk[1] for kk in forecasted]
            mi.append(min(lower))
            ma.append(max(upper))
            # shift right by the model order so forecasts line up in time
            for k in np.arange(0, fts.order):
                lower.insert(0, None)
                upper.insert(0, None)
            ax.plot(lower, color=colors[count], label=fts.shortname)
            ax.plot(upper, color=colors[count])
        else:
            mi.append(min(forecasted))
            ma.append(max(forecasted))
            # NOTE(review): pads a single None regardless of fts.order —
            # confirm whether point forecasts should also shift by order
            forecasted.insert(0, None)
            ax.plot(forecasted, color=colors[count], label=fts.shortname)
        handles0, labels0 = ax.get_legend_handles_labels()
        ax.legend(handles0, labels0)
        count = count + 1
    # ax.set_title(fts.name)
    ax.set_ylim([min(mi), max(ma)])
    ax.set_ylabel('F(T)')
    ax.set_xlabel('T')
    ax.set_xlim([0, len(original)])

View File

@ -33,7 +33,7 @@ class HighOrderFLRG:
class HighOrderFTS(fts.FTS): class HighOrderFTS(fts.FTS):
def __init__(self,name): def __init__(self,name):
super(HighOrderFTS, self).__init__(1,"HOFTS") super(HighOrderFTS, self).__init__(1,"HOFTS" + name)
self.name = "High Order FTS" self.name = "High Order FTS"
self.detail = "Chen" self.detail = "Chen"
self.order = 1 self.order = 1

23
ifts.py
View File

@ -3,7 +3,8 @@ from pyFTS import *
class IntervalFTS(hofts.HighOrderFTS): class IntervalFTS(hofts.HighOrderFTS):
def __init__(self,name): def __init__(self,name):
super(IntervalFTS, self).__init__("IFTS") super(IntervalFTS, self).__init__("IFTS " + name)
self.shortname = "IFTS " + name
self.name = "Interval FTS" self.name = "Interval FTS"
self.detail = "Silva, P.; Guimarães, F.; Sadaei, H." self.detail = "Silva, P.; Guimarães, F.; Sadaei, H."
self.flrgs = {} self.flrgs = {}
@ -49,8 +50,8 @@ class IntervalFTS(hofts.HighOrderFTS):
for k in np.arange(self.order,l): for k in np.arange(self.order,l):
flrs = [] affected_flrgs = []
mvs = [] affected_flrgs_memberships = []
up = [] up = []
lo = [] lo = []
@ -81,10 +82,10 @@ class IntervalFTS(hofts.HighOrderFTS):
flrg = hofts.HighOrderFLRG(self.order) flrg = hofts.HighOrderFLRG(self.order)
for kk in path: flrg.appendLHS(self.sets[ kk ]) for kk in path: flrg.appendLHS(self.sets[ kk ])
flrs.append(flrg) affected_flrgs.append(flrg)
# Acha a pertinência geral de cada FLRG # Acha a pertinência geral de cada FLRG
mvs.append(min(self.getSequenceMembership(subset, flrg.LHS))) affected_flrgs_memberships.append(min(self.getSequenceMembership(subset, flrg.LHS)))
else: else:
mv = common.fuzzyInstance(ndata[k],self.sets) mv = common.fuzzyInstance(ndata[k],self.sets)
@ -93,18 +94,18 @@ class IntervalFTS(hofts.HighOrderFTS):
for kk in idx: for kk in idx:
flrg = hofts.HighOrderFLRG(self.order) flrg = hofts.HighOrderFLRG(self.order)
flrg.appendLHS(self.sets[ kk ]) flrg.appendLHS(self.sets[ kk ])
flrs.append(flrg) affected_flrgs.append(flrg)
mvs.append(mv[kk]) affected_flrgs_memberships.append(mv[kk])
count = 0 count = 0
for flrg in flrs: for flrg in affected_flrgs:
# achar o os bounds de cada FLRG, ponderados pela pertinência # achar o os bounds de cada FLRG, ponderados pela pertinência
up.append( mvs[count] * self.getUpper(flrg) ) up.append( affected_flrgs_memberships[count] * self.getUpper(flrg) )
lo.append( mvs[count] * self.getLower(flrg) ) lo.append( affected_flrgs_memberships[count] * self.getLower(flrg) )
count = count + 1 count = count + 1
# gerar o intervalo # gerar o intervalo
norm = sum(mvs) norm = sum(affected_flrgs_memberships)
ret.append( [ sum(lo)/norm, sum(up)/norm ] ) ret.append( [ sum(lo)/norm, sum(up)/norm ] )
return ret return ret

View File

@ -28,6 +28,7 @@ class ProbabilisticFLRG(hofts.HighOrderFLRG):
class ProbabilisticIntervalFTS(ifts.IntervalFTS): class ProbabilisticIntervalFTS(ifts.IntervalFTS):
def __init__(self,name): def __init__(self,name):
super(ProbabilisticIntervalFTS, self).__init__("PIFTS") super(ProbabilisticIntervalFTS, self).__init__("PIFTS")
self.shortname = "PIFTS " + name
self.name = "Probabilistic Interval FTS" self.name = "Probabilistic Interval FTS"
self.detail = "Silva, P.; Guimarães, F.; Sadaei, H." self.detail = "Silva, P.; Guimarães, F.; Sadaei, H."
self.flrgs = {} self.flrgs = {}
@ -84,8 +85,8 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
for k in np.arange(self.order,l): for k in np.arange(self.order,l):
flrs = [] affected_flrgs = []
mvs = [] affected_flrgs_memberships = []
norms = [] norms = []
up = [] up = []
@ -118,10 +119,10 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
for kk in path: flrg.appendLHS(self.sets[ kk ]) for kk in path: flrg.appendLHS(self.sets[ kk ])
## ##
flrs.append( flrg ) affected_flrgs.append( flrg )
# Acha a pertinência geral de cada FLRG # Acha a pertinência geral de cada FLRG
mvs.append(min(self.getSequenceMembership(subset, flrg.LHS))) affected_flrgs_memberships.append(min(self.getSequenceMembership(subset, flrg.LHS)))
else: else:
mv = common.fuzzyInstance(ndata[k],self.sets) # get all membership values mv = common.fuzzyInstance(ndata[k],self.sets) # get all membership values
@ -130,13 +131,13 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
for kk in idx: for kk in idx:
flrg = hofts.HighOrderFLRG(self.order) flrg = hofts.HighOrderFLRG(self.order)
flrg.appendLHS(self.sets[ kk ]) flrg.appendLHS(self.sets[ kk ])
flrs.append( flrg ) affected_flrgs.append( flrg )
mvs.append(mv[kk]) affected_flrgs_memberships.append(mv[kk])
count = 0 count = 0
for flrg in flrs: for flrg in affected_flrgs:
# achar o os bounds de cada FLRG, ponderados pela pertinência # achar o os bounds de cada FLRG, ponderados pela probabilidade e pertinência
norm = self.getProbability(flrg) * mvs[count] norm = self.getProbability(flrg) * affected_flrgs_memberships[count]
up.append( norm * self.getUpper(flrg) ) up.append( norm * self.getUpper(flrg) )
lo.append( norm * self.getLower(flrg) ) lo.append( norm * self.getLower(flrg) )
norms.append(norm) norms.append(norm)