Metrics: sharpness, resolution and coverage
parent 4160b7d8dd
commit 3f757fb977
@@ -13,37 +13,87 @@ def Teste(par):
plt.plot(x,y)

# Root mean squared error
def rmse(forecasts,targets):
def rmse(targets, forecasts):
    return np.sqrt(np.nanmean((forecasts-targets)**2))

# Mean absolute percentage error
def mape(forecasts,targets):
    return np.mean(abs(forecasts-targets)/forecasts)

def rmse_interval(targets, forecasts):
    fmean = [np.mean(i) for i in forecasts]
    return np.sqrt(np.nanmean((fmean-targets)**2))

def plotComparedSeries(original,fts,parameters):
    fig = plt.figure(figsize=[20,6])

# Mean absolute percentage error
def mape(targets, forecasts):
    return np.mean(abs(forecasts-targets)/forecasts)*100

def mape_interval(targets, forecasts):
    fmean = [np.mean(i) for i in forecasts]
    return np.mean(abs(fmean-targets)/fmean)*100
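rmse_interval and mape_interval collapse each forecasted [lower, upper] pair to its midpoint before comparing against the targets. A minimal sketch with made-up values, assuming the functions above and numpy are in scope:

import numpy as np

targets = np.array([10.0, 12.0, 11.0])
forecasts = [[9.0, 11.0], [11.0, 14.0], [10.0, 13.0]]   # midpoints: 10.0, 12.5, 11.5

print(rmse_interval(targets, forecasts))   # ~0.41
print(mape_interval(targets, forecasts))   # ~2.78 (percent)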
#Sharpness - Mean size of the intervals
def sharpness(forecasts):
    tmp = [i[1] - i[0] for i in forecasts]
    return np.mean(tmp)

#Resolution - Mean absolute deviation of the interval sizes from the sharpness
def resolution(forecasts):
    shp = sharpness(forecasts)
    tmp = [abs((i[1] - i[0]) - shp) for i in forecasts]
    return np.mean(tmp)
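On the same toy intervals, sharpness is the mean interval width and resolution measures how much the widths spread around that mean. A quick check, assuming the two functions above are in scope:

forecasts = [[9.0, 11.0], [11.0, 14.0], [10.0, 13.0]]   # interval widths: 2, 3, 3

print(sharpness(forecasts))    # mean width -> ~2.67
print(resolution(forecasts))   # mean absolute deviation of widths -> ~0.44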
# Coverage - Percent of target values that fall inside the forecasted intervals
def coverage(targets,forecasts):
    preds = []
    for i in np.arange(0,len(forecasts)):
        if targets[i] >= forecasts[i][0] and targets[i] <= forecasts[i][1]:
            preds.append(1)
        else:
            preds.append(0)
    return np.mean(preds)
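Coverage then reduces to the fraction of targets that land inside their interval. With the same made-up values as above:

import numpy as np

targets = np.array([10.0, 12.0, 11.0])
forecasts = [[9.0, 11.0], [11.0, 14.0], [10.0, 13.0]]

print(coverage(targets, forecasts))   # 1.0 -- every target falls inside its interval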
def getIntervalStatistics(original,models):
    ret = "Model & RMSE & MAPE & Sharpness & Resolution & Coverage \\\\ \n"
    for fts in models:
        forecasts = fts.forecast(original)
        ret = ret + fts.shortname + " & "
        ret = ret + str(round(rmse_interval(original[fts.order:],forecasts),2)) + " & "
        ret = ret + str(round(mape_interval(original[fts.order:],forecasts),2)) + " & "
        ret = ret + str(round(sharpness(forecasts),2)) + " & "
        ret = ret + str(round(resolution(forecasts),2)) + " & "
        ret = ret + str(round(coverage(original[fts.order:],forecasts),2)) + " \\\\ \n"
    return ret
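getIntervalStatistics builds one LaTeX table row per model, reading only shortname, order and forecast from each model object. A rough sketch of the call shape, using a hypothetical stand-in model (the class and values below are illustrative only, assuming the metric functions above are in scope):

import numpy as np

class _StubModel:
    # hypothetical stand-in; real models come from ifts/pifts
    shortname = "IFTS demo"
    order = 1
    def forecast(self, data):
        # one fixed-width interval per point after the first `order` values
        return [[x - 1.0, x + 1.0] for x in data[self.order:]]

data = np.array([10.0, 12.0, 11.0, 13.0])
print(getIntervalStatistics(data, [_StubModel()]))
# e.g.: Model & RMSE & MAPE & Sharpness & Resolution & Coverage \\
#       IFTS demo & 0.0 & 0.0 & 2.0 & 0.0 & 1.0 \\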
def plotComparedSeries(original,models, colors):
    fig = plt.figure(figsize=[25,10])
    ax = fig.add_subplot(111)

    mi = []
    ma = []

    ax.plot(original,color='black',label="Original")
    count = 0
    for fts in models:
        forecasted = fts.forecast(original)
        #error = rmse(original[1:],forecasted[0:-1])
        #np.append(original,[None])
        ax.plot(original,color='b',label="Original")
        if fts.isInterval:
            lower = [kk[0] for kk in forecasted]
            upper = [kk[1] for kk in forecasted]
            ax.set_ylim([min(lower),max(upper)])
            mi.append(min(lower))
            ma.append(max(upper))
            for k in np.arange(0,fts.order):
                lower.insert(0,None)
                upper.insert(0,None)
            ax.plot(lower,color='r',label="Predicted")
            ax.plot(upper,color='r')
            ax.plot(lower,color=colors[count],label=fts.shortname)
            ax.plot(upper,color=colors[count])

        else:
            mi.append(min(forecasted))
            ma.append(max(forecasted))
            forecasted.insert(0,None)
            ax.plot(forecasted,color='r',label="Predicted")
            ax.set_ylim([np.nanmin(forecasted),np.nanmax(forecasted)])
            ax.plot(forecasted,color=colors[count],label=fts.shortname)

        handles0, labels0 = ax.get_legend_handles_labels()
        ax.legend(handles0,labels0)
        ax.set_title(fts.name)
        count = count + 1
    #ax.set_title(fts.name)
    ax.set_ylim([min(mi),max(ma)])
    ax.set_ylabel('F(T)')
    ax.set_xlabel('T')
    ax.set_xlim([0,len(original)])
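A rough usage sketch for the new signature plotComparedSeries(original, models, colors), again with a hypothetical stand-in model exposing the attributes the function reads (isInterval, order, shortname, name, forecast), and assuming numpy and matplotlib are imported as in the module:

import numpy as np

class _StubIntervalModel:
    # hypothetical stand-in for an interval model such as IFTS/PIFTS
    shortname = "IFTS demo"
    name = "Interval FTS (demo)"
    isInterval = True
    order = 1
    def forecast(self, data):
        return [[x - 1.0, x + 1.0] for x in data[self.order:]]

data = np.array([10.0, 12.0, 11.0, 13.0, 12.0])
plotComparedSeries(data, [_StubIntervalModel()], ['green'])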
hofts.py (2)
@@ -33,7 +33,7 @@ class HighOrderFLRG:
class HighOrderFTS(fts.FTS):
    def __init__(self,name):
        super(HighOrderFTS, self).__init__(1,"HOFTS")
        super(HighOrderFTS, self).__init__(1,"HOFTS" + name)
        self.name = "High Order FTS"
        self.detail = "Chen"
        self.order = 1
ifts.py (23)
@@ -3,7 +3,8 @@ from pyFTS import *
class IntervalFTS(hofts.HighOrderFTS):
    def __init__(self,name):
        super(IntervalFTS, self).__init__("IFTS")
        super(IntervalFTS, self).__init__("IFTS " + name)
        self.shortname = "IFTS " + name
        self.name = "Interval FTS"
        self.detail = "Silva, P.; Guimarães, F.; Sadaei, H."
        self.flrgs = {}
@@ -49,8 +50,8 @@ class IntervalFTS(hofts.HighOrderFTS):
for k in np.arange(self.order,l):

    flrs = []
    mvs = []
    affected_flrgs = []
    affected_flrgs_memberships = []

    up = []
    lo = []
@@ -81,10 +82,10 @@ class IntervalFTS(hofts.HighOrderFTS):
            flrg = hofts.HighOrderFLRG(self.order)
            for kk in path: flrg.appendLHS(self.sets[ kk ])

            flrs.append(flrg)
            affected_flrgs.append(flrg)

            # Find the overall membership degree of each FLRG
            mvs.append(min(self.getSequenceMembership(subset, flrg.LHS)))
            affected_flrgs_memberships.append(min(self.getSequenceMembership(subset, flrg.LHS)))
    else:

        mv = common.fuzzyInstance(ndata[k],self.sets)
@@ -93,18 +94,18 @@ class IntervalFTS(hofts.HighOrderFTS):
        for kk in idx:
            flrg = hofts.HighOrderFLRG(self.order)
            flrg.appendLHS(self.sets[ kk ])
            flrs.append( flrg )
            mvs.append(mv[kk])
            affected_flrgs.append( flrg )
            affected_flrgs_memberships.append(mv[kk])

    count = 0
    for flrg in flrs:
    for flrg in affected_flrgs:
        # find the bounds of each FLRG, weighted by its membership degree
        up.append( mvs[count] * self.getUpper(flrg) )
        lo.append( mvs[count] * self.getLower(flrg) )
        up.append( affected_flrgs_memberships[count] * self.getUpper(flrg) )
        lo.append( affected_flrgs_memberships[count] * self.getLower(flrg) )
        count = count + 1

    # generate the interval
    norm = sum(mvs)
    norm = sum(affected_flrgs_memberships)
    ret.append( [ sum(lo)/norm, sum(up)/norm ] )

return ret
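The interval generated for each point is a membership-weighted average of the affected FLRGs' bounds, normalized by the sum of memberships. A small numeric sketch of that normalization, with made-up memberships and bounds:

# hypothetical memberships and bounds for two affected FLRGs
affected_flrgs_memberships = [0.7, 0.3]
lowers = [10.0, 12.0]
uppers = [14.0, 18.0]

lo = [m * lw for m, lw in zip(affected_flrgs_memberships, lowers)]   # [7.0, 3.6]
up = [m * ub for m, ub in zip(affected_flrgs_memberships, uppers)]   # [9.8, 5.4]

norm = sum(affected_flrgs_memberships)                               # 1.0
interval = [sum(lo) / norm, sum(up) / norm]                          # [10.6, 15.2]
print(interval)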
pifts.py (19)
@@ -28,6 +28,7 @@ class ProbabilisticFLRG(hofts.HighOrderFLRG):
class ProbabilisticIntervalFTS(ifts.IntervalFTS):
    def __init__(self,name):
        super(ProbabilisticIntervalFTS, self).__init__("PIFTS")
        self.shortname = "PIFTS " + name
        self.name = "Probabilistic Interval FTS"
        self.detail = "Silva, P.; Guimarães, F.; Sadaei, H."
        self.flrgs = {}
@@ -84,8 +85,8 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
for k in np.arange(self.order,l):

    flrs = []
    mvs = []
    affected_flrgs = []
    affected_flrgs_memberships = []
    norms = []

    up = []
@@ -118,10 +119,10 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
            for kk in path: flrg.appendLHS(self.sets[ kk ])

            ##
            flrs.append( flrg )
            affected_flrgs.append( flrg )

            # Find the overall membership degree of each FLRG
            mvs.append(min(self.getSequenceMembership(subset, flrg.LHS)))
            affected_flrgs_memberships.append(min(self.getSequenceMembership(subset, flrg.LHS)))
    else:

        mv = common.fuzzyInstance(ndata[k],self.sets) # get all membership values
@@ -130,13 +131,13 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
        for kk in idx:
            flrg = hofts.HighOrderFLRG(self.order)
            flrg.appendLHS(self.sets[ kk ])
            flrs.append( flrg )
            mvs.append(mv[kk])
            affected_flrgs.append( flrg )
            affected_flrgs_memberships.append(mv[kk])

    count = 0
    for flrg in flrs:
        # find the bounds of each FLRG, weighted by its membership degree
        norm = self.getProbability(flrg) * mvs[count]
    for flrg in affected_flrgs:
        # find the bounds of each FLRG, weighted by probability and membership degree
        norm = self.getProbability(flrg) * affected_flrgs_memberships[count]
        up.append( norm * self.getUpper(flrg) )
        lo.append( norm * self.getLower(flrg) )
        norms.append(norm)
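In the probabilistic variant each FLRG's weight becomes its probability times its membership, and the interval is normalized by the sum of those weights collected in norms. A small made-up example of the weighting:

# hypothetical probabilities, memberships and bounds for two affected FLRGs
probs = [0.6, 0.4]
memberships = [0.7, 0.3]
lowers = [10.0, 12.0]
uppers = [14.0, 18.0]

up, lo, norms = [], [], []
for p, m, lower, upper in zip(probs, memberships, lowers, uppers):
    norm = p * m                    # probability * membership, as in the loop above
    up.append(norm * upper)
    lo.append(norm * lower)
    norms.append(norm)

total = sum(norms)                               # 0.54
interval = [sum(lo) / total, sum(up) / total]    # [~10.44, ~14.89]
print(interval)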