Benchmark optimizations and bug fixes in PFTS

Petrônio Cândido de Lima e Silva 2017-01-12 19:25:10 -02:00
parent ba1b4fbae6
commit 53029681d8
2 changed files with 74 additions and 28 deletions


@@ -32,8 +32,8 @@ def plotDistribution(dist):
                    vmin=0, vmax=1, edgecolors=None)


-def plotComparedSeries(original, models, colors):
-    fig = plt.figure(figsize=[15, 5])
+def plotComparedSeries(original, models, colors, typeonlegend=False, save=False, file=None,tam=[20, 5]):
+    fig = plt.figure(figsize=tam)
     ax = fig.add_subplot(111)

     mi = []
@@ -48,7 +48,9 @@ def plotComparedSeries(original, models, colors):
             ma.append(max(forecasted))
             for k in np.arange(0, fts.order):
                 forecasted.insert(0, None)
-            ax.plot(forecasted, color=colors[count], label=fts.shortname, ls="-")
+            lbl = fts.shortname
+            if typeonlegend: lbl += " (Point)"
+            ax.plot(forecasted, color=colors[count], label=lbl, ls="-")

         if fts.hasIntervalForecasting:
             forecasted = fts.forecastInterval(original)
@@ -59,7 +61,9 @@ def plotComparedSeries(original, models, colors):
             for k in np.arange(0, fts.order):
                 lower.insert(0, None)
                 upper.insert(0, None)
-            ax.plot(lower, color=colors[count], label=fts.shortname,ls="--")
+            lbl = fts.shortname
+            if typeonlegend: lbl += " (Interval)"
+            ax.plot(lower, color=colors[count], label=lbl,ls="--")
             ax.plot(upper, color=colors[count],ls="--")

     handles0, labels0 = ax.get_legend_handles_labels()
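plotComparedSeries now tags legend entries by forecast type and can write the figure to disk instead of only displaying it. A standalone sketch of the same pattern (it does not call the pyFTS benchmarks module; series, names and file path are made up):

# Standalone sketch of the new plotComparedSeries options (typeonlegend, save, file, tam).
import matplotlib.pyplot as plt

def plot_sketch(series, shortname="PFTS", typeonlegend=True,
                save=True, file="compared.png", tam=[20, 5]):
    fig = plt.figure(figsize=tam)      # tam replaces the old hard-coded [15, 5]
    ax = fig.add_subplot(111)
    lbl = shortname
    if typeonlegend:
        lbl += " (Point)"              # same suffixing rule the commit introduces
    ax.plot(series, label=lbl, ls="-")
    ax.legend()
    if save:                           # new save path: write to disk and release the figure
        fig.savefig(file)
        plt.close(fig)

plot_sketch([1, 2, 3, 2, 4])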
@@ -71,32 +75,58 @@ def plotComparedSeries(original, models, colors):
     ax.set_xlabel('T')
     ax.set_xlim([0, len(original)])

+    if save:
+        fig.savefig(file)
+        plt.close(fig)
+

-def plotComparedIntervalsAhead(original, models, colors, distributions, time_from, time_to):
-    fig = plt.figure(figsize=[25, 10])
+def plotComparedIntervalsAhead(original, models, colors, distributions, time_from, time_to, interpol=False, save=False, file=None,tam=[20, 5]):
+    fig = plt.figure(figsize=tam)
     ax = fig.add_subplot(111)

+    percentile = (max(original) - min(original))/100
+
     mi = []
     ma = []

     count = 0
     for fts in models:
         if fts.hasDistributionForecasting and distributions[count]:
-            density = fts.forecastDistributionAhead(original[:time_from], time_to, 25)
+            density = fts.forecastAheadDistribution(original[time_from - fts.order:time_from], time_to, percentile)
+
+            y = density.columns
+            t = len(y)
+
+            # interpol between time_from and time_from+1
+            if interpol:
+                diffs = [density[q][0] / 50 for q in density]
+                for p in np.arange(0, 50):
+                    xx = [(time_from - 1) + 0.02 * p for q in np.arange(0, t)]
+                    alpha2 = np.array([diffs[q] * p for q in np.arange(0, t)]) * 100
+                    ax.scatter(xx, y, c=alpha2, marker='s', linewidths=0, cmap='Oranges',
+                               norm=pltcolors.Normalize(vmin=0, vmax=1), vmin=0, vmax=1, edgecolors=None)

             for k in density.index:
-                alpha = np.array([density[x][k] for x in density]) * 100
-                x = [time_from + fts.order + k for x in np.arange(0, len(alpha))]
-                y = density.columns
+                alpha = np.array([density[q][k] for q in density]) * 100
+                x = [time_from + k for x in np.arange(0, t)]
                 ax.scatter(x, y, c=alpha, marker='s', linewidths=0, cmap='Oranges',
                            norm=pltcolors.Normalize(vmin=0, vmax=1), vmin=0, vmax=1, edgecolors=None)
+                if interpol and k < max(density.index):
+                    diffs = [(density[q][k + 1] - density[q][k])/50 for q in density]
+                    for p in np.arange(0,50):
+                        xx = [time_from + k + 0.02*p for q in np.arange(0, t)]
+                        alpha2 = np.array([density[density.columns[q]][k] + diffs[q]*p for q in np.arange(0, t)]) * 100
+                        ax.scatter(xx, y, c=alpha2, marker='s', linewidths=0, cmap='Oranges',
+                                   norm=pltcolors.Normalize(vmin=0, vmax=1), vmin=0, vmax=1, edgecolors=None)

         if fts.hasIntervalForecasting:
-            forecasts = fts.forecastAhead(original[:time_from], time_to)
+            forecasts = fts.forecastAheadInterval(original[time_from - fts.order:time_from], time_to)
             lower = [kk[0] for kk in forecasts]
             upper = [kk[1] for kk in forecasts]
             mi.append(min(lower))
             ma.append(max(upper))
-            for k in np.arange(0, time_from):
+            for k in np.arange(0, time_from-fts.order):
                 lower.insert(0, None)
                 upper.insert(0, None)
             ax.plot(lower, color=colors[count], label=fts.shortname)
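The new interpol branch fades each density column into the next one by drawing 50 intermediate scatter columns, spaced 0.02 apart in time, with opacities linearly interpolated between consecutive forecast steps. A standalone sketch of that interpolation rule on a toy two-step density table (the DataFrame layout, columns as y-values and rows as forecast steps, is assumed to match the density[column][row] indexing above):

import numpy as np
import pandas as pd

# Toy "density" table: columns are y-values of the Universe of Discourse,
# rows (index) are forecast steps; cells hold probability mass.
density = pd.DataFrame([[0.1, 0.6, 0.3],
                        [0.4, 0.4, 0.2]], columns=[10, 20, 30])

k = 0                                   # interpolate between step k and k+1
t = len(density.columns)
diffs = [(density[q][k + 1] - density[q][k]) / 50 for q in density]

for p in np.arange(0, 50):
    # x positions advance by 0.02 per sub-step; alphas move linearly
    # from density[.][k] toward density[.][k+1], as in the plot code.
    xx = [k + 0.02 * p for _ in np.arange(0, t)]
    alpha2 = np.array([density[density.columns[q]][k] + diffs[q] * p
                       for q in np.arange(0, t)]) * 100

print(xx[0], alpha2)                    # last sub-step: x = k + 0.98, alphas close to step k+1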
@@ -110,16 +140,20 @@ def plotComparedIntervalsAhead(original, models, colors, distributions, time_from
                 forecasts.insert(0, None)
             ax.plot(forecasts, color=colors[count], label=fts.shortname)

-        handles0, labels0 = ax.get_legend_handles_labels()
-        ax.legend(handles0, labels0)
         count = count + 1
     ax.plot(original, color='black', label="Original")
+    handles0, labels0 = ax.get_legend_handles_labels()
+    ax.legend(handles0, labels0, loc=2)
     # ax.set_title(fts.name)
     ax.set_ylim([min(mi), max(ma)])
     ax.set_ylabel('F(T)')
     ax.set_xlabel('T')
     ax.set_xlim([0, len(original)])

+    if save:
+        fig.savefig(file)
+        plt.close(fig)
+

 def plotCompared(original, forecasts, labels, title):
     fig = plt.figure(figsize=[13, 6])
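Both ahead-forecasting calls now receive only the last `order` observations before `time_from`, and the None padding was shortened by `fts.order`, so the seed values and the forecasts land on the same time axis as the original series. A self-contained sketch of the alignment arithmetic (the forecast values are dummies; only the indexing matters):

import numpy as np

# Alignment sketch: with order = 2 and time_from = 10, the model receives the
# last `order` known points; its output starts with those seed points followed
# by `steps` forecasts, and is left-padded so the first forecast lands at
# x = time_from.
order, time_from, steps = 2, 10, 5
original = list(np.arange(0, 20))

window = original[time_from - order:time_from]               # what the model now receives
ret = window + [window[-1] + k for k in range(1, steps + 1)] # seed points + dummy forecasts
lower = list(ret)
for _ in np.arange(0, time_from - order):                    # new padding count: time_from - order
    lower.insert(0, None)

print(lower[time_from - order:time_from])   # the two seed points, at x = 8 and 9
print(lower[time_from])                      # first real forecast, at x = time_from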

pfts.py

@@ -12,9 +12,9 @@ class ProbabilisticFLRG(hofts.HighOrderFLRG):
         self.frequencyCount = 0.0

     def appendRHS(self, c):
-        self.frequencyCount += 1
+        self.frequencyCount += 1.0
         if c.name in self.RHS:
-            self.RHS[c.name] += 1
+            self.RHS[c.name] += 1.0
         else:
             self.RHS[c.name] = 1.0
@@ -26,7 +26,7 @@ class ProbabilisticFLRG(hofts.HighOrderFLRG):
         for c in sorted(self.RHS):
             if len(tmp2) > 0:
                 tmp2 = tmp2 + ", "
-            tmp2 = tmp2 + c + "(" + str(round(self.RHS[c] / self.frequencyCount, 3)) + ")"
+            tmp2 = tmp2 + "(" + str(round(self.RHS[c] / self.frequencyCount, 3)) + ")" + c
         return self.strLHS() + " -> " + tmp2
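With the counts stored as floats, RHS[c] / frequencyCount is a proper relative frequency, and the rule string now prints the probability before the set name. A throwaway sketch of the string that is built (the RHS dictionary and LHS label are made up):

# Illustration of the new rule formatting: probability first, set name after.
RHS = {"A1": 2.0, "A2": 1.0}          # float counts, as appendRHS now stores them
frequencyCount = sum(RHS.values())    # 3.0

tmp2 = ""
for c in sorted(RHS):
    if len(tmp2) > 0:
        tmp2 = tmp2 + ", "
    tmp2 = tmp2 + "(" + str(round(RHS[c] / frequencyCount, 3)) + ")" + c

print("A1,A2 -> " + tmp2)             # A1,A2 -> (0.667)A1, (0.333)A2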
@@ -60,21 +60,31 @@ class ProbabilisticFTS(ifts.IntervalFTS):
                 flrgs[flrg.strLHS()].appendRHS(flrs[k-1].RHS)
                 if self.dump: print("RHS: " + str(flrs[k-1]))

-            self.globalFrequency = self.globalFrequency + 1
+            self.globalFrequency += 1
         return (flrgs)

+    def addNewPFLGR(self,flrg):
+        if flrg.strLHS() not in self.flrgs:
+            tmp = ProbabilisticFLRG(self.order)
+            for fs in flrg.LHS: tmp.appendLHS(fs)
+            tmp.appendRHS(flrg.LHS[-1])
+            self.flrgs[tmp.strLHS()] = tmp;
+            self.globalFrequency += 1
+
     def getProbability(self, flrg):
         if flrg.strLHS() in self.flrgs:
             return self.flrgs[flrg.strLHS()].frequencyCount / self.globalFrequency
         else:
-            return 1.0 / self.globalFrequency
+            self.addNewPFLGR(flrg)
+            return self.getProbability(flrg)

     def getMidpoints(self, flrg):
         if flrg.strLHS() in self.flrgs:
             tmp = self.flrgs[flrg.strLHS()]
             ret = sum(np.array([tmp.getProbability(s) * self.setsDict[s].centroid for s in tmp.RHS]))
         else:
-            ret = sum(np.array([0.33 * s.centroid for s in flrg.LHS]))
+            pi = 1 / len(flrg.LHS)
+            ret = sum(np.array([pi * s.centroid for s in flrg.LHS]))
         return ret

     def getUpper(self, flrg):
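Previously an unseen LHS pattern was scored as a flat 1/globalFrequency and forgotten; now it is registered once via addNewPFLGR (its RHS seeded with its own last fuzzy set) and the probability query recurses, so the new rule also contributes to globalFrequency. A standalone sketch of that bookkeeping with plain dictionaries (pattern names and counts are made up):

# Simplified bookkeeping behind addNewPFLGR/getProbability: unseen patterns are
# registered with frequency 1 and then scored like any other rule.
flrgs = {"A1,A2": 4.0, "A2,A3": 1.0}    # pattern -> frequencyCount
global_frequency = 5

def get_probability(pattern):
    global global_frequency
    if pattern not in flrgs:            # addNewPFLGR: remember the pattern once
        flrgs[pattern] = 1.0
        global_frequency += 1
    return flrgs[pattern] / global_frequency

print(get_probability("A1,A2"))         # 4/5 = 0.8
print(get_probability("A3,A3"))         # registered on the fly: 1/6 ~ 0.167
print(get_probability("A1,A2"))         # existing rules are diluted slightly: 4/6 ~ 0.667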
@@ -82,7 +92,8 @@ class ProbabilisticFTS(ifts.IntervalFTS):
             tmp = self.flrgs[flrg.strLHS()]
             ret = sum(np.array([tmp.getProbability(s) * self.setsDict[s].upper for s in tmp.RHS]))
         else:
-            ret = sum(np.array([0.33 * s.upper for s in flrg.LHS]))
+            pi = 1 / len(flrg.LHS)
+            ret = sum(np.array([pi * s.upper for s in flrg.LHS]))
         return ret

     def getLower(self, flrg):
@@ -90,7 +101,8 @@ class ProbabilisticFTS(ifts.IntervalFTS):
             tmp = self.flrgs[flrg.strLHS()]
             ret = sum(np.array([tmp.getProbability(s) * self.setsDict[s].lower for s in tmp.RHS]))
         else:
-            ret = sum(np.array([0.33 * s.lower for s in flrg.LHS]))
+            pi = 1 / len(flrg.LHS)
+            ret = sum(np.array([pi * s.lower for s in flrg.LHS]))
         return ret

     def forecast(self, data):
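In all three fallbacks (getMidpoints, getUpper, getLower) the hard-coded 0.33 weight, which only approximated a uniform weight for three LHS sets and did not sum to one, is replaced by pi = 1/len(flrg.LHS), i.e. a true equal-weight average of the LHS sets. A small numeric illustration with made-up centroids:

import numpy as np

# Fallback weighting for a rule not in self.flrgs: equal weights over the LHS sets.
centroids = [10.0, 20.0, 30.0, 40.0]    # made-up centroids of an order-4 LHS

pi = 1 / len(centroids)
print(sum(np.array([pi * c for c in centroids])))     # 25.0, a true mean
print(sum(np.array([0.33 * c for c in centroids])))   # 33.0 with the old constant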
@@ -224,9 +236,9 @@ class ProbabilisticFTS(ifts.IntervalFTS):
             idx = np.ravel(tmp)  # flatten the array

             if idx.size == 0:  # the element is out of the bounds of the Universe of Discourse
-                if math.ceil(instance) <= self.sets[0].lower:
+                if instance <= self.sets[0].lower:
                     idx = [0]
-                elif math.ceil(instance) >= self.sets[-1].upper:
+                elif instance >= self.sets[-1].upper:
                     idx = [len(self.sets) - 1]
                 else:
                     raise Exception(instance)
@@ -262,9 +274,9 @@ class ProbabilisticFTS(ifts.IntervalFTS):
                 idx = np.ravel(tmp)  # flatten the array

                 if idx.size == 0:  # the element is out of the bounds of the Universe of Discourse
-                    if math.ceil(ndata[k]) <= self.sets[0].lower:
+                    if ndata[k] <= self.sets[0].lower:
                         idx = [0]
-                    elif math.ceil(ndata[k]) >= self.sets[-1].upper:
+                    elif ndata[k] >= self.sets[-1].upper:
                         idx = [len(self.sets) - 1]
                     else:
                         raise Exception(ndata[k])
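Comparing the raw value instead of math.ceil matters when the Universe of Discourse has non-integer bounds: the ceiling of a value slightly below the lower bound can jump past it, so neither branch matched and the old code raised instead of clipping to the boundary set. A minimal reproduction with made-up bounds:

import math

# Why dropping math.ceil helps: ceil can overshoot a non-integer lower bound.
lower_bound = 10.5                          # made-up lower limit of the Universe of Discourse
instance = 10.2                             # below the universe, should map to set 0

print(math.ceil(instance) <= lower_bound)   # False -> old code fell through and raised
print(instance <= lower_bound)              # True  -> new code clips to idx = [0]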
@@ -312,7 +324,7 @@ class ProbabilisticFTS(ifts.IntervalFTS):
     def forecastAheadInterval(self, data, steps):
         ret = [[data[k], data[k]] for k in np.arange(len(data) - self.order, len(data))]

-        for k in np.arange(self.order - 1, steps):
+        for k in np.arange(self.order, steps+self.order):

             if ret[-1][0] <= self.sets[0].lower and ret[-1][1] >= self.sets[-1].upper:
                 ret.append(ret[-1])
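The old range np.arange(self.order - 1, steps) iterated steps - order + 1 times, so higher-order models produced fewer ahead intervals than requested; the new range always yields exactly `steps` iterations after the `order` seed values. A quick check of the iteration counts:

import numpy as np

# Loop-range fix: the ahead forecast should append exactly `steps` new intervals
# after the `order` seed values copied from the input data.
order, steps = 2, 10

old_iters = len(np.arange(order - 1, steps))       # 9  -> one interval short for order 2
new_iters = len(np.arange(order, steps + order))   # 10 -> one per requested step

print(old_iters, new_iters)                        # 9 10
print(order + new_iters)                           # len(ret) after the loop: order + steps = 12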
@@ -390,7 +402,7 @@ class ProbabilisticFTS(ifts.IntervalFTS):
         intervals = self.forecastAheadInterval(data, steps)

-        for k in np.arange(self.order, steps):
+        for k in np.arange(self.order, steps+self.order):
             grid = self.getGridClean(resolution)
             grid = self.gridCount(grid, resolution, intervals[k])
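The same range fix is applied in forecastAheadDistribution so that its loop walks the indices forecastAheadInterval actually fills: intervals holds `order` seed entries plus `steps` forecasts, the old loop visited only steps - order of them, and the new one covers one index per requested step while staying inside the list. A quick index check under those assumptions:

import numpy as np

# The distribution loop now mirrors forecastAheadInterval's output layout.
order, steps = 2, 10
intervals_len = order + steps                 # entries returned by forecastAheadInterval

old_ks = np.arange(order, steps)              # 8 indices -> fewer densities than requested
new_ks = np.arange(order, steps + order)      # 10 indices, max 11 < intervals_len

print(len(old_ks), len(new_ks), new_ks.max() < intervals_len)   # 8 10 True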