Corrections and optimizations in the IFTS and PIFTS code

This commit is contained in:
Petrônio Cândido de Lima e Silva 2016-11-08 14:08:06 -02:00
parent c0342f5684
commit 7c1e79b30d
6 changed files with 123 additions and 26 deletions

View File

@@ -1,6 +1,7 @@
 import numpy as np
 import pandas as pd
 import matplotlib as plt
+import matplotlib.colors as pltcolors
 import matplotlib.pyplot as plt
 from mpl_toolkits.mplot3d import Axes3D
 from sklearn.cross_validation import KFold
@@ -66,7 +67,7 @@ def plotDistribution(dist):
         alpha = np.array([dist[x][k] for x in dist])*100
         x = [k for x in np.arange(0,len(alpha))]
         y = dist.columns
-        plt.scatter(x,y,c=alpha,marker='s',linewidths=0,cmap='Reds',edgecolors=None)
+        plt.scatter(x,y,c=alpha,marker='s',linewidths=0,cmap='Oranges',norm=pltcolors.Normalize(vmin=0,vmax=1),vmin=0,vmax=1,edgecolors=None)
 
 def plotComparedSeries(original,models, colors):
     fig = plt.figure(figsize=[25,10])
@@ -79,6 +80,7 @@ def plotComparedSeries(original,models, colors):
     count = 0
     for fts in models:
         forecasted = fts.forecast(original)
+
         if fts.isInterval:
             lower = [kk[0] for kk in forecasted]
             upper = [kk[1] for kk in forecasted]
@@ -106,20 +108,28 @@ def plotComparedSeries(original,models, colors):
     ax.set_xlim([0,len(original)])
 
-def plotComparedIntervalsAhead(original,models, colors, time_from, time_to):
+def plotComparedIntervalsAhead(original,models, colors, distributions, time_from, time_to):
     fig = plt.figure(figsize=[25,10])
     ax = fig.add_subplot(111)
     mi = []
     ma = []
+    ax.plot(original,color='black',label="Original")
     count = 0
     for fts in models:
+        if fts.isDensity and distributions[count]:
+            density = fts.forecastDistributionAhead(original[:time_from],time_to,25)
+            for k in density.index:
+                alpha = np.array([density[x][k] for x in density])*100
+                x = [time_from + fts.order + k for x in np.arange(0,len(alpha))]
+                y = density.columns
+                ax.scatter(x,y,c=alpha,marker='s',linewidths=0,cmap='Oranges',
+                           norm=pltcolors.Normalize(vmin=0,vmax=1),vmin=0,vmax=1,edgecolors=None)
         if fts.isInterval:
-            forecasted = fts.forecastAhead(original[:time_from],time_to)
-            lower = [kk[0] for kk in forecasted]
-            upper = [kk[1] for kk in forecasted]
+            forecasts = fts.forecastAhead(original[:time_from],time_to)
+            lower = [kk[0] for kk in forecasts]
+            upper = [kk[1] for kk in forecasts]
             mi.append(min(lower))
             ma.append(max(upper))
             for k in np.arange(0,time_from):
@@ -129,15 +139,17 @@ def plotComparedIntervalsAhead(original,models, colors, time_from, time_to):
             ax.plot(upper,color=colors[count])
         else:
-            forecasted = fts.forecast(original)
-            mi.append(min(forecasted))
-            ma.append(max(forecasted))
-            forecasted.insert(0,None)
-            ax.plot(forecasted,color=colors[count],label=fts.shortname)
+            forecasts = fts.forecast(original)
+            mi.append(min(forecasts))
+            ma.append(max(forecasts))
+            for k in np.arange(0,time_from):
+                forecasts.insert(0,None)
+            ax.plot(forecasts,color=colors[count],label=fts.shortname)
 
         handles0, labels0 = ax.get_legend_handles_labels()
         ax.legend(handles0,labels0)
         count = count + 1
-    ax.plot(original,color='black',label="Original")
 
     #ax.set_title(fts.name)
     ax.set_ylim([min(mi),max(ma)])
     ax.set_ylabel('F(T)')
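For illustration, a minimal self-contained sketch of the density overlay that plotComparedIntervalsAhead now draws: a per-step distribution DataFrame is painted as a column of coloured squares at each forecast step. The data, time_from and order values are made up, and only norm is passed to scatter (recent matplotlib versions reject norm combined with vmin/vmax, which the patch still passes).

import numpy as np
import pandas as pd
import matplotlib.colors as pltcolors
import matplotlib.pyplot as plt

# Synthetic stand-in for the DataFrame returned by forecastDistributionAhead:
# one row per forecast step, one column per grid point of the Universe of Discourse.
np.random.seed(0)
bins = np.linspace(0, 10, 21)
density = pd.DataFrame(np.random.dirichlet(np.ones(len(bins)), size=5), columns=bins)

time_from, order = 50, 2          # hypothetical split point and model order
fig, ax = plt.subplots()
for k in density.index:
    alpha = np.array([density[c][k] for c in density]) * 100   # as in the patch; values above 1 saturate the colormap
    x = [time_from + order + k for _ in np.arange(0, len(alpha))]
    y = density.columns
    ax.scatter(x, y, c=alpha, marker='s', linewidths=0, cmap='Oranges',
               norm=pltcolors.Normalize(vmin=0, vmax=1))
plt.show()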

View File

@@ -8,12 +8,13 @@ def differential(original):
     return np.array(diff)
 
 def trimf(x,parameters):
-    if(x < parameters[0]):
+    xx = round(x,3)
+    if(xx < parameters[0]):
         return 0
-    elif(x >= parameters[0] and x < parameters[1]):
+    elif(xx >= parameters[0] and xx < parameters[1]):
         return (x-parameters[0])/(parameters[1]-parameters[0])
-    elif(x >= parameters[1] and x <= parameters[2]):
-        return (parameters[2]-x)/(parameters[2]-parameters[1])
+    elif(xx >= parameters[1] and xx <= parameters[2]):
+        return (parameters[2]-xx)/(parameters[2]-parameters[1])
     else:
         return 0
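As a side note, a standalone restatement of the patched triangular membership function, applying the rounded value on both slopes (the patch keeps the raw x on the left slope), with a few evaluations; the parameter triples are arbitrary.

def trimf(x, parameters):
    # Triangular membership with vertices [a, b, c]; rounding guards against
    # floating-point noise right at the vertices.
    a, b, c = parameters
    xx = round(x, 3)
    if xx < a or xx > c:
        return 0
    elif xx < b:
        return (xx - a) / (b - a)
    return (c - xx) / (c - b)

print(trimf(15.0, [10, 20, 30]))        # 0.5, halfway up the left slope
print(trimf(19.9999999, [10, 20, 30]))  # 1.0 after rounding snaps to the peak
print(trimf(30.001, [10, 20, 30]))      # 0, just outside the support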

fts.py
View File

@@ -11,6 +11,7 @@ class FTS:
         self.detail = name
         self.isSeasonal = False
         self.isInterval = False
+        self.isDensity = False
 
     def fuzzy(self,data):
         best = {"fuzzyset":"", "membership":0.0}

View File

@@ -27,6 +27,8 @@ class IntervalFTS(hofts.HighOrderFTS):
         return ret
 
     def getSequenceMembership(self, data, fuzzySets):
+        #print(data)
+        #print(fuzzySets)
         mb = [ fuzzySets[k].membership( data[k] ) for k in np.arange(0,len(data)) ]
         return mb
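For context, a hedged sketch of what getSequenceMembership computes: one membership value per lag of a window against its matching fuzzy set; the forecast step later takes the minimum of this list as the rule membership. FakeSet is a hypothetical stand-in for common.FuzzySet.

def trimf(x, p):
    # standard triangular membership for the sketch
    a, b, c = p
    return max(min((x - a) / (b - a), (c - x) / (c - b)), 0)

class FakeSet:
    def __init__(self, params): self.params = params
    def membership(self, x): return trimf(x, self.params)

window = [18, 27, 33]                                   # one lagged window of data
lhs = [FakeSet([10, 20, 30]), FakeSet([20, 30, 40]), FakeSet([30, 40, 50])]
mb = [lhs[k].membership(window[k]) for k in range(len(window))]
print(mb, min(mb))                                      # per-lag memberships and the rule membership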

View File

@@ -1,4 +1,5 @@
 import numpy as np
+import math
 from pyFTS import *
 
 #print(common.__dict__)
@@ -10,10 +11,10 @@ def GridPartitionerTrimf(data,npart,names = None,prefix = "A"):
     dmin = min(data)
     dmin = dmin - dmin*0.10
     dlen = dmax - dmin
-    partlen = dlen / npart
-    partition = dmin
+    partlen = math.ceil(dlen / npart)
+    partition = math.ceil(dmin)
     for c in range(npart):
-        sets.append(common.FuzzySet(prefix+str(c),common.trimf,[partition-partlen, partition, partition+partlen], partition ) )
+        sets.append(common.FuzzySet(prefix+str(c),common.trimf,[round(partition-partlen,3), partition, partition+partlen], partition ) )
         partition = partition + partlen
     return sets
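A minimal sketch of the partitioning arithmetic after this change, assuming dmax is padded by 10% symmetrically to dmin (that line sits outside the hunk): the partition length is rounded up with math.ceil and each set's lower vertex is rounded to 3 decimals.

import math

def grid_trimf_params(data, npart):
    # Mirrors the arithmetic of GridPartitionerTrimf: pad the bounds by 10%,
    # round the step up, and emit [lower, centre, upper] triples for npart triangles.
    dmax = max(data); dmax = dmax + dmax * 0.10
    dmin = min(data); dmin = dmin - dmin * 0.10
    partlen = math.ceil((dmax - dmin) / npart)
    partition = math.ceil(dmin)
    triples = []
    for _ in range(npart):
        triples.append([round(partition - partlen, 3), partition, partition + partlen])
        partition += partlen
    return triples

print(grid_trimf_params([20, 35, 42, 58, 70], 5))
# [[6, 18, 30], [18, 30, 42], [30, 42, 54], [42, 54, 66], [54, 66, 78]]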

View File

@@ -1,5 +1,6 @@
 import numpy as np
 import pandas as pd
+import math
 from pyFTS import *
 
 class ProbabilisticFLRG(hofts.HighOrderFLRG):
@@ -35,6 +36,7 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
         self.flrgs = {}
         self.globalFrequency = 0
         self.isInterval = True
+        self.isDensity = True
 
     def generateFLRG(self, flrs):
         flrgs = {}
@@ -58,14 +60,16 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
         if flrg.strLHS() in self.flrgs:
             return self.flrgs[ flrg.strLHS() ].frequencyCount / self.globalFrequency
         else:
-            return 1/ self.globalFrequency
+            return 1.0 / self.globalFrequency
 
     def getUpper(self,flrg):
         if flrg.strLHS() in self.flrgs:
             tmp = self.flrgs[ flrg.strLHS() ]
             ret = sum(np.array([ tmp.getProbability(s) * self.setsDict[s].upper for s in tmp.RHS]))
         else:
-            ret = flrg.LHS[-1].upper
+            #print("hit" + flrg.strLHS())
+            #ret = flrg.LHS[-1].upper
+            ret = sum(np.array([ 0.33 * s.upper for s in flrg.LHS]))
         return ret
 
     def getLower(self,flrg):
@@ -73,7 +77,9 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
             tmp = self.flrgs[ flrg.strLHS() ]
             ret = sum(np.array([ tmp.getProbability(s) * self.setsDict[s].lower for s in tmp.RHS]))
         else:
-            ret = flrg.LHS[-1].lower
+            #print("hit" + flrg.strLHS())
+            #ret = flrg.LHS[-1].lower
+            ret = sum(np.array([ 0.33 * s.lower for s in flrg.LHS]))
         return ret
 
     def forecast(self,data):
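A small numeric illustration of the new fallback in getUpper/getLower for LHS patterns never seen in training: instead of taking only the last LHS set's bound, the bound becomes an equally weighted sum over all LHS sets (the 0.33 looks like a hard-coded 1/order for order 3). The bounds below are hypothetical.

# (lower, upper) bounds of the three hypothetical fuzzy sets in an order-3 LHS
lhs_bounds = [(10, 30), (20, 40), (30, 50)]

upper_old = lhs_bounds[-1][1]                            # previous rule: last set only -> 50
upper_new = sum(0.33 * up for (_, up) in lhs_bounds)     # patched rule -> 0.33*(30+40+50) = 39.6
lower_new = sum(0.33 * lo for (lo, _) in lhs_bounds)     # 0.33*(10+20+30) = 19.8
print(upper_old, upper_new, lower_new)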
@@ -88,6 +94,8 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
         for k in np.arange(self.order-1,l):
 
+            #print(k)
+
             affected_flrgs = []
             affected_flrgs_memberships = []
             norms = []
@@ -107,15 +115,18 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
                     idx = np.ravel(tmp) #flatten the array
 
                     if idx.size == 0:  # the element is out of the bounds of the Universe of Discourse
-                        if instance <= self.sets[0].lower:
+                        #print("high order - idx.size == 0 - " + str(instance))
+                        if math.ceil(instance) <= self.sets[0].lower:
                             idx = [0]
-                        if instance >= self.sets[-1].upper:
+                        elif math.ceil(instance) >= self.sets[-1].upper:
                             idx = [len(self.sets)-1]
+                            #print(idx)
+                        else:
+                            raise Exception( instance )
 
+                    #print(idx)
                     lags[count] = idx
                     count = count + 1
 
                 # Build the tree with all possible paths
                 root = tree.FLRGTreeNode(None)
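A hedged sketch of the out-of-bounds handling added here: when no fuzzy set fires for a lag value, the index is clamped to the first or last set of the Universe of Discourse, and anything in between raises. Bounds and set count are hypothetical.

import math

def clamp_index(instance, n_sets, uod_lower, uod_upper):
    # snap values below/above the Universe of Discourse to the edge sets
    if math.ceil(instance) <= uod_lower:
        return [0]
    elif math.ceil(instance) >= uod_upper:
        return [n_sets - 1]
    raise Exception(instance)

print(clamp_index(-3.2, 10, 0, 100))    # [0]  -> below the UoD
print(clamp_index(104.7, 10, 0, 100))   # [9]  -> above the UoD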
@@ -129,26 +140,43 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
                     flrg = hofts.HighOrderFLRG(self.order)
                     for kk in path: flrg.appendLHS(self.sets[ kk ])
 
+                    assert len(flrg.LHS) == subset.size, str(subset) + " -> " + str([s.name for s in flrg.LHS])
+
                     ##
                     affected_flrgs.append( flrg )
                     # Find the general membership of FLRG
                     affected_flrgs_memberships.append(min(self.getSequenceMembership(subset, flrg.LHS)))
+                    #print(self.getSequenceMembership(subset, flrg.LHS))
             else:
                 mv = common.fuzzyInstance(ndata[k],self.sets) # get all membership values
                 tmp = np.argwhere( mv ) # get the indices of values > 0
                 idx = np.ravel(tmp) # flatten the array
+
+                if idx.size == 0:  # the element is out of the bounds of the Universe of Discourse
+                    #print("idx.size == 0")
+                    if math.ceil(ndata[k]) <= self.sets[0].lower:
+                        idx = [0]
+                    elif math.ceil(ndata[k]) >= self.sets[-1].upper:
+                        idx = [len(self.sets)-1]
+                        #print(idx)
+                    else:
+                        raise Exception( ndata[k] )
+
+                #print(idx)
                 for kk in idx:
                     flrg = hofts.HighOrderFLRG(self.order)
                     flrg.appendLHS(self.sets[ kk ])
                     affected_flrgs.append( flrg )
+                    #print(mv[kk])
                     affected_flrgs_memberships.append(mv[kk])
 
             count = 0
             for flrg in affected_flrgs:
+                # find the bounds of each FLRG, weighted by probability and membership
                 norm = self.getProbability(flrg) * affected_flrgs_memberships[count]
+                if norm == 0:
+                    norm = self.getProbability(flrg) # * 0.001
                 up.append( norm * self.getUpper(flrg) )
                 lo.append( norm * self.getLower(flrg) )
                 norms.append(norm)
@@ -158,6 +186,7 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
             norm = sum(norms)
             if norm == 0:
                 ret.append( [ 0, 0 ] )
+                print("disparou")
             else:
                 ret.append( [ sum(lo)/norm, sum(up)/norm ] )
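A worked numeric example of the interval aggregation these hunks touch: each affected FLRG contributes bounds weighted by norm = probability × membership, the patch falls back to the bare probability when the membership is exactly zero, and the final interval is the weight-normalised sum. All numbers are made up.

flrgs = [
    {"p": 0.6, "mu": 0.8, "lo": 10, "up": 20},
    {"p": 0.3, "mu": 0.0, "lo": 15, "up": 30},   # zero membership -> fallback weight 0.3
]
lo, up, norms = [], [], []
for f in flrgs:
    norm = f["p"] * f["mu"]
    if norm == 0:
        norm = f["p"]                            # the patched fallback
    lo.append(norm * f["lo"])
    up.append(norm * f["up"])
    norms.append(norm)
norm = sum(norms)                                # 0.48 + 0.3 = 0.78
print([sum(lo) / norm, sum(up) / norm])          # [~11.92, ~23.85]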
@@ -165,12 +194,16 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
     def forecastAhead(self,data,steps):
         ret = [[data[k],data[k]] for k in np.arange(len(data)-self.order,len(data))]
 
-        for k in np.arange(self.order,steps):
+        #print(ret)
+        for k in np.arange(self.order-1,steps):
             if ret[-1][0] <= self.sets[0].lower and ret[-1][1] >= self.sets[-1].upper:
                 ret.append(ret[-1])
+                #print("disparou")
             else:
                 lower = self.forecast( [ret[x][0] for x in np.arange(k-self.order,k)] )
                 upper = self.forecast( [ret[x][1] for x in np.arange(k-self.order,k)] )
                 ret.append([np.min(lower),np.max(upper)])
 
         return ret
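A simplified, self-contained restatement of the recursion in forecastAhead (it sidesteps the exact index arithmetic of the patch): seed with the last `order` observations as degenerate intervals, then repeatedly forecast the lower bounds from the lower sequence and the upper bounds from the upper sequence. The toy one-step forecaster is hypothetical.

import numpy as np

def forecast_ahead(one_step, data, steps, order):
    # seed with the last `order` observations as degenerate [x, x] intervals
    ret = [[x, x] for x in data[-order:]]
    for _ in range(steps):
        lower = one_step([iv[0] for iv in ret[-order:]])   # one-step forecast of the lower bounds
        upper = one_step([iv[1] for iv in ret[-order:]])   # one-step forecast of the upper bounds
        ret.append([np.min(lower), np.max(upper)])
    return ret

# toy one-step "forecaster" that just widens the range of its inputs
toy = lambda xs: [[min(xs) - 1, max(xs) + 1]]
print(forecast_ahead(toy, [10, 12, 11, 13], steps=6, order=2))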
@@ -188,6 +221,53 @@ class ProbabilisticIntervalFTS(ifts.IntervalFTS):
                 grid[sbin] = grid[sbin] + 1
         return grid
 
+    def forecastDistributionAhead2(self,data,steps,resolution):
+        ret = []
+
+        intervals = self.forecastAhead(data,steps)
+
+        for k in np.arange(self.order,steps):
+
+            grid = self.getGridClean(resolution)
+            grid = self.gridCount(grid,resolution, intervals[k])
+
+            lags = {}
+
+            cc = 0
+            for x in np.arange(k-self.order,k):
+                tmp = []
+                for qt in np.arange(0,100,5):
+                    tmp.append(intervals[x][0] + qt*(intervals[x][1]-intervals[x][0])/100)
+                    tmp.append(intervals[x][1] - qt*(intervals[x][1]-intervals[x][0])/100)
+                tmp.append(intervals[x][0] + (intervals[x][1]-intervals[x][0])/2)
+
+                lags[cc] = tmp
+                cc = cc + 1
+
+            # Build the tree with all possible paths
+            root = tree.FLRGTreeNode(None)
+
+            self.buildTree(root,lags,0)
+
+            # Trace the possible paths and build the PFLRG's
+            for p in root.paths():
+                path = list(reversed(list(filter(None.__ne__, p))))
+
+                subset = [kk for kk in path]
+
+                qtle = self.forecast(subset)
+
+                grid = self.gridCount(grid,resolution, np.ravel(qtle))
+
+            tmp = np.array([ grid[k] for k in sorted(grid) ])
+            ret.append( tmp/sum(tmp) )
+
+            grid = self.getGridClean(resolution)
+
+        df = pd.DataFrame(ret, columns=sorted(grid))
+        return df
+
     def forecastDistributionAhead(self,data,steps,resolution):
         ret = []
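Finally, a hedged sketch of the grid bookkeeping that forecastDistributionAhead2 relies on: spread bins over the Universe of Discourse, count how many candidate forecast intervals overlap each bin, and normalise the counts into one row of the returned DataFrame. grid_clean and grid_count are simplified stand-ins for getGridClean and gridCount, and the bounds, resolution and intervals are hypothetical.

import numpy as np
import pandas as pd

def grid_clean(lo, hi, resolution):
    # one zeroed bin per grid point of the Universe of Discourse
    return {b: 0 for b in np.arange(lo, hi + resolution, resolution)}

def grid_count(grid, resolution, interval):
    # count a bin whenever the candidate interval overlaps [b, b + resolution)
    for b in grid:
        if interval[0] < b + resolution and interval[1] > b:
            grid[b] += 1
    return grid

grid = grid_clean(0, 100, 25)
for interval in [[10, 40], [30, 80]]:          # candidate forecasts for one step
    grid = grid_count(grid, 25, interval)
tmp = np.array([grid[b] for b in sorted(grid)])
row = tmp / sum(tmp)                           # normalised into a probability row
print(pd.DataFrame([row], columns=sorted(grid)))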