Refactorings

Petrônio Cândido de Lima e Silva 2017-02-27 15:53:29 -03:00
parent 84e6e1abbf
commit 7f13a24402
11 changed files with 637 additions and 402 deletions

View File

@@ -2,7 +2,6 @@
import numpy as np
import pandas as pd
from pyFTS.common import FuzzySet,SortedCollection
# Autocorrelation function estimate
@@ -33,6 +32,7 @@ def mape(targets, forecasts):
def smape(targets, forecasts, type=2):
return mape(targets, forecasts)
if type == 1:
return np.mean(np.abs(forecasts - targets) / ((forecasts + targets)/2))
elif type == 2:
@@ -52,8 +52,10 @@ def UStatistic(targets, forecasts):
naive = []
y = []
for k in np.arange(0,l-1):
y.append((forecasts[k ] - targets[k ]) ** 2)
naive.append((targets[k + 1] - targets[k]) ** 2)
#y.append((forecasts[k ] - targets[k ]) ** 2)
y.append(((forecasts[k + 1] - targets[k + 1]) / targets[k]) ** 2)
#naive.append((targets[k + 1] - targets[k]) ** 2)
naive.append(((targets[k + 1] - targets[k]) / targets[k]) ** 2)
return np.sqrt(sum(y) / sum(naive))
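For reference, the revised statistic is Theil's U2 computed on relative errors rather than absolute squared errors. A minimal standalone sketch of the same formula (the arrays are made up; this is not a library call):
import numpy as np
def theils_u2(targets, forecasts):
    # relative squared errors of the model vs. the naive random-walk forecast
    targets = np.asarray(targets, dtype=float)
    forecasts = np.asarray(forecasts, dtype=float)
    num = ((forecasts[1:] - targets[1:]) / targets[:-1]) ** 2
    den = ((targets[1:] - targets[:-1]) / targets[:-1]) ** 2
    return np.sqrt(num.sum() / den.sum())
# U2 < 1 means the model beats the naive "tomorrow equals today" forecast
print(theils_u2([100, 102, 101, 105], [100, 101, 103, 104]))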
@@ -109,39 +111,3 @@ def coverage(targets, forecasts):
else:
preds.append(0)
return np.mean(preds)
def pmf_to_cdf(density):
ret = []
for row in density.index:
tmp = []
prev = 0
for col in density.columns:
prev += density[col][row]
tmp.append( prev )
ret.append(tmp)
df = pd.DataFrame(ret, columns=density.columns)
return df
def heavyside_cdf(bins, targets):
ret = []
for t in targets:
result = [1 if b >= t else 0 for b in bins]
ret.append(result)
df = pd.DataFrame(ret, columns=bins)
return df
# Continuous Ranked Probability Score
def crps(targets, densities):
l = len(densities.columns)
n = len(densities.index)
Ff = pmf_to_cdf(densities)
Fa = heavyside_cdf(densities.columns, targets)
_crps = float(0.0)
for k in densities.index:
_crps += sum([ (Ff[col][k]-Fa[col][k])**2 for col in densities.columns])
return _crps / float(l * n)
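To see how the three helpers compose, here is a hedged, self-contained sketch with a made-up density table (rows are time steps, columns are bins); it reproduces the crps() arithmetic with vectorized pandas calls:
import numpy as np
import pandas as pd
bins = [0.0, 1.0, 2.0, 3.0]
densities = pd.DataFrame([[0.1, 0.4, 0.4, 0.1],
                          [0.0, 0.2, 0.5, 0.3]], columns=bins)
targets = [1.5, 2.5]
Ff = densities.cumsum(axis=1)    # pmf_to_cdf: row-wise accumulation
Fa = pd.DataFrame([[1 if b >= t else 0 for b in bins] for t in targets],
                  columns=bins)  # heavyside_cdf: step CDF 1{bin >= target}
score = ((Ff - Fa) ** 2).values.sum() / float(len(bins) * len(targets))
print(score)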

View File

@@ -5,14 +5,13 @@ import numpy as np
import pandas as pd
import matplotlib as plt
import matplotlib.colors as pltcolors
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# from sklearn.cross_validation import KFold
from pyFTS.benchmarks import Measures, naive, ResidualAnalysis, ProbabilityDistribution
from pyFTS.benchmarks import Measures, naive, ResidualAnalysis
from pyFTS.partitioners import Grid
from pyFTS.common import Membership, FuzzySet, FLR, Transformations, Util
from pyFTS import fts, chen, yu, ismailefendi, sadaei, hofts, hwang, pwfts, ifts
from pyFTS import fts, chen, yu, ismailefendi, sadaei, hofts, hwang, pfts, ifts
colors = ['grey', 'rosybrown', 'maroon', 'red','orange', 'yellow', 'olive', 'green',
'cyan', 'blue', 'darkblue', 'purple', 'darkviolet']
@@ -23,113 +22,21 @@ styles = ['-','--','-.',':','.']
nsty = len(styles)
def get_point_methods():
return [naive.Naive, chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,
sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, pwfts.ProbabilisticWeightedFTS]
def sliding_window(data, windowsize, train=0.8,models=None,partitioners=[Grid.GridPartitioner],
partitions=[10], max_order=3,transformation=None,indexer=None):
if models is None:
models = get_point_methods()
objs = {}
lcolors = {}
rmse = {}
smape = {}
u = {}
for train,test in Util.sliding_window(data, windowsize, train):
for partition in partitions:
for partitioner in partitioners:
pttr = str(partitioner.__module__).split('.')[-1]
if transformation is not None:
data_train_fs = partitioner(transformation.apply(train), partition)
else:
data_train_fs = partitioner(train, partition)
for count, model in enumerate(models, start=0):
mfts = model("")
_key = mfts.shortname + " " + pttr+ " q = " +str(partition)
mfts.partitioner = data_train_fs
if not mfts.isHighOrder:
if _key not in objs:
objs[_key] = mfts
lcolors[_key] = colors[count % ncol]
rmse[_key] = []
smape[_key] = []
u[_key] = []
if transformation is not None:
mfts.appendTransformation(transformation)
mfts.train(train, data_train_fs.sets)
_rmse, _smape, _u = get_point_statistics(test, mfts, indexer)
rmse[_key].append(_rmse)
smape[_key].append(_smape)
u[_key].append(_u)
else:
for order in np.arange(1, max_order + 1):
if order >= mfts.minOrder:
mfts = model("")
_key = mfts.shortname + " n = " + str(order) + " " + pttr + " q = " + str(partition)
mfts.partitioner = data_train_fs
if _key not in objs:
objs[_key] = mfts
lcolors[_key] = colors[count % ncol]
rmse[_key] = []
smape[_key] = []
u[_key] = []
if transformation is not None:
mfts.appendTransformation(transformation)
mfts.train(train, data_train_fs.sets, order=order)
_rmse, _smape, _u = get_point_statistics(test, mfts, indexer)
rmse[_key].append(_rmse)
smape[_key].append(_smape)
u[_key].append(_u)
ret = "Model\t&Order\t&Scheme\t&Partitions\t&RMSE AVG\t&RMSE STD\t& SMAPE AVG\t& SMAPE STD\t& U AVG\t& U STD \\\\ \n"
for k in sorted(objs.keys()):
mfts = objs[k]
ret += mfts.shortname + "\t &"
ret += str( mfts.order ) + "\t &"
ret += mfts.partitioner.name + "\t &"
ret += str(mfts.partitioner.partitions) + "\t &"
ret += str(round(np.mean(rmse[k]),2)) + "\t &"
ret += str(round(np.std(rmse[k]), 2)) + "\t &"
ret += str(round(np.mean(smape[k]), 2)) + "\t &"
ret += str(round(np.std(smape[k]), 2)) + "\t &"
ret += str(round(np.mean(u[k]), 2)) + "\t &"
ret += str(round(np.std(u[k]), 2)) + "\t &"
ret += str(len(mfts)) + "\\\\ \n"
print(ret)
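A hedged usage sketch of the sliding-window benchmark above, in the same style as the test script at the end of this commit (the synthetic series and parameter values are illustrative only):
import numpy as np
from pyFTS.partitioners import Grid, Entropy
from pyFTS.common import Transformations
from pyFTS.benchmarks import benchmarks as bchmk
data = np.random.normal(0, 1.0, 2000)
diff = Transformations.Differential(1)
bchmk.sliding_window(data, 500, train=0.7, partitions=[10, 20],
                     partitioners=[Grid.GridPartitioner, Entropy.EntropyPartitioner],
                     max_order=2, transformation=diff)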
def allPointForecasters(data_train, data_test, partitions, max_order=3, statistics=True, residuals=True,
series=True, save=False, file=None, tam=[20, 5], models=None, transformation=None,
distributions=False):
series=True, save=False, file=None, tam=[20, 5], models=None, transformation=None):
if models is None:
models = get_point_methods()
models = [naive.Naive, chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,
sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, pfts.ProbabilisticFTS]
objs = []
if transformation is not None:
data_train_fs = Grid.GridPartitionerTrimf(transformation.apply(data_train),partitions)
else:
data_train_fs = Grid.GridPartitionerTrimf(data_train, partitions)
data_train_fs = Grid.GridPartitioner(data_train, partitions, transformation=transformation).sets
# if transformation is not None:
# data_train_fs = Grid.GridPartitionerTrimf(transformation.apply(data_train),partitions)
# else:
# data_train_fs = Grid.GridPartitionerTrimf(data_train, partitions)
count = 1
@@ -152,10 +59,10 @@ def allPointForecasters(data_train, data_test, partitions, max_order=3, statisti
mfts.appendTransformation(transformation)
mfts.train(data_train, data_train_fs, order=order)
objs.append(mfts)
lcolors.append(colors[(count + order) % ncol])
lcolors.append(colors[count % ncol])
if statistics:
print(print_point_statistics(data_test, objs))
print(getPointStatistics(data_test, objs))
if residuals:
print(ResidualAnalysis.compareResiduals(data_test, objs))
@@ -165,53 +72,16 @@ def allPointForecasters(data_train, data_test, partitions, max_order=3, statisti
plotComparedSeries(data_test, objs, lcolors, typeonlegend=False, save=save, file=file, tam=tam,
intervals=False)
if distributions:
lcolors.insert(0,'black')
pmfs = []
pmfs.append(
ProbabilityDistribution.ProbabilityDistribution("Original", 100, [min(data_test), max(data_test)], data=data_test) )
for m in objs:
forecasts = m.forecast(data_test)
pmfs.append(
ProbabilityDistribution.ProbabilityDistribution(m.shortname, 100, [min(data_test), max(data_test)],
data=forecasts))
print(getProbabilityDistributionStatistics(pmfs,data_test))
plotProbabilityDistributions(pmfs, lcolors,tam=tam)
def get_point_statistics(data, model, indexer=None):
if indexer is not None:
ndata = np.array(indexer.get_data(data[model.order:]))
else:
ndata = np.array(data[model.order:])
if model.isMultivariate or indexer is None:
forecasts = model.forecast(data)
elif not model.isMultivariate and indexer is not None:
forecasts = model.forecast(indexer.get_data(data))
if model.hasSeasonality:
nforecasts = np.array(forecasts)
else:
nforecasts = np.array(forecasts[:-1])
ret = list()
ret.append(np.round(Measures.rmse(ndata, nforecasts), 2))
ret.append(np.round(Measures.smape(ndata, nforecasts), 2))
ret.append(np.round(Measures.UStatistic(ndata, nforecasts), 2))
return ret
def print_point_statistics(data, models, externalmodels = None, externalforecasts = None, indexers=None):
ret = "Model & Order & RMSE & SMAPE & Theil's U \\\\ \n"
for count,model in enumerate(models,start=0):
_rmse, _smape, _u = get_point_statistics(data, model, indexers[count])
ret += model.shortname + " & "
ret += str(model.order) + " & "
ret += str(_rmse) + " & "
ret += str(_smape)+ " & "
ret += str(_u)
def getPointStatistics(data, models, externalmodels = None, externalforecasts = None):
ret = "Model & Order & RMSE & MAPE & Theil's U \\\\ \n"
for fts in models:
forecasts = fts.forecast(data)
ret += fts.shortname + " & "
ret += str(fts.order) + " & "
ret += str(round(Measures.rmse(np.array(data[fts.order:]), np.array(forecasts[:-1])), 2)) + " & "
ret += str(round(Measures.smape(np.array(data[fts.order:]), np.array(forecasts[:-1])), 2))+ " & "
ret += str(round(Measures.UStatistic(np.array(data[fts.order:]), np.array(forecasts[:-1])), 2))
#ret += str(round(Measures.TheilsInequality(np.array(data[fts.order:]), np.array(forecasts[:-1])), 4))
ret += " \\\\ \n"
if externalmodels is not None:
@@ -219,28 +89,17 @@ def print_point_statistics(data, models, externalmodels = None, externalforecast
for k in np.arange(0,l):
ret += externalmodels[k] + " & "
ret += " 1 & "
ret += str(round(Measures.rmse(data, externalforecasts[k][:-1]), 2)) + " & "
ret += str(round(Measures.smape(data, externalforecasts[k][:-1]), 2))+ " & "
ret += str(round(Measures.UStatistic(data, externalforecasts[k][:-1]), 2))
ret += str(round(Measures.rmse(data[fts.order:], externalforecasts[k][:-1]), 2)) + " & "
ret += str(round(Measures.smape(data[fts.order:], externalforecasts[k][:-1]), 2))+ " & "
ret += str(round(Measures.UStatistic(np.array(data[fts.order:]), np.array(externalforecasts[k][:-1])), 2))
ret += " \\\\ \n"
return ret
def getProbabilityDistributionStatistics(pmfs, data):
ret = "Model & Entropy & Empirical Likelihood & Pseudo Likelihood \\\\ \n"
for k in pmfs:
ret += k.name + " & "
ret += str(k.entropy()) + " & "
ret += str(k.empiricalloglikelihood())+ " & "
ret += str(k.pseudologlikelihood(data))
ret += " \\\\ \n"
return ret
def allIntervalForecasters(data_train, data_test, partitions, max_order=3,save=False, file=None, tam=[20, 5],
models=None, transformation=None):
if models is None:
models = [ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS]
models = [ifts.IntervalFTS, pfts.ProbabilisticFTS]
objs = []
@@ -296,7 +155,7 @@ def plotDistribution(dist):
def plotComparedSeries(original, models, colors, typeonlegend=False, save=False, file=None, tam=[20, 5],
points=True, intervals=True, linewidth=1.5):
intervals=True):
fig = plt.figure(figsize=tam)
ax = fig.add_subplot(111)
@@ -305,10 +164,10 @@ def plotComparedSeries(original, models, colors, typeonlegend=False, save=False,
legends = []
ax.plot(original, color='black', label="Original", linewidth=linewidth*1.5)
ax.plot(original, color='black', label="Original", linewidth=1.5)
for count, fts in enumerate(models, start=0):
if fts.hasPointForecasting and points:
if fts.hasPointForecasting and not intervals:
forecasted = fts.forecast(original)
mi.append(min(forecasted) * 0.95)
ma.append(max(forecasted) * 1.05)
@@ -316,7 +175,7 @@ def plotComparedSeries(original, models, colors, typeonlegend=False, save=False,
forecasted.insert(0, None)
lbl = fts.shortname
if typeonlegend: lbl += " (Point)"
ax.plot(forecasted, color=colors[count], label=lbl, ls="-",linewidth=linewidth)
ax.plot(forecasted, color=colors[count], label=lbl, ls="-")
if fts.hasIntervalForecasting and intervals:
forecasted = fts.forecastInterval(original)
@@ -329,8 +188,8 @@ def plotComparedSeries(original, models, colors, typeonlegend=False, save=False,
upper.insert(0, None)
lbl = fts.shortname
if typeonlegend: lbl += " (Interval)"
ax.plot(lower, color=colors[count], label=lbl, ls="--",linewidth=linewidth)
ax.plot(upper, color=colors[count], ls="--",linewidth=linewidth)
ax.plot(lower, color=colors[count], label=lbl, ls="-")
ax.plot(upper, color=colors[count], ls="-")
handles0, labels0 = ax.get_legend_handles_labels()
lgd = ax.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1))
@@ -345,82 +204,11 @@ def plotComparedSeries(original, models, colors, typeonlegend=False, save=False,
Util.showAndSaveImage(fig, file, save, lgd=legends)
def plotProbabilityDistributions(pmfs,lcolors,tam=[15, 7]):
fig = plt.figure(figsize=tam)
ax = fig.add_subplot(111)
for k,m in enumerate(pmfs,start=0):
m.plot(ax, color=lcolors[k])
handles0, labels0 = ax.get_legend_handles_labels()
ax.legend(handles0, labels0)
def allAheadForecasters(data_train, data_test, partitions, start, steps, resolution = None, max_order=3,save=False, file=None, tam=[20, 5],
models=None, transformation=None, option=2):
if models is None:
models = [pwfts.ProbabilisticWeightedFTS]
if resolution is None: resolution = (max(data_train) - min(data_train)) / 100
objs = []
if transformation is not None:
data_train_fs = Grid.GridPartitionerTrimf(transformation.apply(data_train),partitions)
else:
data_train_fs = Grid.GridPartitionerTrimf(data_train, partitions)
lcolors = []
for count, model in Util.enumerate2(models, start=0, step=2):
mfts = model("")
if not mfts.isHighOrder:
if transformation is not None:
mfts.appendTransformation(transformation)
mfts.train(data_train, data_train_fs)
objs.append(mfts)
lcolors.append( colors[count % ncol] )
else:
for order in np.arange(1,max_order+1):
if order >= mfts.minOrder:
mfts = model(" n = " + str(order))
if transformation is not None:
mfts.appendTransformation(transformation)
mfts.train(data_train, data_train_fs, order=order)
objs.append(mfts)
lcolors.append(colors[count % ncol])
distributions = [False for k in objs]
distributions[0] = True
print(getDistributionStatistics(data_test[start:], objs, steps, resolution))
#plotComparedIntervalsAhead(data_test, objs, lcolors, distributions=, save=save, file=file, tam=tam, intervals=True)
def getDistributionStatistics(original, models, steps, resolution):
ret = "Model & Order & Interval & Distribution \\\\ \n"
for fts in models:
densities1 = fts.forecastAheadDistribution(original,steps,resolution, parameters=3)
densities2 = fts.forecastAheadDistribution(original, steps, resolution, parameters=2)
ret += fts.shortname + " & "
ret += str(fts.order) + " & "
ret += str(round(Measures.crps(original, densities1), 3)) + " & "
ret += str(round(Measures.crps(original, densities2), 3)) + " \\\\ \n"
return ret
def plotComparedIntervalsAhead(original, models, colors, distributions, time_from, time_to,
interpol=False, save=False, file=None, tam=[20, 5], resolution=None,
cmap='Blues',option=2):
interpol=False, save=False, file=None, tam=[20, 5], resolution=None):
fig = plt.figure(figsize=tam)
ax = fig.add_subplot(111)
cm = plt.get_cmap(cmap)
cNorm = pltcolors.Normalize(vmin=0, vmax=1)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
if resolution is None: resolution = (max(original) - min(original)) / 100
mi = []
@@ -429,44 +217,26 @@ def plotComparedIntervalsAhead(original, models, colors, distributions, time_fro
for count, fts in enumerate(models, start=0):
if fts.hasDistributionForecasting and distributions[count]:
density = fts.forecastAheadDistribution(original[time_from - fts.order:time_from],
time_to, resolution, parameters=option)
time_to, resolution, parameters=True)
Y = []
X = []
C = []
S = []
y = density.columns
t = len(y)
ss = time_to ** 2
for k in density.index:
#alpha = [scalarMap.to_rgba(density[col][k]) for col in density.columns]
col = [density[col][k]*5 for col in density.columns]
alpha = np.array([density[q][k] for q in density]) * 100
x = [time_from + k for x in np.arange(0, t)]
s = [ss for x in np.arange(0, t)]
ic = resolution/10
for cc in np.arange(0, resolution, ic):
Y.append(y + cc)
X.append(x)
C.append(col)
S.append(s)
Y = np.hstack(Y)
X = np.hstack(X)
C = np.hstack(C)
S = np.hstack(S)
s = ax.scatter(X, Y, c=C, marker='s',s=S, linewidths=0, edgecolors=None, cmap=cmap)
s.set_clim([0, 1])
cb = fig.colorbar(s)
cb.set_label('Density')
for cc in np.arange(0, resolution, 5):
ax.scatter(x, y + cc, c=alpha, marker='s', linewidths=0, cmap='Oranges', edgecolors=None)
if interpol and k < max(density.index):
diffs = [(density[q][k + 1] - density[q][k]) / 50 for q in density]
for p in np.arange(0, 50):
xx = [time_from + k + 0.02 * p for q in np.arange(0, t)]
alpha2 = np.array(
[density[density.columns[q]][k] + diffs[q] * p for q in np.arange(0, t)]) * 100
ax.scatter(xx, y, c=alpha2, marker='s', linewidths=0, cmap='Oranges',
norm=pltcolors.Normalize(vmin=0, vmax=1), vmin=0, vmax=1, edgecolors=None)
if fts.hasIntervalForecasting:
forecasts = fts.forecastAheadInterval(original[time_from - fts.order:time_from], time_to)
@@ -508,8 +278,6 @@ def plotComparedIntervalsAhead(original, models, colors, distributions, time_fro
ax.set_xlabel('T')
ax.set_xlim([0, len(original)])
#plt.colorbar()
Util.showAndSaveImage(fig, file, save)
@@ -795,8 +563,7 @@ def compareModelsTable(original, models_fo, models_ho):
def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=None, tam=[10, 15],
plotforecasts=False, elev=30, azim=144, intervals=False,parameters=None):
_3d = len(orders) > 1
plotforecasts=False, elev=30, azim=144, intervals=False):
ret = []
errors = np.array([[0 for k in range(len(partitions))] for kk in range(len(orders))])
forecasted_best = []
@@ -817,16 +584,13 @@ def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=N
sets = Grid.GridPartitionerTrimf(train, p)
for oc, o in enumerate(orders, start=0):
fts = model("q = " + str(p) + " n = " + str(o))
fts.train(train, sets, o,parameters=parameters)
fts.train(train, sets, o)
if not intervals:
forecasted = fts.forecast(test)
if not fts.hasSeasonality:
error = Measures.rmse(np.array(test[o:]), np.array(forecasted[:-1]))
else:
error = Measures.rmse(np.array(test[o:]), np.array(forecasted))
error = Measures.rmse(np.array(test[o:]), np.array(forecasted[:-1]))
for kk in range(o):
forecasted.insert(0, None)
if plotforecasts: ax0.plot(forecasted, label=fts.name)
if plotforecasts: ax0.plot(forecasted, label=fts.name)
else:
forecasted = fts.forecastInterval(test)
error = 1.0 - Measures.rmse_interval(np.array(test[o:]), np.array(forecasted[:-1]))
@@ -841,22 +605,15 @@ def simpleSearch_RMSE(train, test, model, partitions, orders, save=False, file=N
# handles0, labels0 = ax0.get_legend_handles_labels()
# ax0.legend(handles0, labels0)
ax0.plot(test, label="Original", linewidth=3.0, color="black")
if _3d: ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
if not plotforecasts: ax1 = Axes3D(fig, rect=[0, 1, 0.9, 0.9], elev=elev, azim=azim)
# ax1 = fig.add_axes([0.6, 0.5, 0.45, 0.45], projection='3d')
if _3d:
ax1.set_title('Error Surface')
ax1.set_ylabel('Model order')
ax1.set_xlabel('Number of partitions')
ax1.set_zlabel('RMSE')
X, Y = np.meshgrid(partitions, orders)
surf = ax1.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True)
else:
ax1 = fig.add_axes([0, 1, 0.9, 0.9])
ax1.set_title('Error Curve')
ax1.set_ylabel('Number of partitions')
ax1.set_xlabel('RMSE')
ax0.plot(errors,partitions)
ax1.set_title('Error Surface')
ax1.set_ylabel('Model order')
ax1.set_xlabel('Number of partitions')
ax1.set_zlabel('RMSE')
X, Y = np.meshgrid(partitions, orders)
surf = ax1.plot_surface(X, Y, errors, rstride=1, cstride=1, antialiased=True)
ret.append(best)
ret.append(forecasted_best)
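The core of the search reduces to a two-level grid over partition counts and model orders, keeping the lowest RMSE; a minimal sketch under that reading (helper names are hypothetical, and the real function also handles plotting and interval models):
import numpy as np
def grid_search_rmse(train, test, model_cls, partitions, orders, build_sets):
    best, best_err = None, np.inf
    for p in partitions:
        sets = build_sets(train, p)          # e.g. Grid.GridPartitionerTrimf
        for o in orders:
            fts = model_cls("q = %d n = %d" % (p, o))
            fts.train(train, sets, o)
            forecasted = fts.forecast(test)
            err = np.sqrt(np.mean((np.array(test[o:]) - np.array(forecasted[:-1])) ** 2))
            if err < best_err:
                best, best_err = fts, err
    return best, best_err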
@@ -877,7 +634,7 @@ def pftsExploreOrderAndPartitions(data,save=False, file=None):
axes[2].set_title('Interval Forecasts by Order')
for order in np.arange(1, 6):
fts = pwfts.ProbabilisticWeightedFTS("")
fts = pfts.ProbabilisticFTS("")
fts.shortname = "n = " + str(order)
fts.train(data, data_fs1, order=order)
point_forecasts = fts.forecast(data)
@@ -899,7 +656,7 @@ def pftsExploreOrderAndPartitions(data,save=False, file=None):
for partitions in np.arange(5, 11):
data_fs = Grid.GridPartitionerTrimf(data, partitions)
fts = pwfts.ProbabilisticWeightedFTS("")
fts = pfts.ProbabilisticFTS("")
fts.shortname = "q = " + str(partitions)
fts.train(data, data_fs, 1)
point_forecasts = fts.forecast(data)
@@ -926,5 +683,4 @@ def pftsExploreOrderAndPartitions(data,save=False, file=None):
plt.tight_layout()
Util.showAndSaveImage(fig, file, save)
Util.showAndSaveImage(fig, file, save)

View File

@@ -30,11 +30,16 @@ def enumerate2(xs, start=0, step=1):
yield (start, x)
start += step
def sliding_window(data, windowsize, train=0.8):
def sliding_window(data, windowsize, train=0.8, inc=0.1):
l = len(data)
ttrain = int(round(windowsize * train, 0))
for count in np.arange(0,l,windowsize):
yield ( data[count : count + ttrain], data[count + ttrain : count + windowsize] )
ic = int(round(windowsize * inc, 0))
for count in np.arange(0,l-windowsize+ic,ic):
if count + windowsize > l:
_end = l
else:
_end = count + windowsize
yield (count, data[count : count + ttrain], data[count + ttrain : _end] )
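Hedged illustration of the new inc parameter: windows now advance by inc * windowsize instead of a whole window, so consecutive windows overlap, and the window's start index is yielded alongside the train/test splits:
from pyFTS.common import Util
data = list(range(100))
for start, train, test in Util.sliding_window(data, 50, train=0.8, inc=0.5):
    print(start, len(train), len(test))
# prints 0 40 10, 25 40 10, 50 40 10: steps of 25, 40 training points each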
def persist_obj(obj, file):

View File

@@ -77,18 +77,14 @@ def c_means(k, dados, tam):
return centroides
class CMeansPartitioner(partitioner.Partitioner):
def __init__(self, npart,data,func = Membership.trimf):
super(CMeansPartitioner, self).__init__("CMeans ",data,npart,func)
def __init__(self, data, npart, func = Membership.trimf, transformation=None):
super(CMeansPartitioner, self).__init__("CMeans", data, npart, func=func, transformation=transformation)
def build(self, data):
sets = []
dmax = max(data)
dmax += dmax * 0.10
dmin = min(data)
dmin -= dmin * 0.10
centroides = c_means(self.partitions, data, 1)
centroides.append(dmax)
centroides.append(dmin)
centroides.append(self.max)
centroides.append(self.min)
centroides = list(set(centroides))
centroides.sort()
for c in np.arange(1, len(centroides) - 1):

View File

@@ -77,19 +77,15 @@ def bestSplit(data, npart):
return [threshold]
class EntropyPartitioner(partitioner.Partitioner):
def __init__(self, data,npart,func = Membership.trimf):
super(EntropyPartitioner, self).__init__("Entropy" ,data,npart,func)
def __init__(self, data, npart, func = Membership.trimf, transformation=None):
super(EntropyPartitioner, self).__init__("Entropy", data, npart, func=func, transformation=transformation)
def build(self, data):
sets = []
dmax = max(data)
dmax += dmax * 0.10
dmin = min(data)
dmin -= dmin * 0.10
partitions = bestSplit(data, self.partitions)
partitions.append(dmin)
partitions.append(dmax)
partitions.append(self.min)
partitions.append(self.max)
partitions = list(set(partitions))
partitions.sort()
for c in np.arange(1, len(partitions) - 1):

View File

@@ -101,18 +101,15 @@ def fuzzy_cmeans(k, dados, tam, m, deltadist=0.001):
class FCMPartitioner(partitioner.Partitioner):
def __init__(self, data,npart,func = Membership.trimf):
super(FCMPartitioner, self).__init__("FCM ",data,npart,func)
def __init__(self, data,npart,func = Membership.trimf, transformation=None):
super(FCMPartitioner, self).__init__("FCM", data, npart, func=func, transformation=transformation)
def build(self,data):
sets = []
dmax = max(data)
dmax = dmax + dmax*0.10
dmin = min(data)
dmin = dmin - dmin*0.10
centroides = fuzzy_cmeans(self.partitions, data, 1, 2)
centroides.append(dmax)
centroides.append(dmin)
centroides.append(self.max)
centroides.append(self.min)
centroides = list(set(centroides))
centroides.sort()
for c in np.arange(1,len(centroides)-1):

View File

@@ -7,28 +7,17 @@ from pyFTS.partitioners import partitioner
class GridPartitioner(partitioner.Partitioner):
def __init__(self, data,npart,func = Membership.trimf):
super(GridPartitioner, self).__init__("Grid",data,npart,func)
def __init__(self, data,npart,func = Membership.trimf, transformation=None):
super(GridPartitioner, self).__init__("Grid", data, npart, func=func, transformation=transformation)
def build(self, data):
sets = []
_min = min(data)
if _min < 0:
dmin = _min * 1.1
else:
dmin = _min * 0.9
_max = max(data)
if _max > 0:
dmax = _max * 1.1
else:
dmax = _max * 0.9
dlen = dmax - dmin
dlen = self.max - self.min
partlen = dlen / self.partitions
count = 0
for c in np.arange(dmin, dmax, partlen):
for c in np.arange(self.min, self.max, partlen):
if self.membership_function == Membership.trimf:
sets.append(
FuzzySet.FuzzySet(self.prefix + str(count), Membership.trimf, [c - partlen, c, c + partlen],c))
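The grid itself is simple arithmetic over the padded universe of discourse; a toy sketch with made-up bounds (self.min and self.max are padded by 10% in Partitioner.__init__, shown further down in partitioner.py):
min_, max_, npart = 0.0, 100.0, 5
partlen = (max_ - min_) / npart
centers = [min_ + k * partlen for k in range(npart)]
print([(c - partlen, c, c + partlen) for c in centers])   # trimf parameters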

View File

@@ -11,11 +11,12 @@ from pyFTS.partitioners import partitioner
class HuarngPartitioner(partitioner.Partitioner):
def __init__(self, npart,data,func = Membership.trimf):
super(HuarngPartitioner, self).__init__("Huarng",data,npart,func)
def __init__(self, data,npart,func = Membership.trimf, transformation=None):
super(HuarngPartitioner, self).__init__("Huarng", data, npart, func=func, transformation=transformation)
def build(self, data):
data2 = Transformations.differential(data)
diff = Transformations.Differential(1)
data2 = diff.apply(data)
davg = np.abs( np.mean(data2) / 2 )
if davg <= 1.0:
@@ -28,13 +29,10 @@ class HuarngPartitioner(partitioner.Partitioner):
base = 100
sets = []
dmax = max(data)
dmax += dmax * 0.10
dmin = min(data)
dmin -= dmin * 0.10
dlen = dmax - dmin
dlen = self.max - self.min
npart = math.ceil(dlen / base)
partition = math.ceil(dmin)
partition = math.ceil(self.min)
for c in range(npart):
sets.append(
FuzzySet.FuzzySet(self.prefix + str(c), Membership.trimf, [partition - base, partition, partition + base], partition))

View File

@@ -2,27 +2,34 @@ from pyFTS.common import FuzzySet, Membership
import numpy as np
class Partitioner(object):
def __init__(self,name,data,npart,func = Membership.trimf, names=None, prefix="A"):
def __init__(self,name,data,npart,func = Membership.trimf, names=None, prefix="A", transformation=None):
self.name = name
self.partitions = npart
self.sets = []
self.membership_function = func
self.setnames = names
self.prefix = prefix
_min = min(data)
self.transformation = transformation
if transformation is not None:
ndata = transformation.apply(data)
else:
ndata = data
_min = min(ndata)
if _min < 0:
self.min = _min * 1.1
else:
self.min = _min * 0.9
_max = max(data)
_max = max(ndata)
if _max > 0:
self.max = _max * 1.1
else:
self.max = _max * 0.9
self.sets = self.build(data)
self.sets = self.build(ndata)
def build(self,data):
def build(self, data):
pass
def plot(self,ax):
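A hedged usage sketch of the new transformation parameter: the universe of discourse is now computed on the transformed series rather than the raw one (the random-walk series below is made up, and this assumes Differential.apply accepts a plain array):
import numpy as np
from pyFTS.common import Transformations
from pyFTS.partitioners import Grid
data = np.cumsum(np.random.normal(0, 1.0, 500))    # made-up random walk
diff = Transformations.Differential(1)
part = Grid.GridPartitioner(data, 10, transformation=diff)
print(part.min, part.max)   # bounds of the differenced series, padded by 10%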

pfts.py (new file, 487 lines)
View File

@@ -0,0 +1,487 @@
#!/usr/bin/python
# -*- coding: utf8 -*-
import numpy as np
import pandas as pd
import math
from operator import itemgetter
from pyFTS.common import FuzzySet, SortedCollection
from pyFTS import hofts, ifts, tree
class ProbabilisticFLRG(hofts.HighOrderFLRG):
def __init__(self, order):
super(ProbabilisticFLRG, self).__init__(order)
self.RHS = {}
self.frequencyCount = 0.0
def appendRHS(self, c):
self.frequencyCount += 1.0
if c.name in self.RHS:
self.RHS[c.name] += 1.0
else:
self.RHS[c.name] = 1.0
def getProbability(self, c):
return self.RHS[c] / self.frequencyCount
def __str__(self):
tmp2 = ""
for c in sorted(self.RHS):
if len(tmp2) > 0:
tmp2 = tmp2 + ", "
tmp2 = tmp2 + "(" + str(round(self.RHS[c] / self.frequencyCount, 3)) + ")" + c
return self.strLHS() + " -> " + tmp2
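The counting scheme is worth seeing in isolation: P(RHS | LHS) is just a per-set count over the rule's total frequency. A standalone toy (names are hypothetical, not the library API):
class CountingRule:
    def __init__(self):
        self.rhs, self.total = {}, 0.0
    def append_rhs(self, name):
        # mirror ProbabilisticFLRG.appendRHS: bump both counters
        self.total += 1.0
        self.rhs[name] = self.rhs.get(name, 0.0) + 1.0
    def probability(self, name):
        return self.rhs[name] / self.total
r = CountingRule()
for s in ["A1", "A2", "A1", "A1"]:
    r.append_rhs(s)
print(r.probability("A1"))   # 0.75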
class ProbabilisticFTS(ifts.IntervalFTS):
def __init__(self, name):
super(ProbabilisticFTS, self).__init__("PFTS")
self.shortname = "PFTS " + name
self.name = "Probabilistic FTS"
self.detail = "Silva, P.; Guimarães, F.; Sadaei, H."
self.flrgs = {}
self.globalFrequency = 0
self.hasPointForecasting = True
self.hasIntervalForecasting = True
self.hasDistributionForecasting = True
self.isHighOrder = True
def generateFLRG(self, flrs):
flrgs = {}
l = len(flrs)
for k in np.arange(self.order, l+1):
if self.dump: print("FLR: " + str(k))
flrg = ProbabilisticFLRG(self.order)
for kk in np.arange(k - self.order, k):
flrg.appendLHS(flrs[kk].LHS)
if self.dump: print("LHS: " + str(flrs[kk]))
if flrg.strLHS() in flrgs:
flrgs[flrg.strLHS()].appendRHS(flrs[k-1].RHS)
else:
flrgs[flrg.strLHS()] = flrg
flrgs[flrg.strLHS()].appendRHS(flrs[k-1].RHS)
if self.dump: print("RHS: " + str(flrs[k-1]))
self.globalFrequency += 1
return (flrgs)
def addNewPFLGR(self,flrg):
if flrg.strLHS() not in self.flrgs:
tmp = ProbabilisticFLRG(self.order)
for fs in flrg.LHS: tmp.appendLHS(fs)
tmp.appendRHS(flrg.LHS[-1])
self.flrgs[tmp.strLHS()] = tmp
self.globalFrequency += 1
def getProbability(self, flrg):
if flrg.strLHS() in self.flrgs:
return self.flrgs[flrg.strLHS()].frequencyCount / self.globalFrequency
else:
self.addNewPFLGR(flrg)
return self.getProbability(flrg)
def getMidpoints(self, flrg):
if flrg.strLHS() in self.flrgs:
tmp = self.flrgs[flrg.strLHS()]
ret = sum(np.array([tmp.getProbability(s) * self.setsDict[s].centroid for s in tmp.RHS]))
else:
pi = 1 / len(flrg.LHS)
ret = sum(np.array([pi * s.centroid for s in flrg.LHS]))
return ret
def getUpper(self, flrg):
if flrg.strLHS() in self.flrgs:
tmp = self.flrgs[flrg.strLHS()]
ret = sum(np.array([tmp.getProbability(s) * self.setsDict[s].upper for s in tmp.RHS]))
else:
pi = 1 / len(flrg.LHS)
ret = sum(np.array([pi * s.upper for s in flrg.LHS]))
return ret
def getLower(self, flrg):
if flrg.strLHS() in self.flrgs:
tmp = self.flrgs[flrg.strLHS()]
ret = sum(np.array([tmp.getProbability(s) * self.setsDict[s].lower for s in tmp.RHS]))
else:
pi = 1 / len(flrg.LHS)
ret = sum(np.array([pi * s.lower for s in flrg.LHS]))
return ret
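Numerically, the three getters above are the same probability-weighted sum applied to different attributes of the RHS sets; a made-up example:
probs = {"A1": 0.75, "A2": 0.25}       # ProbabilisticFLRG.getProbability values
centroids = {"A1": 10.0, "A2": 20.0}   # made-up FuzzySet centroids
midpoint = sum(probs[s] * centroids[s] for s in probs)
print(midpoint)   # 12.5; getUpper/getLower repeat this with .upper/.lower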
def forecast(self, data):
ndata = np.array(self.doTransformations(data))
l = len(ndata)
ret = []
for k in np.arange(self.order - 1, l):
# print(k)
affected_flrgs = []
affected_flrgs_memberships = []
norms = []
mp = []
# Find the sets which membership > 0 for each lag
count = 0
lags = {}
if self.order > 1:
subset = ndata[k - (self.order - 1): k + 1]
for instance in subset:
mb = FuzzySet.fuzzyInstance(instance, self.sets)
tmp = np.argwhere(mb)
idx = np.ravel(tmp) # flatten the array
if idx.size == 0: # the element is out of the bounds of the Universe of Discourse
if instance <= self.sets[0].lower:
idx = [0]
elif instance >= self.sets[-1].upper:
idx = [len(self.sets) - 1]
else:
raise Exception(instance)
lags[count] = idx
count = count + 1
# Build the tree with all possible paths
root = tree.FLRGTreeNode(None)
self.buildTree(root, lags, 0)
# Trace the possible paths and build the PFLRG's
for p in root.paths():
path = list(reversed(list(filter(None.__ne__, p))))
flrg = hofts.HighOrderFLRG(self.order)
for kk in path: flrg.appendLHS(self.sets[kk])
assert len(flrg.LHS) == subset.size, str(subset) + " -> " + str([s.name for s in flrg.LHS])
##
affected_flrgs.append(flrg)
# Find the general membership of FLRG
affected_flrgs_memberships.append(min(self.getSequenceMembership(subset, flrg.LHS)))
else:
mv = FuzzySet.fuzzyInstance(ndata[k], self.sets) # get all membership values
tmp = np.argwhere(mv) # get the indices of values > 0
idx = np.ravel(tmp) # flatten the array
if idx.size == 0: # the element is out of the bounds of the Universe of Discourse
if ndata[k] <= self.sets[0].lower:
idx = [0]
elif ndata[k] >= self.sets[-1].upper:
idx = [len(self.sets) - 1]
else:
raise Exception(ndata[k])
for kk in idx:
flrg = hofts.HighOrderFLRG(self.order)
flrg.appendLHS(self.sets[kk])
affected_flrgs.append(flrg)
affected_flrgs_memberships.append(mv[kk])
count = 0
for flrg in affected_flrgs:
# find each FLRG's midpoint, weighted by its probability and membership
norm = self.getProbability(flrg) * affected_flrgs_memberships[count]
if norm == 0:
norm = self.getProbability(flrg) # * 0.001
mp.append(norm * self.getMidpoints(flrg))
norms.append(norm)
count = count + 1
# normalize and generate the point forecast
norm = sum(norms)
if norm == 0:
ret.append(0)
else:
ret.append(sum(mp) / norm)
ret = self.doInverseTransformations(ret, params=[data[self.order - 1:]])
return ret
def forecastInterval(self, data):
ndata = np.array(self.doTransformations(data))
l = len(ndata)
ret = []
for k in np.arange(self.order - 1, l):
# print(k)
affected_flrgs = []
affected_flrgs_memberships = []
norms = []
up = []
lo = []
# Find the sets which membership > 0 for each lag
count = 0
lags = {}
if self.order > 1:
subset = ndata[k - (self.order - 1): k + 1]
for instance in subset:
mb = FuzzySet.fuzzyInstance(instance, self.sets)
tmp = np.argwhere(mb)
idx = np.ravel(tmp) # flatten the array
if idx.size == 0: # the element is out of the bounds of the Universe of Discourse
if instance <= self.sets[0].lower:
idx = [0]
elif instance >= self.sets[-1].upper:
idx = [len(self.sets) - 1]
else:
raise Exception(instance)
lags[count] = idx
count = count + 1
# Build the tree with all possible paths
root = tree.FLRGTreeNode(None)
self.buildTree(root, lags, 0)
# Trace the possible paths and build the PFLRG's
for p in root.paths():
path = list(reversed(list(filter(None.__ne__, p))))
flrg = hofts.HighOrderFLRG(self.order)
for kk in path: flrg.appendLHS(self.sets[kk])
assert len(flrg.LHS) == subset.size, str(subset) + " -> " + str([s.name for s in flrg.LHS])
##
affected_flrgs.append(flrg)
# Find the general membership of FLRG
affected_flrgs_memberships.append(min(self.getSequenceMembership(subset, flrg.LHS)))
else:
mv = FuzzySet.fuzzyInstance(ndata[k], self.sets) # get all membership values
tmp = np.argwhere(mv) # get the indices of values > 0
idx = np.ravel(tmp) # flatten the array
if idx.size == 0: # the element is out of the bounds of the Universe of Discourse
if ndata[k] <= self.sets[0].lower:
idx = [0]
elif ndata[k] >= self.sets[-1].upper:
idx = [len(self.sets) - 1]
else:
raise Exception(ndata[k])
for kk in idx:
flrg = hofts.HighOrderFLRG(self.order)
flrg.appendLHS(self.sets[kk])
affected_flrgs.append(flrg)
affected_flrgs_memberships.append(mv[kk])
count = 0
for flrg in affected_flrgs:
# find the bounds of each FLRG, weighted by probability and membership
norm = self.getProbability(flrg) * affected_flrgs_memberships[count]
if norm == 0:
norm = self.getProbability(flrg) # * 0.001
up.append(norm * self.getUpper(flrg))
lo.append(norm * self.getLower(flrg))
norms.append(norm)
count = count + 1
# generate the interval
norm = sum(norms)
if norm == 0:
ret.append([0, 0])
else:
lo_ = self.doInverseTransformations(sum(lo) / norm, params=[data[k - (self.order - 1): k + 1]])
up_ = self.doInverseTransformations(sum(up) / norm, params=[data[k - (self.order - 1): k + 1]])
ret.append([lo_, up_])
return ret
def forecastAhead(self, data, steps):
ret = [data[k] for k in np.arange(len(data) - self.order, len(data))]
for k in np.arange(self.order - 1, steps):
if ret[-1] <= self.sets[0].lower or ret[-1] >= self.sets[-1].upper:
ret.append(ret[-1])
else:
mp = self.forecast([ret[x] for x in np.arange(k - self.order, k)])
ret.append(mp)
return ret
def forecastAheadInterval(self, data, steps):
ret = [[data[k], data[k]] for k in np.arange(len(data) - self.order, len(data))]
for k in np.arange(self.order, steps+self.order):
if ret[-1][0] <= self.sets[0].lower and ret[-1][1] >= self.sets[-1].upper:
ret.append(ret[-1])
else:
lower = self.forecastInterval([ret[x][0] for x in np.arange(k - self.order, k)])
upper = self.forecastInterval([ret[x][1] for x in np.arange(k - self.order, k)])
ret.append([np.min(lower), np.max(upper)])
return ret
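The multi-step recursion feeds the lower and upper bounds back through forecastInterval separately and keeps the extremes. A self-contained toy with a stand-in forecaster that just widens the last value by one unit on each side:
def toy_forecast_interval(xs):
    # stand-in for forecastInterval: one interval around the last value
    return [[xs[-1] - 1, xs[-1] + 1]]
ret = [[100.0, 100.0]]                      # seed with the last observation
for _ in range(3):                          # three steps ahead
    lower = toy_forecast_interval([r[0] for r in ret])
    upper = toy_forecast_interval([r[1] for r in ret])
    ret.append([min(lower[0]), max(upper[0])])
print(ret)   # each step widens the interval by 1 on each side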
def getGridClean(self, resolution):
grid = {}
if len(self.transformations) == 0:
_min = self.sets[0].lower
_max = self.sets[-1].upper
else:
_min = self.original_min
_max = self.original_max
for sbin in np.arange(_min,_max, resolution):
grid[sbin] = 0
return grid
def gridCount(self, grid, resolution, index, interval):
#print(interval)
for k in index.inside(interval[0],interval[1]):
#print(k)
grid[k] += 1
return grid
def gridCountPoint(self, grid, resolution, index, point):
k = index.find_ge(point)
# print(k)
grid[k] += 1
return grid
def buildTreeWithoutOrder(self, node, lags, level):
if level not in lags:
return
for s in lags[level]:
node.appendChild(tree.FLRGTreeNode(s))
for child in node.getChildren():
self.buildTreeWithoutOrder(child, lags, level + 1)
def forecastAheadDistribution(self, data, steps, resolution, parameters=None):
ret = []
intervals = self.forecastAheadInterval(data, steps)
grid = self.getGridClean(resolution)
index = SortedCollection.SortedCollection(iterable=grid.keys())
if parameters is None:
grids = []
for k in np.arange(0, steps):
grids.append(self.getGridClean(resolution))
for k in np.arange(self.order, steps + self.order):
lags = {}
cc = 0
for i in intervals[k - self.order : k]:
quantiles = []
for qt in np.arange(0, 50, 2):
quantiles.append(i[0] + qt * ((i[1] - i[0]) / 100))
quantiles.append(i[1] - qt * ((i[1] - i[0]) / 100))
quantiles.append(i[0] + ((i[1] - i[0]) / 2))
quantiles = list(set(quantiles))
quantiles.sort()
lags[cc] = quantiles
cc += 1
# Build the tree with all possible paths
root = tree.FLRGTreeNode(None)
self.buildTreeWithoutOrder(root, lags, 0)
# Trace the possible paths
for p in root.paths():
path = list(reversed(list(filter(None.__ne__, p))))
qtle = self.forecastInterval(path)
grids[k - self.order] = self.gridCount(grids[k - self.order], resolution, index, np.ravel(qtle))
for k in np.arange(0, steps):
tmp = np.array([grids[k][q] for q in sorted(grids[k])])
ret.append(tmp / sum(tmp))
grid = self.getGridClean(resolution)
df = pd.DataFrame(ret, columns=sorted(grid))
return df
else:
print("novo")
ret = []
for k in np.arange(self.order, steps + self.order):
grid = self.getGridClean(resolution)
grid = self.gridCount(grid, resolution, index, intervals[k])
for qt in np.arange(0, 50, 1):
# print(qt)
qtle_lower = self.forecastInterval(
[intervals[x][0] + qt * ((intervals[x][1] - intervals[x][0]) / 100) for x in
np.arange(k - self.order, k)])
grid = self.gridCount(grid, resolution, index, np.ravel(qtle_lower))
qtle_upper = self.forecastInterval(
[intervals[x][1] - qt * ((intervals[x][1] - intervals[x][0]) / 100) for x in
np.arange(k - self.order, k)])
grid = self.gridCount(grid, resolution, index, np.ravel(qtle_upper))
qtle_mid = self.forecastInterval(
[intervals[x][0] + (intervals[x][1] - intervals[x][0]) / 2 for x in np.arange(k - self.order, k)])
grid = self.gridCount(grid, resolution, index, np.ravel(qtle_mid))
tmp = np.array([grid[k] for k in sorted(grid)])
ret.append(tmp / sum(tmp))
grid = self.getGridClean(resolution)
df = pd.DataFrame(ret, columns=sorted(grid))
return df
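The density estimate boils down to counting how many forecast intervals (and their internal quantiles) cover each bin of a fixed grid, then normalizing. A hedged, self-contained sketch with made-up intervals standing in for the forecastInterval outputs:
import numpy as np
bins = np.arange(0.0, 10.0, 1.0)
grid = {b: 0 for b in bins}
for lo, up in [(2.2, 4.8), (3.1, 6.9), (2.9, 5.5)]:   # made-up intervals
    for b in bins:
        if lo <= b <= up:          # gridCount does this via SortedCollection
            grid[b] += 1
counts = np.array([grid[b] for b in sorted(grid)])
print(counts / counts.sum())       # one normalized row of the output DataFrame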
def __str__(self):
tmp = self.name + ":\n"
for r in sorted(self.flrgs):
p = round(self.flrgs[r].frequencyCount / self.globalFrequency, 3)
tmp = tmp + "(" + str(p) + ") " + str(self.flrgs[r]) + "\n"
return tmp

View File

@@ -9,23 +9,61 @@ import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
from pyFTS.partitioners import Grid, Entropy, FCM
from pyFTS.partitioners import Grid, Entropy, FCM, Huarng
from pyFTS.common import FLR,FuzzySet,Membership,Transformations
from pyFTS import fts,hofts,ifts,pwfts,tree, chen
from pyFTS import fts,hofts,ifts,pwfts,tree, chen, pfts
from pyFTS.benchmarks import benchmarks as bchmk
from pyFTS.benchmarks import naive
from pyFTS.benchmarks import Measures
from numpy import random
#print(FCM.FCMPartitionerTrimf.__module__)
gauss = random.normal(0,1.0,2000)
#gauss = random.normal(0,1.0,2000)
#gauss_teste = random.normal(0,1.0,400)
os.chdir("/home/petronio/dados/Dropbox/Doutorado/Disciplinas/AdvancedFuzzyTimeSeriesModels/")
#taiexpd = pd.read_csv("DataSets/TAIEX.csv", sep=",")
#taiex = np.array(taiexpd["avg"][:5000])
taiex = pd.read_csv("DataSets/TAIEX.csv", sep=",")
taiex_treino = np.array(taiex["avg"][2500:3900])
taiex_teste = np.array(taiex["avg"][3901:4500])
#print(len(taiex))
#from pyFTS.common import Util
#, ,
bchmk.sliding_window(gauss,500,train=0.7,partitioners=[Grid.GridPartitioner, FCM.FCMPartitioner, Entropy.EntropyPartitioner])
diff = Transformations.Differential(1)
#bchmk.sliding_window(taiex,2000,train=0.8, #transformation=diff, #models=[pwfts.ProbabilisticWeightedFTS],
# partitioners=[Grid.GridPartitioner, FCM.FCMPartitioner, Entropy.EntropyPartitioner],
# partitions=[10, 15, 20, 25, 30, 35, 40], dump=True, save=True, file="experiments/points.csv")
bchmk.allPointForecasters(taiex_treino, taiex_treino, 7, transformation=diff,
models=[ naive.Naive, pfts.ProbabilisticFTS, pwfts.ProbabilisticWeightedFTS],
statistics=True, residuals=False, series=False)
data_train_fs = Grid.GridPartitioner(taiex_treino, 10, transformation=diff).sets
fts1 = pfts.ProbabilisticFTS("")
fts1.appendTransformation(diff)
fts1.train(taiex_treino, data_train_fs, order=1)
print(fts1.forecast([5000, 5000]))
fts2 = pwfts.ProbabilisticWeightedFTS("")
fts2.appendTransformation(diff)
fts2.train(taiex_treino, data_train_fs, order=1)
print(fts2.forecast([5000, 5000]))
#tmp = Grid.GridPartitioner(taiex_treino,7,transformation=diff)
#for s in tmp.sets: print(s)