From 8df4f9c749111cf10a3afaaccda3f2014a793978 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Petr=C3=B4nio=20C=C3=A2ndido=20de=20Lima=20e=20Silva?=
Date: Mon, 8 May 2017 17:50:35 -0300
Subject: [PATCH] - Several bugfixes in benchmark methods and optimizations

---
 benchmarks/arima.py                  | 18 ++++++++++++------
 benchmarks/distributed_benchmarks.py |  2 +-
 tests/general.py                     | 11 ++++++++---
 3 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/benchmarks/arima.py b/benchmarks/arima.py
index 0c40b13..437f642 100644
--- a/benchmarks/arima.py
+++ b/benchmarks/arima.py
@@ -24,11 +24,11 @@ class ARIMA(fts.FTS):
         self.benchmark_only = True
         self.min_order = 1

-    def train(self, data, sets, order=(2,1,1), parameters=None):
+    def train(self, data, sets, order, parameters=None):
         self.p = order[0]
         self.d = order[1]
         self.q = order[2]
-        self.order = max([self.p, self.d, self.q])
+        self.order = self.p + self.q
         self.shortname = "ARIMA(" + str(self.p) + "," + str(self.d) + "," + str(self.q) + ")"

         old_fit = self.model_fit
@@ -51,13 +51,19 @@ class ARIMA(fts.FTS):

         ret = []

-        ar = np.array([self.ar(ndata[k - self.p: k]) for k in np.arange(self.p, l)])
+        if self.d == 0:
+            ar = np.array([self.ar(ndata[k - self.p: k]) for k in np.arange(self.p, l)])
+        else:
+            ar = np.array([ndata[k] + self.ar(ndata[k - self.p: k]) for k in np.arange(self.p, l)])

-        residuals = np.array([ar[k - self.p] - ndata[k] for k in np.arange(self.p, l)])
+        if self.q > 0:
+            residuals = np.array([ndata[k] - ar[k - self.p] for k in np.arange(self.p, l)])

-        ma = np.array([self.ma(residuals[k - self.q: k]) for k in np.arange(self.q, len(ar) + 1)])
+            ma = np.array([self.ma(residuals[k - self.q: k]) for k in np.arange(self.q, len(residuals))])

-        ret = ar + ma
+            ret = ar[self.q:] + ma
+        else:
+            ret = ar

         ret = self.doInverseTransformations(ret, params=[data[self.order - 1:]])

diff --git a/benchmarks/distributed_benchmarks.py b/benchmarks/distributed_benchmarks.py
index 84cb1d0..988a1bd 100644
--- a/benchmarks/distributed_benchmarks.py
+++ b/benchmarks/distributed_benchmarks.py
@@ -101,7 +101,7 @@ def point_sliding_window(data, windowsize, train=0.8, models=None, partitioners=
                             quantreg.QuantileRegression, quantreg.QuantileRegression]

     if benchmark_models_parameters is None:
-        benchmark_models_parameters = [None, (1, 0, 1), (1, 1, 1), (2, 1, 1), (2, 1, 2), 1, 2]
+        benchmark_models_parameters = [None, (1, 0, 0), (1, 0, 1), (2, 0, 1), (2, 0, 2), 1, 2]

     cluster = dispy.JobCluster(run_point, nodes=nodes) #, depends=dependencies)

diff --git a/tests/general.py b/tests/general.py
index 797a9fb..3881cf1 100644
--- a/tests/general.py
+++ b/tests/general.py
@@ -64,9 +64,14 @@ from pyFTS.benchmarks import arima, quantreg
 #Util.plot_dataframe_point("experiments/taiex_point_sintetic.csv","experiments/taiex_point_analitic.csv",11)

 #tmp = arima.ARIMA("")
-#tmp.train(taiex[:1600],(2,1,1))
-#teste = tmp.forecast(taiex[1600:2000])
+#tmp.train(taiex[:1600], None, order=(1,0,1))
+#teste = tmp.forecast(taiex[1600:1610])

+#tmp = quantreg.QuantileRegression("")
+#tmp.train(taiex[:1600], None, order=1)
+#teste = tmp.forecast(taiex[1600:1610])
+
+#print(taiex[1600:1610])
 #print(teste)

 #bchmk.teste(taiex,['192.168.0.109', '192.168.0.101'])
@@ -75,7 +80,7 @@ bchmk.point_sliding_window(taiex,2000,train=0.8, #models=[yu.WeightedFTS], # #
                      partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
                      partitions= np.arange(10,200,step=10), #transformation=diff,
                      dump=True, save=True, file="experiments/taiex_point_analytic.csv",
-                     nodes=['192.168.0.102', '192.168.0.109']) #, depends=[hofts, ifts])
+                     nodes=['192.168.0.102', '192.168.0.109', '192.168.0.106']) #, depends=[hofts, ifts])

 #bchmk.testa(taiex,[10,20],partitioners=[Grid.GridPartitioner], nodes=['192.168.0.109', '192.168.0.101'])
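
As a sanity check on the new AR/MA bookkeeping in forecast(), the following minimal sketch reproduces the same indexing for the d == 0 case on a toy series. It assumes an already-fitted model; arma_point_forecasts, phi and theta are illustrative stand-ins for self.ar / self.ma and the fitted coefficients, not pyFTS API.

import numpy as np

def arma_point_forecasts(data, phi, theta):
    # One-step in-sample ARMA forecasts mirroring the patched indexing (d == 0).
    p, q = len(phi), len(theta)
    data = np.asarray(data, dtype=float)
    l = len(data)

    # AR term: one forecast for every k in [p, l), from the p previous values
    # (window reversed so the newest value is paired with phi[0]).
    ar = np.array([np.dot(phi, data[k - p:k][::-1]) for k in np.arange(p, l)])

    if q == 0:
        return ar

    # Residuals between the observed series and the AR forecasts.
    residuals = np.array([data[k] - ar[k - p] for k in np.arange(p, l)])

    # MA term: one value for every window of q residuals.
    ma = np.array([np.dot(theta, residuals[k - q:k][::-1])
                   for k in np.arange(q, len(residuals))])

    # Drop the first q AR forecasts so both arrays align before summing,
    # as in ret = ar[self.q:] + ma above.
    return ar[q:] + ma

# Toy usage with made-up coefficients:
series = np.sin(np.arange(50) / 5.0)
print(arma_point_forecasts(series, phi=[0.6, 0.2], theta=[0.3]))

With a 50-point series, phi of length 2 and theta of length 1, the result has 50 - 2 - 1 = 47 values, which is the alignment the ar[self.q:] + ma line enforces.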