Bugfixes in benchmarks

Petrônio Cândido 2018-07-09 12:08:03 -03:00
parent 52544aca88
commit 4a4e168448
3 changed files with 9 additions and 2 deletions

View File

@@ -334,6 +334,8 @@ def get_point_statistics(data, model, **kwargs):
     nforecasts = np.array(forecasts[:-1])
+    print(model.shortname)
     ret.append(np.round(rmse(ndata[model.max_lag:], nforecasts), 2))
     ret.append(np.round(mape(ndata[model.max_lag:], nforecasts), 2))
     ret.append(np.round(UStatistic(ndata[model.max_lag:], nforecasts), 2))
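For reference, a self-contained sketch of what the three patched lines compute: rounded RMSE, MAPE and Theil's U over the part of the series the model can actually forecast. The formulas below follow the usual textbook definitions and are illustrative, not pyFTS's exact implementations.

    # Illustrative re-implementation of the three point statistics above;
    # textbook formulas, not necessarily pyFTS's exact code.
    import numpy as np

    def point_stats(data, forecasts, max_lag):
        actual = np.array(data[max_lag:], dtype=float)   # forecastable targets
        pred = np.array(forecasts, dtype=float)          # one forecast each
        rmse = np.sqrt(np.mean((actual - pred) ** 2))
        mape = np.mean(np.abs((actual - pred) / actual)) * 100
        naive = actual[:-1]                              # random-walk benchmark
        u = (np.sqrt(np.sum((pred[1:] - actual[1:]) ** 2)) /
             np.sqrt(np.sum((naive - actual[1:]) ** 2))) # Theil's U
        return [np.round(x, 2) for x in (rmse, mape, u)]

    print(point_stats([1, 2, 3, 4, 5], [1.1, 2.9, 4.2, 5.1], 1))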

View File

@@ -39,6 +39,7 @@ class ARIMA(fts.FTS):
         self.d = order[1]
         self.q = order[2]
         self.order = self.p + self.q + (self.q - 1 if self.q > 0 else 0)
+        self.max_lag = self.order
         self.d = len(self.transformations)
         self.shortname = "ARIMA(" + str(self.p) + "," + str(self.d) + "," + str(self.q) + ") - " + str(self.alpha)
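The added `self.max_lag = self.order` matters because the measures above slice the actuals with `ndata[model.max_lag:]`; without the attribute, the ARIMA wrapper is presumably left with the base-class default and the actuals and forecasts can fall out of alignment. A small illustration of the order bookkeeping, with made-up hyperparameter values:

    # The formula is copied from the hunk above; the values are made up.
    p, d, q = 2, 1, 1                          # ARIMA(p, d, q) hyperparameters
    order = p + q + (q - 1 if q > 0 else 0)    # lags the wrapper consumes
    max_lag = order                            # the attribute this commit adds
    print(order, max_lag)                      # -> 3 3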

View File

@@ -104,6 +104,8 @@ def sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs):
     :keyword
     benchmark_methods: a list with Non FTS models to benchmark. The default is None.
     benchmark_methods_parameters: a list with Non FTS models parameters. The default is None.
+    benchmark_models: A boolean value indicating if external FTS methods will be used on benchmark. The default is False.
+    build_methods: A boolean value indicating if the default FTS methods will be used on benchmark. The default is True.
     dataset: the dataset name to identify the current set of benchmarks results on database.
     distributed: A boolean value indicating if the forecasting procedure will be distributed in a dispy cluster. The default is False.
     file: file path to save the results. The default is benchmarks.db.
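A hedged usage sketch for the two newly documented keywords; the dataset, window size and import paths assume the usual pyFTS layout and are placeholders rather than values from this commit:

    from pyFTS.data import TAIEX
    from pyFTS.benchmarks import benchmarks as bchmk

    data = TAIEX.get_data()
    bchmk.sliding_window_benchmarks(
        data, 1000, train=0.8, type='point',
        build_methods=True,       # build the default FTS methods (new keyword)
        benchmark_models=False,   # skip external FTS methods (new keyword)
        dataset='TAIEX', file='benchmarks.db')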
@@ -146,7 +148,7 @@ def sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs):
     pool = [] if models is None else models
-    if models is None and methods is None:
+    if methods is None:
         if type == 'point':
             methods = get_point_methods()
         elif type == 'interval':
@@ -154,7 +156,9 @@ def sliding_window_benchmarks(data, windowsize, train=0.8, **kwargs):
         elif type == 'distribution':
             methods = get_probabilistic_methods()
-    if models is None:
+    build_methods = __pop("build_methods", True, kwargs)
+    if build_methods:
         for method in methods:
             mfts = method()
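The net effect of the last two hunks: the default method list is now chosen whenever `methods is None`, and instantiation of the defaults is gated by the new `build_methods` keyword instead of by `models` being empty, so user-supplied models and the built-in methods can share one benchmark run. The `__pop` call reads a keyword argument and removes it from `kwargs`; a minimal sketch of that helper pattern, assuming it behaves like `dict.pop` (the real module-private helper may differ):

    def __pop(key, default, kwargs):
        # Read a keyword with a default and drop it so it is not forwarded twice.
        return kwargs.pop(key, default)

    opts = {"build_methods": False}
    print(__pop("build_methods", True, opts), opts)  # -> False {}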