diff --git a/img/icon.png b/img/icon.png new file mode 100644 index 0000000..81fbcf5 Binary files /dev/null and b/img/icon.png differ diff --git a/img/logo.png b/img/logo.png new file mode 100644 index 0000000..a3d4369 Binary files /dev/null and b/img/logo.png differ diff --git a/pyFTS/benchmarks/Util.py b/pyFTS/benchmarks/Util.py index 812e80b..1dc76f5 100644 --- a/pyFTS/benchmarks/Util.py +++ b/pyFTS/benchmarks/Util.py @@ -47,8 +47,8 @@ def find_best(dataframe, criteria, ascending): def point_dataframe_synthetic_columns(): - return ["Model", "Order", "Scheme", "Partitions", "Size", "RMSEAVG", "RMSESTD", "SMAPEAVG", "SMAPESTD", "UAVG", - "USTD", "TIMEAVG", "TIMESTD"] + return ["Model", "Order", "Scheme", "Partitions", "Size", "Steps", "Method", "RMSEAVG", "RMSESTD", + "SMAPEAVG", "SMAPESTD", "UAVG","USTD", "TIMEAVG", "TIMESTD"] def point_dataframe_analytic_columns(experiments): @@ -58,11 +58,13 @@ def point_dataframe_analytic_columns(experiments): columns.insert(2, "Scheme") columns.insert(3, "Partitions") columns.insert(4, "Size") - columns.insert(5, "Measure") + columns.insert(5, "Steps") + columns.insert(6, "Method") + columns.insert(7, "Measure") return columns -def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u): +def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u, steps, method): """ Create a dataframe to store the benchmark results :param experiments: dictionary with the execution results @@ -90,6 +92,8 @@ def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, mod.append(mfts.partitioner.name) mod.append(mfts.partitioner.partitions) mod.append(len(mfts)) + mod.append(steps) + mod.append(method) else: mod.append('-') mod.append('-') @@ -122,17 +126,17 @@ def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, s = '-' p = '-' l = '-' - print([n, o, s, p, l]) - tmp = [n, o, s, p, l, 'RMSE'] + print([n, o, s, p, l, steps, method]) + tmp = 
[n, o, s, p, l, steps, method, 'RMSE'] tmp.extend(rmse[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'SMAPE'] + tmp = [n, o, s, p, l, steps, method, 'SMAPE'] tmp.extend(smape[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'U'] + tmp = [n, o, s, p, l, steps, method, 'U'] tmp.extend(u[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'TIME'] + tmp = [n, o, s, p, l, steps, method, 'TIME'] tmp.extend(times[k]) ret.append(deepcopy(tmp)) except Exception as ex: @@ -401,7 +405,8 @@ def check_ignore_list(b, ignore): return flag -def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, synthetic, times, q05, q25, q75, q95): +def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, synthetic, times, + q05, q25, q75, q95, steps, method): ret = [] if synthetic: for k in sorted(objs.keys()): @@ -409,14 +414,19 @@ def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, mfts = objs[k] mod.append(mfts.shortname) mod.append(mfts.order) + l = len(mfts) if not mfts.benchmark_only: mod.append(mfts.partitioner.name) mod.append(mfts.partitioner.partitions) - l = len(mfts) + mod.append(l) + mod.append(steps) + mod.append(method) else: mod.append('-') mod.append('-') - l = '-' + mod.append('-') + mod.append(steps) + mod.append(method) mod.append(round(np.nanmean(sharpness[k]), 2)) mod.append(round(np.nanstd(sharpness[k]), 2)) mod.append(round(np.nanmean(resolution[k]), 2)) @@ -452,28 +462,28 @@ def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, p = '-' l = '-' - tmp = [n, o, s, p, l, 'Sharpness'] + tmp = [n, o, s, p, l, steps, method, 'Sharpness'] tmp.extend(sharpness[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'Resolution'] + tmp = [n, o, s, p, l, steps, method, 'Resolution'] tmp.extend(resolution[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'Coverage'] + tmp = [n, o, s, p, l, steps, method, 'Coverage'] tmp.extend(coverage[k]) 
 ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'TIME'] + tmp = [n, o, s, p, l, steps, method, 'TIME'] tmp.extend(times[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'Q05'] + tmp = [n, o, s, p, l, steps, method, 'Q05'] tmp.extend(q05[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'Q25'] + tmp = [n, o, s, p, l, steps, method, 'Q25'] tmp.extend(q25[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'Q75'] + tmp = [n, o, s, p, l, steps, method, 'Q75'] tmp.extend(q75[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'Q95'] + tmp = [n, o, s, p, l, steps, method, 'Q95'] tmp.extend(q95[k]) ret.append(deepcopy(tmp)) except Exception as ex: @@ -492,18 +502,19 @@ def interval_dataframe_analytic_columns(experiments): columns.insert(2, "Scheme") columns.insert(3, "Partitions") columns.insert(4, "Size") - columns.insert(5, "Measure") + columns.insert(5, "Steps") + columns.insert(6, "Method") + columns.insert(7, "Measure") return columns def interval_dataframe_synthetic_columns(): - columns = ["Model", "Order", "Scheme", "Partitions", "SHARPAVG", "SHARPSTD", "RESAVG", "RESSTD", "COVAVG", + columns = ["Model", "Order", "Scheme", "Partitions","SIZE", "Steps","Method", "SHARPAVG", "SHARPSTD", "RESAVG", "RESSTD", "COVAVG", "COVSTD", "TIMEAVG", "TIMESTD", "Q05AVG", "Q05STD", "Q25AVG", "Q25STD", "Q75AVG", "Q75STD", "Q95AVG", "Q95STD"] return columns - def cast_dataframe_to_synthetic_interval(infile, outfile, experiments): columns = interval_dataframe_analytic_columns(experiments) dat = pd.read_csv(infile, sep=";", usecols=columns) @@ -863,7 +874,7 @@ def plot_dataframe_interval_pinball(file_synthetic, file_analytic, experiments, Util.show_and_save_image(fig, file, save) -def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, synthetic): +def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, synthetic, steps, method): """ Save benchmark results for m-step ahead probabilistic forecasters :param experiments: @@ -893,11
+904,14 @@ def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, syn if not mfts.benchmark_only: mod.append(mfts.partitioner.name) mod.append(mfts.partitioner.partitions) - l = len(mfts) + mod.append(len(mfts)) + mod.append(steps) + mod.append(method) else: mod.append('-') mod.append('-') - l = '-' + mod.append('-') + mod.append(steps) + mod.append(method) mod.append(np.round(np.nanmean(crps[k]), 2)) mod.append(np.round(np.nanstd(crps[k]), 2)) - mod.append(l) @@ -925,10 +940,10 @@ def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, syn s = '-' p = '-' l = '-' - tmp = [n, o, s, p, l, 'CRPS'] + tmp = [n, o, s, p, l, steps, method, 'CRPS'] tmp.extend(crps[k]) ret.append(deepcopy(tmp)) - tmp = [n, o, s, p, l, 'TIME'] + tmp = [n, o, s, p, l, steps, method, 'TIME'] tmp.extend(times[k]) ret.append(deepcopy(tmp)) except Exception as ex: @@ -940,7 +955,6 @@ def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, syn return dat - def probabilistic_dataframe_analytic_columns(experiments): columns = [str(k) for k in np.arange(0, experiments)] columns.insert(0, "Model") @@ -948,12 +962,14 @@ def probabilistic_dataframe_analytic_columns(experiments): columns.insert(2, "Scheme") columns.insert(3, "Partitions") columns.insert(4, "Size") - columns.insert(5, "Measure") + columns.insert(5, "Steps") + columns.insert(6, "Method") + columns.insert(7, "Measure") return columns def probabilistic_dataframe_synthetic_columns(): - columns = ["Model", "Order", "Scheme", "Partitions", "CRPSAVG", "CRPSSTD", + columns = ["Model", "Order", "Scheme", "Partitions","Size", "Steps", "Method", "CRPSAVG", "CRPSSTD", "TIMEAVG", "TIMESTD"] return columns diff --git a/pyFTS/benchmarks/benchmarks.py b/pyFTS/benchmarks/benchmarks.py index 4ec24bc..409503f 100644 --- a/pyFTS/benchmarks/benchmarks.py +++ b/pyFTS/benchmarks/benchmarks.py @@ -293,6 +293,9 @@ def run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwarg
transformation = kwargs.get('transformation', None) indexer = kwargs.get('indexer', None) + steps_ahead = kwargs.get('steps_ahead', 1) + method = kwargs.get('method', None) + if mfts.benchmark_only: _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") else: @@ -313,7 +316,8 @@ def run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwarg _end = time.time() times += _end - _start - ret = {'key': _key, 'obj': mfts, 'rmse': _rmse, 'smape': _smape, 'u': _u, 'time': times, 'window': window_key} + ret = {'key': _key, 'obj': mfts, 'rmse': _rmse, 'smape': _smape, 'u': _u, 'time': times, 'window': window_key, + 'steps': steps_ahead, 'method': method} return ret @@ -346,6 +350,9 @@ def run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kw transformation = kwargs.get('transformation', None) indexer = kwargs.get('indexer', None) + steps_ahead = kwargs.get('steps_ahead', 1) + method = kwargs.get('method', None) + if mfts.benchmark_only: _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha) else: @@ -367,7 +374,8 @@ def run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kw times += _end - _start ret = {'key': _key, 'obj': mfts, 'sharpness': _sharp, 'resolution': _res, 'coverage': _cov, 'time': times, - 'Q05': _q05, 'Q25': _q25, 'Q75': _q75, 'Q95': _q95, 'window': window_key} + 'Q05': _q05, 'Q25': _q25, 'Q75': _q75, 'Q95': _q95, 'window': window_key, + 'steps': steps_ahead, 'method': method} return ret @@ -403,6 +411,9 @@ def run_probabilistic(mfts, partitioner, train_data, test_data, window_key=None, transformation = kwargs.get('transformation', None) indexer = kwargs.get('indexer', None) + steps_ahead = kwargs.get('steps_ahead', 1) + method = kwargs.get('method', None) + if mfts.benchmark_only: _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha) else: @@ -429,7 +440,8 @@ def run_probabilistic(mfts, partitioner, 
train_data, test_data, window_key=None, _crps1 = np.nan _t1 = np.nan - ret = {'key': _key, 'obj': mfts, 'CRPS': _crps1, 'time': _t1, 'window': window_key} + ret = {'key': _key, 'obj': mfts, 'CRPS': _crps1, 'time': _t1, 'window': window_key, + 'steps': steps_ahead, 'method': method} return ret @@ -466,6 +478,8 @@ def process_point_jobs(jobs, experiments, save=False, file=None, sintetic=False) smape = {} u = {} times = {} + steps = {} + method = {} for job in jobs: _key = job['key'] @@ -475,6 +489,8 @@ def process_point_jobs(jobs, experiments, save=False, file=None, sintetic=False) smape[_key] = [] u[_key] = [] times[_key] = [] + steps[_key] = job['steps'] + method[_key] = job['method'] rmse[_key].append(job['rmse']) smape[_key].append(job['smape']) u[_key].append(job['u']) diff --git a/pyFTS/common/fts.py b/pyFTS/common/fts.py index 43f607c..13aec21 100644 --- a/pyFTS/common/fts.py +++ b/pyFTS/common/fts.py @@ -89,17 +89,17 @@ class FTS(object): steps_ahead = kwargs.get("steps_ahead", None) - if type == 'point' and steps_ahead == None: + if type == 'point' and (steps_ahead == None or steps_ahead == 1): ret = self.forecast(ndata, **kwargs) - elif type == 'point' and steps_ahead != None: + elif type == 'point' and steps_ahead > 1: ret = self.forecast_ahead(ndata, steps_ahead, **kwargs) - elif type == 'interval' and steps_ahead == None: + elif type == 'interval' and (steps_ahead == None or steps_ahead == 1): ret = self.forecast_interval(ndata, **kwargs) - elif type == 'interval' and steps_ahead != None: + elif type == 'interval' and steps_ahead > 1: ret = self.forecast_ahead_interval(ndata, steps_ahead, **kwargs) - elif type == 'distribution' and steps_ahead == None: + elif type == 'distribution' and (steps_ahead == None or steps_ahead == 1): ret = self.forecast_distribution(ndata, **kwargs) - elif type == 'distribution' and steps_ahead != None: + elif type == 'distribution' and steps_ahead > 1: ret = self.forecast_ahead_distribution(ndata, steps_ahead, **kwargs)
else: raise ValueError('The argument \'type\' has an unknown value.')