Logo and icon

Petrônio Cândido 2018-04-12 17:47:54 -03:00
parent 3ed39c0198
commit ff23d874bc
5 changed files with 72 additions and 40 deletions

img/icon.png — new binary file, 1.8 KiB (binary content not shown)

img/logo.png — new binary file, 13 KiB (binary content not shown)


@@ -47,8 +47,8 @@ def find_best(dataframe, criteria, ascending):

 def point_dataframe_synthetic_columns():
-    return ["Model", "Order", "Scheme", "Partitions", "Size", "RMSEAVG", "RMSESTD", "SMAPEAVG", "SMAPESTD", "UAVG",
-            "USTD", "TIMEAVG", "TIMESTD"]
+    return ["Model", "Order", "Scheme", "Partitions", "Size", "Steps", "Method", "RMSEAVG", "RMSESTD",
+            "SMAPEAVG", "SMAPESTD", "UAVG", "USTD", "TIMEAVG", "TIMESTD"]


 def point_dataframe_analytic_columns(experiments):
@@ -58,11 +58,13 @@ def point_dataframe_analytic_columns(experiments):
     columns.insert(2, "Scheme")
     columns.insert(3, "Partitions")
     columns.insert(4, "Size")
-    columns.insert(5, "Measure")
+    columns.insert(5, "Steps")
+    columns.insert(6, "Method")
+    columns.insert(7, "Measure")
     return columns


-def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u):
+def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape, times, u, steps, method):
     """
     Create a dataframe to store the benchmark results
     :param experiments: dictionary with the execution results
@@ -90,6 +92,8 @@ def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape,
                 mod.append(mfts.partitioner.name)
                 mod.append(mfts.partitioner.partitions)
                 mod.append(len(mfts))
+                mod.append(steps)
+                mod.append(method)
             else:
                 mod.append('-')
                 mod.append('-')
@ -122,17 +126,17 @@ def save_dataframe_point(experiments, file, objs, rmse, save, synthetic, smape,
s = '-' s = '-'
p = '-' p = '-'
l = '-' l = '-'
print([n, o, s, p, l]) print([n, o, s, p, l, steps, method])
tmp = [n, o, s, p, l, 'RMSE'] tmp = [n, o, s, p, l, steps, method, 'RMSE']
tmp.extend(rmse[k]) tmp.extend(rmse[k])
ret.append(deepcopy(tmp)) ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, 'SMAPE'] tmp = [n, o, s, p, l, steps, method, 'SMAPE']
tmp.extend(smape[k]) tmp.extend(smape[k])
ret.append(deepcopy(tmp)) ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, 'U'] tmp = [n, o, s, p, l, steps, method, 'U']
tmp.extend(u[k]) tmp.extend(u[k])
ret.append(deepcopy(tmp)) ret.append(deepcopy(tmp))
tmp = [n, o, s, p, l, 'TIME'] tmp = [n, o, s, p, l, steps, method, 'TIME']
tmp.extend(times[k]) tmp.extend(times[k])
ret.append(deepcopy(tmp)) ret.append(deepcopy(tmp))
except Exception as ex: except Exception as ex:
@@ -401,7 +405,8 @@ def check_ignore_list(b, ignore):
     return flag


-def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, synthetic, times, q05, q25, q75, q95):
+def save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, synthetic, times,
+                            q05, q25, q75, q95, steps, method):
     ret = []
     if synthetic:
         for k in sorted(objs.keys()):
@@ -409,14 +414,19 @@ def save_dataframe_interval(coverage, experiments, file, objs, resolution, save,
             mfts = objs[k]
             mod.append(mfts.shortname)
             mod.append(mfts.order)
+            l = len(mfts)
             if not mfts.benchmark_only:
                 mod.append(mfts.partitioner.name)
                 mod.append(mfts.partitioner.partitions)
-                l = len(mfts)
+                mod.append(l)
+                mod.append(steps)
+                mod.append(method)
             else:
                 mod.append('-')
                 mod.append('-')
-                l = '-'
+                mod.append('-')
+                mod.append(steps)
+                mod.append(method)
             mod.append(round(np.nanmean(sharpness[k]), 2))
             mod.append(round(np.nanstd(sharpness[k]), 2))
             mod.append(round(np.nanmean(resolution[k]), 2))
@@ -452,28 +462,28 @@ def save_dataframe_interval(coverage, experiments, file, objs, resolution, save,
                     p = '-'
                     l = '-'
-                tmp = [n, o, s, p, l, 'Sharpness']
+                tmp = [n, o, s, p, l, steps, method, 'Sharpness']
                 tmp.extend(sharpness[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'Resolution']
+                tmp = [n, o, s, p, l, steps, method, 'Resolution']
                 tmp.extend(resolution[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'Coverage']
+                tmp = [n, o, s, p, l, steps, method, 'Coverage']
                 tmp.extend(coverage[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'TIME']
+                tmp = [n, o, s, p, l, steps, method, 'TIME']
                 tmp.extend(times[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'Q05']
+                tmp = [n, o, s, p, l, steps, method, 'Q05']
                 tmp.extend(q05[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'Q25']
+                tmp = [n, o, s, p, l, steps, method, 'Q25']
                 tmp.extend(q25[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'Q75']
+                tmp = [n, o, s, p, l, steps, method, 'Q75']
                 tmp.extend(q75[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'Q95']
+                tmp = [n, o, s, p, l, steps, method, 'Q95']
                 tmp.extend(q95[k])
                 ret.append(deepcopy(tmp))
             except Exception as ex:
@@ -492,18 +502,19 @@ def interval_dataframe_analytic_columns(experiments):
     columns.insert(2, "Scheme")
     columns.insert(3, "Partitions")
     columns.insert(4, "Size")
-    columns.insert(5, "Measure")
+    columns.insert(5, "Steps")
+    columns.insert(6, "Method")
+    columns.insert(7, "Measure")
     return columns


 def interval_dataframe_synthetic_columns():
-    columns = ["Model", "Order", "Scheme", "Partitions", "SHARPAVG", "SHARPSTD", "RESAVG", "RESSTD", "COVAVG",
+    columns = ["Model", "Order", "Scheme", "Partitions", "Size", "Steps", "Method", "SHARPAVG", "SHARPSTD", "RESAVG", "RESSTD", "COVAVG",
                "COVSTD", "TIMEAVG", "TIMESTD", "Q05AVG", "Q05STD", "Q25AVG", "Q25STD", "Q75AVG", "Q75STD", "Q95AVG", "Q95STD"]
     return columns


 def cast_dataframe_to_synthetic_interval(infile, outfile, experiments):
     columns = interval_dataframe_analytic_columns(experiments)
     dat = pd.read_csv(infile, sep=";", usecols=columns)
@@ -863,7 +874,7 @@ def plot_dataframe_interval_pinball(file_synthetic, file_analytic, experiments,
     Util.show_and_save_image(fig, file, save)


-def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, synthetic):
+def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, synthetic, steps, method):
     """
     Save benchmark results for m-step ahead probabilistic forecasters
     :param experiments:
@@ -893,11 +904,15 @@ def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, syn
+            l = len(mfts)
             if not mfts.benchmark_only:
                 mod.append(mfts.partitioner.name)
                 mod.append(mfts.partitioner.partitions)
-                l = len(mfts)
+                mod.append(l)
+                mod.append(steps)
+                mod.append(method)
             else:
                 mod.append('-')
                 mod.append('-')
-                l = '-'
+                mod.append('-')
+                mod.append(steps)
+                mod.append(method)
             mod.append(np.round(np.nanmean(crps[k]), 2))
             mod.append(np.round(np.nanstd(crps[k]), 2))
             mod.append(l)
@@ -925,10 +940,10 @@ def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, syn
                     s = '-'
                     p = '-'
                     l = '-'
-                tmp = [n, o, s, p, l, 'CRPS']
+                tmp = [n, o, s, p, l, steps, method, 'CRPS']
                 tmp.extend(crps[k])
                 ret.append(deepcopy(tmp))
-                tmp = [n, o, s, p, l, 'TIME']
+                tmp = [n, o, s, p, l, steps, method, 'TIME']
                 tmp.extend(times[k])
                 ret.append(deepcopy(tmp))
             except Exception as ex:
@@ -940,7 +955,6 @@ def save_dataframe_probabilistic(experiments, file, objs, crps, times, save, syn
     return dat


 def probabilistic_dataframe_analytic_columns(experiments):
     columns = [str(k) for k in np.arange(0, experiments)]
     columns.insert(0, "Model")
@@ -948,12 +962,14 @@ def probabilistic_dataframe_analytic_columns(experiments):
     columns.insert(2, "Scheme")
     columns.insert(3, "Partitions")
     columns.insert(4, "Size")
-    columns.insert(5, "Measure")
+    columns.insert(5, "Steps")
+    columns.insert(6, "Method")
+    columns.insert(7, "Measure")
     return columns


 def probabilistic_dataframe_synthetic_columns():
-    columns = ["Model", "Order", "Scheme", "Partitions", "CRPSAVG", "CRPSSTD",
+    columns = ["Model", "Order", "Scheme", "Partitions", "Size", "Steps", "Method", "CRPSAVG", "CRPSSTD",
                "TIMEAVG", "TIMESTD"]
     return columns
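
For reference, the analytic dataframes assembled above keep the run metadata in the first eight columns (Model through Measure) and add one numbered column per experiment. A minimal sketch of the resulting layout, using hypothetical values rather than real benchmark output:

import pandas as pd

# Hypothetical rows, purely illustrative: three experiments ("0".."2");
# Steps=1 and Method=None match the defaults recorded by the run_* functions.
columns = ["Model", "Order", "Scheme", "Partitions", "Size", "Steps", "Method",
           "Measure", "0", "1", "2"]
rows = [
    ["PWFTS", 2, "Grid", 35, 120, 1, None, "RMSE",  1.52, 1.48, 1.61],
    ["PWFTS", 2, "Grid", 35, 120, 1, None, "SMAPE", 0.04, 0.03, 0.05],
]
df = pd.DataFrame(rows, columns=columns)
print(df)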


@@ -293,6 +293,9 @@ def run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwarg
     transformation = kwargs.get('transformation', None)
     indexer = kwargs.get('indexer', None)

+    steps_ahead = kwargs.get('steps_ahead', 1)
+    method = kwargs.get('method', None)
+
     if mfts.benchmark_only:
         _key = mfts.shortname + str(mfts.order if mfts.order is not None else "")
     else:
@@ -313,7 +316,8 @@ def run_point(mfts, partitioner, train_data, test_data, window_key=None, **kwarg
     _end = time.time()
     times += _end - _start

-    ret = {'key': _key, 'obj': mfts, 'rmse': _rmse, 'smape': _smape, 'u': _u, 'time': times, 'window': window_key}
+    ret = {'key': _key, 'obj': mfts, 'rmse': _rmse, 'smape': _smape, 'u': _u, 'time': times, 'window': window_key,
+           'steps': steps_ahead, 'method': method}

     return ret
@@ -346,6 +350,9 @@ def run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kw
     transformation = kwargs.get('transformation', None)
     indexer = kwargs.get('indexer', None)

+    steps_ahead = kwargs.get('steps_ahead', 1)
+    method = kwargs.get('method', None)
+
     if mfts.benchmark_only:
         _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha)
     else:
@@ -367,7 +374,8 @@ def run_interval(mfts, partitioner, train_data, test_data, window_key=None, **kw
     times += _end - _start

     ret = {'key': _key, 'obj': mfts, 'sharpness': _sharp, 'resolution': _res, 'coverage': _cov, 'time': times,
-           'Q05': _q05, 'Q25': _q25, 'Q75': _q75, 'Q95': _q95, 'window': window_key}
+           'Q05': _q05, 'Q25': _q25, 'Q75': _q75, 'Q95': _q95, 'window': window_key,
+           'steps': steps_ahead, 'method': method}

     return ret
@@ -403,6 +411,9 @@ def run_probabilistic(mfts, partitioner, train_data, test_data, window_key=None,
     transformation = kwargs.get('transformation', None)
     indexer = kwargs.get('indexer', None)

+    steps_ahead = kwargs.get('steps_ahead', 1)
+    method = kwargs.get('method', None)
+
     if mfts.benchmark_only:
         _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha)
     else:
@@ -429,7 +440,8 @@ def run_probabilistic(mfts, partitioner, train_data, test_data, window_key=None,
         _crps1 = np.nan
         _t1 = np.nan

-    ret = {'key': _key, 'obj': mfts, 'CRPS': _crps1, 'time': _t1, 'window': window_key}
+    ret = {'key': _key, 'obj': mfts, 'CRPS': _crps1, 'time': _t1, 'window': window_key,
+           'steps': steps_ahead, 'method': method}

     return ret
@@ -466,6 +478,8 @@ def process_point_jobs(jobs, experiments, save=False, file=None, sintetic=False)
     smape = {}
     u = {}
     times = {}
+    steps = None
+    method = None

     for job in jobs:
         _key = job['key']
@@ -475,6 +489,8 @@ def process_point_jobs(jobs, experiments, save=False, file=None, sintetic=False)
             smape[_key] = []
             u[_key] = []
             times[_key] = []
+            steps = job['steps']
+            method = job['method']

         rmse[_key].append(job['rmse'])
         smape[_key].append(job['smape'])
         u[_key].append(job['u'])
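
Downstream, process_point_jobs reads the new fields back from each job dict and forwards them, as scalars, to save_dataframe_point. A minimal sketch of that aggregation with hand-built job dicts (all values fabricated for illustration):

# Hand-built job dicts mimicking what run_point now returns.
jobs = [
    {'key': 'PWFTS2', 'rmse': 1.52, 'smape': 0.04, 'u': 0.91,
     'time': 0.33, 'window': 0, 'steps': 5, 'method': None},
    {'key': 'PWFTS2', 'rmse': 1.48, 'smape': 0.03, 'u': 0.89,
     'time': 0.35, 'window': 1, 'steps': 5, 'method': None},
]

rmse = {}
steps = None
method = None
for job in jobs:
    _key = job['key']
    if _key not in rmse:
        rmse[_key] = []
        steps = job['steps']    # scalar: one horizon per benchmark run
        method = job['method']  # scalar: one multi-step method per run
    rmse[_key].append(job['rmse'])

print(steps, method, rmse)  # 5 None {'PWFTS2': [1.52, 1.48]}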


@@ -89,17 +89,17 @@ class FTS(object):
         steps_ahead = kwargs.get("steps_ahead", None)

-        if type == 'point' and steps_ahead == None:
+        if type == 'point' and (steps_ahead == None or steps_ahead == 1):
             ret = self.forecast(ndata, **kwargs)
-        elif type == 'point' and steps_ahead != None:
+        elif type == 'point' and steps_ahead > 1:
             ret = self.forecast_ahead(ndata, steps_ahead, **kwargs)
-        elif type == 'interval' and steps_ahead == None:
+        elif type == 'interval' and (steps_ahead == None or steps_ahead == 1):
             ret = self.forecast_interval(ndata, **kwargs)
-        elif type == 'interval' and steps_ahead != None:
+        elif type == 'interval' and steps_ahead > 1:
             ret = self.forecast_ahead_interval(ndata, steps_ahead, **kwargs)
-        elif type == 'distribution' and steps_ahead == None:
+        elif type == 'distribution' and (steps_ahead == None or steps_ahead == 1):
             ret = self.forecast_distribution(ndata, **kwargs)
-        elif type == 'distribution' and steps_ahead != None:
+        elif type == 'distribution' and steps_ahead > 1:
             ret = self.forecast_ahead_distribution(ndata, steps_ahead, **kwargs)
         else:
             raise ValueError('The argument \'type\' has an unknown value.')
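
The net effect: a steps_ahead of None or 1 now routes to the one-step forecasters, and only an explicit horizon greater than one reaches the forecast_ahead_* variants. A minimal stub, not the pyFTS API, mimicking the point branch of this dispatch (the method name predict for the enclosing entry point is an assumption):

class StubFTS:
    # Stand-ins for the one-step and multi-step forecasters.
    def forecast(self, data, **kwargs):
        return 'forecast'

    def forecast_ahead(self, data, steps, **kwargs):
        return 'forecast_ahead({})'.format(steps)

    def predict(self, data, **kwargs):
        type = kwargs.get('type', 'point')
        steps_ahead = kwargs.get('steps_ahead', None)
        if type == 'point' and (steps_ahead == None or steps_ahead == 1):
            return self.forecast(data, **kwargs)
        elif type == 'point' and steps_ahead > 1:
            return self.forecast_ahead(data, steps_ahead, **kwargs)
        else:
            raise ValueError('The argument \'type\' has an unknown value.')

model = StubFTS()
print(model.predict([1, 2, 3]))                 # -> forecast
print(model.predict([1, 2, 3], steps_ahead=1))  # now also -> forecast
print(model.predict([1, 2, 3], steps_ahead=5))  # -> forecast_ahead(5)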