"""
dispy distributed benchmarks for FTS methods.

To enable a dispy cluster node:

python3 /usr/local/bin/dispynode.py -i [local IP] -d
"""

import datetime
import time
from copy import deepcopy

import dispy
import dispy.httpd
import numpy as np

from pyFTS.benchmarks import benchmarks, Util as bUtil, naive, quantreg, arima
from pyFTS.common import Util
from pyFTS.partitioners import Grid
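
# Typical workflow (illustrative sketch, not part of the original module): start a
# dispynode daemon on every worker machine, as described in the module docstring,
# then launch a distributed point benchmark from the client script. The module path,
# data file, node addresses and parameter values below are assumptions for
# demonstration only.
#
#   import pandas as pd
#   from pyFTS.benchmarks import distributed_benchmarks as dbench
#
#   dataset = pd.read_csv("TAIEX.csv")["avg"].values
#   results = dbench.point_sliding_window(dataset, windowsize=1000, train=0.8, inc=0.1,
#                                         partitions=[10, 20], max_order=3,
#                                         nodes=["192.168.0.2", "192.168.0.3"],
#                                         save=True, file="point_benchmarks.csv")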


def run_point(mfts, partitioner, train_data, test_data, window_key=None, transformation=None, indexer=None):
    """
    Point forecast benchmark function to be executed on cluster nodes
    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param window_key: id of the sliding window
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    import time
    from pyFTS import yu, chen, hofts, ifts, pwfts, ismailefendi, sadaei, song, cheng, hwang
    from pyFTS.partitioners import Grid, Entropy, FCM
    from pyFTS.benchmarks import Measures, naive, arima, quantreg
    from pyFTS.common import Transformations

    tmp = [song.ConventionalFTS, chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,
           cheng.TrendWeightedFTS, sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, hwang.HighOrderFTS,
           pwfts.ProbabilisticWeightedFTS]

    tmp2 = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]

    tmp4 = [naive.Naive, arima.ARIMA, quantreg.QuantileRegression]

    tmp3 = [Measures.get_point_statistics]

    tmp5 = [Transformations.Differential]

    if mfts.benchmark_only:
        _key = mfts.shortname + str(mfts.order if mfts.order is not None else "")
    else:
        pttr = str(partitioner.__module__).split('.')[-1]
        _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
        mfts.partitioner = partitioner

    if transformation is not None:
        mfts.appendTransformation(transformation)

    _start = time.time()
    mfts.train(train_data, partitioner.sets, order=mfts.order)
    _end = time.time()
    times = _end - _start

    _start = time.time()
    _rmse, _smape, _u = Measures.get_point_statistics(test_data, mfts, indexer)
    _end = time.time()
    times += _end - _start

    ret = {'key': _key, 'obj': mfts, 'rmse': _rmse, 'smape': _smape, 'u': _u, 'time': times, 'window': window_key}

    return ret
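
# Illustrative sketch (assumed data and parameters, not part of the original module):
# run_point can also be exercised locally, without a dispy cluster, to sanity check a
# single model before launching a full distributed run.
#
#   import numpy as np
#   from pyFTS import chen
#   from pyFTS.partitioners import Grid
#
#   series = np.cumsum(np.random.normal(0, 1, 300)) + 100
#   train, test = series[:240], series[240:]
#   part = Grid.GridPartitioner(train, 10)
#   model = chen.ConventionalFTS("")
#   model.order = 1
#   print(run_point(model, part, train, test, window_key=0))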


def point_sliding_window(data, windowsize, train=0.8, inc=0.1, models=None, partitioners=[Grid.GridPartitioner],
                         partitions=[10], max_order=3, transformation=None, indexer=None, dump=False,
                         benchmark_models=None, benchmark_models_parameters=None,
                         save=False, file=None, sintetic=False, nodes=None, depends=None):
    """
    Distributed sliding window benchmarks for FTS point forecasters
    :param data: time series used in the benchmarks
    :param windowsize: size of sliding window
    :param train: percentage of the sliding window data used to train the models
    :param inc: increment of the sliding window, as a percentage of the window size
    :param models: FTS point forecasters
    :param partitioners: Universe of Discourse partitioners
    :param partitions: list of numbers of partitions of the Universe of Discourse
    :param max_order: the max order of the models (for high order models)
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :param dump: if true, print debug information for each window
    :param benchmark_models: non-FTS models to benchmark
    :param benchmark_models_parameters: non-FTS models parameters
    :param save: save results
    :param file: file path to save the results
    :param sintetic: if true, return only the average and standard deviation of the results
    :param nodes: list of cluster nodes to distribute tasks
    :param depends: list of module dependencies
    :return: DataFrame with the results
    """
    cluster = dispy.JobCluster(run_point, nodes=nodes)  # , depends=dependencies)
    http_server = dispy.httpd.DispyHTTPServer(cluster)

    _process_start = time.time()

    print("Process Start: {0: %H:%M:%S}".format(datetime.datetime.now()))

    jobs = []
    objs = {}
    rmse = {}
    smape = {}
    u = {}
    times = {}

    pool = build_model_pool_point(models, max_order, benchmark_models, benchmark_models_parameters)

    experiments = 0
    for ct, train, test in Util.sliding_window(data, windowsize, train, inc):
        experiments += 1

        benchmarks_only = {}

        if dump: print('\nWindow: {0}\n'.format(ct))

        for partition in partitions:

            for partitioner in partitioners:

                data_train_fs = partitioner(train, partition, transformation=transformation)

                for _id, m in enumerate(pool, start=0):
                    if m.benchmark_only and m.shortname in benchmarks_only:
                        continue
                    else:
                        benchmarks_only[m.shortname] = m
                    job = cluster.submit(m, data_train_fs, train, test, ct, transformation)
                    job.id = _id  # associate an ID to identify jobs (if needed later)
                    jobs.append(job)

    for job in jobs:
        tmp = job()
        if job.status == dispy.DispyJob.Finished and tmp is not None:
            if tmp['key'] not in objs:
                objs[tmp['key']] = tmp['obj']
                rmse[tmp['key']] = []
                smape[tmp['key']] = []
                u[tmp['key']] = []
                times[tmp['key']] = []
            rmse[tmp['key']].append(tmp['rmse'])
            smape[tmp['key']].append(tmp['smape'])
            u[tmp['key']].append(tmp['u'])
            times[tmp['key']].append(tmp['time'])
            print(tmp['key'], tmp['window'])
        else:
            print(job.exception)
            print(job.stdout)

    _process_end = time.time()

    print("Process End: {0: %H:%M:%S}".format(datetime.datetime.now()))

    print("Process Duration: {0}".format(_process_end - _process_start))

    cluster.wait()  # wait for all jobs to finish

    cluster.print_status()

    http_server.shutdown()  # this waits until browser gets all updates
    cluster.close()

    return bUtil.save_dataframe_point(experiments, file, objs, rmse, save, sintetic, smape, times, u)


def build_model_pool_point(models, max_order, benchmark_models, benchmark_models_parameters):
    pool = []

    if benchmark_models is None and models is None:
        benchmark_models = [arima.ARIMA, arima.ARIMA, arima.ARIMA, arima.ARIMA,
                            quantreg.QuantileRegression, quantreg.QuantileRegression]

    if benchmark_models_parameters is None:
        benchmark_models_parameters = [(1, 0, 0), (1, 0, 1), (2, 0, 1), (2, 0, 2), 1, 2]

    if models is None:
        models = benchmarks.get_point_methods()

    for model in models:
        mfts = model("")

        if mfts.is_high_order:
            for order in np.arange(1, max_order + 1):
                if order >= mfts.min_order:
                    mfts = model("")
                    mfts.order = order
                    pool.append(mfts)
        else:
            mfts.order = 1
            pool.append(mfts)

    if benchmark_models is not None:
        for count, model in enumerate(benchmark_models, start=0):
            par = benchmark_models_parameters[count]
            mfts = model(str(par if par is not None else ""))
            mfts.order = par
            pool.append(mfts)

    return pool
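
# Illustrative sketch (not part of the original module): with the defaults above,
# build_model_pool_point yields one instance of each FTS point method per admissible
# order up to max_order, plus one instance per benchmark model/parameter pair.
#
#   pool = build_model_pool_point(None, 3, None, None)
#   for m in pool:
#       print(m.shortname, m.order)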


def run_interval(mfts, partitioner, train_data, test_data, window_key=None, transformation=None, indexer=None):
    """
    Interval forecast benchmark function to be executed on cluster nodes
    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param window_key: id of the sliding window
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    import time
    from pyFTS import hofts, ifts, pwfts
    from pyFTS.partitioners import Grid, Entropy, FCM
    from pyFTS.benchmarks import Measures, arima, quantreg

    tmp = [hofts.HighOrderFTS, ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS]

    tmp2 = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]

    tmp4 = [arima.ARIMA, quantreg.QuantileRegression]

    tmp3 = [Measures.get_interval_statistics]

    if mfts.benchmark_only:
        _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha)
    else:
        pttr = str(partitioner.__module__).split('.')[-1]
        _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
        mfts.partitioner = partitioner

    if transformation is not None:
        mfts.appendTransformation(transformation)

    _start = time.time()
    mfts.train(train_data, partitioner.sets, order=mfts.order)
    _end = time.time()
    times = _end - _start

    _start = time.time()
    _sharp, _res, _cov, _q05, _q25, _q75, _q95 = Measures.get_interval_statistics(test_data, mfts)
    _end = time.time()
    times += _end - _start

    ret = {'key': _key, 'obj': mfts, 'sharpness': _sharp, 'resolution': _res, 'coverage': _cov, 'time': times,
           'Q05': _q05, 'Q25': _q25, 'Q75': _q75, 'Q95': _q95, 'window': window_key}

    return ret


def interval_sliding_window(data, windowsize, train=0.8, inc=0.1, models=None, partitioners=[Grid.GridPartitioner],
                            partitions=[10], max_order=3, transformation=None, indexer=None, dump=False,
                            benchmark_models=None, benchmark_models_parameters=None,
                            save=False, file=None, sintetic=False, nodes=None, depends=None):
    """
    Distributed sliding window benchmarks for FTS interval forecasters
    :param data: time series used in the benchmarks
    :param windowsize: size of sliding window
    :param train: percentage of the sliding window data used to train the models
    :param inc: increment of the sliding window, as a percentage of the window size
    :param models: FTS interval forecasters
    :param partitioners: Universe of Discourse partitioners
    :param partitions: list of numbers of partitions of the Universe of Discourse
    :param max_order: the max order of the models (for high order models)
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :param dump: if true, print debug information for each window
    :param benchmark_models: non-FTS models to benchmark
    :param benchmark_models_parameters: non-FTS models parameters
    :param save: save results
    :param file: file path to save the results
    :param sintetic: if true, return only the average and standard deviation of the results
    :param nodes: list of cluster nodes to distribute tasks
    :param depends: list of module dependencies
    :return: DataFrame with the results
    """
    alphas = [0.05, 0.25]

    if benchmark_models is None and models is None:
        benchmark_models = [arima.ARIMA, arima.ARIMA, arima.ARIMA, arima.ARIMA,
                            quantreg.QuantileRegression, quantreg.QuantileRegression]

    if benchmark_models_parameters is None:
        benchmark_models_parameters = [(1, 0, 0), (1, 0, 1), (2, 0, 1), (2, 0, 2), 1, 2]

    cluster = dispy.JobCluster(run_interval, nodes=nodes)  # , depends=dependencies)
    http_server = dispy.httpd.DispyHTTPServer(cluster)

    _process_start = time.time()

    print("Process Start: {0: %H:%M:%S}".format(datetime.datetime.now()))

    pool = []
    jobs = []
    objs = {}
    sharpness = {}
    resolution = {}
    coverage = {}
    q05 = {}
    q25 = {}
    q75 = {}
    q95 = {}
    times = {}

    if models is None:
        models = benchmarks.get_interval_methods()

    for model in models:
        mfts = model("")

        if mfts.is_high_order:
            for order in np.arange(1, max_order + 1):
                if order >= mfts.min_order:
                    mfts = model("")
                    mfts.order = order
                    pool.append(mfts)
        else:
            mfts.order = 1
            pool.append(mfts)

    if benchmark_models is not None:
        for count, model in enumerate(benchmark_models, start=0):
            for a in alphas:
                par = benchmark_models_parameters[count]
                mfts = model(str(par if par is not None else ""), alpha=a)
                mfts.order = par
                pool.append(mfts)

    experiments = 0
    for ct, train, test in Util.sliding_window(data, windowsize, train, inc=inc):
        experiments += 1

        benchmarks_only = {}

        if dump: print('\nWindow: {0}\n'.format(ct))

        for partition in partitions:

            for partitioner in partitioners:

                data_train_fs = partitioner(train, partition, transformation=transformation)

                for _id, m in enumerate(pool, start=0):
                    if m.benchmark_only and m.shortname in benchmarks_only:
                        continue
                    else:
                        benchmarks_only[m.shortname] = m
                    job = cluster.submit(m, data_train_fs, train, test, ct, transformation)
                    job.id = _id  # associate an ID to identify jobs (if needed later)
                    jobs.append(job)

    for job in jobs:
        tmp = job()
        if job.status == dispy.DispyJob.Finished and tmp is not None:
            if tmp['key'] not in objs:
                objs[tmp['key']] = tmp['obj']
                sharpness[tmp['key']] = []
                resolution[tmp['key']] = []
                coverage[tmp['key']] = []
                times[tmp['key']] = []
                q05[tmp['key']] = []
                q25[tmp['key']] = []
                q75[tmp['key']] = []
                q95[tmp['key']] = []

            sharpness[tmp['key']].append(tmp['sharpness'])
            resolution[tmp['key']].append(tmp['resolution'])
            coverage[tmp['key']].append(tmp['coverage'])
            times[tmp['key']].append(tmp['time'])
            q05[tmp['key']].append(tmp['Q05'])
            q25[tmp['key']].append(tmp['Q25'])
            q75[tmp['key']].append(tmp['Q75'])
            q95[tmp['key']].append(tmp['Q95'])
            print(tmp['key'])
        else:
            print(job.exception)
            print(job.stdout)

    _process_end = time.time()

    print("Process End: {0: %H:%M:%S}".format(datetime.datetime.now()))

    print("Process Duration: {0}".format(_process_end - _process_start))

    cluster.wait()  # wait for all jobs to finish

    cluster.print_status()

    http_server.shutdown()  # this waits until browser gets all updates
    cluster.close()

    return bUtil.save_dataframe_interval(coverage, experiments, file, objs, resolution, save, sharpness, sintetic,
                                         times, q05, q25, q75, q95)
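
# Illustrative sketch (assumed data and parameters, not part of the original module):
# an interval benchmark run that also evaluates ARIMA and quantile regression as
# non-FTS baselines, with one pool entry per (model, parameter, alpha) combination.
#
#   import numpy as np
#   from pyFTS.benchmarks import arima, quantreg
#
#   dataset = np.cumsum(np.random.normal(0, 1, 3000)) + 100
#   results = interval_sliding_window(dataset, windowsize=1000, train=0.8, inc=0.5,
#                                     partitions=[10, 20],
#                                     benchmark_models=[arima.ARIMA, quantreg.QuantileRegression],
#                                     benchmark_models_parameters=[(1, 0, 0), 1],
#                                     nodes=["127.0.0.1"],
#                                     save=True, file="interval_benchmarks.csv")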


def run_ahead(mfts, partitioner, train_data, test_data, steps, resolution, window_key=None, transformation=None, indexer=None):
    """
    Probabilistic m-step ahead forecast benchmark function to be executed on cluster nodes
    :param mfts: FTS model
    :param partitioner: Universe of Discourse partitioner
    :param train_data: data used to train the model
    :param test_data: data used to test the model
    :param steps: number of steps ahead to forecast
    :param resolution: resolution of the forecast probability distributions
    :param window_key: id of the sliding window
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :return: a dictionary with the benchmark results
    """
    import time
    import numpy as np
    from pyFTS import hofts, ifts, pwfts, ensemble
    from pyFTS.partitioners import Grid, Entropy, FCM
    from pyFTS.benchmarks import Measures, arima, quantreg
    from pyFTS.models.seasonal import SeasonalIndexer

    tmp = [hofts.HighOrderFTS, ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS, arima.ARIMA, ensemble.AllMethodEnsembleFTS]

    tmp2 = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitioner]

    tmp3 = [Measures.get_distribution_statistics, SeasonalIndexer.SeasonalIndexer, SeasonalIndexer.LinearSeasonalIndexer]

    if mfts.benchmark_only:
        _key = mfts.shortname + str(mfts.order if mfts.order is not None else "") + str(mfts.alpha)
    else:
        pttr = str(partitioner.__module__).split('.')[-1]
        _key = mfts.shortname + " n = " + str(mfts.order) + " " + pttr + " q = " + str(partitioner.partitions)
        mfts.partitioner = partitioner

    if transformation is not None:
        mfts.appendTransformation(transformation)

    if mfts.has_seasonality:
        mfts.indexer = indexer

    try:
        _start = time.time()
        mfts.train(train_data, partitioner.sets, order=mfts.order)
        _end = time.time()
        times = _end - _start

        _crps1, _crps2, _t1, _t2 = Measures.get_distribution_statistics(test_data, mfts, steps=steps,
                                                                        resolution=resolution)
        _t1 += times
        _t2 += times
    except Exception as e:
        print(e)
        _crps1 = np.nan
        _crps2 = np.nan
        _t1 = np.nan
        _t2 = np.nan

    ret = {'key': _key, 'obj': mfts, 'CRPS_Interval': _crps1, 'CRPS_Distribution': _crps2, 'TIME_Interval': _t1,
           'TIME_Distribution': _t2, 'window': window_key}

    return ret


def ahead_sliding_window(data, windowsize, steps, resolution, train=0.8, inc=0.1, models=None, partitioners=[Grid.GridPartitioner],
                         partitions=[10], max_order=3, transformation=None, indexer=None, dump=False,
                         benchmark_models=None, benchmark_models_parameters=None,
                         save=False, file=None, synthetic=False, nodes=None):
    """
    Distributed sliding window benchmarks for FTS probabilistic forecasters
    :param data: time series used in the benchmarks
    :param windowsize: size of sliding window
    :param steps: number of steps ahead to forecast
    :param resolution: resolution of the forecast probability distributions
    :param train: percentage of the sliding window data used to train the models
    :param inc: increment of the sliding window, as a percentage of the window size
    :param models: FTS probabilistic forecasters
    :param partitioners: Universe of Discourse partitioners
    :param partitions: list of numbers of partitions of the Universe of Discourse
    :param max_order: the max order of the models (for high order models)
    :param transformation: data transformation
    :param indexer: seasonal indexer
    :param dump: if true, print debug information for each window
    :param benchmark_models: non-FTS models to benchmark
    :param benchmark_models_parameters: non-FTS models parameters
    :param save: save results
    :param file: file path to save the results
    :param synthetic: if true, return only the average and standard deviation of the results
    :param nodes: list of cluster nodes to distribute tasks
    :return: DataFrame with the results
    """
    alphas = [0.05, 0.25]

    if benchmark_models is None and models is None:
        benchmark_models = [arima.ARIMA, arima.ARIMA, arima.ARIMA, arima.ARIMA, arima.ARIMA]

    if benchmark_models_parameters is None:
        benchmark_models_parameters = [(1, 0, 0), (1, 0, 1), (2, 0, 0), (2, 0, 1), (2, 0, 2)]

    cluster = dispy.JobCluster(run_ahead, nodes=nodes)  # , depends=dependencies)
    http_server = dispy.httpd.DispyHTTPServer(cluster)

    _process_start = time.time()

    print("Process Start: {0: %H:%M:%S}".format(datetime.datetime.now()))

    pool = []
    jobs = []
    objs = {}
    crps_interval = {}
    crps_distr = {}
    times1 = {}
    times2 = {}

    if models is None:
        models = benchmarks.get_probabilistic_methods()

    for model in models:
        mfts = model("")

        if mfts.is_high_order:
            for order in np.arange(1, max_order + 1):
                if order >= mfts.min_order:
                    mfts = model("")
                    mfts.order = order
                    pool.append(mfts)
        else:
            pool.append(mfts)

    if benchmark_models is not None:
        for count, model in enumerate(benchmark_models, start=0):
            for a in alphas:
                par = benchmark_models_parameters[count]
                mfts = model(str(par if par is not None else ""), alpha=a, dist=True)
                mfts.order = par
                pool.append(mfts)

    experiments = 0
    for ct, train, test in Util.sliding_window(data, windowsize, train, inc=inc):
        experiments += 1

        benchmarks_only = {}

        if dump: print('\nWindow: {0}\n'.format(ct))

        for partition in partitions:

            for partitioner in partitioners:

                data_train_fs = partitioner(train, partition, transformation=transformation)

                for _id, m in enumerate(pool, start=0):
                    if m.benchmark_only and m.shortname in benchmarks_only:
                        continue
                    else:
                        benchmarks_only[m.shortname] = m
                    job = cluster.submit(m, data_train_fs, train, test, steps, resolution, ct, transformation, indexer)
                    job.id = _id  # associate an ID to identify jobs (if needed later)
                    jobs.append(job)

    for job in jobs:
        tmp = job()
        if job.status == dispy.DispyJob.Finished and tmp is not None:
            if tmp['key'] not in objs:
                objs[tmp['key']] = tmp['obj']
                crps_interval[tmp['key']] = []
                crps_distr[tmp['key']] = []
                times1[tmp['key']] = []
                times2[tmp['key']] = []
            crps_interval[tmp['key']].append(tmp['CRPS_Interval'])
            crps_distr[tmp['key']].append(tmp['CRPS_Distribution'])
            times1[tmp['key']].append(tmp['TIME_Interval'])
            times2[tmp['key']].append(tmp['TIME_Distribution'])
        else:
            print(job.exception)
            print(job.stdout)

    _process_end = time.time()

    print("Process End: {0: %H:%M:%S}".format(datetime.datetime.now()))

    print("Process Duration: {0}".format(_process_end - _process_start))

    cluster.wait()  # wait for all jobs to finish

    cluster.print_status()

    http_server.shutdown()  # this waits until browser gets all updates
    cluster.close()

    return bUtil.save_dataframe_ahead(experiments, file, objs, crps_interval, crps_distr, times1, times2, save, synthetic)
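
# Illustrative sketch (assumed data and parameters, not part of the original module):
# a probabilistic m-step ahead benchmark, forecasting 10 steps ahead and building the
# forecast distributions with a grid resolution of 10 units of the series.
#
#   import numpy as np
#
#   dataset = np.cumsum(np.random.normal(0, 1, 3000)) + 100
#   results = ahead_sliding_window(dataset, windowsize=1000, steps=10, resolution=10,
#                                  train=0.8, inc=0.5, partitions=[10],
#                                  nodes=["127.0.0.1"],
#                                  save=True, file="ahead_benchmarks.csv")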