Issue #3 - Code documentation with PEP 257 compliance

parent: 18e795bcd3
commit: 9da53a4845
@@ -200,6 +200,7 @@ def crps(targets, densities):
 
 
 def get_point_statistics(data, model, indexer=None):
+    """Condensate all measures for point forecasters"""
     if indexer is not None:
         ndata = np.array(indexer.get_data(data[model.order:]))
     else:
@@ -232,6 +233,7 @@ def get_point_statistics(data, model, indexer=None):
 
 
 def get_interval_statistics(original, model):
+    """Condensate all measures for interval forecasters"""
     ret = list()
     forecasts = model.forecastInterval(original)
     ret.append(round(sharpness(forecasts), 2))
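The two hunks above document the accuracy-metric helpers (presumably pyFTS.benchmarks.Measures, given the import list later in this commit). As a minimal sketch — not part of the commit — of how a caller might consume them, assuming a trained pyFTS model `model` and a held-out series `data`:

# Sketch only: each helper condenses several measures into a list;
# the names in the comments are assumptions about the ordering.
from pyFTS.benchmarks import Measures

point_stats = Measures.get_point_statistics(data, model)        # e.g. RMSE, SMAPE, Theil's U
interval_stats = Measures.get_interval_statistics(data, model)  # e.g. sharpness, resolution, coverage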
@ -1,6 +1,9 @@
|
|||||||
#!/usr/bin/python
|
#!/usr/bin/python
|
||||||
# -*- coding: utf8 -*-
|
# -*- coding: utf8 -*-
|
||||||
|
|
||||||
|
"""Benchmarks to FTS methods"""
|
||||||
|
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
import time
|
import time
|
||||||
@@ -12,7 +15,7 @@ import matplotlib.pyplot as plt
 from mpl_toolkits.mplot3d import Axes3D
 # from sklearn.cross_validation import KFold
 from pyFTS.partitioners import partitioner, Grid, Huarng, Entropy, FCM
-from pyFTS.benchmarks import Measures, naive, arima, ResidualAnalysis, ProbabilityDistribution, Util
+from pyFTS.benchmarks import Measures, naive, arima, ResidualAnalysis, ProbabilityDistribution, Util, quantreg
 from pyFTS.common import Membership, FuzzySet, FLR, Transformations, Util
 from pyFTS import fts, chen, yu, ismailefendi, sadaei, hofts, hwang, pwfts, ifts
 from copy import deepcopy
@@ -27,17 +30,38 @@ styles = ['-','--','-.',':','.']
 nsty = len(styles)
 
 def get_benchmark_point_methods():
+    """Return all non FTS methods for point forecast"""
     return [naive.Naive, arima.ARIMA]
 
 def get_point_methods():
+    """Return all FTS methods for point forecast"""
     return [chen.ConventionalFTS, yu.WeightedFTS, ismailefendi.ImprovedWeightedFTS,
             sadaei.ExponentialyWeightedFTS, hofts.HighOrderFTS, pwfts.ProbabilisticWeightedFTS]
 
+
+def get_benchmark_interval_methods():
+    """Return all non FTS methods for interval forecast"""
+    return [quantreg.QuantileRegression]
+
+
 def get_interval_methods():
+    """Return all FTS methods for interval forecast"""
     return [ifts.IntervalFTS, pwfts.ProbabilisticWeightedFTS]
 
 
-def external_point_sliding_window(models, parameters, data, windowsize,train=0.8, dump=False, save=False, file=None, sintetic=True):
+def external_point_sliding_window(models, parameters, data, windowsize,train=0.8, dump=False,
+                                  save=False, file=None, sintetic=True):
+    """
+    Sliding window benchmarks for non FTS point forecasters
+    :param models: non FTS point forecasters
+    :param parameters: parameters for each model
+    :param data: data set
+    :param windowsize: size of sliding window
+    :param train: percentual of sliding window data used to train the models
+    :param dump:
+    :param save: save results
+    :param file: file path to save the results
+    :param sintetic: if true only the average and standard deviation of the results
+    :return: DataFrame with the results
+    """
     objs = {}
     lcolors = {}
     rmse = {}
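The new get_benchmark_interval_methods registry mirrors the existing ones: each function returns model classes, not instances, so a benchmark driver can instantiate models on demand. A small sketch of that pattern (not committed code; the empty name string follows the model("") convention visible later in this diff):

from pyFTS.benchmarks import benchmarks  # module path assumed from this commit's imports

for method in benchmarks.get_point_methods():
    model = method("")            # pyFTS models are constructed with a name string
    print(type(model).__name__)   # ConventionalFTS, WeightedFTS, ...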
@@ -91,6 +115,23 @@ def external_point_sliding_window(models, parameters, data, windowsize,train=0.8
 def point_sliding_window(data, windowsize, train=0.8,models=None,partitioners=[Grid.GridPartitioner],
                          partitions=[10], max_order=3,transformation=None,indexer=None,dump=False,
                          save=False, file=None, sintetic=True):
+    """
+    Sliding window benchmarks for FTS point forecasters
+    :param data:
+    :param windowsize: size of sliding window
+    :param train: percentual of sliding window data used to train the models
+    :param models: FTS point forecasters
+    :param partitioners: Universe of Discourse partitioner
+    :param partitions: the max number of partitions on the Universe of Discourse
+    :param max_order: the max order of the models (for high order models)
+    :param transformation: data transformation
+    :param indexer: seasonal indexer
+    :param dump:
+    :param save: save results
+    :param file: file path to save the results
+    :param sintetic: if true only the average and standard deviation of the results
+    :return: DataFrame with the results
+    """
 
     _process_start = time.time()
 
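An illustrative sketch of the sliding-window scheme these docstrings describe (assumed semantics, not the committed implementation): a window of `windowsize` points slides over `data`, the first `train` fraction of each window trains the models and the remainder tests them.

import numpy as np

def sliding_windows(data, windowsize, train=0.8, step=None):
    """Yield (train, test) splits of each window. `step` defaults to
    non-overlapping windows; this is an assumption for illustration."""
    step = step or windowsize
    for start in np.arange(0, len(data) - windowsize + 1, step):
        window = data[start:start + windowsize]
        cut = int(windowsize * train)
        yield window[:cut], window[cut:]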
@@ -210,7 +251,23 @@ def point_sliding_window(data, windowsize, train=0.8,models=None,partitioners=[G
 def all_point_forecasters(data_train, data_test, partitions, max_order=3, statistics=True, residuals=True,
                           series=True, save=False, file=None, tam=[20, 5], models=None, transformation=None,
                           distributions=False):
+    """
+    Fixed data benchmark for FTS point forecasters
+    :param data_train: data used to train the models
+    :param data_test: data used to test the models
+    :param partitions: the max number of partitions on the Universe of Discourse
+    :param max_order: the max order of the models (for high order models)
+    :param statistics: print statistics
+    :param residuals: print and plot residuals
+    :param series: plot time series
+    :param save: save results
+    :param file: file path to save the results
+    :param tam: figure dimensions to plot the graphs
+    :param models: list of models to benchmark
+    :param transformation: data transformation
+    :param distributions: plot distributions
+    :return:
+    """
     if models is None:
         models = get_point_methods()
 
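A hypothetical invocation matching the docstring above (the series `data` and the slice sizes are assumptions, not from the commit): train on one contiguous slice, test on the next, with 50 fuzzy sets and models up to order 3.

from pyFTS.benchmarks import benchmarks  # module path assumed

benchmarks.all_point_forecasters(data[:2000], data[2000:2500],
                                 partitions=50, max_order=3,
                                 statistics=True, residuals=False, series=False)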
@@ -1,3 +1,11 @@
+"""
+dispy Distributed Benchmarks to FTS methods
+
+To enable a dispy cluster node:
+
+python3 /usr/local/bin/dispynode.py -i [local IP] -d
+"""
+
 import random
 import dispy
 import dispy.httpd
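For context, the minimal dispy pattern this module builds on (a sketch, not the committed code): after starting dispynode.py on each worker as the new docstring shows, a JobCluster ships a function to those nodes and collects results. The node IP is an assumption mirroring the test script later in this diff.

import dispy

def compute(n):                      # runs remotely on a dispynode
    return n * n

cluster = dispy.JobCluster(compute, nodes=['192.168.1.42'])  # node IP assumed
jobs = [cluster.submit(i) for i in range(4)]
results = [job() for job in jobs]    # job() waits for and returns the value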
@@ -1,3 +1,7 @@
+"""
+joblib Parallelized Benchmarks to FTS methods
+"""
+
 from copy import deepcopy
 from joblib import Parallel, delayed
 import multiprocessing
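Likewise, the joblib pattern the module name refers to (sketch only): benchmark jobs are fanned out across the available cores with Parallel and delayed.

import multiprocessing
from joblib import Parallel, delayed

num_cores = multiprocessing.cpu_count()
# pow stands in for a benchmark job; the real jobs are the model runs
results = Parallel(n_jobs=num_cores)(delayed(pow)(i, 2) for i in range(8))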
@@ -64,9 +68,9 @@ def point_sliding_window(data, windowsize, train=0.8, models=None, partitioners=
     for model in benchmarks.get_point_methods():
         mfts = model("")
 
-        if mfts.isHighOrder:
+        if mfts.is_high_order:
             for order in np.arange(1, max_order + 1):
-                if order >= mfts.minOrder:
+                if order >= mfts.min_order:
                     mfts = model("")
                     mfts.order = order
                     pool.append(mfts)
@@ -162,9 +166,9 @@ def interval_sliding_window(data, windowsize, train=0.8, models=None, partitione
     for model in benchmarks.get_interval_methods():
         mfts = model("")
 
-        if mfts.isHighOrder:
+        if mfts.is_high_order:
             for order in np.arange(1, max_order + 1):
-                if order >= mfts.minOrder:
+                if order >= mfts.min_order:
                     mfts = model("")
                     mfts.order = order
                     pool.append(mfts)
@@ -261,9 +265,9 @@ def ahead_sliding_window(data, windowsize, train, steps,resolution, models=None,
     for model in benchmarks.get_interval_methods():
         mfts = model("")
 
-        if mfts.isHighOrder:
+        if mfts.is_high_order:
             for order in np.arange(1, max_order + 1):
-                if order >= mfts.minOrder:
+                if order >= mfts.min_order:
                     mfts = model("")
                     mfts.order = order
                     pool.append(mfts)
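The three hunks above apply the same rename (isHighOrder to is_high_order, minOrder to min_order) to one shared pattern. Isolated as a sketch, with the non-high-order branch assumed since the diff does not show it:

def build_pool(methods, max_order=3):
    """Expand each model class into one instance per admissible order."""
    pool = []
    for cls in methods:
        probe = cls("")              # models are constructed with a name string
        if probe.is_high_order:
            for order in range(1, max_order + 1):
                if order >= probe.min_order:
                    mfts = cls("")
                    mfts.order = order
                    pool.append(mfts)
        else:
            pool.append(probe)       # first-order models enroll once (assumed branch)
    return pool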
@@ -55,7 +55,7 @@ from pyFTS.benchmarks import distributed_benchmarks as bchmk
 bchmk.point_sliding_window(taiex,2000,train=0.8, #models=[yu.WeightedFTS], # #
                            partitioners=[Grid.GridPartitioner], #Entropy.EntropyPartitioner], # FCM.FCMPartitioner, ],
                            partitions= np.arange(10,200,step=5), #transformation=diff,
-                           dump=False, save=False, file="experiments/nasdaq_point_distributed.csv",
+                           dump=True, save=False, file="experiments/nasdaq_point_distributed.csv",
                            nodes=['192.168.1.42']) #, depends=[hofts, ifts])
 
 #bchmk.testa(taiex,[10,20],partitioners=[Grid.GridPartitioner], nodes=['192.168.0.109', '192.168.0.101'])
yu.py (4 changed lines)
@@ -3,8 +3,8 @@ from pyFTS.common import FuzzySet,FLR
 from pyFTS import fts
 
 
-class WeightedFLRG(fts.FTS):
-    def __init__(self, order, LHS, **kwargs):
+class WeightedFLRG(object):
+    def __init__(self, LHS, **kwargs):
         self.LHS = LHS
         self.RHS = []
         self.count = 1.0