Refactoring: Type Hints

Petrônio Cândido 2021-08-15 15:14:32 -03:00
parent ad3e857024
commit cee6474034
11 changed files with 84 additions and 78 deletions

View File

@@ -12,15 +12,15 @@ class FuzzySet(object):
        """
        Create a Fuzzy Set
        """
-        self.name = name
+        self.name : str = name
        """The fuzzy set name"""
        self.mf = mf
        """The membership function"""
-        self.parameters = parameters
+        self.parameters : list = parameters
        """The parameters of the membership function"""
-        self.centroid = centroid
+        self.centroid : float = centroid
        """The fuzzy set center of mass (or midpoint)"""
-        self.alpha = alpha
+        self.alpha : float = alpha
        """The alpha cut value"""
        self.type = kwargs.get('type', 'common')
        """The fuzzy set type (common, composite, nonstationary, etc)"""
@@ -170,7 +170,7 @@ def set_ordered(fuzzy_sets):
    return [k.name for k in sorted(tmp1, key=lambda x: x.centroid)]

-def fuzzyfy_instance(inst, fuzzy_sets, ordered_sets=None):
+def fuzzyfy_instance(inst, fuzzy_sets : dict, ordered_sets : list = None):
    """
    Calculate the membership values for a data point given fuzzy sets
@@ -191,7 +191,7 @@ def fuzzyfy_instance(inst, fuzzy_sets, ordered_sets=None):
    return mv

-def fuzzyfy_instances(data, fuzzy_sets, ordered_sets=None):
+def fuzzyfy_instances(data : list, fuzzy_sets : dict, ordered_sets=None) -> list:
    """
    Calculate the membership values for a data point given fuzzy sets
@@ -209,7 +209,7 @@ def fuzzyfy_instances(data, fuzzy_sets, ordered_sets=None):
    return ret

-def get_fuzzysets(inst, fuzzy_sets, ordered_sets=None, alpha_cut=0.0):
+def get_fuzzysets(inst, fuzzy_sets : dict, ordered_sets : list = None, alpha_cut : float = 0.0) -> list:
    """
    Return the fuzzy sets whose membership value for inst is greater than the alpha_cut
@@ -232,7 +232,7 @@ def get_fuzzysets(inst, fuzzy_sets, ordered_sets=None, alpha_cut=0.0):
        raise ex

-def get_maximum_membership_fuzzyset(inst, fuzzy_sets, ordered_sets=None):
+def get_maximum_membership_fuzzyset(inst, fuzzy_sets, ordered_sets=None) -> FuzzySet:
    """
    Fuzzify a data point, returning the fuzzy set with maximum membership value
@@ -248,7 +248,7 @@ def get_maximum_membership_fuzzyset(inst, fuzzy_sets, ordered_sets=None):
    return fuzzy_sets[key]

-def get_maximum_membership_fuzzyset_index(inst, fuzzy_sets):
+def get_maximum_membership_fuzzyset_index(inst, fuzzy_sets) -> int:
    """
    Fuzzify a data point, returning the fuzzy set with maximum membership value
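
The fuzzyfication helpers above expect the dict of fuzzy sets and the ordered list of set names that a partitioner produces. A minimal usage sketch, assuming a Grid partitioner and illustrative data (the variable names below are not from this commit):

import numpy as np
from pyFTS.common import FuzzySet
from pyFTS.partitioners import Grid

data = np.random.normal(10, 2, 200)                 # illustrative series
part = Grid.GridPartitioner(data=data, npart=10)    # builds part.sets (dict) and part.ordered_sets (list)

# membership of a single value in every fuzzy set
mv = FuzzySet.fuzzyfy_instance(12.3, part.sets, part.ordered_sets)

# names of the sets whose membership for 12.3 exceeds the alpha cut
active = FuzzySet.get_fuzzysets(12.3, part.sets, part.ordered_sets, alpha_cut=0.1)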

View File

@@ -15,6 +15,3 @@ from pyFTS.common.transformations.trend import LinearTrend
 from pyFTS.common.transformations.som import SOMTransformation
 from pyFTS.common.transformations.autoencoder import AutoencoderTransformation
 from pyFTS.common.transformations.normalization import Normalization

View File

@@ -1,6 +1,9 @@
 import numpy as np
 import pandas as pd
-from pyFTS.common import FuzzySet, SortedCollection, tree, Util
+from pyFTS.common import FuzzySet, Util
+from pyFTS.common.transformations import transformation
+from pyFTS.partitioners import partitioner
+from pyFTS.probabilistic import ProbabilityDistribution

 class FTS(object):
@@ -11,72 +14,72 @@ class FTS(object):
        """
        Create a Fuzzy Time Series model
        """
-        self.flrgs = {}
+        self.flrgs: dict = {}
        """The list of Fuzzy Logical Relationship Groups - FLRG"""
-        self.order = kwargs.get('order',1)
+        self.order : int = kwargs.get('order',1)
        """An integer with the model order (number of past lags used on forecasting)"""
-        self.shortname = kwargs.get('name',"")
+        self.shortname : str = kwargs.get('name',"")
        """A string with a short name or alias for the model"""
-        self.name = kwargs.get('name',"")
+        self.name : str = kwargs.get('name',"")
        """A string with the model name"""
-        self.detail = kwargs.get('name',"")
+        self.detail : str = kwargs.get('name',"")
        """A string with the model detailed information"""
-        self.is_wrapper = False
+        self.is_wrapper : bool = False
        """Indicates that this model is a wrapper for other(s) method(s)"""
-        self.is_high_order = False
+        self.is_high_order : bool = False
        """A boolean value indicating if the model supports orders greater than 1, default: False"""
-        self.min_order = 1
+        self.min_order : int = 1
        """In high order models, this integer value indicates the minimal order supported for the model, default: 1"""
-        self.has_seasonality = False
+        self.has_seasonality : bool = False
        """A boolean value indicating if the model supports seasonal indexers, default: False"""
-        self.has_point_forecasting = True
+        self.has_point_forecasting : bool = True
        """A boolean value indicating if the model supports point forecasting, default: True"""
-        self.has_interval_forecasting = False
+        self.has_interval_forecasting : bool = False
        """A boolean value indicating if the model supports interval forecasting, default: False"""
-        self.has_probability_forecasting = False
+        self.has_probability_forecasting : bool = False
        """A boolean value indicating if the model supports probabilistic forecasting, default: False"""
-        self.is_multivariate = False
+        self.is_multivariate : bool = False
        """A boolean value indicating if the model supports multivariate time series (Pandas DataFrame), default: False"""
-        self.is_clustered = False
+        self.is_clustered : bool = False
        """A boolean value indicating if the model supports multivariate time series (Pandas DataFrame), but works like
        a monovariate method, default: False"""
-        self.dump = False
+        self.dump : bool = False
-        self.transformations = []
+        self.transformations : list[transformation.Transformation] = []
        """A list with the data transformations (common.Transformations) applied on model pre and post processing, default: []"""
-        self.transformations_param = []
+        self.transformations_param : list = []
        """A list with the specific parameters for each data transformation"""
-        self.original_max = 0
+        self.original_max : float = 0.0
        """A float with the upper limit of the Universe of Discourse, the maximal value found on training data"""
-        self.original_min = 0
+        self.original_min : float = 0.0
        """A float with the lower limit of the Universe of Discourse, the minimal value found on training data"""
-        self.partitioner = kwargs.get("partitioner", None)
+        self.partitioner : partitioner.Partitioner = kwargs.get("partitioner", None)
        """A pyFTS.partitioners.Partitioner object with the Universe of Discourse partitioner used on the model. This is a mandatory dependency."""
        if self.partitioner != None:
            self.sets = self.partitioner.sets
-        self.auto_update = False
+        self.auto_update : bool = False
        """A boolean value indicating that model is incremental"""
-        self.benchmark_only = False
+        self.benchmark_only : bool = False
        """A boolean value indicating a façade for external (non-FTS) model used on benchmarks or ensembles."""
        self.indexer = kwargs.get("indexer", None)
        """A pyFTS.models.seasonal.Indexer object for indexing the time series data"""
-        self.uod_clip = kwargs.get("uod_clip", True)
+        self.uod_clip : bool = kwargs.get("uod_clip", True)
        """Flag indicating if the test data will be clipped inside the training Universe of Discourse"""
-        self.alpha_cut = kwargs.get("alpha_cut", 0.0)
+        self.alpha_cut : float = kwargs.get("alpha_cut", 0.0)
        """A float with the minimal membership to be considered on the fuzzyfication process"""
-        self.lags = kwargs.get("lags", None)
+        self.lags : list[int] = kwargs.get("lags", None)
        """The list of lag indexes for high order models"""
-        self.max_lag = self.order
+        self.max_lag : int = self.order
        """An integer indicating the largest lag used by the model. This value also indicates the minimum number of past lags
        needed to forecast a single step ahead"""
-        self.log = pd.DataFrame([],columns=["Datetime","Operation","Value"])
+        self.log : pd.DataFrame = pd.DataFrame([],columns=["Datetime","Operation","Value"])
        """"""
-        self.is_time_variant = False
+        self.is_time_variant : bool = False
        """A boolean value indicating if this model is time variant"""
-        self.standard_horizon = kwargs.get("standard_horizon", 1)
+        self.standard_horizon : int = kwargs.get("standard_horizon", 1)
        """Standard forecasting horizon (Default: 1)"""

-    def fuzzy(self, data):
+    def fuzzy(self, data) -> dict:
        """
        Fuzzify a data point
@@ -198,7 +201,7 @@ class FTS(object):
        return ret

-    def forecast(self, data, **kwargs):
+    def forecast(self, data, **kwargs) -> list:
        """
        Point forecast one step ahead
@@ -208,7 +211,7 @@ class FTS(object):
        """
        raise NotImplementedError('This model do not perform one step ahead point forecasts!')

-    def forecast_interval(self, data, **kwargs):
+    def forecast_interval(self, data, **kwargs) -> list:
        """
        Interval forecast one step ahead
@@ -218,7 +221,7 @@ class FTS(object):
        """
        raise NotImplementedError('This model do not perform one step ahead interval forecasts!')

-    def forecast_distribution(self, data, **kwargs):
+    def forecast_distribution(self, data, **kwargs) -> list[ProbabilityDistribution.ProbabilityDistribution]:
        """
        Probabilistic forecast one step ahead
@@ -228,7 +231,7 @@ class FTS(object):
        """
        raise NotImplementedError('This model do not perform one step ahead distribution forecasts!')

-    def forecast_multivariate(self, data, **kwargs):
+    def forecast_multivariate(self, data, **kwargs) -> pd.DataFrame:
        """
        Multivariate forecast one step ahead
@@ -239,7 +242,7 @@ class FTS(object):
        raise NotImplementedError('This model do not perform one step ahead multivariate forecasts!')

-    def forecast_ahead(self, data, steps, **kwargs):
+    def forecast_ahead(self, data, steps, **kwargs) -> list:
        """
        Point forecast from 1 to H steps ahead, where H is given by the steps parameter
@@ -269,7 +272,7 @@ class FTS(object):
        return ret[-steps:]

-    def forecast_ahead_interval(self, data, steps, **kwargs):
+    def forecast_ahead_interval(self, data, steps, **kwargs) -> list:
        """
        Interval forecast from 1 to H steps ahead, where H is given by the steps parameter
@@ -280,7 +283,7 @@ class FTS(object):
        """
        raise NotImplementedError('This model do not perform multi step ahead interval forecasts!')

-    def forecast_ahead_distribution(self, data, steps, **kwargs):
+    def forecast_ahead_distribution(self, data, steps, **kwargs) -> list[ProbabilityDistribution.ProbabilityDistribution]:
        """
        Probabilistic forecast from 1 to H steps ahead, where H is given by the steps parameter
@@ -291,7 +294,7 @@ class FTS(object):
        """
        raise NotImplementedError('This model do not perform multi step ahead distribution forecasts!')

-    def forecast_ahead_multivariate(self, data, steps, **kwargs):
+    def forecast_ahead_multivariate(self, data, steps, **kwargs) -> pd.DataFrame:
        """
        Multivariate forecast n step ahead
@@ -302,7 +305,7 @@ class FTS(object):
        """
        raise NotImplementedError('This model do not perform one step ahead multivariate forecasts!')

-    def forecast_step(self, data, step, **kwargs):
+    def forecast_step(self, data, step, **kwargs) -> list:
        """
        Point forecast for H steps ahead, where H is given by the step parameter
@@ -573,7 +576,7 @@ class FTS(object):
        else:
            return data

-    def get_UoD(self):
+    def get_UoD(self) -> set:
        """
        Returns the interval of the known bounds of the universe of discourse (UoD), i. e.,
        the known minimum and maximum values of the time series.
@@ -585,7 +588,7 @@ class FTS(object):
        else:
            return (self.original_min, self.original_max)

-    def offset(self):
+    def offset(self) -> int:
        """
        Returns the number of lags to skip in the input test data in order to synchronize it with
        the forecasted values given by the predict function. This is necessary due to the order of the
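
The FTS base class only declares the forecasting interface typed above; concrete models override the forecast_* methods. A hedged sketch of the intended call pattern, using Chen's conventional FTS as one example (the train/test split and partition count are illustrative, not part of this commit):

from pyFTS.partitioners import Grid
from pyFTS.models import chen

train, test = data[:150], data[150:]             # 'data' as in the earlier sketch

part = Grid.GridPartitioner(data=train, npart=10)
model = chen.ConventionalFTS(partitioner=part)   # partitioner is the mandatory dependency noted above
model.fit(train)

point = model.forecast(test)                     # one step ahead, returns a list
horizon = model.forecast_ahead(test, 5)          # 1..5 steps ahead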

View File

@@ -9,7 +9,7 @@ import pandas as pd
 import numpy as np

-def get_data():
+def get_data() -> np.ndarray:
    """
    Get a simple univariate time series data.
@@ -19,7 +19,7 @@ def get_data():
    dat = np.array(dat["Passengers"])
    return dat

-def get_dataframe():
+def get_dataframe() -> pd.DataFrame:
    """
    Get the complete multivariate time series data.
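
Dataset modules in pyFTS.data follow the pattern typed above: get_data() returns one univariate series as a numpy array and get_dataframe() returns the full DataFrame. A short sketch using the AirPassengers module as an illustration of that pattern (the specific file changed in this hunk is not named in this view):

from pyFTS.data import AirPassengers

series = AirPassengers.get_data()       # np.ndarray with the 'Passengers' column
frame = AirPassengers.get_dataframe()   # complete pandas DataFrame
print(series.shape, list(frame.columns))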

View File

@@ -16,7 +16,7 @@ class SignalEmulator(object):
        self.components = []
        """Components of the signal"""

-    def stationary_gaussian(self, mu, sigma, **kwargs):
+    def stationary_gaussian(self, mu: float, sigma: float, **kwargs):
        """
        Creates a continuous Gaussian signal with mean mu and variance sigma.
@@ -36,7 +36,7 @@ class SignalEmulator(object):
                                'parameters': parameters, 'args': kwargs})
        return self

-    def incremental_gaussian(self, mu, sigma, **kwargs):
+    def incremental_gaussian(self, mu: float, sigma: float, **kwargs):
        """
        Creates an additive gaussian interference on a previous signal
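
SignalEmulator stacks signal components and, since each builder returns self, the calls can be chained. A hedged sketch, assuming the emulator keeps its run() method and the usual 'length' and 'start' keyword arguments (these names are assumptions, not shown in this diff):

from pyFTS.data import artificial

emulator = artificial.SignalEmulator()
emulator.stationary_gaussian(10, 0.5, length=200)    # base Gaussian signal
emulator.incremental_gaussian(0.1, 0.02, start=100)  # additive drift after t=100
signal = emulator.run()                              # materialize the composed signal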

View File

@@ -7,7 +7,7 @@ from pathlib import Path
 from urllib import request

-def get_dataframe(filename, url, sep=";", compression='infer'):
+def get_dataframe(filename: str, url: str, sep: str = ";", compression: str = 'infer') -> pd.DataFrame:
    """
    This method checks if filename already exists, reads the file and returns its data.
    If the file doesn't already exist, it will be downloaded and decompressed.
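
The helper above downloads a remote CSV once, caches it locally, and then reads it with pandas. A hedged sketch of a call, assuming the helper lives in pyFTS.data.common; the filename and URL below are placeholders, not real project resources:

from pyFTS.data import common

df = common.get_dataframe(filename='example_dataset.csv.gz',                      # hypothetical cache name
                          url='https://example.org/data/example_dataset.csv.gz',  # hypothetical source
                          sep=',', compression='gzip')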

View File

@@ -9,7 +9,7 @@ from pyFTS.partitioners import partitioner
 class SingletonPartitioner(partitioner.Partitioner):
-    """Singleton Partitioner"""
+    """Singleton Partitioner: Create singleton fuzzy sets for each distinct value in UoD"""

    def __init__(self, **kwargs):
        """
@@ -17,12 +17,12 @@ class SingletonPartitioner(partitioner.Partitioner):
        """
        super(SingletonPartitioner, self).__init__(name="Singleton", **kwargs)

-    def build(self, data):
+    def build(self, data : list):
        sets = {}
        kwargs = {'type': self.type, 'variable': self.variable}
-        for count, instance in enumerate(data):
+        for count, instance in enumerate(set(data)):
            _name = self.get_name(count)
            sets[_name] = FuzzySet.FuzzySet(_name, Membership.singleton, [instance], instance, **kwargs)
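
With enumerate(set(data)), build() now creates one singleton fuzzy set per distinct value instead of one per observation. A minimal sketch, assuming the class is importable as pyFTS.partitioners.Singleton (the sample values are illustrative):

from pyFTS.partitioners import Singleton

values = [1, 2, 2, 3, 3, 3, 4]
part = Singleton.SingletonPartitioner(data=values)
print(len(part.sets))    # expected: 4 sets, one per distinct value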

View File

@@ -18,7 +18,11 @@ all_methods = [Grid.GridPartitioner, Entropy.EntropyPartitioner, FCM.FCMPartitio
 mfs = [Membership.trimf, Membership.gaussmf, Membership.trapmf]

-def plot_sets(data, sets, titles, size=[12, 10], save=False, file=None, axis=None):
+def plot_sets(data, sets: dict, titles : list, size=[12, 10], save=False, file=None, axis=None):
+    """
+    Plot all fuzzy sets in a Partitioner
+    """
    num = len(sets)
    if axis is None:
@@ -53,7 +57,7 @@ def plot_sets(data, sets, titles, size=[12, 10], save=False, file=None, axis=Non
 def plot_partitioners(data, objs, tam=[12, 10], save=False, file=None, axis=None):
    sets = [k.sets for k in objs]
    titles = [k.name for k in objs]
-    plot_sets(data, sets, titles, tam, save, file, axis)
+    plot_sets(sets, titles, tam, save, file, axis)

 def explore_partitioners(data, npart, methods=None, mf=None, transformation=None,
@@ -84,6 +88,6 @@ def explore_partitioners(data, npart, methods=None, mf=None, transformation=None
        obj.name = obj.name + " - " + obj.membership_function.__name__
        objs.append(obj)

-    plot_partitioners(data, objs, size, save, file)
+    plot_partitioners(objs, size, save, file)
    return objs

View File

@@ -28,5 +28,5 @@ def explore_partitioners(data, npart, methods=None, mf=None, tam=[12, 10], save=
    objs = np.ravel(objs).tolist()

-    Util.plot_partitioners(data, objs, tam, save, file)
+    Util.plot_partitioners(objs, tam, save, file)

View File

@@ -14,16 +14,17 @@ class Partitioner(object):
        """
        Universe of Discourse partitioner scheme. Split data into several fuzzy sets
        """
-        self.name = kwargs.get('name',"")
+        self.name : str = kwargs.get('name',"")
        """partitioner name"""
-        self.partitions = kwargs.get('npart', 10)
+        self.partitions : int = kwargs.get('npart', 10)
        """The number of universe of discourse partitions, i.e., the number of fuzzy sets that will be created"""
-        self.sets = {}
+        self.sets : dict = {}
+        """The fuzzy sets dictionary"""
        self.membership_function = kwargs.get('func', Membership.trimf)
        """Fuzzy membership function (pyFTS.common.Membership)"""
-        self.setnames = kwargs.get('names', None)
+        self.setnames : list = kwargs.get('names', None)
        """list of partition names. If None is given the partitions will be auto named with prefix"""
-        self.prefix = kwargs.get('prefix', 'A')
+        self.prefix : str = kwargs.get('prefix', 'A')
        """prefix of auto generated partition names"""
        self.transformation = kwargs.get('transformation', None)
        """data transformation to be applied on data"""
@@ -34,13 +35,13 @@ class Partitioner(object):
        """The type of fuzzy sets that are generated by this partitioner"""
        self.ordered_sets = None
        """An ordered list of the fuzzy sets names, sorted by their middle point"""
-        self.kdtree = None
+        self.kdtree : KDTree = None
        """A spatial index to help in fuzzyfication"""
-        self.margin = kwargs.get("margin", 0.1)
+        self.margin : float = kwargs.get("margin", 0.1)
        """The upper and lower exceeding margins for the known UoD. The default value is .1"""
-        self.lower_margin = kwargs.get("lower_margin", self.margin)
+        self.lower_margin : float = kwargs.get("lower_margin", self.margin)
        """Specific lower exceeding margins for the known UoD. The default value is the self.margin parameter"""
-        self.upper_margin = kwargs.get("lower_margin", self.margin)
+        self.upper_margin : float = kwargs.get("lower_margin", self.margin)
        """Specific upper exceeding margins for the known UoD. The default value is the self.margin parameter"""

        if kwargs.get('preprocess',True):
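
All of the attributes annotated above are driven by constructor kwargs and surface on every concrete partitioner. A short sketch with a Grid partitioner (the set names and margin value are illustrative):

from pyFTS.partitioners import Grid

part = Grid.GridPartitioner(data=data, npart=3,
                            names=['LOW', 'MEDIUM', 'HIGH'],   # overrides the 'A' prefix auto-naming
                            margin=0.2)                        # widen the known UoD on both sides
print(part.ordered_sets)     # set names sorted by centroid
print(part.min, part.max)    # UoD bounds after the margins are applied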

View File

@@ -8,6 +8,7 @@ from typing import Tuple
 class SOMPartitioner:
+    """Self Organized Map Partitioner"""

    def __init__(self,
                 grid_dimension: Tuple,
                 **kwargs):
@@ -19,8 +20,8 @@ class SOMPartitioner:
        # debug attributes
-        self.name = 'Kohonen Self Organizing Maps FTS'
-        self.shortname = 'SOM-FTS'
+        self.name = 'Kohonen Self Organizing Map Partitioner'
+        self.shortname = 'SOM-Partitioner'

    def __repr__(self):
        status = "is trained" if self.is_trained else "not trained"