Conditional Variance FTS
This commit is contained in:
parent
2ea599fdb8
commit
e9b86d7025
@ -167,7 +167,7 @@ def sharpness(forecasts):
|
||||
def resolution(forecasts):
    """Resolution - Standard deviation of the intervals.

    Measures the mean absolute deviation of the interval widths from the
    average width (sharpness).

    :param forecasts: list of interval forecasts, each a pair [lower, upper]
    :return: mean absolute deviation of interval widths from the sharpness
    """
    shp = sharpness(forecasts)
    tmp = [abs((i[1] - i[0]) - shp) for i in forecasts]
    return np.mean(tmp)
|
||||
|
||||
|
||||
|
@ -70,6 +70,14 @@ def generateRecurrentFLRs(fuzzyData):
|
||||
for r in rhs:
|
||||
tmp = FLR(l, r)
|
||||
flrs.append(tmp)
|
||||
elif isinstance(lhs, list) and not isinstance(rhs, list):
|
||||
for l in lhs:
|
||||
tmp = FLR(l, rhs)
|
||||
flrs.append(tmp)
|
||||
elif not isinstance(lhs, list) and isinstance(rhs, list):
|
||||
for r in rhs:
|
||||
tmp = FLR(lhs, r)
|
||||
flrs.append(tmp)
|
||||
else:
|
||||
tmp = FLR(lhs,rhs)
|
||||
flrs.append(tmp)
|
||||
|
@ -102,8 +102,40 @@ def getMaxMembershipFuzzySetIndex(inst, fuzzySets):
|
||||
return np.argwhere(mv == max(mv))[0, 0]
|
||||
|
||||
|
||||
def fuzzySeries(data, fuzzySets, method='maximum'):
    """Fuzzify each data point to its maximum-membership fuzzy set.

    NOTE(review): the 'method' parameter exists for interface parity with
    fuzzifySeries but is currently ignored — behavior is always 'maximum'.

    :param data: sequence of crisp values
    :param fuzzySets: list of fuzzy sets partitioning the Universe of Discourse
    :param method: accepted but unused (always maximum membership)
    :return: list with the best-matching fuzzy set for each data point
    """
    fts = []
    for item in data:
        fts.append(getMaxMembershipFuzzySet(item, fuzzySets))
    return fts
|
||||
|
||||
|
||||
def fuzzifySeries(data, fuzzySets, method='maximum'):
    """Fuzzify a time series.

    :param data: sequence of crisp values
    :param fuzzySets: ordered list of fuzzy sets partitioning the Universe of Discourse
    :param method: 'maximum' appends the single set with the highest membership;
        'fuzzy' appends the list of every set with membership > 0
    :return: list with one entry per data point (a set or a list of sets,
        depending on 'method')
    :raises ValueError: if 'method' is not 'fuzzy' or 'maximum'
    """
    fts = []
    for t, i in enumerate(data):
        mv = np.array([fs.membership(i) for fs in fuzzySets])
        if len(mv) == 0:
            # no sets supplied; fall back to the boundary check
            sets = check_bounds(i, fuzzySets)
        else:
            if method == 'fuzzy':
                ix = np.ravel(np.argwhere(mv > 0.0))
                sets = [fuzzySets[i] for i in ix]
            elif method == 'maximum':
                mx = max(mv)
                ix = np.ravel(np.argwhere(mv == mx))
                sets = fuzzySets[ix[0]]
            else:
                # FIX: original left 'sets' unbound for unknown methods,
                # producing a confusing NameError downstream
                raise ValueError("Unknown fuzzyfication method: " + str(method))
        fts.append(sets)
    return fts
|
||||
|
||||
|
||||
def check_bounds(data, sets):
    """Return the boundary fuzzy set when *data* lies outside the Universe of Discourse.

    :param data: crisp value to test
    :param sets: ordered list of fuzzy sets (first = lowest, last = highest)
    :return: sets[0] if data is below the first set's lower bound,
        sets[-1] if above the last set's upper bound, otherwise None
        (implicitly) when data is inside the bounds
    """
    if data < sets[0].get_lower():
        return sets[0]
    elif data > sets[-1].get_upper():
        return sets[-1]
|
||||
|
||||
|
||||
def check_bounds_index(data, sets):
    """Return the boundary fuzzy set *index* when *data* lies outside the Universe of Discourse.

    :param data: crisp value to test
    :param sets: ordered list of fuzzy sets (first = lowest, last = highest)
    :return: 0 if data is below the first set's lower bound,
        len(sets) - 1 if above the last set's upper bound, otherwise None
        (implicitly) when data is inside the bounds
    """
    if data < sets[0].get_lower():
        return 0
    elif data > sets[-1].get_upper():
        return len(sets) - 1
|
||||
|
@ -107,7 +107,7 @@ class FuzzySet(FS.FuzzySet):
|
||||
|
||||
self.perturbate_parameters(t)
|
||||
|
||||
tmp = self.mf(x, self.perturbated_parameters[t])
|
||||
tmp = self.mf(x, self.perturbated_parameters[str(t)])
|
||||
|
||||
if self.noise is not None:
|
||||
tmp += self.noise(t, self.noise_params)
|
||||
@ -115,16 +115,20 @@ class FuzzySet(FS.FuzzySet):
|
||||
return tmp
|
||||
|
||||
def perturbate_parameters(self, t):
    """Compute and cache the perturbated membership-function parameters for time *t*.

    Results are memoized in self.perturbated_parameters keyed by str(t) so
    that list/scalar displacements share a uniform (hashable) key.

    :param t: either a scalar time displacement applied to both location and
        width, or a list/set whose first element perturbates the location
        and second element the width
    """
    if str(t) not in self.perturbated_parameters:
        param = self.parameters
        if isinstance(t, (list, set)):
            # separate displacements for location (t[0]) and width (t[1])
            param = self.perform_location(t[0], param)
            param = self.perform_width(t[1], param)
        else:
            param = self.perform_location(t, param)
            param = self.perform_width(t, param)
        self.perturbated_parameters[str(t)] = param
|
||||
|
||||
def get_midpoint(self, t):
|
||||
|
||||
self.perturbate_parameters(t)
|
||||
param = self.perturbated_parameters[t]
|
||||
param = self.perturbated_parameters[str(t)]
|
||||
|
||||
if self.mf == Membership.gaussmf:
|
||||
return param[0]
|
||||
@ -140,7 +144,7 @@ class FuzzySet(FS.FuzzySet):
|
||||
def get_lower(self, t):
|
||||
|
||||
self.perturbate_parameters(t)
|
||||
param = self.perturbated_parameters[t]
|
||||
param = self.perturbated_parameters[str(t)]
|
||||
|
||||
if self.mf == Membership.gaussmf:
|
||||
return param[0] - 3*param[1]
|
||||
@ -156,7 +160,7 @@ class FuzzySet(FS.FuzzySet):
|
||||
def get_upper(self, t):
|
||||
|
||||
self.perturbate_parameters(t)
|
||||
param = self.perturbated_parameters[t]
|
||||
param = self.perturbated_parameters[str(t)]
|
||||
|
||||
if self.mf == Membership.gaussmf:
|
||||
return param[0] + 3*param[1]
|
||||
@ -183,105 +187,6 @@ class FuzzySet(FS.FuzzySet):
|
||||
return self.name + ": " + str(self.mf.__name__) + tmp
|
||||
|
||||
|
||||
class PolynomialNonStationaryPartitioner(partitioner.Partitioner):
|
||||
"""
|
||||
Non Stationary Universe of Discourse Partitioner
|
||||
"""
|
||||
|
||||
def __init__(self, data, part, **kwargs):
|
||||
""""""
|
||||
super(PolynomialNonStationaryPartitioner, self).__init__(name=part.name, data=data, npart=part.partitions,
|
||||
func=part.membership_function, names=part.setnames,
|
||||
prefix=part.prefix, transformation=part.transformation,
|
||||
indexer=part.indexer)
|
||||
|
||||
self.sets = []
|
||||
|
||||
loc_params, wid_params = self.get_polynomial_perturbations(data, **kwargs)
|
||||
|
||||
for ct, set in enumerate(part.sets):
|
||||
loc_roots = np.roots(loc_params[ct])[0]
|
||||
wid_roots = np.roots(wid_params[ct])[0]
|
||||
tmp = FuzzySet(set.name, set.mf, set.parameters,
|
||||
location=perturbation.polynomial,
|
||||
location_params=loc_params[ct],
|
||||
location_roots=loc_roots, #**kwargs)
|
||||
width=perturbation.polynomial,
|
||||
width_params=wid_params[ct],
|
||||
width_roots=wid_roots, **kwargs)
|
||||
|
||||
self.sets.append(tmp)
|
||||
|
||||
def poly_width(self, par1, par2, rng, deg):
|
||||
a = np.polyval(par1, rng)
|
||||
b = np.polyval(par2, rng)
|
||||
diff = [b[k] - a[k] for k in rng]
|
||||
tmp = np.polyfit(rng, diff, deg=deg)
|
||||
return tmp
|
||||
|
||||
def scale_up(self,x,pct):
|
||||
if x > 0: return x*(1+pct)
|
||||
else: return x*pct
|
||||
|
||||
def scale_down(self,x,pct):
|
||||
if x > 0: return x*pct
|
||||
else: return x*(1+pct)
|
||||
|
||||
def get_polynomial_perturbations(self, data, **kwargs):
|
||||
w = kwargs.get("window_size", int(len(data) / 5))
|
||||
deg = kwargs.get("degree", 2)
|
||||
xmax = [data[0]]
|
||||
tmax = [0]
|
||||
xmin = [data[0]]
|
||||
tmin = [0]
|
||||
|
||||
l = len(data)
|
||||
|
||||
for i in np.arange(0, l, w):
|
||||
sample = data[i:i + w]
|
||||
tx = max(sample)
|
||||
xmax.append(tx)
|
||||
tmax.append(np.ravel(np.argwhere(data == tx)).tolist()[0])
|
||||
tn = min(sample)
|
||||
xmin.append(tn)
|
||||
tmin.append(np.ravel(np.argwhere(data == tn)).tolist()[0])
|
||||
|
||||
cmax = np.polyfit(tmax, xmax, deg=deg)
|
||||
cmin = np.polyfit(tmin, xmin, deg=deg)
|
||||
|
||||
cmed = []
|
||||
|
||||
for d in np.arange(0, deg + 1):
|
||||
cmed.append(np.linspace(cmin[d], cmax[d], self.partitions)[1:self.partitions - 1])
|
||||
|
||||
loc_params = [cmin.tolist()]
|
||||
for i in np.arange(0, self.partitions - 2):
|
||||
tmp = [cmed[k][i] for k in np.arange(0, deg + 1)]
|
||||
loc_params.append(tmp)
|
||||
loc_params.append(cmax.tolist())
|
||||
|
||||
rng = np.arange(0, l)
|
||||
|
||||
clen = []
|
||||
|
||||
for i in np.arange(1, self.partitions-1):
|
||||
tmp = self.poly_width(loc_params[i - 1], loc_params[i + 1], rng, deg)
|
||||
clen.append(tmp)
|
||||
|
||||
tmp = self.poly_width(loc_params[0], loc_params[1], rng, deg)
|
||||
clen.insert(0, tmp)
|
||||
|
||||
tmp = self.poly_width(loc_params[self.partitions-2], loc_params[self.partitions-1], rng, deg)
|
||||
clen.append(tmp)
|
||||
|
||||
tmp = (loc_params, clen)
|
||||
|
||||
return tmp
|
||||
|
||||
def build(self, data):
|
||||
pass
|
||||
|
||||
|
||||
def fuzzify(inst, t, fuzzySets):
|
||||
"""
|
||||
Calculate the membership values for a data point given nonstationary fuzzy sets
|
||||
@ -299,10 +204,10 @@ def fuzzify(inst, t, fuzzySets):
|
||||
return ret
|
||||
|
||||
|
||||
def fuzzySeries(data, fuzzySets, window_size=1, method='fuzzy'):
|
||||
def fuzzySeries(data, fuzzySets, window_size=1, method='fuzzy', const_t= None):
|
||||
fts = []
|
||||
for t, i in enumerate(data):
|
||||
tdisp = window_index(t, window_size)
|
||||
tdisp = window_index(t, window_size) if const_t is None else const_t
|
||||
mv = np.array([fs.membership(i, tdisp) for fs in fuzzySets])
|
||||
if len(mv) == 0:
|
||||
sets = [check_bounds(i, fuzzySets, tdisp)]
|
||||
@ -318,6 +223,8 @@ def fuzzySeries(data, fuzzySets, window_size=1, method='fuzzy'):
|
||||
|
||||
|
||||
def window_index(t, window_size):
    """Map a time index *t* to the start index of its perturbation window.

    :param t: time index, or an already-built perturbation descriptor
        (list/set), which is passed through unchanged
    :param window_size: width of the perturbation window
    :return: t rounded down to the nearest multiple of window_size
    """
    if isinstance(t, (list, set)):
        return t
    return t - (t % window_size)
|
||||
|
||||
|
||||
|
180
pyFTS/nonstationary/cvfts.py
Normal file
180
pyFTS/nonstationary/cvfts.py
Normal file
@ -0,0 +1,180 @@
|
||||
import numpy as np
|
||||
from pyFTS import fts, flrg, chen
|
||||
from pyFTS.nonstationary import common, perturbation, nsfts
|
||||
from pyFTS.common import Transformations, FuzzySet, FLR
|
||||
|
||||
|
||||
class ConditionalVarianceFTS(chen.ConventionalFTS):
    """
    First Order Conditional Variance Fuzzy Time Series.

    At each forecasting step the fuzzy sets are perturbated (translated and
    rescaled) as a function of how far the current sample falls outside the
    value range observed during training, emulating heteroskedastic
    (conditional variance) behavior.
    """
    def __init__(self, name, **kwargs):
        super(ConditionalVarianceFTS, self).__init__("CVFTS " + name, **kwargs)
        self.name = "Conditional Variance FTS"
        self.detail = ""
        self.flrgs = {}
        #self.appendTransformation(Transformations.Differential(1))
        if self.partitioner is None:
            self.min_tx = None
            self.max_tx = None
        else:
            self.min_tx = self.partitioner.min
            self.max_tx = self.partitioner.max
            self.appendTransformation(self.partitioner.transformation)

        # sliding stacks with the last 3 displacements observed below/above
        # the training range (used to smooth the perturbation factors)
        self.min_stack = [0, 0, 0]
        self.max_stack = [0, 0, 0]

    def train(self, data, sets=None, order=1, parameters=None):
        """Fuzzify the training data with a constant perturbation (t=0) and build the FLRGs.

        :param data: training time series
        :param sets: optional list of fuzzy sets; defaults to the partitioner's sets
        :param order: kept for interface compatibility (model is first order)
        :param parameters: kept for interface compatibility (unused)
        """
        if sets is not None:
            self.sets = sets
        else:
            self.sets = self.partitioner.sets

        ndata = self.doTransformations(data)

        # remember the training range; perturbations are driven by how far
        # future samples fall outside [min_tx, max_tx]
        self.min_tx = min(ndata)
        self.max_tx = max(ndata)

        tmpdata = common.fuzzySeries(ndata, self.sets, method='fuzzy', const_t=0)
        flrs = FLR.generateNonRecurrentFLRs(tmpdata)
        self.flrgs = self.generate_flrg(flrs)

    def generate_flrg(self, flrs, **kwargs):
        """Group first-order FLRs by LHS into non-stationary FLRGs.

        :param flrs: list of Fuzzy Logical Relationships
        :return: dict mapping LHS set name -> ConventionalNonStationaryFLRG
        """
        flrgs = {}
        for flr in flrs:
            if flr.LHS.name in flrgs:
                flrgs[flr.LHS.name].append(flr.RHS)
            else:
                flrgs[flr.LHS.name] = nsfts.ConventionalNonStationaryFLRG(flr.LHS)
                flrgs[flr.LHS.name].append(flr.RHS)
        return flrgs

    def _smooth(self, a):
        """Weighted smoothing of the last 3 values (most recent weighs most)."""
        return .1 * a[0] + .3 * a[1] + .6 * a[2]

    def perturbation_factors(self, data):
        """Compute, for each fuzzy set, its [location, scale] perturbation for the new sample.

        Displacements below/above the training range are pushed onto fixed-size
        stacks, and the extreme value over the last 3 steps drives the
        per-set perturbations.

        :param data: the newest (transformed) sample
        :return: list with one [location, scale] pair per partition
        """
        _max = 0
        _min = 0
        if data < self.min_tx:
            _min = data - self.min_tx if data < 0 else self.min_tx - data
        elif data > self.max_tx:
            _max = data - self.max_tx if data > 0 else self.max_tx - data
        self.min_stack.pop(2)
        self.min_stack.insert(0, _min)
        _min = min(self.min_stack)
        self.max_stack.pop(2)
        self.max_stack.insert(0, _max)
        _max = max(self.max_stack)

        # spread the location displacements across the partitions and derive
        # each set's scale from its neighbors' locations
        location = np.linspace(_min, _max, self.partitioner.partitions)
        scale = [abs(location[0] - location[2])]
        scale.extend([abs(location[k - 1] - location[k + 1]) for k in np.arange(1, self.partitioner.partitions - 1)])
        scale.append(abs(location[-1] - location[-3]))

        perturb = [[location[k], scale[k]] for k in np.arange(0, self.partitioner.partitions)]

        return perturb

    def _affected_sets(self, sample, perturb):
        """Return [index, membership] pairs for every set activated by the perturbated sample.

        When no set is activated, the nearest boundary set is returned with
        membership 1.
        """
        affected_sets = []
        for ct, fset in enumerate(self.sets):
            # compute membership once per set (original computed it twice)
            mv = fset.membership(sample, perturb[ct])
            if mv > 0.0:
                affected_sets.append([ct, mv])

        if len(affected_sets) == 0:
            if sample < self.sets[0].get_lower(perturb[0]):
                affected_sets.append([0, 1])
            # FIX: original tested 'sample < last set lower', which can never
            # match above the UoD, leaving affected_sets empty and forecasts 0
            elif sample > self.sets[-1].get_upper(perturb[-1]):
                affected_sets.append([len(self.sets) - 1, 1])

        return affected_sets

    def forecast(self, data, **kwargs):
        """Point forecast: membership-weighted sum of FLRG midpoints of the affected sets.

        :param data: time series to forecast from
        :return: list of point forecasts (inverse-transformed)
        """
        ndata = np.array(self.doTransformations(data))

        l = len(ndata)

        ret = []

        for k in np.arange(0, l):

            sample = ndata[k]

            perturb = self.perturbation_factors(sample)

            affected_sets = self._affected_sets(sample, perturb)

            tmp = []

            if len(affected_sets) == 1:
                ix = affected_sets[0][0]
                aset = self.sets[ix]
                if aset.name in self.flrgs:
                    tmp.append(self.flrgs[aset.name].get_midpoint(perturb[ix]))
                else:
                    # no rule for this set: fall back to the set's own midpoint
                    print('naive')
                    tmp.append(aset.get_midpoint(perturb[ix]))
            else:
                for aset in affected_sets:
                    ix = aset[0]
                    fs = self.sets[ix]
                    tdisp = perturb[ix]
                    if fs.name in self.flrgs:
                        tmp.append(self.flrgs[fs.name].get_midpoint(tdisp) * aset[1])
                    else:
                        tmp.append(fs.get_midpoint(tdisp) * aset[1])

            pto = sum(tmp)

            ret.append(pto)

        ret = self.doInverseTransformations(ret, params=[data[self.order - 1:]])

        return ret

    def forecastInterval(self, data, **kwargs):
        """Interval forecast: membership-weighted [lower, upper] bounds of the affected sets.

        :param data: time series to forecast from
        :return: list of [lower, upper] interval forecasts (inverse-transformed)
        """
        ndata = np.array(self.doTransformations(data))

        l = len(ndata)

        ret = []

        for k in np.arange(0, l):

            sample = ndata[k]

            perturb = self.perturbation_factors(sample)

            affected_sets = self._affected_sets(sample, perturb)

            upper = []
            lower = []

            if len(affected_sets) == 1:
                ix = affected_sets[0][0]
                aset = self.sets[ix]
                if aset.name in self.flrgs:
                    lower.append(self.flrgs[aset.name].get_lower(perturb[ix]))
                    upper.append(self.flrgs[aset.name].get_upper(perturb[ix]))
                else:
                    lower.append(aset.get_lower(perturb[ix]))
                    upper.append(aset.get_upper(perturb[ix]))
            else:
                for aset in affected_sets:
                    ix = aset[0]
                    fs = self.sets[ix]
                    tdisp = perturb[ix]
                    if fs.name in self.flrgs:
                        lower.append(self.flrgs[fs.name].get_lower(tdisp) * aset[1])
                        upper.append(self.flrgs[fs.name].get_upper(tdisp) * aset[1])
                    else:
                        lower.append(fs.get_lower(tdisp) * aset[1])
                        upper.append(fs.get_upper(tdisp) * aset[1])

            itvl = [sum(lower), sum(upper)]

            ret.append(itvl)

        ret = self.doInverseTransformations(ret, params=[data[self.order - 1:]])

        return ret
|
@ -23,7 +23,7 @@ class NonStationaryFLRG(flrg.FLRG):
|
||||
|
||||
def get_midpoint(self, t, window_size=1):
|
||||
if len(self.RHS) > 0:
|
||||
if isinstance(self.RHS, (list,set)):
|
||||
if isinstance(self.RHS, (list, set)):
|
||||
tmp = [r.get_midpoint(common.window_index(t, window_size)) for r in self.RHS]
|
||||
elif isinstance(self.RHS, dict):
|
||||
tmp = [self.RHS[r].get_midpoint(common.window_index(t, window_size)) for r in self.RHS.keys()]
|
||||
|
122
pyFTS/nonstationary/partitioners.py
Normal file
122
pyFTS/nonstationary/partitioners.py
Normal file
@ -0,0 +1,122 @@
|
||||
import numpy as np
|
||||
from pyFTS.partitioners import partitioner
|
||||
from pyFTS.nonstationary import common, perturbation
|
||||
|
||||
|
||||
class PolynomialNonStationaryPartitioner(partitioner.Partitioner):
    """
    Non Stationary Universe of Discourse Partitioner.

    Wraps the fuzzy sets of an already-trained stationary partitioner into
    non-stationary fuzzy sets whose location and width perturbations follow
    polynomials fitted over the local minima/maxima of the training data.
    """

    def __init__(self, data, part, **kwargs):
        """
        :param data: training data used to fit the perturbation polynomials
        :param part: a trained stationary partitioner whose sets are wrapped
        :keyword window_size: length of the subsamples used to pick local extrema
        :keyword degree: degree of the fitted polynomials
        """
        super(PolynomialNonStationaryPartitioner, self).__init__(name=part.name, data=data, npart=part.partitions,
                                                                 func=part.membership_function, names=part.setnames,
                                                                 prefix=part.prefix, transformation=part.transformation,
                                                                 indexer=part.indexer)

        self.sets = []

        loc_params, wid_params = self.get_polynomial_perturbations(data, **kwargs)

        for ct, fset in enumerate(part.sets):  # renamed from 'set' to avoid shadowing the builtin
            loc_roots = np.roots(loc_params[ct])[0]
            wid_roots = np.roots(wid_params[ct])[0]
            tmp = common.FuzzySet(fset.name, fset.mf, fset.parameters,
                                  location=perturbation.polynomial,
                                  location_params=loc_params[ct],
                                  location_roots=loc_roots,  #**kwargs)
                                  width=perturbation.polynomial,
                                  width_params=wid_params[ct],
                                  width_roots=wid_roots, **kwargs)

            self.sets.append(tmp)

    def poly_width(self, par1, par2, rng, deg):
        """Fit a polynomial of degree *deg* to the gap between two location polynomials."""
        a = np.polyval(par1, rng)
        b = np.polyval(par2, rng)
        diff = [b[k] - a[k] for k in rng]
        tmp = np.polyfit(rng, diff, deg=deg)
        return tmp

    def scale_up(self, x, pct):
        """Increase *x* by *pct*, respecting its sign."""
        if x > 0: return x * (1 + pct)
        else: return x * pct

    def scale_down(self, x, pct):
        """Decrease *x* by *pct*, respecting its sign."""
        if x > 0: return x * pct
        else: return x * (1 + pct)

    def get_polynomial_perturbations(self, data, **kwargs):
        """Fit location polynomials over the local extrema of *data* and derive width polynomials.

        :param data: training time series
        :keyword window_size: subsample length for extrema extraction (default len(data)/5)
        :keyword degree: polynomial degree (default 2)
        :return: tuple (loc_params, wid_params) with one coefficient list per partition
        """
        w = kwargs.get("window_size", int(len(data) / 5))
        deg = kwargs.get("degree", 2)
        # seed the extrema lists with the first sample so the fits are anchored at t=0
        xmax = [data[0]]
        tmax = [0]
        xmin = [data[0]]
        tmin = [0]

        l = len(data)

        for i in np.arange(0, l, w):
            sample = data[i:i + w]
            tx = max(sample)
            xmax.append(tx)
            tmax.append(np.ravel(np.argwhere(data == tx)).tolist()[0])
            tn = min(sample)
            xmin.append(tn)
            tmin.append(np.ravel(np.argwhere(data == tn)).tolist()[0])

        # polynomials following the upper and lower envelopes of the data
        cmax = np.polyfit(tmax, xmax, deg=deg)
        cmin = np.polyfit(tmin, xmin, deg=deg)

        cmed = []

        # interpolate coefficient-wise between the envelopes for the inner partitions
        for d in np.arange(0, deg + 1):
            cmed.append(np.linspace(cmin[d], cmax[d], self.partitions)[1:self.partitions - 1])

        loc_params = [cmin.tolist()]
        for i in np.arange(0, self.partitions - 2):
            tmp = [cmed[k][i] for k in np.arange(0, deg + 1)]
            loc_params.append(tmp)
        loc_params.append(cmax.tolist())

        rng = np.arange(0, l)

        clen = []

        # widths: distance between each partition's neighbors' location polynomials
        for i in np.arange(1, self.partitions - 1):
            tmp = self.poly_width(loc_params[i - 1], loc_params[i + 1], rng, deg)
            clen.append(tmp)

        tmp = self.poly_width(loc_params[0], loc_params[1], rng, deg)
        clen.insert(0, tmp)

        tmp = self.poly_width(loc_params[self.partitions - 2], loc_params[self.partitions - 1], rng, deg)
        clen.append(tmp)

        tmp = (loc_params, clen)

        return tmp

    def build(self, data):
        """Partitioning is performed in __init__; nothing to do here."""
        pass
|
||||
|
||||
|
||||
class ConstantNonStationaryPartitioner(partitioner.Partitioner):
    """
    Non Stationary Universe of Discourse Partitioner.

    Wraps the fuzzy sets of an already-trained stationary partitioner into
    non-stationary fuzzy sets whose perturbation functions are supplied by
    the caller through **kwargs (location, width, and their parameters).
    """

    def __init__(self, data, part, **kwargs):
        """
        :param data: training data (forwarded to the base partitioner)
        :param part: a trained stationary partitioner whose sets are wrapped
        :keyword location / width: perturbation functions forwarded to each FuzzySet
        """
        super(ConstantNonStationaryPartitioner, self).__init__(name=part.name, data=data, npart=part.partitions,
                                                               func=part.membership_function, names=part.setnames,
                                                               prefix=part.prefix, transformation=part.transformation,
                                                               indexer=part.indexer)

        self.sets = []

        for fset in part.sets:  # renamed from 'set' to avoid shadowing the builtin
            tmp = common.FuzzySet(fset.name, fset.mf, fset.parameters, **kwargs)

            self.sets.append(tmp)
|
@ -19,7 +19,7 @@ def plot_sets(sets, start=0, end=10, step=1, tam=[5, 5], colors=None,
|
||||
for t in range:
|
||||
tdisp = t - (t % window_size)
|
||||
set.membership(0, tdisp)
|
||||
param = set.perturbated_parameters[tdisp]
|
||||
param = set.perturbated_parameters[str(tdisp)]
|
||||
|
||||
if set.mf == Membership.trimf:
|
||||
if t == start:
|
||||
@ -34,7 +34,7 @@ def plot_sets(sets, start=0, end=10, step=1, tam=[5, 5], colors=None,
|
||||
for t in range:
|
||||
tdisp = t - (t % window_size)
|
||||
set.membership(0, tdisp)
|
||||
param = set.perturbated_parameters[tdisp]
|
||||
param = set.perturbated_parameters[str(tdisp)]
|
||||
tmp.append(np.polyval(param, tdisp))
|
||||
axes.plot(range, tmp, ls="--", c="blue")
|
||||
|
||||
@ -51,3 +51,41 @@ def plot_sets(sets, start=0, end=10, step=1, tam=[5, 5], colors=None,
|
||||
plt.tight_layout()
|
||||
|
||||
Util.showAndSaveImage(fig, file, save)
|
||||
|
||||
|
||||
def plot_sets_conditional(model, data, start=0, end=10, step=1, tam=[5, 5], colors=None,
                          save=False, file=None, axes=None):
    """Plot the perturbated fuzzy sets of a conditional variance model along time.

    For each time step the model's perturbation factors are computed from the
    data sample and each set is drawn with its perturbated parameters
    (triangular sets only); the original data is overlaid in black.

    :param model: a trained ConditionalVarianceFTS-like model (needs .sets and
        .perturbation_factors)
    :param data: time series used to drive the perturbations and overlay plot
    :param start, end, step: time range to plot
    :param tam: figure size (only read, never mutated)
    :param colors: unused, kept for interface compatibility
    :param save, file: forwarded to Util.showAndSaveImage
    :param axes: optional existing matplotlib axes to draw on
    """
    timeline = np.arange(start, end, step)  # renamed from 'range' to avoid shadowing the builtin
    ticks = []
    if axes is None:
        fig, axes = plt.subplots(nrows=1, ncols=1, figsize=tam)
    else:
        # FIX: 'fig' was unbound when the caller supplied axes, crashing at
        # Util.showAndSaveImage below
        fig = axes.get_figure()

    for ct, fset in enumerate(model.sets):
        for t in timeline:
            tdisp = model.perturbation_factors(data[t])
            fset.perturbate_parameters(tdisp[ct])
            param = fset.perturbated_parameters[str(tdisp[ct])]

            if fset.mf == Membership.trimf:
                if t == start:
                    line = axes.plot([t, t + 1, t], param, label=fset.name)
                    fset.metadata['color'] = line[0].get_color()
                else:
                    axes.plot([t, t + 1, t], param, c=fset.metadata['color'])

            # NOTE(review): extends once per set per step, so ticks grows faster
            # than the timeline — preserved from the original; verify intent
            ticks.extend(["t+" + str(t), ""])

    axes.set_ylabel("Universe of Discourse")
    axes.set_xlabel("Time")
    plt.xticks([k for k in timeline], ticks, rotation='vertical')

    handles0, labels0 = axes.get_legend_handles_labels()
    lgd = axes.legend(handles0, labels0, loc=2, bbox_to_anchor=(1, 1))

    if data is not None:
        axes.plot(np.arange(start, start + len(data), 1), data, c="black")

    plt.tight_layout()

    Util.showAndSaveImage(fig, file, save)
|
||||
|
@ -1,13 +1,14 @@
|
||||
import os
|
||||
import numpy as np
|
||||
from pyFTS.common import Membership
|
||||
from pyFTS.nonstationary import common,perturbation,util,nsfts, honsfts
|
||||
from pyFTS.common import Membership, Transformations
|
||||
from pyFTS.nonstationary import common,perturbation, partitioners, util,nsfts, honsfts, cvfts
|
||||
from pyFTS.partitioners import Grid
|
||||
import matplotlib.pyplot as plt
|
||||
import pandas as pd
|
||||
os.chdir("/home/petronio/Dropbox/Doutorado/Codigos/")
|
||||
|
||||
"""
|
||||
diff = Transformations.Differential(1)
|
||||
|
||||
def generate_heteroskedastic_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=10, num=35):
|
||||
mu = mu_ini
|
||||
sigma = sigma_ini
|
||||
@ -19,34 +20,55 @@ def generate_heteroskedastic_linear(mu_ini, sigma_ini, mu_inc, sigma_inc, it=10,
|
||||
return ret
|
||||
|
||||
|
||||
lmv1 = generate_heteroskedastic_linear(1,0.1,1,0.3)
|
||||
#lmv1 = generate_heteroskedastic_linear(5,0.1,0,0.2)
|
||||
#lmv1 = generate_heteroskedastic_linear(1,0.1,1,0.3)
|
||||
lmv1 = generate_heteroskedastic_linear(5,0.1,0,0.2)
|
||||
#lmv1 = generate_heteroskedastic_linear(1,0.3,1,0)
|
||||
|
||||
ns = 5 #number of fuzzy sets
|
||||
lmv1 = diff.apply(lmv1)
|
||||
|
||||
ns = 10 #number of fuzzy sets
|
||||
ts = 200
|
||||
train = lmv1[:ts]
|
||||
test = lmv1[ts:]
|
||||
w = 25
|
||||
deg = 4
|
||||
|
||||
tmp_fs = Grid.GridPartitioner(train[:35], 10)
|
||||
tmp_fs = Grid.GridPartitioner(train, 10)
|
||||
|
||||
fs = common.PolynomialNonStationaryPartitioner(train, tmp_fs, window_size=35, degree=1)
|
||||
|
||||
nsfts1 = nsfts.NonStationaryFTS("", partitioner=fs)
|
||||
|
||||
nsfts1.train(train[:100])
|
||||
|
||||
print(fs)
|
||||
|
||||
print(nsfts1)
|
||||
|
||||
tmp = nsfts1.forecast(test[:10], time_displacement=200)
|
||||
|
||||
print(tmp)
|
||||
#fs = partitioners.PolynomialNonStationaryPartitioner(train, tmp_fs, window_size=35, degree=1)
|
||||
fs = partitioners.ConstantNonStationaryPartitioner(train, tmp_fs,
|
||||
location=perturbation.polynomial,
|
||||
location_params=[1,0],
|
||||
location_roots=0,
|
||||
width=perturbation.polynomial,
|
||||
width_params=[1,0],
|
||||
width_roots=0)
|
||||
"""
|
||||
perturb = [0.5, 0.25]
|
||||
for i in [0,1]:
|
||||
print(fs.sets[i].parameters)
|
||||
fs.sets[i].perturbate_parameters(perturb[i])
|
||||
for i in [0,1]:
|
||||
print(fs.sets[i].perturbated_parameters[perturb[i]])
|
||||
"""
|
||||
#nsfts1 = nsfts.NonStationaryFTS("", partitioner=fs)
|
||||
|
||||
nsfts1 = cvfts.ConditionalVarianceFTS("", partitioner=fs)
|
||||
|
||||
nsfts1.train(train)
|
||||
|
||||
#print(fs)
|
||||
|
||||
#print(nsfts1)
|
||||
|
||||
#tmp = nsfts1.forecast(test[50:60])
|
||||
|
||||
#print(tmp)
|
||||
#print(test[50:60])
|
||||
|
||||
util.plot_sets_conditional(nsfts1, test, end=150, step=1,tam=[10, 5])
|
||||
print('')
|
||||
"""
|
||||
passengers = pd.read_csv("DataSets/AirPassengers.csv", sep=",")
|
||||
passengers = np.array(passengers["Passengers"])
|
||||
|
||||
@ -83,6 +105,7 @@ print([k[1] for k in tmpi])
|
||||
# window_size=ws, only_lines=False)
|
||||
|
||||
#fig, axes = plt.subplots(nrows=1, ncols=1, figsize=[15,5])
|
||||
"""
|
||||
|
||||
"""
|
||||
axes.plot(testp, label="Original")
|
||||
|
Loading…
Reference in New Issue
Block a user