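"""
Non-stationary first-order fuzzy time series with conditional variance
(CVFTS). The fuzzy sets of a conventional partitioner are translated and
scaled according to how far recent samples fall outside the universe of
discourse observed at training time, yielding point and interval forecasts
for non-stationary series.
"""
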
import numpy as np

from pyFTS.models import hofts
from pyFTS.models.nonstationary import common, nsfts
from pyFTS.common import FLR, flrg


class HighOrderNonstationaryFLRG(flrg.FLRG):
    """Conventional High Order Fuzzy Logical Relationship Group"""

    def __init__(self, order, **kwargs):
        # FLRG is the proper base here: it accepts the order argument and
        # provides get_key(), used by __str__ below
        super(HighOrderNonstationaryFLRG, self).__init__(order, **kwargs)
        self.LHS = []
        self.RHS = {}
        self.strlhs = ""

    def append_rhs(self, c, **kwargs):
        # Store each right-hand side only once
        if c not in self.RHS:
            self.RHS[c] = c

    def append_lhs(self, c):
        self.LHS.append(c)

    def __str__(self):
        tmp = ""
        for c in sorted(self.RHS):
            if len(tmp) > 0:
                tmp = tmp + ","
            tmp = tmp + c
        return self.get_key() + " -> " + tmp

    def __len__(self):
        return len(self.RHS)


class ConditionalVarianceFTS(hofts.HighOrderFTS):
    """First-order non-stationary FTS that perturbs its fuzzy sets according
    to how far recent samples fall outside the universe of discourse (UoD)
    observed during training."""

    def __init__(self, **kwargs):
        super(ConditionalVarianceFTS, self).__init__(**kwargs)
        self.name = "Conditional Variance FTS"
        self.shortname = "CVFTS"
        self.detail = ""
        self.flrgs = {}
        if self.partitioner is not None:
            self.append_transformation(self.partitioner.transformation)

        # Rolling windows holding the three most recent displacements of the
        # data below/above the training UoD
        self.min_stack = [0, 0, 0]
        self.max_stack = [0, 0, 0]
        self.uod_clip = False
        self.order = 1
        self.min_order = 1

    def train(self, ndata, **kwargs):
        # Fuzzify the training data, extract the non-recurrent first-order
        # FLRs and group them into FLRGs
        tmpdata = common.fuzzySeries(ndata, self.sets, self.partitioner.ordered_sets,
                                     method='fuzzy', const_t=0)
        flrs = FLR.generate_non_recurrent_flrs(tmpdata)
        self.generate_flrg(flrs)

    def generate_flrg(self, flrs, **kwargs):
        for flr in flrs:
            if flr.LHS.name not in self.flrgs:
                self.flrgs[flr.LHS.name] = nsfts.ConventionalNonStationaryFLRG(flr.LHS)
            self.flrgs[flr.LHS.name].append_rhs(flr.RHS)

    def _smooth(self, a):
        # Fixed-weight smoothing of a three-element window
        return .1 * a[0] + .3 * a[1] + .6 * a[2]

    def perturbation_factors(self, data):
        # Displacement of the sample below/above the training UoD; both stay
        # zero while the sample lies inside it
        _max = 0
        _min = 0
        if data < self.original_min:
            _min = data - self.original_min
        elif data > self.original_max:
            _max = data - self.original_max

        # Keep only the three most recent displacements and take the most
        # extreme value of each window
        self.min_stack.pop(2)
        self.min_stack.insert(0, _min)
        _min = min(self.min_stack)

        self.max_stack.pop(2)
        self.max_stack.insert(0, _max)
        _max = max(self.max_stack)

        # Translate each set proportionally to its position in the partition
        # and scale it by the distance between its two nearest neighbors
        location = np.linspace(_min, _max, self.partitioner.partitions)
        scale = [abs(location[0] - location[2])]
        scale.extend([abs(location[k - 1] - location[k + 1])
                      for k in np.arange(1, self.partitioner.partitions - 1)])
        scale.append(abs(location[-1] - location[-3]))

        perturb = [[location[k], scale[k]] for k in np.arange(0, self.partitioner.partitions)]

        return perturb

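    # For illustration (hypothetical numbers): with 5 partitions, a sample
    # 2.0 above the training maximum, and zeros elsewhere in the stacks:
    #
    #   location = np.linspace(0, 2.0, 5)  ->  [0.0, 0.5, 1.0, 1.5, 2.0]
    #   scale    = [1.0, 1.0, 1.0, 1.0, 1.0]
    #   perturb  = [[0.0, 1.0], [0.5, 1.0], ..., [2.0, 1.0]]
    #
    # Each [translation, scaling] pair is later handed to the fuzzy sets'
    # membership/get_midpoint/get_lower/get_upper methods.
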
    def _affected_sets(self, sample, perturb):
        # Index and membership grade of every fuzzy set activated by the
        # sample, after perturbing each set
        affected_sets = []
        for ct, key in enumerate(self.partitioner.ordered_sets):
            mv = self.sets[key].membership(sample, perturb[ct])
            if mv > 0.0:
                affected_sets.append([ct, mv])

        # Samples beyond the perturbed bounds activate the nearest boundary
        # set with full membership
        if len(affected_sets) == 0:
            if sample < self.partitioner.lower_set().get_lower(perturb[0]):
                affected_sets.append([0, 1])
            elif sample > self.partitioner.upper_set().get_upper(perturb[-1]):
                affected_sets.append([len(self.sets) - 1, 1])

        return affected_sets

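    # For example (hypothetical values): a sample activating the 4th and 5th
    # sets with grades 0.7 and 0.3 yields [[3, 0.7], [4, 0.3]]; a sample
    # below every set yields [[0, 1]].
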
    def forecast(self, ndata, **kwargs):
        ret = []

        for sample in ndata:
            perturb = self.perturbation_factors(sample)
            affected_sets = self._affected_sets(sample, perturb)

            tmp = []

            if len(affected_sets) == 1:
                # Single activated set: use its rule midpoint directly
                ix = affected_sets[0][0]
                aset = self.partitioner.ordered_sets[ix]
                if aset in self.flrgs:
                    tmp.append(self.flrgs[aset].get_midpoint(perturb[ix]))
                else:
                    # No rule for this set: fall back to the set midpoint
                    fuzzy_set = self.sets[aset]
                    tmp.append(fuzzy_set.get_midpoint(perturb[ix]))
            else:
                # Several activated sets: midpoints weighted by membership
                for aset in affected_sets:
                    ix = aset[0]
                    fs = self.partitioner.ordered_sets[ix]
                    tdisp = perturb[ix]
                    if fs in self.flrgs:
                        tmp.append(self.flrgs[fs].get_midpoint(tdisp) * aset[1])
                    else:
                        fuzzy_set = self.sets[fs]
                        tmp.append(fuzzy_set.get_midpoint(tdisp) * aset[1])

            ret.append(sum(tmp))

        return ret

    def forecast_interval(self, ndata, **kwargs):
        ret = []

        for sample in ndata:
            perturb = self.perturbation_factors(sample)
            affected_sets = self._affected_sets(sample, perturb)

            upper = []
            lower = []

            if len(affected_sets) == 1:
                # Single activated set: use its rule bounds directly
                ix = affected_sets[0][0]
                aset = self.partitioner.ordered_sets[ix]
                if aset in self.flrgs:
                    lower.append(self.flrgs[aset].get_lower(perturb[ix]))
                    upper.append(self.flrgs[aset].get_upper(perturb[ix]))
                else:
                    # No rule for this set: fall back to the set bounds
                    fuzzy_set = self.sets[aset]
                    lower.append(fuzzy_set.get_lower(perturb[ix]))
                    upper.append(fuzzy_set.get_upper(perturb[ix]))
            else:
                # Several activated sets: bounds weighted by membership
                for aset in affected_sets:
                    ix = aset[0]
                    fs = self.partitioner.ordered_sets[ix]
                    tdisp = perturb[ix]
                    if fs in self.flrgs:
                        lower.append(self.flrgs[fs].get_lower(tdisp) * aset[1])
                        upper.append(self.flrgs[fs].get_upper(tdisp) * aset[1])
                    else:
                        fuzzy_set = self.sets[fs]
                        lower.append(fuzzy_set.get_lower(tdisp) * aset[1])
                        upper.append(fuzzy_set.get_upper(tdisp) * aset[1])

            ret.append([sum(lower), sum(upper)])

        return ret
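
# Minimal usage sketch (illustrative, not part of the original module). It
# assumes the standard pyFTS Grid partitioner and the fit()/predict()
# interface of the pyFTS.common.fts.FTS base class; `train_data` and
# `test_data` are hypothetical 1-D numeric sequences:
#
#   from pyFTS.partitioners import Grid
#
#   fs = Grid.GridPartitioner(data=train_data, npart=15)
#   model = ConditionalVarianceFTS(partitioner=fs)
#   model.fit(train_data)
#   point_forecasts = model.predict(test_data)              # forecast()
#   intervals = model.predict(test_data, type='interval')   # forecast_interval()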