From 5804f3d5f24332bb7a5babbc935a2712e514d721 Mon Sep 17 00:00:00 2001
From: Petrônio Cândido
Date: Wed, 29 Jan 2020 16:39:23 -0300
Subject: [PATCH] Improvements in FCM_FTS

---
 pyFTS/fcm/GA.py        | 18 +++++++++++++++++-
 pyFTS/fcm/GD.py        | 41 ++++++++++++++++++++++++++++-------------
 pyFTS/fcm/common.py    |  3 ++-
 pyFTS/fcm/fts.py       |  1 +
 pyFTS/tests/fcm_fts.py | 18 +++++++++++++++---
 5 files changed, 63 insertions(+), 18 deletions(-)

diff --git a/pyFTS/fcm/GA.py b/pyFTS/fcm/GA.py
index 9e0f3bf..3fda2ba 100644
--- a/pyFTS/fcm/GA.py
+++ b/pyFTS/fcm/GA.py
@@ -35,7 +35,10 @@ def genotype():
     """
     num_concepts = parameters['num_concepts']
     order = parameters['order']
-    ind = dict(weights=[np.random.normal(0,1.,(num_concepts,num_concepts)) for k in range(order)])
+    ind = dict(
+        weights=[np.random.normal(0, 1., (num_concepts,num_concepts)) for k in range(order)],
+        bias=[np.random.normal(0, 1., num_concepts) for k in range(order)]
+    )
 
     return ind
 
@@ -77,6 +80,7 @@ def phenotype(individual, train):
 
     model = fts.FCM_FTS(partitioner=partitioner, order=order)
     model.fcm.weights = individual['weights']
+    model.fcm.bias = individual['bias']
 
     return model
 
@@ -165,6 +169,15 @@ def crossover(parents):
 
         descendent['weights'][k] = np.array(new_weight).reshape(weights1.shape)
 
+        new_bias = []
+        bias1 = parents[0]['bias'][k]
+        bias2 = parents[1]['bias'][k]
+
+        for row, a in enumerate(weights1):
+            new_bias.append(.7 * bias1[row] + .3 * bias2[row])
+
+        descendent['bias'][k] = np.array(new_bias).reshape(bias1.shape)
+
     return descendent
 
 
@@ -194,6 +207,9 @@ def mutation(individual, pmut):
             individual['weights'][k][row, col] += np.random.normal(0, .5, 1)
             individual['weights'][k][row, col] = np.clip(individual['weights'][k][row, col], -1, 1)
 
+            individual['bias'][k][row] += np.random.normal(0, .5, 1)
+
+
     return individual
 
diff --git a/pyFTS/fcm/GD.py b/pyFTS/fcm/GD.py
index 0694911..88a190b 100644
--- a/pyFTS/fcm/GD.py
+++ b/pyFTS/fcm/GD.py
@@ -6,31 +6,46 @@ def GD(data, model, **kwargs):
     momentum = kwargs.get('momentum', None)
     iterations = kwargs.get('iterations', 1)
     num_concepts = model.partitioner.partitions
-    weights = [np.random.normal(0,.01,(num_concepts,num_concepts)) for k in range(model.order)]
-    last_gradient = [None for k in range(model.order) ]
+    weights = [np.random.normal(0,.1,(num_concepts,num_concepts)) for k in range(model.order)]
+    bias = [np.random.normal(0,.1,num_concepts) for k in range(model.order)]
+    last_gradientW = [None for k in range(model.order) ]
+    last_gradientB = [None for k in range(model.order)]
 
     for it in np.arange(iterations):
         for i in np.arange(model.order, len(data)):
             #i = np.random.randint(model.order, len(data)-model.order)
             sample = data[i-model.order : i]
-            target = data[i]
+            target = model.partitioner.fuzzyfy(data[i], mode='vector')
+            #target = data[i]
 
             model.fcm.weights = weights
+            model.fcm.bias = bias
             inputs = model.partitioner.fuzzyfy(sample, mode='vector')
-            activations = model.fcm.activate(inputs)
+            #activations = model.fcm.activate(inputs)
+            activations = [model.fcm.activation_function(inputs[k]) for k in np.arange(model.order)]
             forecast = model.predict(sample)[0]
-            error = target - forecast #)**2
-            if str(error) == 'nan' or error == np.nan or error == np.Inf:
-                print('error')
-                print(error)
+            error = target - model.partitioner.fuzzyfy(forecast, mode='vector') #)**2
+            #error = target - forecast
+            #if str(error) == 'nan' or error == np.nan or error == np.Inf:
+                #print(error)
+
+            print(np.dot(error,error))
+
             for k in np.arange(model.order):
                 deriv = error * model.fcm.activation_function(activations[k], deriv=True)
+                #deriv = error * activations[k]
                 if momentum is not None:
-                    if last_gradient[k] is None:
-                        last_gradient[k] = deriv*inputs[k]
+                    if last_gradientW[k] is None:
+                        last_gradientW[k] = deriv * inputs[k]
+                        last_gradientB[k] = deriv
+
+                    tmp_gradw = (momentum * last_gradientW[k]) + alpha*deriv*inputs[k]
+                    weights[k] -= tmp_gradw
+                    last_gradientW[k] = tmp_gradw
+
+                    tmp_gradB = (momentum * last_gradientB[k]) + alpha * deriv
+                    bias[k] -= tmp_gradB
+                    last_gradientB[k] = tmp_gradB
 
-                    tmp_grad = (momentum * last_gradient[k]) + alpha*deriv*inputs[k]
-                    weights[k] -= tmp_grad
-                    last_gradient[k] = tmp_grad
                 else:
                     weights[k] -= alpha*deriv*inputs[k]
+                    bias[k] -= alpha*deriv
 
     return weights
\ No newline at end of file
diff --git a/pyFTS/fcm/common.py b/pyFTS/fcm/common.py
index 89f8605..1ecdd74 100644
--- a/pyFTS/fcm/common.py
+++ b/pyFTS/fcm/common.py
@@ -8,11 +8,12 @@ class FuzzyCognitiveMap(object):
         self.order = kwargs.get('order',1)
         self.concepts = kwargs.get('partitioner',None)
         self.weights = []
+        self.bias = []
         self.activation_function = kwargs.get('activation_function', Activations.sigmoid)
 
     def activate(self, concepts):
         dot_products = np.zeros(len(self.concepts))
         for k in np.arange(0, self.order):
-            dot_products += np.dot(np.array(concepts[k]).T, self.weights[k])
+            dot_products += np.dot(np.array(concepts[k]).T, self.weights[k]) + self.bias[k]
 
         return self.activation_function( dot_products )
diff --git a/pyFTS/fcm/fts.py b/pyFTS/fcm/fts.py
index e97eb25..9346dd5 100644
--- a/pyFTS/fcm/fts.py
+++ b/pyFTS/fcm/fts.py
@@ -18,6 +18,7 @@ class FCM_FTS(hofts.HighOrderFTS):
             GA.parameters['partitioner'] = self.partitioner
             ret = GA.execute(data, **kwargs)
             self.fcm.weights = ret['weights']
+            self.fcm.bias = ret['bias']
         elif method == 'GD':
             self.fcm.weights = GD.GD(data, self, **kwargs)
 
diff --git a/pyFTS/tests/fcm_fts.py b/pyFTS/tests/fcm_fts.py
index 04dceee..4774a23 100644
--- a/pyFTS/tests/fcm_fts.py
+++ b/pyFTS/tests/fcm_fts.py
@@ -8,7 +8,7 @@ import pandas as pd
 
 from pyFTS.fcm import fts as fcm_fts
 from pyFTS.partitioners import Grid
-from pyFTS.common import Util
+from pyFTS.common import Util, Membership
 
 df = pd.read_csv('https://query.data.world/s/56i2vkijbvxhtv5gagn7ggk3zw3ksi', sep=';')
 
@@ -18,12 +18,24 @@ data = df['glo_avg'].values[:]
 
 train = data[:7000]
 test = data[7000:7500]
 
-fs = Grid.GridPartitioner(data=train, npart=5)
+fs = Grid.GridPartitioner(data=train, npart=5, func=Membership.trimf)
 
 model = fcm_fts.FCM_FTS(partitioner=fs, order=2, activation_function = Activations.relu)
 
+model.fit(train, method='GD', alpha=0.5, momentum=None, iteractions=1 )
 
-model.fit(train, method='GD', alpha=0.02, momentum=0.8, iteractions=3 )
+'''
+model.fit(train, method='GA', ngen=15, #number of generations
+          mgen=7, # stop after mgen generations without improvement
+          npop=15, # number of individuals on population
+          pcruz=.5, # crossover percentual of population
+          pmut=.3, # mutation percentual of population
+          window_size = 7000,
+          train_rate = .8,
+          increment_rate =.2,
+          experiments=1
+          )
+'''
 
 Util.persist_obj(model, 'fcm_fts10c')
 
 '''
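
Reviewer note: the substantive change in this patch is that every lag k of the fuzzy
cognitive map now carries a bias vector b_k next to its weight matrix W_k, so the
activation computed in common.py becomes f(sum_k A_{t-k} . W_k + b_k), and both
trainers learn bias together with weights (the GA through genotype/crossover/mutation,
GD through the gradient loop). The snippet below is a minimal standalone NumPy sketch
of that update rule, written only to make the shapes explicit: sigmoid, fcm_activate,
and gd_step are illustrative names for this note, not pyFTS API, and the sketch follows
the momentum-free branch of GD.py, where the per-concept gradient deriv * inputs[k] is
a length-n vector broadcast across the rows of W_k, exactly as in the patch.

    import numpy as np

    def sigmoid(x, deriv=False):
        # logistic activation; deriv=True returns f(x)*(1 - f(x)), matching the
        # deriv= calling convention the patch uses for activation functions
        s = 1.0 / (1.0 + np.exp(-x))
        return s * (1.0 - s) if deriv else s

    def fcm_activate(inputs, weights, bias, f=sigmoid):
        # one FCM step: f(sum_k inputs[k] . W[k] + b[k]), as in common.py
        out = np.zeros(len(bias[0]))
        for k in range(len(weights)):
            out += np.dot(np.array(inputs[k]).T, weights[k]) + bias[k]
        return f(out)

    def gd_step(error, inputs, activations, weights, bias, alpha=0.1, f=sigmoid):
        # momentum-free branch of GD.py: weights move by alpha*deriv*inputs[k]
        # (broadcast over the rows of W[k]), bias moves by alpha*deriv
        for k in range(len(weights)):
            deriv = error * f(activations[k], deriv=True)
            weights[k] -= alpha * deriv * inputs[k]
            bias[k] -= alpha * deriv
        return weights, bias

    # toy usage: an order-2 map over 5 fuzzy concepts
    order, n = 2, 5
    rng = np.random.default_rng(1)
    weights = [rng.normal(0, .1, (n, n)) for _ in range(order)]
    bias = [rng.normal(0, .1, n) for _ in range(order)]
    inputs = [rng.random(n) for _ in range(order)]            # stand-ins for fuzzified lags
    activations = [sigmoid(inputs[k]) for k in range(order)]
    target = rng.random(n)                                    # stand-in for the fuzzified target
    error = target - fcm_activate(inputs, weights, bias)
    weights, bias = gd_step(error, inputs, activations, weights, bias)
    print(np.dot(error, error))                               # squared error, as GD.py prints

With momentum enabled, the patch instead keeps one running gradient per lag for each
parameter set (last_gradientW, last_gradientB) and applies the same heavy-ball update
independently to every W_k and b_k.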