Improvements in FCM_FTS
parent dd5ac3de20
commit 5804f3d5f2
@@ -35,7 +35,10 @@ def genotype():
     """
     num_concepts = parameters['num_concepts']
     order = parameters['order']
-    ind = dict(weights=[np.random.normal(0,1.,(num_concepts,num_concepts)) for k in range(order)])
+    ind = dict(
+        weights=[np.random.normal(0, 1., (num_concepts,num_concepts)) for k in range(order)],
+        bias=[np.random.normal(0, 1., num_concepts) for k in range(order)]
+    )
     return ind

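For orientation, a self-contained sketch of the new genotype (hypothetical standalone signature; the real function reads num_concepts and order from the module-level parameters dict): each individual now carries one bias vector per lag order alongside the weight matrices.

    import numpy as np

    # Sketch of the genotype above: one (n x n) weight matrix and one
    # length-n bias vector per lag order, both drawn from N(0, 1).
    def genotype(num_concepts, order):
        return dict(
            weights=[np.random.normal(0., 1., (num_concepts, num_concepts)) for _ in range(order)],
            bias=[np.random.normal(0., 1., num_concepts) for _ in range(order)]
        )

    ind = genotype(num_concepts=5, order=2)
    print(ind['weights'][0].shape, ind['bias'][0].shape)  # (5, 5) (5,)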
@@ -77,6 +80,7 @@ def phenotype(individual, train):
     model = fts.FCM_FTS(partitioner=partitioner, order=order)

     model.fcm.weights = individual['weights']
+    model.fcm.bias = individual['bias']

     return model

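The bias has to be copied here because FuzzyCognitiveMap.activate (changed later in this commit) now adds self.bias[k] at every lag, so a model left with the empty-list default would fail on that index. A toy sketch of the copy, using a SimpleNamespace stand-in for the real fts.FCM_FTS model:

    import numpy as np
    from types import SimpleNamespace

    # Stand-in model: only the .fcm attribute matters for this sketch.
    model = SimpleNamespace(fcm=SimpleNamespace(weights=[], bias=[]))

    def phenotype(individual, model):
        # Copy the evolved genome onto the model, weights and bias alike.
        model.fcm.weights = individual['weights']
        model.fcm.bias = individual['bias']
        return model

    phenotype(dict(weights=[np.eye(3)], bias=[np.zeros(3)]), model)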
@@ -165,6 +169,15 @@ def crossover(parents):

        descendent['weights'][k] = np.array(new_weight).reshape(weights1.shape)

+       new_bias = []
+       bias1 = parents[0]['bias'][k]
+       bias2 = parents[1]['bias'][k]
+
+       for row, a in enumerate(weights1):
+           new_bias.append(.7 * bias1[row] + .3 * bias2[row])
+
+       descendent['bias'][k] = np.array(new_bias).reshape(bias1.shape)
+
    return descendent

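The bias crossover mirrors the weight crossover: a fixed 70/30 convex blend of the two parents, taken entry by entry (looping over weights1 just reuses its row count, which equals the bias length). A vectorized sketch that should give the same result:

    import numpy as np

    # Blend crossover for one bias vector: descendant = .7*parent1 + .3*parent2.
    def crossover_bias(bias1, bias2, w1=.7, w2=.3):
        return w1 * np.asarray(bias1) + w2 * np.asarray(bias2)

    b1 = np.array([0.1, -0.4, 0.9])
    b2 = np.array([0.5, 0.2, -0.3])
    print(crossover_bias(b1, b2))  # [ 0.22 -0.22  0.54]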
@@ -194,6 +207,9 @@ def mutation(individual, pmut):
            individual['weights'][k][row, col] += np.random.normal(0, .5, 1)
            individual['weights'][k][row, col] = np.clip(individual['weights'][k][row, col], -1, 1)

+           individual['bias'][k][row] += np.random.normal(0, .5, 1)
+
+
    return individual

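Mutation now perturbs the bias entry of the mutated row with the same N(0, .5) noise; note that, as written, only the weight is clipped back into [-1, 1], the bias is not. A condensed sketch of one mutation step:

    import numpy as np

    # One mutation step for lag k at (row, col): Gaussian noise on the weight,
    # clipped to [-1, 1], and on the matching bias entry (unclipped, as above).
    def mutate_entry(weights, bias, k, row, col, scale=.5):
        weights[k][row, col] = np.clip(weights[k][row, col] + np.random.normal(0, scale), -1., 1.)
        bias[k][row] += np.random.normal(0, scale)

    W, b = [np.zeros((3, 3))], [np.zeros(3)]
    mutate_entry(W, b, k=0, row=1, col=2)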
@@ -6,31 +6,46 @@ def GD(data, model, **kwargs):
     momentum = kwargs.get('momentum', None)
     iterations = kwargs.get('iterations', 1)
     num_concepts = model.partitioner.partitions
-    weights = [np.random.normal(0,.01,(num_concepts,num_concepts)) for k in range(model.order)]
-    last_gradient = [None for k in range(model.order) ]
+    weights = [np.random.normal(0,.1,(num_concepts,num_concepts)) for k in range(model.order)]
+    bias = [np.random.normal(0,.1,num_concepts) for k in range(model.order)]
+    last_gradientW = [None for k in range(model.order) ]
+    last_gradientB = [None for k in range(model.order)]
     for it in np.arange(iterations):
         for i in np.arange(model.order, len(data)):
             #i = np.random.randint(model.order, len(data)-model.order)
             sample = data[i-model.order : i]
-            target = data[i]
+            target = model.partitioner.fuzzyfy(data[i], mode='vector')
+            #target = data[i]
             model.fcm.weights = weights
+            model.fcm.bias = bias
             inputs = model.partitioner.fuzzyfy(sample, mode='vector')
-            activations = model.fcm.activate(inputs)
+            #activations = model.fcm.activate(inputs)
+            activations = [model.fcm.activation_function(inputs[k]) for k in np.arange(model.order)]
             forecast = model.predict(sample)[0]
-            error = target - forecast #)**2
-            if str(error) == 'nan' or error == np.nan or error == np.Inf:
-                print('error')
-                print(error)
+            error = target - model.partitioner.fuzzyfy(forecast, mode='vector') #)**2
+            #error = target - forecast
+            #if str(error) == 'nan' or error == np.nan or error == np.Inf:
+            #print(error)
+            print(np.dot(error,error))

             for k in np.arange(model.order):
                 deriv = error * model.fcm.activation_function(activations[k], deriv=True)
+                #deriv = error * activations[k]
                 if momentum is not None:
-                    if last_gradient[k] is None:
-                        last_gradient[k] = deriv*inputs[k]
-
-                    tmp_grad = (momentum * last_gradient[k]) + alpha*deriv*inputs[k]
-                    weights[k] -= tmp_grad
-                    last_gradient[k] = tmp_grad
+                    if last_gradientW[k] is None:
+                        last_gradientW[k] = deriv * inputs[k]
+                        last_gradientB[k] = deriv
+
+                    tmp_gradw = (momentum * last_gradientW[k]) + alpha*deriv*inputs[k]
+                    weights[k] -= tmp_gradw
+                    last_gradientW[k] = tmp_gradw
+
+                    tmp_gradB = (momentum * last_gradientB[k]) + alpha * deriv
+                    bias[k] -= tmp_gradB
+                    last_gradientB[k] = tmp_gradB
                 else:
                     weights[k] -= alpha*deriv*inputs[k]
+                    bias[k] -= alpha*deriv

     return weights
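Two changes stand out in GD. First, the error is now computed in fuzzified space: both the target and the forecast pass through partitioner.fuzzyfy(..., mode='vector'), so error is a membership vector rather than a crisp residual (hence the new print(np.dot(error,error)) squared-error trace). Second, the momentum bookkeeping is split into separate weight and bias traces. Note also that the function still ends with return weights, so the bias trained here is not handed back to the caller. A minimal runnable sketch of the per-lag momentum update, assuming weights_k is (n, n) while deriv and inputs_k are length-n vectors, so the weight update broadcasts across rows exactly as in the hunk:

    import numpy as np

    # Momentum update for one lag k, mirroring the hunk above:
    # velocity = momentum * previous_velocity + alpha * gradient.
    def momentum_step(weights_k, bias_k, last_gw, last_gb, deriv, inputs_k,
                      alpha=.02, momentum=.8):
        grad_w = momentum * last_gw + alpha * deriv * inputs_k  # weight velocity
        grad_b = momentum * last_gb + alpha * deriv             # bias velocity
        weights_k -= grad_w   # length-n velocity broadcast over the matrix rows
        bias_k -= grad_b
        return grad_w, grad_b  # stored back as last_gradientW[k], last_gradientB[k]

    n = 4
    W, b = np.zeros((n, n)), np.zeros(n)
    gw, gb = momentum_step(W, b, np.zeros(n), np.zeros(n),
                           deriv=np.full(n, .1), inputs_k=np.ones(n))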
@@ -8,11 +8,12 @@ class FuzzyCognitiveMap(object):
         self.order = kwargs.get('order',1)
         self.concepts = kwargs.get('partitioner',None)
         self.weights = []
+        self.bias = []
         self.activation_function = kwargs.get('activation_function', Activations.sigmoid)

     def activate(self, concepts):
         dot_products = np.zeros(len(self.concepts))
         for k in np.arange(0, self.order):
-            dot_products += np.dot(np.array(concepts[k]).T, self.weights[k])
+            dot_products += np.dot(np.array(concepts[k]).T, self.weights[k]) + self.bias[k]
         return self.activation_function( dot_products )

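With the bias in place, the activation of an order-o map becomes f(sum_k concepts[k]^T . W[k] + b[k]). A self-contained sketch with a plain sigmoid standing in for Activations.sigmoid:

    import numpy as np

    # Updated activation: accumulate concepts[k]^T . W[k] + b[k] over all lags,
    # then squash through the activation function.
    def activate(concepts, weights, bias, f=lambda x: 1. / (1. + np.exp(-x))):
        dot_products = np.zeros(weights[0].shape[0])
        for k in range(len(weights)):
            dot_products += np.dot(np.asarray(concepts[k]).T, weights[k]) + bias[k]
        return f(dot_products)

    W = [np.eye(3) * .5, np.eye(3) * .1]   # one weight matrix per lag
    b = [np.zeros(3), np.full(3, .2)]      # one bias vector per lag
    x = [np.array([1., 0., 1.]), np.array([0., 1., 0.])]
    print(activate(x, W, b))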
@@ -18,6 +18,7 @@ class FCM_FTS(hofts.HighOrderFTS):
             GA.parameters['partitioner'] = self.partitioner
             ret = GA.execute(data, **kwargs)
             self.fcm.weights = ret['weights']
+            self.fcm.bias = ret['bias']
         elif method == 'GD':
             self.fcm.weights = GD.GD(data, self, **kwargs)

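One asymmetry worth noting: the GA branch restores both ret['weights'] and ret['bias'], while the GD branch assigns only what GD.GD returns, which in this commit is the weights alone, so the bias trained inside GD never reaches the model. A hypothetical variant (not what this commit does, and it assumes GD were changed to end with return weights, bias) would unpack both:

    # Hypothetical GD branch (not in this commit): have GD return both
    # lists so the trained bias also reaches the model.
    weights, bias = GD.GD(data, self, **kwargs)
    self.fcm.weights = weights
    self.fcm.bias = bias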
@@ -8,7 +8,7 @@ import pandas as pd

 from pyFTS.fcm import fts as fcm_fts
 from pyFTS.partitioners import Grid
-from pyFTS.common import Util
+from pyFTS.common import Util, Membership

 df = pd.read_csv('https://query.data.world/s/56i2vkijbvxhtv5gagn7ggk3zw3ksi', sep=';')

@@ -18,12 +18,24 @@ data = df['glo_avg'].values[:]
 train = data[:7000]
 test = data[7000:7500]

-fs = Grid.GridPartitioner(data=train, npart=5)
+fs = Grid.GridPartitioner(data=train, npart=5, func=Membership.trimf)

 model = fcm_fts.FCM_FTS(partitioner=fs, order=2, activation_function = Activations.relu)

-model.fit(train, method='GD', alpha=0.02, momentum=0.8, iteractions=3 )
+model.fit(train, method='GD', alpha=0.5, momentum=None, iteractions=1 )
+
+'''
+model.fit(train, method='GA', ngen=15, #number of generations
+          mgen=7, # stop after mgen generations without improvement
+          npop=15, # number of individuals on population
+          pcruz=.5, # crossover percentual of population
+          pmut=.3, # mutation percentual of population
+          window_size = 7000,
+          train_rate = .8,
+          increment_rate =.2,
+          experiments=1
+          )
+'''

 Util.persist_obj(model, 'fcm_fts10c')
 '''
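A caveat on the script above: GD reads its loop count via kwargs.get('iterations', 1), so the iteractions=... spelling in these fit calls never reaches it and gradient descent silently falls back to a single pass. The intended call is presumably:

    model.fit(train, method='GD', alpha=0.02, momentum=0.8, iterations=3)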