Bugfixes in DEHO

Petrônio Cândido 2019-08-05 17:51:15 -03:00
parent 5b7e4edcd7
commit 9efe7ba453
3 changed files with 70 additions and 44 deletions

View File

@@ -76,24 +76,10 @@ def random_genotype(**kwargs):
     for v in explanatory_variables:
         var = vars[v]
-        if var['type'] == 'common':
-            npart = random.randint(7, 50)
-        else:
-            npart = var['npart']
-        param = {
-            'mf': random.randint(1, 4),
-            'npart': npart,
-            'partitioner': 1, #random.randint(1, 2),
-            'alpha': random.uniform(0, .5)
-        }
+        param = random_param(var)
 
         explanatory_params.append(param)
 
-    target_params = {
-        'mf': random.randint(1, 4),
-        'npart': random.randint(7, 50),
-        'partitioner': 1, #random.randint(1, 2),
-        'alpha': random.uniform(0, .5)
-    }
+    target_params = random_param(tvar)
 
     return genotype(
         explanatory_variables,
@@ -102,6 +88,20 @@ def random_genotype(**kwargs):
     )
 
 
+def random_param(var):
+    if var['type'] == 'common':
+        npart = random.randint(7, 50)
+    else:
+        npart = var['npart']
+    param = {
+        'mf': random.randint(1, 4),
+        'npart': npart,
+        'partitioner': 1, # random.randint(1, 2),
+        'alpha': random.uniform(0, .5)
+    }
+    return param
+
+
 def phenotype(individual, train, fts_method, parameters={}, **kwargs):
     vars = kwargs.get('variables', None)
     tvar = kwargs.get('target_variable', None)
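
For context, a minimal sketch of how the new random_param helper behaves for the two variable types used by DEHO. The helper body is copied from the hunk above so the snippet runs standalone; the two variable dicts are illustrative, not part of this commit.

import random

def random_param(var):
    # npart is only sampled for 'common' variables; seasonal variables
    # keep the number of partitions fixed by their seasonality
    if var['type'] == 'common':
        npart = random.randint(7, 50)
    else:
        npart = var['npart']
    return {
        'mf': random.randint(1, 4),
        'npart': npart,
        'partitioner': 1,
        'alpha': random.uniform(0, .5)
    }

common = {'name': 'Load', 'type': 'common'}            # illustrative
seasonal = {'name': 'Weekly', 'type': 'seasonal', 'npart': 7}  # illustrative

print(random_param(common)['npart'])    # random value in [7, 50]
print(random_param(seasonal)['npart'])  # always 7
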
@@ -246,6 +246,10 @@ def crossover(population, **kwargs):
     """
     import random
 
+    vars = kwargs.get('variables', None)
+    tvar = kwargs.get('target_variable', None)
+
     n = len(population) - 1
 
     r1,r2 = 0,0
@ -280,7 +284,7 @@ def crossover(population, **kwargs):
if ix in best['explanatory_variables'] and ix in worst['explanatory_variables']: if ix in best['explanatory_variables'] and ix in worst['explanatory_variables']:
bix = best['explanatory_variables'].index(ix) bix = best['explanatory_variables'].index(ix)
wix = worst['explanatory_variables'].index(ix) wix = worst['explanatory_variables'].index(ix)
param = crossover_variable_params(best['explanatory_params'][bix], worst['explanatory_params'][wix]) param = crossover_variable_params(best['explanatory_params'][bix], worst['explanatory_params'][wix], vars[ix])
elif ix in best['explanatory_variables']: elif ix in best['explanatory_variables']:
bix = best['explanatory_variables'].index(ix) bix = best['explanatory_variables'].index(ix)
param = best['explanatory_params'][bix] param = best['explanatory_params'][bix]
@@ -291,15 +295,18 @@ def crossover(population, **kwargs):
         explanatory_variables.append(ix)
         explanatory_params.append(param)
 
-    tparams = crossover_variable_params(best['target_params'], worst['target_params'])
+    tparams = crossover_variable_params(best['target_params'], worst['target_params'], tvar)
 
-    descendent = genotype(explanatory_variables, explanatory_params, tparams, None, None)
+    descendent = genotype(explanatory_variables, explanatory_params, tparams)
 
     return descendent
 
 
-def crossover_variable_params(best, worst):
-    npart = int(round(.7 * best['npart'] + .3 * worst['npart']))
+def crossover_variable_params(best, worst, var):
+    if var['type'] == 'common':
+        npart = int(round(.7 * best['npart'] + .3 * worst['npart']))
+    else:
+        npart = best['npart']
     alpha = float(.7 * best['alpha'] + .3 * worst['alpha'])
     rnd = random.uniform(0, 1)
     mf = best['mf'] if rnd < .7 else worst['mf']
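
The numeric parameters are crossed over as a fixed 70/30 weighted blend of the better and worse parent; the new var argument only skips the npart blend for seasonal variables. A worked example of the arithmetic, with made-up parent values:

# Illustrative arithmetic only; the parent parameter dicts are made up.
best = {'npart': 30, 'alpha': 0.20, 'mf': 2}
worst = {'npart': 10, 'alpha': 0.40, 'mf': 4}

npart = int(round(.7 * best['npart'] + .3 * worst['npart']))  # 0.7*30 + 0.3*10 = 24
alpha = float(.7 * best['alpha'] + .3 * worst['alpha'])       # 0.7*0.2 + 0.3*0.4 = 0.26
print(npart, alpha)  # 24 0.26
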
@@ -317,14 +324,32 @@ def mutation(individual, **kwargs):
     :return:
     """
+    vars = kwargs.get('variables', None)
+    tvar = kwargs.get('target_variable', None)
+
+    l = len(vars)
+    il = len(individual['explanatory_variables'])
+
+    rnd = random.uniform(0, 1)
+    if rnd > .9 and il > 1:
+        rnd = random.randint(0, il-1)
+        val = individual['explanatory_variables'][rnd]
+        individual['explanatory_variables'].remove(val)
+        individual['explanatory_params'].pop(rnd)
+    elif rnd < .1 and il < l:
+        rnd = random.randint(0, l-1)
+        while rnd in individual['explanatory_variables']:
+            rnd = random.randint(0, l-1)
+        individual['explanatory_variables'].append(rnd)
+        individual['explanatory_params'].append(random_param(vars[rnd]))
 
     for ct in np.arange(len(individual['explanatory_variables'])):
         rnd = random.uniform(0, 1)
         if rnd > .5:
-            mutate_variable_params(individual['explanatory_params'][ct])
+            mutate_variable_params(individual['explanatory_params'][ct], vars[ct])
 
     rnd = random.uniform(0, 1)
     if rnd > .5:
-        mutate_variable_params(individual['target_params'])
+        mutate_variable_params(individual['target_params'], tvar)
 
     individual['f1'] = None
     individual['f2'] = None
@ -332,8 +357,9 @@ def mutation(individual, **kwargs):
return individual return individual
def mutate_variable_params(param): def mutate_variable_params(param, var):
param['npart'] = min(50, max(3, int(param['npart'] + np.random.normal(0, 4)))) if var['type']=='common':
param['npart'] = min(50, max(3, int(param['npart'] + np.random.normal(0, 4))))
param['alpha'] = min(.5, max(0, param['alpha'] + np.random.normal(0, .5))) param['alpha'] = min(.5, max(0, param['alpha'] + np.random.normal(0, .5)))
param['mf'] = random.randint(1, 4) param['mf'] = random.randint(1, 4)
param['partitioner'] = random.randint(1, 2) param['partitioner'] = random.randint(1, 2)
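
The per-parameter mutation perturbs npart and alpha with Gaussian noise and clips them back into their valid ranges; a short, self-contained check of the clipping bounds (the starting values are illustrative):

import numpy as np

# Clipping used by mutate_variable_params: npart stays in [3, 50],
# alpha stays in [0, .5]. Starting values below are illustrative.
npart, alpha = 49, 0.48
npart = min(50, max(3, int(npart + np.random.normal(0, 4))))
alpha = min(.5, max(0, alpha + np.random.normal(0, .5)))
assert 3 <= npart <= 50 and 0 <= alpha <= .5
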
@@ -428,7 +454,7 @@ def persist_statistics(datasetname, statistics):
 def log_result(datasetname, fts_method, result):
     import json
 
-    with open('result_{}{}.json'.format(fts_method,datasetname), 'w') as file:
+    with open('result_{}{}.json'.format(fts_method,datasetname), 'a+') as file:
         file.write(json.dumps(result))
 
     print(result)
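
Because log_result now opens the file in 'a+' mode and json.dumps writes no separator, repeated calls append JSON objects back-to-back in one file. A minimal sketch of reading such a file back, decoding one object at a time with json.JSONDecoder.raw_decode (the reader function itself is not part of this commit):

import json

def read_results(path):
    # The file named 'result_{fts_method}{datasetname}.json' holds several
    # JSON objects concatenated without separators, so decode them one by one.
    decoder = json.JSONDecoder()
    with open(path) as f:
        text = f.read()
    results, idx = [], 0
    while idx < len(text):
        while idx < len(text) and text[idx].isspace():
            idx += 1  # tolerate stray whitespace between objects
        if idx >= len(text):
            break
        obj, idx = decoder.raw_decode(text, idx)
        results.append(obj)
    return results
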

View File

@@ -156,9 +156,12 @@ class Partitioner(object):
         nearest = self.search(data, type='index')
         mv = np.zeros(self.partitions)
 
-        for ix in nearest:
-            tmp = self[ix].membership(data)
-            mv[ix] = tmp if tmp >= alpha_cut else 0.
+        try:
+            for ix in nearest:
+                tmp = self[ix].membership(data)
+                mv[ix] = tmp if tmp >= alpha_cut else 0.
+        except:
+            print(ix)
 
         ix = np.ravel(np.argwhere(mv > 0.))
@@ -316,19 +319,16 @@ class Partitioner(object):
         it represents the fuzzy set name.
         :return: the fuzzy set
         """
-        try:
-            if isinstance(item, (int, np.int, np.int8, np.int16, np.int32, np.int64)):
-                if item < 0 or item >= self.partitions:
-                    raise ValueError("The fuzzy set index must be between 0 and {}.".format(self.partitions))
-                return self.sets[self.ordered_sets[item]]
-            elif isinstance(item, str):
-                if item not in self.sets:
-                    raise ValueError("The fuzzy set with name {} does not exist.".format(item))
-                return self.sets[item]
-            else:
-                raise ValueError("The parameter 'item' must be an integer or a string and the value informed was {} of type {}!".format(item, type(item)))
-        except Exception as ex:
-            logging.exception("Error")
+        if isinstance(item, (int, np.int, np.int8, np.int16, np.int32, np.int64)):
+            if item < 0 or item >= self.partitions:
+                raise ValueError("The fuzzy set index must be between 0 and {}.".format(self.partitions))
+            return self.sets[self.ordered_sets[item]]
+        elif isinstance(item, str):
+            if item not in self.sets:
+                raise ValueError("The fuzzy set with name {} does not exist.".format(item))
+            return self.sets[item]
+        else:
+            raise ValueError("The parameter 'item' must be an integer or a string and the value informed was {} of type {}!".format(item, type(item)))
 
 
     def __iter__(self):
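
With the try/except removed, Partitioner.__getitem__ now surfaces ValueError directly instead of logging it and returning None, so callers can handle bad lookups explicitly. A hedged usage sketch, assuming the standard pyFTS Grid partitioner API over synthetic data (not part of this commit):

import numpy as np
from pyFTS.partitioners import Grid

# Illustrative only: a small grid partitioner over random data.
data = np.random.uniform(0, 100, 500)
part = Grid.GridPartitioner(data=data, npart=10)

try:
    part[999]                 # out-of-range index now raises immediately
except ValueError as ex:
    print("invalid fuzzy set request:", ex)

print(part[0].name)           # lookup by index still returns the fuzzy set
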

View File

@@ -50,14 +50,14 @@ explanatory_variables =[
     {'name': 'Temperature', 'data_label': 'temperature', 'type': 'common'},
     {'name': 'Daily', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.minute_of_day, 'npart': 24 },
     {'name': 'Weekly', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.day_of_week, 'npart': 7 },
-    #{'name': 'Monthly', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.day_of_month, 'npart': 4 },
+    {'name': 'Monthly', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.day_of_month, 'npart': 4 },
     {'name': 'Yearly', 'data_label': 'time', 'type': 'seasonal', 'seasonality': DateTime.day_of_year, 'npart': 12 }
 ]
 
 target_variable = {'name': 'Load', 'data_label': 'load', 'type': 'common'}
 nodes=['192.168.28.38']
 deho_mv.execute(datsetname, dataset,
-               ngen=10, npop=10,psel=0.6, pcross=.5, pmut=.3,
+               ngen=20, npop=15,psel=0.6, pcross=.5, pmut=.3,
                window_size=2000, train_rate=.9, increment_rate=1,
                experiments=1,
                fts_method=wmvfts.WeightedMVFTS,