toolbox.register("individual", tools.initCycle, creator.Individual, funcs_seq, n=1)
strategy = cma.StrategyOnePlusLambda(parent=parent, sigma=cma_sigma, lambda_=lmbda)
pop, log = algorithms.eaGenerateUpdate(toolbox, ngen=run.gen_num, stats=stats, halloffame=hof, verbose=True)
Can anyone help?--
You received this message because you are subscribed to the Google Groups "deap-users" group.
To unsubscribe from this group and stop receiving emails from it, send an email to deap-users+...@googlegroups.com.
For more options, visit https://groups.google.com/d/optout.
You will notice that the evolution progress is relatively flat, whereas previously evolution progress looked more like this.
Notice the incremental improvement towards a fitness of 1.0, particularly in the first ~50 generations.
The evaluation function is implemented like this.
def evaluate(individual, deap_aa_seq, ref_angle, hydro_index, run, trans=True):
if trans: trans_ind = transform_ind(individual, deap_aa_seq) else: trans_ind = individual
structure = build_peptide_fast(trans_ind, deap_aa_seq)
hpmv, unit_vec, norm_hpm, amph_fit = calculate_amph(structure, deap_aa_seq, ref_angle, hydro_index, run) assert 0 <= amph_fit <= 1, "Amphipathic fitness isn't normalised!"
return amph_fit,
This is where the individual is transformed.
def transform_ind(ind, aa_seq):
phis = ind[0::2] psis = ind[1::2] trans_ind = [] append = trans_ind.append
for aa, phi, psi in zip(aa_seq, phis, psis): trans_phi, trans_psi = get_dihedrals(AA_DICT_1_to_3[aa]) append(trans_phi) append(trans_psi)
return trans_ind
This is where relevant parameters for the statistical models are gathered.
def get_dihedrals(aa, num=1):
phi_mu = AA_STATS[aa]['phi_mu'] phi_std = AA_STATS[aa]['phi_std'] psi_mu = AA_STATS[aa]['psi_mu'] psi_std = AA_STATS[aa]['psi_std'] covar = AA_STATS[aa]['covariance']
if aa == 'GLY': # special case for glycine because it's bimodal phi, psi = get_gly_angles(num) else: phi, psi = get_aa_angles(phi_mu, phi_std, psi_mu, psi_std, covar, num)
return phi, psi
This is the function for all the amino acids, except glycine.
def get_aa_angles(phi_mu, phi_std, psi_mu, psi_std, covar, num=1):
phi, psi = multivariate_normal([phi_mu, psi_mu], covar, size=num).T
return phi, psi
And this is the function for selecting glycine angles (the parameters for the GMM are pre-computed based on available glycine data).
def get_gly_angles(num):
g = mixture.GMM(n_components=2) g.converged_ = True g.covars_ = np.array([[302.20669879, 542.40686993], [238.31054863, 360.50980976]]) g.means_ = np.array([[-70.62229728, -27.70590334], [85.05020584, 6.95850818]]) g.weights_ = np.array([0.39771791, 0.60228209]) xy_samples = g.sample(num) phi, psi = zip(*xy_samples)
return phi, psi
What I assume is happening I'm not applying the changes to the transformed individual correctly, such that the CMA-ES is unable "learn" in the genotype space because I keep overwriting the values it produces in the phenotype.
Any advice for how I might be able to address this?
You can remap these values to another distribution (or range) without much worries. You can do the remapping at evaluation time (without modifying the original individual).