Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
252 changes: 131 additions & 121 deletions src/ik_evolution_2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,10 +71,13 @@ template <int memetic> struct IKEvolution2 : IKBase
std::vector<size_t> quaternion_genes;
aligned_vector<double> genes_min, genes_max, genes_span;
aligned_vector<double> gradient, temp;
size_t memetic_evolution_gens, evolution_gens, memetic_opt_gens;
size_t population_size, child_count, species_count, elite_count;

// Construct the solver and load all tunable parameters from `p`.
// Parameter handling is delegated to setParams() so the same code path
// is used for construction and later parameter updates.
IKEvolution2(const IKParams& p)
    : IKBase(p)
{
    setParams(p);
}

#ifdef ENABLE_CPP_OPTLIB
Expand Down Expand Up @@ -108,6 +111,15 @@ template <int memetic> struct IKEvolution2 : IKBase

const std::vector<double>& getSolution() const { return solution; }

// Load solver-tuning parameters from the parameter struct.
// Invoked from the constructor; override of the IKBase hook.
void setParams(const IKParams& p) override
{
    // evolution sizing
    species_count = p.species_count;
    population_size = p.population_size;
    child_count = p.child_count;

    // memetic schedule
    memetic_evolution_gens = p.memetic_evolution_gens;
    memetic_opt_gens = p.memetic_opt_gens;

    // never keep more elites than individuals in a population
    elite_count = std::min(p.elite_count, p.population_size);
}

void initialize(const Problem& problem)
{
BLOCKPROFILER("initialization");
Expand All @@ -133,12 +145,8 @@ template <int memetic> struct IKEvolution2 : IKBase
// init temporary buffer with positions of inactive joints
temp_joint_variables = initial_guess;

// params
size_t population_size = 2;
size_t child_count = 16;

// initialize population on current island
species.resize(2);
species.resize(species_count);
for(auto& s : species)
{
// create individuals
Expand Down Expand Up @@ -251,10 +259,10 @@ template <int memetic> struct IKEvolution2 : IKBase

auto gene_count = children[0].genes.size();

size_t s = (children.size() - population.size()) * gene_count + children.size() * 4 + 4;
size_t s = (children.size() - population.size()) * gene_count + children.size() * 32 + 32;

auto* __restrict__ rr = fast_random_gauss_n(s);
rr = (const double*)(((size_t)rr + 3) / 4 * 4);
rr = (const double*)(((size_t)rr + 31) / 32 * 32);

/*rmask.resize(s);
for(auto& m : rmask) m = fast_random() < 0.1 ? 1.0 : 0.0;
Expand All @@ -263,8 +271,14 @@ template <int memetic> struct IKEvolution2 : IKBase
for(size_t child_index = population.size(); child_index < children.size(); child_index++)
{
double mutation_rate = (1 << fast_random_index(16)) * (1.0 / (1 << 23));
auto& parent = population[0];
auto& parent2 = population[1];
// randomly select distinct parents and make parent2 the larger index
// this is done to ensure that the same behavior is done as before when population size is 2
size_t p1, p2;
p1 = fast_random_index(population.size());
do { p2 = fast_random_index(population.size()); } while (p1 == p2 && population.size() > 1);
if (p1 > p2) std::swap(p1, p2);
auto& parent = population[p1];
auto& parent2 = population[p2];
double fmix = (child_index % 2 == 0) * 0.2;
double gradient_factor = child_index % 3;

Expand All @@ -279,7 +293,7 @@ template <int memetic> struct IKEvolution2 : IKBase
auto __attribute__((aligned(32)))* __restrict__ child_genes = child.genes.data();
auto __attribute__((aligned(32)))* __restrict__ child_gradients = child.gradients.data();

#pragma omp simd aligned(genes_span : 32), aligned(genes_min : 32), aligned(genes_max : 32), aligned(parent_genes : 32), aligned(parent_gradients : 32), aligned(parent2_genes : 32), aligned(parent2_gradients : 32), aligned(child_genes : 32), aligned(child_gradients : 32) aligned(rr : 32)
#pragma omp simd aligned(genes_span : 32), aligned(genes_min : 32), aligned(genes_max : 32), aligned(parent_genes : 32), aligned(parent_gradients : 32), aligned(parent2_genes : 32), aligned(parent2_gradients : 32), aligned(child_genes : 32), aligned(child_gradients : 32), aligned(rr : 32)
#pragma unroll
for(size_t gene_index = 0; gene_index < gene_count; gene_index++)
{
Expand All @@ -298,7 +312,7 @@ template <int memetic> struct IKEvolution2 : IKBase
child_genes[gene_index] = gene;
child_gradients[gene_index] = mix(parent_gradient, gene - parent_gene, 0.3);
}
rr += (gene_count + 3) / 4 * 4;
rr += (gene_count + 31) / 32 * 32;
// dm += (gene_count + 3) / 4 * 4;

/*if(problem.tip_link_indices.size() > 1)
Expand Down Expand Up @@ -347,7 +361,7 @@ template <int memetic> struct IKEvolution2 : IKBase

// run evolution for a few generations
size_t generation_count = 16;
if(memetic) generation_count = 8;
if(memetic) generation_count = memetic_evolution_gens;
for(size_t generation = 0; generation < generation_count; generation++)
{
// BLOCKPROFILER("evolution");
Expand Down Expand Up @@ -438,133 +452,129 @@ template <int memetic> struct IKEvolution2 : IKBase

if(memetic == 'q' || memetic == 'l')
{
for (int elite_i = 0; elite_i < elite_count; ++elite_i) {
// init
auto &individual = population[elite_i];
gradient.resize(problem.active_variables.size());
if (genotypes.empty()) genotypes.emplace_back();
phenotypes2.resize(1);
phenotypes3.resize(1);

// differentiation step size
double dp = 0.0000001;
if (fast_random() < 0.5) dp = -dp;

for (size_t generation = 0; generation < memetic_opt_gens; generation++)
// for(size_t generation = 0; generation < 32; generation++)
{

// init
auto& individual = population[0];
gradient.resize(problem.active_variables.size());
if(genotypes.empty()) genotypes.emplace_back();
phenotypes2.resize(1);
phenotypes3.resize(1);

// differentiation step size
double dp = 0.0000001;
if(fast_random() < 0.5) dp = -dp;
if (canceled) break;

for(size_t generation = 0; generation < 8; generation++)
// for(size_t generation = 0; generation < 32; generation++)
{
// compute gradient
temp = individual.genes;
genotypes[0] = temp.data();
model.computeApproximateMutations(1, genotypes.data(), phenotypes2);
double f2p = computeFitnessActiveVariables(phenotypes2[0], genotypes[0]);
double fa = f2p + computeSecondaryFitnessActiveVariables(genotypes[0]);
for (size_t i = 0; i < problem.active_variables.size(); i++) {
double *pp = &(genotypes[0][i]);
genotypes[0][i] = individual.genes[i] + dp;
model.computeApproximateMutation1(problem.active_variables[i], +dp, phenotypes2[0],
phenotypes3[0]);
double fb = computeCombinedFitnessActiveVariables(phenotypes3[0], genotypes[0]);
genotypes[0][i] = individual.genes[i];
double d = fb - fa;
gradient[i] = d;
}

if(canceled) break;
// normalize gradient
double sum = dp * dp;
for (size_t i = 0; i < problem.active_variables.size(); i++)
sum += fabs(gradient[i]);
double f = 1.0 / sum * dp;
for (size_t i = 0; i < problem.active_variables.size(); i++)
gradient[i] *= f;

// sample support points for line search
for (size_t i = 0; i < problem.active_variables.size(); i++)
genotypes[0][i] = individual.genes[i] - gradient[i];
model.computeApproximateMutations(1, genotypes.data(), phenotypes3);
double f1 = computeCombinedFitnessActiveVariables(phenotypes3[0], genotypes[0]);

double f2 = fa;

for (size_t i = 0; i < problem.active_variables.size(); i++)
genotypes[0][i] = individual.genes[i] + gradient[i];
model.computeApproximateMutations(1, genotypes.data(), phenotypes3);
double f3 = computeCombinedFitnessActiveVariables(phenotypes3[0], genotypes[0]);

// quadratic step size
if (memetic == 'q') {

// compute step size
double v1 = (f2 - f1); // f / j
double v2 = (f3 - f2); // f / j
double v = (v1 + v2) * 0.5; // f / j
double a = (v1 - v2); // f / j^2
double step_size = v / a; // (f / j) / (f / j^2) = f / j / f * j * j = j

// double v1 = (f2 - f1) / dp;
// double v2 = (f3 - f2) / dp;
// double v = (v1 + v2) * 0.5;
// double a = (v2 - v1) / dp;
// // v * x + a * x * x = 0;
// // v = - a * x
// // - v / a = x
// // x = -v / a;
// double step_size = -v / a / dp;

// for(double f : { 1.0, 0.5, 0.25 })
{

// compute gradient
temp = individual.genes;
genotypes[0] = temp.data();
model.computeApproximateMutations(1, genotypes.data(), phenotypes2);
double f2p = computeFitnessActiveVariables(phenotypes2[0], genotypes[0]);
double fa = f2p + computeSecondaryFitnessActiveVariables(genotypes[0]);
for(size_t i = 0; i < problem.active_variables.size(); i++)
{
double* pp = &(genotypes[0][i]);
genotypes[0][i] = individual.genes[i] + dp;
model.computeApproximateMutation1(problem.active_variables[i], +dp, phenotypes2[0], phenotypes3[0]);
double fb = computeCombinedFitnessActiveVariables(phenotypes3[0], genotypes[0]);
genotypes[0][i] = individual.genes[i];
double d = fb - fa;
gradient[i] = d;
}
double f = 1.0;

// move by step size along gradient and compute fitness
for (size_t i = 0; i < problem.active_variables.size(); i++)
genotypes[0][i] = modelInfo.clip(
individual.genes[i] + gradient[i] * step_size * f,
problem.active_variables[i]);
model.computeApproximateMutations(1, genotypes.data(), phenotypes2);
double f4p = computeFitnessActiveVariables(phenotypes2[0], genotypes[0]);

// accept new position if better
if (f4p < f2p) {
individual.genes = temp;
continue;
} else {
break;
}
}

// normalize gradient
double sum = dp * dp;
for(size_t i = 0; i < problem.active_variables.size(); i++)
sum += fabs(gradient[i]);
double f = 1.0 / sum * dp;
for(size_t i = 0; i < problem.active_variables.size(); i++)
gradient[i] *= f;

// sample support points for line search
for(size_t i = 0; i < problem.active_variables.size(); i++)
genotypes[0][i] = individual.genes[i] - gradient[i];
model.computeApproximateMutations(1, genotypes.data(), phenotypes3);
double f1 = computeCombinedFitnessActiveVariables(phenotypes3[0], genotypes[0]);

double f2 = fa;

for(size_t i = 0; i < problem.active_variables.size(); i++)
genotypes[0][i] = individual.genes[i] + gradient[i];
model.computeApproximateMutations(1, genotypes.data(), phenotypes3);
double f3 = computeCombinedFitnessActiveVariables(phenotypes3[0], genotypes[0]);

// quadratic step size
if(memetic == 'q')
{
// break;
}

// compute step size
double v1 = (f2 - f1); // f / j
double v2 = (f3 - f2); // f / j
double v = (v1 + v2) * 0.5; // f / j
double a = (v1 - v2); // f / j^2
double step_size = v / a; // (f / j) / (f / j^2) = f / j / f * j * j = j

// double v1 = (f2 - f1) / dp;
// double v2 = (f3 - f2) / dp;
// double v = (v1 + v2) * 0.5;
// double a = (v2 - v1) / dp;
// // v * x + a * x * x = 0;
// // v = - a * x
// // - v / a = x
// // x = -v / a;
// double step_size = -v / a / dp;

// for(double f : { 1.0, 0.5, 0.25 })
{
// linear step size
if (memetic == 'l') {

double f = 1.0;
// compute step size
double cost_diff = (f3 - f1) * 0.5; // f / j
double step_size = f2 / cost_diff; // f / (f / j) = j

// move by step size along gradient and compute fitness
for(size_t i = 0; i < problem.active_variables.size(); i++)
genotypes[0][i] = modelInfo.clip(individual.genes[i] + gradient[i] * step_size * f, problem.active_variables[i]);
for (size_t i = 0; i < problem.active_variables.size(); i++)
temp[i] = modelInfo.clip(individual.genes[i] - gradient[i] * step_size,
problem.active_variables[i]);
model.computeApproximateMutations(1, genotypes.data(), phenotypes2);
double f4p = computeFitnessActiveVariables(phenotypes2[0], genotypes[0]);

// accept new position if better
if(f4p < f2p)
{
if (f4p < f2p) {
individual.genes = temp;
continue;
}
else
{
} else {
break;
}
}

// break;
}

// linear step size
if(memetic == 'l')
{

// compute step size
double cost_diff = (f3 - f1) * 0.5; // f / j
double step_size = f2 / cost_diff; // f / (f / j) = j

// move by step size along gradient and compute fitness
for(size_t i = 0; i < problem.active_variables.size(); i++)
temp[i] = modelInfo.clip(individual.genes[i] - gradient[i] * step_size, problem.active_variables[i]);
model.computeApproximateMutations(1, genotypes.data(), phenotypes2);
double f4p = computeFitnessActiveVariables(phenotypes2[0], genotypes[0]);

// accept new position if better
if(f4p < f2p)
{
individual.genes = temp;
continue;
}
else
{
break;
}
}
}
}
Expand Down
21 changes: 16 additions & 5 deletions src/kinematics_plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -215,11 +215,22 @@ struct BioIKKinematicsPlugin : kinematics::KinematicsBase {
getRosParam("drot", ikparams.drot, DBL_MAX);
getRosParam("dtwist", ikparams.dtwist, 1e-5);

// initialize parameters for ik_evolution_1
getRosParam("no_wipeout", ikparams.opt_no_wipeout, false);
getRosParam("population_size", ikparams.population_size, 8);
getRosParam("elite_count", ikparams.elite_count, 4);
getRosParam("linear_fitness", ikparams.linear_fitness, false);
if (ikparams.solver_class_name == "bio1")
{
// initialize parameters for ik_evolution_1
getRosParam("no_wipeout", ikparams.opt_no_wipeout, false);
getRosParam("population_size", ikparams.population_size, 8);
getRosParam("elite_count", ikparams.elite_count, 4);
getRosParam("linear_fitness", ikparams.linear_fitness, false);
} else {
// parameters for bio2
getRosParam("population_size", ikparams.population_size, 2);
getRosParam("child_count", ikparams.child_count, 16);
getRosParam("species_count", ikparams.species_count, 2);
getRosParam("memetic_evolution_gens", ikparams.memetic_evolution_gens, 8);
getRosParam("memetic_opt_gens", ikparams.memetic_opt_gens, 8);
getRosParam("elite_count", ikparams.elite_count, 1);
}

temp_state.reset(new moveit::core::RobotState(robot_model_));

Expand Down
12 changes: 10 additions & 2 deletions src/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,11 +72,19 @@ struct IKParams
double drot;
double dtwist;

// ik_evolution_1 parameters
bool opt_no_wipeout;
// common parameters
int population_size;
int elite_count;

// ik_evolution_1 parameters
bool opt_no_wipeout;
bool linear_fitness;

// bio2 parameters
int child_count;
int species_count;
int memetic_evolution_gens;
int memetic_opt_gens;
};

// Uncomment to enable logging
Expand Down