2016-11-25 19:31:00 -08:00
commit 001a46260a
73 changed files with 8363 additions and 0 deletions

15
lab4/CMakeLists.txt Normal file

@@ -0,0 +1,15 @@
cmake_minimum_required(VERSION 3.5.1)
project(Lab4)
set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
file(GLOB SOURCES "*.cpp")
file(GLOB HEADERS "*.h" "*.hpp")
set(SOURCE_FILES ${HEADERS} ${SOURCES} )
add_executable(Lab4 ${SOURCE_FILES})

BIN
lab4/CS417_Lab4.pdf Normal file

Binary file not shown.

169
lab4/differential_evolution.hpp Normal file

@@ -0,0 +1,169 @@
#pragma once
#include "search_function.h"
class differential_evolution : public search_function {
public:
differential_evolution(function f) : search_function(f) {};
double search(int permutations, int dimensionality) {
// Set up random start
std::vector<std::vector<double>> population = generate_population(50, dimensionality);
for (int g = 0; g < maximum_generation_number; g++) {
for (int i = 0; i < population.size(); i++) {
// Create u, a temporary vector to hold the trial solution
std::vector<double> u(dimensionality, 0);
// Select a random dimension that is guaranteed to take the mutant value
int j_rand = rand() % dimensionality;
for (int j = 0; j < dimensionality; j++){
// Binomial crossover: always accept at j_rand, otherwise accept with probability 0.9
// (randomMT() returns a uint32, so scale by UINT32_MAX rather than RAND_MAX)
if (j == j_rand || randomMT() * 1.0 / UINT32_MAX < 0.9)
u.at(j) = check_bounds(compute_with_strategy(&population, j, i));
else
u.at(j) = population.at(i).at(j);
}
// If the trial vector has a better fitness, replace the individual
if (func.compute(population.at(i)) > func.compute(u))
population.at(i) = u;
}
}
// Generation is done, return the best value from the population
std::sort(population.begin(), population.end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
return func.compute(population[0]);
};
void set_strategy(int strategy){
this->strategy = strategy;
}
private:
// G: maximum number of generations
int maximum_generation_number = 30;
// Tuning variable
double tuning_variable_f = 0.8;
double tuning_variable_lambda = 1.0;
int strategy = 0;
// Compute using different strategies
double compute_with_strategy(std::vector<std::vector<double>> *population, int j, int i){
// Setup and find the best solution in the population that was passed in
std::vector<double> best_solution;
double best_fitness = 999999999999;
for (auto p: *population){
double val = func.compute(p);
if (val < best_fitness){
best_fitness = val;
best_solution = p;
}
}
// Depending on the strategy, determine the new solution at J
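// (Assumed mapping, for reference: the ten cases below appear to follow the classic
// Storn/Price strategy numbering, i.e. 1/6 = DE/best/1, 2/7 = DE/rand/1, 3/8 = DE/rand-to-best/1,
// 4/9 = DE/best/2, 5/10 = DE/rand/2; pairs 1-5 and 6-10 use the same mutation formulas here.)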
switch(strategy){
case 1: {
std::vector<int> r = distinct_indices(2, population[0].size());
return best_solution[j] + tuning_variable_f * (population->at(r[0]).at(j) - population->at(r[1]).at(j));
}
// Unrecognized strategies (including the default of 0) fall through to classic DE/rand/1
default:
case 2: {
std::vector<int> r = distinct_indices(3, population[0].size());
return population->at(r[0]).at(j) + tuning_variable_f * (population->at(r[1]).at(j) - population->at(r[2]).at(j));
}
case 3:{
std::vector<int> r = distinct_indices(2, population[0].size());
return population->at(i).at(j) + tuning_variable_lambda * (best_solution.at(j) - population->at(i).at(j)) + tuning_variable_f * (population->at(r[0]).at(j) - population->at(r[1]).at(j));
}
case 4:{
std::vector<int> r = distinct_indices(4, population[0].size());
return best_solution.at(j) + tuning_variable_f * (population->at(r[0]).at(j) + population->at(r[1]).at(j) - population->at(r[2]).at(j) - population->at(r[3]).at(j));
}
case 5:{
std::vector<int> r = distinct_indices(5, population[0].size());
return population->at(r[4]).at(j) + tuning_variable_f * (population->at(r[0]).at(j) + population->at(r[1]).at(j) - population->at(r[2]).at(j) - population->at(r[3]).at(j));
}
case 6:{
std::vector<int> r = distinct_indices(2, population[0].size());
return best_solution.at(j) + tuning_variable_f * (population->at(r[0]).at(j) - population->at(r[1]).at(j));
}
case 7:{
std::vector<int> r = distinct_indices(3, population[0].size());
return population->at(r[0]).at(j) + tuning_variable_f * (population->at(r[1]).at(j) - population->at(r[2]).at(j));
}
case 8:{
std::vector<int> r = distinct_indices(2, population[0].size());
return population->at(i).at(j) + tuning_variable_lambda * (best_solution.at(j) - population->at(i).at(j)) + tuning_variable_f * (population->at(r[0]).at(j) - population->at(r[1]).at(j));
}
case 9:{
std::vector<int> r = distinct_indices(4, population[0].size());
return best_solution.at(j) + tuning_variable_f * (population->at(r[0]).at(j) + population->at(r[1]).at(j) - population->at(r[2]).at(j) - population->at(r[3]).at(j));
}
case 10:{
std::vector<int> r = distinct_indices(5, population[0].size());
return population->at(r[4]).at(j) + tuning_variable_f * (population->at(r[0]).at(j) + population->at(r[1]).at(j) - population->at(r[2]).at(j) - population->at(r[3]).at(j));
}
}
}
std::vector<int> distinct_indices(int count, int max){
std::vector<int> indices;
for (int q = 0; q < count; q++) {
// Draw indices with the Mersenne Twister, re-drawing until distinct
int val = randomMT() % max;
while (std::count(indices.begin(), indices.end(), val) != 0)
val = randomMT() % max;
indices.push_back(val);
}
return indices;
}
};

87
lab4/firefly.hpp Normal file

@@ -0,0 +1,87 @@
#pragma once
#include "search_function.h"
class firefly : public search_function {
public:
firefly(function f) : search_function(f) {};
double search(int permutations, int dimensionality) {
// Set up random start population
for (int p = 0; p < population_size; p++) {
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++){
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
population.push_back(tmp);
}
for (int t = 0; t < iterations; t++){
// Compare each fly to each other
for (int i = 0; i < population.size(); i++){
for (int j = 0; j < population.size(); j++){
// If it finds one brighter
if (func.compute(population.at(j)) < func.compute(population.at(i))){
// Move towards it for each dimension
for (int d = 0; d < dimensionality; d++){
population.at(i).at(d) =
population.at(i).at(d) +
attractiveness(distance(population.at(i), population.at(j))) *
(population.at(j).at(d) - population.at(i).at(d)) + alpha *
rand_between(-0.5, 0.5);
check_solution_bounds(&population.at(i));
}
}
}
}
}
// Sort the population and return the best fitness
std::sort(population.begin(), population.end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
return func.compute(population.front());
};
private:
// Distance between two vector points
double distance(std::vector<double> a, std::vector<double> b){
double sum = 0;
for (int i = 0; i < a.size(); i++){
sum += pow(a.at(i) - b.at(i), 2);
}
return sqrt(sum);
}
// Attractiveness decays with the square of the distance (Gaussian falloff scaled by beta)
double attractiveness(double distance){
return beta * exp(-gamma * distance * distance);
}
// Constants
double gamma = 1.0;
double alpha = 0.5;
double beta = 0.2;
int iterations = 100;
int population_size = 50;
std::vector<std::vector<double>> population;
};

319
lab4/functions.hpp Normal file

@@ -0,0 +1,319 @@
std::vector<double> c = {0.806,0.517,0.1,0.908,0.965,0.669,0.524,0.902,0.351,0.876,0.462,
0.491,0.463,0.741,0.352,0.869,0.813,0.811,0.0828,0.964,0.789,0.360,0.369,
0.992,0.332,0.817,0.632,0.883,0.608,0.326};
double a[][10] =
{
{9.681,0.667,4.783,9.095,3.517,9.325,6.544,0.211,5.122,2.02},
{9.4,2.041,3.788,7.931,2.882,2.672,3.568,1.284,7.033,7.374},
{8.025,9.152,5.114,7.621,4.564,4.711,2.996,6.126,0.734,4.982},
{2.196,0.415,5.649,6.979,9.510,9.166,6.304,6.054,9.377,1.426},
{8.074,8.777,3.467,1.863,6.708,6.349,4.534,0.276,7.633,1.567},
{7.650,5.658,0.720,2.764,3.278,5.283,7.474,6.274,1.409,8.208},
{1.256,3.605,8.623,6.905,4.584,8.133,6.071,6.888,4.187,5.448},
{8.314,2.261,4.24,1.781,4.124,0.932,8.129,8.658,1.208,5.762},
{0.226,8.858,1.42,0.954,1.622,4.698,6.228,9.096,0.972,7.637},
{7.305,2.228,1.242,5.928,9.133,1.826,4.06,5.204,8.713,8.247},
{0.652,7.027,0.508,4.876,8.807,4.632,5.808,6.937,3.291,7.016},
{2.699,3.516,5.847,4.119,4.461,7.496,8.817,0.69,6.593,9.789},
{8.327,3.897,2.017,9.57,9.825,1.15,1.395,3.885,6.354,0.109},
{2.132,7.006,7.136,2.641,1.882,5.943,7.273,7.691,2.88,0.564},
{4.707,5.579,4.08,0.581,9.698,8.542,8.077,8.515,9.231,4.67},
{8.304,7.559,8.567,0.322,7.128,8.392,1.472,8.524,2.277,7.826},
{8.632,4.409,4.832,5.768,7.05,6.715,1.711,4.323,4.405,4.591},
{4.887,9.112,0.17,8.967,9.693,9.867,7.508,7.77,8.382,6.74},
{2.44,6.686,4.299,1.007,7.008,1.427,9.398,8.48,9.95,1.675},
{6.306,8.583,6.084,1.138,4.350,3.134,7.853,6.061,7.457,2.258},
{0.652,2.343,1.37,0.821,1.31,1.063,0.689,8.819,8.833,9.07},
{5.558,1.272,5.756,9.857,2.279,2.764,1.284,1.677,1.244,1.234},
{3.352,7.549,9.817,9.437,8.687,4.167,2.57,6.54,0.228,0.027},
{8.798,0.88,2.37,0.168,1.701,3.68,1.231,2.39,2.499,0.064},
{1.46,8.057,1.337,7.217,7.914,3.615,9.981,9.198,5.292,1.224},
{0.432,8.645,8.774,0.249,8.081,7.461,4.416,0.652,4.002,4.644},
{0.679,2.8,5.523,3.049,2.968,7.225,6.73,4.199,9.614,9.229},
{4.263,1.074,7.286,5.599,8.291,5.2,9.214,8.272,4.398,4.506},
{9.496,4.83,3.15,8.27,5.079,1.231,5.731,9.494,1.883,9.732},
{4.138,2.562,2.532,9.661,5.611,5.5,6.886,2.341,9.699,6.5}
};
double schwefel(std::vector<double> input){
int upper_bound = 512;
int lower_bound = -512;
double sum = 0;
for (int i = 0; i < input.size(); i++){
sum += (-input[i]) * std::sin(std::sqrt(std::abs(input[i])));
}
return sum;
}
double first_de_jong(std::vector<double> input){
int upper_bound = 100;
int lower_bound = -100;
double sum = 0;
for (int i = 0; i < input.size(); i++){
sum += std::pow(input[i], 2);
}
return sum;
}
double rosenbrock(std::vector<double> input){
int upper_bound = 100;
int lower_bound = -100;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++){
sum += 100 * std::pow((std::pow(input[i], 2) - input[i + 1]), 2) + std::pow((1 - input[i]), 2);
}
return sum;
}
double rastrigin(std::vector<double> input){
int upper_bound = 30;
int lower_bound = -30;
double sum = 0;
for (int i = 0; i < input.size(); i++){
sum += std::pow(input[i], 2) - 10 * std::cos(2 * M_PI * input[i]);
}
sum *= 2 * input.size();
return sum;
}
double griewangk(std::vector<double> input){
int upper_bound = 500;
int lower_bound = -500;
double sum = 0;
for (int i = 0; i < input.size(); i++){
sum += std::pow(input[i], 2) / 4000;
}
double product = 1;
for (int i = 0; i < input.size(); i++){
product *= std::cos(input[i] / sqrt(i + 1));
}
return 1 + sum - product;
}
double sine_envelope_sine_wave(std::vector<double> input){
int upper_bound = 30;
int lower_bound = -30;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++){
sum += 0.5 + (std::pow(std::sin(std::pow(input[i], 2) + std::pow(input[i + 1], 2) - 0.5), 2)) /
(1 + 0.001 * (std::pow(input[i], 2) + std::pow(input[i + 1], 2)));
}
return sum;
}
double stretched_v_sine_wave(std::vector<double> input){
int upper_bound = 30;
int lower_bound = -30;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++){
sum += std::pow(std::pow(input[i], 2) + std::pow(input[i + 1], 2), 1.0 / 4) *
std::pow(std::sin(50 * std::pow(std::pow(input[i], 2) + std::pow(input[i + 1], 2), 1.0 / 10)), 2) + 1;
}
return sum;
}
double ackleys_one(std::vector<double> input){
int upper_bound = 32;
int lower_bound = -32;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++){
sum += (1.0 / pow(M_E, 0.2)) *
std::sqrt(std::pow(input[i], 2) + std::pow(input[i + 1], 2)) +
3 * std::cos(2 * input[i]) +
std::sin(2 * input[i + 1]);
}
return sum;
}
double ackleys_two(std::vector<double> input){
int upper_bound = 32;
int lower_bound = -32;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++){
sum += 20 + M_E -
(20 / (std::pow(M_E, 0.2) * std::sqrt(((std::pow(input[i], 2) + std::pow(input[i+1], 2) + 1) / 2)))) -
std::pow(M_E, 0.5 * std::cos(2 * M_PI * input[i]) + cos(2 * M_PI * input[i + 1]));
}
return sum;
}
double egg_holder(std::vector<double> input){
int upper_bound = 500;
int lower_bound = -500;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++) {
sum += -input[i] * std::sin(std::sqrt(abs(input[i] - input[i + 1] - 47))) -
(input[i + 1] + 47) * std::sin(std::sqrt(std::abs(input[i + 1] + 47 + input[i] / 2)));
}
return sum;
}
double rana(std::vector<double> input){
int upper_bound = 500;
int lower_bound = -500;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++) {
sum += input[i] * std::sin(std::sqrt(std::abs(input[i + 1] - input[i] + 1))) *
std::cos(std::sqrt(std::abs(input[i + 1] + input[i] + 1))) +
(input[i + 1] + 1) *
std::cos(std::sqrt(std::abs(input[i + 1] - input[i] + 1))) *
std::sin(std::sqrt(std::abs(input[i + 1] + input[i] + 1)));
}
return sum;
}
double pathological(std::vector<double> input){
int upper_bound = 100;
int lower_bound = -100;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++) {
sum += 0.5 +
(std::pow(std::sin(std::sqrt(100 * std::pow(input[i], 2) + std::pow(input[i + 1], 2))), 2) - 0.5) /
(1 + 0.001 * std::pow(std::pow(input[i], 2) - 2 * input[i] * input[i + 1] + std::pow(input[i + 1], 2), 2));
}
return sum;
}
double michalewicz(std::vector<double> input){
int upper_bound = M_PI;
int lower_bound = 0;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++) {
sum += std::sin(input[i]) * std::pow(std::sin(i * std::pow(input[i], 2) / M_PI), 20);
}
return -sum;
}
double masters_cosine_wave(std::vector<double> input){
int upper_bound = 30;
int lower_bound = -30;
double sum = 0;
for (int i = 0; i < input.size() - 1; i++) {
sum += std::pow(M_E, -(1.0 / 8) * (std::pow(input[i], 2) + std::pow(input[i + 1], 2) + 0.5 * input[i + 1] * input[i])) *
std::cos(4 * std::sqrt(std::pow(input[i], 2) + std::pow(input[i + 1], 2) + 0.5 * input[i] * input[i + 1]));
}
return -sum;
}
double shekels_foxholes(std::vector<double> input){
int upper_bound = 10;
int lower_bound = 0;
double sum = 0;
for (int i = 0; i < c.size(); i++) {
double bottom_sum = 0;
// The a matrix only has 10 columns, so cap the inner sum at 10 dimensions
for (int q = 0; q < input.size() && q < 10; q++){
bottom_sum += std::pow(input.at(q) - a[i][q], 2);
}
sum += 1 / (bottom_sum + c[i]);
}
return -sum;
}
double set_within(double val, double prior_upper, double prior_lower, double after_upper, double after_lower){
return ((after_upper - after_lower) * (val - prior_lower) / (prior_upper - prior_lower)) + after_lower;
}
struct function {
double (*function_pointer)(std::vector<double>);
double range = 0;
double upper_bound = 0;
double lower_bound = 0;
timer t;
function(){};
function(double (*func)(std::vector<double>), double upper_bound, double lower_bound) {
function_pointer = func;
this->upper_bound = upper_bound;
this->lower_bound = lower_bound;
}
double compute(std::vector<double> input) {
for (auto v: input) {
if (v < lower_bound || v > upper_bound) {
std::cout << "Function exceeded bounds" << std::endl;
return 0;
}
}
double res = function_pointer(input);
return res;
};
};

167
lab4/genetic_algorithm.hpp Normal file

@@ -0,0 +1,167 @@
#pragma once
#include "search_function.h"
class genetic_algorithm : public search_function {
public:
genetic_algorithm(function f) : search_function(f) {};
double search(int permutations, int dimensionality) {
elitism = elitism_rate * number_of_solutions;
// Set up random start population
std::vector<std::vector<double>> population;
for (int p = 0; p < number_of_solutions; p++) {
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++){
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
population.push_back(tmp);
}
for (int i = 0; i < max_iterations; i++){
// Setup the random new population
std::vector<std::vector<double>> new_population;
for (int p = 0; p < number_of_solutions; p++) {
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++){
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
new_population.push_back(tmp);
}
for (int s = 0; s < number_of_solutions; s += 2){
auto p1p2 = select(&population);
crossover(&std::get<0>(p1p2), &std::get<1>(p1p2));
mutate(&std::get<0>(p1p2));
mutate(&std::get<1>(p1p2));
// Place the offspring into the new population, overwriting the random placeholders
new_population.at(s) = std::get<0>(p1p2);
if (s + 1 < number_of_solutions)
new_population.at(s + 1) = std::get<1>(p1p2);
}
reduce(&population, &new_population);
for (auto q: population){
double val = func.compute(q);
if (val < best_fitness)
best_fitness = val;
}
}
return best_fitness;
};
private:
double crossover_rate = 0.90;
double elitism_rate = 0.2;
int elitism = 10;
double mutation_rate = 0.008;
double mutation_range = 0.1;
double mutation_precision = 3.5;
double best_fitness = 99999999999999999;
int number_of_solutions = 50;
int max_iterations = 100;
double get_fitness(std::vector<std::vector<double>> *population){
double fitness_sum = 0;
for (auto p: *population){
double fitness = func.compute(p);
if (fitness >= 0)
fitness_sum += 1 / (1 + fitness);
else
fitness_sum += 1 + abs(fitness);
}
return fitness_sum;
}
int select_parent(std::vector<std::vector<double>> *population){
double r = fmod(randomMT(), total_fitness(population));
int s = 0;
while (s < population->size()-1 && (r - func.compute(population->at(s)) > 0)) {
r -= func.compute(population->at(s++));
}
return s;
}
std::tuple<std::vector<double>, std::vector<double>> select(std::vector<std::vector<double>> *population){
auto p1 = population->at(select_parent(population));
auto p2 = population->at(select_parent(population));
return std::make_tuple(p1, p2);
};
void mutate(std::vector<double> *solution){
for (auto &i: *solution){
if ((randomMT() * 1.0 / UINT32_MAX) < mutation_rate){
i += ((randomMT() * 1.0 / UINT32_MAX) * 2 - 1) * (func.upper_bound - func.lower_bound) *
mutation_range * pow(2, (-1 * (randomMT() * 1.0 / UINT32_MAX) * mutation_precision));
}
}
}
void crossover(std::vector<double> *parent1, std::vector<double> *parent2){
if ((randomMT() * 1.0 / UINT32_MAX) < crossover_rate){
int d = randomMT() % (parent1->size() - 1) + 1;
int dim = parent1->size();
std::vector<double> temp;
temp.insert(temp.begin(), parent1->begin(), parent1->begin() + d);
parent1->erase(parent1->begin(), parent1->begin() + d);
parent1->insert(parent1->end(), parent2->begin() + dim - d, parent2->end());
parent2->erase(parent2->begin() + dim - d, parent2->end());
parent2->insert(parent2->end(), temp.begin(), temp.end());
}
}
void reduce(std::vector<std::vector<double>> *population, std::vector<std::vector<double>> *new_population){
std::sort(population->begin(), population->end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
std::sort(new_population->begin(), new_population->end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
// Elitism: copy the best of the old population over the worst of the new
for (int s = 0; s < elitism; s++) {
new_population->at(new_population->size() - 1 - s) = population->at(s);
}
*population = *new_population;
}
double total_fitness(std::vector<std::vector<double>> *population){
double val = 0;
for (auto i: *population)
val += func.compute(i);
return val;
}
};

138
lab4/harmony.hpp Normal file

@@ -0,0 +1,138 @@
#pragma once
#include "search_function.h"
class harmony : public search_function {
public:
harmony(function f) : search_function(f) {};
double search(int permutations, int dimensionality) {
this->dimensionality = dimensionality;
// Set up random start population
for (int p = 0; p < population_size; p++) {
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++){
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
population.push_back(tmp);
}
double best_fitness = func.compute(population.at(0));
// Do a thousand iterations
int r = 0;
while (r < 1000){
// Random draw to determine which of the 3 operations we choose
double rng = rand_between(0, 1);
// Keep up to the specified acceptance percentage of the population, re-initialize the rest
if (rng > r_accept) {
accept_and_generate();
}
// Harmonize
else if (rng > r_pa) {
for (int p = 0; p < population.size(); p++) {
for (int d = 0; d < dimensionality; d++) {
population.at(p).at(d) = population.at(p).at(d) + rand_between(1, 2) * rand_between(-1, 1);
}
// Keep the adjusted harmony inside the function bounds
check_solution_bounds(&population.at(p));
}
}
// Introduce some stochasticity
else{
population.clear();
for (int p = 0; p < population_size; p++) {
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++){
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
population.push_back(tmp);
}
}
// Sort the array and find the best fitness, record
std::sort(population.begin(), population.end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
if (func.compute(population.at(0)) < best_fitness)
best_fitness = func.compute(population.at(0));
r++;
}
// Return the best fitness seen over all iterations
return best_fitness;
};
private:
double r_accept = 0.8;
double r_pa = 0.1;
int population_size = 50;
int dimensionality = 0;
void accept_and_generate(){
// Resort the population so the best will be at(0)
std::sort(population.begin(), population.end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
// Keep up to the accept rate
std::vector<std::vector<double>> tmp_pop;
for (int i = 0; (1.0 * i / population.size()) < r_accept; i++){
tmp_pop.push_back(population.at(i));
}
// Repopulate with random values
for (int p = 0; p < population_size - tmp_pop.size(); p++) {
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++){
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
tmp_pop.push_back(tmp);
}
population = tmp_pop;
}
std::vector<std::vector<double>> population;
};

65
lab4/iterative_local.hpp Normal file

@@ -0,0 +1,65 @@
#pragma once
#include "search_function.h"
class iterative_local : public search_function {
public:
iterative_local(function f) : search_function(f) {};
double search(int permutations, int dimensionality) {
// Set up random start
std::vector<double> global_best_solution;
for (int i = 0; i < dimensionality; i++) {
global_best_solution.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
// 30 iteration max
int iteration_max = 30;
for (int i = 0; i < iteration_max; i++){
// Random new solution
std::vector<double> best_solution;
for (int i = 0; i < dimensionality; i++) {
best_solution.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
std::vector<double> temp_solution = best_solution;
// While a better solution is still being found
bool better_solution_found = true;
while (better_solution_found) {
better_solution_found = false;
double delta = 0.11;
temp_solution = best_solution;
std::vector<double> new_solution(dimensionality);
// Set up the new solution
for (int i = 0; i < dimensionality; i++) {
temp_solution[i] += delta;
new_solution[i] = best_solution[i] - delta * (func.compute(temp_solution) - func.compute(best_solution));
temp_solution[i] = best_solution[i];
// temp[i] - delta * new with delta, and the old without
}
// test it
if (func.compute(new_solution) < func.compute(best_solution)) {
best_solution = new_solution;
better_solution_found = true;
}
}
// Check to see if we found a better global solution
if (func.compute(best_solution) < func.compute(global_best_solution)){
global_best_solution = best_solution;
}
}
// Return the fitness of the best solution found across all restarts
return func.compute(global_best_solution);
};
};

53
lab4/local.hpp Normal file

@@ -0,0 +1,53 @@
#pragma once
#include "search_function.h"
class local : public search_function {
public:
local(function f) : search_function(f) {
};
double search(int permutations, int dimensionality) {
// Set up the initial solution
std::vector<double> best_solution;
for (int i = 0; i < dimensionality; i++) {
best_solution.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
std::vector<double> temp_solution = best_solution;
// While a better solution is being found
bool better_solution_found = true;
while (better_solution_found) {
better_solution_found = false;
double delta = 0.11;
temp_solution = best_solution;
std::vector<double> new_solution(dimensionality);
for (int i = 0; i < dimensionality; i++) {
temp_solution[i] += delta;
new_solution[i] = best_solution[i] - delta * (func.compute(temp_solution) - func.compute(best_solution));
temp_solution[i] = best_solution[i];
// temp[i] - delta * new with delta, and the old without
}
// Check to see if we found a better solution
if (func.compute(new_solution) < func.compute(best_solution)) {
best_solution = new_solution;
better_solution_found = true;
}
}
return func.compute(best_solution);
};
};

105
lab4/main.cpp Normal file

@@ -0,0 +1,105 @@
#include <iostream>
#include <vector>
#include <cmath>
#include <map>
#include <chrono>
#include <cstring>
#include <cstdlib>
#include <ctime>
#include "twister.c"
#include "util.hpp"
#include "functions.hpp"
#include "random_walk.hpp"
#include "local.hpp"
#include "iterative_local.hpp"
#include "differential_evolution.hpp"
#include "genetic_algorithm.hpp"
#include "particle_swrm.hpp"
#include "soma.hpp"
#include "harmony.hpp"
#include "firefly.hpp"
int main(int argc, char* args[]) {
std::map<int, function> function_lookup;
function_lookup.emplace(std::make_pair(0, function(&schwefel, 512, -512)));
function_lookup.emplace(std::make_pair(1, function(&first_de_jong, 100, -100)));
function_lookup.emplace(std::make_pair(2, function(&rosenbrock, 100, -100)));
function_lookup.emplace(std::make_pair(3, function(&rastrigin, 30, -30)));
function_lookup.emplace(std::make_pair(4, function(&griewangk, 500, -500)));
function_lookup.emplace(std::make_pair(5, function(&sine_envelope_sine_wave, 30, -30)));
function_lookup.emplace(std::make_pair(6, function(&stretched_v_sine_wave, 30, -30)));
function_lookup.emplace(std::make_pair(7, function(&ackleys_one, 32, -32)));
function_lookup.emplace(std::make_pair(8, function(&ackleys_two, 32, -32)));
function_lookup.emplace(std::make_pair(9, function(&egg_holder, 500, -500)));
function_lookup.emplace(std::make_pair(10, function(&rana, 500, -500)));
function_lookup.emplace(std::make_pair(11, function(&pathological, 100, -100)));
function_lookup.emplace(std::make_pair(12, function(&michalewicz, M_PI, 0)));
function_lookup.emplace(std::make_pair(13, function(&masters_cosine_wave, 30, -30)));
function_lookup.emplace(std::make_pair(14, function(&shekels_foxholes, 10, 0)));
function f;
int dimensionality = 0;
double seed = 0;
int search_function = 0;
// Default parameters, used when no command line args are given
srand(time(nullptr));
f = function_lookup[12];
dimensionality = 20;
seed = rand();
search_function = 7;
// Command line args: <function 0-14> <dimensionality> <seed> <search method 0-8>
if (argc >= 5) {
f = function_lookup[atoi(args[1])];
dimensionality = atoi(args[2]);
seed = atoi(args[3]);
search_function = atoi(args[4]);
}
// Set up the search functions
seedMT(seed);
random_walk random_walk_search(f);
local local_search(f);
iterative_local iterative_local_search(f);
differential_evolution differential_evolution_search(f);
genetic_algorithm genetic_algorithm_search(f);
particle_swarm particle_swarm_search(f);
soma soma_search(f);
firefly firefly_search(f);
harmony harmony_search(f);
// return the results of the search
if (search_function == 0)
std::cout << random_walk_search.search(1, dimensionality) << std::endl;
else if (search_function == 1)
std::cout << local_search.search(1, dimensionality) << std::endl;
else if (search_function == 2)
std::cout << iterative_local_search.search(1, dimensionality) << std::endl;
else if (search_function == 3)
std::cout << differential_evolution_search.search(1, dimensionality) << std::endl;
else if (search_function == 4)
std::cout << genetic_algorithm_search.search(1, dimensionality) << std::endl;
else if (search_function == 5)
std::cout << particle_swarm_search.search(1, dimensionality) << std::endl;
else if (search_function == 6)
std::cout << soma_search.search(1, dimensionality) << std::endl;
else if (search_function == 7)
std::cout << firefly_search.search(1, dimensionality) << std::endl;
else if (search_function == 8)
std::cout << harmony_search.search(1, dimensionality) << std::endl;
return 0;
}

145
lab4/particle_swrm.hpp Normal file

@@ -0,0 +1,145 @@
#pragma once
#include "search_function.h"
struct particle {
// Personal best
double pb_fitness = 999999999999999;
std::vector<double> pb_solution;
// Current solution
double fitness = 9999999999;
std::vector<double> solution;
std::vector<double> velocity;
double v_max = 4.0;
double c1 = 0.2;
double c2 = 0.2;
double weight = 0.9;
int dimensionality;
std::vector<double> *gb_solution;
double *gb_fitness;
function *func;
particle(int dimensionality, std::vector<double> *gb_solution, double *gb_fitness, function *func) : dimensionality(dimensionality){
this->gb_solution = gb_solution;
this->gb_fitness = gb_fitness;
this->func = func;
// Blank initial solution, assigned to both the current and personal best
solution = std::vector<double>(dimensionality, 0);
pb_solution = solution;
// Init the velocity
for (int i = 0; i < dimensionality; i++)
velocity.push_back(rand_between(-v_max/3, v_max/3));
}
void update_fitness(){
fitness = func->compute(solution);
if (fitness < pb_fitness){
pb_solution = solution;
pb_fitness = fitness;
}
if (fitness < *gb_fitness){
*gb_solution = solution;
*gb_fitness = fitness;
}
}
void update_velocity(){
for (int d = 0; d < dimensionality; d++){
velocity.at(d) = weight * velocity.at(d) + c1 * rand_between(0, 1) * (pb_solution.at(d) - solution.at(d)) +
c2 * rand_between(0, 1) * (gb_solution->at(d) - solution.at(d));
if (velocity.at(d) > v_max)
velocity.at(d) = v_max;
else if (velocity.at(d) < -v_max)
velocity.at(d) = -v_max;
solution.at(d) += velocity.at(d);
if (solution.at(d) < func->lower_bound)
solution.at(d) = func->lower_bound;
else if (solution.at(d) > func->upper_bound)
solution.at(d) = func->upper_bound;
}
}
};
class particle_swarm : public search_function {
public:
particle_swarm(function f) : search_function(f) {};
double search(int permutations, int dimensionality) {
for (int i = 0; i < number_of_particles; i++){
particle p(dimensionality, &gb_solution, &gb_fitness, &func);
p.solution = generate_solution(dimensionality);
p.update_fitness();
particles.push_back(p);
}
for (int i = 0; i < max_iterations; i++){
for (int p = 0; p < particles.size(); p++){
particles.at(p).update_fitness();
particles.at(p).update_velocity();
}
}
return gb_fitness;
};
private:
// The global best solution and fitness
double gb_fitness = 99999999999999;
std::vector<double> gb_solution;
int number_of_particles = 100;
std::vector<particle> particles;
int max_iterations = 100;
};

41
lab4/random_walk.hpp Normal file

@@ -0,0 +1,41 @@
#pragma once
#include "search_function.h"
#include <algorithm>
class random_walk : public search_function {
public:
random_walk(function f) : search_function(f) {
}
double search(int permutations, int dimensionality) {
timer t;
t.start();
std::vector<double> r;
for (int i = 0; i < permutations; i++){
std::vector<double> dimension_vals;
for (int i = 0; i < dimensionality; i++) {
auto val = fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound;
dimension_vals.push_back(val);
}
r.push_back(func.compute(dimension_vals));
}
t.end();
std::sort(r.begin(), r.end(), std::less<double>());
return r[0];
}
};

BIN
lab4/results/Data.ods Normal file

Binary file not shown.

6
lab4/results/README Normal file

@@ -0,0 +1,6 @@
The optimization functions can either be run with the python script "run.py",
which will run all 3 new search methods against all 15 benchmark functions,
or you can call the executable directly from the command line, e.g.
./Lab4 <function 0-14> <dimensionality> <seed> <search method 0-8>
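For example (assuming the executable has been built under ../build, as run.py expects):
./Lab4 3 30 12345 7
runs the firefly search (method 7) on function 3 (Rastrigin) in 30 dimensions with seed 12345.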

6
lab4/results/out Normal file

@@ -0,0 +1,6 @@
Timer values

55
lab4/results/run.py Normal file

@@ -0,0 +1,55 @@
from subprocess import check_output
import subprocess
import random
import matplotlib.pyplot as plt
import time
random.seed()
loc = "../build/Lab4"
f = open('out','w')
f.write("Timer values")
for function in range(6, 9):
print("Method: " + str(function))
for q in range(0, 15):
print("Function: " + str(q))
f.write(str(q) + "\n")
start = time.time()
subprocess.call([
loc,
str(q),
str(20),
str(random.randint(0, 2147483646)),
str(function)
])
end = time.time()
f.write(str(end-start) + "\n")
f.close()

39
lab4/results/run2.py Normal file

@@ -0,0 +1,39 @@
from subprocess import check_output
import subprocess
import random
import matplotlib.pyplot as plt
import time
random.seed()
loc = "../build/Lab4"
search = 8
for function in range(0, 15):
print("Function: " + str(function))
for q in range(0, 10):
subprocess.call([
loc,
str(function),
str(20),
str(random.randint(0, 2147483646)),
str(search)
])

BIN
lab4/results/writeup.pdf Normal file

Binary file not shown.

193
lab4/results/writeup.tex Normal file

@@ -0,0 +1,193 @@
\documentclass[paper=a4, fontsize=11pt]{scrartcl}
\usepackage[T1]{fontenc}
\usepackage{fourier}
\usepackage[english]{babel}
\usepackage[protrusion=true,expansion=true]{microtype}
\usepackage{amsmath,amsfonts,amsthm}
\usepackage[pdftex]{graphicx}
\usepackage{url}
\usepackage{sectsty}
\usepackage{rotating}
\allsectionsfont{\centering \normalfont\scshape}
\usepackage{fancyhdr}
\pagestyle{fancyplain}
\fancyhead{}
\fancyfoot[L]{}
\fancyfoot[C]{}
\fancyfoot[R]{\thepage}
\renewcommand{\headrulewidth}{0pt}
\renewcommand{\footrulewidth}{0pt}
\setlength{\headheight}{13.6pt}
\numberwithin{equation}{section}
\numberwithin{figure}{section}
\numberwithin{table}{section}
\newcommand{\horrule}[1]{\rule{\linewidth}{#1}}
\title{
%\vspace{-1in}
\usefont{OT1}{bch}{b}{n}
\normalfont \normalsize \textsc{Central Washington University, Computer Science Department} \\ [25pt]
\horrule{0.5pt} \\[0.4cm]
\huge Project 4 \\
\horrule{2pt} \\[0.5cm]
}
\author{\normalsize Mitchell Hansen \\[-6pt]}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\maketitle
\section{Introduction}
For this lab we again took our 15 optimization functions and ran them through
3 new methods of determining the global minimum. The methods are:
the Self-Organizing Migrating Algorithm (SOMA), which uses an evolutionary approach;
the Firefly Algorithm (FA), which uses an evolutionary swarm approach
similar to Particle Swarm; and the Harmony Search Algorithm (HS), which uses
another evolutionary-style approach.
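As a sketch of the position update our FA implementation uses (the symbols $\beta$, $\gamma$,
and $\alpha$ below correspond to the beta, gamma, and alpha constants in firefly.hpp), each
firefly $i$ is moved toward any brighter firefly $j$ in every dimension $d$:
\[
x_i^d \leftarrow x_i^d + \beta \, e^{-\gamma r_{ij}^2} \left( x_j^d - x_i^d \right) + \alpha \, U(-0.5, 0.5),
\]
where $r_{ij}$ is the Euclidean distance between the two fireflies and $U(-0.5, 0.5)$ is a
uniform random draw.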
\section{Methods}
For each of the 3 search methods and all 15 of the search functions we ran
tests for a set number of iterations using the python scripts we wrote
for the previous lab. These results were then written out to a file, from which
we calculated the min, max, range, etc.
We also slightly modified the way FA calculates the step when changing its direction.
Instead of using a random distribution when calculating the FA random step,
we used a suggested regular distribution that we found while researching the
problem online.
\section{Analysis}
This lab produced both surprising and disappointing results when evaluating the search methods.
The most surprising results are those of SOMA, which regularly equaled or beat the most accurate
search method so far, Differential Evolution (DE). Of the 15 functions tested, SOMA
had better results than DE for 6 of the functions; for the remaining 9 it was very close
to the results found by the DE search method.
The disappointing results produced by this lab lie in the other two methods tested. HS and FA produced
very similar results overall, most likely because they are not all that much better than Particle
Swarm, with which they share a class. Between the two search methods, there was very seldom
a difference of more than 20 to 30 percent in their fitnesses. We think it's best to compare these
two methods to their cousin Particle Swarm (PSO), as it is the closest related method.
In relation to PSO, both methods did very similarly on the later functions (>4). The lower functions (<4)
all produced results which were much more inaccurate than traditional Particle Swarm. In regards to the
later functions, HS was only able to beat PSO on function 6, with 11.28 determined as
the minimum versus PSO returning 12.15. FA wasn't able to beat PSO on function 6, but it did come close
with 13.32. Another notable point is function 15, where HS was able to find the minimum at
-18.70, but FA was unable to capture the minimum, reaching only -16.40.
\section{Conclusion}
The conclusion for this lab is a pessimistic one. SOMA was a great success and a valuable
addition to our growing library of search methods, but it is overshadowed by the poor performance,
both in accuracy of answers and in run times, of FA and HS. PSO was able to
outperform HS and FA in almost every aspect, which is telling, as PSO itself performs
poorly compared to some of the other search methods we have used on specific functions.
Walking away from this lab, I believe that we will add SOMA to our list of highly accurate
and performant search methods, while leaving FA and HS for the problem sets to which they are most
suited.
Although we scrutinized our code to an appropriate degree, we can't rule out that the poor performance
of the FA and HS algorithms was caused by incorrect implementations. We're reasonably
sure that this is not the case, though, as the comparable performance to PSO was expected.
SOMA was not entirely without faults either. Function 10, for example, shows an almost 250
percent increase in the minimum value over its DE and PSO counterparts. This is worrisome, as
all of the other results produced by SOMA were within 20 to 30 percent of PSO and DE.
\begin{figure}
\section{Results}
\caption{Computation comparison of SOMA, HS and FA}
\hskip+4.0cm
\rotatebox{90.0}{
\scalebox{0.7}{
\small \centering
\label{Tab1d}
\begin{tabular}{c|lllll|lllll|lllll}
\noalign{\smallskip}\hline\noalign{\smallskip}
Problem & \multicolumn{5}{c}{SOMA}
& \multicolumn{5}{|c|}{HS}
& \multicolumn{5}{c}{FA} \\
\noalign{\smallskip}\hline\noalign{\smallskip}
& Avg & Median & Range & SD & T(s) & Avg & Median & Range & SD & T(s) & Avg & Median & Range & SD & T(s) \\ \noalign{\smallskip}\hline\noalign{\smallskip}
$f_1$ & -7299.02 & -7386.83 & 1503.58 & 452.97 & 0.08 & -1736.64 & -1762.09 & 901.78 & 340.48 & 4.73 & -2078.10 & -2121.93 & 1297.24 & 956.34 & 2.00 \\
$f_2$ & 73.06 & 33.94 & 332.07 & 100.36 & 0.05 & 38678.19 & 39044.00 & 14740.90 & 5545.68 & 3.63 & 39611.35 & 41038.00 & 15876.60 & 8095.76 & 0.87 \\
$f_3$ & 149.76 & 119.57 & 338.84 & 99.33 & 0.10 & 15223266000.00 & 15499350000.00 & 9746940000.00 & 3077498860.01 & 3.87 & 13940456000.00 & 13970750000.00 & 11865890000.00 & 3765178464.71 & 1.06 \\
$f_4$ & -7758.64 & -7737.15 & 362.37 & 131.58 & 0.20 & 132487.90 & 138380.00 & 75080.00 & 22337.69 & 4.71 & 135166.40 & 136884.00 & 22462.00 & 30028.16 & 2.09 \\
$f_5$ & 45.26 & 35.50 & 124.00 & 36.68 & 0.07 & 249.51 & 249.76 & 66.18 & 22.99 & 4.75 & 219.01 & 229.32 & 130.89 & 46.52 & 2.18 \\
$f_6$ & 13.70 & 13.84 & 2.91 & 0.99 & 0.00 & 11.28 & 11.24 & 0.82 & 0.24 & 5.90 & 13.32 & 13.25 & 1.35 & 3.11 & 2.89 \\
$f_7$ & 36.00 & 34.30 & 24.57 & 8.03 & 0.04 & 34.10 & 34.92 & 5.20 & 1.82 & 6.54 & 45.15 & 44.93 & 8.66 & 9.88 & 4.59 \\
$f_8$ & 14.49 & 15.01 & 72.42 & 28.43 & 0.10 & 279.82 & 282.06 & 57.12 & 17.79 & 5.95 & 273.29 & 278.48 & 37.49 & 62.42 & 3.66 \\
$f_9$ & 141.72 & 212.47 & 376.52 & 154.36 & 0.09 & 305.15 & 306.60 & 19.46 & 6.85 & 7.10 & 342.88 & 354.12 & 71.60 & 74.94 & 4.89 \\
$f_{10}$ & -13566.43 & -14173.65 & 3948.20 & 1297.42 & 0.55 & -3194.74 & -2846.66 & 2290.95 & 832.19 & 6.07 & -3332.62 & -3378.12 & 2053.65 & 1555.81 & 3.43 \\
$f_{11}$ & -8428.54 & -8806.80 & 3527.50 & 1076.29 & 0.58 & -2037.88 & -1976.69 & 1404.16 & 421.07 & 8.39 & -2009.29 & -1998.01 & 1467.36 & 1004.39 & 6.10 \\
$f_{12}$ & 8.91 & 8.88 & 0.87 & 0.27 & 0.01 & 7.78 & 7.74 & 0.59 & 0.19 & 5.76 & 8.71 & 8.77 & 0.44 & 2.14 & 3.10 \\
$f_{13}$ & -2.36 & -2.38 & 2.90 & 0.81 & 0.01 & -5.46 & -5.55 & 1.53 & 0.48 & 7.33 & -2.53 & -2.52 & 2.33 & 1.43 & 4.73 \\
$f_{14}$ & -7.39 & -7.74 & 8.78 & 3.13 & 0.01 & -12.86 & -12.81 & 2.15 & 0.67 & 5.41 & -7.47 & -7.21 & 6.35 & 4.04 & 3.20 \\
$f_{15}$ & -17.46 & -18.45 & 6.22 & 2.14 & 0.03 & -18.70 & -18.70 & 0.00 & 0.00 & 11.09 & -16.40 & -17.37 & 6.14 & 6.23 & 9.65 \\ \noalign{\smallskip}\hline\noalign{\smallskip}
& & & & & & & & & & & & & & & \\
\noalign{\smallskip}\hline\noalign{\smallskip} \multicolumn{16}{l}{\tiny $^1$ ThinkPad, 3.4GHz Intel Core i7 (3rd gen), 16 GB RAM}
\end{tabular}
}}
\end{figure}
\newpage
\begin{figure}
\section{Previous Results}
\caption{Computation comparison of DE, GA and PSO}
\hskip+4.0cm
\rotatebox{90.0}{
\scalebox{0.7}{
\small \centering
\label{Tab1d}
\begin{tabular}{c|lllll|lllll|lllll}
\noalign{\smallskip}\hline\noalign{\smallskip}
Problem & \multicolumn{5}{c}{DE}& \multicolumn{5}{|c|}{GA}
& \multicolumn{5}{c}{PSO} \\
\noalign{\smallskip}\hline\noalign{\smallskip}
& Avg & Median & Range & SD & T(s) & Avg & Median & Range & SD & T(s) & Avg & Median & Range & SD & T(s) \\ \noalign{\smallskip}\hline\noalign{\smallskip}
$f_1$ & -6112.33 & -6084.59 & 114.26 & 47.83 & 1.14 & -3276.12 & -3292.95 & 943.02 & 245.68 & 2.69 & -2871.98 & -2904.39 & 1194.77 & 322.06 & 0.12 \\
$f_2$ & 129.53 & 25.00 & 900.00 & 251.52 & 0.53 & 23185.53 & 22853.00 & 10310.00 & 3148.43 & 0.72 & 0.17 & 0.15 & 0.25 & 0.08 & 0.09 \\
$f_3$ & 26105.67 & 10019.00 & 168100.00 & 43662.88 & 0.78 & 5291234666.67 & 5017400000.00 & 5739020000.00 & 1539343402.74 & 0.68 & 421.98 & 200.19 & 1657.68 & 497.31 & 0.10 \\
$f_4$ & -7600.00 & -7960.00 & 2560.00 & 728.99 & 1.00 & 79752.00 & 81520.00 & 23240.00 & 8507.40 & 2.12 & -5206.62 & -5324.98 & 3479.78 & 1178.83 & 0.13 \\
$f_5$ & 0.00 & 0.00 & 0.00 & 0.00 & 1.08 & 145.86 & 150.55 & 51.89 & 17.68 & 2.31 & 9.17 & 8.93 & 5.88 & 1.95 & 0.13 \\
$f_6$ & 12.38 & 12.71 & 2.19 & 0.60 & 1.46 & 12.04 & 11.97 & 0.67 & 0.22 & 2.52 & 12.15 & 12.18 & 1.25 & 0.33 & 0.14 \\
$f_7$ & 19.06 & 19.01 & 0.62 & 0.16 & 1.67 & 36.69 & 36.60 & 5.76 & 1.54 & 4.20 & 20.55 & 20.45 & 2.63 & 0.68 & 0.18 \\
$f_8$ & 58.74 & 58.73 & 4.74 & 1.54 & 1.60 & 212.86 & 213.95 & 41.20 & 11.06 & 3.41 & -9.92 & -11.64 & 35.51 & 9.72 & 0.10 \\
$f_9$ & -83.30 & -80.69 & 21.87 & 6.99 & 2.09 & 276.38 & 276.83 & 14.65 & 4.35 & 4.10 & 251.53 & 288.37 & 173.05 & 64.83 & 0.14 \\
$f_{10}$ & -4959.12 & -4579.12 & 2896.23 & 966.10 & 3.02 & -4778.37 & -4822.17 & 978.82 & 327.79 & 4.72 & -4107.05 & -3830.50 & 2663.98 & 711.61 & 0.13 \\
$f_{11}$ & -8478.48 & -8821.20 & 5161.40 & 1330.20 & 3.56 & -3188.30 & -3181.83 & 1334.30 & 339.30 & 8.34 & -2899.33 & -2888.72 & 901.67 & 227.81 & 0.21 \\
$f_{12}$ & 0.00 & 0.00 & 0.00 & 0.00 & 1.48 & 8.00 & 8.01 & 0.69 & 0.17 & 2.70 & 7.02 & 7.08 & 1.30 & 0.37 & 0.15 \\
$f_{13}$ & -4.28 & -4.22 & 2.71 & 0.83 & 3.06 & -4.27 & -4.22 & 2.30 & 0.57 & 5.54 & -10.39 & -9.86 & 4.92 & 1.50 & 0.14 \\
$f_{14}$ & -18.99 & -19.00 & 0.04 & 0.01 & 1.47 & -10.88 & -10.53 & 3.70 & 1.00 & 3.65 & -16.07 & -16.15 & 5.22 & 1.59 & 0.14 \\
$f_{15}$ & -21.91 & -23.03 & 8.39 & 2.95 & 6.05 & -14.64 & -14.64 & 0.00 & 0.00 & 12.55 & -18.70 & -18.70 & 0.00 & 0.00 & 0.27 \\ \noalign{\smallskip}\hline\noalign{\smallskip}
& & & & & & & & & & & & & & & \\
\noalign{\smallskip}\hline\noalign{\smallskip} \multicolumn{16}{l}{\tiny $^1$ ThinkPad, 3.4GHz Intel Core i7 (3rd gen), 16 GB RAM}
\end{tabular}
}}
\end{figure}
\end{document}

56
lab4/search_function.h Normal file

@@ -0,0 +1,56 @@
#pragma once
class search_function {
public:
function func;
search_function(function f) : func(f) {
};
virtual double search(int permutations, int dimensionality) = 0;
protected:
std::vector<double> generate_solution(int dimensionality){
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++) {
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
return tmp;
}
// Generate population_count random solutions of the given dimensionality
std::vector<std::vector<double>> generate_population(int population_count, int dimensionality){
std::vector<std::vector<double>> tmp;
for (int i = 0; i < population_count; i++) {
tmp.push_back(generate_solution(dimensionality));
}
return tmp;
}
double check_bounds(double input){
if (input > func.upper_bound)
return func.upper_bound;
else if (input < func.lower_bound)
return func.lower_bound;
else
return input;
}
void check_solution_bounds(std::vector<double> *input){
for (auto &i: *input){
if (i > func.upper_bound)
i = func.upper_bound;
else if (i < func.lower_bound)
i = func.lower_bound;
}
}
};

149
lab4/soma.hpp Normal file

@@ -0,0 +1,149 @@
#pragma once
#include "search_function.h"
class soma : public search_function {
public:
soma(function f) : search_function(f) {};
double search(int permutations, int dimensionality) {
// Set the parameters defined on page 8
path_length = 3.5;
specimen_step = 0.11;
perturbation = 0.11;
migrations = 1000;
min_div = 0.1;
// Set the population size
population_size = dimensionality * 0.5;
if (population_size < 10)
population_size = 10;
// Set up random start population
for (int p = 0; p < population_size; p++) {
std::vector<double> tmp;
for (int i = 0; i < dimensionality; i++){
tmp.push_back(fmod(randomMT(), (func.upper_bound * 2)) + func.lower_bound);
}
population.push_back(tmp);
}
// Sort the population so the leader is at(0)
std::sort(population.begin(), population.end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
for (int m = 0; m < migrations; m++){
std::vector<double> leader_solution = population.at(0);
double leader_fitness = func.compute(leader_solution);
for (int i = 1; i < population.size(); i++){
// We need the best fitness and the starting point for each
// individual in the population. start_solution is not mutated,
// while best_* and population.at(i) both are
double best_fitness = func.compute(population.at(i));
std::vector<double> best_solution = population.at(i);
std::vector<double> start_solution = population.at(i);
for (double p = 0; p < path_length; p += specimen_step){
// Generate the perturbation vector for each step,
// seems to give better results than per migration
std::vector<double> perturbation_vector(dimensionality, 0);
for (auto &q: perturbation_vector){
double val = rand_between(0, 1);
if (val < perturbation)
q = 1;
else
q = 0;
}
// Mutate the individual
for (int j = 0; j < dimensionality; j++){
population.at(i).at(j) = start_solution.at(j) + (leader_solution.at(j) - start_solution.at(j)) * p * perturbation_vector.at(j);
}
check_solution_bounds(&population.at(i));
// If this step beat the individuals best, update it
if (func.compute(population.at(i)) < best_fitness){
best_fitness = func.compute(population.at(i));
best_solution = population.at(i);
}
}
// population.at(i) is now at the end of the step.
// set it to the best solution it found along the way
population.at(i) = best_solution;
}
// Early exit if the difference between the leader and any other individual is less than
// a defined constant. 1 is a little too lenient.
for (int r = 1; r < population.size(); r++){
if (std::abs(func.compute(population.at(r)) - leader_fitness) < min_div){
return leader_fitness;
}
}
// Resort the population so the leader will be at(0)
std::sort(population.begin(), population.end(), [this](std::vector<double> a, std::vector<double> b){
return this->func.compute(a) < this->func.compute(b);
});
// Test the front position. If the leader was usurped then replace it.
if (func.compute(population.front()) < leader_fitness) {
leader_solution = population.front();
leader_fitness = func.compute(leader_solution);
}
}
// Return the best solution found
return func.compute(population.front());
};
private:
double specimen_step;
double path_length;
int population_size;
double perturbation;
double min_div;
int migrations;
std::vector<std::vector<double>> population;
};

181
lab4/twister.c Normal file

@@ -0,0 +1,181 @@
// http://www.math.keio.ac.jp/~matumoto/ver980409.html
// This is the ``Mersenne Twister'' random number generator MT19937, which
// generates pseudorandom integers uniformly distributed in 0..(2^32 - 1)
// starting from any odd seed in 0..(2^32 - 1). This version is a recode
// by Shawn Cokus (Cokus@math.washington.edu) on March 8, 1998 of a version by
// Takuji Nishimura (who had suggestions from Topher Cooper and Marc Rieffel in
// July-August 1997).
//
// Effectiveness of the recoding (on Goedel2.math.washington.edu, a DEC Alpha
// running OSF/1) using GCC -O3 as a compiler: before recoding: 51.6 sec. to
// generate 300 million random numbers; after recoding: 24.0 sec. for the same
// (i.e., 46.5% of original time), so speed is now about 12.5 million random
// number generations per second on this machine.
//
// According to the URL <http://www.math.keio.ac.jp/~matumoto/emt.html>
// (and paraphrasing a bit in places), the Mersenne Twister is ``designed
// with consideration of the flaws of various existing generators,'' has
// a period of 2^19937 - 1, gives a sequence that is 623-dimensionally
// equidistributed, and ``has passed many stringent tests, including the
// die-hard test of G. Marsaglia and the load test of P. Hellekalek and
// S. Wegenkittl.'' It is efficient in memory usage (typically using 2506
// to 5012 bytes of static data, depending on data type sizes, and the code
// is quite short as well). It generates random numbers in batches of 624
// at a time, so the caching and pipelining of modern systems is exploited.
// It is also divide- and mod-free.
//
// This library is free software; you can redistribute it and/or modify it
// under the terms of the GNU Library General Public License as published by
// the Free Software Foundation (either version 2 of the License or, at your
// option, any later version). This library is distributed in the hope that
// it will be useful, but WITHOUT ANY WARRANTY, without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
// the GNU Library General Public License for more details. You should have
// received a copy of the GNU Library General Public License along with this
// library; if not, write to the Free Software Foundation, Inc., 59 Temple
// Place, Suite 330, Boston, MA 02111-1307, USA.
//
// The code as Shawn received it included the following notice:
//
// Copyright (C) 1997 Makoto Matsumoto and Takuji Nishimura. When
// you use this, send an e-mail to <matumoto@math.keio.ac.jp> with
// an appropriate reference to your work.
//
// It would be nice to CC: <Cokus@math.washington.edu> when you write.
//
#include <stdio.h>
#include <stdlib.h>
//
// uint32 must be an unsigned integer type capable of holding at least 32
// bits; exactly 32 should be fastest, but 64 is better on an Alpha with
// GCC at -O3 optimization so try your options and see what's best for you
//
typedef unsigned long uint32;
#define N (624) // length of state vector
#define M (397) // a period parameter
#define K (0x9908B0DFU) // a magic constant
#define hiBit(u) ((u) & 0x80000000U) // mask all but highest bit of u
#define loBit(u) ((u) & 0x00000001U) // mask all but lowest bit of u
#define loBits(u) ((u) & 0x7FFFFFFFU) // mask the highest bit of u
#define mixBits(u, v) (hiBit(u)|loBits(v)) // move hi bit of u to hi bit of v
static uint32 state[N+1]; // state vector + 1 extra to not violate ANSI C
static uint32 *next; // next random value is computed from here
static int left = -1; // can *next++ this many times before reloading
void seedMT(uint32 seed)
{
//
// We initialize state[0..(N-1)] via the generator
//
// x_new = (69069 * x_old) mod 2^32
//
// from Line 15 of Table 1, p. 106, Sec. 3.3.4 of Knuth's
// _The Art of Computer Programming_, Volume 2, 3rd ed.
//
// Notes (SJC): I do not know what the initial state requirements
// of the Mersenne Twister are, but it seems this seeding generator
// could be better. It achieves the maximum period for its modulus
// (2^30) iff x_initial is odd (p. 20-21, Sec. 3.2.1.2, Knuth); if
// x_initial can be even, you have sequences like 0, 0, 0, ...;
// 2^31, 2^31, 2^31, ...; 2^30, 2^30, 2^30, ...; 2^29, 2^29 + 2^31,
// 2^29, 2^29 + 2^31, ..., etc. so I force seed to be odd below.
//
// Even if x_initial is odd, if x_initial is 1 mod 4 then
//
// the lowest bit of x is always 1,
// the next-to-lowest bit of x is always 0,
// the 2nd-from-lowest bit of x alternates ... 0 1 0 1 0 1 0 1 ... ,
// the 3rd-from-lowest bit of x 4-cycles ... 0 1 1 0 0 1 1 0 ... ,
// the 4th-from-lowest bit of x has the 8-cycle ... 0 0 0 1 1 1 1 0 ... ,
// ...
//
// and if x_initial is 3 mod 4 then
//
// the lowest bit of x is always 1,
// the next-to-lowest bit of x is always 1,
// the 2nd-from-lowest bit of x alternates ... 0 1 0 1 0 1 0 1 ... ,
// the 3rd-from-lowest bit of x 4-cycles ... 0 0 1 1 0 0 1 1 ... ,
// the 4th-from-lowest bit of x has the 8-cycle ... 0 0 1 1 1 1 0 0 ... ,
// ...
//
// The generator's potency (min. s>=0 with (69069-1)^s = 0 mod 2^32) is
// 16, which seems to be alright by p. 25, Sec. 3.2.1.3 of Knuth. It
// also does well in the dimension 2..5 spectral tests, but it could be
// better in dimension 6 (Line 15, Table 1, p. 106, Sec. 3.3.4, Knuth).
//
// Note that the random number user does not see the values generated
// here directly since reloadMT() will always munge them first, so maybe
// none of all of this matters. In fact, the seed values made here could
// even be extra-special desirable if the Mersenne Twister theory says
// so-- that's why the only change I made is to restrict to odd seeds.
//
register uint32 x = (seed | 1U) & 0xFFFFFFFFU, *s = state;
register int j;
for(left=0, *s++=x, j=N; --j;
*s++ = (x*=69069U) & 0xFFFFFFFFU);
}
uint32 reloadMT(void)
{
register uint32 *p0=state, *p2=state+2, *pM=state+M, s0, s1;
register int j;
if(left < -1)
seedMT(4357U);
left=N-1, next=state+1;
for(s0=state[0], s1=state[1], j=N-M+1; --j; s0=s1, s1=*p2++)
*p0++ = *pM++ ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
for(pM=state, j=M; --j; s0=s1, s1=*p2++)
*p0++ = *pM++ ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
s1=state[0], *p0 = *pM ^ (mixBits(s0, s1) >> 1) ^ (loBit(s1) ? K : 0U);
s1 ^= (s1 >> 11);
s1 ^= (s1 << 7) & 0x9D2C5680U;
s1 ^= (s1 << 15) & 0xEFC60000U;
return(s1 ^ (s1 >> 18));
}
uint32 randomMT(void)
{
uint32 y;
if(--left < 0)
return(reloadMT());
y = *next++;
y ^= (y >> 11);
y ^= (y << 7) & 0x9D2C5680U;
y ^= (y << 15) & 0xEFC60000U;
return(y ^ (y >> 18));
}
#ifdef NOCOMPILE
int main(void)
{
int j;
// you can seed with any uint32, but the best are odds in 0..(2^32 - 1)
seedMT(4357U);
// print the first 2,002 random numbers seven to a line as an example
for(j=0; j<2002; j++)
printf(" %10lu%s", (unsigned long) randomMT(), (j%7)==6 ? "\n" : "");
return(EXIT_SUCCESS);
}
#endif

12
lab4/util.hpp Normal file

@@ -0,0 +1,12 @@
struct timer{
std::chrono::high_resolution_clock::time_point t1;
std::chrono::high_resolution_clock::time_point t2;
void start(){t1 = std::chrono::high_resolution_clock::now();}
void end(){t2 = std::chrono::high_resolution_clock::now();}
double duration(){ return std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();}
};
double rand_between(double lower, double upper){
return lower + (randomMT() * 1.0 / UINT32_MAX) * (upper - lower);
}