Merge branch 'master' into memory_leak_avltree

This commit is contained in:
David Leal
2023-02-03 18:22:38 -06:00
committed by GitHub
4 changed files with 272 additions and 2 deletions

View File

@@ -168,6 +168,7 @@
## Machine Learning
* [A Star Search](https://github.com/TheAlgorithms/C-Plus-Plus/blob/HEAD/machine_learning/a_star_search.cpp)
* [Adaline Learning](https://github.com/TheAlgorithms/C-Plus-Plus/blob/HEAD/machine_learning/adaline_learning.cpp)
* [K Nearest Neighbors](https://github.com/TheAlgorithms/C-Plus-Plus/blob/HEAD/machine_learning/k_nearest_neighbors.cpp)
* [Kohonen Som Topology](https://github.com/TheAlgorithms/C-Plus-Plus/blob/HEAD/machine_learning/kohonen_som_topology.cpp)
* [Kohonen Som Trace](https://github.com/TheAlgorithms/C-Plus-Plus/blob/HEAD/machine_learning/kohonen_som_trace.cpp)
* [Neural Network](https://github.com/TheAlgorithms/C-Plus-Plus/blob/HEAD/machine_learning/neural_network.cpp)

View File

@@ -0,0 +1,73 @@
/**
* @file
* @brief [Find whether a given number is power of 2]
* (https://www.geeksforgeeks.org/program-to-find-whether-a-given-number-is-power-of-2/) implementation
*
* @details
 * Given a positive integer, we need to check whether that number is a power
 * of 2 or not.
*
* A binary number consists of two digits. They are 0 & 1. Digit 1 is known as
* set bit in computer terms.
* Worst Case Time Complexity: O(1)
* Space complexity: O(1)
* @author [Prafful Gupta](https://github.com/EcstaticPG-25811)
*/
#include <cassert>   /// for assert
#include <cstdint>   /// for std::int64_t
#include <iostream>  /// for IO operations
/**
 * @namespace bit_manipulation
 * @brief Bit manipulation algorithms
 */
namespace bit_manipulation {
/**
 * @brief Checks whether a number is a power of 2.
 * @details A positive power of 2 has exactly one set bit, so subtracting 1
 * turns that bit off and sets every lower bit; therefore `n & (n - 1)` is
 * zero if and only if n has a single set bit. The `n > 0` guard rejects 0
 * (which would otherwise satisfy the bitwise test) and all negative values.
 * @param n the number to check; std::int64_t is used instead of int so that
 * large inputs do not overflow
 * @returns true if n is a power of 2, false otherwise
 */
bool isPowerOfTwo(std::int64_t n) {
    // Parenthesized explicitly: `-` binds tighter than `&`.
    return n > 0 && ((n & (n - 1)) == 0);
}
}  // namespace bit_manipulation
/**
 * @brief Self-test implementations
 * @returns void
 */
static void test() {
    // Table-driven checks: each entry pairs an input with its expected
    // power-of-2 verdict.
    struct Case {
        std::int64_t value;   // number under test
        bool expected;        // expected result of isPowerOfTwo
    };
    const Case cases[] = {{4, true},   {6, false}, {13, false}, {64, true},
                          {15, false}, {32, true}, {97, false}, {1024, true}};
    for (const auto& c : cases) {
        assert(bit_manipulation::isPowerOfTwo(c.value) == c.expected);
    }
    std::cout << "All test cases successfully passed!" << std::endl;
}
/**
 * @brief Main function
 * @returns 0 on exit
 */
int main() {
    test();  // execute the self-test cases
    return 0;
}

View File

@@ -6,8 +6,8 @@ if(OpenGL_FOUND)
include(ExternalProject)
ExternalProject_Add (
FREEGLUT-PRJ
URL https://github.com/FreeGLUTProject/freeglut/releases/download/v3.2.1/freeglut-3.2.1.tar.gz
URL_MD5 cd5c670c1086358598a6d4a9d166949d
URL https://github.com/FreeGLUTProject/freeglut/releases/download/v3.2.2/freeglut-3.2.2.tar.gz
URL_MD5 485c1976165315fc42c0b0a1802816d9
CMAKE_GENERATOR ${CMAKE_GENERATOR}
CMAKE_GENERATOR_TOOLSET ${CMAKE_GENERATOR_TOOLSET}
CMAKE_GENERATOR_PLATFORM ${CMAKE_GENERATOR_PLATFORM}

View File

@@ -0,0 +1,196 @@
/**
* @file
* @brief Implementation of [K-Nearest Neighbors algorithm]
* (https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm).
* @author [Luiz Carlos Cosmi Filho](https://github.com/luizcarloscf)
* @details K-nearest neighbors algorithm, also known as KNN or k-NN, is a
* supervised learning classifier, which uses proximity to make classifications.
 * This implementation uses the Euclidean distance as the distance metric to
 * find the K-nearest neighbors.
* the K-nearest neighbors.
*/
#include <algorithm> /// for std::transform and std::sort
#include <cassert> /// for assert
#include <cmath> /// for std::pow and std::sqrt
#include <iostream> /// for std::cout
#include <numeric> /// for std::accumulate
#include <unordered_map> /// for std::unordered_map
#include <vector> /// for std::vector
/**
* @namespace machine_learning
* @brief Machine learning algorithms
*/
namespace machine_learning {
/**
* @namespace k_nearest_neighbors
* @brief Functions for the [K-Nearest Neighbors algorithm]
* (https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm) implementation
*/
namespace k_nearest_neighbors {
/**
 * @brief Compute the Euclidean distance between two vectors.
 *
 * @tparam T typename of the vector
 * @param a first unidimensional vector
 * @param b second unidimensional vector; must be at least as long as `a`
 * @return double scalar representing the Euclidean distance between provided
 * vectors
 */
template <typename T>
double euclidean_distance(const std::vector<T>& a, const std::vector<T>& b) {
    // Fold the squared differences directly: avoids materializing an
    // auxiliary vector, and uses plain multiplication instead of
    // std::pow(x, 2), which is far slower for integer exponents.
    const double sum_sq = std::inner_product(
        a.begin(), a.end(), b.begin(), 0.0,
        [](double acc, double sq) { return acc + sq; },
        [](T x1, T x2) { return static_cast<double>(x1 - x2) * (x1 - x2); });
    return std::sqrt(sum_sq);
}
/**
 * @brief K-Nearest Neighbors (Knn) class using Euclidean distance as
 * distance metric.
 */
class Knn {
 private:
    std::vector<std::vector<double>> X_{};  ///< attributes vector
    std::vector<int> Y_{};                  ///< labels vector

 public:
    /**
     * @brief Construct a new Knn object.
     * @details Using lazy-learning approach, just holds in memory the dataset.
     * @param X attributes vector
     * @param Y labels vector
     */
    explicit Knn(std::vector<std::vector<double>>& X, std::vector<int>& Y)
        : X_(X), Y_(Y) {}
    /**
     * Copy Constructor for class Knn.
     *
     * @param model instance of class to be copied
     */
    Knn(const Knn& model) = default;
    /**
     * Copy assignment operator for class Knn
     */
    Knn& operator=(const Knn& model) = default;
    /**
     * Move constructor for class Knn
     */
    Knn(Knn&&) = default;
    /**
     * Move assignment operator for class Knn
     */
    Knn& operator=(Knn&&) = default;
    /**
     * @brief Destroy the Knn object
     */
    ~Knn() = default;
    /**
     * @brief Classify sample.
     * @param sample sample to classify
     * @param k number of neighbors to consult; values larger than the
     * dataset size are clamped to it, non-positive values yield -1
     * @return int label most frequent among the k nearest neighbors, or -1
     * when no neighbors are available
     */
    int predict(std::vector<double>& sample, int k) {
        // One (distance, label) pair per stored observation.
        std::vector<std::pair<double, int>> distances;
        distances.reserve(this->X_.size());
        for (size_t i = 0; i < this->X_.size(); ++i) {
            // Bind by const reference: the previous copy of each row cost
            // an O(dim) allocation per observation.
            const auto& current = this->X_.at(i);
            distances.emplace_back(euclidean_distance(current, sample),
                                   this->Y_.at(i));
        }
        // Clamp k so an oversized request cannot throw std::out_of_range.
        const size_t n_neighbors =
            std::min(distances.size(), static_cast<size_t>(k > 0 ? k : 0));
        // Only the k smallest distances are needed — a partial sort of the
        // first k positions replaces sorting the whole vector.
        std::partial_sort(distances.begin(), distances.begin() + n_neighbors,
                          distances.end());
        // Majority vote over the labels of the k nearest neighbors.
        std::unordered_map<int, int> frequency;
        for (size_t i = 0; i < n_neighbors; ++i) {
            ++frequency[distances[i].second];
        }
        int predicted_label = -1;
        int predicted_count = -1;
        for (const auto& kv : frequency) {
            if (kv.second > predicted_count) {
                predicted_count = kv.second;
                predicted_label = kv.first;
            }
        }
        return predicted_label;
    }
};
} // namespace k_nearest_neighbors
} // namespace machine_learning
/**
* @brief Self-test implementations
* @returns void
*/
static void test() {
std::cout << "------- Test 1 -------" << std::endl;
std::vector<std::vector<double>> X1 = {{0.0, 0.0}, {0.25, 0.25},
{0.0, 0.5}, {0.5, 0.5},
{1.0, 0.5}, {1.0, 1.0}};
std::vector<int> Y1 = {1, 1, 1, 1, 2, 2};
auto model1 = machine_learning::k_nearest_neighbors::Knn(X1, Y1);
std::vector<double> sample1 = {1.2, 1.2};
std::vector<double> sample2 = {0.1, 0.1};
std::vector<double> sample3 = {0.1, 0.5};
std::vector<double> sample4 = {1.0, 0.75};
assert(model1.predict(sample1, 2) == 2);
assert(model1.predict(sample2, 2) == 1);
assert(model1.predict(sample3, 2) == 1);
assert(model1.predict(sample4, 2) == 2);
std::cout << "... Passed" << std::endl;
std::cout << "------- Test 2 -------" << std::endl;
std::vector<std::vector<double>> X2 = {
{0.0, 0.0, 0.0}, {0.25, 0.25, 0.0}, {0.0, 0.5, 0.0}, {0.5, 0.5, 0.0},
{1.0, 0.5, 0.0}, {1.0, 1.0, 0.0}, {1.0, 1.0, 1.0}, {1.5, 1.5, 1.0}};
std::vector<int> Y2 = {1, 1, 1, 1, 2, 2, 3, 3};
auto model2 = machine_learning::k_nearest_neighbors::Knn(X2, Y2);
std::vector<double> sample5 = {1.2, 1.2, 0.0};
std::vector<double> sample6 = {0.1, 0.1, 0.0};
std::vector<double> sample7 = {0.1, 0.5, 0.0};
std::vector<double> sample8 = {1.0, 0.75, 1.0};
assert(model2.predict(sample5, 2) == 2);
assert(model2.predict(sample6, 2) == 1);
assert(model2.predict(sample7, 2) == 1);
assert(model2.predict(sample8, 2) == 3);
std::cout << "... Passed" << std::endl;
std::cout << "------- Test 3 -------" << std::endl;
std::vector<std::vector<double>> X3 = {{0.0}, {1.0}, {2.0}, {3.0},
{4.0}, {5.0}, {6.0}, {7.0}};
std::vector<int> Y3 = {1, 1, 1, 1, 2, 2, 2, 2};
auto model3 = machine_learning::k_nearest_neighbors::Knn(X3, Y3);
std::vector<double> sample9 = {0.5};
std::vector<double> sample10 = {2.9};
std::vector<double> sample11 = {5.5};
std::vector<double> sample12 = {7.5};
assert(model3.predict(sample9, 3) == 1);
assert(model3.predict(sample10, 3) == 1);
assert(model3.predict(sample11, 3) == 2);
assert(model3.predict(sample12, 3) == 2);
std::cout << "... Passed" << std::endl;
}
/**
 * @brief Main function
 * @param argc commandline argument count (ignored)
 * @param argv commandline array of arguments (ignored)
 * @return int 0 on exit
 */
int main(int argc, char* argv[]) {
    static_cast<void>(argc);  // command-line arguments are unused
    static_cast<void>(argv);
    test();  // run self-test implementations
    return 0;
}