diff --git a/DIRECTORY.md b/DIRECTORY.md index 679e6f7d8..d678276bc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -98,6 +98,7 @@ * [Adaline Learning](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/adaline_learning.cpp) * [Kohonen Som Topology](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/kohonen_som_topology.cpp) * [Kohonen Som Trace](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/kohonen_som_trace.cpp) + * [Ordinary Least Squares Regressor](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/machine_learning/ordinary_least_squares_regressor.cpp) ## Math * [Armstrong Number](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/math/armstrong_number.cpp) @@ -145,7 +146,6 @@ * [Ode Forward Euler](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ode_forward_euler.cpp) * [Ode Midpoint Euler](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ode_midpoint_euler.cpp) * [Ode Semi Implicit Euler](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ode_semi_implicit_euler.cpp) - * [Ordinary Least Squares Regressor](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/ordinary_least_squares_regressor.cpp) * [Qr Decompose](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/qr_decompose.h) * [Qr Decomposition](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/qr_decomposition.cpp) * [Qr Eigen Values](https://github.com/TheAlgorithms/C-Plus-Plus/blob/master/numerical_methods/qr_eigen_values.cpp) diff --git a/numerical_methods/ordinary_least_squares_regressor.cpp b/machine_learning/ordinary_least_squares_regressor.cpp similarity index 98% rename from numerical_methods/ordinary_least_squares_regressor.cpp rename to machine_learning/ordinary_least_squares_regressor.cpp index dcbbcf8c7..896504e20 100644 --- 
a/numerical_methods/ordinary_least_squares_regressor.cpp +++ b/machine_learning/ordinary_least_squares_regressor.cpp @@ -355,7 +355,7 @@ std::vector<float> predict_OLS_regressor(std::vector<std::vector<T>> const &X, } /** Self test checks */ -void test() { +void ols_test() { int F = 3, N = 5; /* test function = x^2 -5 */ @@ -368,12 +368,12 @@ void test() { // perform regression modelling std::vector<float> beta1 = fit_OLS_regressor(data1, Y1); // create test data set with same features = x, x^2, x^3 - std::vector<std::vector<float>> test1( + std::vector<std::vector<float>> test_data1( {{-2, 4, -8}, {2, 4, 8}, {-10, 100, -1000}, {10, 100, 1000}}); // expected regression outputs std::vector<float> expected1({-1, -1, 95, 95}); // predicted regression outputs - std::vector<float> out1 = predict_OLS_regressor(test1, beta1); + std::vector<float> out1 = predict_OLS_regressor(test_data1, beta1); // compare predicted results are within +-0.01 limit of expected for (size_t rows = 0; rows < out1.size(); rows++) assert(std::abs(out1[rows] - expected1[rows]) < 0.01); @@ -389,12 +389,12 @@ void test() { // perform regression modelling std::vector<float> beta2 = fit_OLS_regressor(data2, Y2); // create test data set with same features = x, x^2, x^3 - std::vector<std::vector<float>> test2( + std::vector<std::vector<float>> test_data2( {{-2, 4, -8}, {2, 4, 8}, {-10, 100, -1000}, {10, 100, 1000}}); // expected regression outputs std::vector<float> expected2({-104, -88, -1000, 1000}); // predicted regression outputs - std::vector<float> out2 = predict_OLS_regressor(test2, beta2); + std::vector<float> out2 = predict_OLS_regressor(test_data2, beta2); // compare predicted results are within +-0.01 limit of expected for (size_t rows = 0; rows < out2.size(); rows++) assert(std::abs(out2[rows] - expected2[rows]) < 0.01); @@ -408,7 +408,7 @@ * main function */ int main() { - test(); + ols_test(); size_t N, F;