Algorithms_in_C++  1.0.0
Set of algorithms implemented in C++.
machine_learning::neural_network::NeuralNetwork Class Reference
Collaboration diagram for machine_learning::neural_network::NeuralNetwork:
[legend]

Public Member Functions

 NeuralNetwork ()=default
 
 NeuralNetwork (const std::vector< std::pair< int, std::string >> &config)
 
 NeuralNetwork (const NeuralNetwork &model)=default
 
 ~NeuralNetwork ()=default
 
NeuralNetwork & operator= (const NeuralNetwork &model)=default
 
 NeuralNetwork (NeuralNetwork &&)=default
 
NeuralNetwork & operator= (NeuralNetwork &&)=default
 
std::pair< std::vector< std::vector< std::valarray< double > > >, std::vector< std::vector< std::valarray< double > > > > get_XY_from_csv (const std::string &file_name, const bool &last_label, const bool &normalize, const int &slip_lines=1)
 
std::vector< std::valarray< double > > single_predict (const std::vector< std::valarray< double >> &X)
 
std::vector< std::vector< std::valarray< double > > > batch_predict (const std::vector< std::vector< std::valarray< double >>> &X)
 
void fit (const std::vector< std::vector< std::valarray< double >>> &X_, const std::vector< std::vector< std::valarray< double >>> &Y_, const int &epochs=100, const double &learning_rate=0.01, const size_t &batch_size=32, const bool &shuffle=true)
 
void fit_from_csv (const std::string &file_name, const bool &last_label, const int &epochs, const double &learning_rate, const bool &normalize, const int &slip_lines=1, const size_t &batch_size=32, const bool &shuffle=true)
 
void evaluate (const std::vector< std::vector< std::valarray< double >>> &X, const std::vector< std::vector< std::valarray< double >>> &Y)
 
void evaluate_from_csv (const std::string &file_name, const bool &last_label, const bool &normalize, const int &slip_lines=1)
 
void save_model (const std::string &_file_name)
 
NeuralNetwork load_model (const std::string &file_name)
 
void summary ()
 

Private Member Functions

 NeuralNetwork (const std::vector< std::pair< int, std::string >> &config, const std::vector< std::vector< std::valarray< double >>> &kernals)
 
std::vector< std::vector< std::valarray< double > > > __detailed_single_prediction (const std::vector< std::valarray< double >> &X)
 

Private Attributes

std::vector< neural_network::layers::DenseLayer > layers
 

Detailed Description

The NeuralNetwork class implements an MLP (multilayer perceptron). This class is used by the actual user to create and train networks.

Constructor & Destructor Documentation

◆ NeuralNetwork() [1/5]

machine_learning::neural_network::NeuralNetwork::NeuralNetwork ( const std::vector< std::pair< int, std::string >> &  config,
const std::vector< std::vector< std::valarray< double >>> &  kernals 
)
inlineprivate

Private Constructor for class NeuralNetwork. This constructor is used internally to load model.

Parameters
config: vector containing pairs (neurons, activation)
kernals: vector containing all pretrained kernals
271  {
272  // First layer should not have activation
273  if(config.begin() -> second != "none") {
274  std::cerr << "ERROR: First layer can't have activation other than none";
275  std::cerr << std::endl;
276  std::exit(EXIT_FAILURE);
277  }
278  // Network should have atleast two layers
279  if(config.size() <= 1) {
280  std::cerr << "ERROR: Invalid size of network, ";
281  std::cerr << "Atleast two layers are required";
282  std::exit(EXIT_FAILURE);
283  }
284  // Reconstructing all pretrained layers
285  for(size_t i = 0; i < config.size(); i++) {
286  layers.emplace_back(neural_network::layers::DenseLayer(config[i].first,
287  config[i].second,
288  kernals[i]));
289  }
290  std::cout << "INFO: Network constructed successfully" << std::endl;
291  }
Here is the call graph for this function:

◆ NeuralNetwork() [2/5]

machine_learning::neural_network::NeuralNetwork::NeuralNetwork ( )
default

Default Constructor for class NeuralNetwork. This constructor is used to create empty variable of type NeuralNetwork class.

◆ NeuralNetwork() [3/5]

machine_learning::neural_network::NeuralNetwork::NeuralNetwork ( const std::vector< std::pair< int, std::string >> &  config)
inlineexplicit

Constructor for class NeuralNetwork. This constructor is used by user.

Parameters
config: vector containing pairs (neurons, activation)
322  {
323  // First layer should not have activation
324  if(config.begin() -> second != "none") {
325  std::cerr << "ERROR: First layer can't have activation other than none";
326  std::cerr << std::endl;
327  std::exit(EXIT_FAILURE);
328  }
329  // Network should have atleast two layers
330  if(config.size() <= 1) {
331  std::cerr << "ERROR: Invalid size of network, ";
332  std::cerr << "Atleast two layers are required";
333  std::exit(EXIT_FAILURE);
334  }
335  // Separately creating first layer so it can have unit matrix
336  // as kernal.
337  layers.push_back(neural_network::layers::DenseLayer(config[0].first,
338  config[0].second,
339  {config[0].first, config[0].first},
340  false));
341  // Creating remaining layers
342  for(size_t i = 1; i < config.size(); i++) {
343  layers.push_back(neural_network::layers::DenseLayer(config[i].first,
344  config[i].second,
345  {config[i - 1].first, config[i].first},
346  true));
347  }
348  std::cout << "INFO: Network constructed successfully" << std::endl;
349  }
Here is the call graph for this function:

◆ NeuralNetwork() [4/5]

machine_learning::neural_network::NeuralNetwork::NeuralNetwork ( const NeuralNetwork &  model)
default

Copy Constructor for class NeuralNetwork.

Parameters
model: instance of class to be copied.

◆ ~NeuralNetwork()

machine_learning::neural_network::NeuralNetwork::~NeuralNetwork ( )
default

Destructor for class NeuralNetwork.

◆ NeuralNetwork() [5/5]

machine_learning::neural_network::NeuralNetwork::NeuralNetwork ( NeuralNetwork &&  )
default

Move constructor for class NeuralNetwork

Member Function Documentation

◆ __detailed_single_prediction()

std::vector<std::vector<std::valarray <double> > > machine_learning::neural_network::NeuralNetwork::__detailed_single_prediction ( const std::vector< std::valarray< double >> &  X)
inlineprivate

Private function to get detailed predictions (i.e. activated neuron values). This function is used in backpropagation, single predict and batch predict.

Parameters
X: input vector
299  {
301  std::vector < std::valarray <double> > current_pass = X;
302  details.emplace_back(X);
303  for(const auto &l : layers) {
304  current_pass = multiply(current_pass, l.kernal);
305  current_pass = apply_function(current_pass, l.activation_function);
306  details.emplace_back(current_pass);
307  }
308  return details;
309  }
Here is the call graph for this function:

◆ batch_predict()

std::vector< std::vector <std::valarray<double> > > machine_learning::neural_network::NeuralNetwork::batch_predict ( const std::vector< std::vector< std::valarray< double >>> &  X)
inline

Function to get prediction of model on batch

Parameters
X: array of feature vectors
Returns
returns predicted values as vector
466  {
467  // Store predicted values
469  for(size_t i = 0; i < X.size(); i++) { // For every sample
470  // Push predicted values
471  predicted_batch[i] = this -> single_predict(X[i]);
472  }
473  return predicted_batch; // Return predicted values
474  }
Here is the call graph for this function:

◆ evaluate()

void machine_learning::neural_network::NeuralNetwork::evaluate ( const std::vector< std::vector< std::valarray< double >>> &  X,
const std::vector< std::vector< std::valarray< double >>> &  Y 
)
inline

Function to evaluate model on supplied data

Parameters
X: array of feature vectors (input data)
Y: array of target values (label)
595  {
596  std::cout << "INFO: Evaluation Started" << std::endl;
597  double acc = 0, loss = 0; // intialize performance metrics with zero
598  for(size_t i = 0; i < X.size(); i++) { // For every sample in input
599  // Get predictions
601  // If predicted class is correct
602  if(argmax(pred) == argmax(Y[i])) {
603  acc += 1; // Increment accuracy
604  }
605  // Calculating loss - Mean Squared Error
606  loss += sum(apply_function((Y[i] - pred),
607  neural_network::util_functions::square) * 0.5);
608  }
609  acc /= X.size(); // Averaging accuracy
610  loss /= X.size(); // Averaging loss
611  // Prinitng performance of the model
612  std::cout << "Evaluation: Loss: " << loss;
613  std::cout << ", Accuracy: " << acc << std::endl;
614  return;
615  }
Here is the call graph for this function:

◆ evaluate_from_csv()

void machine_learning::neural_network::NeuralNetwork::evaluate_from_csv ( const std::string &  file_name,
const bool &  last_label,
const bool &  normalize,
const int &  slip_lines = 1 
)
inline

Function to evaluate model on data stored in csv file

Parameters
file_name: csv file name
last_label: flag for whether label is in first or last column
normalize: flag for whether to normalize data
slip_lines: number of lines to skip
627  {
628  // Getting training data from csv file
629  auto data = this -> get_XY_from_csv(file_name, last_label, normalize, slip_lines);
630  // Evaluating model
631  this -> evaluate(data.first, data.second);
632  return;
633  }
Here is the call graph for this function:

◆ fit()

void machine_learning::neural_network::NeuralNetwork::fit ( const std::vector< std::vector< std::valarray< double >>> &  X_,
const std::vector< std::vector< std::valarray< double >>> &  Y_,
const int &  epochs = 100,
const double &  learning_rate = 0.01,
const size_t &  batch_size = 32,
const bool &  shuffle = true 
)
inline

Function to fit model on supplied data

Parameters
X_: array of feature vectors
Y_: array of target values
epochs: number of epochs (default = 100)
learning_rate: learning rate (default = 0.01)
batch_size: batch size for gradient descent (default = 32)
shuffle: flag for whether to shuffle data (default = true)
490  {
492  // Both label and input data should have same size
493  if (X.size() != Y.size()) {
494  std::cerr << "ERROR : X and Y in fit have different sizes" << std::endl;
495  std::exit(EXIT_FAILURE);
496  }
497  std::cout << "INFO: Training Started" << std::endl;
498  for (int epoch = 1; epoch <= epochs; epoch++) { // For every epoch
499  // Shuffle X and Y if flag is set
500  if(shuffle) {
501  equal_shuffle(X, Y);
502  }
503  auto start = std::chrono::high_resolution_clock::now(); // Start clock
504  double loss = 0, acc = 0; // Intialize performance metrics with zero
505  // For each starting index of batch
506  for(size_t batch_start = 0; batch_start < X.size(); batch_start += batch_size) {
507  for(size_t i = batch_start; i < std::min(X.size(), batch_start + batch_size); i++) {
508  std::vector <std::valarray<double>> grad, cur_error, predicted;
509  auto activations = this -> __detailed_single_prediction(X[i]);
510  // Gradients vector to store gradients for all layers
511  // They will be averaged and applied to kernal
513  gradients.resize(this -> layers.size());
514  // First intialize gradients to zero
515  for(size_t i = 0; i < gradients.size(); i++) {
516  zeroes_initialization(gradients[i], get_shape(this -> layers[i].kernal));
517  }
518  predicted = activations.back(); // Predicted vector
519  cur_error = predicted - Y[i]; // Absoulute error
520  // Calculating loss with MSE
521  loss += sum(apply_function(cur_error, neural_network::util_functions::square));
522  // If prediction is correct
523  if(argmax(predicted) == argmax(Y[i])) {
524  acc += 1;
525  }
526  // For every layer (except first) starting from last one
527  for(size_t j = this -> layers.size() - 1; j >= 1; j--) {
528  // Backpropogating errors
529  cur_error = hadamard_product(cur_error,
530  apply_function(activations[j + 1],
531  this -> layers[j].dactivation_function));
532  // Calculating gradient for current layer
533  grad = multiply(transpose(activations[j]), cur_error);
534  // Change error according to current kernal values
535  cur_error = multiply(cur_error, transpose(this -> layers[j].kernal));
536  // Adding gradient values to collection of gradients
537  gradients[j] = gradients[j] + grad / double(batch_size);
538  }
539  // Applying gradients
540  for(size_t j = this -> layers.size() - 1; j >= 1; j--) {
541  // Updating kernal (aka weights)
542  this -> layers[j].kernal = this -> layers[j].kernal -
543  gradients[j] * learning_rate;
544  }
545  }
546  }
547  auto stop = std::chrono::high_resolution_clock::now(); // Stoping the clock
548  // Calculate time taken by epoch
549  auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
550  loss /= X.size(); // Averaging loss
551  acc /= X.size(); // Averaging accuracy
552  std::cout.precision(4); // set output precision to 4
553  // Printing training stats
554  std::cout << "Training: Epoch " << epoch << '/' << epochs;
555  std::cout << ", Loss: " << loss;
556  std::cout << ", Accuracy: " << acc;
557  std::cout << ", Taken time: " << duration.count() / 1e6 << " seconds";
558  std::cout << std::endl;
559  }
560  return;
561  }
Here is the call graph for this function:

◆ fit_from_csv()

void machine_learning::neural_network::NeuralNetwork::fit_from_csv ( const std::string &  file_name,
const bool &  last_label,
const int &  epochs,
const double &  learning_rate,
const bool &  normalize,
const int &  slip_lines = 1,
const size_t &  batch_size = 32,
const bool &  shuffle = true 
)
inline

Function to fit model on data stored in csv file

Parameters
file_name: csv file name
last_label: flag for whether label is in first or last column
epochs: number of epochs
learning_rate: learning rate
normalize: flag for whether to normalize data
slip_lines: number of lines to skip
batch_size: batch size for gradient descent (default = 32)
shuffle: flag for whether to shuffle data (default = true)
581  {
582  // Getting training data from csv file
583  auto data = this -> get_XY_from_csv(file_name, last_label, normalize, slip_lines);
584  // Fit the model on training data
585  this -> fit(data.first, data.second, epochs, learning_rate, batch_size, shuffle);
586  return;
587  }
Here is the call graph for this function:

◆ get_XY_from_csv()

std::pair<std::vector<std::vector<std::valarray<double> > >, std::vector<std::vector<std::valarray<double> > > > machine_learning::neural_network::NeuralNetwork::get_XY_from_csv ( const std::string &  file_name,
const bool &  last_label,
const bool &  normalize,
const int &  slip_lines = 1 
)
inline

Function to get X and Y from csv file (where X = data, Y = label)

Parameters
file_name: csv file name
last_label: flag for whether label is in first or last column
normalize: flag for whether to normalize data
slip_lines: number of lines to skip
Returns
returns pair of X and Y
390  {
391  std::ifstream in_file; // Ifstream to read file
392  in_file.open(file_name.c_str(), std::ios::in); // Open file
393  std::vector <std::vector<std::valarray<double>>> X, Y; // To store X and Y
394  std::string line; // To store each line
395  // Skip lines
396  for(int i = 0; i < slip_lines; i ++) {
397  std::getline(in_file, line, '\n'); // Ignore line
398  }
399  // While file has information
400  while(!in_file.eof() && std::getline(in_file, line, '\n'))
401  {
402  std::valarray <double> x_data, y_data; // To store single sample and label
403  std::stringstream ss(line); // Constructing stringstream from line
404  std::string token; // To store each token in line (seprated by ',')
405  while(std::getline(ss, token, ',')) { // For each token
406  // Insert numerical value of token in x_data
407  x_data = insert_element(x_data, std::stod(token));
408  }
409  // If label is in last column
410  if(last_label) {
411  y_data.resize(this -> layers.back().neurons);
412  // If task is classification
413  if(y_data.size() > 1) {
414  y_data[x_data[x_data.size() - 1]] = 1;
415  }
416  // If task is regrssion (of single value)
417  else {
418  y_data[0] = x_data[x_data.size() - 1];
419  }
420  x_data = pop_back(x_data); // Remove label from x_data
421  }
422  else {
423  y_data.resize(this -> layers.back().neurons);
424  // If task is classification
425  if(y_data.size() > 1) {
426  y_data[x_data[x_data.size() - 1]] = 1;
427  }
428  // If task is regrssion (of single value)
429  else {
430  y_data[0] = x_data[x_data.size() - 1];
431  }
432  x_data = pop_front(x_data); // Remove label from x_data
433  }
434  // Push collected X_data and y_data in X and Y
435  X.push_back({x_data});
436  Y.push_back({y_data});
437  }
438  in_file.close();
439  // Normalize training data if flag is set
440  if(normalize) {
441  // Scale data between 0 and 1 using min-max scaler
442  X = minmax_scaler(X, 0.01, 1.0);
443  }
444  return make_pair(X, Y); // Return pair of X and Y
445  }
Here is the call graph for this function:

◆ load_model()

NeuralNetwork machine_learning::neural_network::NeuralNetwork::load_model ( const std::string &  file_name)
inline

Function to load earlier saved model.

Parameters
file_name: file from which model will be loaded (*.model)
Returns
instance of NeuralNetwork class with pretrained weights
711  {
712  std::ifstream in_file; // Ifstream to read file
713  in_file.open(file_name.c_str()); // Openinig file
714  std::vector <std::pair<int, std::string>> config; // To store config
715  std::vector <std::vector<std::valarray<double>>> kernals; // To store pretrained kernals
716  // Loading model from saved file format
717  size_t total_layers = 0;
718  in_file >> total_layers;
719  for(size_t i = 0; i < total_layers; i++) {
720  int neurons = 0;
721  std::string activation;
722  size_t shape_a = 0, shape_b = 0;
724  in_file >> neurons >> activation >> shape_a >> shape_b;
725  for(size_t r = 0; r < shape_a; r++) {
726  std::valarray<double> row(shape_b);
727  for(size_t c = 0; c < shape_b; c++) {
728  in_file >> row[c];
729  }
730  kernal.push_back(row);
731  }
732  config.emplace_back(make_pair(neurons, activation));;
733  kernals.emplace_back(kernal);
734  }
735  std::cout << "INFO: Model loaded successfully" << std::endl;
736  return NeuralNetwork(config, kernals); // Return instance of NeuralNetwork class
737  }
Here is the call graph for this function:

◆ operator=() [1/2]

NeuralNetwork& machine_learning::neural_network::NeuralNetwork::operator= ( const NeuralNetwork &  model)
default

Copy assignment operator for class NeuralNetwork

◆ operator=() [2/2]

NeuralNetwork& machine_learning::neural_network::NeuralNetwork::operator= ( NeuralNetwork &&  )
default

Move assignment operator for class NeuralNetwork

◆ save_model()

void machine_learning::neural_network::NeuralNetwork::save_model ( const std::string &  _file_name)
inline

Function to save current model.

Parameters
_file_name: file name to save model (*.model)

Format in which model is saved:

total_layers, then for each layer from the 1st to the Nth neural_network::layers::DenseLayer: neurons, activation_name, kernal_shape, kernal_values

For Example, pretrained model with 3 layers:

3
4 none
4 4
1 0 0 0 
0 1 0 0 
0 0 1 0 
0 0 0 1 
6 relu
4 6
-1.88963 -3.61165 1.30757 -0.443906 -2.41039 -2.69653 
-0.684753 0.0891452 0.795294 -2.39619 2.73377 0.318202 
-2.91451 -4.43249 -0.804187 2.51995 -6.97524 -1.07049 
-0.571531 -1.81689 -1.24485 1.92264 -2.81322 1.01741 
3 sigmoid
6 3
0.390267 -0.391703 -0.0989607 
0.499234 -0.564539 -0.28097 
0.553386 -0.153974 -1.92493 
-2.01336 -0.0219682 1.44145 
1.72853 -0.465264 -0.705373 
-0.908409 -0.740547 0.376416 
639  {
640  std::string file_name = _file_name;
641  // Adding ".model" extension if it is not already there in name
642  if(file_name.find(".model") == file_name.npos) {
643  file_name += ".model";
644  }
645  std::ofstream out_file; // Ofstream to write in file
646  // Open file in out|trunc mode
647  out_file.open(file_name.c_str(), std::ofstream::out | std::ofstream::trunc);
648  /**
649  Format in which model is saved:
650 
651  total_layers
652  neurons(1st neural_network::layers::DenseLayer) activation_name(1st neural_network::layers::DenseLayer)
653  kernal_shape(1st neural_network::layers::DenseLayer)
654  kernal_values
655  .
656  .
657  .
658  neurons(Nth neural_network::layers::DenseLayer) activation_name(Nth neural_network::layers::DenseLayer)
659  kernal_shape(Nth neural_network::layers::DenseLayer)
660  kernal_value
661 
662  For Example, pretrained model with 3 layers:
663  <pre>
664  3
665  4 none
666  4 4
667  1 0 0 0
668  0 1 0 0
669  0 0 1 0
670  0 0 0 1
671  6 relu
672  4 6
673  -1.88963 -3.61165 1.30757 -0.443906 -2.41039 -2.69653
674  -0.684753 0.0891452 0.795294 -2.39619 2.73377 0.318202
675  -2.91451 -4.43249 -0.804187 2.51995 -6.97524 -1.07049
676  -0.571531 -1.81689 -1.24485 1.92264 -2.81322 1.01741
677  3 sigmoid
678  6 3
679  0.390267 -0.391703 -0.0989607
680  0.499234 -0.564539 -0.28097
681  0.553386 -0.153974 -1.92493
682  -2.01336 -0.0219682 1.44145
683  1.72853 -0.465264 -0.705373
684  -0.908409 -0.740547 0.376416
685  </pre>
686  */
687  // Saving model in the same format
688  out_file << layers.size();
689  out_file << std::endl;
690  for(const auto &layer : this -> layers) {
691  out_file << layer.neurons << ' ' << layer.activation << std::endl;
692  const auto shape = get_shape(layer.kernal);
693  out_file << shape.first << ' ' << shape.second << std::endl;
694  for(const auto &row : layer.kernal) {
695  for(const auto &val : row) {
696  out_file << val << ' ';
697  }
698  out_file << std::endl;
699  }
700  }
701  std::cout << "INFO: Model saved successfully with name : ";
702  std::cout << file_name << std::endl;
703  return;
704  }
Here is the call graph for this function:

◆ single_predict()

std::vector<std::valarray <double> > machine_learning::neural_network::NeuralNetwork::single_predict ( const std::vector< std::valarray< double >> &  X)
inline

Function to get prediction of model on single sample.

Parameters
X: array of feature vectors
Returns
returns predictions as vector
453  {
454  // Get activations of all layers
455  auto activations = this -> __detailed_single_prediction(X);
456  // Return activations of last layer (actual predicted values)
457  return activations.back();
458  }
Here is the call graph for this function:

◆ summary()

void machine_learning::neural_network::NeuralNetwork::summary ( )
inline

Function to print summary of the network.

742  {
743  // Printing Summary
744  std::cout << "===============================================================" << std::endl;
745  std::cout << "\t\t+ MODEL SUMMARY +\t\t\n";
746  std::cout << "===============================================================" << std::endl;
747  for(size_t i = 1; i <= layers.size(); i++) { // For every layer
748  std::cout << i << ")";
749  std::cout << " Neurons : " << layers[i - 1].neurons; // number of neurons
750  std::cout << ", Activation : " << layers[i - 1].activation; // activation
751  std::cout << ", Kernal Shape : " << get_shape(layers[i - 1].kernal); // kernal shape
752  std::cout << std::endl;
753  }
754  std::cout << "===============================================================" << std::endl;
755  return;
756  }
Here is the call graph for this function:

The documentation for this class was generated from the following file:
machine_learning::equal_shuffle
void equal_shuffle(std::vector< std::vector< std::valarray< T >> > &A, std::vector< std::vector< std::valarray< T >> > &B)
Definition: vector_ops.hpp:133
std::vector::resize
T resize(T... args)
machine_learning::apply_function
std::vector< std::valarray< T > > apply_function(const std::vector< std::valarray< T >> &A, T(*func)(const T &))
Definition: vector_ops.hpp:315
machine_learning::transpose
std::vector< std::valarray< T > > transpose(const std::vector< std::valarray< T >> &A)
Definition: vector_ops.hpp:363
std::string
STL class.
machine_learning::pop_back
std::valarray< T > pop_back(const std::valarray< T > &A)
Definition: vector_ops.hpp:117
machine_learning::pop_front
std::valarray< T > pop_front(const std::valarray< T > &A)
Definition: vector_ops.hpp:101
std::vector
STL class.
std::string::find
T find(T... args)
std::vector::size
T size(T... args)
std::stringstream
STL class.
sorting::shuffle
std::array< T, N > shuffle(std::array< T, N > arr)
Definition: bogo_sort.cpp:36
machine_learning::hadamard_product
std::vector< std::valarray< T > > hadamard_product(const std::vector< std::valarray< T >> &A, const std::vector< std::valarray< T >> &B)
Definition: vector_ops.hpp:466
machine_learning::multiply
std::vector< std::valarray< T > > multiply(const std::vector< std::valarray< T >> &A, const std::vector< std::valarray< T >> &B)
Definition: vector_ops.hpp:434
machine_learning::argmax
size_t argmax(const std::vector< std::valarray< T >> &A)
Definition: vector_ops.hpp:296
std::vector::push_back
T push_back(T... args)
machine_learning::neural_network::NeuralNetwork::evaluate
void evaluate(const std::vector< std::vector< std::valarray< double >>> &X, const std::vector< std::vector< std::valarray< double >>> &Y)
Definition: neural_network.cpp:594
machine_learning::neural_network::NeuralNetwork::single_predict
std::vector< std::valarray< double > > single_predict(const std::vector< std::valarray< double >> &X)
Definition: neural_network.cpp:453
std::cerr
std::ofstream
STL class.
machine_learning::neural_network::NeuralNetwork::fit
void fit(const std::vector< std::vector< std::valarray< double >>> &X_, const std::vector< std::vector< std::valarray< double >>> &Y_, const int &epochs=100, const double &learning_rate=0.01, const size_t &batch_size=32, const bool &shuffle=true)
Definition: neural_network.cpp:485
std::string::c_str
T c_str(T... args)
activations
Various activation functions used in Neural network.
machine_learning::insert_element
std::valarray< T > insert_element(const std::valarray< T > &A, const T &ele)
Definition: vector_ops.hpp:84
std::ifstream::close
T close(T... args)
std::valarray< double >
std::ifstream::open
T open(T... args)
machine_learning::neural_network::NeuralNetwork::NeuralNetwork
NeuralNetwork()=default
machine_learning::neural_network::NeuralNetwork::__detailed_single_prediction
std::vector< std::vector< std::valarray< double > > > __detailed_single_prediction(const std::vector< std::valarray< double >> &X)
Definition: neural_network.cpp:299
std::min
T min(T... args)
machine_learning::minmax_scaler
std::vector< std::vector< std::valarray< T > > > minmax_scaler(const std::vector< std::vector< std::valarray< T >>> &A, const T &low, const T &high)
Definition: vector_ops.hpp:265
data
int data[MAX]
test data
Definition: hash_search.cpp:24
std::vector::emplace_back
T emplace_back(T... args)
std::stod
T stod(T... args)
std::endl
T endl(T... args)
machine_learning::neural_network::NeuralNetwork::get_XY_from_csv
std::pair< std::vector< std::vector< std::valarray< double > > >, std::vector< std::vector< std::valarray< double > > > > get_XY_from_csv(const std::string &file_name, const bool &last_label, const bool &normalize, const int &slip_lines=1)
Definition: neural_network.cpp:387
std::vector::begin
T begin(T... args)
std::getline
T getline(T... args)
machine_learning::get_shape
std::pair< size_t, size_t > get_shape(const std::vector< std::valarray< T >> &A)
Definition: vector_ops.hpp:243
layers
This namespace contains layers used in MLP.
std::make_pair
T make_pair(T... args)
machine_learning::zeroes_initialization
void zeroes_initialization(std::vector< std::valarray< T >> &A, const std::pair< size_t, size_t > &shape)
Definition: vector_ops.hpp:209
std::ifstream::eof
T eof(T... args)
std::exit
T exit(T... args)
machine_learning::sum
T sum(const std::vector< std::valarray< T >> &A)
Definition: vector_ops.hpp:228
std::ifstream
STL class.
std::chrono::high_resolution_clock::now
T now(T... args)