/**
 * @file lars.hpp
 *
 * Definition of the LARS class, a stage-wise homotopy-based algorithm for
 * l1-regularized linear regression (LASSO).
 */
24 #ifndef MLPACK_METHODS_LARS_LARS_HPP
25 #define MLPACK_METHODS_LARS_LARS_HPP
26 
27 #include <mlpack/prereqs.hpp>
28 
29 namespace mlpack {
30 namespace regression {
31 
32 // beta is the estimator
33 // yHat is the prediction from the current estimator
34 
89 class LARS
90 {
91  public:
102  LARS(const bool useCholesky = false,
103  const double lambda1 = 0.0,
104  const double lambda2 = 0.0,
105  const double tolerance = 1e-16);
106 
119  LARS(const bool useCholesky,
120  const arma::mat& gramMatrix,
121  const double lambda1 = 0.0,
122  const double lambda2 = 0.0,
123  const double tolerance = 1e-16);
124 
140  LARS(const arma::mat& data,
141  const arma::rowvec& responses,
142  const bool transposeData = true,
143  const bool useCholesky = false,
144  const double lambda1 = 0.0,
145  const double lambda2 = 0.0,
146  const double tolerance = 1e-16);
147 
164  LARS(const arma::mat& data,
165  const arma::rowvec& responses,
166  const bool transposeData,
167  const bool useCholesky,
168  const arma::mat& gramMatrix,
169  const double lambda1 = 0.0,
170  const double lambda2 = 0.0,
171  const double tolerance = 1e-16);
172 
178  LARS(const LARS& other);
179 
185  LARS(LARS&& other);
186 
192  LARS& operator=(const LARS& other);
193 
199  LARS& operator=(LARS&& other);
200 
216  double Train(const arma::mat& data,
217  const arma::rowvec& responses,
218  arma::vec& beta,
219  const bool transposeData = true);
220 
235  double Train(const arma::mat& data,
236  const arma::rowvec& responses,
237  const bool transposeData = true);
238 
248  void Predict(const arma::mat& points,
249  arma::rowvec& predictions,
250  const bool rowMajor = false) const;
251 
253  const std::vector<size_t>& ActiveSet() const { return activeSet; }
254 
257  const std::vector<arma::vec>& BetaPath() const { return betaPath; }
258 
260  const arma::vec& Beta() const { return betaPath.back(); }
261 
264  const std::vector<double>& LambdaPath() const { return lambdaPath; }
265 
267  const arma::mat& MatUtriCholFactor() const { return matUtriCholFactor; }
268 
272  template<typename Archive>
273  void serialize(Archive& ar, const unsigned int /* version */);
274 
287  double ComputeError(const arma::mat& matX,
288  const arma::rowvec& y,
289  const bool rowMajor = false);
290 
291  private:
293  arma::mat matGramInternal;
294 
296  const arma::mat* matGram;
297 
299  arma::mat matUtriCholFactor;
300 
302  bool useCholesky;
303 
305  bool lasso;
307  double lambda1;
308 
310  bool elasticNet;
312  double lambda2;
313 
315  double tolerance;
316 
318  std::vector<arma::vec> betaPath;
319 
321  std::vector<double> lambdaPath;
322 
324  std::vector<size_t> activeSet;
325 
327  std::vector<bool> isActive;
328 
329  // Set of variables that are ignored (if any).
330 
332  std::vector<size_t> ignoreSet;
333 
335  std::vector<bool> isIgnored;
336 
342  void Deactivate(const size_t activeVarInd);
343 
349  void Activate(const size_t varInd);
350 
356  void Ignore(const size_t varInd);
357 
358  // compute "equiangular" direction in output space
359  void ComputeYHatDirection(const arma::mat& matX,
360  const arma::vec& betaDirection,
361  arma::vec& yHatDirection);
362 
363  // interpolate to compute last solution vector
364  void InterpolateBeta();
365 
366  void CholeskyInsert(const arma::vec& newX, const arma::mat& X);
367 
368  void CholeskyInsert(double sqNormNewX, const arma::vec& newGramCol);
369 
370  void GivensRotate(const arma::vec::fixed<2>& x,
371  arma::vec::fixed<2>& rotatedX,
372  arma::mat& G);
373 
374  void CholeskyDelete(const size_t colToKill);
375 };
376 
377 } // namespace regression
378 } // namespace mlpack
379 
380 // Include implementation of serialize().
381 #include "lars_impl.hpp"
382 
383 #endif
void serialize(Archive &ar, const unsigned int)
Serialize the LARS model.
Linear algebra utility functions, generally performed on matrices or vectors.
Definition: add_to_po.hpp:21
void Predict(const arma::mat &points, arma::rowvec &predictions, const bool rowMajor=false) const
Predict y_i for each data point in the given data matrix using the currently-trained LARS model.
The core includes that mlpack expects; standard C++ includes and Armadillo.
const std::vector< arma::vec > & BetaPath() const
Access the set of coefficients after each iteration; the solution is the last element.
Definition: lars.hpp:257
double Train(const arma::mat &data, const arma::rowvec &responses, arma::vec &beta, const bool transposeData=true)
Run LARS.
LARS & operator=(const LARS &other)
Copy the given LARS object.
LARS(const bool useCholesky=false, const double lambda1=0.0, const double lambda2=0.0, const double tolerance=1e-16)
Set the parameters to LARS.
An implementation of LARS, a stage-wise homotopy-based algorithm for l1-regularized linear regression (LASSO).
Definition: lars.hpp:89
const arma::mat & MatUtriCholFactor() const
Access the upper triangular cholesky factor.
Definition: lars.hpp:267
double ComputeError(const arma::mat &matX, const arma::rowvec &y, const bool rowMajor=false)
Compute cost error of the given data matrix using the currently-trained LARS model.
const std::vector< double > & LambdaPath() const
Access the set of values for lambda1 after each iteration; the solution is the last element.
Definition: lars.hpp:264
const std::vector< size_t > & ActiveSet() const
Access the set of active dimensions.
Definition: lars.hpp:253
const arma::vec & Beta() const
Access the solution coefficients.
Definition: lars.hpp:260