spalera_sgd.hpp
Go to the documentation of this file.
1 
12 #ifndef MLPACK_CORE_OPTIMIZERS_SPALERA_SGD_SPALERA_SGD_HPP
13 #define MLPACK_CORE_OPTIMIZERS_SPALERA_SGD_SPALERA_SGD_HPP
14 
15 #include <mlpack/prereqs.hpp>
18 
19 namespace mlpack {
20 namespace optimization {
21 
87 template<typename DecayPolicyType = NoDecay>
89 {
90  public:
114  SPALeRASGD(const double stepSize = 0.01,
115  const size_t batchSize = 32,
116  const size_t maxIterations = 100000,
117  const double tolerance = 1e-5,
118  const double lambda = 0.01,
119  const double alpha = 0.001,
120  const double epsilon = 1e-6,
121  const double adaptRate = 3.10e-8,
122  const bool shuffle = true,
123  const DecayPolicyType& decayPolicy = DecayPolicyType(),
124  const bool resetPolicy = true);
125 
136  template<typename DecomposableFunctionType>
137  double Optimize(DecomposableFunctionType& function, arma::mat& iterate);
138 
140  size_t BatchSize() const { return batchSize; }
142  size_t& BatchSize() { return batchSize; }
143 
145  double StepSize() const { return stepSize; }
147  double& StepSize() { return stepSize; }
148 
150  size_t MaxIterations() const { return maxIterations; }
152  size_t& MaxIterations() { return maxIterations; }
153 
155  double Tolerance() const { return tolerance; }
157  double& Tolerance() { return tolerance; }
158 
160  double Alpha() const { return updatePolicy.Alpha(); }
162  double& Alpha() { return updatePolicy.Alpha(); }
163 
165  double AdaptRate() const { return updatePolicy.AdaptRate(); }
167  double& AdaptRate() { return updatePolicy.AdaptRate(); }
168 
170  bool Shuffle() const { return shuffle; }
172  bool& Shuffle() { return shuffle; }
173 
176  bool ResetPolicy() const { return resetPolicy; }
179  bool& ResetPolicy() { return resetPolicy; }
180 
182  SPALeRAStepsize UpdatePolicy() const { return updatePolicy; }
184  SPALeRAStepsize& UpdatePolicy() { return updatePolicy; }
185 
187  DecayPolicyType DecayPolicy() const { return decayPolicy; }
189  DecayPolicyType& DecayPolicy() { return decayPolicy; }
190 
191  private:
193  double stepSize;
194 
196  size_t batchSize;
197 
199  size_t maxIterations;
200 
202  double tolerance;
203 
205  double lambda;
206 
209  bool shuffle;
210 
212  SPALeRAStepsize updatePolicy;
213 
215  DecayPolicyType decayPolicy;
216 
219  bool resetPolicy;
220 };
221 
222 } // namespace optimization
223 } // namespace mlpack
224 
225 // Include implementation.
226 #include "spalera_sgd_impl.hpp"
227 
228 #endif
double Tolerance() const
Get the tolerance for termination.
bool & Shuffle()
Modify whether or not the individual functions are shuffled.
.hpp
Definition: add_to_po.hpp:21
SPALeRA Stochastic Gradient Descent is a technique for minimizing a function which can be expressed a...
Definition: spalera_sgd.hpp:88
SPALeRAStepsize & UpdatePolicy()
Modify the update policy.
bool & ResetPolicy()
Modify whether or not the update policy parameters are reset before Optimize call.
The core includes that mlpack expects; standard C++ includes and Armadillo.
bool Shuffle() const
Get whether or not the individual functions are shuffled.
SPALeRASGD(const double stepSize=0.01, const size_t batchSize=32, const size_t maxIterations=100000, const double tolerance=1e-5, const double lambda=0.01, const double alpha=0.001, const double epsilon=1e-6, const double adaptRate=3.10e-8, const bool shuffle=true, const DecayPolicyType &decayPolicy=DecayPolicyType(), const bool resetPolicy=true)
Construct the SPALeRASGD optimizer with the given function and parameters.
Definition of the SPALeRA stepsize technique, which implements a change detection mechanism with an a...
size_t BatchSize() const
Get the batch size.
double Optimize(DecomposableFunctionType &function, arma::mat &iterate)
Optimize the given function using SPALeRA SGD.
DecayPolicyType & DecayPolicy()
Modify the decay policy.
size_t MaxIterations() const
Get the maximum number of iterations (0 indicates no limit).
double & Alpha()
Modify the agnostic learning rate adaptation parameter.
double Alpha() const
Get the agnostic learning rate adaptation parameter.
double & Tolerance()
Modify the tolerance for termination.
double AdaptRate() const
Get the agnostic learning rate update rate.
see subsection cli_alt_reg_tut Alternate DET regularization The usual regularized error \f$R_\alpha(t)\f$ of a node \f$t\f$ is given by
Definition: det.txt:344
size_t & BatchSize()
Modify the batch size.
bool ResetPolicy() const
Get whether or not the update policy parameters are reset before Optimize call.
size_t & MaxIterations()
Modify the maximum number of iterations (0 indicates no limit).
double & AdaptRate()
Modify the agnostic learning rate update rate.
DecayPolicyType DecayPolicy() const
Get the decay policy.
double Alpha() const
Get the agnostic learning rate adaptation parameter.
double AdaptRate() const
Get the agnostic learning rate update rate.
double StepSize() const
Get the step size.
double & StepSize()
Modify the step size.
SPALeRAStepsize UpdatePolicy() const
Get the update policy.