22 #ifndef MLPACK_CORE_OPTIMIZERS_ADAM_ADAM_HPP 23 #define MLPACK_CORE_OPTIMIZERS_ADAM_ADAM_HPP 36 namespace optimization {
86 template<
typename UpdateRule = AdamUpdate>
110 AdamType(
const double stepSize = 0.001,
111 const size_t batchSize = 32,
112 const double beta1 = 0.9,
113 const double beta2 = 0.999,
114 const double eps = 1e-8,
115 const size_t maxIterations = 100000,
116 const double tolerance = 1e-5,
117 const bool shuffle =
true);
129 template<
typename DecomposableFunctionType>
130 double Optimize(DecomposableFunctionType&
function, arma::mat& iterate)
132 return optimizer.
Optimize(
function, iterate);
196 #include "adam_impl.hpp" double & Tolerance()
Modify the tolerance for termination.
bool Shuffle() const
Get whether or not the individual functions are shuffled.
bool & Shuffle()
Modify whether or not the individual functions are shuffled.
const UpdatePolicyType & UpdatePolicy() const
Get the update policy.
size_t MaxIterations() const
Get the maximum number of iterations (0 indicates no limit).
double Epsilon() const
Get the value used to initialise the mean squared gradient parameter.
double & Beta2()
Modify the second moment coefficient.
size_t BatchSize() const
Get the batch size.
The core includes that mlpack expects: standard C++ includes and Armadillo.
size_t & BatchSize()
Modify the batch size.
double Tolerance() const
Get the tolerance for termination.
double Tolerance() const
Get the tolerance for termination.
size_t & MaxIterations()
Modify the maximum number of iterations (0 indicates no limit).
double Optimize(DecomposableFunctionType &function, arma::mat &iterate)
Optimize the given function using Adam.
size_t BatchSize() const
Get the batch size.
size_t MaxIterations() const
Get the maximum number of iterations (0 indicates no limit).
double StepSize() const
Get the step size.
Adam is an optimizer that computes individual adaptive learning rates for different parameters from estimates of first and second moments of the gradients.
bool Shuffle() const
Get whether or not the individual functions are shuffled.
AdamType(const double stepSize=0.001, const size_t batchSize=32, const double beta1=0.9, const double beta2=0.999, const double eps=1e-8, const size_t maxIterations=100000, const double tolerance=1e-5, const bool shuffle=true)
Construct the Adam optimizer with the given function and parameters.
double Beta2() const
Get the second moment coefficient.
double & StepSize()
Modify the step size.
double & Beta1()
Modify the smoothing parameter.
double & Epsilon()
Modify the value used to initialise the mean squared gradient parameter.
double Beta1() const
Get the smoothing parameter.
double StepSize() const
Get the step size.
double Optimize(DecomposableFunctionType &function, arma::mat &iterate)
Optimize the given function using stochastic gradient descent.