\section{R\+BM$<$ Initialization\+Rule\+Type, Data\+Type, Policy\+Type $>$ Class Template Reference}
\label{classmlpack_1_1ann_1_1RBM}\index{R\+B\+M$<$ Initialization\+Rule\+Type, Data\+Type, Policy\+Type $>$@{R\+B\+M$<$ Initialization\+Rule\+Type, Data\+Type, Policy\+Type $>$}}


The implementation of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} module.  


\subsection*{Public Types}
\begin{DoxyCompactItemize}
\item 
typedef Data\+Type\+::elem\+\_\+type \textbf{ Elem\+Type}
\item 
using \textbf{ Network\+Type} = \textbf{ R\+BM}$<$ Initialization\+Rule\+Type, Data\+Type, Policy\+Type $>$
\end{DoxyCompactItemize}
\subsection*{Public Member Functions}
\begin{DoxyCompactItemize}
\item 
\textbf{ R\+BM} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ predictors, Initialization\+Rule\+Type initialize\+Rule, const size\+\_\+t visible\+Size, const size\+\_\+t hidden\+Size, const size\+\_\+t batch\+Size=1, const size\+\_\+t num\+Steps=1, const size\+\_\+t neg\+Steps=1, const size\+\_\+t pool\+Size=2, const \textbf{ Elem\+Type} slab\+Penalty=8, const \textbf{ Elem\+Type} radius=1, const bool persistence=false)
\begin{DoxyCompactList}\small\item\em Initialize all the parameters of the network using initialize\+Rule. \end{DoxyCompactList}\item 
double \textbf{ Evaluate} (const arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&parameters, const size\+\_\+t i, const size\+\_\+t batch\+Size)
\begin{DoxyCompactList}\small\item\em Evaluate the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network with the given parameters. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Binary\+R\+BM} $>$\+::value, double $>$\+::type \textbf{ Free\+Energy} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&input)
\begin{DoxyCompactList}\small\item\em This function calculates the free energy of the \doxyref{Binary\+R\+BM}{p.}{classmlpack_1_1ann_1_1BinaryRBM}. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, double $>$\+::type \textbf{ Free\+Energy} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&input)
\begin{DoxyCompactList}\small\item\em This function calculates the free energy of the \doxyref{Spike\+Slab\+R\+BM}{p.}{classmlpack_1_1ann_1_1SpikeSlabRBM}. \end{DoxyCompactList}\item 
void \textbf{ Gibbs} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&input, arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&output, const size\+\_\+t steps=S\+I\+Z\+E\+\_\+\+M\+AX)
\begin{DoxyCompactList}\small\item\em This function does the k-\/step Gibbs Sampling. \end{DoxyCompactList}\item 
void \textbf{ Gradient} (const arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&parameters, const size\+\_\+t i, arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&gradient, const size\+\_\+t batch\+Size)
\begin{DoxyCompactList}\small\item\em Calculates the gradients for the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network. \end{DoxyCompactList}\item 
Data\+Type const  \& \textbf{ Hidden\+Bias} () const
\begin{DoxyCompactList}\small\item\em Return the hidden bias of the network. \end{DoxyCompactList}\item 
Data\+Type \& \textbf{ Hidden\+Bias} ()
\begin{DoxyCompactList}\small\item\em Modify the hidden bias of the network. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Binary\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Hidden\+Mean} (Data\+Type \&\&input, Data\+Type \&\&output)
\begin{DoxyCompactList}\small\item\em The function calculates the mean for the hidden layer. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Hidden\+Mean} (Data\+Type \&\&input, Data\+Type \&\&output)
\begin{DoxyCompactList}\small\item\em The function calculates the mean of the Normal distribution of P(s$\vert$v, h). \end{DoxyCompactList}\item 
size\+\_\+t const  \& \textbf{ Hidden\+Size} () const
\begin{DoxyCompactList}\small\item\em Get the hidden size. \end{DoxyCompactList}\item 
size\+\_\+t \textbf{ Num\+Functions} () const
\begin{DoxyCompactList}\small\item\em Return the number of separable functions (the number of predictor points). \end{DoxyCompactList}\item 
size\+\_\+t \textbf{ Num\+Steps} () const
\begin{DoxyCompactList}\small\item\em Return the number of steps of Gibbs Sampling. \end{DoxyCompactList}\item 
const arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \& \textbf{ Parameters} () const
\begin{DoxyCompactList}\small\item\em Return the parameters of the network. \end{DoxyCompactList}\item 
arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \& \textbf{ Parameters} ()
\begin{DoxyCompactList}\small\item\em Modify the parameters of the network. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Binary\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Phase} (Data\+Type \&\&input, Data\+Type \&\&gradient)
\begin{DoxyCompactList}\small\item\em Calculates the gradient of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network on the provided input. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Phase} (Data\+Type \&\&input, Data\+Type \&\&gradient)
\begin{DoxyCompactList}\small\item\em Calculates the gradient of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network on the provided input. \end{DoxyCompactList}\item 
size\+\_\+t const  \& \textbf{ Pool\+Size} () const
\begin{DoxyCompactList}\small\item\em Get the pool size. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Binary\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Reset} ()
\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Reset} ()
\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Binary\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Sample\+Hidden} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&input, arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&output)
\begin{DoxyCompactList}\small\item\em This function samples the hidden layer given the visible layer using Bernoulli function. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Sample\+Hidden} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&input, arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&output)
\begin{DoxyCompactList}\small\item\em This function samples the slab outputs from the Normal distribution with mean given by\+: $h_i \ast \alpha^{-1} \ast W_i^T \ast v$ and variance\+: $\alpha^{-1}$. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Sample\+Slab} (Data\+Type \&\&slab\+Mean, Data\+Type \&\&slab)
\begin{DoxyCompactList}\small\item\em The function samples from the Normal distribution of P(s$\vert$v, h), where the mean is given by\+: $h_i \ast \alpha^{-1} \ast W_i^T \ast v$ and the variance is given by\+: $\alpha^{-1}$. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Sample\+Spike} (Data\+Type \&\&spike\+Mean, Data\+Type \&\&spike)
\begin{DoxyCompactList}\small\item\em The function samples the spike function using Bernoulli distribution. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Binary\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Sample\+Visible} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&input, arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&output)
\begin{DoxyCompactList}\small\item\em This function samples the visible layer given the hidden layer using Bernoulli function. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Sample\+Visible} (arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&input, arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&output)
\begin{DoxyCompactList}\small\item\em This function samples the slab outputs from the Normal distribution with mean given by\+: $h_i \ast \alpha^{-1} \ast W_i^T \ast v$ and variance\+: $\alpha^{-1}$. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Archive $>$ }\\void \textbf{ serialize} (Archive \&ar, const unsigned int)
\begin{DoxyCompactList}\small\item\em Serialize the model. \end{DoxyCompactList}\item 
void \textbf{ Shuffle} ()
\begin{DoxyCompactList}\small\item\em Shuffle the order of function visitation. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Slab\+Mean} (Data\+Type \&\&visible, Data\+Type \&\&spike, Data\+Type \&\&slab\+Mean)
\begin{DoxyCompactList}\small\item\em The function calculates the mean of the Normal distribution of P(s$\vert$v, h), where the mean is given by\+: $h_i \ast \alpha^{-1} \ast W_i^T \ast v$. \end{DoxyCompactList}\item 
\textbf{ Elem\+Type} const  \& \textbf{ Slab\+Penalty} () const
\begin{DoxyCompactList}\small\item\em Get the regularizer associated with slab variables. \end{DoxyCompactList}\item 
Data\+Type const  \& \textbf{ Spike\+Bias} () const
\begin{DoxyCompactList}\small\item\em Get the regularizer associated with spike variables. \end{DoxyCompactList}\item 
Data\+Type \& \textbf{ Spike\+Bias} ()
\begin{DoxyCompactList}\small\item\em Modify the regularizer associated with spike variables. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Spike\+Mean} (Data\+Type \&\&visible, Data\+Type \&\&spike\+Mean)
\begin{DoxyCompactList}\small\item\em The function calculates the mean of the distribution P(h$\vert$v), where the mean is given by\+: $\mathrm{sigm}(v^T \ast W_i \ast \alpha^{-1} \ast W_i^T \ast v + b_i)$. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Optimizer\+Type , typename... Callback\+Type$>$ }\\double \textbf{ Train} (Optimizer\+Type \&optimizer, Callback\+Type \&\&... callbacks)
\begin{DoxyCompactList}\small\item\em Train the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} on the given input data. \end{DoxyCompactList}\item 
Data\+Type const  \& \textbf{ Visible\+Bias} () const
\begin{DoxyCompactList}\small\item\em Return the visible bias of the network. \end{DoxyCompactList}\item 
Data\+Type \& \textbf{ Visible\+Bias} ()
\begin{DoxyCompactList}\small\item\em Modify the visible bias of the network. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Binary\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Visible\+Mean} (Data\+Type \&\&input, Data\+Type \&\&output)
\begin{DoxyCompactList}\small\item\em The function calculates the mean for the visible layer. \end{DoxyCompactList}\item 
{\footnotesize template$<$typename Policy  = Policy\+Type$>$ }\\std\+::enable\+\_\+if$<$ std\+::is\+\_\+same$<$ Policy, \textbf{ Spike\+Slab\+R\+BM} $>$\+::value, void $>$\+::type \textbf{ Visible\+Mean} (Data\+Type \&\&input, Data\+Type \&\&output)
\begin{DoxyCompactList}\small\item\em The function calculates the mean of the Normal distribution of P(v$\vert$s, h). \end{DoxyCompactList}\item 
Data\+Type const  \& \textbf{ Visible\+Penalty} () const
\begin{DoxyCompactList}\small\item\em Get the regularizer associated with visible variables. \end{DoxyCompactList}\item 
Data\+Type \& \textbf{ Visible\+Penalty} ()
\begin{DoxyCompactList}\small\item\em Modify the regularizer associated with visible variables. \end{DoxyCompactList}\item 
size\+\_\+t const  \& \textbf{ Visible\+Size} () const
\begin{DoxyCompactList}\small\item\em Get the visible size. \end{DoxyCompactList}\item 
arma\+::\+Cube$<$ \textbf{ Elem\+Type} $>$ const  \& \textbf{ Weight} () const
\begin{DoxyCompactList}\small\item\em Get the weights of the network. \end{DoxyCompactList}\item 
arma\+::\+Cube$<$ \textbf{ Elem\+Type} $>$ \& \textbf{ Weight} ()
\begin{DoxyCompactList}\small\item\em Modify the weights of the network. \end{DoxyCompactList}\end{DoxyCompactItemize}


\subsection{Detailed Description}
\subsubsection*{template$<$typename Initialization\+Rule\+Type, typename Data\+Type = arma\+::mat, typename Policy\+Type = Binary\+R\+BM$>$\newline
class mlpack\+::ann\+::\+R\+B\+M$<$ Initialization\+Rule\+Type, Data\+Type, Policy\+Type $>$}

The implementation of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} module. 

A Restricted Boltzmann Machines (\doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM}) is a generative stochastic artificial neural network that can learn a probability distribution over its set of inputs. R\+B\+Ms have found applications in dimensionality reduction, classification, collaborative filtering, feature learning and topic modelling. They can be trained in either supervised or unsupervised ways, depending on the task. They are a variant of Boltzmann machines, with the restriction that the neurons must form a bipartite graph.


\begin{DoxyTemplParams}{Template Parameters}
{\em Initialization\+Rule\+Type} & Rule used to initialize the network. \\
\hline
{\em Data\+Type} & The type of matrix to be used. \\
\hline
{\em Policy\+Type} & The \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} variant to be used (\doxyref{Binary\+R\+BM}{p.}{classmlpack_1_1ann_1_1BinaryRBM} or \doxyref{Spike\+Slab\+R\+BM}{p.}{classmlpack_1_1ann_1_1SpikeSlabRBM}). \\
\hline
\end{DoxyTemplParams}


Definition at line 38 of file rbm.\+hpp.



\subsection{Member Typedef Documentation}
\mbox{\label{classmlpack_1_1ann_1_1RBM_aeb15e37d26993b88da027a0b8247ee55}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Elem\+Type@{Elem\+Type}}
\index{Elem\+Type@{Elem\+Type}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Elem\+Type}
{\footnotesize\ttfamily typedef Data\+Type\+::elem\+\_\+type \textbf{ Elem\+Type}}



Definition at line 42 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a1222e7405946cb6c406b8ab61b88950a}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Network\+Type@{Network\+Type}}
\index{Network\+Type@{Network\+Type}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Network\+Type}
{\footnotesize\ttfamily using \textbf{ Network\+Type} =  \textbf{ R\+BM}$<$Initialization\+Rule\+Type, Data\+Type, Policy\+Type$>$}



Definition at line 41 of file rbm.\+hpp.



\subsection{Constructor \& Destructor Documentation}
\mbox{\label{classmlpack_1_1ann_1_1RBM_af96c1d7469e25128c1391dee9da7fcd8}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!R\+BM@{R\+BM}}
\index{R\+BM@{R\+BM}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{R\+B\+M()}
{\footnotesize\ttfamily \textbf{ R\+BM} (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$}]{predictors,  }\item[{Initialization\+Rule\+Type}]{initialize\+Rule,  }\item[{const size\+\_\+t}]{visible\+Size,  }\item[{const size\+\_\+t}]{hidden\+Size,  }\item[{const size\+\_\+t}]{batch\+Size = {\ttfamily 1},  }\item[{const size\+\_\+t}]{num\+Steps = {\ttfamily 1},  }\item[{const size\+\_\+t}]{neg\+Steps = {\ttfamily 1},  }\item[{const size\+\_\+t}]{pool\+Size = {\ttfamily 2},  }\item[{const \textbf{ Elem\+Type}}]{slab\+Penalty = {\ttfamily 8},  }\item[{const \textbf{ Elem\+Type}}]{radius = {\ttfamily 1},  }\item[{const bool}]{persistence = {\ttfamily false} }\end{DoxyParamCaption})}



Initialize all the parameters of the network using initialize\+Rule. 


\begin{DoxyParams}{Parameters}
{\em predictors} & Training data to be used. \\
\hline
{\em initialize\+Rule} & Initialization\+Rule object for initializing the network parameter. \\
\hline
{\em visible\+Size} & Number of visible neurons. \\
\hline
{\em hidden\+Size} & Number of hidden neurons. \\
\hline
{\em batch\+Size} & Batch size to be used for training. \\
\hline
{\em num\+Steps} & Number of Gibbs Sampling steps. \\
\hline
{\em neg\+Steps} & Number of negative samples to average negative gradient. \\
\hline
{\em pool\+Size} & Number of hidden neurons to pool together. \\
\hline
{\em slab\+Penalty} & Regularizer of slab variables. \\
\hline
{\em radius} & Feasible regions for visible layer samples. \\
\hline
{\em persistence} & Indicates whether to use Persistent CD or not. \\
\hline
\end{DoxyParams}


\subsection{Member Function Documentation}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a0bcf0739f699f1147677f759067127eb}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Evaluate@{Evaluate}}
\index{Evaluate@{Evaluate}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Evaluate()}
{\footnotesize\ttfamily double Evaluate (\begin{DoxyParamCaption}\item[{const arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&}]{parameters,  }\item[{const size\+\_\+t}]{i,  }\item[{const size\+\_\+t}]{batch\+Size }\end{DoxyParamCaption})}



Evaluate the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network with the given parameters. 

The function is needed for monitoring the progress of the network.


\begin{DoxyParams}{Parameters}
{\em parameters} & Matrix model parameters. \\
\hline
{\em i} & Index of the data point. \\
\hline
{\em batch\+Size} & Variable to store the present number of inputs. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_abd4e0dacb23a82b1d45d4ce084a9e292}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Free\+Energy@{Free\+Energy}}
\index{Free\+Energy@{Free\+Energy}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Free\+Energy()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Binary\+R\+BM}$>$\+::value, double$>$\+::type Free\+Energy (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{input }\end{DoxyParamCaption})}



This function calculates the free energy of the \doxyref{Binary\+R\+BM}{p.}{classmlpack_1_1ann_1_1BinaryRBM}. 

The free energy is given by\+: $-b^T v - \sum_{j=1}^{M} \log\left(1 + e^{c_j + v^T W_j}\right)$.


\begin{DoxyParams}{Parameters}
{\em input} & The visible neurons. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_af773b0134f02d77dedd2c0dbd77aa95a}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Free\+Energy@{Free\+Energy}}
\index{Free\+Energy@{Free\+Energy}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Free\+Energy()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, double$>$\+::type Free\+Energy (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{input }\end{DoxyParamCaption})}



This function calculates the free energy of the \doxyref{Spike\+Slab\+R\+BM}{p.}{classmlpack_1_1ann_1_1SpikeSlabRBM}. 

The free energy is given by\+: $v^T \Lambda v - \sum_{i=1}^{N} \log \sqrt{\frac{(2\pi)^K}{\prod_{m=1}^{K} \alpha_m}} - \sum_{i=1}^{N} \log\left(1 + \exp\left(b_i + \sum_{m=1}^{K} \frac{(v^T w_i)_m^2}{2 \alpha_m}\right)\right)$


\begin{DoxyParams}{Parameters}
{\em input} & The visible layer neurons. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a060615966a70efe4e84ed52bb9a91c6b}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Gibbs@{Gibbs}}
\index{Gibbs@{Gibbs}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Gibbs()}
{\footnotesize\ttfamily void Gibbs (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{input,  }\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{output,  }\item[{const size\+\_\+t}]{steps = {\ttfamily SIZE\+\_\+MAX} }\end{DoxyParamCaption})}



This function does the k-\/step Gibbs Sampling. 


\begin{DoxyParams}{Parameters}
{\em input} & Input to the Gibbs function. \\
\hline
{\em output} & Used for storing the negative sample. \\
\hline
{\em steps} & Number of Gibbs Sampling steps taken. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_aab9fd0b4b53e35da7c2e5c5e1ade7bc2}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Gradient@{Gradient}}
\index{Gradient@{Gradient}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Gradient()}
{\footnotesize\ttfamily void Gradient (\begin{DoxyParamCaption}\item[{const arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&}]{parameters,  }\item[{const size\+\_\+t}]{i,  }\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&}]{gradient,  }\item[{const size\+\_\+t}]{batch\+Size }\end{DoxyParamCaption})}



Calculates the gradients for the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network. 


\begin{DoxyParams}{Parameters}
{\em parameters} & The current parameters of the network. \\
\hline
{\em i} & Index of the data point. \\
\hline
{\em gradient} & Variable to store the present gradient. \\
\hline
{\em batch\+Size} & Variable to store the present number of inputs. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a7834c6befe9e3b5bd950ed64bfc24225}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Hidden\+Bias@{Hidden\+Bias}}
\index{Hidden\+Bias@{Hidden\+Bias}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Hidden\+Bias()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily Data\+Type const\& Hidden\+Bias (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Return the hidden bias of the network. 



Definition at line 354 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a95ee1068552bc1e1f05438ed13786ab3}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Hidden\+Bias@{Hidden\+Bias}}
\index{Hidden\+Bias@{Hidden\+Bias}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Hidden\+Bias()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily Data\+Type\& Hidden\+Bias (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})\hspace{0.3cm}{\ttfamily [inline]}}



Modify the hidden bias of the network. 



Definition at line 356 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a3935695747f2deabc61047eeb3044233}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Hidden\+Mean@{Hidden\+Mean}}
\index{Hidden\+Mean@{Hidden\+Mean}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Hidden\+Mean()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Binary\+R\+BM}$>$\+::value, void$>$\+::type Hidden\+Mean (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{input,  }\item[{Data\+Type \&\&}]{output }\end{DoxyParamCaption})}



The function calculates the mean for the hidden layer. 


\begin{DoxyParams}{Parameters}
{\em input} & Visible neurons. \\
\hline
{\em output} & Hidden neuron activations. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a1726522e53ca7d6951615834a9431fb1}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Hidden\+Mean@{Hidden\+Mean}}
\index{Hidden\+Mean@{Hidden\+Mean}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Hidden\+Mean()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Hidden\+Mean (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{input,  }\item[{Data\+Type \&\&}]{output }\end{DoxyParamCaption})}



The function calculates the mean of the Normal distribution of P(s$\vert$v, h). 

The mean is given by\+: $h_i \ast \alpha^{-1} \ast W_i^T \ast v$. The variance is given by\+: $\alpha^{-1}$.


\begin{DoxyParams}{Parameters}
{\em input} & Visible layer neurons. \\
\hline
{\em output} & Consists of both the spike samples and slab samples. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a2b00f48e3f5906e07a24615a572bf440}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Hidden\+Size@{Hidden\+Size}}
\index{Hidden\+Size@{Hidden\+Size}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Hidden\+Size()}
{\footnotesize\ttfamily size\+\_\+t const\& Hidden\+Size (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Get the hidden size. 



Definition at line 374 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a1fa76af34a6e3ea927b307f0c318ee4b}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Num\+Functions@{Num\+Functions}}
\index{Num\+Functions@{Num\+Functions}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Num\+Functions()}
{\footnotesize\ttfamily size\+\_\+t Num\+Functions (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Return the number of separable functions (the number of predictor points). 



Definition at line 333 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_adbc0b25e9e3d013f7886ea72998f9d8d}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Num\+Steps@{Num\+Steps}}
\index{Num\+Steps@{Num\+Steps}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Num\+Steps()}
{\footnotesize\ttfamily size\+\_\+t Num\+Steps (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Return the number of steps of Gibbs Sampling. 



Definition at line 336 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_ab1b2789fef2a390f863d39250c209a43}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Parameters@{Parameters}}
\index{Parameters@{Parameters}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Parameters()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily const arma\+::\+Mat$<$\textbf{ Elem\+Type}$>$\& Parameters (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Return the parameters of the network. 



Definition at line 339 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_ab2414685b4049c8d634fe67103f283a6}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Parameters@{Parameters}}
\index{Parameters@{Parameters}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Parameters()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily arma\+::\+Mat$<$\textbf{ Elem\+Type}$>$\& Parameters (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})\hspace{0.3cm}{\ttfamily [inline]}}



Modify the parameters of the network. 



Definition at line 341 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a8d090c67d59d42e465f3ce72d8bb64a4}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Phase@{Phase}}
\index{Phase@{Phase}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Phase()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Binary\+R\+BM}$>$\+::value, void$>$\+::type Phase (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{input,  }\item[{Data\+Type \&\&}]{gradient }\end{DoxyParamCaption})}



Calculates the gradient of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network on the provided input. 


\begin{DoxyParams}{Parameters}
{\em input} & The provided input data. \\
\hline
{\em gradient} & Stores the gradient of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_ac0910cb50a773551f9083f458af9a61e}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Phase@{Phase}}
\index{Phase@{Phase}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Phase()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Phase (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{input,  }\item[{Data\+Type \&\&}]{gradient }\end{DoxyParamCaption})}



Calculates the gradient of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network on the provided input. 


\begin{DoxyParams}{Parameters}
{\em input} & The provided input data. \\
\hline
{\em gradient} & Stores the gradient of the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} network. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_ac94d648f4d497a1197361acf2ab0a590}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Pool\+Size@{Pool\+Size}}
\index{Pool\+Size@{Pool\+Size}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Pool\+Size()}
{\footnotesize\ttfamily size\+\_\+t const\& Pool\+Size (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Get the pool size. 



Definition at line 376 of file rbm.\+hpp.



References R\+B\+M$<$ Initialization\+Rule\+Type, Data\+Type, Policy\+Type $>$\+::serialize().

\mbox{\label{classmlpack_1_1ann_1_1RBM_aa3987a643d0318b9968c089b840901ab}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Reset@{Reset}}
\index{Reset@{Reset}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Reset()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Binary\+R\+BM}$>$\+::value, void$>$\+::type Reset (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})}

\mbox{\label{classmlpack_1_1ann_1_1RBM_a912cb0ab790986b9592200db039278af}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Reset@{Reset}}
\index{Reset@{Reset}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Reset()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Reset (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})}

\mbox{\label{classmlpack_1_1ann_1_1RBM_afc473d3b41fa20b2d49c0b80eb332605}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Sample\+Hidden@{Sample\+Hidden}}
\index{Sample\+Hidden@{Sample\+Hidden}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Sample\+Hidden()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Binary\+R\+BM}$>$\+::value, void$>$\+::type Sample\+Hidden (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{input,  }\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{output }\end{DoxyParamCaption})}



This function samples the hidden layer given the visible layer using Bernoulli function. 


\begin{DoxyParams}{Parameters}
{\em input} & Visible layer input. \\
\hline
{\em output} & The sampled hidden layer. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a3eb267351ec137747ec87dfcf511db66}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Sample\+Hidden@{Sample\+Hidden}}
\index{Sample\+Hidden@{Sample\+Hidden}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Sample\+Hidden()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Sample\+Hidden (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{input,  }\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{output }\end{DoxyParamCaption})}



This function samples the slab outputs from the Normal distribution with mean given by\+: $h_i \alpha^{-1} W_i^T v$ and variance\+: $\alpha^{-1}$. 


\begin{DoxyParams}{Parameters}
{\em input} & Consists of both visible and spike variables. \\
\hline
{\em output} & Sampled slab neurons. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_af68a72e3b0bd35bed53d308d44f36454}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Sample\+Slab@{Sample\+Slab}}
\index{Sample\+Slab@{Sample\+Slab}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Sample\+Slab()}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Sample\+Slab (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{slab\+Mean,  }\item[{Data\+Type \&\&}]{slab }\end{DoxyParamCaption})}



The function samples from the Normal distribution of P(s$\vert$v, h), where the mean is given by\+: $h_i \alpha^{-1} W_i^T v$ and variance is given by\+: $\alpha^{-1}$. 


\begin{DoxyParams}{Parameters}
{\em slab\+Mean} & Mean of the Normal distribution of the slab neurons. \\
\hline
{\em slab} & Sampled slab variable from the Normal distribution. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_abd8a463b61aa518f3372a6d01fcecc77}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Sample\+Spike@{Sample\+Spike}}
\index{Sample\+Spike@{Sample\+Spike}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Sample\+Spike()}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Sample\+Spike (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{spike\+Mean,  }\item[{Data\+Type \&\&}]{spike }\end{DoxyParamCaption})}



The function samples the spike function using Bernoulli distribution. 


\begin{DoxyParams}{Parameters}
{\em spike\+Mean} & Indicates P(h$\vert$v). \\
\hline
{\em spike} & Sampled binary spike variables. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a8b6ec478f0aca78edb58951bc1b61a8c}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Sample\+Visible@{Sample\+Visible}}
\index{Sample\+Visible@{Sample\+Visible}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Sample\+Visible()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Binary\+R\+BM}$>$\+::value, void$>$\+::type Sample\+Visible (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{input,  }\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{output }\end{DoxyParamCaption})}



This function samples the visible layer given the hidden layer using Bernoulli function. 


\begin{DoxyParams}{Parameters}
{\em input} & Hidden layer of the network. \\
\hline
{\em output} & The sampled visible layer. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_af50b0495aa0c5822a1e2bc5ef7672cd0}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Sample\+Visible@{Sample\+Visible}}
\index{Sample\+Visible@{Sample\+Visible}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Sample\+Visible()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Sample\+Visible (\begin{DoxyParamCaption}\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{input,  }\item[{arma\+::\+Mat$<$ \textbf{ Elem\+Type} $>$ \&\&}]{output }\end{DoxyParamCaption})}



Sample Hidden function samples the slab outputs from the Normal distribution with mean given by\+: $h_i \alpha^{-1} W_i^T v$ and variance\+: $\alpha^{-1}$. 


\begin{DoxyParams}{Parameters}
{\em input} & Hidden layer of the network. \\
\hline
{\em output} & The sampled visible layer. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_af0dd9205158ccf7bcfcd8ff81f79c927}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!serialize@{serialize}}
\index{serialize@{serialize}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{serialize()}
{\footnotesize\ttfamily void serialize (\begin{DoxyParamCaption}\item[{Archive \&}]{ar,  }\item[{const unsigned}]{int }\end{DoxyParamCaption})}



Serialize the model. 



Referenced by R\+B\+M$<$ Initialization\+Rule\+Type, Data\+Type, Policy\+Type $>$\+::\+Pool\+Size().

\mbox{\label{classmlpack_1_1ann_1_1RBM_a2697cc8b37d7bca7c055228382a9b208}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Shuffle@{Shuffle}}
\index{Shuffle@{Shuffle}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Shuffle()}
{\footnotesize\ttfamily void Shuffle (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})}



Shuffle the order of function visitation. 

This may be called by the optimizer. \mbox{\label{classmlpack_1_1ann_1_1RBM_ad407e98fa94eba58fda9c8fbc4b50c17}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Slab\+Mean@{Slab\+Mean}}
\index{Slab\+Mean@{Slab\+Mean}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Slab\+Mean()}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Slab\+Mean (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{visible,  }\item[{Data\+Type \&\&}]{spike,  }\item[{Data\+Type \&\&}]{slab\+Mean }\end{DoxyParamCaption})}



The function calculates the mean of Normal distribution of P(s$\vert$v, h), where the mean is given by\+: $h_i \alpha^{-1} W_i^T v$. 


\begin{DoxyParams}{Parameters}
{\em visible} & The visible layer neurons. \\
\hline
{\em spike} & The spike variables from hidden layer. \\
\hline
{\em slab\+Mean} & The mean of the Normal distribution of slab neurons. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a6f1cf88e0f2a58c078ea435c94d1b055}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Slab\+Penalty@{Slab\+Penalty}}
\index{Slab\+Penalty@{Slab\+Penalty}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Slab\+Penalty()}
{\footnotesize\ttfamily \textbf{ Elem\+Type} const\& Slab\+Penalty (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Get the regularizer associated with slab variables. 



Definition at line 364 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a6e0e561b035221128bcf78d1a7e84f88}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Spike\+Bias@{Spike\+Bias}}
\index{Spike\+Bias@{Spike\+Bias}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Spike\+Bias()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily Data\+Type const\& Spike\+Bias (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Get the regularizer associated with spike variables. 



Definition at line 359 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_ac3151aa09f96f582e7ebc29cbf235e47}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Spike\+Bias@{Spike\+Bias}}
\index{Spike\+Bias@{Spike\+Bias}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Spike\+Bias()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily Data\+Type\& Spike\+Bias (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})\hspace{0.3cm}{\ttfamily [inline]}}



Modify the regularizer associated with spike variables. 



Definition at line 361 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_aad5e509d60acb6326415863101f26a0b}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Spike\+Mean@{Spike\+Mean}}
\index{Spike\+Mean@{Spike\+Mean}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Spike\+Mean()}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Spike\+Mean (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{visible,  }\item[{Data\+Type \&\&}]{spike\+Mean }\end{DoxyParamCaption})}



The function calculates the mean of the distribution P(h$\vert$v), where mean is given by\+: $sigm(v^T W_i \alpha^{-1} W_i^T v + b_i)$. 


\begin{DoxyParams}{Parameters}
{\em visible} & The visible layer neurons. \\
\hline
{\em spike\+Mean} & Indicates P(h$\vert$v). \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_a179f2b52999c7885ec3799c32ef0c35d}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Train@{Train}}
\index{Train@{Train}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Train()}
{\footnotesize\ttfamily double Train (\begin{DoxyParamCaption}\item[{Optimizer\+Type \&}]{optimizer,  }\item[{Callback\+Type \&\&...}]{callbacks }\end{DoxyParamCaption})}



Train the \doxyref{R\+BM}{p.}{classmlpack_1_1ann_1_1RBM} on the given input data. 

This will use the existing model parameters as a starting point for the optimization. If this is not what you want, then you should access the parameters vector directly with \doxyref{Parameters()}{p.}{classmlpack_1_1ann_1_1RBM_ab2414685b4049c8d634fe67103f283a6} and modify it as desired.


\begin{DoxyTemplParams}{Template Parameters}
{\em Optimizer\+Type} & Type of optimizer to use to train the model. \\
\hline
{\em Callback\+Types} & Types of Callback functions. \\
\hline
\end{DoxyTemplParams}

\begin{DoxyParams}{Parameters}
{\em optimizer} & Optimizer type. \\
\hline
{\em callbacks} & Callback Functions for ensmallen optimizer {\ttfamily Optimizer\+Type}. See {\tt https\+://www.\+ensmallen.\+org/docs.\+html\#callback-\/documentation}. \\
\hline
\end{DoxyParams}
\begin{DoxyReturn}{Returns}
The final objective of the trained model (NaN or Inf on error). 
\end{DoxyReturn}
\mbox{\label{classmlpack_1_1ann_1_1RBM_ac59ec2d09be158222c9d8b066bba3863}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Visible\+Bias@{Visible\+Bias}}
\index{Visible\+Bias@{Visible\+Bias}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Visible\+Bias()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily Data\+Type const\& Visible\+Bias (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Return the visible bias of the network. 



Definition at line 349 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_aba4315371cc80605143f0e33bfb87b49}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Visible\+Bias@{Visible\+Bias}}
\index{Visible\+Bias@{Visible\+Bias}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Visible\+Bias()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily Data\+Type\& Visible\+Bias (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})\hspace{0.3cm}{\ttfamily [inline]}}



Modify the visible bias of the network. 



Definition at line 351 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a2bf8ae6e34c033e8e4b18db363de93e1}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Visible\+Mean@{Visible\+Mean}}
\index{Visible\+Mean@{Visible\+Mean}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Visible\+Mean()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Binary\+R\+BM}$>$\+::value, void$>$\+::type Visible\+Mean (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{input,  }\item[{Data\+Type \&\&}]{output }\end{DoxyParamCaption})}



The function calculates the mean for the visible layer. 


\begin{DoxyParams}{Parameters}
{\em input} & Hidden neurons from the hidden layer of the network. \\
\hline
{\em output} & Visible neuron activations. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_afbe466daf48798f260522204b1542349}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Visible\+Mean@{Visible\+Mean}}
\index{Visible\+Mean@{Visible\+Mean}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Visible\+Mean()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily std\+::enable\+\_\+if$<$std\+::is\+\_\+same$<$Policy, \textbf{ Spike\+Slab\+R\+BM}$>$\+::value, void$>$\+::type Visible\+Mean (\begin{DoxyParamCaption}\item[{Data\+Type \&\&}]{input,  }\item[{Data\+Type \&\&}]{output }\end{DoxyParamCaption})}



The function calculates the mean of the Normal distribution of P(v$\vert$s, h). 

The mean is given by\+: $\Lambda^{-1} \sum_{i=1}^{N} W_i s_i h_i$


\begin{DoxyParams}{Parameters}
{\em input} & Consists of both the spike and slab variables. \\
\hline
{\em output} & Mean of the Normal distribution. \\
\hline
\end{DoxyParams}
\mbox{\label{classmlpack_1_1ann_1_1RBM_ad6b6a25a3d7f8ece8bb5946a2572f008}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Visible\+Penalty@{Visible\+Penalty}}
\index{Visible\+Penalty@{Visible\+Penalty}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Visible\+Penalty()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily Data\+Type const\& Visible\+Penalty (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Get the regularizer associated with visible variables. 



Definition at line 367 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a0511c3420378ba5d12459aa782541f0f}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Visible\+Penalty@{Visible\+Penalty}}
\index{Visible\+Penalty@{Visible\+Penalty}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Visible\+Penalty()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily Data\+Type\& Visible\+Penalty (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})\hspace{0.3cm}{\ttfamily [inline]}}



Modify the regularizer associated with visible variables. 



Definition at line 369 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a993e0e378ebed9812bfe629c11d16638}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Visible\+Size@{Visible\+Size}}
\index{Visible\+Size@{Visible\+Size}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Visible\+Size()}
{\footnotesize\ttfamily size\+\_\+t const\& Visible\+Size (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Get the visible size. 



Definition at line 372 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a3f58eb63c1d08b5066089ec90c3a48e7}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Weight@{Weight}}
\index{Weight@{Weight}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Weight()\hspace{0.1cm}{\footnotesize\ttfamily [1/2]}}
{\footnotesize\ttfamily arma\+::\+Cube$<$\textbf{ Elem\+Type}$>$ const\& Weight (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption}) const\hspace{0.3cm}{\ttfamily [inline]}}



Get the weights of the network. 



Definition at line 344 of file rbm.\+hpp.

\mbox{\label{classmlpack_1_1ann_1_1RBM_a8307fcd9b76dafd62b60094a1a5f585d}} 
\index{mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}!Weight@{Weight}}
\index{Weight@{Weight}!mlpack\+::ann\+::\+R\+BM@{mlpack\+::ann\+::\+R\+BM}}
\subsubsection{Weight()\hspace{0.1cm}{\footnotesize\ttfamily [2/2]}}
{\footnotesize\ttfamily arma\+::\+Cube$<$\textbf{ Elem\+Type}$>$\& Weight (\begin{DoxyParamCaption}{ }\end{DoxyParamCaption})\hspace{0.3cm}{\ttfamily [inline]}}



Modify the weights of the network. 



Definition at line 346 of file rbm.\+hpp.



The documentation for this class was generated from the following file\+:\begin{DoxyCompactItemize}
\item 
/var/www/mlpack.\+ratml.\+org/mlpack.\+org/\+\_\+src/mlpack-\/3.\+3.\+0/src/mlpack/methods/ann/rbm/\textbf{ rbm.\+hpp}\end{DoxyCompactItemize}
