transposed_convolution.hpp
Go to the documentation of this file.
1 
13 #ifndef MLPACK_METHODS_ANN_LAYER_TRANSPOSED_CONVOLUTION_HPP
14 #define MLPACK_METHODS_ANN_LAYER_TRANSPOSED_CONVOLUTION_HPP
15 
16 #include <mlpack/prereqs.hpp>
17 
23 
24 #include "layer_types.hpp"
25 #include "padding.hpp"
26 
27 namespace mlpack {
28 namespace ann {
29 
42 template <
43  typename ForwardConvolutionRule = NaiveConvolution<ValidConvolution>,
44  typename BackwardConvolutionRule = NaiveConvolution<ValidConvolution>,
45  typename GradientConvolutionRule = NaiveConvolution<ValidConvolution>,
46  typename InputDataType = arma::mat,
47  typename OutputDataType = arma::mat
48 >
49 class TransposedConvolution
50 {
51  public:
54 
79  TransposedConvolution(const size_t inSize,
80  const size_t outSize,
81  const size_t kernelWidth,
82  const size_t kernelHeight,
83  const size_t strideWidth = 1,
84  const size_t strideHeight = 1,
85  const size_t padW = 0,
86  const size_t padH = 0,
87  const size_t inputWidth = 0,
88  const size_t inputHeight = 0,
89  const size_t outputWidth = 0,
90  const size_t outputHeight = 0,
91  const std::string& paddingType = "None");
92 
121  TransposedConvolution(const size_t inSize,
122  const size_t outSize,
123  const size_t kernelWidth,
124  const size_t kernelHeight,
125  const size_t strideWidth,
126  const size_t strideHeight,
127  const std::tuple<size_t, size_t>& padW,
128  const std::tuple<size_t, size_t>& padH,
129  const size_t inputWidth = 0,
130  const size_t inputHeight = 0,
131  const size_t outputWidth = 0,
132  const size_t outputHeight = 0,
133  const std::string& paddingType = "None");
134 
135  /*
136  * Set the weight and bias term.
137  */
138  void Reset();
139 
147  template<typename eT>
148  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output);
149 
159  template<typename eT>
160  void Backward(const arma::Mat<eT>& /* input */,
161  const arma::Mat<eT>& gy,
162  arma::Mat<eT>& g);
163 
164  /*
165  * Calculate the gradient using the output delta and the input activation.
166  *
167  * @param * (input) The input parameter used for calculating the gradient.
168  * @param error The calculated error.
169  * @param gradient The calculated gradient.
170  */
171  template<typename eT>
172  void Gradient(const arma::Mat<eT>& /* input */,
173  const arma::Mat<eT>& error,
174  arma::Mat<eT>& gradient);
175 
177  OutputDataType const& Parameters() const { return weights; }
179  OutputDataType& Parameters() { return weights; }
180 
182  InputDataType const& InputParameter() const { return inputParameter; }
184  InputDataType& InputParameter() { return inputParameter; }
185 
187  OutputDataType const& OutputParameter() const { return outputParameter; }
189  OutputDataType& OutputParameter() { return outputParameter; }
190 
192  OutputDataType const& Delta() const { return delta; }
194  OutputDataType& Delta() { return delta; }
195 
197  OutputDataType const& Gradient() const { return gradient; }
199  OutputDataType& Gradient() { return gradient; }
200 
202  size_t const& InputWidth() const { return inputWidth; }
204  size_t& InputWidth() { return inputWidth; }
205 
207  size_t const& InputHeight() const { return inputHeight; }
209  size_t& InputHeight() { return inputHeight; }
210 
212  size_t const& OutputWidth() const { return outputWidth; }
214  size_t& OutputWidth() { return outputWidth; }
215 
217  size_t const& OutputHeight() const { return outputHeight; }
219  size_t& OutputHeight() { return outputHeight; }
220 
222  size_t InputSize() const { return inSize; }
223 
225  size_t OutputSize() const { return outSize; }
226 
228  size_t KernelWidth() const { return kernelWidth; }
230  size_t& KernelWidth() { return kernelWidth; }
231 
233  size_t KernelHeight() const { return kernelHeight; }
235  size_t& KernelHeight() { return kernelHeight; }
236 
238  size_t StrideWidth() const { return strideWidth; }
240  size_t& StrideWidth() { return strideWidth; }
241 
243  size_t StrideHeight() const { return strideHeight; }
245  size_t& StrideHeight() { return strideHeight; }
246 
248  size_t PadHTop() const { return padHTop; }
250  size_t& PadHTop() { return padHTop; }
251 
253  size_t PadHBottom() const { return padHBottom; }
255  size_t& PadHBottom() { return padHBottom; }
256 
258  size_t PadWLeft() const { return padWLeft; }
260  size_t& PadWLeft() { return padWLeft; }
261 
263  size_t PadWRight() const { return padWRight; }
265  size_t& PadWRight() { return padWRight; }
266 
268  arma::mat& Bias() { return bias; }
269 
273  template<typename Archive>
274  void serialize(Archive& ar, const unsigned int /* version */);
275 
276  private:
277  /*
278  * Rotates a 3rd-order tensor counterclockwise by 180 degrees.
279  *
280  * @param input The input data to be rotated.
281  * @param output The rotated output.
282  */
283  template<typename eT>
284  void Rotate180(const arma::Cube<eT>& input, arma::Cube<eT>& output)
285  {
286  output = arma::Cube<eT>(input.n_rows, input.n_cols, input.n_slices);
287 
288  // * left-right flip, up-down flip */
289  for (size_t s = 0; s < output.n_slices; s++)
290  output.slice(s) = arma::fliplr(arma::flipud(input.slice(s)));
291  }
292 
293  /*
294  * Function to assign padding such that output size is same as input size.
295  */
296  void InitializeSamePadding();
297 
298  /*
299  * Rotates a dense matrix counterclockwise by 180 degrees.
300  *
301  * @param input The input data to be rotated.
302  * @param output The rotated output.
303  */
304  template<typename eT>
305  void Rotate180(const arma::Mat<eT>& input, arma::Mat<eT>& output)
306  {
307  // * left-right flip, up-down flip */
308  output = arma::fliplr(arma::flipud(input));
309  }
310 
311 
312  /*
313  * Insert zeros between the units of the given input data.
314  * Note: This function should be used before using padding layer.
315  *
316  * @param input The input to be padded.
317  * @param strideWidth Stride of filter application in the x direction.
318  * @param strideHeight Stride of filter application in the y direction.
319  * @param output The padded output data.
320  */
321  template<typename eT>
322  void InsertZeros(const arma::Mat<eT>& input,
323  const size_t strideWidth,
324  const size_t strideHeight,
325  arma::Mat<eT>& output)
326  {
327  if (output.n_rows != input.n_rows * strideWidth - strideWidth + 1 ||
328  output.n_cols != input.n_cols * strideHeight - strideHeight + 1)
329  {
330  output = arma::zeros(input.n_rows * strideWidth - strideWidth + 1,
331  input.n_cols * strideHeight - strideHeight + 1);
332  }
333 
334  for (size_t i = 0; i < output.n_rows; i += strideHeight)
335  {
336  for (size_t j = 0; j < output.n_cols; j += strideWidth)
337  {
338  // TODO: Use [] instead of () for speedup after this is completely
339  // debugged and approved.
340  output(i, j) = input(i / strideHeight, j / strideWidth);
341  }
342  }
343  }
344 
345  /*
346  * Insert zeros between the units of the given input data.
347  * Note: This function should be used before using padding layer.
348  *
349  * @param input The input to be padded.
350  * @param strideWidth Stride of filter application in the x direction.
351  * @param strideHeight Stride of filter application in the y direction.
352  * @param output The padded output data.
353  */
354  template<typename eT>
355  void InsertZeros(const arma::Cube<eT>& input,
356  const size_t strideWidth,
357  const size_t strideHeight,
358  arma::Cube<eT>& output)
359  {
360  output = arma::zeros(input.n_rows * strideWidth - strideWidth + 1,
361  input.n_cols * strideHeight - strideHeight + 1, input.n_slices);
362 
363  for (size_t i = 0; i < input.n_slices; ++i)
364  {
365  InsertZeros<eT>(input.slice(i), strideWidth, strideHeight,
366  output.slice(i));
367  }
368  }
369 
371  size_t inSize;
372 
374  size_t outSize;
375 
377  size_t batchSize;
378 
380  size_t kernelWidth;
381 
383  size_t kernelHeight;
384 
386  size_t strideWidth;
387 
389  size_t strideHeight;
390 
392  size_t padWLeft;
393 
395  size_t padWRight;
396 
398  size_t padHBottom;
399 
401  size_t padHTop;
402 
404  size_t aW;
405 
407  size_t aH;
408 
410  OutputDataType weights;
411 
413  arma::cube weight;
414 
416  arma::mat bias;
417 
419  size_t inputWidth;
420 
422  size_t inputHeight;
423 
425  size_t outputWidth;
426 
428  size_t outputHeight;
429 
431  arma::cube outputTemp;
432 
434  arma::cube inputPaddedTemp;
435 
437  arma::cube inputExpandedTemp;
438 
440  arma::cube gTemp;
441 
443  arma::cube gradientTemp;
444 
446  ann::Padding<> paddingForward;
447 
449  ann::Padding<> paddingBackward;
450 
452  OutputDataType delta;
453 
455  OutputDataType gradient;
456 
458  InputDataType inputParameter;
459 
461  OutputDataType outputParameter;
462 }; // class TransposedConvolution
463 
464 } // namespace ann
465 } // namespace mlpack
466 
468 namespace boost {
469 namespace serialization {
470 
471 template<
472  typename ForwardConvolutionRule,
473  typename BackwardConvolutionRule,
474  typename GradientConvolutionRule,
475  typename InputDataType,
476  typename OutputDataType
477 >
478 struct version<
479  mlpack::ann::TransposedConvolution<ForwardConvolutionRule,
480  BackwardConvolutionRule, GradientConvolutionRule, InputDataType,
481  OutputDataType> >
482 {
483  BOOST_STATIC_CONSTANT(int, value = 1);
484 };
485 
486 } // namespace serialization
487 } // namespace boost
488 
489 // Include implementation.
490 #include "transposed_convolution_impl.hpp"
491 
492 #endif
arma::mat & Bias()
Modify the bias weights of the layer.
size_t & PadWLeft()
Modify the left padding width.
OutputDataType const & Gradient() const
Get the gradient.
OutputDataType & Parameters()
Modify the parameters.
Set the serialization version of a class (here: the TransposedConvolution layer).
Definition: adaboost.hpp:198
size_t const & OutputHeight() const
Get the output height.
size_t const & InputHeight() const
Get the input height.
Linear algebra utility functions, generally performed on matrices or vectors.
Definition: add_to_po.hpp:21
Implementation of the Padding module class.
Definition: layer_types.hpp:77
size_t & StrideHeight()
Modify the stride height.
OutputDataType & Delta()
Modify the delta.
The core includes that mlpack expects; standard C++ includes and Armadillo.
size_t & InputHeight()
Modify the input height.
size_t & KernelWidth()
Modify the kernel width.
size_t & PadHTop()
Modify the top padding height.
size_t & PadWRight()
Modify the right padding width.
size_t PadWLeft() const
Get the left padding width.
OutputDataType const & OutputParameter() const
Get the output parameter.
size_t PadHTop() const
Get the top padding height.
OutputDataType const & Parameters() const
Get the parameters.
size_t const & InputWidth() const
Get the input width.
size_t const & OutputWidth() const
Get the output width.
size_t & StrideWidth()
Modify the stride width.
TransposedConvolution()
Create the Transposed Convolution object.
size_t KernelHeight() const
Get the kernel height.
void serialize(Archive &ar, const unsigned int)
Serialize the layer.
OutputDataType & Gradient()
Modify the gradient.
size_t & KernelHeight()
Modify the kernel height.
InputDataType const & InputParameter() const
Get the input parameter.
OutputDataType const & Delta() const
Get the delta.
size_t & PadHBottom()
Modify the bottom padding height.
size_t & OutputWidth()
Modify the output width.
size_t OutputSize() const
Get the output size.
size_t & InputWidth()
Modify the input width.
size_t KernelWidth() const
Get the kernel width.
size_t InputSize() const
Get the input size.
size_t PadHBottom() const
Get the bottom padding height.
size_t StrideWidth() const
Get the stride width.
OutputDataType & OutputParameter()
Modify the output parameter.
InputDataType & InputParameter()
Modify the input parameter.
void Forward(const arma::Mat< eT > &input, arma::Mat< eT > &output)
Ordinary feed forward pass of a neural network, evaluating the function f(x) by propagating the activ...
size_t PadWRight() const
Get the right padding width.
size_t StrideHeight() const
Get the stride height.
size_t & OutputHeight()
Modify the output height.
void Backward(const arma::Mat< eT > &, const arma::Mat< eT > &gy, arma::Mat< eT > &g)
Ordinary feed backward pass of a neural network, calculating the function f(x) by propagating x backw...