Math/Algorithms: update algorithm docs and make code snippets compiled.

And add a bunch more math equations. Of course there was deprecated
functionality being used and some stuff not even compiling.
Branch: pull/268/head
Vladimír Vondruš, 8 years ago
Parent commit: f9328e3f64
1. doc/snippets/CMakeLists.txt (3 changed lines)
2. doc/snippets/MagnumMathAlgorithms.cpp (85 changed lines)
3. src/Magnum/Math/Algorithms/GaussJordan.h (13 changed lines)
4. src/Magnum/Math/Algorithms/GramSchmidt.h (38 changed lines)
5. src/Magnum/Math/Algorithms/KahanSum.h (29 changed lines)
6. src/Magnum/Math/Algorithms/Qr.h (2 changed lines)
7. src/Magnum/Math/Algorithms/Svd.h (26 changed lines)

doc/snippets/CMakeLists.txt (3 changed lines)

@@ -42,7 +42,8 @@ add_library(snippets-Magnum STATIC
     Magnum.cpp
     MagnumAnimation.cpp
     MagnumAnimation-custom.cpp
-    MagnumMath.cpp)
+    MagnumMath.cpp
+    MagnumMathAlgorithms.cpp)
 target_link_libraries(snippets-Magnum PRIVATE Magnum)
 set_target_properties(snippets-Magnum PROPERTIES FOLDER "Magnum/doc/snippets")

doc/snippets/MagnumMathAlgorithms.cpp (85 added lines, new file)

@@ -0,0 +1,85 @@
/*
    This file is part of Magnum.

    Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018
              Vladimír Vondruš <mosra@centrum.cz>

    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included
    in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
*/

#include <numeric>
#include <vector>

#include <Corrade/Containers/ArrayView.h>

#include "Magnum/Magnum.h"
#include "Magnum/Math/Algorithms/KahanSum.h"
#include "Magnum/Math/Algorithms/Svd.h"
#include "Magnum/Math/Packing.h"

using namespace Magnum;
using namespace Magnum::Math::Literals;

int main() {
{
/* [kahanSum] */
std::vector<Float> data(100000000, 1.0f);
Float a = std::accumulate(data.begin(), data.end(), 0.0f); // 1.667e7f
Float b = Math::Algorithms::kahanSum(data.begin(), data.end()); // 1.000e8f
/* [kahanSum] */
static_cast<void>(a);
static_cast<void>(b);
}
{
/* [kahanSum-iterative] */
Containers::ArrayView<UnsignedByte> pixels;
Float sum = 0.0f, c = 0.0f;
for(UnsignedByte pixel: pixels) {
Float value = Math::unpack<Float>(pixel);
sum = Math::Algorithms::kahanSum(&value, &value + 1, sum, &c);
}
/* [kahanSum-iterative] */
}
{
enum: std::size_t { cols = 3, rows = 4 };
/* [svd] */
Math::RectangularMatrix<cols, rows, Double> m;
Math::RectangularMatrix<cols, rows, Double> uPart;
Math::Vector<cols, Double> wDiagonal;
Math::Matrix<cols, Double> v;
std::tie(uPart, wDiagonal, v) = Math::Algorithms::svd(m);
// Extend U
Math::Matrix<rows, Double> u{Math::ZeroInit};
for(std::size_t i = 0; i != cols; ++i)
    u[i] = uPart[i];
// Diagonal W
Math::RectangularMatrix<cols, rows, Double> w =
    Math::RectangularMatrix<cols, rows, Double>::fromDiagonal(wDiagonal);
// u*w*v.transposed() == m
/* [svd] */
static_cast<void>(w);
}
}
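
A note on the two expected results in the kahanSum block above; this is a
property of IEEE 754 single precision, not of Magnum. A Float has a 24-bit
significand, so once the naive running sum reaches
@f[
2^{24} = 16\,777\,216 \approx 1.667 \times 10^7
@f]
adding further 1.0f terms no longer changes it, which is where
std::accumulate() plateaus at 1.667e7f, while the compensated kahanSum()
recovers the exact 1.000e8f.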

src/Magnum/Math/Algorithms/GaussJordan.h (13 changed lines)

@@ -40,10 +40,10 @@ namespace Magnum { namespace Math { namespace Algorithms {
 @return True if @p a is regular, false if @p a is singular (and thus the
 system cannot be solved).
-As Gauss-Jordan elimination works on rows and matrices in OpenGL are
-column-major, it is more efficient to operate on transposed matrices and treat
-columns as rows. See also @ref gaussJordanInPlace() which works with
-non-transposed matrices.
+As [Gauss-Jordan elimination](https://en.wikipedia.org/wiki/Gaussian_elimination)
+works on rows while matrices in OpenGL are column-major, it is more efficient
+to operate on transposed matrices and treat columns as rows. See also
+@ref gaussJordanInPlace() which works with non-transposed matrices.
 The function eliminates matrix @p a and solves @p t in place. For efficiency
 reasons, only pure Gaussian elimination is done on @p a and the final
@@ -114,8 +114,9 @@ template<std::size_t size, std::size_t cols, class T> bool gaussJordanInPlace(Re
 /**
 @brief Gauss-Jordan matrix inversion
-Since @f$ (\boldsymbol{A}^{-1})^T = (\boldsymbol{A}^T)^{-1} @f$, passes
-@p matrix and an identity matrix to @ref gaussJordanInPlaceTransposed();
+Uses [Gauss-Jordan elimination](https://en.wikipedia.org/wiki/Gaussian_elimination#Finding_the_inverse_of_a_matrix)
+to perform a matrix inversion. Since @f$ (\boldsymbol{A}^{-1})^T = (\boldsymbol{A}^T)^{-1} @f$,
+passes @p matrix and an identity matrix to @ref gaussJordanInPlaceTransposed(),
 returning the inverted matrix. Expects that the matrix is invertible.
 @see @ref Matrix::inverted()
 */
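
A minimal usage sketch of the inversion entry point documented above. The hunk
shows only the doc block, so the function name
Math::Algorithms::gaussJordanInverted() and the example values are assumptions:

#include "Magnum/Magnum.h"
#include "Magnum/Math/Matrix.h"
#include "Magnum/Math/Algorithms/GaussJordan.h"

using namespace Magnum;

int main() {
    /* A regular 2x2 matrix, given as two column vectors */
    Math::Matrix<2, Double> a{Math::Vector<2, Double>{2.0, 0.0},
                              Math::Vector<2, Double>{1.0, 1.0}};

    /* Assumed name; per the doc above this passes a and an identity matrix
       through the transposed in-place elimination */
    Math::Matrix<2, Double> inverted = Math::Algorithms::gaussJordanInverted(a);

    /* inverted*a is the identity matrix, up to floating-point error */
    static_cast<void>(inverted);
}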

src/Magnum/Math/Algorithms/GramSchmidt.h (38 changed lines)

@@ -36,6 +36,25 @@ namespace Magnum { namespace Math { namespace Algorithms {
 /**
 @brief In-place Gram-Schmidt matrix orthogonalization
 @param[in,out] matrix Matrix to perform orthogonalization on
+Performs the [Gram-Schmidt process](https://en.wikipedia.org/wiki/Gram–Schmidt_process).
+With a *projection operator* defined as
+@f[
+\operatorname{proj}_{\boldsymbol{u}}\,(\boldsymbol{v}) = \frac{\boldsymbol{u} \cdot \boldsymbol{v}}{\boldsymbol{u} \cdot \boldsymbol{u}} \boldsymbol{u}
+@f]
+@m_class{m-noindent}
+the process works as follows, with @f$ \boldsymbol{v}_k @f$ being columns of
+@p matrix and @f$ \boldsymbol{u}_k @f$ columns of the output: @f[
+\boldsymbol{u}_k = \boldsymbol{v}_k - \sum_{j = 1}^{k - 1} \operatorname{proj}_{\boldsymbol{u}_j}\,(\boldsymbol{v}_k)
+@f]
+Note that the above is not performed directly due to numerical instability;
+the stable [modified Gram-Schmidt](https://en.wikipedia.org/wiki/Gram–Schmidt_process#Numerical_stability)
+algorithm is used instead.
 @see @ref gramSchmidtOrthogonalize(), @ref gramSchmidtOrthonormalizeInPlace(),
 @ref Vector::projected()
 */
 template<std::size_t cols, std::size_t rows, class T> void gramSchmidtOrthogonalizeInPlace(RectangularMatrix<cols, rows, T>& matrix) {
     static_assert(cols <= rows, "Unsupported matrix aspect ratio");
@@ -59,6 +78,25 @@ template<std::size_t cols, std::size_t rows, class T> RectangularMatrix<cols, ro
 /**
 @brief In-place Gram-Schmidt matrix orthonormalization
 @param[in,out] matrix Matrix to perform orthonormalization on
+Performs the [Gram-Schmidt process](https://en.wikipedia.org/wiki/Gram–Schmidt_process).
+With a *projection operator* defined as
+@f[
+\operatorname{proj}_{\boldsymbol{u}}\,(\boldsymbol{v}) = \frac{\boldsymbol{u} \cdot \boldsymbol{v}}{\boldsymbol{u} \cdot \boldsymbol{u}} \boldsymbol{u}
+@f]
+@m_class{m-noindent}
+the Gram-Schmidt process works as follows, with @f$ \boldsymbol{v}_k @f$ being
+columns of @p matrix and @f$ \boldsymbol{e}_k @f$ columns of the output: @f[
+\boldsymbol{u}_k = \boldsymbol{v}_k - \sum_{j = 1}^{k - 1} \operatorname{proj}_{\boldsymbol{u}_j}\,(\boldsymbol{v}_k), ~~~~~~ \boldsymbol{e}_k = \frac{\boldsymbol{u}_k}{|\boldsymbol{u}_k|}
+@f]
+In particular, this adds the normalization step on top of
+@ref gramSchmidtOrthogonalizeInPlace(). Note that the above is not performed
+directly due to numerical instability; the stable [modified Gram-Schmidt](https://en.wikipedia.org/wiki/Gram–Schmidt_process#Numerical_stability)
+algorithm is used instead.
 @see @ref gramSchmidtOrthonormalize(), @ref Vector::projectedOntoNormalized()
 */
 template<std::size_t cols, std::size_t rows, class T> void gramSchmidtOrthonormalizeInPlace(RectangularMatrix<cols, rows, T>& matrix) {
     static_assert(cols <= rows, "Unsupported matrix aspect ratio");
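
Since the orthonormalization signature is shown right above, a usage sketch
(the matrix values are made up for illustration):

#include "Magnum/Magnum.h"
#include "Magnum/Math/Matrix.h"
#include "Magnum/Math/Algorithms/GramSchmidt.h"

using namespace Magnum;

int main() {
    /* Columns are linearly independent, but neither orthogonal nor
       unit-length */
    Math::Matrix<3, Float> m{Math::Vector<3, Float>{3.0f, 1.0f, 0.0f},
                             Math::Vector<3, Float>{2.0f, 2.0f, 0.0f},
                             Math::Vector<3, Float>{1.0f, 1.0f, 1.0f}};

    Math::Algorithms::gramSchmidtOrthonormalizeInPlace(m);

    /* Columns of m now form an orthonormal basis: Math::dot(m[i], m[j]) is
       ~0 for i != j and each column has length ~1 */
}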

src/Magnum/Math/Algorithms/KahanSum.h (29 changed lines)

@@ -39,35 +39,24 @@ namespace Magnum { namespace Math { namespace Algorithms {
 @param[in] end Range end
 @param[in] sum Initial value for the sum
 @param[in,out] compensation Floating-point roundoff error compensation value.
-If non-`nullptr`, value behind the pointer is used as initial compensation
-value and the resulting value is
+If non-@cpp nullptr @ce, value behind the pointer is used as initial
+compensation value and the resulting value is
 Calculates a sum of a large range of floating-point numbers with roundoff error
-compensation. Compared to for example `std::accumulate()` the algorithm
-significantly reduces numerical error in the total. See Wikipedia for an
-in-depth explanation: https://en.wikipedia.org/wiki/Kahan_summation_algorithm
-Example with summation of a hundred million ones:
-@code{.cpp}
-std::vector<Float> data(100000000, 1.0f);
-Float a = std::accumulate(data.begin(), data.end()); // 1.667e7f
-Float b = Math::Algorithms::kahanSum(data.begin(), data.end()); // 1.000e8f
-@endcode
+compensation. Compared to for example @ref std::accumulate() the algorithm
+significantly reduces numerical error in the total. See the
+[Kahan summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm)
+article on Wikipedia for an in-depth explanation. Example with summation of a
+hundred million ones:
+@snippet MagnumMathAlgorithms.cpp kahanSum
 If required, it is also possible to use this algorithm on non-contiguous ranges
 or single values (for example when calculating sum of pixel values in an image
 with some row padding or when the inputs are generated / converted from other
 values):
-@code{.cpp}
-Containers::ArrayView<UnsignedByte> pixels;
-Float sum = 0.0f, c = 0.0f;
-for(UnsignedByte pixel: pixels) {
-    Float value = Math::normalize<Float>(pixel);
-    sum = Math::Algorithms::kahanSum(&value, &value + 1, sum, &c);
-}
-@endcode
+@snippet MagnumMathAlgorithms.cpp kahanSum-iterative
 */
 template<class Iterator, class T = typename std::decay<decltype(*std::declval<Iterator>())>::type> T kahanSum(Iterator begin, Iterator end, T sum = T(0), T* compensation = nullptr) {
     T c = compensation ? *compensation : T(0);
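
For intuition about the sum/compensation pair in the signature above, this is
a sketch of the classic compensated-summation step the algorithm is built on.
It illustrates the technique itself, not Magnum's exact implementation:

#include <vector>

#include "Magnum/Magnum.h"

using namespace Magnum;

Float kahanSumSketch(const std::vector<Float>& data) {
    Float sum = 0.0f;
    Float c = 0.0f;          /* running compensation: the low-order bits
                                lost by previous additions */
    for(Float value: data) {
        Float y = value - c; /* re-apply what was lost earlier */
        Float t = sum + y;   /* big + small: low-order bits of y get
                                rounded away here */
        c = (t - sum) - y;   /* algebraically zero; in floating point it is
                                exactly the rounded-away part */
        sum = t;
    }
    return sum;
}

This is also why passing the same compensation pointer across calls, as in the
kahanSum-iterative snippet, keeps the error bounded even when values arrive
one at a time.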

src/Magnum/Math/Algorithms/Qr.h (2 changed lines)

@@ -36,7 +36,7 @@ namespace Magnum { namespace Math { namespace Algorithms {
 /**
 @brief QR decomposition
-Calculated using classic Gram-Schmidt process.
+Calculated using the classic [Gram-Schmidt process](https://en.wikipedia.org/wiki/QR_decomposition#Using_the_Gram–Schmidt_process).
 */
 template<std::size_t size, class T> std::pair<Matrix<size, T>, Matrix<size, T>> qr(const Matrix<size, T>& matrix) {
     Matrix<size, T> q, r;
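
A usage sketch based on the signature above, with a well-known example matrix
(the values are illustrative, not from the source):

#include <tuple>

#include "Magnum/Magnum.h"
#include "Magnum/Math/Matrix.h"
#include "Magnum/Math/Algorithms/Qr.h"

using namespace Magnum;

int main() {
    Math::Matrix<3, Float> m{Math::Vector<3, Float>{12.0f, 6.0f, -4.0f},
                             Math::Vector<3, Float>{-51.0f, 167.0f, 24.0f},
                             Math::Vector<3, Float>{4.0f, -68.0f, -41.0f}};

    /* q has orthonormal columns, r is upper-triangular, q*r reproduces m */
    Math::Matrix<3, Float> q, r;
    std::tie(q, r) = Math::Algorithms::qr(m);

    static_cast<void>(q);
    static_cast<void>(r);
}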

src/Magnum/Math/Algorithms/Svd.h (26 changed lines)

@@ -58,10 +58,11 @@ template<> constexpr Double smallestDelta<Double>() { return 1.0e-64; }
 /**
 @brief Singular Value Decomposition
-Performs Thin SVD on given matrix where @p rows >= @p cols:
-@f[
+Performs [Thin SVD](https://en.wikipedia.org/wiki/Singular-value_decomposition#Thin_SVD)
+on given matrix where @p rows >= `cols`: @f[
 M = U \Sigma V^*
 @f]
 Returns first @p cols column vectors of @f$ U @f$, diagonal of @f$ \Sigma @f$
 and non-transposed @f$ V @f$. If the solution doesn't converge, returns
 zero matrices.
@@ -69,26 +70,7 @@ zero matrices.
 Full @f$ U @f$, @f$ \Sigma @f$ matrices and original @f$ M @f$ matrix can be
 reconstructed from the values as follows:
-@code{.cpp}
-Math::RectangularMatrix<cols, rows, Double> m;
-Math::RectangularMatrix<cols, rows, Double> uPart;
-Math::Vector<cols, Double> wDiagonal;
-Math::Matrix<cols, Double> v;
-std::tie(uPart, wDiagonal, v) = Math::Algorithms::svd(m);
-// Extend U
-Math::Matrix<rows, Double> u(Matrix<rows, Double>::Zero);
-for(std::size_t i = 0; i != rows; ++i)
-    u[i] = uPart[i];
-// Diagonal W
-Math::RectangularMatrix<cols, rows, Double> w =
-    Math::RectangularMatrix<cols, rows, Double>::fromDiagonal(wDiagonal);
-// u*w*v.transposed() == m
-@endcode
+@snippet MagnumMathAlgorithms.cpp svd
 Implementation based on *Golub, G. H.; Reinsch, C. (1970). "Singular value
 decomposition and least squares solutions"*.
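
For reference, the factor shapes in the thin SVD above follow from
@f$ m \geq n @f$ (writing @f$ m @f$ for rows and @f$ n @f$ for cols); this is
the standard definition, stated here for clarity:
@f[
\boldsymbol{M}_{m \times n} = \boldsymbol{U}_{m \times n} \boldsymbol{\Sigma}_{n \times n} \boldsymbol{V}^*_{n \times n}
@f]
which is why the snippet zero-extends the returned @f$ U @f$ part to a full
@f$ m \times m @f$ matrix and rebuilds @f$ \Sigma @f$ from its diagonal. Note
also that the removed code still used the old Matrix::Zero constructor, which
the new snippet replaces with Math::ZeroInit, one of the deprecated usages the
commit message mentions.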
