diff --git a/doc/snippets/CMakeLists.txt b/doc/snippets/CMakeLists.txt
index 4f025a56a..7b5cfbeba 100644
--- a/doc/snippets/CMakeLists.txt
+++ b/doc/snippets/CMakeLists.txt
@@ -42,7 +42,8 @@ add_library(snippets-Magnum STATIC
     Magnum.cpp
     MagnumAnimation.cpp
     MagnumAnimation-custom.cpp
-    MagnumMath.cpp)
+    MagnumMath.cpp
+    MagnumMathAlgorithms.cpp)
 
 target_link_libraries(snippets-Magnum PRIVATE Magnum)
 set_target_properties(snippets-Magnum PROPERTIES FOLDER "Magnum/doc/snippets")
diff --git a/doc/snippets/MagnumMathAlgorithms.cpp b/doc/snippets/MagnumMathAlgorithms.cpp
new file mode 100644
index 000000000..eb4f27761
--- /dev/null
+++ b/doc/snippets/MagnumMathAlgorithms.cpp
@@ -0,0 +1,85 @@
+/*
+    This file is part of Magnum.
+
+    Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018
+              Vladimír Vondruš <mosra@centrum.cz>
+
+    Permission is hereby granted, free of charge, to any person obtaining a
+    copy of this software and associated documentation files (the "Software"),
+    to deal in the Software without restriction, including without limitation
+    the rights to use, copy, modify, merge, publish, distribute, sublicense,
+    and/or sell copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included
+    in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+    DEALINGS IN THE SOFTWARE.
+*/
+
+#include <numeric>
+#include <tuple>
+#include <vector>
+
+#include "Magnum/Magnum.h"
+#include "Magnum/Math/Algorithms/KahanSum.h"
+#include "Magnum/Math/Algorithms/Svd.h"
+#include "Magnum/Math/Packing.h"
+
+using namespace Magnum;
+using namespace Magnum::Math::Literals;
+
+int main() {
+{
+/* [kahanSum] */
+std::vector<Float> data(100000000, 1.0f);
+Float a = std::accumulate(data.begin(), data.end(), 0.0f); // 1.667e7f
+Float b = Math::Algorithms::kahanSum(data.begin(), data.end()); // 1.000e8f
+/* [kahanSum] */
+static_cast<void>(a);
+static_cast<void>(b);
+}
+
+{
+/* [kahanSum-iterative] */
+Containers::ArrayView<const UnsignedByte> pixels;
+Float sum = 0.0f, c = 0.0f;
+for(UnsignedByte pixel: pixels) {
+    Float value = Math::unpack<Float>(pixel);
+    sum = Math::Algorithms::kahanSum(&value, &value + 1, sum, &c);
+}
+/* [kahanSum-iterative] */
+}
+
+{
+enum: std::size_t { cols = 3, rows = 4 };
+/* [svd] */
+Math::RectangularMatrix<cols, rows, Float> m;
+
+Math::RectangularMatrix<cols, rows, Float> uPart;
+Math::Vector<cols, Float> wDiagonal;
+Math::Matrix<cols, Float> v;
+
+std::tie(uPart, wDiagonal, v) = Math::Algorithms::svd(m);
+
+// Extend U
+Math::Matrix<rows, Float> u{Math::ZeroInit};
+for(std::size_t i = 0; i != rows; ++i)
+    u[i] = uPart[i];
+
+// Diagonal W
+Math::RectangularMatrix<cols, rows, Float> w =
+    Math::RectangularMatrix<cols, rows, Float>::fromDiagonal(wDiagonal);
+
+// u*w*v.transposed() == m
+/* [svd] */
+static_cast<void>(w);
+}
+
+}
diff --git a/src/Magnum/Math/Algorithms/GaussJordan.h b/src/Magnum/Math/Algorithms/GaussJordan.h
index d84a78e52..f89707c29 100644
--- a/src/Magnum/Math/Algorithms/GaussJordan.h
+++ b/src/Magnum/Math/Algorithms/GaussJordan.h
@@ -40,10 +40,10 @@ namespace Magnum { namespace Math { namespace Algorithms {
 @return True if @p a is regular, false if @p a is singular (and thus the
     system cannot be solved).
 
-As Gauss-Jordan elimination works on rows and matrices in OpenGL are
-column-major, it is more efficient to operate on transposed matrices and treat
-columns as rows. See also @ref gaussJordanInPlace() which works with
-non-transposed matrices.
+As [Gauss-Jordan elimination](https://en.wikipedia.org/wiki/Gaussian_elimination)
+works on rows and matrices in OpenGL are column-major, it is more efficient to
+operate on transposed matrices and treat columns as rows. See also
+@ref gaussJordanInPlace() which works with non-transposed matrices.
 
 The function eliminates matrix @p a and solves @p t in place. For efficiency
 reasons, only pure Gaussian elimination is done on @p a and the final
@@ -114,8 +114,9 @@ template<std::size_t size, std::size_t cols, class T> bool gaussJordanInPlace(Re
 /**
 @brief Gauss-Jordan matrix inversion
 
-Since @f$ (\boldsymbol{A}^{-1})^T = (\boldsymbol{A}^T)^{-1} @f$, passes
-@p matrix and an identity matrix to @ref gaussJordanInPlaceTransposed();
+Uses the [Gauss-Jordan elimination](https://en.wikipedia.org/wiki/Gaussian_elimination#Finding_the_inverse_of_a_matrix) to perform a matrix
+inversion. Since @f$ (\boldsymbol{A}^{-1})^T = (\boldsymbol{A}^T)^{-1} @f$,
+passes @p matrix and an identity matrix to @ref gaussJordanInPlaceTransposed();
 returning the inverted matrix. Expects that the matrix is invertible.
 @see @ref Matrix::inverted()
 */
diff --git a/src/Magnum/Math/Algorithms/GramSchmidt.h b/src/Magnum/Math/Algorithms/GramSchmidt.h
index ae6d8e439..d0b7133da 100644
--- a/src/Magnum/Math/Algorithms/GramSchmidt.h
+++ b/src/Magnum/Math/Algorithms/GramSchmidt.h
@@ -36,6 +36,25 @@ namespace Magnum { namespace Math { namespace Algorithms {
 /** @brief In-place Gram-Schmidt matrix orthogonalization
 @param[in,out] matrix    Matrix to perform orthogonalization on
+
+Performs the [Gram-Schmidt process](https://en.wikipedia.org/wiki/Gram–Schmidt_process).
+With a *projection operator* defined as
+@f[
+    \operatorname{proj}_{\boldsymbol{u}}\,(\boldsymbol{v}) = \frac{\boldsymbol{u} \cdot \boldsymbol{v}}{\boldsymbol{u} \cdot \boldsymbol{u}} \boldsymbol{u}
+@f]
+
+@m_class{m-noindent}
+
+the process works as follows, with @f$ \boldsymbol{v}_k @f$ being columns of
+@p matrix and @f$ \boldsymbol{u}_k @f$ columns of the output: @f[
+    \boldsymbol{u}_k = \boldsymbol{v}_k - \sum_{j = 1}^{k - 1} \operatorname{proj}_{\boldsymbol{u}_j}\,(\boldsymbol{v}_k)
+@f]
+
+Note that the above is not performed directly due to numerical instability,
+the stable [modified Gram-Schmidt](https://en.wikipedia.org/wiki/Gram–Schmidt_process#Numerical_stability)
+algorithm is used instead.
+@see @ref gramSchmidtOrthogonalize(), @ref gramSchmidtOrthonormalizeInPlace(),
+    @ref Vector::projected()
 */
 template<std::size_t cols, std::size_t rows, class T> void gramSchmidtOrthogonalizeInPlace(RectangularMatrix<cols, rows, T>& matrix) {
     static_assert(cols <= rows, "Unsupported matrix aspect ratio");
@@ -59,6 +78,25 @@ template<std::size_t cols, std::size_t rows, class T> RectangularMatrix<cols, rows, T>
 void gramSchmidtOrthonormalizeInPlace(RectangularMatrix<cols, rows, T>& matrix) {
     static_assert(cols <= rows, "Unsupported matrix aspect ratio");
diff --git a/src/Magnum/Math/Algorithms/KahanSum.h b/src/Magnum/Math/Algorithms/KahanSum.h
index 7c2179fa2..7f7994a12 100644
--- a/src/Magnum/Math/Algorithms/KahanSum.h
+++ b/src/Magnum/Math/Algorithms/KahanSum.h
@@ -39,35 +39,24 @@ namespace Magnum { namespace Math { namespace Algorithms {
 @param[in] end          Range end
 @param[in] sum          Initial value for the sum
 @param[in,out] compensation Floating-point roundoff error compensation value.
-    If non-`nullptr`, value behind the pointer is used as initial compensation
-    value and the resulting value is
+    If non-@cpp nullptr @ce, value behind the pointer is used as initial
+    compensation value and the resulting value is
 
 Calculates a sum of a large range of floating-point numbers with roundoff error
-compensation. Compared to for example `std::accumulate()` the algorithm
-significantly reduces numerical error in the total. See Wikipedia for an
-in-depth explanation: https://en.wikipedia.org/wiki/Kahan_summation_algorithm
+compensation. Compared to for example @ref std::accumulate() the algorithm
+significantly reduces numerical error in the total. See the
+[Kahan summation algorithm](https://en.wikipedia.org/wiki/Kahan_summation_algorithm)
+article on Wikipedia for an in-depth explanation. Example with summation of a
+hundred million ones:
 
-Example with summation of a hundred million ones:
-
-@code{.cpp}
-std::vector<Float> data(100000000, 1.0f);
-Float a = std::accumulate(data.begin(), data.end()); // 1.667e7f
-Float b = Math::Algorithms::kahanSum(data.begin(), data.end()); // 1.000e8f
-@endcode
+@snippet MagnumMathAlgorithms.cpp kahanSum
 
 If required, it is also possible to use this algorithm on non-contiguous ranges
 or single values (for example when calculating sum of pixel values in an image
 with some row padding or when the inputs are generated / converted from other
 values):
 
-@code{.cpp}
-Containers::ArrayView<const UnsignedByte> pixels;
-Float sum = 0.0f, c = 0.0f;
-for(UnsignedByte pixel: pixels) {
-    Float value = Math::normalize<Float>(pixel);
-    sum = Math::Algorithms::kahanSum(&value, &value + 1, sum, &c);
-}
-@endcode
+@snippet MagnumMathAlgorithms.cpp kahanSum-iterative
 */
 template<class Iterator, class T = typename std::decay<decltype(*std::declval<Iterator>())>::type> T kahanSum(Iterator begin, Iterator end, T sum = T(0), T* compensation = nullptr) {
     T c = compensation ? *compensation : T(0);
diff --git a/src/Magnum/Math/Algorithms/Qr.h b/src/Magnum/Math/Algorithms/Qr.h
index 8ee8ef13d..f6c68f268 100644
--- a/src/Magnum/Math/Algorithms/Qr.h
+++ b/src/Magnum/Math/Algorithms/Qr.h
@@ -36,7 +36,7 @@ namespace Magnum { namespace Math { namespace Algorithms {
 /**
 @brief QR decomposition
 
-Calculated using classic Gram-Schmidt process.
+Calculated using the classic [Gram-Schmidt process](https://en.wikipedia.org/wiki/QR_decomposition#Using_the_Gram–Schmidt_process).
 */
 template<std::size_t size, class T> std::pair<Matrix<size, T>, Matrix<size, T>> qr(const Matrix<size, T>& matrix) {
     Matrix<size, T> q, r;
diff --git a/src/Magnum/Math/Algorithms/Svd.h b/src/Magnum/Math/Algorithms/Svd.h
index 1115ccbf4..0654b464e 100644
--- a/src/Magnum/Math/Algorithms/Svd.h
+++ b/src/Magnum/Math/Algorithms/Svd.h
@@ -58,10 +58,11 @@ template<> constexpr Double smallestDelta() { return 1.0e-64; }
 /**
 @brief Singular Value Decomposition
 
-Performs Thin SVD on given matrix where @p rows >= @p cols:
-@f[
+Performs [Thin SVD](https://en.wikipedia.org/wiki/Singular-value_decomposition#Thin_SVD)
+on given matrix where @p rows >= @p cols: @f[
     M = U \Sigma V^*
 @f]
+
 Returns first @p cols column vectors of @f$ U @f$, diagonal of @f$ \Sigma @f$
 and non-transposed @f$ V @f$. If the solution doesn't converge, returns
 zero matrices.
@@ -69,26 +70,7 @@ zero matrices.
 Full @f$ U @f$, @f$ \Sigma @f$ matrices and original @f$ M @f$ matrix can be
 reconstructed from the values as following:
 
-@code{.cpp}
-Math::RectangularMatrix<cols, rows, Double> m;
-
-Math::RectangularMatrix<cols, rows, Double> uPart;
-Math::Vector<cols, Double> wDiagonal;
-Math::Matrix<cols, Double> v;
-
-std::tie(uPart, wDiagonal, v) = Math::Algorithms::svd(m);
-
-// Extend U
-Math::Matrix<rows, Double> u(Matrix<rows, Double>::Zero);
-for(std::size_t i = 0; i != rows; ++i)
-    u[i] = uPart[i];
-
-// Diagonal W
-Math::RectangularMatrix<cols, rows, Double> w =
-    Math::RectangularMatrix<cols, rows, Double>::fromDiagonal(wDiagonal);
-
-// u*w*v.transposed() == m
-@endcode
+@snippet MagnumMathAlgorithms.cpp svd
 
 Implementation based on *Golub, G. H.; Reinsch, C. (1970). "Singular value
 decomposition and least squares solutions"*.