Browse Source

Text: new Renderer class.

Builds upon RendererCore and generates index and vertex data from the
glyph positions and IDs. Documentation is again coming later once
everything is in; next is a RendererGL, which populates a GL::Mesh with
this data.
pull/674/head
Vladimír Vondruš 1 year ago
parent
commit
3c626b00f4
  1. 47
      src/Magnum/Text/Implementation/rendererState.h
  2. 592
      src/Magnum/Text/Renderer.cpp
  3. 470
      src/Magnum/Text/Renderer.h
  4. 4340
      src/Magnum/Text/Test/RendererTest.cpp
  5. 1
      src/Magnum/Text/Text.h

47
src/Magnum/Text/Implementation/rendererState.h

@ -32,13 +32,15 @@
#include <Corrade/Containers/Optional.h> #include <Corrade/Containers/Optional.h>
#include <Corrade/Containers/StridedArrayView.h> #include <Corrade/Containers/StridedArrayView.h>
#include "Magnum/Mesh.h"
#include "Magnum/Math/Range.h" #include "Magnum/Math/Range.h"
#include "Magnum/Text/Alignment.h" #include "Magnum/Text/Alignment.h"
#include "Magnum/Text/Direction.h" #include "Magnum/Text/Direction.h"
namespace Magnum { namespace Text { namespace Magnum { namespace Text {
/* Is inherited by RendererCore::AllocatorState to avoid extra allocations */ /* Is inherited by RendererCore::AllocatorState, Renderer::State and then
RendererGL::State to avoid extra allocations for each class' state */
struct RendererCore::State { struct RendererCore::State {
/* Gets called by RendererCore only if both allocators are specified by the /* Gets called by RendererCore only if both allocators are specified by the
user. If not, AllocatorState is constructed instead. */ user. If not, AllocatorState is constructed instead. */
@ -118,6 +120,49 @@ struct RendererCore::AllocatorState: RendererCore::State {
Containers::Array<char> runData; Containers::Array<char> runData;
}; };
/** @todo this includes the glyphData + runData (+ indexData, vertexData)
members even when they're unused because custom allocators are used, have
some templated RendererCore::AllocatorState that inherits either the
RendererCore::State or Renderer::State and adds one or more of those based
on what all builtin allocators are used? or am I overdoing it for measly 96
byte savings? */
/* State for Renderer. Inherits the glyph- and run-related state from
   RendererCore::AllocatorState and extends it with index / vertex allocators
   plus the views and (for the builtin allocators) backing storage they
   operate on. */
struct Renderer::State: RendererCore::AllocatorState {
    /* Defined in Renderer.cpp because it needs access to default allocator
       implementations */
    explicit State(const AbstractGlyphCache& glyphCache, void(*glyphAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<UnsignedInt>&, Containers::StridedArrayView1D<UnsignedInt>*, Containers::StridedArrayView1D<Vector2>&), void* glyphAllocatorState, void(*const runAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Float>&, Containers::StridedArrayView1D<UnsignedInt>&), void* runAllocatorState, void(*indexAllocator)(void*, UnsignedInt, Containers::ArrayView<char>&), void* indexAllocatorState, void(*vertexAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<Vector2>&), void* vertexAllocatorState, RendererFlags flags);

    /* Index / vertex allocator function pointers and the opaque state
       pointers passed to them. Set in the constructor either to the
       user-supplied allocators or to the builtin ones, in which case the
       state points to the indexData / vertexData members below. */
    void(*const indexAllocator)(void*, UnsignedInt, Containers::ArrayView<char>&);
    void* const indexAllocatorState;
    void(*const vertexAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<Vector2>&);
    void* const vertexAllocatorState;

    /* Smallest index type requested via setIndexType(), and the type
       currently in use, which may be larger based on the glyph capacity */
    MeshIndexType minIndexType = MeshIndexType::UnsignedByte;
    MeshIndexType indexType = MeshIndexType::UnsignedByte;

    /* Views onto the currently allocated index / vertex capacity, updated by
       whatever allocator is in use */
    Containers::ArrayView<char> indices;
    Containers::StridedArrayView1D<Vector2> vertexPositions;
    /* If using an array glyph cache, it can be cast to Vector3 */
    Containers::StridedArrayView1D<Vector2> vertexTextureCoordinates;

    /* Used only if the builtin index / vertex allocators are used */
    Containers::Array<char> indexData;
    Containers::Array<char> vertexData;
};
namespace Implementation {

/* Not used in the state structs above but needed by Renderer. Interleaved
   vertex layouts produced by the builtin allocators -- Vertex for a
   single-layer glyph cache, VertexArray for an array glyph cache where the
   texture coordinates carry a layer index in the third component. */
struct Vertex {
    Vector2 position;
    Vector2 textureCoordinates;
};
struct VertexArray {
    Vector2 position;
    Vector3 textureCoordinates;
};

}
}} }}
#endif #endif

592
src/Magnum/Text/Renderer.cpp

@ -62,7 +62,6 @@
#include <Corrade/Containers/ArrayViewStl.h> /** @todo remove once Renderer is STL-free */ #include <Corrade/Containers/ArrayViewStl.h> /** @todo remove once Renderer is STL-free */
#include <Corrade/Containers/StringStl.h> /** @todo remove once Renderer is STL-free */ #include <Corrade/Containers/StringStl.h> /** @todo remove once Renderer is STL-free */
#include "Magnum/Mesh.h"
#include "Magnum/GL/Context.h" #include "Magnum/GL/Context.h"
#include "Magnum/GL/Extensions.h" #include "Magnum/GL/Extensions.h"
#include "Magnum/GL/Mesh.h" #include "Magnum/GL/Mesh.h"
@ -201,6 +200,8 @@ RendererCore::RendererCore(const AbstractGlyphCache& glyphCache, void(*glyphAllo
Containers::pointer<State>(glyphCache, glyphAllocator, glyphAllocatorState, runAllocator, runAllocatorState, flags) : Containers::pointer<State>(glyphCache, glyphAllocator, glyphAllocatorState, runAllocator, runAllocatorState, flags) :
Containers::pointer<AllocatorState>(glyphCache, glyphAllocator, glyphAllocatorState, runAllocator, runAllocatorState, flags)} {} Containers::pointer<AllocatorState>(glyphCache, glyphAllocator, glyphAllocatorState, runAllocator, runAllocatorState, flags)} {}
RendererCore::RendererCore(Containers::Pointer<State>&& state): _state{Utility::move(state)} {}
RendererCore::RendererCore(NoCreateT) noexcept {} RendererCore::RendererCore(NoCreateT) noexcept {}
RendererCore::RendererCore(RendererCore&&) noexcept = default; RendererCore::RendererCore(RendererCore&&) noexcept = default;
@ -214,7 +215,8 @@ const AbstractGlyphCache& RendererCore::glyphCache() const {
} }
RendererCoreFlags RendererCore::flags() const { RendererCoreFlags RendererCore::flags() const {
return _state->flags; /* Subclasses inherit and add their own flags, mask them away */
return _state->flags & RendererCoreFlags{0x1};
} }
UnsignedInt RendererCore::glyphCount() const { UnsignedInt RendererCore::glyphCount() const {
@ -504,7 +506,8 @@ void RendererCore::resetInternal() {
RendererCore& RendererCore::reset() { RendererCore& RendererCore::reset() {
clear(); clear();
/* Reset also all other settable state to defaults */ /* Reset also all other settable state to defaults. Is in a separate helper
because it gets called from Renderer::reset() as well. */
resetInternal(); resetInternal();
return *this; return *this;
@ -772,6 +775,589 @@ Containers::Pair<Range2D, Range1Dui> RendererCore::render(AbstractShaper& shaper
return render(shaper, size, text, Containers::arrayView(features)); return render(shaper, size, text, Containers::arrayView(features));
} }
/* Prints a single flag as e.g. Text::RendererFlag::GlyphPositionsClusters,
   or an unknown value as a hex number in parentheses */
Debug& operator<<(Debug& debug, const RendererFlag value) {
    /* Common prefix, printed for known and unknown values alike */
    debug << "Text::RendererFlag" << Debug::nospace;

    /* LCOV_EXCL_START */
    switch(value) {
        case RendererFlag::GlyphPositionsClusters:
            return debug << "::GlyphPositionsClusters";
    }
    /* LCOV_EXCL_STOP */

    /* Not a known flag, print the raw value in hex */
    return debug << "(" << Debug::nospace << Debug::hex << UnsignedByte(value) << Debug::nospace << ")";
}
/* Prints a whole flag set, as Text::RendererFlags{} if empty */
Debug& operator<<(Debug& debug, const RendererFlags value) {
    return Containers::enumSetDebugOutput(debug, value, "Text::RendererFlags{}", {
        RendererFlag::GlyphPositionsClusters
    });
}
namespace {

/* Returns the builtin glyph allocator for given flags, or nullptr if the
   default RendererCore allocator should be used instead. The Vertex template
   parameter is Implementation::Vertex or Implementation::VertexArray
   depending on whether the glyph cache has array layers. */
template<class Vertex> auto defaultGlyphAllocatorFor(const RendererFlags flags, const bool hasCustomVertexAllocator) -> void(*)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<UnsignedInt>&, Containers::StridedArrayView1D<UnsignedInt>*, Containers::StridedArrayView1D<Vector2>&) {
    /* If glyph positions and clusters are meant to be preserved, or if a
       custom vertex allocator is used and thus shouldn't allocate the whole
       vertex data again just to store glyph data inside, use the default
       RendererCore allocator */
    /** @todo it will still result in IDs being allocated and then never used
        after, provide a custom allocator for this case as well */
    if(flags >= RendererFlag::GlyphPositionsClusters || hasCustomVertexAllocator)
        return nullptr;

    /* Otherwise place the glyph data into the (four times larger) vertex
       data allocation, avoiding a second allocation. The state is then the
       vertexData array owned by Renderer::State. */
    return [](void* const state, const UnsignedInt glyphCount, Containers::StridedArrayView1D<Vector2>& glyphPositions, Containers::StridedArrayView1D<UnsignedInt>& glyphIds, Containers::StridedArrayView1D<UnsignedInt>*, Containers::StridedArrayView1D<Vector2>& glyphAdvances) {
        Containers::Array<char>& vertexData = *static_cast<Containers::Array<char>*>(state);
        const std::size_t existingSize = glyphPositions.size();
        /* Four vertices per glyph */
        const std::size_t desiredByteSize = 4*(existingSize + glyphCount)*sizeof(Vertex);
        if(desiredByteSize > vertexData.size()) {
            /* Using arrayAppend() as it reallocates with a growth strategy,
               arrayResize() would take the size literally */
            arrayAppend(vertexData, NoInit, desiredByteSize - vertexData.size());
        }

        const Containers::StridedArrayView1D<Vertex> vertices = Containers::arrayCast<Vertex>(vertexData);
        /* As each glyph turns into four vertices, we have plenty of space to
           store everything. Glyph positions occupy the position of each first
           vertex, */
        glyphPositions = vertices.slice(&Vertex::position).every(4);
        /* glyph IDs the first four bytes of the texture coordinates of each
           first vertex, */
        glyphIds = Containers::arrayCast<UnsignedInt>(vertices.slice(&Vertex::textureCoordinates)).every(4);
        /* and advances the position of each *second* vertex from the
           yet-unused suffix. If we have no vertex data at all however, which
           can happen when calling clear() right after construction, don't
           slice away any prefix to avoid OOB access. */
        glyphAdvances = vertices.slice(&Vertex::position).exceptPrefix(
            vertexData.size() ? existingSize*4 + 1 : 0
        ).every(4);
    };
}
void defaultIndexAllocator(void* state, UnsignedInt size, Containers::ArrayView<char>& indices) {
Containers::Array<char>& indexData = *static_cast<Containers::Array<char>*>(state);
const std::size_t desiredByteSize = indices.size() + size;
if(desiredByteSize > indexData.size()) {
/* Using arrayAppend() as it reallocates with a growth strategy,
arrayResize() would take the size literally */
arrayAppend(indexData, NoInit, desiredByteSize - indexData.size());
}
indices = indexData;
}
/* Returns the builtin vertex allocator for given flags. The state passed to
   the returned lambda is the vertexData array owned by Renderer::State. The
   Vertex template parameter is Implementation::Vertex or
   Implementation::VertexArray, matching defaultGlyphAllocatorFor() above. */
template<class Vertex> auto defaultVertexAllocatorFor(const RendererFlags flags, const bool hasCustomGlyphAllocator) -> void(*)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<Vector2>&) {
    /* If glyph positions and clusters are meant to be preserved, or if a
       custom glyph allocator is used so there's no data sharing between the
       two, vertices are in a separate allocation. The second branch part
       is explicitly verified in the indicesVertices(custom glyph allocator)
       test. */
    if(flags >= RendererFlag::GlyphPositionsClusters || hasCustomGlyphAllocator)
        return [](void* const state, const UnsignedInt vertexCount, Containers::StridedArrayView1D<Vector2>& vertexPositions, Containers::StridedArrayView1D<Vector2>& vertexTextureCoordinates) {
            Containers::Array<char>& vertexData = *static_cast<Containers::Array<char>*>(state);
            const std::size_t desiredByteSize = (vertexPositions.size() + vertexCount)*sizeof(Vertex);
            if(desiredByteSize > vertexData.size()) {
                /* Using arrayAppend() as it reallocates with a growth
                   strategy, arrayResize() would take the size literally */
                arrayAppend(vertexData, NoInit, desiredByteSize - vertexData.size());
            }

            const Containers::StridedArrayView1D<Vertex> vertices = Containers::arrayCast<Vertex>(vertexData);
            vertexPositions = vertices.slice(&Vertex::position);
            /* The texture coordinates are Vector3 for array glyph caches, the
               allocator wants just a two-component prefix with an assumption
               that the third component is there too. Can't use
               .slice(&Vector3::xy) because the type may be Vector2. */
            vertexTextureCoordinates = Containers::arrayCast<Vector2>(vertices.slice(&Vertex::textureCoordinates));
        };

    /* If not, vertices share the allocation with glyph properties, and since
       they're always allocated after, the size should be sufficient and it's
       just about redirecting the views to new memory */
    else
        return [](void* const state, const UnsignedInt
            #ifndef CORRADE_NO_ASSERT
            vertexCount
            #endif
        , Containers::StridedArrayView1D<Vector2>& vertexPositions, Containers::StridedArrayView1D<Vector2>& vertexTextureCoordinates)
        {
            Containers::Array<char>& vertexData = *static_cast<Containers::Array<char>*>(state);
            /* As both the glyph allocator and vertex allocator share the same
               array, the assumption is that the glyph allocator already
               enlarged the array for all needed glyphs. Or this allocator is
               called from clear() with zero vertex count, in which case the
               array size can be whatever. */
            CORRADE_INTERNAL_ASSERT((vertexPositions.size() + vertexCount)*sizeof(Vertex) == vertexData.size() || vertexCount == 0);

            const Containers::StridedArrayView1D<Vertex> vertices = Containers::arrayCast<Vertex>(vertexData);
            vertexPositions = vertices.slice(&Vertex::position);
            /* The texture coordinates are Vector3 for array glyph caches, the
               allocator wants just a two-component prefix with an assumption
               that the third component is there too. Can't use
               .slice(&Vector3::xy) because the type may be Vector2. */
            vertexTextureCoordinates = Containers::arrayCast<Vector2>(vertices.slice(&Vertex::textureCoordinates));
        };
}

}
/* Picks either the user-supplied allocators or the builtin ones declared
   above, based on which allocator pointers are null and whether the glyph
   cache has array layers */
Renderer::State::State(const AbstractGlyphCache& glyphCache, void(*glyphAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<UnsignedInt>&, Containers::StridedArrayView1D<UnsignedInt>*, Containers::StridedArrayView1D<Vector2>&), void* glyphAllocatorState, void(*const runAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Float>&, Containers::StridedArrayView1D<UnsignedInt>&), void* runAllocatorState, void(*indexAllocator)(void*, UnsignedInt, Containers::ArrayView<char>&), void* indexAllocatorState, void(*vertexAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<Vector2>&), void* vertexAllocatorState, RendererFlags flags):
    RendererCore::AllocatorState{glyphCache,
        glyphAllocator ? glyphAllocator :
            glyphCache.size().z() == 1 ?
                defaultGlyphAllocatorFor<Implementation::Vertex>(flags, !!vertexAllocator) :
                defaultGlyphAllocatorFor<Implementation::VertexArray>(flags, !!vertexAllocator),
        /* The defaultGlyphAllocatorFor() puts glyph data into the same
           allocation as vertex data so it's `&vertexData`, not `&glyphData`
           here. If such sharing isn't desired because the glyph data need to
           be accessible etc., defaultGlyphAllocatorFor() returns nullptr,
           which then causes `&vertexData` to be ignored and RendererCore then
           picks its own default allocator and `&glyphData`. */
        glyphAllocator ? glyphAllocatorState : &vertexData,
        runAllocator, runAllocatorState,
        /* All RendererFlags are stored in the inherited RendererCoreFlags
           field; RendererCore::flags() masks the extra bits away */
        RendererCoreFlags{UnsignedByte(flags)}},
    indexAllocator{indexAllocator ? indexAllocator : defaultIndexAllocator},
    indexAllocatorState{indexAllocator ? indexAllocatorState : &indexData},
    vertexAllocator{vertexAllocator ? vertexAllocator :
        glyphCache.size().z() == 1 ?
            defaultVertexAllocatorFor<Implementation::Vertex>(flags, !!glyphAllocator) :
            defaultVertexAllocatorFor<Implementation::VertexArray>(flags, !!glyphAllocator)},
    vertexAllocatorState{vertexAllocator ? vertexAllocatorState : &vertexData} {}
/* Delegates to the RendererCore constructor taking an externally created
   State, with a Renderer::State instance that additionally carries the index
   and vertex allocators */
Renderer::Renderer(const AbstractGlyphCache& glyphCache, void(*glyphAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<UnsignedInt>&, Containers::StridedArrayView1D<UnsignedInt>*, Containers::StridedArrayView1D<Vector2>&), void* glyphAllocatorState, void(*runAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Float>&, Containers::StridedArrayView1D<UnsignedInt>&), void* runAllocatorState, void(*indexAllocator)(void*, UnsignedInt, Containers::ArrayView<char>&), void* indexAllocatorState, void(*vertexAllocator)(void*, UnsignedInt, Containers::StridedArrayView1D<Vector2>&, Containers::StridedArrayView1D<Vector2>&), void* vertexAllocatorState, RendererFlags flags): RendererCore{Containers::pointer<State>(glyphCache, glyphAllocator, glyphAllocatorState, runAllocator, runAllocatorState, indexAllocator, indexAllocatorState, vertexAllocator, vertexAllocatorState, flags)} {}

Renderer::Renderer(Renderer&&) noexcept = default;

Renderer::~Renderer() = default;

Renderer& Renderer::operator=(Renderer&&) noexcept = default;
RendererFlags Renderer::flags() const {
    /* The constructor stored all RendererFlags in the inherited
       RendererCoreFlags field, so a plain value cast recovers them */
    return RendererFlags{UnsignedByte(_state->flags)};
}
namespace {

/* Like meshIndexTypeSize() but inline, constexpr, branchless and without
   assertions */
constexpr UnsignedInt indexTypeSize(MeshIndexType type) {
    /* Relies on the enum values being consecutive for the 1-, 2- and 4-byte
       types, which the static_assert below verifies */
    return 1 << (int(type) - 1);
}
static_assert(
    indexTypeSize(MeshIndexType::UnsignedByte) == sizeof(UnsignedByte) &&
    indexTypeSize(MeshIndexType::UnsignedShort) == sizeof(UnsignedShort) &&
    indexTypeSize(MeshIndexType::UnsignedInt) == sizeof(UnsignedInt),
    "broken assumptions about MeshIndexType values matching type sizes");

}
UnsignedInt Renderer::glyphIndexCapacity() const {
    const State& state = static_cast<const State&>(*_state);
    /* Six indices per glyph quad; the index capacity is always kept at a
       whole number of glyphs by allocateIndices() */
    CORRADE_INTERNAL_DEBUG_ASSERT(state.indices.size() % 6 == 0);
    return state.indices.size()/(6*indexTypeSize(state.indexType));
}
UnsignedInt Renderer::glyphVertexCapacity() const {
    const State& state = static_cast<const State&>(*_state);
    /* Four vertices per glyph quad; the vertex capacity is always kept at a
       whole number of glyphs by allocateVertices() */
    CORRADE_INTERNAL_DEBUG_ASSERT(state.vertexPositions.size() % 4 == 0);
    return state.vertexPositions.size()/4;
}
/* The currently used index type, which may be larger than what was set via
   setIndexType() based on the glyph capacity */
MeshIndexType Renderer::indexType() const {
    return static_cast<const State&>(*_state).indexType;
}
namespace {

/* Used by setIndexType() and allocateIndices(). Picks the smallest type able
   to index the vertices of given glyph count -- 8-bit for up to 64 glyphs
   (256 vertices), 16-bit for up to 16384 glyphs (65536 vertices), 32-bit
   otherwise -- but never anything smaller than minType. */
MeshIndexType indexTypeFor(MeshIndexType minType, UnsignedInt glyphCount) {
    const MeshIndexType minTypeForGlyphCount =
        glyphCount > 16384 ? MeshIndexType::UnsignedInt :
        glyphCount > 64 ? MeshIndexType::UnsignedShort :
                          MeshIndexType::UnsignedByte;
    return Utility::max(minType, minTypeForGlyphCount);
}

}
/* Sets the smallest index type to use. The actual type may still get
   enlarged based on the current capacity, in which case the index array is
   reallocated and refilled. */
Renderer& Renderer::setIndexType(const MeshIndexType type) {
    State& state = static_cast<State&>(*_state);
    CORRADE_ASSERT(!state.rendering,
        "Text::Renderer::setIndexType(): rendering in progress", *this);

    /* Remember the type as the smallest index type we can use going forward */
    state.minIndexType = type;

    /* If the capacity is zero, just update the currently used index type
       without calling an allocator */
    if(state.glyphPositions.isEmpty()) {
        state.indexType = type;

    /* Otherwise, if the index type for current capacity is now different from
       what's currently used, reallocate the indices fully */
    } else if(indexTypeFor(type, state.glyphPositions.size()) != state.indexType) {
        /* In particular, the allocator gets a zero-sized prefix of the view
           it returned last time (*not* just nullptr), to hint that it can
           reallocate without preserving any contents at all */
        state.indices = state.indices.prefix(0);
        allocateIndices(
            #ifndef CORRADE_NO_ASSERT
            "Text::Renderer::setIndexType():",
            #endif
            state.glyphPositions.size()
        );
    }

    return *this;
}
/* Glyph positions of all rendered glyphs so far. Only available with
   RendererFlag::GlyphPositionsClusters, as otherwise the builtin glyph
   allocator overwrites them with vertex data. */
Containers::StridedArrayView1D<const Vector2> Renderer::glyphPositions() const {
    const State& state = static_cast<const State&>(*_state);
    CORRADE_ASSERT(RendererFlags(UnsignedByte(state.flags)) >= RendererFlag::GlyphPositionsClusters,
        "Text::Renderer::glyphPositions(): glyph positions and clusters not enabled", {});
    return state.glyphPositions.prefix(state.glyphCount);
}
/* Glyph cluster IDs of all rendered glyphs so far. Like glyphPositions(),
   only available with RendererFlag::GlyphPositionsClusters. */
Containers::StridedArrayView1D<const UnsignedInt> Renderer::glyphClusters() const {
    const State& state = static_cast<const State&>(*_state);
    CORRADE_ASSERT(RendererFlags(UnsignedByte(state.flags)) >= RendererFlag::GlyphPositionsClusters,
        "Text::Renderer::glyphClusters(): glyph positions and clusters not enabled", {});
    return state.glyphClusters.prefix(state.glyphCount);
}
/* Type-erased index access -- a 2D view where the first dimension is
   individual indices and the second their bytes, usable independently of the
   currently used index type */
Containers::StridedArrayView2D<const char> Renderer::indices() const {
    const State& state = static_cast<const State&>(*_state);
    const UnsignedInt typeSize = indexTypeSize(state.indexType);
    return stridedArrayView(state.indices.prefix(state.glyphCount*6*typeSize)).expanded<0, 2>({state.glyphCount*6, typeSize});
}
/* Typed index access. On Windows (MSVC, clang-cl and MinGw) these need an
   explicit export otherwise the specializations don't get exported. Each
   asserts that the requested type matches the currently used index type. */
template<> MAGNUM_TEXT_EXPORT Containers::ArrayView<const UnsignedByte> Renderer::indices<UnsignedByte>() const {
    const State& state = static_cast<const State&>(*_state);
    CORRADE_ASSERT(state.indexType == MeshIndexType::UnsignedByte,
        "Text::Renderer::indices(): cannot retrieve" << state.indexType << "as an UnsignedByte", {});
    return Containers::arrayCast<UnsignedByte>(state.indices).prefix(state.glyphCount*6);
}
template<> MAGNUM_TEXT_EXPORT Containers::ArrayView<const UnsignedShort> Renderer::indices<UnsignedShort>() const {
    const State& state = static_cast<const State&>(*_state);
    CORRADE_ASSERT(state.indexType == MeshIndexType::UnsignedShort,
        "Text::Renderer::indices(): cannot retrieve" << state.indexType << "as an UnsignedShort", {});
    return Containers::arrayCast<UnsignedShort>(state.indices).prefix(state.glyphCount*6);
}
template<> MAGNUM_TEXT_EXPORT Containers::ArrayView<const UnsignedInt> Renderer::indices<UnsignedInt>() const {
    const State& state = static_cast<const State&>(*_state);
    CORRADE_ASSERT(state.indexType == MeshIndexType::UnsignedInt,
        "Text::Renderer::indices(): cannot retrieve" << state.indexType << "as an UnsignedInt", {});
    return Containers::arrayCast<UnsignedInt>(state.indices).prefix(state.glyphCount*6);
}
/* Vertex positions of all rendered glyphs so far, four per glyph */
Containers::StridedArrayView1D<const Vector2> Renderer::vertexPositions() const {
    const State& state = static_cast<const State&>(*_state);
    return state.vertexPositions.prefix(state.glyphCount*4);
}
/* Two-dimensional texture coordinates, only available with a non-array glyph
   cache; use vertexTextureArrayCoordinates() otherwise */
Containers::StridedArrayView1D<const Vector2> Renderer::vertexTextureCoordinates() const {
    const State& state = static_cast<const State&>(*_state);
    CORRADE_ASSERT(state.glyphCache.size().z() == 1,
        "Text::Renderer::vertexTextureCoordinates(): cannot retrieve two-dimensional coordinates with an array glyph cache", {});
    return state.vertexTextureCoordinates.prefix(state.glyphCount*4);
}
/* Three-dimensional texture coordinates, only available with an array glyph
   cache. The stored Vector2 view is cast to Vector3, which the vertex
   allocator contract guarantees to be safe (see the stride assertion in
   allocateVertices()). */
Containers::StridedArrayView1D<const Vector3> Renderer::vertexTextureArrayCoordinates() const {
    const State& state = static_cast<const State&>(*_state);
    CORRADE_ASSERT(state.glyphCache.size().z() != 1,
        "Text::Renderer::vertexTextureArrayCoordinates(): cannot retrieve three-dimensional coordinates with a non-array glyph cache", {});
    return Containers::arrayCast<Vector3>(state.vertexTextureCoordinates.prefix(state.glyphCount*4));
}
/* Grows (or, on index type change, fully reallocates) the index storage to
   hold totalGlyphCount glyph quads, and fills the newly allocated indices
   right away. The messagePrefix identifies the public API for assertion
   messages. */
void Renderer::allocateIndices(
    #ifndef CORRADE_NO_ASSERT
    const char* const messagePrefix,
    #endif
    const UnsignedInt totalGlyphCount)
{
    State& state = static_cast<State&>(*_state);
    /* The data allocated by RendererCore should already be at this size or
       more, since allocateGlyphs() is always called before this function. */
    CORRADE_INTERNAL_ASSERT(state.glyphPositions.size() >= totalGlyphCount);
    /* This function should only be called if we need more memory, from clear()
       with everything empty or from setIndexType() if the type changes (where
       it sets `state.indices` to an empty prefix).

       The expectation is that `state.indices` is only as large as makes sense
       for given `state.indexType`, as is done below. */
    CORRADE_INTERNAL_DEBUG_ASSERT(6*totalGlyphCount*indexTypeSize(state.indexType) > state.indices.size() || (state.glyphCount == 0 && state.renderingGlyphCount == 0 && totalGlyphCount == 0));

    /* Figure out index type needed for this glyph count. If it's different or
       we're called from clear() with totalGlyphCount being 0, we're replacing
       the whole index array. If it's not, we're generating just the extra
       indices. */
    const MeshIndexType indexType = indexTypeFor(state.minIndexType, totalGlyphCount);
    const UnsignedInt typeSize = indexTypeSize(indexType);
    UnsignedInt previousFilledSize;
    if(indexType != state.indexType || totalGlyphCount == 0) {
        previousFilledSize = 0;
        state.indexType = indexType;
    } else {
        previousFilledSize = state.indices.size();
    }

    /* Sliced copy of the view for the allocator to update */
    Containers::ArrayView<char> indices = state.indices.prefix(previousFilledSize);
    /* While this function gets total glyph count, the allocator gets byte
       count to grow by */
    state.indexAllocator(state.indexAllocatorState,
        totalGlyphCount*6*typeSize - previousFilledSize,
        indices);

    /* Cap the returned capacity to just what's possible to represent with
       given type size. E.g., for an 8-bit type it can represent indices only
       for 256 vertices / 64 glyphs at most, which is 384 indices, thus is
       never larger than 384 bytes. */
    const UnsignedInt glyphCapacity = Math::min(
        /* 64 for 1-byte indices, 16k for 2-byte, 1G for 4-byte */
        1u << (8*typeSize - 2),
        UnsignedInt(indices.size()/(6*typeSize)));
    /* These assertions are present even for the builtin allocator but
       shouldn't fire. If they do, the whole thing is broken, but it's better
       to blow up with a nice message than with some strange OOB error later */
    CORRADE_ASSERT(glyphCapacity >= totalGlyphCount,
        messagePrefix << "expected allocated indices to have at least" << totalGlyphCount*6*typeSize << "bytes but got" << indices.size(), );
    state.indices = indices.prefix(glyphCapacity*6*typeSize);

    /* Fill the indices during allocation already as they're not dependent on
       the contents in any way */
    const UnsignedInt glyphOffset = previousFilledSize/(6*typeSize);
    const Containers::ArrayView<char> indicesToFill = state.indices.exceptPrefix(previousFilledSize);
    if(indexType == MeshIndexType::UnsignedByte)
        renderGlyphQuadIndicesInto(glyphOffset, Containers::arrayCast<UnsignedByte>(indicesToFill));
    else if(indexType == MeshIndexType::UnsignedShort)
        renderGlyphQuadIndicesInto(glyphOffset, Containers::arrayCast<UnsignedShort>(indicesToFill));
    else if(indexType == MeshIndexType::UnsignedInt)
        renderGlyphQuadIndicesInto(glyphOffset, Containers::arrayCast<UnsignedInt>(indicesToFill));
    else CORRADE_INTERNAL_ASSERT_UNREACHABLE(); /* LCOV_EXCL_LINE */
}
/* Grows the vertex storage to hold totalGlyphCount glyph quads (four
   vertices each). Unlike allocateIndices() the contents are not filled here,
   that's done in render(). The messagePrefix identifies the public API for
   assertion messages. */
void Renderer::allocateVertices(
    #ifndef CORRADE_NO_ASSERT
    const char* const messagePrefix,
    #endif
    const UnsignedInt totalGlyphCount)
{
    State& state = static_cast<State&>(*_state);
    /* The data allocated by RendererCore should already be at this size or
       more, since allocateGlyphs() is always called before this function */
    CORRADE_INTERNAL_ASSERT(state.glyphPositions.size() >= totalGlyphCount);
    /* This function should only be called if we need more memory or from
       clear() with everything empty */
    CORRADE_INTERNAL_DEBUG_ASSERT(4*totalGlyphCount > state.vertexPositions.size() || (state.glyphCount == 0 && totalGlyphCount == 0));

    /* Sliced copies of the views for the allocator to update. Unlike with
       allocateGlyphs(), where `state.renderingGlyphCount` is used because it
       gets called from add(), this is called with `state.glyphCount` because
       it's only called from render(), and so the vertex capacity may not yet
       include space for the in-progress glyphs. */
    Containers::StridedArrayView1D<Vector2> vertexPositions =
        state.vertexPositions.prefix(state.glyphCount*4);
    Containers::StridedArrayView1D<Vector2> vertexTextureCoordinates =
        state.vertexTextureCoordinates.prefix(state.glyphCount*4);
    /* While this function gets total glyph count, the allocator gets vertex
       count to grow by instead */
    state.vertexAllocator(state.vertexAllocatorState, (totalGlyphCount - state.glyphCount)*4,
        vertexPositions,
        vertexTextureCoordinates);

    /* Take the smallest size of both as the new vertex capacity */
    const std::size_t minGlyphCapacity = Math::min({
        vertexPositions.size()/4,
        vertexTextureCoordinates.size()/4});
    /* These assertions are present even for the builtin allocator but
       shouldn't fire. If they do, the whole thing is broken, but it's better
       to blow up with a nice message than with some strange OOB error later */
    CORRADE_ASSERT(minGlyphCapacity >= totalGlyphCount,
        messagePrefix << "expected allocated vertex positions and texture coordinates to have at least" << totalGlyphCount*4 << "elements but got" << vertexPositions.size() << "and" << vertexTextureCoordinates.size(), );
    /* With an array glyph cache the two-component coordinate view must leave
       room for a third component, so it can be cast to Vector3 later */
    CORRADE_ASSERT(state.glyphCache.size().z() == 1 || std::size_t(Math::abs(vertexTextureCoordinates.stride())) >= sizeof(Vector3),
        messagePrefix << "expected allocated texture coordinates to have a stride large enough to fit a Vector3 but got only" << Math::abs(vertexTextureCoordinates.stride()) << "bytes", );

    /* Keep just the minimal size for both, which is the new capacity */
    state.vertexPositions = vertexPositions.prefix(minGlyphCapacity *4);
    state.vertexTextureCoordinates = vertexTextureCoordinates.prefix(minGlyphCapacity *4);
}
/* Discards all rendered glyphs but keeps allocated capacity and settable
   state such as alignment or the index type */
Renderer& Renderer::clear() {
    RendererCore::clear();

    /* Give the vertex allocator a chance to clear its state as well. Not
       calling allocateIndices() with 0 because it makes no sense to
       regenerate the index buffer to the exact same contents on every clear */
    allocateVertices(
        #ifndef CORRADE_NO_ASSERT
        "", /* Asserts won't happen as returned sizes will be always >= 0 */
        #endif
        0);

    return *this;
}
/* Like clear(), but additionally resets all settable state to defaults */
Renderer& Renderer::reset() {
    /* Compared to RendererCore::reset() this calls our clear() instead of
       RendererCore::clear() */
    clear();
    resetInternal();
    return *this;
}
/* Preallocates glyph, run, index and vertex storage so subsequent add() and
   render() calls don't need to reallocate */
Renderer& Renderer::reserve(const UnsignedInt glyphCapacity, const UnsignedInt runCapacity) {
    State& state = static_cast<State&>(*_state);

    /* Reserve glyph and run capacity. It's possible that there's already
       enough glyph/run capacity but the index/vertex capacity not yet because
       glyphs/runs get allocated during add() already and index/vertex only
       during the final render(). */
    RendererCore::reserve(glyphCapacity, runCapacity);

    /* Reserve (and fill) indices if there's too little of them for the
       required glyph capacity. Done separately from vertex allocation because
       each of the allocations can have a different growth pattern and the
       index type can change during the renderer lifetime.

       The expectation is that `state.indices` is only as large as makes sense
       for given `state.indexType` (e.g., for an 8-bit type it can represent
       indices only for 256 vertices / 64 glyphs at most, which is 384 indices,
       thus is never larger than 384 bytes). */
    if(state.indices.size() < glyphCapacity*6*indexTypeSize(state.indexType))
        allocateIndices(
            #ifndef CORRADE_NO_ASSERT
            "Text::Renderer::reserve():",
            #endif
            glyphCapacity);

    /* Reserve vertices if there's too little of them for the required glyph
       capacity */
    if(state.vertexPositions.size() < glyphCapacity*4)
        allocateVertices(
            #ifndef CORRADE_NO_ASSERT
            "Text::Renderer::reserve():",
            #endif
            glyphCapacity);

    return *this;
}
/* Finishes the in-progress rendering, producing index and vertex data for
   all glyphs added since the last render(). Returns the rendered bounding
   box and the range of text runs the glyphs belong to. */
Containers::Pair<Range2D, Range1Dui> Renderer::render() {
    State& state = static_cast<State&>(*_state);

    /* If we need to generate more indices / vertices than what's in the
       capacity, allocate more. The logic is the same as in reserve(), see
       there for more information.

       This has to be called before RendererCore::render() in order to know
       which glyphs have only positions + IDs (state.renderingGlyphCount) and
       which have also index and vertex data (state.glyphCount). The
       RendererCore::render() then makes both values the same. */
    if(state.indices.size() < state.renderingGlyphCount*6*indexTypeSize(state.indexType))
        allocateIndices(
            #ifndef CORRADE_NO_ASSERT
            "Text::Renderer::render():",
            #endif
            state.renderingGlyphCount);
    if(state.vertexPositions.size() < state.renderingGlyphCount*4)
        allocateVertices(
            #ifndef CORRADE_NO_ASSERT
            "Text::Renderer::render():",
            #endif
            state.renderingGlyphCount);
    #ifdef CORRADE_GRACEFUL_ASSERT
    /* For testing only -- if vertex allocation failed, bail. Indices are only
       touched in allocateIndices(), so if allocateIndices() fails we don't
       need to exit here. */
    if(state.vertexPositions.size() < state.renderingGlyphCount*4)
        return {};
    #endif

    /* Finish rendering of glyph positions and IDs */
    const bool isArray = state.glyphCache.size().z() > 1;
    const Containers::Pair<Range2D, Range1Dui> out = RendererCore::render();

    /* Populate vertex data for all runs returned in the second pair element.
       Each run covers glyphs from the end of the previous run (or 0 for the
       first) up to its own end, rendered with the run's scale. */
    UnsignedInt glyphBegin = out.second().min() ? state.runEnds[out.second().min() - 1] : 0;
    for(UnsignedInt run = out.second().min(), runEnd = out.second().max(); run != runEnd; ++run) {
        const UnsignedInt glyphEnd = state.runEnds[run];
        const Containers::StridedArrayView1D<const Vector2> glyphPositions = state.glyphPositions.slice(glyphBegin, glyphEnd);
        const Containers::StridedArrayView1D<const UnsignedInt> glyphIds = state.glyphIds.slice(glyphBegin, glyphEnd);
        /* Four vertices per glyph */
        const Containers::StridedArrayView1D<Vector2> vertexPositions = state.vertexPositions.slice(4*glyphBegin, 4*glyphEnd);
        const Containers::StridedArrayView1D<Vector2> vertexTextureCoordinates = state.vertexTextureCoordinates.slice(4*glyphBegin, 4*glyphEnd);
        /* With an array glyph cache the coordinate view is cast to Vector3,
           which the allocator contract guarantees to fit */
        if(!isArray) renderGlyphQuadsInto(state.glyphCache,
            state.runScales[run],
            glyphPositions,
            glyphIds,
            vertexPositions,
            vertexTextureCoordinates);
        else renderGlyphQuadsInto(state.glyphCache,
            state.runScales[run],
            glyphPositions,
            glyphIds,
            vertexPositions,
            Containers::arrayCast<Vector3>(vertexTextureCoordinates));
        glyphBegin = glyphEnd;
    }

    return out;
}
Renderer& Renderer::add(AbstractShaper& shaper, const Float size, const Containers::StringView text, const UnsignedInt begin, const UnsignedInt end, const Containers::ArrayView<const FeatureRange> features) {
    /* Forward to the base class implementation; the cast only narrows the
       returned reference back to Renderer so method chaining stays typed */
    RendererCore& base = RendererCore::add(shaper, size, text, begin, end, features);
    return static_cast<Renderer&>(base);
}
Renderer& Renderer::add(AbstractShaper& shaper, const Float size, const Containers::StringView text, const UnsignedInt begin, const UnsignedInt end) {
    /* Forward to the base class implementation; the cast only narrows the
       returned reference back to Renderer so method chaining stays typed */
    RendererCore& base = RendererCore::add(shaper, size, text, begin, end);
    return static_cast<Renderer&>(base);
}
Renderer& Renderer::add(AbstractShaper& shaper, const Float size, const Containers::StringView text, const UnsignedInt begin, const UnsignedInt end, const std::initializer_list<FeatureRange> features) {
    /* Forward to the base class implementation; the cast only narrows the
       returned reference back to Renderer so method chaining stays typed */
    RendererCore& base = RendererCore::add(shaper, size, text, begin, end, features);
    return static_cast<Renderer&>(base);
}
Renderer& Renderer::add(AbstractShaper& shaper, const Float size, const Containers::StringView text, const Containers::ArrayView<const FeatureRange> features) {
    /* Forward to the base class implementation; the cast only narrows the
       returned reference back to Renderer so method chaining stays typed */
    RendererCore& base = RendererCore::add(shaper, size, text, features);
    return static_cast<Renderer&>(base);
}
Renderer& Renderer::add(AbstractShaper& shaper, const Float size, const Containers::StringView text) {
    /* Forward to the base class implementation; the cast only narrows the
       returned reference back to Renderer so method chaining stays typed */
    RendererCore& base = RendererCore::add(shaper, size, text);
    return static_cast<Renderer&>(base);
}
Renderer& Renderer::add(AbstractShaper& shaper, const Float size, const Containers::StringView text, const std::initializer_list<FeatureRange> features) {
    /* Forward to the base class implementation; the cast only narrows the
       returned reference back to Renderer so method chaining stays typed */
    RendererCore& base = RendererCore::add(shaper, size, text, features);
    return static_cast<Renderer&>(base);
}
Containers::Pair<Range2D, Range1Dui> Renderer::render(AbstractShaper& shaper, const Float size, const Containers::StringView text, const Containers::ArrayView<const FeatureRange> features) {
    /* Unlike the equivalent RendererCore API this has to go through our own
       render() so the index and vertex data get populated as well. add()
       returns a Renderer&, so the two calls can be chained directly. */
    return add(shaper, size, text, features).render();
}
/* Convenience overload without features; the empty braced argument selects a
   features overload via overload resolution */
Containers::Pair<Range2D, Range1Dui> Renderer::render(AbstractShaper& shaper, const Float size, const Containers::StringView text) {
    return render(shaper, size, text, {});
}
Containers::Pair<Range2D, Range1Dui> Renderer::render(AbstractShaper& shaper, const Float size, const Containers::StringView text, const std::initializer_list<FeatureRange> features) {
    /* Wrap the initializer list in a view and delegate to the ArrayView
       overload */
    const Containers::ArrayView<const FeatureRange> featureView = Containers::arrayView(features);
    return render(shaper, size, text, featureView);
}
Range2D renderLineGlyphPositionsInto(const AbstractFont& font, const Float size, const LayoutDirection direction, const Containers::StridedArrayView1D<const Vector2>& glyphOffsets, const Containers::StridedArrayView1D<const Vector2>& glyphAdvances, Vector2& cursor, const Containers::StridedArrayView1D<Vector2>& glyphPositions) { Range2D renderLineGlyphPositionsInto(const AbstractFont& font, const Float size, const LayoutDirection direction, const Containers::StridedArrayView1D<const Vector2>& glyphOffsets, const Containers::StridedArrayView1D<const Vector2>& glyphAdvances, Vector2& cursor, const Containers::StridedArrayView1D<Vector2>& glyphPositions) {
CORRADE_ASSERT(glyphAdvances.size() == glyphOffsets.size() && CORRADE_ASSERT(glyphAdvances.size() == glyphOffsets.size() &&
glyphPositions.size() == glyphOffsets.size(), glyphPositions.size() == glyphOffsets.size(),

470
src/Magnum/Text/Renderer.h

@ -27,7 +27,7 @@
*/ */
/** @file /** @file
* @brief Class @ref Magnum::Text::RendererCore, @ref Magnum::Text::AbstractRenderer, typedef @ref Magnum::Text::Renderer2D, @ref Magnum::Text::Renderer3D, function @ref Magnum::Text::renderLineGlyphPositionsInto(), @ref Magnum::Text::renderGlyphQuadsInto(), @ref Magnum::Text::glyphQuadBounds(), @ref Magnum::Text::alignRenderedLine(), @ref Magnum::Text::alignRenderedBlock(), @ref Magnum::Text::renderGlyphQuadIndicesInto(), @ref Magnum::Text::glyphRangeForBytes() * @brief Class @ref Magnum::Text::RendererCore, @ref Magnum::Text::Renderer, @ref Magnum::Text::AbstractRenderer, typedef @ref Magnum::Text::Renderer2D, @ref Magnum::Text::Renderer3D, function @ref Magnum::Text::renderLineGlyphPositionsInto(), @ref Magnum::Text::renderGlyphQuadsInto(), @ref Magnum::Text::glyphQuadBounds(), @ref Magnum::Text::alignRenderedLine(), @ref Magnum::Text::alignRenderedBlock(), @ref Magnum::Text::renderGlyphQuadIndicesInto(), @ref Magnum::Text::glyphRangeForBytes()
*/ */
#include <initializer_list> #include <initializer_list>
@ -67,6 +67,9 @@ enum class RendererCoreFlag: UnsignedByte {
* text selection and editing purposes. * text selection and editing purposes.
*/ */
GlyphClusters = 1 << 0, GlyphClusters = 1 << 0,
/* Additions to this enum have to be propagated to RendererFlag and the
mask in RendererCore::flag() */
}; };
/** /**
@ -415,9 +418,13 @@ class MAGNUM_TEXT_EXPORT RendererCore {
* With @p runRange being for example the second value returned by * With @p runRange being for example the second value returned by
* @ref render(), returns a begin and end glyph offset for given run * @ref render(), returns a begin and end glyph offset for given run
* range, which can then be used to index the @ref glyphPositions(), * range, which can then be used to index the @ref glyphPositions(),
* @ref glyphIds() and @ref glyphClusters() views. Expects that both * @ref glyphIds() and @ref glyphClusters() views; when multiplied by
* the min and max @p runRange value are less than or equal to * @cpp 6 @ce to index the @ref Renderer::indices() view and when
* @ref renderingRunCount(). * multiplied by @cpp 4 @ce to index the @ref Renderer::vertexPositions()
* and @relativeref{Renderer,vertexTextureCoordinates()} /
* @relativeref{Renderer,vertexTextureArrayCoordinates()} views.
* Expects that both the min and max @p runRange value are less than or
* equal to @ref renderingRunCount().
* *
* Note that the returned value is not guaranteed to be meaningful if * Note that the returned value is not guaranteed to be meaningful if
* custom run allocator is used, as the user code is free to perform * custom run allocator is used, as the user code is free to perform
@ -554,7 +561,11 @@ class MAGNUM_TEXT_EXPORT RendererCore {
* @ref glyphsForRuns() to convert the returned run range to a begin * @ref glyphsForRuns() to convert the returned run range to a begin
* and end glyph offset, which can be then used to index the * and end glyph offset, which can be then used to index the
* @ref glyphPositions(), @ref glyphIds() and @ref glyphClusters() * @ref glyphPositions(), @ref glyphIds() and @ref glyphClusters()
* views. * views; when multiplied by @cpp 6 @ce to index the
* @ref Renderer::indices() view and when multiplied by @cpp 4 @ce to
* index the @ref Renderer::vertexPositions() and
* @relativeref{Renderer,vertexTextureCoordinates()} /
* @relativeref{Renderer,vertexTextureArrayCoordinates()} views.
* *
* The rendered glyph range is not touched or used by the renderer in * The rendered glyph range is not touched or used by the renderer in
* any way afterwards. If the renderer was created with custom * any way afterwards. If the renderer was created with custom
@ -600,7 +611,10 @@ class MAGNUM_TEXT_EXPORT RendererCore {
struct AllocatorState; struct AllocatorState;
Containers::Pointer<State> _state; Containers::Pointer<State> _state;
/* Called by reset() */ /* Delegated to by Renderer constructors */
explicit MAGNUM_TEXT_LOCAL RendererCore(Containers::Pointer<State>&& state);
/* Called by reset() and Renderer::reset() */
MAGNUM_TEXT_LOCAL void resetInternal(); MAGNUM_TEXT_LOCAL void resetInternal();
private: private:
@ -619,6 +633,450 @@ class MAGNUM_TEXT_EXPORT RendererCore {
MAGNUM_TEXT_LOCAL void alignAndFinishLine(); MAGNUM_TEXT_LOCAL void alignAndFinishLine();
}; };
/**
@brief Text renderer flag
@m_since_latest
A superset of @ref RendererCoreFlag.
@see @ref RendererFlags, @ref Renderer
*/
enum class RendererFlag: UnsignedByte {
/**
* Populate glyph cluster info in @ref Renderer::glyphPositions() and
* @ref Renderer::glyphClusters() for text selection and editing purposes.
*
* Compared to @ref RendererCore and @ref RendererCoreFlag::GlyphClusters,
* the @ref Renderer by default queries glyph positions to a temporary
* location that's later overwritten by quad vertex positions to save
* memory so this flag includes both clusters and positions.
*/
GlyphPositionsClusters = Int(RendererCoreFlag::GlyphClusters)
};
/**
* @debugoperatorenum{RendererFlag}
* @m_since_latest
*/
MAGNUM_TEXT_EXPORT Debug& operator<<(Debug& output, RendererFlag value);
/**
@brief Text renderer flags
@m_since_latest
A superset of @ref RendererCoreFlags.
@see @ref Renderer
*/
typedef Containers::EnumSet<RendererFlag> RendererFlags;
CORRADE_ENUMSET_OPERATORS(RendererFlags)
/**
* @debugoperatorenum{RendererFlags}
* @m_since_latest
*/
MAGNUM_TEXT_EXPORT Debug& operator<<(Debug& output, RendererFlags value);
/**
@brief Text renderer
@m_since_latest
*/
class MAGNUM_TEXT_EXPORT Renderer: public RendererCore {
public:
/**
* @brief Construct
* @param glyphCache Glyph cache to use
* @param flags Opt-in feature flags
*
* By default, the renderer allocates the memory for glyph, run, index
* and vertex data internally. Use the overload below to supply
* external allocators.
* @todoc the damn thing can't link to functions taking functions
*/
explicit Renderer(const AbstractGlyphCache& glyphCache, RendererFlags flags = {}): Renderer{glyphCache, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, flags} {}
/**
* @brief Construct with external allocators
* @param glyphCache Glyph cache to use for glyph ID mapping
* @param glyphAllocator Glyph allocator function or @cpp nullptr @ce
* @param glyphAllocatorState State pointer to pass to @p glyphAllocator
* @param runAllocator Run allocator function or @cpp nullptr @ce
* @param runAllocatorState State pointer to pass to @p runAllocator
* @param indexAllocator Index allocator function or @cpp nullptr @ce
* @param indexAllocatorState State pointer to pass to @p indexAllocator
* @param vertexAllocator Vertex allocator function or @cpp nullptr @ce
* @param vertexAllocatorState State pointer to pass to @p vertexAllocator
* @param flags Opt-in feature flags
*
* The @p glyphAllocator gets called with desired @p glyphCount every
* time @ref glyphCount() reaches @ref glyphCapacity(). Size of
* passed-in @p glyphPositions, @p glyphIds and @p glyphClusters views
* matches @ref glyphCount(). The @p glyphAdvances view is a temporary
* storage with contents that don't need to be preserved on
* reallocation and is thus passed in empty. If the renderer wasn't
* constructed with @ref RendererFlag::GlyphPositionsClusters, the
* @p glyphClusters is @cpp nullptr @ce to indicate it's not meant to
* be allocated. The allocator is expected to replace all passed views
* with new views that are larger by *at least* @p glyphCount, pointing
* to a reallocated memory with contents from the original view
* preserved. Initially @ref glyphCount() is @cpp 0 @ce and the views
* are all passed in empty, every subsequent time the views match a
* prefix of views previously returned by the allocator. To save
* memory, the renderer guarantees that @p glyphIds and
* @p glyphClusters are only filled once @p glyphAdvances were merged
* into @p glyphPositions. In other words, the @p glyphAdvances can
* alias a suffix of @p glyphIds and @p glyphClusters.
*
* The @p runAllocator gets called with desired @p runCount every time
* @ref runCount() reaches @ref runCapacity(). Size of passed-in
* @p runScales and @p runEnds views matches @ref runCount(). The
* allocator is expected to replace the views with new views that are
* larger by *at least* @p runCount, pointing to a reallocated memory
* with contents from the original views preserved. Initially
* @ref runCount() is @cpp 0 @ce and the views are passed in empty,
* every subsequent time the views match a prefix of views previously
* returned by the allocator.
*
* The @p indexAllocator gets called with desired @p size every time
* @ref glyphCapacity() increases. Size of passed-in @p indices array
* either matches @ref glyphCapacity() times @cpp 6 @ce times size of
* @ref indexType() if the index type stays the same, or is empty if
* the index type changes (and the whole index array is going to
* get rebuilt with a different type, thus no contents need to be
* preserved). The allocator is expected to replace the passed view
* with a new view that's larger by *at least* @p size, pointing to a
* reallocated memory with contents from the original view preserved.
* Initially @ref glyphCapacity() is @cpp 0 @ce and the view is passed
* in empty, every subsequent time the view matches a prefix of the
* view previously returned by the allocator.
*
* The @p vertexAllocator gets called with @p vertexCount every time
* @ref glyphCount() reaches @ref glyphCapacity(). Size of passed-in
* @p vertexPositions and @p vertexTextureCoordinates views matches
* @ref glyphCount() times @cpp 4 @ce. The allocator is expected to
* replace the views with new views that are larger by *at least*
* @p vertexCount, pointing to a reallocated memory with contents from
* the original views preserved. Initially @ref glyphCount() is
* @cpp 0 @ce and the views are passed in empty, every subsequent time
* the views match a prefix of views previously returned by the
* allocator. If the @p glyphCache is an array, the allocator is
* expected to (re)allocate @p vertexTextureCoordinates for a
* @relativeref{Magnum,Vector3} type even though the view points to
* just the first two components of each texture coordinates.
*
* The renderer always requests only exactly the desired size and the
* growth strategy is up to the allocators themselves --- the returned
* glyph and run views can be larger than requested and aren't
* required to all have the same size. The minimum of size increases
* across all views is then treated as the new @ref glyphCapacity(),
* @ref glyphIndexCapacity(), @ref glyphVertexCapacity() and
* @ref runCapacity().
*
* As a special case, when @ref clear() or @ref reset() is called, the
* allocators are called with empty views and @p glyphCount /
* @p runCount / @p size / @p vertexCount being @cpp 0 @ce. This is to
* allow the allocators to perform any needed reset as well.
*
* If @p glyphAllocator, @p runAllocator, @p indexAllocator or
* @p vertexAllocator is @cpp nullptr @ce, @p glyphAllocatorState,
* @p runAllocatorState, @p indexAllocatorState or
* @p vertexAllocatorState is ignored and the default builtin allocator gets
* used for either. Passing @cpp nullptr @ce for all is equivalent to
* calling the @ref Renderer(const AbstractGlyphCache&, RendererFlags)
* constructor.
*/
explicit Renderer(const AbstractGlyphCache& glyphCache, void(*glyphAllocator)(void* state, UnsignedInt glyphCount, Containers::StridedArrayView1D<Vector2>& glyphPositions, Containers::StridedArrayView1D<UnsignedInt>& glyphIds, Containers::StridedArrayView1D<UnsignedInt>* glyphClusters, Containers::StridedArrayView1D<Vector2>& glyphAdvances), void* glyphAllocatorState, void(*runAllocator)(void* state, UnsignedInt runCount, Containers::StridedArrayView1D<Float>& runScales, Containers::StridedArrayView1D<UnsignedInt>& runEnds), void* runAllocatorState, void(*indexAllocator)(void* state, UnsignedInt size, Containers::ArrayView<char>& indices), void* indexAllocatorState, void(*vertexAllocator)(void* state, UnsignedInt vertexCount, Containers::StridedArrayView1D<Vector2>& vertexPositions, Containers::StridedArrayView1D<Vector2>& vertexTextureCoordinates), void* vertexAllocatorState, RendererFlags flags = {});
/**
* @brief Construct without creating the internal state
* @m_since_latest
*
* The constructed instance is equivalent to moved-from state, i.e. no
* APIs can be safely called on the object. Useful in cases where you
* will overwrite the instance later anyway. Move another object over
* it to make it useful.
*
* Note that this is a low-level and a potentially dangerous API, see
* the documentation of @ref NoCreate for alternatives.
*/
explicit Renderer(NoCreateT) noexcept: RendererCore{NoCreate} {}
/** @brief Copying is not allowed */
Renderer(Renderer&) = delete;
/**
* @brief Move constructor
*
* Performs a destructive move, i.e. the original object isn't usable
* afterwards anymore.
*/
Renderer(Renderer&&) noexcept;
~Renderer();
/** @brief Copying is not allowed */
Renderer& operator=(Renderer&) = delete;
/** @brief Move assignment */
Renderer& operator=(Renderer&&) noexcept;
/** @brief Flags */
RendererFlags flags() const;
/**
* @brief Glyph index capacity
*
* Describes how many glyphs can be rendered into the index buffer. The
* actual index count is six times the capacity.
* @see @ref glyphCapacity(), @ref glyphVertexCapacity(),
* @ref glyphCount(), @ref runCapacity(), @ref reserve()
*/
UnsignedInt glyphIndexCapacity() const;
/**
* @brief Glyph vertex capacity
*
* Describes how many glyphs can be rendered into the vertex buffer.
* The actual vertex count is four times the capacity.
* @see @ref glyphCapacity(), @ref glyphIndexCapacity(),
* @ref glyphCount(), @ref runCapacity(), @ref reserve()
*/
UnsignedInt glyphVertexCapacity() const;
/**
* @brief Index type
*
* The smallest type that can describe vertices for all
* @ref glyphCapacity() glyphs and isn't smaller than what was set in
* @ref setIndexType(). Initially set to
* @ref MeshIndexType::UnsignedByte, a larger type is automatically
* switched to once the capacity exceeds @cpp 64 @ce and @cpp 16384 @ce
* glyphs.
*/
MeshIndexType indexType() const;
/**
* @brief Set index type
* @return Reference to self (for method chaining)
*
* Sets the smallest possible index type to be used. Initially
* @ref MeshIndexType::UnsignedByte, a larger type is automatically
* switched to once @ref glyphCapacity() exceeds @cpp 64 @ce and
* @cpp 16384 @ce glyphs. Set to a larger type if you want it to be
* used even if the glyph capacity is smaller. Setting it back to a
* smaller type afterwards uses the type only if the glyph capacity
* allows it.
*/
Renderer& setIndexType(MeshIndexType atLeast);
/**
* @brief Glyph positions
*
* Expects that the renderer was constructed with
* @ref RendererFlag::GlyphPositionsClusters. The returned view has a
* size of @ref glyphCount(). Note that the contents are not guaranteed
* to be meaningful if custom glyph allocator is used, as the user code
* is free to perform subsequent operations on those.
*/
Containers::StridedArrayView1D<const Vector2> glyphPositions() const;
/**
* @brief Glyph IDs are not accessible
*
* Unlike with @ref RendererCore, to save memory, glyph IDs are
* retrieved only to a temporary location to produce glyph quads and
* are subsequently overwritten by vertex data.
*/
Containers::StridedArrayView1D<const UnsignedInt> glyphIds() const = delete;
/**
* @brief Glyph cluster IDs
*
* Expects that the renderer was constructed with
* @ref RendererFlag::GlyphPositionsClusters. The returned view has a
* size of @ref glyphCount(). Note that the contents are not guaranteed
* to be meaningful if custom glyph allocator is used, as the user code
* is free to perform subsequent operations on those.
*/
Containers::StridedArrayView1D<const UnsignedInt> glyphClusters() const;
/**
* @brief Type-erased glyph quad indices
*
* The returned view is contiguous with a size of @ref glyphCount()
* times @cpp 6 @ce, the second dimension having a size of
* @ref indexType(). The values index the @ref vertexPositions() and
* @ref vertexTextureCoordinates() / @ref vertexTextureArrayCoordinates()
* arrays. Note that the contents are not guaranteed to be meaningful
* if custom index allocator is used, as the user code is free to
* perform subsequent operations on those.
*
* Use the templated overload below to get the indices in a concrete
* type.
*/
Containers::StridedArrayView2D<const char> indices() const;
/**
* @brief Glyph quad indices
*
* Expects that @p T is either @relativeref{Magnum,UnsignedByte},
* @relativeref{Magnum,UnsignedShort} or
* @relativeref{Magnum,UnsignedInt} and matches @ref indexType(). The
* returned view has a size of @ref glyphCount() times @cpp 6 @ce. Note
* that the contents are not guaranteed to be meaningful if custom
* index allocator is used, as the user code is free to perform
* subsequent operations on those.
*
* Use the non-templated overload above to get a type-erased view on
* the indices.
*/
template<class T> Containers::ArrayView<const T> indices() const;
/**
* @brief Vertex positions
*
* The returned view has a size of @ref glyphCount() times @cpp 4 @ce.
* Note that the contents are not guaranteed to be meaningful if custom
* vertex allocator is used, as the user code is free to perform
* subsequent operations on those.
*/
Containers::StridedArrayView1D<const Vector2> vertexPositions() const;
/**
* @brief Vertex texture coordinates
*
* Expects that the renderer was constructed with a non-array
* @ref AbstractGlyphCache, i.e. with a depth equal to @cpp 1 @ce.
* The returned view has a size of @ref glyphCount() times @cpp 4 @ce.
* Note that the contents are not guaranteed to be meaningful if custom
* vertex allocator is used, as the user code is free to perform
* subsequent operations on those.
*/
Containers::StridedArrayView1D<const Vector2> vertexTextureCoordinates() const;
/**
* @brief Vertex texture array coordinates
*
* Expects that the renderer was constructed with an array
* @ref AbstractGlyphCache, i.e. with a depth larger than @cpp 1 @ce.
* The returned view has a size of @ref glyphCount() times @cpp 4 @ce.
* Note that the contents are not guaranteed to be meaningful if custom
* vertex allocator is used, as the user code is free to perform
* subsequent operations on those.
*/
Containers::StridedArrayView1D<const Vector3> vertexTextureArrayCoordinates() const;
/**
* @brief Reserve capacity for given glyph count
* @return Reference to self (for method chaining)
*
* Calls @ref RendererCore::reserve() and additionally reserves
* capacity also for the corresponding index and vertex memory. Note
* that while reserved index and vertex capacity is derived from
* @p glyphCapacity and @ref indexType(), their actually allocated
* capacity doesn't need to match @ref glyphCapacity() and is exposed
* through @ref glyphIndexCapacity() and @ref glyphVertexCapacity().
* @see @ref glyphCount(), @ref runCapacity(), @ref runCount()
*/
Renderer& reserve(UnsignedInt glyphCapacity, UnsignedInt runCapacity);
/**
* @brief Clear rendered glyphs, runs and vertices
* @return Reference to self (for method chaining)
*
* Calls @ref RendererCore::clear(). The @ref glyphCount() and
* @ref runCount() becomes @cpp 0 @ce after this call and any
* in-progress rendering is discarded, making @ref isRendering() return
* @cpp false @ce. If custom glyph, run or vertex allocators are used,
* they get called with empty views and zero sizes. Custom index
* allocator isn't called however, as the index buffer only needs
* updating when its capacity isn't large enough.
*
* Depending on allocator used, @ref glyphCapacity(),
* @ref glyphVertexCapacity() and @ref runCapacity() may stay non-zero.
* The @ref cursor(), @ref alignment(), @ref lineAdvance() and
* @ref layoutDirection() are left untouched, use @ref reset() to reset
* those to their default values as well.
*/
Renderer& clear();
/**
* @brief Reset internal renderer state
* @return Reference to self (for method chaining)
*
* Calls @ref clear(), and additionally @ref cursor(),
* @ref alignment(), @ref lineAdvance() and @ref layoutDirection() are
* reset to their default values. Apart from @ref glyphCapacity(),
* @ref glyphVertexCapacity() and @ref runCapacity() which may stay
* non-zero depending on allocator used, and @ref glyphIndexCapacity()
* plus @ref indexType() which are left untouched, the instance is
* equivalent to a default-constructed state.
*/
Renderer& reset();
/**
* @brief Wrap up rendering of all text added so far
*
* Calls @ref RendererCore::render() and populates also index and
* vertex data, subsequently available through @ref indices(),
* @ref vertexPositions() and @ref vertexTextureCoordinates() /
* @ref vertexTextureArrayCoordinates().
*
* The function uses @ref renderGlyphQuadsInto() and
* @ref renderGlyphQuadIndicesInto() internally, see their
* documentation for more information.
*/
Containers::Pair<Range2D, Range1Dui> render();
/* Overloads to remove a WTF factor from method chaining order, and to
ensure our render() is called instead of RendererCore::render() */
#ifndef DOXYGEN_GENERATING_OUTPUT
Renderer& setCursor(const Vector2& cursor) {
return static_cast<Renderer&>(RendererCore::setCursor(cursor));
}
Renderer& setAlignment(Alignment alignment) {
return static_cast<Renderer&>(RendererCore::setAlignment(alignment));
}
Renderer& setLineAdvance(Float advance) {
return static_cast<Renderer&>(RendererCore::setLineAdvance(advance));
}
Renderer& setLayoutDirection(LayoutDirection direction) {
return static_cast<Renderer&>(RendererCore::setLayoutDirection(direction));
}
Renderer& add(AbstractShaper& shaper, Float size, Containers::StringView text, UnsignedInt begin, UnsignedInt end, Containers::ArrayView<const FeatureRange> features);
Renderer& add(AbstractShaper& shaper, Float size, Containers::StringView text, UnsignedInt begin, UnsignedInt end);
Renderer& add(AbstractShaper& shaper, Float size, Containers::StringView text, UnsignedInt begin, UnsignedInt end, std::initializer_list<FeatureRange> features);
Renderer& add(AbstractShaper& shaper, Float size, Containers::StringView text, Containers::ArrayView<const FeatureRange> features);
Renderer& add(AbstractShaper& shaper, Float size, Containers::StringView text);
Renderer& add(AbstractShaper& shaper, Float size, Containers::StringView text, std::initializer_list<FeatureRange> features);
Containers::Pair<Range2D, Range1Dui> render(AbstractShaper& shaper, Float size, Containers::StringView text, Containers::ArrayView<const FeatureRange> features);
Containers::Pair<Range2D, Range1Dui> render(AbstractShaper& shaper, Float size, Containers::StringView text);
Containers::Pair<Range2D, Range1Dui> render(AbstractShaper& shaper, Float size, Containers::StringView text, std::initializer_list<FeatureRange> features);
#endif
#ifdef DOXYGEN_GENERATING_OUTPUT
private:
#else
protected:
#endif
struct State;
private:
/* While the allocators get just size to grow by, these functions get
the total count */
MAGNUM_TEXT_LOCAL void allocateIndices(
#ifndef CORRADE_NO_ASSERT
const char* messagePrefix,
#endif
UnsignedInt totalGlyphCount);
MAGNUM_TEXT_LOCAL void allocateVertices(
#ifndef CORRADE_NO_ASSERT
const char* messagePrefix,
#endif
UnsignedInt totalGlyphCount);
};
/** /**
@brief Render glyph positions for a (part of a) single line @brief Render glyph positions for a (part of a) single line
@param[in] font Font to query metrics from @param[in] font Font to query metrics from

4340
src/Magnum/Text/Test/RendererTest.cpp

File diff suppressed because it is too large Load Diff

1
src/Magnum/Text/Text.h

@ -57,6 +57,7 @@ enum class Script: UnsignedInt;
class FeatureRange; class FeatureRange;
class RendererCore; class RendererCore;
class Renderer;
#ifdef MAGNUM_TARGET_GL #ifdef MAGNUM_TARGET_GL
class DistanceFieldGlyphCacheGL; class DistanceFieldGlyphCacheGL;

Loading…
Cancel
Save