From f32cc66d3d83ac922fe275f2de2d43187c6771f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vladim=C3=ADr=20Vondru=C5=A1?= Date: Mon, 1 Feb 2021 18:44:26 +0100 Subject: [PATCH] Vk: a container for a shader set needed by a pipeline. --- doc/snippets/MagnumVk.cpp | 32 +++ doc/vulkan-mapping.dox | 8 +- src/Magnum/Vk/CMakeLists.txt | 2 + src/Magnum/Vk/Shader.h | 74 ++++++- src/Magnum/Vk/ShaderSet.cpp | 156 +++++++++++++ src/Magnum/Vk/ShaderSet.h | 252 +++++++++++++++++++++ src/Magnum/Vk/Test/CMakeLists.txt | 1 + src/Magnum/Vk/Test/ShaderSetTest.cpp | 318 +++++++++++++++++++++++++++ src/Magnum/Vk/Vk.h | 3 + 9 files changed, 841 insertions(+), 5 deletions(-) create mode 100644 src/Magnum/Vk/ShaderSet.cpp create mode 100644 src/Magnum/Vk/ShaderSet.h create mode 100644 src/Magnum/Vk/Test/ShaderSetTest.cpp diff --git a/doc/snippets/MagnumVk.cpp b/doc/snippets/MagnumVk.cpp index 9f4e64fb1..558d7e203 100644 --- a/doc/snippets/MagnumVk.cpp +++ b/doc/snippets/MagnumVk.cpp @@ -57,6 +57,7 @@ #include "Magnum/Vk/RenderPassCreateInfo.h" #include "Magnum/Vk/Result.h" #include "Magnum/Vk/ShaderCreateInfo.h" +#include "Magnum/Vk/ShaderSet.h" #include "MagnumExternal/Vulkan/flextVkGlobal.h" /* [wrapping-include-createinfo] */ @@ -869,6 +870,37 @@ Vk::Shader shader{device, info}; /* [Shader-creation] */ } +{ +/* [ShaderSet-usage] */ +Vk::Shader vert{DOXYGEN_IGNORE(NoCreate)}, frag{DOXYGEN_IGNORE(NoCreate)}; + +using namespace Containers::Literals; + +Vk::ShaderSet set; +set.addShader(Vk::ShaderStage::Vertex, vert, "main"_s) + .addShader(Vk::ShaderStage::Fragment, frag, "main"_s); +/* [ShaderSet-usage] */ + +/* [ShaderSet-usage-specializations] */ +set.addShader(Vk::ShaderStage::Fragment, frag, "main"_s, { + {0, 3}, + {1, 0.25f}, + {2, false} +}); +/* [ShaderSet-usage-specializations] */ +} + +{ +using namespace Containers::Literals; +/* [ShaderSet-usage-ownership-transfer] */ +Vk::Shader shader{DOXYGEN_IGNORE(NoCreate)}; + +Vk::ShaderSet set; +set.addShader(Vk::ShaderStage::Vertex, shader, 
"vert"_s) + .addShader(Vk::ShaderStage::Fragment, std::move(shader), "frag"_s); +/* [ShaderSet-usage-ownership-transfer] */ +} + { /* [Integration] */ VkOffset2D a{64, 32}; diff --git a/doc/vulkan-mapping.dox b/doc/vulkan-mapping.dox index 87caad62d..3db6bbb0a 100644 --- a/doc/vulkan-mapping.dox +++ b/doc/vulkan-mapping.dox @@ -664,7 +664,7 @@ Vulkan structure | Matching API @type_vk{PipelineLibraryCreateInfoKHR} @m_class{m-label m-flat m-warning} **KHR** | | @type_vk{PipelineMultisampleStateCreateInfo} | | @type_vk{PipelineRasterizationStateCreateInfo} | | -@type_vk{PipelineShaderStageCreateInfo} | | +@type_vk{PipelineShaderStageCreateInfo} | @ref ShaderSet @type_vk{PipelineTessellationStateCreateInfo} | | @type_vk{PipelineTessellationDomainOriginStateCreateInfo} @m_class{m-label m-flat m-success} **KHR, 1.1** | | @type_vk{PipelineVertexInputDivisorStateCreateInfoEXT} @m_class{m-label m-flat m-warning} **EXT** | @ref MeshLayout @@ -722,8 +722,8 @@ Vulkan structure | Matching API @type_vk{SparseImageMemoryBind} | | @type_vk{SparseImageMemoryRequirements}, \n @type_vk{SparseImageMemoryRequirements2} @m_class{m-label m-flat m-success} **KHR, 1.1** | | @type_vk{SparseMemoryBind} | | -@type_vk{SpecializationInfo} | | -@type_vk{SpecializationMapEntry} | | +@type_vk{SpecializationInfo} | @ref ShaderSet +@type_vk{SpecializationMapEntry} | @ref ShaderSet @type_vk{StencilOpState} | | @type_vk{StridedDeviceAddressRegionKHR} @m_class{m-label m-flat m-warning} **KHR** | | @type_vk{SubmitInfo} | @ref SubmitInfo @@ -969,7 +969,7 @@ Vulkan enum | Matching API @type_vk{ShaderFloatControlsIndependence} @m_class{m-label m-flat m-success} **KHR, 1.2** | | @type_vk{ShaderGroupShaderKHR} @m_class{m-label m-flat m-warning} **KHR** | | @type_vk{ShaderModuleCreateFlagBits}, \n @type_vk{ShaderModuleCreateFlags} | @ref ShaderCreateInfo::Flags -@type_vk{ShaderStageFlagBits}, \n @type_vk{ShaderStageFlags} | | +@type_vk{ShaderStageFlagBits}, \n @type_vk{ShaderStageFlags} | @ref ShaderStage 
@type_vk{SharingMode} | | @type_vk{SparseImageFormatFlagBits}, \n @type_vk{SparseImageFormatFlags} | | @type_vk{SparseMemoryBindFlagBits}, \n @type_vk{SparseMemoryBindFlags} | | diff --git a/src/Magnum/Vk/CMakeLists.txt b/src/Magnum/Vk/CMakeLists.txt index 4970090e3..95b760323 100644 --- a/src/Magnum/Vk/CMakeLists.txt +++ b/src/Magnum/Vk/CMakeLists.txt @@ -60,6 +60,7 @@ set(MagnumVk_GracefulAssert_SRCS Memory.cpp PixelFormat.cpp RenderPass.cpp + ShaderSet.cpp VertexFormat.cpp) set(MagnumVk_HEADERS @@ -102,6 +103,7 @@ set(MagnumVk_HEADERS Result.h Shader.h ShaderCreateInfo.h + ShaderSet.h TypeTraits.h Version.h VertexFormat.h diff --git a/src/Magnum/Vk/Shader.h b/src/Magnum/Vk/Shader.h index 2e8a68b4e..ad0a2a83f 100644 --- a/src/Magnum/Vk/Shader.h +++ b/src/Magnum/Vk/Shader.h @@ -26,7 +26,7 @@ */ /** @file - * @brief Class @ref Magnum::Vk::Shader + * @brief Class @ref Magnum::Vk::Shader, enum @ref Magnum::Vk::ShaderStage * @m_since_latest */ @@ -39,6 +39,78 @@ namespace Magnum { namespace Vk { +/** +@brief Shader stage +@m_since_latest + +Wraps @type_vk_keyword{ShaderStageFlagBits}. 
+@m_enum_values_as_keywords +*/ +enum class ShaderStage: UnsignedInt { + /** Vertex stage */ + Vertex = VK_SHADER_STAGE_VERTEX_BIT, + + /** Fragment stage */ + Fragment = VK_SHADER_STAGE_FRAGMENT_BIT, + + /** + * Geometry stage + * @requires_vk_feature @ref DeviceFeature::GeometryShader + */ + Geometry = VK_SHADER_STAGE_GEOMETRY_BIT, + + /** + * Tessellation control stage + * @requires_vk_feature @ref DeviceFeature::TessellationShader + */ + TessellationControl = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, + + /** + * Tessellation evaluation stage + * @requires_vk_feature @ref DeviceFeature::TessellationShader + */ + TessellationEvaluation = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, + + /** Compute stage */ + Compute = VK_SHADER_STAGE_COMPUTE_BIT, + + /** + * Ray generation stage + * @requires_vk_feature @ref DeviceFeature::RayTracingPipeline + */ + RayGeneration = VK_SHADER_STAGE_RAYGEN_BIT_KHR, + + /** + * Ray any hit stage + * @requires_vk_feature @ref DeviceFeature::RayTracingPipeline + */ + RayAnyHit = VK_SHADER_STAGE_ANY_HIT_BIT_KHR, + + /** + * Ray closest hit stage + * @requires_vk_feature @ref DeviceFeature::RayTracingPipeline + */ + RayClosestHit = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, + + /** + * Ray miss stage + * @requires_vk_feature @ref DeviceFeature::RayTracingPipeline + */ + RayMiss = VK_SHADER_STAGE_MISS_BIT_KHR, + + /** + * Ray intersection stage + * @requires_vk_feature @ref DeviceFeature::RayTracingPipeline + */ + RayIntersection = VK_SHADER_STAGE_INTERSECTION_BIT_KHR, + + /** + * Ray callable stage + * @requires_vk_feature @ref DeviceFeature::RayTracingPipeline + */ + RayCallable = VK_SHADER_STAGE_CALLABLE_BIT_KHR +}; + /** @brief Shader @m_since_latest diff --git a/src/Magnum/Vk/ShaderSet.cpp b/src/Magnum/Vk/ShaderSet.cpp new file mode 100644 index 000000000..06f121ddd --- /dev/null +++ b/src/Magnum/Vk/ShaderSet.cpp @@ -0,0 +1,156 @@ +/* + This file is part of Magnum. 
+ + Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021 Vladimír Vondruš + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*/ + +#include "ShaderSet.h" + +#include +#include +#include +#include + +#include "Magnum/Vk/Shader.h" + +namespace Magnum { namespace Vk { + +struct ShaderSet::State { + Containers::Array ownedShaders; + Containers::Array entrypointNames; + Containers::Array specializationData; + Containers::Array specializations; +}; + +ShaderSet::ShaderSet(): _stages{}, _specializations{}, _stageCount{} {} + +ShaderSet::ShaderSet(ShaderSet&& other) noexcept: _stageCount{other._stageCount}, _state{std::move(other._state)} { + /* C++, WHY THE FUCK can't you copy C arrays, why do I have to do that for + you?! */ + Utility::copy(other._stages, _stages); + Utility::copy(other._specializations, _specializations); + /* The easiest is to just make the original stage list empty and leave + whatever dangling internal pointers are there. 
Otherwise we'd need to + clear even the entrypoint field in case the name is owned, which would + make the whole list invalid and thus pointless. */ + other._stageCount = 0; +} + +ShaderSet::~ShaderSet() = default; + +ShaderSet& ShaderSet::operator=(ShaderSet&& other) noexcept { + using std::swap; + swap(other._stages, _stages); + swap(other._specializations, _specializations); + swap(other._stageCount, _stageCount); + swap(other._state, _state); + return *this; +} + +ShaderSet& ShaderSet::addShader(const ShaderStage stage, const VkShaderModule shader, const Containers::StringView entrypoint, const Containers::ArrayView specializations) { + CORRADE_ASSERT(_stageCount < Containers::arraySize(_stages), + "Vk::ShaderSet::addShader(): too many stages, expected at most" << Containers::arraySize(_stages), *this); + + _stages[_stageCount].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; + _stages[_stageCount].stage = VkShaderStageFlagBits(stage); + _stages[_stageCount].module = shader; + + /* Not using String::nullTerminatedGlobalView() unconditionally because + this way we can avoid allocating the state struct altogether */ + if(entrypoint.flags() >= (Containers::StringViewFlag::Global|Containers::StringViewFlag::NullTerminated)) { + _stages[_stageCount].pName = entrypoint.data(); + } else { + if(!_state) _state.emplace(); + /* Ensure the data are never SSO'd and so when the array reallocates we + don't need to rewire existing name pointers */ + _stages[_stageCount].pName = arrayAppend(_state->entrypointNames, Containers::InPlaceInit, Containers::AllocatedInit, entrypoint).data(); + } + + /* Specialization, also only if there are any to avoid allocating the state + struct when not neccessary */ + if(!specializations.empty()) { + if(!_state) _state.emplace(); + + /* Remember the original base data pointers so we can reroute the + structures after a potential reallocation */ + const char* const previousBaseDataPointer = _state->specializationData.data(); + 
const VkSpecializationMapEntry* const previousBasePointer = _state->specializations.data(); + + /* The data is (currently) always four bytes, so we don't need to do + any extra work to calculate the total data size over all + specializations */ + const Containers::ArrayView newSpecializationData = arrayAppend(_state->specializationData, Containers::NoInit, specializations.size()*4); + const Containers::ArrayView newSpecializations = arrayAppend(_state->specializations, Containers::NoInit, specializations.size()); + + /* Reroute the existing structures for possible reallocations */ + for(std::size_t i = 0; i != _stageCount; ++i) { + if(!_specializations[i].dataSize) continue; + + CORRADE_INTERNAL_ASSERT(_specializations[i].pData >= previousBaseDataPointer && _specializations[i].pData < previousBaseDataPointer + _state->specializationData.size() - specializations.size()*4); + _specializations[i].pData = _state->specializationData.data() + (static_cast(_specializations[i].pData) - previousBaseDataPointer); + + CORRADE_INTERNAL_ASSERT(_specializations[i].pMapEntries >= previousBasePointer && _specializations[i].pMapEntries < previousBasePointer + _state->specializationData.size() - specializations.size()); + _specializations[i].pMapEntries = _state->specializations.data() + (_specializations[i].pMapEntries - previousBasePointer); + } + + /* Add new specializations */ + const auto newSpecializationDataInteger = Containers::arrayCast(newSpecializationData); + for(std::size_t i = 0; i != specializations.size(); ++i) { + newSpecializations[i].constantID = specializations[i].id(); + newSpecializations[i].offset = i*4; + newSpecializations[i].size = 4; + newSpecializationDataInteger[i] = specializations[i].data(); + } + + _specializations[_stageCount].mapEntryCount = newSpecializations.size(); + _specializations[_stageCount].pMapEntries = newSpecializations; + _specializations[_stageCount].dataSize = newSpecializationData.size(); + _specializations[_stageCount].pData = 
newSpecializationData; + _stages[_stageCount].pSpecializationInfo = _specializations + _stageCount; + } + + ++_stageCount; + return *this; +} + +ShaderSet& ShaderSet::addShader(const ShaderStage stage, const VkShaderModule shader, const Containers::StringView entrypoint, const std::initializer_list specializations) { + return addShader(stage, shader, entrypoint, Containers::arrayView(specializations)); +} + +ShaderSet& ShaderSet::addShader(const ShaderStage stage, Shader&& shader, const Containers::StringView entrypoint, const Containers::ArrayView specializations) { + if(!_state) _state.emplace(); + return addShader(stage, arrayAppend(_state->ownedShaders, std::move(shader)), entrypoint, specializations); +} + +ShaderSet& ShaderSet::addShader(const ShaderStage stage, Shader&& shader, const Containers::StringView entrypoint, const std::initializer_list specializations) { + return addShader(stage, std::move(shader), entrypoint, Containers::arrayView(specializations)); +} + +Containers::ArrayView ShaderSet::stages() { + return {_stages, _stageCount}; +} + +Containers::ArrayView ShaderSet::stages() const { + return {_stages, _stageCount}; +} + +}} diff --git a/src/Magnum/Vk/ShaderSet.h b/src/Magnum/Vk/ShaderSet.h new file mode 100644 index 000000000..941490aa5 --- /dev/null +++ b/src/Magnum/Vk/ShaderSet.h @@ -0,0 +1,252 @@ +#ifndef Magnum_Vk_ShaderSet_h +#define Magnum_Vk_ShaderSet_h +/* + This file is part of Magnum. 
+ + Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021 Vladimír Vondruš + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. +*/ + +/** @file + * @brief Class @ref Magnum::Vk::ShaderSet, @ref Magnum::Vk::ShaderSpecialization + * @m_since_latest + */ + +#include + +#include "Magnum/Tags.h" +#include "Magnum/Vk/Vk.h" +#include "Magnum/Vk/Vulkan.h" +#include "Magnum/Vk/visibility.h" + +namespace Magnum { namespace Vk { + +/** +@brief Shader specialization +@m_since_latest + +Used by @ref ShaderSet for specifying shader specialization constants. See its +documentation for more information. 
+*/ +class ShaderSpecialization { + public: + /** + * @brief Construct an integer specialization constant + * @param id Specialization constant ID + * @param value Specialized value + */ + /*implicit*/ ShaderSpecialization(UnsignedInt id, Int value): _id{id}, _data{reinterpret_cast(value)} {} + + /** + * @brief Construct a float specialization constant + * @param id Specialization constant ID + * @param value Specialized value + */ + /*implicit*/ ShaderSpecialization(UnsignedInt id, Float value): _id{id}, _data{reinterpret_cast(value)} {} + + /** + * @brief Construct a boolean specialization constant + * @param id Specialization constant ID + * @param value Specialized value + */ + /*implicit*/ ShaderSpecialization(UnsignedInt id, bool value): _id{id}, _data{value} {} + + /** @brief Specialization constant ID */ + UnsignedInt id() const { return _id; } + + /** + * @brief Specialization value data + * + * The contents can be an integer, a float or a boolean extended to + * four bytes based on what constructor got used. + */ + UnsignedInt data() const { return _data; } + + private: + UnsignedInt _id; + + /* It would be great if this was explicitly said in either the SPIR-V + or Vulkan spec, but AFAICT, specialization is only possible for + booleans (which have to be four bytes), ints and floats, not + composite types (and at least in the GL_KHR_vulkan_glsl spec these + are enumerated as the only allowed types). Looking at the SPIR-V + spec, OpSpecConstant gets turned into OpConstant and that can only + be an int or float as well. + + In conclusion, I don't see why there has to be the size specified + if it's required to be always 4 bytes. Maybe future-proofing blah + blah, which may as well never happen. Here I'm making my life + simpler by explicitly supporting only the three allowed types, + putting them all into an int. 
*/ + UnsignedInt _data; +}; + +/** +@brief Shader set +@m_since_latest + +A collection of @ref Shader instances together with populated +@type_vk_keyword{PipelineShaderStageCreateInfo} structures for use in a +pipeline. + +@section Vk-ShaderSet-usage Usage + +Based on whether the shader set is for a rasterization, compute or ray tracing +pipeline, you'll call @ref addShader() with all stages that the pipeline needs. +At the very least you need to specify what stage is the shader for and the +entrypoint name --- usually it'd be @cpp main() @ce, but there can be also +SPIR-V shader modules with multiple entry points, which is why this parameter +is needed. + +@snippet MagnumVk.cpp ShaderSet-usage + + + +@m_class{m-note m-success} + +@par + The above code uses the @link Containers::Literals::operator""_s() @endlink + literal, which lets the library know that given string is global and + null-terminated. Such strings then don't need to be copied internally to + keep them in scope until they're consumed by Vulkan APIs. + +@subsection Vk-ShaderSet-usage-specializations Specialization constants + +If the shader module exposes specialization constants, those can be specialized +via an additional parameter, taking a list of @ref ShaderSpecialization +instances. The constant can be an integer, float or a boolean; constant IDs not +present in the SPIR-V module are ignored. + +@snippet MagnumVk.cpp ShaderSet-usage-specializations + +@subsection Vk-ShaderSet-usage-ownership-transfer Shader ownership transfer + +To create a self-contained shader set it's possible to move the @ref Shader +instances into the class using the @ref addShader(ShaderStage, Shader&&, Containers::StringView, Containers::ArrayView) +overload. If you have a multi-entrypoint shader, move only the last specified +stage, for example: + +@snippet MagnumVk.cpp ShaderSet-usage-ownership-transfer +*/ +class MAGNUM_VK_EXPORT ShaderSet { + public: + /** + * @brief Constructor + * + * Creates an empty shader set. 
At least one shader has to be present, + * call @ref addShader() to add it. + */ + explicit ShaderSet(); + + /** @brief Copying is not allowed */ + ShaderSet(const ShaderSet&) = delete; + + /** @brief Move constructor */ + ShaderSet(ShaderSet&& other) noexcept; + + /** + * @brief Destructor + * + * If any shaders were added using @ref addShader(ShaderStage, Shader&&, Containers::StringView, Containers::ArrayView), + * their owned instances are destructed at this point. + */ + ~ShaderSet(); + + /** @brief Copying is not allowed */ + ShaderSet& operator=(const ShaderSet&) = delete; + + /** @brief Move assignment */ + ShaderSet& operator=(ShaderSet&& other) noexcept; + + /** + * @brief Add a shader + * @param stage Shader stage + * @param shader A @ref Shader or a raw Vulkan shader handle + * @param entrypoint Entrypoint name + * @param specializations Specialization constant values + * @return Reference to self (for method chaining) + * + * The function makes a copy of @p entrypoint if it's not global or + * null-terminated, use the @link Containers::Literals::operator""_s() @endlink + * literal to prevent that where possible. + * + * The populated @type_vk{VkPipelineShaderStageCreateInfo} is + * subsequently available through @ref stages() for direct editing. 
The + * following fields are pre-filled in addition to `sType`, everything + * else is zero-filled: + * + * - `stage` + * - `module` to @p shader + * - `pName` to @p entrypoint + * - `pSpecializationInfo`, if @p specializations are non-empty + * - @cpp pSpecializationInfo->mapEntryCount @ce, + * @cpp pSpecializationInfo->pMapEntries @ce, + * @cpp pSpecializationInfo->pMapEntries[i].constantID @ce, + * @cpp pSpecializationInfo->pMapEntries[i].offset @ce, + * @cpp pSpecializationInfo->pMapEntries[i].size @ce, + * @cpp pSpecializationInfo->dataSize @ce and + * @cpp pSpecializationInfo->pData @ce to processed and linearized + * contents of @p specializations + */ + ShaderSet& addShader(ShaderStage stage, VkShaderModule shader, Containers::StringView entrypoint, Containers::ArrayView specializations); + + /** @overload */ + /* Having a default here to avoid having to include ArrayView or have + two addShader() implementations, one with and one without */ + ShaderSet& addShader(ShaderStage stage, VkShaderModule shader, Containers::StringView entrypoint, std::initializer_list specializations = {}); + + /** + * @brief Add a shader and take over its ownership + * @return Reference to self (for method chaining) + * + * Compared to @ref addShader(ShaderStage, VkShaderModule, Containers::StringView, Containers::ArrayView) + * the @p shader instance ownership is transferred to the class and + * thus doesn't have to be managed separately. + */ + ShaderSet& addShader(ShaderStage stage, Shader&& shader, Containers::StringView entrypoint, Containers::ArrayView specializations); + + /** @overload */ + /* Having a default here to avoid having to include ArrayView or have + two addShader() implementations, one with and one without */ + ShaderSet& addShader(ShaderStage stage, Shader&& shader, Containers::StringView entrypoint, std::initializer_list specializations = {}); + + /** + * @brief Shader stages + * + * Exposes all data added with @ref addShader() calls. 
If + * @ref addShader() was not called yet, the returned view is empty. + */ + Containers::ArrayView stages(); + /** @overload */ + Containers::ArrayView stages() const; + + private: + VkPipelineShaderStageCreateInfo _stages[6]; + VkSpecializationInfo _specializations[6]; + std::size_t _stageCount; + + struct State; + Containers::Pointer _state; +}; + +}} + +#endif diff --git a/src/Magnum/Vk/Test/CMakeLists.txt b/src/Magnum/Vk/Test/CMakeLists.txt index a74de4e0a..78d31c4a7 100644 --- a/src/Magnum/Vk/Test/CMakeLists.txt +++ b/src/Magnum/Vk/Test/CMakeLists.txt @@ -50,6 +50,7 @@ corrade_add_test(VkQueueTest QueueTest.cpp LIBRARIES MagnumVk) corrade_add_test(VkResultTest ResultTest.cpp LIBRARIES MagnumVk) corrade_add_test(VkRenderPassTest RenderPassTest.cpp LIBRARIES MagnumVkTestLib) corrade_add_test(VkShaderTest ShaderTest.cpp LIBRARIES MagnumVk) +corrade_add_test(VkShaderSetTest ShaderSetTest.cpp LIBRARIES MagnumVkTestLib) corrade_add_test(VkVertexFormatTest VertexFormatTest.cpp LIBRARIES MagnumVkTestLib) corrade_add_test(VkStructureHelpersTest StructureHelpersTest.cpp) diff --git a/src/Magnum/Vk/Test/ShaderSetTest.cpp b/src/Magnum/Vk/Test/ShaderSetTest.cpp new file mode 100644 index 000000000..f838007ce --- /dev/null +++ b/src/Magnum/Vk/Test/ShaderSetTest.cpp @@ -0,0 +1,318 @@ +/* + This file is part of Magnum. 
+ + Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, + 2020, 2021 Vladimír Vondruš + + Permission is hereby granted, free of charge, to any person obtaining a + copy of this software and associated documentation files (the "Software"), + to deal in the Software without restriction, including without limitation + the rights to use, copy, modify, merge, publish, distribute, sublicense, + and/or sell copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS IN THE SOFTWARE. 
+*/ + +#include +#include +#include +#include +#include + +#include "Magnum/Vk/Device.h" +#include "Magnum/Vk/Shader.h" +#include "Magnum/Vk/ShaderSet.h" + +namespace Magnum { namespace Vk { namespace Test { namespace { + +struct ShaderSetTest: TestSuite::Tester { + explicit ShaderSetTest(); + + void specializationConstructInt(); + void specializationConstructFloat(); + void specializationConstructBool(); + + void construct(); + void constructCopy(); + void constructMove(); + + void addShader(); + void addShaderEntrypointCopy(); + void addShaderEntrypointCopyReallocation(); + void addShaderSpecializations(); + void addShaderSpecializationsReallocation(); + void addShaderOwnershipTransfer(); + void addShaderTooManyStages(); +}; + +ShaderSetTest::ShaderSetTest() { + addTests({&ShaderSetTest::specializationConstructInt, + &ShaderSetTest::specializationConstructFloat, + &ShaderSetTest::specializationConstructBool, + + &ShaderSetTest::construct, + &ShaderSetTest::constructCopy, + &ShaderSetTest::constructMove, + + &ShaderSetTest::addShader, + &ShaderSetTest::addShaderEntrypointCopy, + &ShaderSetTest::addShaderEntrypointCopyReallocation, + &ShaderSetTest::addShaderSpecializations, + &ShaderSetTest::addShaderSpecializationsReallocation, + &ShaderSetTest::addShaderOwnershipTransfer, + &ShaderSetTest::addShaderTooManyStages}); +} + +using namespace Containers::Literals; + +void ShaderSetTest::specializationConstructInt() { + ShaderSpecialization spec{42, 133785}; + CORRADE_COMPARE(spec.id(), 42); + CORRADE_COMPARE(spec.data(), 133785); +} + +void ShaderSetTest::specializationConstructFloat() { + ShaderSpecialization spec{42, 4.32f}; + CORRADE_COMPARE(spec.id(), 42); + UnsignedInt data = spec.data(); + CORRADE_COMPARE(reinterpret_cast(data), 4.32f); +} + +void ShaderSetTest::specializationConstructBool() { + ShaderSpecialization spec{42, true}; + CORRADE_COMPARE(spec.id(), 42); + CORRADE_COMPARE(spec.data(), 1); +} + +void ShaderSetTest::construct() { + ShaderSet set; + 
CORRADE_VERIFY(set.stages().empty()); + + /* The actually meaningful test done in addShader() and friends */ +} + +void ShaderSetTest::constructCopy() { + CORRADE_VERIFY(!std::is_copy_constructible{}); + CORRADE_VERIFY(!std::is_copy_assignable{}); +} + +void ShaderSetTest::constructMove() { + ShaderSet c; + + { + ShaderSet a; + a.addShader(ShaderStage::Geometry, reinterpret_cast(0xdeadbeef), "main!"_s.except(1), { + {42, 1.15f} + }); + CORRADE_COMPARE(a.stages().size(), 1); + CORRADE_COMPARE(a.stages()[0].pName, "main"_s); + CORRADE_VERIFY(a.stages()[0].pSpecializationInfo); + CORRADE_COMPARE(a.stages()[0].pSpecializationInfo->mapEntryCount, 1); + CORRADE_VERIFY(a.stages()[0].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(a.stages()[0].pSpecializationInfo->pMapEntries[0].constantID, 42); + CORRADE_VERIFY(a.stages()[0].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast(a.stages()[0].pSpecializationInfo->pData), 1.15f); + + ShaderSet b = std::move(a); + CORRADE_VERIFY(a.stages().empty()); + CORRADE_COMPARE(b.stages().size(), 1); + CORRADE_COMPARE(b.stages()[0].pName, "main"_s); + CORRADE_VERIFY(b.stages()[0].pSpecializationInfo); + CORRADE_COMPARE(b.stages()[0].pSpecializationInfo->mapEntryCount, 1); + CORRADE_VERIFY(b.stages()[0].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(b.stages()[0].pSpecializationInfo->pMapEntries[0].constantID, 42); + CORRADE_VERIFY(b.stages()[0].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast(b.stages()[0].pSpecializationInfo->pData), 1.15f); + + c = std::move(b); + CORRADE_VERIFY(b.stages().empty()); + } + + /* Doing this in outer scope to verify that the internal state pointer got + properly transferred as well and we're not referencing destroyed data */ + CORRADE_COMPARE(c.stages().size(), 1); + CORRADE_COMPARE(c.stages()[0].pName, "main"_s); + CORRADE_VERIFY(c.stages()[0].pSpecializationInfo); + CORRADE_COMPARE(c.stages()[0].pSpecializationInfo->mapEntryCount, 1); + 
CORRADE_VERIFY(c.stages()[0].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(c.stages()[0].pSpecializationInfo->pMapEntries[0].constantID, 42); + CORRADE_VERIFY(c.stages()[0].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast(c.stages()[0].pSpecializationInfo->pData), 1.15f); +} + +void ShaderSetTest::addShader() { + ShaderSet set; + Containers::StringView entrypoint = "enterHere"_s; + set.addShader(ShaderStage::Geometry, reinterpret_cast(0xdeadbeef), entrypoint); + CORRADE_COMPARE(set.stages().size(), 1); + CORRADE_COMPARE(set.stages()[0].stage, VK_SHADER_STAGE_GEOMETRY_BIT); + CORRADE_COMPARE(set.stages()[0].module, reinterpret_cast(0xdeadbeef)); + /* The name should not be copied if it's null-terminated and global */ + CORRADE_COMPARE(set.stages()[0].pName, entrypoint.data()); + CORRADE_VERIFY(!set.stages()[0].pSpecializationInfo); +} + +void ShaderSetTest::addShaderEntrypointCopy() { + ShaderSet set; + Containers::StringView entrypoint = "enterHere!"_s; + set.addShader(ShaderStage{}, {}, entrypoint.except(1)); + CORRADE_COMPARE(set.stages().size(), 1); + CORRADE_VERIFY(set.stages()[0].pName != entrypoint.data()); + CORRADE_COMPARE(set.stages()[0].pName, "enterHere"_s); +} + +void ShaderSetTest::addShaderEntrypointCopyReallocation() { + ShaderSet set; + Containers::StringView entrypoint = "enterHere!"_s; + set.addShader(ShaderStage{}, {}, entrypoint.except(1)); + CORRADE_COMPARE(set.stages().size(), 1); + CORRADE_VERIFY(set.stages()[0].pName != entrypoint.data()); + CORRADE_COMPARE(set.stages()[0].pName, "enterHere"_s); + + /* After adding more stages, the original name pointers should be preserved + -- no SSO strings getting reallocated but instead all copies allocated */ + const char* prev = set.stages()[0].pName; + set.addShader(ShaderStage{}, {}, "huajajajaja"_s.prefix(5)) + .addShader(ShaderStage{}, {}, "ablablablab"_s.prefix(5)); + CORRADE_COMPARE(set.stages().size(), 3); + CORRADE_COMPARE(set.stages()[0].pName, prev); + 
CORRADE_COMPARE(set.stages()[0].pName, "enterHere"_s); + CORRADE_COMPARE(set.stages()[1].pName, "huaja"_s); + CORRADE_COMPARE(set.stages()[2].pName, "ablab"_s); +} + +void ShaderSetTest::addShaderSpecializations() { + ShaderSet set; + set.addShader(ShaderStage{}, {}, "main"_s, { + {42, 1.15f}, + {1, true}, + {13, -227} + }); + CORRADE_COMPARE(set.stages().size(), 1); + CORRADE_COMPARE(set.stages()[0].pName, "main"_s); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->mapEntryCount, 3); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[0].constantID, 42); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[0].offset, 0); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[0].size, 4); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[1].constantID, 1); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[1].offset, 4); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[1].size, 4); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[2].constantID, 13); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[2].offset, 8); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[2].size, 4); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->dataSize, 4*3); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast(set.stages()[0].pSpecializationInfo->pData), 1.15f); + CORRADE_COMPARE(*(reinterpret_cast(set.stages()[0].pSpecializationInfo->pData) + 1), 1); + CORRADE_COMPARE(*(reinterpret_cast(set.stages()[0].pSpecializationInfo->pData) + 2), -227); +} + +void ShaderSetTest::addShaderSpecializationsReallocation() { + ShaderSet set; + set.addShader(ShaderStage{}, {}, "main"_s, { + {42, 1.15f} + }); + CORRADE_COMPARE(set.stages().size(), 1); + 
CORRADE_COMPARE(set.stages()[0].pName, "main"_s); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->mapEntryCount, 1); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[0].constantID, 42); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast<const Float*>(set.stages()[0].pSpecializationInfo->pData), 1.15f); + + const void* prevData = set.stages()[0].pSpecializationInfo->pData; + const VkSpecializationMapEntry* prev = set.stages()[0].pSpecializationInfo->pMapEntries; + + set.addShader(ShaderStage{}, {}, "well"_s, { + {1, true}, + {13, -227} + }); + CORRADE_COMPARE(set.stages().size(), 2); + + /* Don't fail in this case -- the allocator is expected to be smarter than + this test */ + if(set.stages()[0].pSpecializationInfo->pData == prevData) + Warning{} << "No data reallocation happened."; + if(set.stages()[0].pSpecializationInfo->pMapEntries == prev) + Warning{} << "No entry map reallocation happened."; + + /* Same as above, everything should be kept */ + CORRADE_COMPARE(set.stages()[0].pName, "main"_s); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->mapEntryCount, 1); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[0].constantID, 42); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast<const Float*>(set.stages()[0].pSpecializationInfo->pData), 1.15f); + + /* New entries */ + CORRADE_COMPARE(set.stages()[1].pName, "well"_s); + CORRADE_VERIFY(set.stages()[1].pSpecializationInfo); + CORRADE_COMPARE(set.stages()[1].pSpecializationInfo->mapEntryCount, 2); + CORRADE_VERIFY(set.stages()[1].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(set.stages()[1].pSpecializationInfo->pMapEntries[0].constantID, 1); + 
CORRADE_COMPARE(set.stages()[1].pSpecializationInfo->pMapEntries[1].constantID, 13); + CORRADE_VERIFY(set.stages()[1].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast<const Int*>(set.stages()[1].pSpecializationInfo->pData), 1); + CORRADE_COMPARE(*(reinterpret_cast<const Int*>(set.stages()[1].pSpecializationInfo->pData) + 1), -227); +} + +void ShaderSetTest::addShaderOwnershipTransfer() { + Device device{NoCreate}; + auto shader = Shader::wrap(device, reinterpret_cast<VkShaderModule>(0xdeadbeef)); + + ShaderSet set; + set.addShader(ShaderStage::RayAnyHit, std::move(shader), "main"_s, { + {13, 1227} + }); + + CORRADE_COMPARE(set.stages()[0].stage, VK_SHADER_STAGE_ANY_HIT_BIT_KHR); + CORRADE_COMPARE(set.stages()[0].pName, "main"_s); + CORRADE_COMPARE(set.stages()[0].module, reinterpret_cast<VkShaderModule>(0xdeadbeef)); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->mapEntryCount, 1); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pMapEntries); + CORRADE_COMPARE(set.stages()[0].pSpecializationInfo->pMapEntries[0].constantID, 13); + CORRADE_VERIFY(set.stages()[0].pSpecializationInfo->pData); + CORRADE_COMPARE(*reinterpret_cast<const Int*>(set.stages()[0].pSpecializationInfo->pData), 1227); + + /* The shader should be moved away */ + CORRADE_VERIFY(!shader.handle()); +} + +void ShaderSetTest::addShaderTooManyStages() { + #ifdef CORRADE_NO_ASSERT + CORRADE_SKIP("CORRADE_NO_ASSERT defined, can't test assertions"); + #endif + + ShaderSet set; + + set.addShader({}, {}, {}) + .addShader({}, {}, {}) + .addShader({}, {}, {}) + .addShader({}, {}, {}) + .addShader({}, {}, {}) + .addShader({}, {}, {}); + + std::ostringstream out; + Error redirectError{&out}; + set.addShader({}, {}, {}); + CORRADE_COMPARE(out.str(), "Vk::ShaderSet::addShader(): too many stages, expected at most 6\n"); +} + +}}}} + +CORRADE_TEST_MAIN(Magnum::Vk::Test::ShaderSetTest) diff --git a/src/Magnum/Vk/Vk.h b/src/Magnum/Vk/Vk.h index eb339a740..7f93d1b61 100644 --- a/src/Magnum/Vk/Vk.h +++ 
b/src/Magnum/Vk/Vk.h @@ -113,6 +113,9 @@ class RenderPassCreateInfo; enum class Result: Int; class Shader; class ShaderCreateInfo; +class ShaderSet; +/* ShaderSpecialization used only directly with ShaderSet */ +enum class ShaderStage: UnsignedInt; class SubmitInfo; class SubpassBeginInfo; class SubpassEndInfo;