Add shader baker to project exporter.
Metal support contributed by Migeran (https://migeran.com) and Stuart Carnie.

Co-authored-by: Stuart Carnie <stuart.carnie@gmail.com>
Co-authored-by: Gergely Kis <gergely.kis@migeran.com>
@@ -32,9 +32,12 @@
 #include "core/config/project_settings.h"
 #include "core/io/marshalls.h"
-#include "thirdparty/misc/smolv.h"
 #include "vulkan_hooks.h"

+#if RENDERING_SHADER_CONTAINER_VULKAN_SMOLV
+#include "thirdparty/misc/smolv.h"
+#endif
+
 #if defined(ANDROID_ENABLED)
 #include "platform/android/java_godot_wrapper.h"
 #include "platform/android/os_android.h"
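The smolv include moves behind RENDERING_SHADER_CONTAINER_VULKAN_SMOLV because the new shader container may carry either SMOL-V or plain SPIR-V depending on how the container format was built. A minimal sketch of the decode fallback this gate selects, mirroring the module loop later in this diff (`input`/`input_size` stand in for a stage payload):

	#if RENDERING_SHADER_CONTAINER_VULKAN_SMOLV
		// SMOL-V build: decode the compact stream back into SPIR-V words.
		decoded_spirv.resize(smolv::GetDecodedBufferSize(input, input_size));
		if (!smolv::Decode(input, input_size, decoded_spirv.ptrw(), decoded_spirv.size())) {
			// Malformed stream; the real code records an error and bails out.
		}
	#else
		// Non-SMOL-V build: the container already stores raw SPIR-V.
		decoded_spirv.resize(input_size);
		memcpy(decoded_spirv.ptrw(), input, decoded_spirv.size());
	#endif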
@@ -3550,260 +3553,34 @@ static VkShaderStageFlagBits RD_STAGE_TO_VK_SHADER_STAGE_BITS[RDD::SHADER_STAGE_
 	VK_SHADER_STAGE_COMPUTE_BIT,
 };

-String RenderingDeviceDriverVulkan::shader_get_binary_cache_key() {
-	return "Vulkan-SV" + uitos(ShaderBinary::VERSION);
-}
-
-Vector<uint8_t> RenderingDeviceDriverVulkan::shader_compile_binary_from_spirv(VectorView<ShaderStageSPIRVData> p_spirv, const String &p_shader_name) {
-	ShaderReflection shader_refl;
-	if (_reflect_spirv(p_spirv, shader_refl) != OK) {
-		return Vector<uint8_t>();
-	}
-
-	ERR_FAIL_COND_V_MSG((uint32_t)shader_refl.uniform_sets.size() > physical_device_properties.limits.maxBoundDescriptorSets, Vector<uint8_t>(),
-			"Number of uniform sets is larger than what is supported by the hardware (" + itos(physical_device_properties.limits.maxBoundDescriptorSets) + ").");
-
-	// Collect reflection data into binary data.
-	ShaderBinary::Data binary_data;
-	Vector<Vector<ShaderBinary::DataBinding>> uniforms; // Set bindings.
-	Vector<ShaderBinary::SpecializationConstant> specialization_constants;
-	{
-		binary_data.vertex_input_mask = shader_refl.vertex_input_mask;
-		binary_data.fragment_output_mask = shader_refl.fragment_output_mask;
-		binary_data.specialization_constants_count = shader_refl.specialization_constants.size();
-		binary_data.is_compute = shader_refl.is_compute;
-		binary_data.compute_local_size[0] = shader_refl.compute_local_size[0];
-		binary_data.compute_local_size[1] = shader_refl.compute_local_size[1];
-		binary_data.compute_local_size[2] = shader_refl.compute_local_size[2];
-		binary_data.set_count = shader_refl.uniform_sets.size();
-		binary_data.push_constant_size = shader_refl.push_constant_size;
-		for (uint32_t i = 0; i < SHADER_STAGE_MAX; i++) {
-			if (shader_refl.push_constant_stages.has_flag((ShaderStage)(1 << i))) {
-				binary_data.vk_push_constant_stages_mask |= RD_STAGE_TO_VK_SHADER_STAGE_BITS[i];
-			}
-		}
-
-		for (const Vector<ShaderUniform> &set_refl : shader_refl.uniform_sets) {
-			Vector<ShaderBinary::DataBinding> set_bindings;
-			for (const ShaderUniform &uniform_refl : set_refl) {
-				ShaderBinary::DataBinding binding;
-				binding.type = (uint32_t)uniform_refl.type;
-				binding.binding = uniform_refl.binding;
-				binding.stages = (uint32_t)uniform_refl.stages;
-				binding.length = uniform_refl.length;
-				binding.writable = (uint32_t)uniform_refl.writable;
-				set_bindings.push_back(binding);
-			}
-			uniforms.push_back(set_bindings);
-		}
-
-		for (const ShaderSpecializationConstant &refl_sc : shader_refl.specialization_constants) {
-			ShaderBinary::SpecializationConstant spec_constant;
-			spec_constant.type = (uint32_t)refl_sc.type;
-			spec_constant.constant_id = refl_sc.constant_id;
-			spec_constant.int_value = refl_sc.int_value;
-			spec_constant.stage_flags = (uint32_t)refl_sc.stages;
-			specialization_constants.push_back(spec_constant);
+RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_container(const Ref<RenderingShaderContainer> &p_shader_container, const Vector<ImmutableSampler> &p_immutable_samplers) {
+	ShaderReflection shader_refl = p_shader_container->get_shader_reflection();
+	ShaderInfo shader_info;
+	for (uint32_t i = 0; i < SHADER_STAGE_MAX; i++) {
+		if (shader_refl.push_constant_stages.has_flag((ShaderStage)(1 << i))) {
+			shader_info.vk_push_constant_stages |= RD_STAGE_TO_VK_SHADER_STAGE_BITS[i];
 		}
 	}

-	Vector<Vector<uint8_t>> compressed_stages;
-	Vector<uint32_t> smolv_size;
-	Vector<uint32_t> zstd_size; // If 0, zstd not used.
-
-	uint32_t stages_binary_size = 0;
-
-	bool strip_debug = false;
-
-	for (uint32_t i = 0; i < p_spirv.size(); i++) {
-		smolv::ByteArray smolv;
-		if (!smolv::Encode(p_spirv[i].spirv.ptr(), p_spirv[i].spirv.size(), smolv, strip_debug ? smolv::kEncodeFlagStripDebugInfo : 0)) {
-			ERR_FAIL_V_MSG(Vector<uint8_t>(), "Error compressing shader stage: " + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]));
-		} else {
-			smolv_size.push_back(smolv.size());
-			{ // zstd.
-				Vector<uint8_t> zstd;
-				zstd.resize(Compression::get_max_compressed_buffer_size(smolv.size(), Compression::MODE_ZSTD));
-				int dst_size = Compression::compress(zstd.ptrw(), &smolv[0], smolv.size(), Compression::MODE_ZSTD);
-
-				if (dst_size > 0 && (uint32_t)dst_size < smolv.size()) {
-					zstd_size.push_back(dst_size);
-					zstd.resize(dst_size);
-					compressed_stages.push_back(zstd);
-				} else {
-					Vector<uint8_t> smv;
-					smv.resize(smolv.size());
-					memcpy(smv.ptrw(), &smolv[0], smolv.size());
-					zstd_size.push_back(0); // Not using zstd.
-					compressed_stages.push_back(smv);
-				}
-			}
-		}
-		uint32_t s = compressed_stages[i].size();
-		stages_binary_size += STEPIFY(s, 4);
-	}
-
-	binary_data.specialization_constants_count = specialization_constants.size();
-	binary_data.set_count = uniforms.size();
-	binary_data.stage_count = p_spirv.size();
-
-	CharString shader_name_utf = p_shader_name.utf8();
-
-	binary_data.shader_name_len = shader_name_utf.length();
-
-	uint32_t total_size = sizeof(uint32_t) * 4; // Header + version + data size + pad.
-	total_size += sizeof(ShaderBinary::Data);
-
-	total_size += STEPIFY(binary_data.shader_name_len, 4);
-
-	for (int i = 0; i < uniforms.size(); i++) {
-		total_size += sizeof(uint32_t);
-		total_size += uniforms[i].size() * sizeof(ShaderBinary::DataBinding);
-	}
-
-	total_size += sizeof(ShaderBinary::SpecializationConstant) * specialization_constants.size();
-
-	total_size += compressed_stages.size() * sizeof(uint32_t) * 3; // Sizes.
-	total_size += stages_binary_size;
-
-	Vector<uint8_t> ret;
-	ret.resize(total_size);
-	{
-		uint32_t offset = 0;
-		uint8_t *binptr = ret.ptrw();
-		binptr[0] = 'G';
-		binptr[1] = 'S';
-		binptr[2] = 'B';
-		binptr[3] = 'D'; // Godot Shader Binary Data.
-		offset += 4;
-		encode_uint32(ShaderBinary::VERSION, binptr + offset);
-		offset += sizeof(uint32_t);
-		encode_uint32(sizeof(ShaderBinary::Data), binptr + offset);
-		offset += sizeof(uint32_t);
-		encode_uint32(0, binptr + offset); // Pad to align ShaderBinary::Data to 8 bytes.
-		offset += sizeof(uint32_t);
-		memcpy(binptr + offset, &binary_data, sizeof(ShaderBinary::Data));
-		offset += sizeof(ShaderBinary::Data);
-
-#define ADVANCE_OFFSET_WITH_ALIGNMENT(m_bytes)                     \
-	{                                                              \
-		offset += m_bytes;                                         \
-		uint32_t padding = STEPIFY(m_bytes, 4) - m_bytes;          \
-		memset(binptr + offset, 0, padding); /* Avoid garbage data. */ \
-		offset += padding;                                         \
-	}
-
-		if (binary_data.shader_name_len > 0) {
-			memcpy(binptr + offset, shader_name_utf.ptr(), binary_data.shader_name_len);
-			ADVANCE_OFFSET_WITH_ALIGNMENT(binary_data.shader_name_len);
-		}
-
-		for (int i = 0; i < uniforms.size(); i++) {
-			int count = uniforms[i].size();
-			encode_uint32(count, binptr + offset);
-			offset += sizeof(uint32_t);
-			if (count > 0) {
-				memcpy(binptr + offset, uniforms[i].ptr(), sizeof(ShaderBinary::DataBinding) * count);
-				offset += sizeof(ShaderBinary::DataBinding) * count;
-			}
-		}
-
-		if (specialization_constants.size()) {
-			memcpy(binptr + offset, specialization_constants.ptr(), sizeof(ShaderBinary::SpecializationConstant) * specialization_constants.size());
-			offset += sizeof(ShaderBinary::SpecializationConstant) * specialization_constants.size();
-		}
-
-		for (int i = 0; i < compressed_stages.size(); i++) {
-			encode_uint32(p_spirv[i].shader_stage, binptr + offset);
-			offset += sizeof(uint32_t);
-			encode_uint32(smolv_size[i], binptr + offset);
-			offset += sizeof(uint32_t);
-			encode_uint32(zstd_size[i], binptr + offset);
-			offset += sizeof(uint32_t);
-			memcpy(binptr + offset, compressed_stages[i].ptr(), compressed_stages[i].size());
-			ADVANCE_OFFSET_WITH_ALIGNMENT(compressed_stages[i].size());
-		}
-
-		DEV_ASSERT(offset == (uint32_t)ret.size());
-	}
-
-	return ret;
-}
-
-RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, ShaderDescription &r_shader_desc, String &r_name, const Vector<ImmutableSampler> &p_immutable_samplers) {
-	r_shader_desc = {}; // Driver-agnostic.
-	ShaderInfo shader_info; // Driver-specific.
-
-	const uint8_t *binptr = p_shader_binary.ptr();
-	uint32_t binsize = p_shader_binary.size();
-
-	uint32_t read_offset = 0;
-
-	// Consistency check.
-	ERR_FAIL_COND_V(binsize < sizeof(uint32_t) * 4 + sizeof(ShaderBinary::Data), ShaderID());
-	ERR_FAIL_COND_V(binptr[0] != 'G' || binptr[1] != 'S' || binptr[2] != 'B' || binptr[3] != 'D', ShaderID());
-
-	uint32_t bin_version = decode_uint32(binptr + 4);
-	ERR_FAIL_COND_V(bin_version != ShaderBinary::VERSION, ShaderID());
-
-	uint32_t bin_data_size = decode_uint32(binptr + 8);
-
-	// 16, not 12, to skip alignment padding.
-	const ShaderBinary::Data &binary_data = *(reinterpret_cast<const ShaderBinary::Data *>(binptr + 16));
-
-	r_shader_desc.push_constant_size = binary_data.push_constant_size;
-	shader_info.vk_push_constant_stages = binary_data.vk_push_constant_stages_mask;
-
-	r_shader_desc.vertex_input_mask = binary_data.vertex_input_mask;
-	r_shader_desc.fragment_output_mask = binary_data.fragment_output_mask;
-
-	r_shader_desc.is_compute = binary_data.is_compute;
-	r_shader_desc.compute_local_size[0] = binary_data.compute_local_size[0];
-	r_shader_desc.compute_local_size[1] = binary_data.compute_local_size[1];
-	r_shader_desc.compute_local_size[2] = binary_data.compute_local_size[2];
-
-	read_offset += sizeof(uint32_t) * 4 + bin_data_size;
-
-	if (binary_data.shader_name_len) {
-		r_name.clear();
-		r_name.append_utf8((const char *)(binptr + read_offset), binary_data.shader_name_len);
-		read_offset += STEPIFY(binary_data.shader_name_len, 4);
-	}

 	// Set bindings.
 	Vector<Vector<VkDescriptorSetLayoutBinding>> vk_set_bindings;
-
-	r_shader_desc.uniform_sets.resize(binary_data.set_count);
-	vk_set_bindings.resize(binary_data.set_count);
-
-	for (uint32_t i = 0; i < binary_data.set_count; i++) {
-		ERR_FAIL_COND_V(read_offset + sizeof(uint32_t) >= binsize, ShaderID());
-		uint32_t set_count = decode_uint32(binptr + read_offset);
-		read_offset += sizeof(uint32_t);
-		const ShaderBinary::DataBinding *set_ptr = reinterpret_cast<const ShaderBinary::DataBinding *>(binptr + read_offset);
-		uint32_t set_size = set_count * sizeof(ShaderBinary::DataBinding);
-		ERR_FAIL_COND_V(read_offset + set_size >= binsize, ShaderID());
-
-		for (uint32_t j = 0; j < set_count; j++) {
-			ShaderUniform info;
-			info.type = UniformType(set_ptr[j].type);
-			info.writable = set_ptr[j].writable;
-			info.length = set_ptr[j].length;
-			info.binding = set_ptr[j].binding;
-			info.stages = set_ptr[j].stages;
-
+	vk_set_bindings.resize(shader_refl.uniform_sets.size());
+	for (uint32_t i = 0; i < shader_refl.uniform_sets.size(); i++) {
+		for (uint32_t j = 0; j < shader_refl.uniform_sets[i].size(); j++) {
+			const ShaderUniform &uniform = shader_refl.uniform_sets[i][j];
 			VkDescriptorSetLayoutBinding layout_binding = {};
-			layout_binding.binding = set_ptr[j].binding;
+			layout_binding.binding = uniform.binding;
 			layout_binding.descriptorCount = 1;
 			for (uint32_t k = 0; k < SHADER_STAGE_MAX; k++) {
-				if ((set_ptr[j].stages & (1 << k))) {
+				if ((uniform.stages.has_flag(ShaderStage(1U << k)))) {
 					layout_binding.stageFlags |= RD_STAGE_TO_VK_SHADER_STAGE_BITS[k];
 				}
 			}

-			switch (info.type) {
+			switch (uniform.type) {
 				case UNIFORM_TYPE_SAMPLER: {
 					layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
-					layout_binding.descriptorCount = set_ptr[j].length;
+					layout_binding.descriptorCount = uniform.length;
 					// Immutable samplers: here they get set in the layoutbinding, given that they will not be changed later.
 					int immutable_bind_index = -1;
 					if (immutable_samplers_enabled && p_immutable_samplers.size() > 0) {
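The block removed above was the writer half of Godot's old "GSBD" blob: a 16-byte preamble (magic, version, sizeof(ShaderBinary::Data), and one zero pad word so the Data struct lands 8-byte aligned at offset 16), followed by the shader name, per-set bindings, specialization constants, and the smolv/zstd stage payloads, each 4-byte aligned via ADVANCE_OFFSET_WITH_ALIGNMENT. A minimal reader for just the preamble, matching the checks the removed shader_create_from_bytecode() performed (decode_uint32 comes from core/io/marshalls.h, included at the top of this file):

	// Sketch: validate the old GSBD preamble. Returns false on any mismatch.
	static bool read_gsbd_preamble(const uint8_t *p_data, uint32_t p_size, uint32_t &r_data_size) {
		if (p_size < sizeof(uint32_t) * 4) {
			return false; // Too small to hold even the preamble.
		}
		if (p_data[0] != 'G' || p_data[1] != 'S' || p_data[2] != 'B' || p_data[3] != 'D') {
			return false; // Magic mismatch.
		}
		if (decode_uint32(p_data + 4) != ShaderBinary::VERSION) {
			return false; // Blobs were only valid for the exact version that wrote them.
		}
		r_data_size = decode_uint32(p_data + 8);
		// p_data + 12 is the zero pad; ShaderBinary::Data starts at p_data + 16.
		return true;
	}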
@@ -3820,19 +3597,19 @@ RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_bytecode(const Vec
 				} break;
 				case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
 					layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
-					layout_binding.descriptorCount = set_ptr[j].length;
+					layout_binding.descriptorCount = uniform.length;
 				} break;
 				case UNIFORM_TYPE_TEXTURE: {
 					layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
-					layout_binding.descriptorCount = set_ptr[j].length;
+					layout_binding.descriptorCount = uniform.length;
 				} break;
 				case UNIFORM_TYPE_IMAGE: {
 					layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
-					layout_binding.descriptorCount = set_ptr[j].length;
+					layout_binding.descriptorCount = uniform.length;
 				} break;
 				case UNIFORM_TYPE_TEXTURE_BUFFER: {
 					layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
-					layout_binding.descriptorCount = set_ptr[j].length;
+					layout_binding.descriptorCount = uniform.length;
 				} break;
 				case UNIFORM_TYPE_IMAGE_BUFFER: {
 					layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
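Every branch of this switch changes its data source from the serialized record (set_ptr[j]) to live container reflection (uniform); in both schemes the descriptorCount assignment is what makes arrayed bindings work. A sketch of the idea for a hypothetical fragment shader declaring `uniform sampler2D tex[8];`, where reflection would report length = 8:

	// Sketch: an arrayed combined image sampler. The layout binding must
	// advertise the same element count the shader declares.
	VkDescriptorSetLayoutBinding layout_binding = {};
	layout_binding.binding = uniform.binding;                 // layout(binding = N) in the shader.
	layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
	layout_binding.descriptorCount = uniform.length;          // 8 for tex[8], 1 for a scalar binding.
	layout_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; // Whichever stages use it.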
@@ -3851,104 +3628,72 @@ RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_bytecode(const Vec
 				}
 			}

-			r_shader_desc.uniform_sets.write[i].push_back(info);
 			vk_set_bindings.write[i].push_back(layout_binding);
 		}
-
-		read_offset += set_size;
 	}

-	ERR_FAIL_COND_V(read_offset + binary_data.specialization_constants_count * sizeof(ShaderBinary::SpecializationConstant) >= binsize, ShaderID());
-
-	r_shader_desc.specialization_constants.resize(binary_data.specialization_constants_count);
-	for (uint32_t i = 0; i < binary_data.specialization_constants_count; i++) {
-		const ShaderBinary::SpecializationConstant &src_sc = *(reinterpret_cast<const ShaderBinary::SpecializationConstant *>(binptr + read_offset));
-		ShaderSpecializationConstant sc;
-		sc.type = PipelineSpecializationConstantType(src_sc.type);
-		sc.constant_id = src_sc.constant_id;
-		sc.int_value = src_sc.int_value;
-		sc.stages = src_sc.stage_flags;
-		r_shader_desc.specialization_constants.write[i] = sc;
-
-		read_offset += sizeof(ShaderBinary::SpecializationConstant);
-	}
-
-	Vector<Vector<uint8_t>> stages_spirv;
-	stages_spirv.resize(binary_data.stage_count);
-	r_shader_desc.stages.resize(binary_data.stage_count);
-
-	for (uint32_t i = 0; i < binary_data.stage_count; i++) {
-		ERR_FAIL_COND_V(read_offset + sizeof(uint32_t) * 3 >= binsize, ShaderID());
-
-		uint32_t stage = decode_uint32(binptr + read_offset);
-		read_offset += sizeof(uint32_t);
-		uint32_t smolv_size = decode_uint32(binptr + read_offset);
-		read_offset += sizeof(uint32_t);
-		uint32_t zstd_size = decode_uint32(binptr + read_offset);
-		read_offset += sizeof(uint32_t);
-
-		uint32_t buf_size = (zstd_size > 0) ? zstd_size : smolv_size;
-
-		Vector<uint8_t> smolv;
-		const uint8_t *src_smolv = nullptr;
-
-		if (zstd_size > 0) {
-			// Decompress to smolv.
-			smolv.resize(smolv_size);
-			int dec_smolv_size = Compression::decompress(smolv.ptrw(), smolv.size(), binptr + read_offset, zstd_size, Compression::MODE_ZSTD);
-			ERR_FAIL_COND_V(dec_smolv_size != (int32_t)smolv_size, ShaderID());
-			src_smolv = smolv.ptr();
-		} else {
-			src_smolv = binptr + read_offset;
-		}
-
-		Vector<uint8_t> &spirv = stages_spirv.ptrw()[i];
-		uint32_t spirv_size = smolv::GetDecodedBufferSize(src_smolv, smolv_size);
-		spirv.resize(spirv_size);
-		if (!smolv::Decode(src_smolv, smolv_size, spirv.ptrw(), spirv_size)) {
-			ERR_FAIL_V_MSG(ShaderID(), "Malformed smolv input uncompressing shader stage: " + String(SHADER_STAGE_NAMES[stage]));
-		}
-
-		r_shader_desc.stages.set(i, ShaderStage(stage));
-
-		buf_size = STEPIFY(buf_size, 4);
-		read_offset += buf_size;
-		ERR_FAIL_COND_V(read_offset > binsize, ShaderID());
-	}
-
-	ERR_FAIL_COND_V(read_offset != binsize, ShaderID());

 	// Modules.

+	VkResult res;
 	String error_text;
+	Vector<uint8_t> decompressed_code;
+	Vector<uint8_t> decoded_spirv;
+	VkShaderModule vk_module;
+	for (int i = 0; i < shader_refl.stages_vector.size(); i++) {
+		const RenderingShaderContainer::Shader &shader = p_shader_container->shaders[i];
+#if RENDERING_SHADER_CONTAINER_VULKAN_COMPRESSION
+		bool requires_decompression = (shader.code_decompressed_size > 0);
+		if (requires_decompression) {
+			decompressed_code.resize(shader.code_decompressed_size);
+			bool decompressed = p_shader_container->decompress_code(shader.code_compressed_bytes.ptr(), shader.code_compressed_bytes.size(), shader.code_compression_flags, decompressed_code.ptrw(), decompressed_code.size());
+			if (!decompressed) {
+				error_text = vformat("Failed to decompress code on shader stage %s.", String(SHADER_STAGE_NAMES[shader_refl.stages_vector[i]]));
+				break;
+			}
+		}
+#else
+		bool requires_decompression = false;
+#endif
+
+		const uint8_t *smolv_input = requires_decompression ? decompressed_code.ptr() : shader.code_compressed_bytes.ptr();
+		uint32_t smolv_input_size = requires_decompression ? decompressed_code.size() : shader.code_compressed_bytes.size();
+#if RENDERING_SHADER_CONTAINER_VULKAN_SMOLV
+		decoded_spirv.resize(smolv::GetDecodedBufferSize(smolv_input, smolv_input_size));
+		if (decoded_spirv.is_empty()) {
+			error_text = vformat("Malformed smolv input on shader stage %s.", String(SHADER_STAGE_NAMES[shader_refl.stages_vector[i]]));
+			break;
+		}
+
+		if (!smolv::Decode(smolv_input, smolv_input_size, decoded_spirv.ptrw(), decoded_spirv.size())) {
+			error_text = vformat("Malformed smolv input on shader stage %s.", String(SHADER_STAGE_NAMES[shader_refl.stages_vector[i]]));
+			break;
+		}
+#else
+		decoded_spirv.resize(smolv_input_size);
+		memcpy(decoded_spirv.ptrw(), smolv_input, decoded_spirv.size());
+#endif

-	for (int i = 0; i < r_shader_desc.stages.size(); i++) {
 		VkShaderModuleCreateInfo shader_module_create_info = {};
 		shader_module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
-		shader_module_create_info.codeSize = stages_spirv[i].size();
-		shader_module_create_info.pCode = (const uint32_t *)stages_spirv[i].ptr();
+		shader_module_create_info.codeSize = decoded_spirv.size();
+		shader_module_create_info.pCode = (const uint32_t *)(decoded_spirv.ptr());

-		VkShaderModule vk_module = VK_NULL_HANDLE;
-		VkResult res = vkCreateShaderModule(vk_device, &shader_module_create_info, VKC::get_allocation_callbacks(VK_OBJECT_TYPE_SHADER_MODULE), &vk_module);
-		if (res) {
-			error_text = "Error (" + itos(res) + ") creating shader module for stage: " + String(SHADER_STAGE_NAMES[r_shader_desc.stages[i]]);
+		res = vkCreateShaderModule(vk_device, &shader_module_create_info, VKC::get_allocation_callbacks(VK_OBJECT_TYPE_SHADER_MODULE), &vk_module);
+		if (res != VK_SUCCESS) {
+			error_text = vformat("Error (%d) creating module for shader stage %s.", res, String(SHADER_STAGE_NAMES[shader_refl.stages_vector[i]]));
 			break;
 		}

 		VkPipelineShaderStageCreateInfo create_info = {};
 		create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
-		create_info.stage = RD_STAGE_TO_VK_SHADER_STAGE_BITS[r_shader_desc.stages[i]];
+		create_info.stage = RD_STAGE_TO_VK_SHADER_STAGE_BITS[shader_refl.stages_vector[i]];
 		create_info.module = vk_module;
 		create_info.pName = "main";

 		shader_info.vk_stages_create_info.push_back(create_info);
 	}

 	// Descriptor sets.

 	if (error_text.is_empty()) {
-		DEV_ASSERT((uint32_t)vk_set_bindings.size() == binary_data.set_count);
-		for (uint32_t i = 0; i < binary_data.set_count; i++) {
+		for (uint32_t i = 0; i < shader_refl.uniform_sets.size(); i++) {
 			// Empty ones are fine if they were not used according to spec (binding count will be 0).
 			VkDescriptorSetLayoutCreateInfo layout_create_info = {};
 			layout_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
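The new module loop recovers each stage's SPIR-V through two independent, optional layers: container compression (gated at runtime by code_decompressed_size > 0) and SMOL-V encoding (gated at build time). Condensed to its control flow, a sketch using the names from the patch:

	// Sketch: per-stage payload recovery in shader_create_from_container().
	const uint8_t *input = shader.code_compressed_bytes.ptr();
	uint32_t input_size = shader.code_compressed_bytes.size();
	if (shader.code_decompressed_size > 0) {
		// Layer 1: the container compressed this stage; inflate it first.
		decompressed_code.resize(shader.code_decompressed_size);
		p_shader_container->decompress_code(input, input_size, shader.code_compression_flags,
				decompressed_code.ptrw(), decompressed_code.size());
		input = decompressed_code.ptr();
		input_size = decompressed_code.size();
	}
	// Layer 2: SMOL-V decode (or a straight copy in non-SMOL-V builds).
	decoded_spirv.resize(smolv::GetDecodedBufferSize(input, input_size));
	smolv::Decode(input, input_size, decoded_spirv.ptrw(), decoded_spirv.size());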
@@ -3956,9 +3701,9 @@ RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_bytecode(const Vec
 			layout_create_info.pBindings = vk_set_bindings[i].ptr();

 			VkDescriptorSetLayout layout = VK_NULL_HANDLE;
-			VkResult res = vkCreateDescriptorSetLayout(vk_device, &layout_create_info, VKC::get_allocation_callbacks(VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT), &layout);
+			res = vkCreateDescriptorSetLayout(vk_device, &layout_create_info, VKC::get_allocation_callbacks(VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT), &layout);
 			if (res) {
-				error_text = "Error (" + itos(res) + ") creating descriptor set layout for set " + itos(i);
+				error_text = vformat("Error (%d) creating descriptor set layout for set %d.", res, i);
 				break;
 			}

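As the comment in the previous hunk notes, a set index the shader skips still gets a VkDescriptorSetLayout so set numbering stays contiguous in the pipeline layout; an empty layout is legal per the Vulkan spec. A standalone sketch:

	// Sketch: bindingCount = 0 produces a valid, empty descriptor set layout,
	// which keeps set indices dense for vkCreatePipelineLayout().
	VkDescriptorSetLayoutCreateInfo layout_create_info = {};
	layout_create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
	layout_create_info.bindingCount = 0;
	layout_create_info.pBindings = nullptr;
	VkDescriptorSetLayout layout = VK_NULL_HANDLE;
	VkResult res = vkCreateDescriptorSetLayout(vk_device, &layout_create_info, nullptr, &layout);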
@@ -3968,24 +3713,23 @@ RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_bytecode(const Vec

 	if (error_text.is_empty()) {
 		// Pipeline layout.

 		VkPipelineLayoutCreateInfo pipeline_layout_create_info = {};
 		pipeline_layout_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
-		pipeline_layout_create_info.setLayoutCount = binary_data.set_count;
+		pipeline_layout_create_info.setLayoutCount = shader_info.vk_descriptor_set_layouts.size();
 		pipeline_layout_create_info.pSetLayouts = shader_info.vk_descriptor_set_layouts.ptr();

-		if (binary_data.push_constant_size) {
+		if (shader_refl.push_constant_size > 0) {
 			VkPushConstantRange *push_constant_range = ALLOCA_SINGLE(VkPushConstantRange);
 			*push_constant_range = {};
-			push_constant_range->stageFlags = binary_data.vk_push_constant_stages_mask;
-			push_constant_range->size = binary_data.push_constant_size;
+			push_constant_range->stageFlags = shader_info.vk_push_constant_stages;
+			push_constant_range->size = shader_refl.push_constant_size;
 			pipeline_layout_create_info.pushConstantRangeCount = 1;
 			pipeline_layout_create_info.pPushConstantRanges = push_constant_range;
 		}

-		VkResult err = vkCreatePipelineLayout(vk_device, &pipeline_layout_create_info, VKC::get_allocation_callbacks(VK_OBJECT_TYPE_PIPELINE_LAYOUT), &shader_info.vk_pipeline_layout);
-		if (err) {
-			error_text = "Error (" + itos(err) + ") creating pipeline layout.";
+		res = vkCreatePipelineLayout(vk_device, &pipeline_layout_create_info, VKC::get_allocation_callbacks(VK_OBJECT_TYPE_PIPELINE_LAYOUT), &shader_info.vk_pipeline_layout);
+		if (res != VK_SUCCESS) {
+			error_text = vformat("Error (%d) creating pipeline layout.", res);
 		}
 	}

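The pipeline layout now sizes itself from what was actually created (vk_descriptor_set_layouts.size()) and takes its push-constant data from container reflection. For reference, the shape of an optional single push-constant range in plain Vulkan, a sketch with an assumed `push_constant_size` from reflection:

	// Sketch: emit one push-constant range only when the shader declares one.
	// stageFlags must cover every stage that reads the push constant block.
	VkPushConstantRange range = {};
	range.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; // Example stages.
	range.offset = 0;
	range.size = push_constant_size;
	VkPipelineLayoutCreateInfo create_info = {};
	create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
	create_info.pushConstantRangeCount = (push_constant_size > 0) ? 1 : 0;
	create_info.pPushConstantRanges = (push_constant_size > 0) ? &range : nullptr;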
@@ -3994,7 +3738,7 @@ RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_bytecode(const Vec
 		for (uint32_t i = 0; i < shader_info.vk_stages_create_info.size(); i++) {
 			vkDestroyShaderModule(vk_device, shader_info.vk_stages_create_info[i].module, VKC::get_allocation_callbacks(VK_OBJECT_TYPE_SHADER_MODULE));
 		}
-		for (uint32_t i = 0; i < binary_data.set_count; i++) {
+		for (uint32_t i = 0; i < shader_info.vk_descriptor_set_layouts.size(); i++) {
 			vkDestroyDescriptorSetLayout(vk_device, shader_info.vk_descriptor_set_layouts[i], VKC::get_allocation_callbacks(VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT));
 		}

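Sizing the failure cleanup by shader_info.vk_descriptor_set_layouts.size() instead of the reflected set count means only handles that were actually created get destroyed; if layout creation broke out of its loop halfway, the vector simply holds fewer entries. A sketch of the invariant, assuming the creation loop appends on success as the sizes used here imply:

	// Sketch: one push_back() per successful create keeps cleanup exact.
	VkDescriptorSetLayout layout = VK_NULL_HANDLE;
	res = vkCreateDescriptorSetLayout(vk_device, &layout_create_info,
			VKC::get_allocation_callbacks(VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT), &layout);
	if (res != VK_SUCCESS) {
		break; // Nothing pushed; the cleanup loop never sees this set.
	}
	shader_info.vk_descriptor_set_layouts.push_back(layout); // Now cleanup owns it.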
@@ -4002,7 +3746,6 @@ RDD::ShaderID RenderingDeviceDriverVulkan::shader_create_from_bytecode(const Vec
 	}

 	// Bookkeep.
-
 	ShaderInfo *shader_info_ptr = VersatileResource::allocate<ShaderInfo>(resources_allocator);
 	*shader_info_ptr = shader_info;
 	return ShaderID(shader_info_ptr);
@@ -6185,6 +5928,10 @@ const RDD::Capabilities &RenderingDeviceDriverVulkan::get_capabilities() const {
 	return device_capabilities;
 }

+const RenderingShaderContainerFormat &RenderingDeviceDriverVulkan::get_shader_container_format() const {
+	return shader_container_format;
+}
+
 bool RenderingDeviceDriverVulkan::is_composite_alpha_supported(CommandQueueID p_queue) const {
 	if (has_comp_alpha.has((uint64_t)p_queue.id)) {
 		return has_comp_alpha[(uint64_t)p_queue.id];