mirror of https://github.com/godotengine/godot.git
Merge pull request #105138 from stuartcarnie/fix_hangs
Renderer: Reduce scope of mutex locks to prevent common deadlocks
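The pattern throughout the commit is the same: never hold a shared mutex across a call that can block, compile, or wait on the WorkerThreadPool, because a pooled task that needs the same mutex can then never finish. A minimal, self-contained sketch of the before/after shape (illustrative only; the cache, key, and compile() names are invented and are not code from this commit):

#include <mutex>
#include <string>
#include <unordered_map>

static std::mutex g_cache_mutex;
static std::unordered_map<std::string, std::string> g_cache;

// Stand-in for slow work (shader compilation) that may itself take locks or queue pool tasks.
static std::string compile(const std::string &src) {
	return "compiled:" + src;
}

// Deadlock-prone shape: the mutex is held across the slow call, so any worker that needs
// g_cache_mutex while this thread waits on it can hang, and vice versa.
std::string get_compiled_broad(const std::string &key, const std::string &src) {
	std::lock_guard<std::mutex> lock(g_cache_mutex);
	auto it = g_cache.find(key);
	if (it == g_cache.end()) {
		it = g_cache.emplace(key, compile(src)).first; // slow call under the lock
	}
	return it->second;
}

// Reduced scope: the mutex only guards map accesses; the slow call runs unlocked.
std::string get_compiled_narrow(const std::string &key, const std::string &src) {
	{
		std::lock_guard<std::mutex> lock(g_cache_mutex);
		auto it = g_cache.find(key);
		if (it != g_cache.end()) {
			return it->second;
		}
	}
	std::string result = compile(src); // no lock held here
	std::lock_guard<std::mutex> lock(g_cache_mutex);
	return g_cache.emplace(key, std::move(result)).first->second; // keeps any racing winner
}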
@@ -182,6 +182,7 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 
 void WorkerThreadPool::_thread_function(void *p_user) {
 	ThreadData *thread_data = (ThreadData *)p_user;
+	Thread::set_name(vformat("WorkerThread %d", thread_data->index));
 
 	while (true) {
 		Task *task_to_process = nullptr;
@@ -684,21 +684,25 @@ void BaseMaterial3D::_update_shader() {
 		return; //no update required in the end
 	}
 
 	{
 		MutexLock lock(shader_map_mutex);
-		if (shader_map.has(current_key)) {
-			shader_map[current_key].users--;
-			if (shader_map[current_key].users == 0) {
+		ShaderData *v = shader_map.getptr(current_key);
+		if (v) {
+			v->users--;
+			if (v->users == 0) {
 				// Deallocate shader which is no longer in use.
-				RS::get_singleton()->free(shader_map[current_key].shader);
+				shader_rid = RID();
+				RS::get_singleton()->free(v->shader);
 				shader_map.erase(current_key);
 			}
 		}
 
 		current_key = mk;
 
-		if (shader_map.has(mk)) {
-			shader_rid = shader_map[mk].shader;
-			shader_map[mk].users++;
+		v = shader_map.getptr(mk);
+		if (v) {
+			shader_rid = v->shader;
+			v->users++;
 
 			if (_get_material().is_valid()) {
 				RS::get_singleton()->material_set_shader(_get_material(), shader_rid);
@@ -706,6 +710,11 @@ void BaseMaterial3D::_update_shader() {
 
 			return;
 		}
 	}
 
+	// From this point, it is possible that multiple threads requesting the same key will
+	// race to create the shader. The winner, which is the one found in shader_map, will be
+	// used. The losers will free their shader.
+
 	String texfilter_str;
 	// Force linear filtering for the heightmap texture, as the heightmap effect
@@ -1929,11 +1938,28 @@ void fragment() {)";
 
 	code += "}\n";
 
+	// We must create the shader outside the shader_map_mutex to avoid potential deadlocks with
+	// other tasks in the WorkerThreadPool simultaneously creating materials, which
+	// may also hold the shared shader_map_mutex lock.
+	RID new_shader = RS::get_singleton()->shader_create_from_code(code);
+
 	MutexLock lock(shader_map_mutex);
+
+	ShaderData *v = shader_map.getptr(mk);
+	if (unlikely(v)) {
+		// We raced and managed to create the same key concurrently, so we'll free the shader we just created,
+		// given we know it isn't used, and use the winner.
+		RS::get_singleton()->free(new_shader);
+	} else {
 		ShaderData shader_data;
-	shader_data.shader = RS::get_singleton()->shader_create_from_code(code);
-	shader_data.users = 1;
-	shader_map[mk] = shader_data;
-	shader_rid = shader_data.shader;
+		shader_data.shader = new_shader;
+		// ShaderData will be inserted with a users count of 0, but we
+		// increment unconditionally outside this if block, whilst still under lock.
+		v = &shader_map.insert(mk, shader_data)->value;
+	}
+
+	shader_rid = v->shader;
+	v->users++;
 
 	if (_get_material().is_valid()) {
 		RS::get_singleton()->material_set_shader(_get_material(), shader_rid);
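The hunk above is the second half of the race described in the earlier comment: the shader is compiled with no lock held, and the shader_map lookup under the lock decides the winner. A reduced sketch of the same create-outside-the-lock, resolve-the-race-under-the-lock pattern, with a user count like ShaderData::users (illustrative; Shader, Entry, and the std::map are stand-ins, not the engine API):

#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Shader {
	std::string code; // stand-in for a compiled shader handle
};

struct Entry {
	std::shared_ptr<Shader> shader;
	int users = 0;
};

static std::mutex g_map_mutex;
static std::map<std::string, Entry> g_shader_map;

std::shared_ptr<Shader> get_or_create(const std::string &key, const std::string &code) {
	// The expensive creation happens with no lock held, so other threads never wait on it.
	auto candidate = std::make_shared<Shader>(Shader{code});

	std::lock_guard<std::mutex> lock(g_map_mutex);
	auto it = g_shader_map.find(key);
	if (it == g_shader_map.end()) {
		// We won the race: publish our candidate with a user count of 0...
		it = g_shader_map.emplace(key, Entry{candidate, 0}).first;
	}
	// ...and increment unconditionally, winner or loser, while still under the lock.
	it->second.users++;
	// If we lost, `candidate` goes out of scope here and the loser's shader is simply dropped.
	return it->second.shader;
}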
@@ -1959,11 +1985,18 @@ void BaseMaterial3D::_check_material_rid() {
 }
 
 void BaseMaterial3D::flush_changes() {
+	SelfList<BaseMaterial3D>::List copy;
+	{
 		MutexLock lock(material_mutex);
+		while (SelfList<BaseMaterial3D> *E = dirty_materials.first()) {
+			dirty_materials.remove(E);
+			copy.add(E);
+		}
+	}
 
-	while (dirty_materials.first()) {
-		dirty_materials.first()->self()->_update_shader();
-		dirty_materials.first()->remove_from_list();
+	while (SelfList<BaseMaterial3D> *E = copy.first()) {
+		E->self()->_update_shader();
+		copy.remove(E);
 	}
 }
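flush_changes() now drains the intrusive dirty list into a local list under a short-lived lock and runs the per-material work with the lock released, so threads queuing new dirty materials are never blocked behind shader updates. A self-contained sketch of that drain-then-process shape (illustrative; the int IDs and the queue_dirty()/flush_changes() names are invented):

#include <iostream>
#include <list>
#include <mutex>

static std::mutex g_dirty_mutex;
static std::list<int> g_dirty_list; // stand-in for the intrusive SelfList of dirty materials

void queue_dirty(int material_id) {
	std::lock_guard<std::mutex> lock(g_dirty_mutex);
	g_dirty_list.push_back(material_id);
}

void flush_changes() {
	std::list<int> copy;
	{
		std::lock_guard<std::mutex> lock(g_dirty_mutex);
		copy.splice(copy.end(), g_dirty_list); // O(1) drain; the shared list is left empty
	}
	// The lock is released here; the equivalent of _update_shader() runs outside it.
	for (int material_id : copy) {
		std::cout << "updating material " << material_id << "\n";
	}
}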
@@ -141,8 +141,11 @@ void SceneShaderForwardClustered::ShaderData::set_code(const String &p_code) {
 
 	actions.uniforms = &uniforms;
 
+	Error err = OK;
+	{
 		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
-	Error err = SceneShaderForwardClustered::singleton->compiler.compile(RS::SHADER_SPATIAL, code, &actions, path, gen_code);
+		err = SceneShaderForwardClustered::singleton->compiler.compile(RS::SHADER_SPATIAL, code, &actions, path, gen_code);
+	}
 
 	if (err != OK) {
 		if (version.is_valid()) {
@@ -215,7 +218,6 @@ bool SceneShaderForwardClustered::ShaderData::casts_shadows() const {
 
 RS::ShaderNativeSourceCode SceneShaderForwardClustered::ShaderData::get_native_source_code() const {
 	if (version.is_valid()) {
-		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
 		return SceneShaderForwardClustered::singleton->shader.version_get_native_source_code(version);
 	} else {
 		return RS::ShaderNativeSourceCode();
@@ -406,7 +408,6 @@ RD::PolygonCullMode SceneShaderForwardClustered::ShaderData::get_cull_mode_from_
 
 RID SceneShaderForwardClustered::ShaderData::_get_shader_variant(uint16_t p_shader_version) const {
 	if (version.is_valid()) {
-		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
 		ERR_FAIL_NULL_V(SceneShaderForwardClustered::singleton, RID());
 		return SceneShaderForwardClustered::singleton->shader.version_get_shader(version, p_shader_version);
 	} else {
@@ -441,7 +442,6 @@ uint64_t SceneShaderForwardClustered::ShaderData::get_vertex_input_mask(Pipeline
 
 bool SceneShaderForwardClustered::ShaderData::is_valid() const {
 	if (version.is_valid()) {
-		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
 		ERR_FAIL_NULL_V(SceneShaderForwardClustered::singleton, false);
 		return SceneShaderForwardClustered::singleton->shader.version_is_valid(version);
 	} else {
@@ -459,7 +459,6 @@ SceneShaderForwardClustered::ShaderData::~ShaderData() {
 	pipeline_hash_map.clear_pipelines();
 
 	if (version.is_valid()) {
-		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
 		ERR_FAIL_NULL(SceneShaderForwardClustered::singleton);
 		SceneShaderForwardClustered::singleton->shader.version_free(version);
 	}
@@ -482,8 +481,10 @@ void SceneShaderForwardClustered::MaterialData::set_next_pass(RID p_pass) {
 
 bool SceneShaderForwardClustered::MaterialData::update_parameters(const HashMap<StringName, Variant> &p_parameters, bool p_uniform_dirty, bool p_textures_dirty) {
 	if (shader_data->version.is_valid()) {
+		RID shader_rid = SceneShaderForwardClustered::singleton->shader.version_get_shader(shader_data->version, 0);
+
 		MutexLock lock(SceneShaderForwardClustered::singleton_mutex);
-		return update_parameters_uniform_set(p_parameters, p_uniform_dirty, p_textures_dirty, shader_data->uniforms, shader_data->ubo_offsets.ptr(), shader_data->texture_uniforms, shader_data->default_texture_params, shader_data->ubo_size, uniform_set, SceneShaderForwardClustered::singleton->shader.version_get_shader(shader_data->version, 0), RenderForwardClustered::MATERIAL_UNIFORM_SET, true, true);
+		return update_parameters_uniform_set(p_parameters, p_uniform_dirty, p_textures_dirty, shader_data->uniforms, shader_data->ubo_offsets.ptr(), shader_data->texture_uniforms, shader_data->default_texture_params, shader_data->ubo_size, uniform_set, shader_rid, RenderForwardClustered::MATERIAL_UNIFORM_SET, true, true);
 	} else {
 		return false;
 	}
@@ -176,7 +176,11 @@ RID ShaderRD::version_create() {
 	version.initialize_needed = true;
 	version.variants.clear();
 	version.variant_data.clear();
-	return version_owner.make_rid(version);
+	version.mutex = memnew(Mutex);
+	RID rid = version_owner.make_rid(version);
+	MutexLock lock(versions_mutex);
+	version_mutexes.insert(rid, version.mutex);
+	return rid;
 }
 
 void ShaderRD::_initialize_version(Version *p_version) {
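ShaderRD switches from one mutex guarding every version to a per-version mutex allocated in version_create(), plus a small versions_mutex that only guards the RID-to-mutex registry. A sketch of that layout under stated assumptions (uint64_t handles, std::map, and std::shared_ptr stand in for RID, RID_Owner, and Godot's memnew'd Mutex):

#include <cstdint>
#include <map>
#include <memory>
#include <mutex>

struct Version {
	std::shared_ptr<std::mutex> mutex; // guards this version only
	bool dirty = true;
};

static std::mutex g_registry_mutex;            // guards the registry itself, never the work
static std::map<uint64_t, Version> g_versions; // handle -> version
static uint64_t g_next_handle = 1;

uint64_t version_create() {
	Version v;
	v.mutex = std::make_shared<std::mutex>();
	std::lock_guard<std::mutex> lock(g_registry_mutex); // held only for the insert
	uint64_t handle = g_next_handle++;
	g_versions.emplace(handle, std::move(v));
	return handle;
}

void version_compile(uint64_t handle) {
	std::shared_ptr<std::mutex> version_mutex;
	{
		std::lock_guard<std::mutex> lock(g_registry_mutex);
		auto it = g_versions.find(handle);
		if (it == g_versions.end()) {
			return;
		}
		version_mutex = it->second.mutex; // shared_ptr keeps the mutex alive past erase
	}
	// Long-running work holds only this version's mutex; other versions proceed in parallel.
	std::lock_guard<std::mutex> lock(*version_mutex);
	// ... initialize / compile variants here ...
}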
@@ -328,7 +332,6 @@ void ShaderRD::_compile_variant(uint32_t p_variant, CompileData p_data) {
 	}
 
 	if (!build_ok) {
-		MutexLock lock(variant_set_mutex); //properly print the errors
 		ERR_PRINT("Error compiling " + String(current_stage == RD::SHADER_STAGE_COMPUTE ? "Compute " : (current_stage == RD::SHADER_STAGE_VERTEX ? "Vertex" : "Fragment")) + " shader, variant #" + itos(variant) + " (" + variant_defines[variant].text.get_data() + ").");
 		ERR_PRINT(error);
@@ -343,8 +346,6 @@ void ShaderRD::_compile_variant(uint32_t p_variant, CompileData p_data) {
 	ERR_FAIL_COND(shader_data.is_empty());
 
-	{
-		MutexLock lock(variant_set_mutex);
 
 	p_data.version->variants.write[variant] = RD::get_singleton()->shader_create_from_bytecode_with_samplers(shader_data, p_data.version->variants[variant], immutable_samplers);
 	p_data.version->variant_data.write[variant] = shader_data;
 }
@@ -355,6 +356,8 @@ RS::ShaderNativeSourceCode ShaderRD::version_get_native_source_code(RID p_versio
 	RS::ShaderNativeSourceCode source_code;
 	ERR_FAIL_NULL_V(version, source_code);
 
+	MutexLock lock(*version->mutex);
+
 	source_code.versions.resize(variant_defines.size());
 
 	for (int i = 0; i < source_code.versions.size(); i++) {
@@ -481,12 +484,10 @@ bool ShaderRD::_load_from_cache(Version *p_version, int p_group) {
 	for (uint32_t i = 0; i < variant_count; i++) {
 		int variant_id = group_to_variant_map[p_group][i];
 		if (!variants_enabled[variant_id]) {
-			MutexLock lock(variant_set_mutex);
 			p_version->variants.write[variant_id] = RID();
 			continue;
 		}
-		{
-			MutexLock lock(variant_set_mutex);
 		RID shader = RD::get_singleton()->shader_create_from_bytecode_with_samplers(p_version->variant_data[variant_id], p_version->variants[variant_id], immutable_samplers);
 		if (shader.is_null()) {
 			for (uint32_t j = 0; j < i; j++) {
@@ -527,7 +528,6 @@ void ShaderRD::_allocate_placeholders(Version *p_version, int p_group) {
 		int variant_id = group_to_variant_map[p_group][i];
 		RID shader = RD::get_singleton()->shader_create_placeholder();
 		{
-			MutexLock lock(variant_set_mutex);
 			p_version->variants.write[variant_id] = shader;
 		}
 	}
@@ -554,7 +554,7 @@ void ShaderRD::_compile_version_start(Version *p_version, int p_group) {
 	compile_data.version = p_version;
 	compile_data.group = p_group;
 
-	WorkerThreadPool::GroupID group_task = WorkerThreadPool::get_named_pool(SNAME("ShaderCompilationPool"))->add_template_group_task(this, &ShaderRD::_compile_variant, compile_data, group_to_variant_map[p_group].size(), -1, true, SNAME("ShaderCompilation"));
+	WorkerThreadPool::GroupID group_task = WorkerThreadPool::get_singleton()->add_template_group_task(this, &ShaderRD::_compile_variant, compile_data, group_to_variant_map[p_group].size(), -1, true, SNAME("ShaderCompilation"));
 	p_version->group_compilation_tasks.write[p_group] = group_task;
 }
 
@@ -563,7 +563,7 @@ void ShaderRD::_compile_version_end(Version *p_version, int p_group) {
 		return;
 	}
 	WorkerThreadPool::GroupID group_task = p_version->group_compilation_tasks[p_group];
-	WorkerThreadPool::get_named_pool(SNAME("ShaderCompilationPool"))->wait_for_group_task_completion(group_task);
+	WorkerThreadPool::get_singleton()->wait_for_group_task_completion(group_task);
 	p_version->group_compilation_tasks.write[p_group] = 0;
 
 	bool all_valid = true;
@@ -616,6 +616,8 @@ void ShaderRD::version_set_code(RID p_version, const HashMap<String, String> &p_
 	Version *version = version_owner.get_or_null(p_version);
 	ERR_FAIL_NULL(version);
 
+	MutexLock lock(*version->mutex);
+
 	_compile_ensure_finished(version);
 
 	version->vertex_globals = p_vertex_globals.utf8();
@@ -651,6 +653,8 @@ void ShaderRD::version_set_compute_code(RID p_version, const HashMap<String, Str
 	Version *version = version_owner.get_or_null(p_version);
 	ERR_FAIL_NULL(version);
 
+	MutexLock lock(*version->mutex);
+
 	_compile_ensure_finished(version);
 
 	version->compute_globals = p_compute_globals.utf8();
@@ -684,6 +688,8 @@ bool ShaderRD::version_is_valid(RID p_version) {
 	Version *version = version_owner.get_or_null(p_version);
 	ERR_FAIL_NULL_V(version, false);
 
+	MutexLock lock(*version->mutex);
+
 	if (version->dirty) {
 		_initialize_version(version);
 		for (int i = 0; i < group_enabled.size(); i++) {
@@ -702,9 +708,17 @@ bool ShaderRD::version_is_valid(RID p_version) {
 
 bool ShaderRD::version_free(RID p_version) {
 	if (version_owner.owns(p_version)) {
+		{
+			MutexLock lock(versions_mutex);
+			version_mutexes.erase(p_version);
+		}
+
 		Version *version = version_owner.get_or_null(p_version);
+		version->mutex->lock();
 		_clear_version(version);
 		version_owner.free(p_version);
+		version->mutex->unlock();
+		memdelete(version->mutex);
 	} else {
 		return false;
 	}
@@ -738,7 +752,9 @@ void ShaderRD::enable_group(int p_group) {
 	version_owner.get_owned_list(&all_versions);
 	for (const RID &E : all_versions) {
 		Version *version = version_owner.get_or_null(E);
+		version->mutex->lock();
 		_compile_version_start(version, p_group);
+		version->mutex->unlock();
 	}
 }
 
@@ -63,6 +63,7 @@ private:
 	Vector<RD::PipelineImmutableSampler> immutable_samplers;
 
 	struct Version {
+		Mutex *mutex = nullptr;
 		CharString uniforms;
 		CharString vertex_globals;
 		CharString compute_globals;
@@ -79,8 +80,6 @@ private:
 		bool initialize_needed;
 	};
 
-	Mutex variant_set_mutex;
-
 	struct CompileData {
 		Version *version;
 		int group = 0;
@@ -95,7 +94,9 @@ private:
 	void _compile_ensure_finished(Version *p_version);
 	void _allocate_placeholders(Version *p_version, int p_group);
 
-	RID_Owner<Version> version_owner;
+	RID_Owner<Version, true> version_owner;
+	Mutex versions_mutex;
+	HashMap<RID, Mutex *> version_mutexes;
 
 	struct StageTemplate {
 		struct Chunk {
@@ -168,6 +169,8 @@ public:
 		Version *version = version_owner.get_or_null(p_version);
 		ERR_FAIL_NULL_V(version, RID());
 
+		MutexLock lock(*version->mutex);
+
 		if (version->dirty) {
 			_initialize_version(version);
 			for (int i = 0; i < group_enabled.size(); i++) {
@@ -2142,9 +2142,19 @@ void MaterialStorage::_material_queue_update(Material *material, bool p_uniform,
 }
 
 void MaterialStorage::_update_queued_materials() {
+	SelfList<Material>::List copy;
+	{
 		MutexLock lock(material_update_list_mutex);
-	while (material_update_list.first()) {
-		Material *material = material_update_list.first()->self();
+		while (SelfList<Material> *E = material_update_list.first()) {
+			DEV_ASSERT(E == &E->self()->update_element);
+			material_update_list.remove(E);
+			copy.add(E);
+		}
+	}
+
+	while (SelfList<Material> *E = copy.first()) {
+		Material *material = E->self();
+		copy.remove(E);
 		bool uniforms_changed = false;
 
 		if (material->data) {
@@ -2153,8 +2163,6 @@ void MaterialStorage::_update_queued_materials() {
 			material->texture_dirty = false;
 			material->uniform_dirty = false;
 
-			material_update_list.remove(&material->update_element);
-
 			if (uniforms_changed) {
 				//some implementations such as 3D renderer cache the material uniform set, so update is required
 				material->dependency.changed_notify(Dependency::DEPENDENCY_CHANGED_MATERIAL);