1
0
mirror of https://github.com/godotengine/godot.git synced 2025-11-24 15:26:15 +00:00

Extracting render buffers and changing them to a more generic solution

This commit is contained in:
Bastiaan Olij
2022-08-04 18:40:39 +10:00
parent 0c221f0284
commit 2cd84be64d
49 changed files with 3095 additions and 2656 deletions

View File

@@ -0,0 +1,173 @@
/*************************************************************************/
/* fsr_upscale.glsl */
/*************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/*************************************************************************/
/* Copyright (c) 2007-2022 Juan Linietsky, Ariel Manzur. */
/* Copyright (c) 2014-2022 Godot Engine contributors (cf. AUTHORS.md). */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.*/
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/*************************************************************************/
#[compute]
#version 450
#VERSION_DEFINES
// Configure AMD's FSR portability header (ffx_a.h) for GLSL compute.
#define A_GPU
#define A_GLSL
#ifdef MODE_FSR_UPSCALE_NORMAL
// Normal mode enables 16-bit (half) float math in the FSR headers.
#define A_HALF
#endif
#include "thirdparty/amd-fsr/ffx_a.h"
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
// Destination image written by this pass (upscaled or sharpened result).
layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly image2D fsr_image;
// Source image sampled by this pass.
layout(set = 0, binding = 0) uniform sampler2D source_image;
// Sub-pass selector values for params.pass.
#define FSR_UPSCALE_PASS_TYPE_EASU 0
#define FSR_UPSCALE_PASS_TYPE_RCAS 1
layout(push_constant, std430) uniform Params {
float resolution_width; // source (render) resolution
float resolution_height;
float upscaled_width; // target (output) resolution
float upscaled_height;
float sharpness; // RCAS sharpening amount
int pass; // one of FSR_UPSCALE_PASS_TYPE_*
}
params;
// FSR constant blocks; filled per-invocation in main() from the push constants.
AU4 Const0, Const1, Const2, Const3;
#ifdef MODE_FSR_UPSCALE_FALLBACK
// 32-bit float fallback path.
#define FSR_EASU_F
// EASU callbacks: gather the red/green/blue texel quads the filter reads.
AF4 FsrEasuRF(AF2 p) {
AF4 res = textureGather(source_image, p, 0);
return res;
}
AF4 FsrEasuGF(AF2 p) {
AF4 res = textureGather(source_image, p, 1);
return res;
}
AF4 FsrEasuBF(AF2 p) {
AF4 res = textureGather(source_image, p, 2);
return res;
}
#define FSR_RCAS_F
// RCAS callback: fetch a single texel for sharpening.
AF4 FsrRcasLoadF(ASU2 p) {
return AF4(texelFetch(source_image, ASU2(p), 0));
}
// No input transform needed before RCAS in this setup.
void FsrRcasInputF(inout AF1 r, inout AF1 g, inout AF1 b) {}
#else
// 16-bit (half) float path; same callbacks, converted to AH* types.
#define FSR_EASU_H
AH4 FsrEasuRH(AF2 p) {
AH4 res = AH4(textureGather(source_image, p, 0));
return res;
}
AH4 FsrEasuGH(AF2 p) {
AH4 res = AH4(textureGather(source_image, p, 1));
return res;
}
AH4 FsrEasuBH(AF2 p) {
AH4 res = AH4(textureGather(source_image, p, 2));
return res;
}
#define FSR_RCAS_H
AH4 FsrRcasLoadH(ASW2 p) {
return AH4(texelFetch(source_image, ASU2(p), 0));
}
void FsrRcasInputH(inout AH1 r, inout AH1 g, inout AH1 b) {}
#endif
#include "thirdparty/amd-fsr/ffx_fsr1.h"
// EASU (Edge Adaptive Spatial Upsampling): computes one upscaled pixel at
// `pos` and writes it to fsr_image. Uses the FP16 kernel in normal mode,
// the FP32 kernel otherwise.
void fsr_easu_pass(AU2 pos) {
#ifdef MODE_FSR_UPSCALE_NORMAL
AH3 Gamma2Color = AH3(0, 0, 0);
FsrEasuH(Gamma2Color, pos, Const0, Const1, Const2, Const3);
imageStore(fsr_image, ASU2(pos), AH4(Gamma2Color, 1));
#else
AF3 Gamma2Color = AF3(0, 0, 0);
FsrEasuF(Gamma2Color, pos, Const0, Const1, Const2, Const3);
imageStore(fsr_image, ASU2(pos), AF4(Gamma2Color, 1));
#endif
}
// RCAS (Robust Contrast Adaptive Sharpening): sharpens one pixel at `pos`
// (normally run on the output of the EASU pass) and writes it to fsr_image.
void fsr_rcas_pass(AU2 pos) {
#ifdef MODE_FSR_UPSCALE_NORMAL
AH3 Gamma2Color = AH3(0, 0, 0);
FsrRcasH(Gamma2Color.r, Gamma2Color.g, Gamma2Color.b, pos, Const0);
imageStore(fsr_image, ASU2(pos), AH4(Gamma2Color, 1));
#else
AF3 Gamma2Color = AF3(0, 0, 0);
FsrRcasF(Gamma2Color.r, Gamma2Color.g, Gamma2Color.b, pos, Const0);
imageStore(fsr_image, ASU2(pos), AF4(Gamma2Color, 1));
#endif
}
// Dispatch one pixel to the sub-pass selected by the push constant.
// Unknown pass values are silently ignored.
void fsr_pass(AU2 pos) {
	switch (params.pass) {
		case FSR_UPSCALE_PASS_TYPE_EASU: {
			fsr_easu_pass(pos);
		} break;
		case FSR_UPSCALE_PASS_TYPE_RCAS: {
			fsr_rcas_pass(pos);
		} break;
	}
}
void main() {
// Clang does not like unused functions. If ffx_a.h is included in the binary, clang will throw a fit and not compile so we must configure FSR in this shader
if (params.pass == FSR_UPSCALE_PASS_TYPE_EASU) {
// Build the EASU constants from input/output resolutions (viewport == input size here).
FsrEasuCon(Const0, Const1, Const2, Const3, params.resolution_width, params.resolution_height, params.resolution_width, params.resolution_height, params.upscaled_width, params.upscaled_height);
} else if (params.pass == FSR_UPSCALE_PASS_TYPE_RCAS) {
FsrRcasCon(Const0, params.sharpness);
}
// Remap the 64 linear invocations into an 8x8 swizzle (ARmp8x8); each
// workgroup covers a 16x16 region, so every thread shades 4 pixels, one
// in each 8x8 quadrant, by offsetting +8 in x, then y, then back in x.
AU2 gxy = ARmp8x8(gl_LocalInvocationID.x) + AU2(gl_WorkGroupID.x << 4u, gl_WorkGroupID.y << 4u);
fsr_pass(gxy);
gxy.x += 8u;
fsr_pass(gxy);
gxy.y += 8u;
fsr_pass(gxy);
gxy.x -= 8u;
fsr_pass(gxy);
}

View File

@@ -0,0 +1,189 @@
#[compute]
#version 450
#VERSION_DEFINES
layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in;
// Separable subsurface-scattering blur kernels. One of the USE_*_SAMPLES
// variants is expected to be defined. Each `kernel` entry is
// vec2(weight, offset): a scalar tap weight and the tap's offset factor
// along the blur direction. Each `skin_kernel` entry is
// vec4(r_weight, g_weight, b_weight, offset): per-channel weights for the
// skin diffusion profile (the tables show red retaining the most weight at
// large offsets, i.e. red scatters furthest).
#ifdef USE_25_SAMPLES
const int kernel_size = 13;
const vec2 kernel[kernel_size] = vec2[](
vec2(0.530605, 0.0),
vec2(0.0211412, 0.0208333),
vec2(0.0402784, 0.0833333),
vec2(0.0493588, 0.1875),
vec2(0.0410172, 0.333333),
vec2(0.0263642, 0.520833),
vec2(0.017924, 0.75),
vec2(0.0128496, 1.02083),
vec2(0.0094389, 1.33333),
vec2(0.00700976, 1.6875),
vec2(0.00500364, 2.08333),
vec2(0.00333804, 2.52083),
vec2(0.000973794, 3.0));
const vec4 skin_kernel[kernel_size] = vec4[](
vec4(0.530605, 0.613514, 0.739601, 0),
vec4(0.0211412, 0.0459286, 0.0378196, 0.0208333),
vec4(0.0402784, 0.0657244, 0.04631, 0.0833333),
vec4(0.0493588, 0.0367726, 0.0219485, 0.1875),
vec4(0.0410172, 0.0199899, 0.0118481, 0.333333),
vec4(0.0263642, 0.0119715, 0.00684598, 0.520833),
vec4(0.017924, 0.00711691, 0.00347194, 0.75),
vec4(0.0128496, 0.00356329, 0.00132016, 1.02083),
vec4(0.0094389, 0.00139119, 0.000416598, 1.33333),
vec4(0.00700976, 0.00049366, 0.000151938, 1.6875),
vec4(0.00500364, 0.00020094, 5.28848e-005, 2.08333),
vec4(0.00333804, 7.85443e-005, 1.2945e-005, 2.52083),
vec4(0.000973794, 1.11862e-005, 9.43437e-007, 3));
#endif //USE_25_SAMPLES
#ifdef USE_17_SAMPLES
const int kernel_size = 9;
const vec2 kernel[kernel_size] = vec2[](
vec2(0.536343, 0.0),
vec2(0.0324462, 0.03125),
vec2(0.0582416, 0.125),
vec2(0.0571056, 0.28125),
vec2(0.0347317, 0.5),
vec2(0.0216301, 0.78125),
vec2(0.0144609, 1.125),
vec2(0.0100386, 1.53125),
vec2(0.00317394, 2.0));
const vec4 skin_kernel[kernel_size] = vec4[](
vec4(0.536343, 0.624624, 0.748867, 0),
vec4(0.0324462, 0.0656718, 0.0532821, 0.03125),
vec4(0.0582416, 0.0659959, 0.0411329, 0.125),
vec4(0.0571056, 0.0287432, 0.0172844, 0.28125),
vec4(0.0347317, 0.0151085, 0.00871983, 0.5),
vec4(0.0216301, 0.00794618, 0.00376991, 0.78125),
vec4(0.0144609, 0.00317269, 0.00106399, 1.125),
vec4(0.0100386, 0.000914679, 0.000275702, 1.53125),
vec4(0.00317394, 0.000134823, 3.77269e-005, 2));
#endif //USE_17_SAMPLES
#ifdef USE_11_SAMPLES
const int kernel_size = 6;
const vec2 kernel[kernel_size] = vec2[](
vec2(0.560479, 0.0),
vec2(0.0771802, 0.08),
vec2(0.0821904, 0.32),
vec2(0.03639, 0.72),
vec2(0.0192831, 1.28),
vec2(0.00471691, 2.0));
const vec4 skin_kernel[kernel_size] = vec4[](
vec4(0.560479, 0.669086, 0.784728, 0),
vec4(0.0771802, 0.113491, 0.0793803, 0.08),
vec4(0.0821904, 0.0358608, 0.0209261, 0.32),
vec4(0.03639, 0.0130999, 0.00643685, 0.72),
vec4(0.0192831, 0.00282018, 0.00084214, 1.28),
vec4(0.00471691, 0.000184771, 5.07565e-005, 2));
#endif //USE_11_SAMPLES
layout(push_constant, std430) uniform Params {
ivec2 screen_size; // destination size in pixels
float camera_z_far; // used to linearize the depth buffer
float camera_z_near;
bool vertical; // blur direction of this separable pass
bool orthogonal; // projection type; changes depth linearization and scaling
float unit_size;
float scale;
float depth_scale; // blend factor between fixed and depth-dependent scale
uint pad[3];
}
params;
// Source color; alpha encodes SSS strength (negative alpha marks skin).
layout(set = 0, binding = 0) uniform sampler2D source_image;
layout(rgba16f, set = 1, binding = 0) uniform restrict writeonly image2D dest_image;
layout(set = 2, binding = 0) uniform sampler2D source_depth;
// Accumulate the non-center kernel taps along `step`, adding weighted color
// into color_accum and the weights into divisor. Stops early when a sample's
// alpha (SSS strength) is ~0 so non-scattering pixels are not mixed in.
void do_filter(inout vec3 color_accum, inout vec3 divisor, vec2 uv, vec2 step, bool p_skin) {
	for (int tap = 1; tap < kernel_size; tap++) {
		// Sample color at this tap's offset along the blur direction.
		vec2 sample_uv = uv + kernel[tap].y * step;
		vec4 sample_color = texture(source_image, sample_uv);
		if (abs(sample_color.a) < 0.001) {
			break; // No scattering at this sample; stop mixing along this direction.
		}
		// Skin uses per-channel weights; otherwise a scalar weight is splatted.
		vec3 weight = p_skin ? skin_kernel[tap].rgb : vec3(kernel[tap].x);
		color_accum += sample_color.rgb * weight;
		divisor += weight;
	}
}
void main() {
// Pixel being shaded
ivec2 ssC = ivec2(gl_GlobalInvocationID.xy);
if (any(greaterThanEqual(ssC, params.screen_size))) { //too large, do nothing
return;
}
vec2 uv = (vec2(ssC) + 0.5) / vec2(params.screen_size);
// Fetch color of current pixel:
vec4 base_color = texture(source_image, uv);
// Alpha magnitude is the SSS strength; its sign flags skin (see below).
float strength = abs(base_color.a);
if (strength > 0.0) {
// This is a separable blur: horizontal or vertical per dispatch.
vec2 dir = params.vertical ? vec2(0.0, 1.0) : vec2(1.0, 0.0);
// Fetch linear depth of current pixel:
float depth = texture(source_depth, uv).r * 2.0 - 1.0;
float depth_scale;
if (params.orthogonal) {
depth = ((depth + (params.camera_z_far + params.camera_z_near) / (params.camera_z_far - params.camera_z_near)) * (params.camera_z_far - params.camera_z_near)) / 2.0;
depth_scale = params.unit_size; //remember depth is negative by default in OpenGL
} else {
depth = 2.0 * params.camera_z_near * params.camera_z_far / (params.camera_z_far + params.camera_z_near - depth * (params.camera_z_far - params.camera_z_near));
// Perspective: blur radius shrinks with distance.
depth_scale = params.unit_size / depth; //remember depth is negative by default in OpenGL
}
float scale = mix(params.scale, depth_scale, params.depth_scale);
// Calculate the final step to fetch the surrounding pixels:
vec2 step = scale * dir;
step *= strength;
step /= 3.0;
// Accumulate the center sample:
vec3 divisor;
bool skin = bool(base_color.a < 0.0);
if (skin) {
//skin
divisor = skin_kernel[0].rgb;
} else {
divisor = vec3(kernel[0].x);
}
vec3 color = base_color.rgb * divisor;
// Blur in both directions along the axis, then normalize by total weight.
do_filter(color, divisor, uv, step, skin);
do_filter(color, divisor, uv, -step, skin);
base_color.rgb = color / divisor;
}
imageStore(dest_image, ssC, base_color);
}

View File

@@ -0,0 +1,394 @@
///////////////////////////////////////////////////////////////////////////////////
// Copyright(c) 2016-2022 Panos Karabelas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
// copies of the Software, and to permit persons to whom the Software is furnished
// to do so, subject to the following conditions :
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
///////////////////////////////////////////////////////////////////////////////////
// File changes (yyyy-mm-dd)
// 2022-05-06: Panos Karabelas: first commit
// 2020-12-05: Joan Fons: convert to Vulkan and Godot
///////////////////////////////////////////////////////////////////////////////////
#[compute]
#version 450
#VERSION_DEFINES
// Based on Spartan Engine's TAA implementation (without TAA upscale).
// <https://github.com/PanosK92/SpartanEngine/blob/a8338d0609b85dc32f3732a5c27fb4463816a3b9/Data/shaders/temporal_antialiasing.hlsl>
#define USE_SUBGROUPS
#define GROUP_SIZE 8
// Clamp bounds used to keep blended colors finite (half-float friendly range).
#define FLT_MIN 0.00000001
#define FLT_MAX 32767.0
// Reciprocals: 1/9 (3x3 box average) and 1/16 (base history blend factor).
#define RPC_9 0.11111111111
#define RPC_16 0.0625
#ifdef USE_SUBGROUPS
layout(local_size_x = GROUP_SIZE, local_size_y = GROUP_SIZE, local_size_z = 1) in;
#endif
// Current frame color, depth and motion vectors.
layout(rgba16f, set = 0, binding = 0) uniform restrict readonly image2D color_buffer;
layout(set = 0, binding = 1) uniform sampler2D depth_buffer;
layout(rg16f, set = 0, binding = 2) uniform restrict readonly image2D velocity_buffer;
// Previous frame's motion vectors, used to detect disocclusion.
layout(rg16f, set = 0, binding = 3) uniform restrict readonly image2D last_velocity_buffer;
// Accumulated history color, and the resolved output of this pass.
layout(set = 0, binding = 4) uniform sampler2D history_buffer;
layout(rgba16f, set = 0, binding = 5) uniform restrict writeonly image2D output_buffer;
layout(push_constant, std430) uniform Params {
vec2 resolution;
float disocclusion_threshold; // 0.1 / max(params.resolution.x, params.resolution.y)
float disocclusion_scale;
}
params;
// Offsets of a 3x3 neighborhood, row by row; index 4 is the center.
const ivec2 kOffsets3x3[9] = {
ivec2(-1, -1),
ivec2(0, -1),
ivec2(1, -1),
ivec2(-1, 0),
ivec2(0, 0),
ivec2(1, 0),
ivec2(-1, 1),
ivec2(0, 1),
ivec2(1, 1),
};
/*------------------------------------------------------------------------------
THREAD GROUP SHARED MEMORY (LDS)
------------------------------------------------------------------------------*/
// Each workgroup caches a GROUP_SIZE x GROUP_SIZE tile plus a 1-pixel border
// of color and depth in shared memory (see populate_group_shared_memory).
const int kBorderSize = 1;
const int kGroupSize = GROUP_SIZE;
const int kTileDimension = kGroupSize + kBorderSize * 2;
const int kTileDimension2 = kTileDimension * kTileDimension;
// Reinhard tonemap: maps HDR color into [0, 1) so the history blend happens
// in a perceptually safer range (reduces fireflies).
vec3 reinhard(vec3 color) {
	return color / (color + 1.0);
}
// Inverse of the above; maps the resolved color back to HDR.
vec3 reinhard_inverse(vec3 color) {
	return color / (1.0 - color);
}
// Fetch raw depth for a full-resolution pixel coordinate.
float get_depth(ivec2 thread_id) {
return texelFetch(depth_buffer, thread_id, 0).r;
}
#ifdef USE_SUBGROUPS
// LDS tile: cached color and depth for the workgroup plus its 1-pixel border.
shared vec3 tile_color[kTileDimension][kTileDimension];
shared float tile_depth[kTileDimension][kTileDimension];
// Read cached color; the coordinate is group-local and gets shifted past
// the border. Note: store_* take raw (border-inclusive) tile coordinates.
vec3 load_color(uvec2 group_thread_id) {
group_thread_id += kBorderSize;
return tile_color[group_thread_id.x][group_thread_id.y];
}
void store_color(uvec2 group_thread_id, vec3 color) {
tile_color[group_thread_id.x][group_thread_id.y] = color;
}
float load_depth(uvec2 group_thread_id) {
group_thread_id += kBorderSize;
return tile_depth[group_thread_id.x][group_thread_id.y];
}
void store_depth(uvec2 group_thread_id, float depth) {
tile_depth[group_thread_id.x][group_thread_id.y] = depth;
}
// Cache one texel's color and depth at the given tile slot.
void store_color_depth(uvec2 group_thread_id, ivec2 thread_id) {
// out of bounds clamp
thread_id = clamp(thread_id, ivec2(0, 0), ivec2(params.resolution) - ivec2(1, 1));
store_color(group_thread_id, imageLoad(color_buffer, thread_id).rgb);
store_depth(group_thread_id, get_depth(thread_id));
}
// Cooperatively fill the LDS tile: threads with group_index below
// kTileDimension2 / 4 each load four texels, spaced a quarter-tile apart.
void populate_group_shared_memory(uvec2 group_id, uint group_index) {
// Populate group shared memory
ivec2 group_top_left = ivec2(group_id) * kGroupSize - kBorderSize;
if (group_index < (kTileDimension2 >> 2)) {
ivec2 group_thread_id_1 = ivec2(group_index % kTileDimension, group_index / kTileDimension);
ivec2 group_thread_id_2 = ivec2((group_index + (kTileDimension2 >> 2)) % kTileDimension, (group_index + (kTileDimension2 >> 2)) / kTileDimension);
ivec2 group_thread_id_3 = ivec2((group_index + (kTileDimension2 >> 1)) % kTileDimension, (group_index + (kTileDimension2 >> 1)) / kTileDimension);
ivec2 group_thread_id_4 = ivec2((group_index + kTileDimension2 * 3 / 4) % kTileDimension, (group_index + kTileDimension2 * 3 / 4) / kTileDimension);
store_color_depth(group_thread_id_1, group_top_left + group_thread_id_1);
store_color_depth(group_thread_id_2, group_top_left + group_thread_id_2);
store_color_depth(group_thread_id_3, group_top_left + group_thread_id_3);
store_color_depth(group_thread_id_4, group_top_left + group_thread_id_4);
}
// Wait for group threads to load store data.
groupMemoryBarrier();
barrier();
}
#else
// Non-LDS fallback: read straight from the global buffers.
vec3 load_color(uvec2 screen_pos) {
return imageLoad(color_buffer, ivec2(screen_pos)).rgb;
}
float load_depth(uvec2 screen_pos) {
return get_depth(ivec2(screen_pos));
}
#endif
/*------------------------------------------------------------------------------
VELOCITY
------------------------------------------------------------------------------*/
// Keep the (depth, position) pair of the nearest sample seen so far.
void depth_test_min(uvec2 pos, inout float min_depth, inout uvec2 min_pos) {
	float candidate = load_depth(pos);
	if (candidate < min_depth) {
		min_pos = pos;
		min_depth = candidate;
	}
}
// Returns velocity with closest depth (3x3 neighborhood)
void get_closest_pixel_velocity_3x3(in uvec2 group_pos, uvec2 group_top_left, out vec2 velocity) {
float min_depth = 1.0;
uvec2 min_pos = group_pos;
depth_test_min(group_pos + kOffsets3x3[0], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[1], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[2], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[3], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[4], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[5], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[6], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[7], min_depth, min_pos);
depth_test_min(group_pos + kOffsets3x3[8], min_depth, min_pos);
// Velocity out
velocity = imageLoad(velocity_buffer, ivec2(group_top_left + min_pos)).xy;
}
/*------------------------------------------------------------------------------
HISTORY SAMPLING
------------------------------------------------------------------------------*/
// Catmull-Rom filtered history sample. The 4x4 (16-tap) filter is reduced to
// 9 bilinear fetches by letting the hardware blend the middle pairs of taps.
vec3 sample_catmull_rom_9(sampler2D stex, vec2 uv, vec2 resolution) {
// Source: https://gist.github.com/TheRealMJP/c83b8c0f46b63f3a88a5986f4fa982b1
// License: https://gist.github.com/TheRealMJP/bc503b0b87b643d3505d41eab8b332ae
// We're going to sample a 4x4 grid of texels surrounding the target UV coordinate. We'll do this by rounding
// down the sample location to get the exact center of our "starting" texel. The starting texel will be at
// location [1, 1] in the grid, where [0, 0] is the top left corner.
vec2 sample_pos = uv * resolution;
vec2 texPos1 = floor(sample_pos - 0.5f) + 0.5f;
// Compute the fractional offset from our starting texel to our original sample location, which we'll
// feed into the Catmull-Rom spline function to get our filter weights.
vec2 f = sample_pos - texPos1;
// Compute the Catmull-Rom weights using the fractional offset that we calculated earlier.
// These equations are pre-expanded based on our knowledge of where the texels will be located,
// which lets us avoid having to evaluate a piece-wise function.
vec2 w0 = f * (-0.5f + f * (1.0f - 0.5f * f));
vec2 w1 = 1.0f + f * f * (-2.5f + 1.5f * f);
vec2 w2 = f * (0.5f + f * (2.0f - 1.5f * f));
vec2 w3 = f * f * (-0.5f + 0.5f * f);
// Work out weighting factors and sampling offsets that will let us use bilinear filtering to
// simultaneously evaluate the middle 2 samples from the 4x4 grid.
vec2 w12 = w1 + w2;
vec2 offset12 = w2 / (w1 + w2);
// Compute the final UV coordinates we'll use for sampling the texture
vec2 texPos0 = texPos1 - 1.0f;
vec2 texPos3 = texPos1 + 2.0f;
vec2 texPos12 = texPos1 + offset12;
texPos0 /= resolution;
texPos3 /= resolution;
texPos12 /= resolution;
// 3x3 fetches: outer rows/columns use single taps, the middle uses the
// combined bilinear tap with weight w12.
vec3 result = vec3(0.0f, 0.0f, 0.0f);
result += textureLod(stex, vec2(texPos0.x, texPos0.y), 0.0).xyz * w0.x * w0.y;
result += textureLod(stex, vec2(texPos12.x, texPos0.y), 0.0).xyz * w12.x * w0.y;
result += textureLod(stex, vec2(texPos3.x, texPos0.y), 0.0).xyz * w3.x * w0.y;
result += textureLod(stex, vec2(texPos0.x, texPos12.y), 0.0).xyz * w0.x * w12.y;
result += textureLod(stex, vec2(texPos12.x, texPos12.y), 0.0).xyz * w12.x * w12.y;
result += textureLod(stex, vec2(texPos3.x, texPos12.y), 0.0).xyz * w3.x * w12.y;
result += textureLod(stex, vec2(texPos0.x, texPos3.y), 0.0).xyz * w0.x * w3.y;
result += textureLod(stex, vec2(texPos12.x, texPos3.y), 0.0).xyz * w12.x * w3.y;
result += textureLod(stex, vec2(texPos3.x, texPos3.y), 0.0).xyz * w3.x * w3.y;
// Negative lobes of the spline can produce negative colors; clamp them off.
return max(result, 0.0f);
}
/*------------------------------------------------------------------------------
HISTORY CLIPPING
------------------------------------------------------------------------------*/
// Based on "Temporal Reprojection Anti-Aliasing" - https://github.com/playdeadgames/temporal
// Based on "Temporal Reprojection Anti-Aliasing" - https://github.com/playdeadgames/temporal
// Moves q towards p until it lies inside the AABB [aabb_min, aabb_max].
// Each axis is tested in turn and the whole offset vector is rescaled, so
// the direction p->q is preserved (a clip, not a per-axis clamp).
vec3 clip_aabb(vec3 aabb_min, vec3 aabb_max, vec3 p, vec3 q) {
vec3 r = q - p;
vec3 rmax = (aabb_max - p.xyz);
vec3 rmin = (aabb_min - p.xyz);
// FLT_MIN epsilon avoids rescaling (and dividing) when already on the face.
if (r.x > rmax.x + FLT_MIN)
r *= (rmax.x / r.x);
if (r.y > rmax.y + FLT_MIN)
r *= (rmax.y / r.y);
if (r.z > rmax.z + FLT_MIN)
r *= (rmax.z / r.z);
if (r.x < rmin.x - FLT_MIN)
r *= (rmin.x / r.x);
if (r.y < rmin.y - FLT_MIN)
r *= (rmin.y / r.y);
if (r.z < rmin.z - FLT_MIN)
r *= (rmin.z / r.z);
return p + r;
}
// Clip history to the neighbourhood of the current sample
vec3 clip_history_3x3(uvec2 group_pos, vec3 color_history, vec2 velocity_closest) {
// Sample a 3x3 neighbourhood
vec3 s1 = load_color(group_pos + kOffsets3x3[0]);
vec3 s2 = load_color(group_pos + kOffsets3x3[1]);
vec3 s3 = load_color(group_pos + kOffsets3x3[2]);
vec3 s4 = load_color(group_pos + kOffsets3x3[3]);
vec3 s5 = load_color(group_pos + kOffsets3x3[4]);
vec3 s6 = load_color(group_pos + kOffsets3x3[5]);
vec3 s7 = load_color(group_pos + kOffsets3x3[6]);
vec3 s8 = load_color(group_pos + kOffsets3x3[7]);
vec3 s9 = load_color(group_pos + kOffsets3x3[8]);
// Compute min and max (with an adaptive box size, which greatly reduces ghosting)
vec3 color_avg = (s1 + s2 + s3 + s4 + s5 + s6 + s7 + s8 + s9) * RPC_9;
vec3 color_avg2 = ((s1 * s1) + (s2 * s2) + (s3 * s3) + (s4 * s4) + (s5 * s5) + (s6 * s6) + (s7 * s7) + (s8 * s8) + (s9 * s9)) * RPC_9;
float box_size = mix(0.0f, 2.5f, smoothstep(0.02f, 0.0f, length(velocity_closest)));
vec3 dev = sqrt(abs(color_avg2 - (color_avg * color_avg))) * box_size;
vec3 color_min = color_avg - dev;
vec3 color_max = color_avg + dev;
// Variance clipping
vec3 color = clip_aabb(color_min, color_max, clamp(color_avg, color_min, color_max), color_history);
// Clamp to prevent NaNs
color = clamp(color, FLT_MIN, FLT_MAX);
return color;
}
/*------------------------------------------------------------------------------
TAA
------------------------------------------------------------------------------*/
// Rec. 601 luma coefficients.
const vec3 lumCoeff = vec3(0.299f, 0.587f, 0.114f);
// Luminance of a color, floored to avoid zero (it is used as a divisor).
float luminance(vec3 color) {
return max(dot(color, lumCoeff), 0.0001f);
}
// Disocclusion estimate: a large difference between last frame's and this
// frame's motion vectors (measured in texels) suggests the reprojected
// history shows a different surface, so its blend weight should rise.
float get_factor_disocclusion(vec2 uv_reprojected, vec2 velocity) {
	vec2 velocity_previous = imageLoad(last_velocity_buffer, ivec2(uv_reprojected * params.resolution)).xy;
	// Compare both velocities in texel units.
	vec2 curr_texels = velocity * params.resolution;
	vec2 prev_texels = velocity_previous * params.resolution;
	float disocclusion = length(prev_texels - curr_texels) - params.disocclusion_threshold;
	return clamp(disocclusion * params.disocclusion_scale, 0.0, 1.0);
}
// Resolve one pixel: reproject the history with the motion vector, clip it
// to the current 3x3 neighbourhood, and blend it with the current color.
vec3 temporal_antialiasing(uvec2 pos_group_top_left, uvec2 pos_group, uvec2 pos_screen, vec2 uv, sampler2D tex_history) {
// Get the velocity of the current pixel
vec2 velocity = imageLoad(velocity_buffer, ivec2(pos_screen)).xy;
// Get reprojected uv
vec2 uv_reprojected = uv - velocity;
// Get input color
vec3 color_input = load_color(pos_group);
// Get history color (catmull-rom reduces a lot of the blurring that you get under motion)
vec3 color_history = sample_catmull_rom_9(tex_history, uv_reprojected, params.resolution).rgb;
// Clip history to the neighbourhood of the current sample (fixes a lot of the ghosting).
vec2 velocity_closest = vec2(0.0); // This is best done by using the velocity with the closest depth.
get_closest_pixel_velocity_3x3(pos_group, pos_group_top_left, velocity_closest);
color_history = clip_history_3x3(pos_group, color_history, velocity_closest);
// Compute blend factor
float blend_factor = RPC_16; // We want to be able to accumulate as many jitter samples as we generated, that is, 16.
{
// If re-projected UV is out of screen, converge to current color immediately
float factor_screen = any(lessThan(uv_reprojected, vec2(0.0))) || any(greaterThan(uv_reprojected, vec2(1.0))) ? 1.0 : 0.0;
// Increase blend factor when there is disocclusion (fixes a lot of the remaining ghosting).
float factor_disocclusion = get_factor_disocclusion(uv_reprojected, velocity);
// Add to the blend factor
blend_factor = clamp(blend_factor + factor_screen + factor_disocclusion, 0.0, 1.0);
}
// Resolve
vec3 color_resolved = vec3(0.0);
{
// Tonemap: blend in tonemapped (bounded) space to suppress fireflies.
color_history = reinhard(color_history);
color_input = reinhard(color_input);
// Reduce flickering: scale the blend down when the two luminances differ a lot.
float lum_color = luminance(color_input);
float lum_history = luminance(color_history);
float diff = abs(lum_color - lum_history) / max(lum_color, max(lum_history, 1.001));
diff = 1.0 - diff;
diff = diff * diff;
blend_factor = mix(0.0, blend_factor, diff);
// Lerp/blend
color_resolved = mix(color_history, color_input, blend_factor);
// Inverse tonemap
color_resolved = reinhard_inverse(color_resolved);
}
return color_resolved;
}
void main() {
#ifdef USE_SUBGROUPS
// Fill the LDS tile for this workgroup. This must run before the early
// return below so every thread reaches the barrier inside it.
populate_group_shared_memory(gl_WorkGroupID.xy, gl_LocalInvocationIndex);
#endif
// Out of bounds check
if (any(greaterThanEqual(vec2(gl_GlobalInvocationID.xy), params.resolution))) {
return;
}
#ifdef USE_SUBGROUPS
// Group-local coordinates index into the cached LDS tile.
const uvec2 pos_group = gl_LocalInvocationID.xy;
const uvec2 pos_group_top_left = gl_WorkGroupID.xy * kGroupSize - kBorderSize;
#else
// Without LDS, "group" coordinates are just screen coordinates.
const uvec2 pos_group = gl_GlobalInvocationID.xy;
const uvec2 pos_group_top_left = uvec2(0, 0);
#endif
const uvec2 pos_screen = gl_GlobalInvocationID.xy;
const vec2 uv = (gl_GlobalInvocationID.xy + 0.5f) / params.resolution;
vec3 result = temporal_antialiasing(pos_group_top_left, pos_group, pos_screen, uv, history_buffer);
imageStore(output_buffer, ivec2(gl_GlobalInvocationID.xy), vec4(result, 1.0));
}