return b.shader;
}
-static nir_ssa_def *radv_meta_build_resolve_srgb_conversion(nir_builder *b,
- nir_ssa_def *input)
-{
- nir_const_value v;
- unsigned i;
- v.u32[0] = 0x3b4d2e1c; // 0.00313080009
-
- nir_ssa_def *cmp[3];
- for (i = 0; i < 3; i++)
- cmp[i] = nir_flt(b, nir_channel(b, input, i),
- nir_build_imm(b, 1, 32, v));
-
- nir_ssa_def *ltvals[3];
- v.f32[0] = 12.92;
- for (i = 0; i < 3; i++)
- ltvals[i] = nir_fmul(b, nir_channel(b, input, i),
- nir_build_imm(b, 1, 32, v));
-
- nir_ssa_def *gtvals[3];
-
- for (i = 0; i < 3; i++) {
- v.f32[0] = 1.0/2.4;
- gtvals[i] = nir_fpow(b, nir_channel(b, input, i),
- nir_build_imm(b, 1, 32, v));
- v.f32[0] = 1.055;
- gtvals[i] = nir_fmul(b, gtvals[i],
- nir_build_imm(b, 1, 32, v));
- v.f32[0] = 0.055;
- gtvals[i] = nir_fsub(b, gtvals[i],
- nir_build_imm(b, 1, 32, v));
- }
-
- nir_ssa_def *comp[4];
- for (i = 0; i < 3; i++)
- comp[i] = nir_bcsel(b, cmp[i], ltvals[i], gtvals[i]);
- comp[3] = nir_channels(b, input, 1 << 3);
- return nir_vec(b, comp, 4);
-}
-
void radv_meta_build_resolve_shader_core(nir_builder *b,
bool is_integer,
- bool is_srgb,
int samples,
nir_variable *input_img,
nir_variable *color,
if (outer_if)
b->cursor = nir_after_cf_node(&outer_if->cf_node);
-
- if (is_srgb) {
- nir_ssa_def *newv = nir_load_var(b, color);
- newv = radv_meta_build_resolve_srgb_conversion(b, newv);
- nir_store_var(b, color, newv, 0xf);
- }
}
void radv_meta_build_resolve_shader_core(nir_builder *b,
bool is_integer,
- bool is_srgb,
int samples,
nir_variable *input_img,
nir_variable *color,
#include "sid.h"
#include "vk_format.h"
+static nir_ssa_def *radv_meta_build_resolve_srgb_conversion(nir_builder *b,
+ nir_ssa_def *input)
+{
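+ /* Linear-to-sRGB encode on the RGB channels (alpha passes through):
+  *   x < 0.0031308 ? 12.92 * x : 1.055 * pow(x, 1/2.4) - 0.055
+  */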
+ nir_const_value v;
+ unsigned i;
+ v.u32[0] = 0x3b4d2e1c; // 0.00313080009
+
+ nir_ssa_def *cmp[3];
+ for (i = 0; i < 3; i++)
+ cmp[i] = nir_flt(b, nir_channel(b, input, i),
+ nir_build_imm(b, 1, 32, v));
+
+ nir_ssa_def *ltvals[3];
+ v.f32[0] = 12.92;
+ for (i = 0; i < 3; i++)
+ ltvals[i] = nir_fmul(b, nir_channel(b, input, i),
+ nir_build_imm(b, 1, 32, v));
+
+ nir_ssa_def *gtvals[3];
+
+ for (i = 0; i < 3; i++) {
+ v.f32[0] = 1.0/2.4;
+ gtvals[i] = nir_fpow(b, nir_channel(b, input, i),
+ nir_build_imm(b, 1, 32, v));
+ v.f32[0] = 1.055;
+ gtvals[i] = nir_fmul(b, gtvals[i],
+ nir_build_imm(b, 1, 32, v));
+ v.f32[0] = 0.055;
+ gtvals[i] = nir_fsub(b, gtvals[i],
+ nir_build_imm(b, 1, 32, v));
+ }
+
+ nir_ssa_def *comp[4];
+ for (i = 0; i < 3; i++)
+ comp[i] = nir_bcsel(b, cmp[i], ltvals[i], gtvals[i]);
+ comp[3] = nir_channels(b, input, 1 << 3);
+ return nir_vec(b, comp, 4);
+}
+
static nir_shader *
build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_srgb, int samples)
{
nir_ssa_def *img_coord = nir_channels(&b, nir_iadd(&b, global_id, &src_offset->dest.ssa), 0x3);
nir_variable *color = nir_local_variable_create(b.impl, glsl_vec4_type(), "color");
- radv_meta_build_resolve_shader_core(&b, is_integer, is_srgb, samples,
- input_img, color, img_coord);
+ radv_meta_build_resolve_shader_core(&b, is_integer, samples, input_img,
+ color, img_coord);
nir_ssa_def *outval = nir_load_var(&b, color);
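+ /* Storage image writes don't apply sRGB encoding, so convert the
+  * resolved value manually before the image store.
+  */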
+ if (is_srgb)
+ outval = radv_meta_build_resolve_srgb_conversion(&b, outval);
+
nir_ssa_def *coord = nir_iadd(&b, global_id, &dst_offset->dest.ssa);
nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_store);
store->src[0] = nir_src_for_ssa(coord);
}
static nir_shader *
-build_resolve_fragment_shader(struct radv_device *dev, bool is_integer, bool is_srgb, int samples)
+build_resolve_fragment_shader(struct radv_device *dev, bool is_integer, int samples)
{
nir_builder b;
char name[64];
false,
GLSL_TYPE_FLOAT);
- snprintf(name, 64, "meta_resolve_fs-%d-%s", samples, is_integer ? "int" : (is_srgb ? "srgb" : "float"));
+ snprintf(name, 64, "meta_resolve_fs-%d-%s", samples, is_integer ? "int" : "float");
nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, NULL);
b.shader->info.name = ralloc_strdup(b.shader, name);
nir_ssa_def *img_coord = nir_channels(&b, nir_iadd(&b, pos_int, &src_offset->dest.ssa), 0x3);
nir_variable *color = nir_local_variable_create(b.impl, glsl_vec4_type(), "color");
- radv_meta_build_resolve_shader_core(&b, is_integer, is_srgb,samples,
- input_img, color, img_coord);
+ radv_meta_build_resolve_shader_core(&b, is_integer, samples, input_img,
+ color, img_coord);
nir_ssa_def *outval = nir_load_var(&b, color);
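+ /* No manual sRGB conversion in the fragment path: an sRGB destination
+  * gets the encode from the color attachment on store.
+  */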
nir_store_var(&b, color_out, outval, 0xf);
VkFormat format)
{
VkResult result;
- bool is_integer = false, is_srgb = false;
+ bool is_integer = false;
uint32_t samples = 1 << samples_log2;
unsigned fs_key = radv_format_meta_fs_key(format);
const VkPipelineVertexInputStateCreateInfo *vi_create_info;
vi_create_info = &normal_vi_create_info;
if (vk_format_is_int(format))
is_integer = true;
- else if (vk_format_is_srgb(format))
- is_srgb = true;
struct radv_shader_module fs = { .nir = NULL };
- fs.nir = build_resolve_fragment_shader(device, is_integer, is_srgb, samples);
+ fs.nir = build_resolve_fragment_shader(device, is_integer, samples);
struct radv_shader_module vs = {
.nir = build_nir_vertex_shader(),
};
- VkRenderPass *rp = is_srgb ?
- &device->meta_state.resolve_fragment.rc[samples_log2].srgb_render_pass :
- &device->meta_state.resolve_fragment.rc[samples_log2].render_pass[fs_key];
+ VkRenderPass *rp = &device->meta_state.resolve_fragment.rc[samples_log2].render_pass[fs_key];
assert(!*rp);
- VkPipeline *pipeline = is_srgb ?
- &device->meta_state.resolve_fragment.rc[samples_log2].srgb_pipeline :
- &device->meta_state.resolve_fragment.rc[samples_log2].pipeline[fs_key];
+ VkPipeline *pipeline = &device->meta_state.resolve_fragment.rc[samples_log2].pipeline[fs_key];
assert(!*pipeline);
VkPipelineShaderStageCreateInfo pipeline_shader_stages[] = {
for (unsigned j = 0; j < ARRAY_SIZE(pipeline_formats); ++j) {
res = create_resolve_pipeline(device, i, pipeline_formats[j]);
}
-
- res = create_resolve_pipeline(device, i, VK_FORMAT_R8G8B8A8_SRGB);
}
return res;
state->resolve_fragment.rc[i].pipeline[j],
&state->alloc);
}
- radv_DestroyRenderPass(radv_device_to_handle(device),
- state->resolve_fragment.rc[i].srgb_render_pass,
- &state->alloc);
- radv_DestroyPipeline(radv_device_to_handle(device),
- state->resolve_fragment.rc[i].srgb_pipeline,
- &state->alloc);
}
radv_DestroyDescriptorSetLayout(radv_device_to_handle(device),
push_constants);
unsigned fs_key = radv_format_meta_fs_key(dest_iview->vk_format);
- VkPipeline pipeline_h = vk_format_is_srgb(dest_iview->vk_format) ?
- device->meta_state.resolve_fragment.rc[samples_log2].srgb_pipeline :
- device->meta_state.resolve_fragment.rc[samples_log2].pipeline[fs_key];
+ VkPipeline pipeline_h = device->meta_state.resolve_fragment.rc[samples_log2].pipeline[fs_key];
radv_CmdBindPipeline(cmd_buffer_h, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline_h);
radv_fast_clear_flush_image_inplace(cmd_buffer, src_image, &range);
}
- rp = vk_format_is_srgb(dest_image->vk_format) ?
- device->meta_state.resolve_fragment.rc[samples_log2].srgb_render_pass :
- device->meta_state.resolve_fragment.rc[samples_log2].render_pass[fs_key];
+ rp = device->meta_state.resolve_fragment.rc[samples_log2].render_pass[fs_key];
radv_meta_save_graphics_reset_vport_scissor_novertex(&saved_state, cmd_buffer);
for (uint32_t r = 0; r < region_count; ++r) {
VkPipelineLayout p_layout;
struct {
- VkRenderPass srgb_render_pass;
- VkPipeline srgb_pipeline;
VkRenderPass render_pass[NUM_META_FS_KEYS];
VkPipeline pipeline[NUM_META_FS_KEYS];
} rc[MAX_SAMPLES_LOG2];