diff --git a/src/amd/vulkan/radv_meta_resolve_cs.c b/src/amd/vulkan/radv_meta_resolve_cs.c
index 05a212b6d93..c40c61929d2 100644
--- a/src/amd/vulkan/radv_meta_resolve_cs.c
+++ b/src/amd/vulkan/radv_meta_resolve_cs.c
@@ -34,33 +34,27 @@
static nir_ssa_def *radv_meta_build_resolve_srgb_conversion(nir_builder *b,
						nir_ssa_def *input)
{
-	nir_const_value v;
	unsigned i;
-	v.u32[0] = 0x3b4d2e1c; // 0.00313080009
	nir_ssa_def *cmp[3];
	for (i = 0; i < 3; i++)
		cmp[i] = nir_flt(b, nir_channel(b, input, i),
-				 nir_build_imm(b, 1, 32, v));
+				 nir_imm_int(b, 0x3b4d2e1c));

	nir_ssa_def *ltvals[3];
-	v.f32[0] = 12.92;
	for (i = 0; i < 3; i++)
		ltvals[i] = nir_fmul(b, nir_channel(b, input, i),
-				     nir_build_imm(b, 1, 32, v));
+				     nir_imm_float(b, 12.92));

	nir_ssa_def *gtvals[3];

	for (i = 0; i < 3; i++) {
-		v.f32[0] = 1.0/2.4;
		gtvals[i] = nir_fpow(b, nir_channel(b, input, i),
-				     nir_build_imm(b, 1, 32, v));
-		v.f32[0] = 1.055;
+				     nir_imm_float(b, 1.0/2.4));
		gtvals[i] = nir_fmul(b, gtvals[i],
-				     nir_build_imm(b, 1, 32, v));
-		v.f32[0] = 0.055;
+				     nir_imm_float(b, 1.055));
		gtvals[i] = nir_fsub(b, gtvals[i],
-				     nir_build_imm(b, 1, 32, v));
+				     nir_imm_float(b, 0.055));
	}

	nir_ssa_def *comp[4];
@@ -99,8 +93,8 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
							img_type, "out_img");
	output_img->data.descriptor_set = 0;
	output_img->data.binding = 1;
-	nir_ssa_def *invoc_id = nir_load_system_value(&b, nir_intrinsic_load_local_invocation_id, 0);
-	nir_ssa_def *wg_id = nir_load_system_value(&b, nir_intrinsic_load_work_group_id, 0);
+	nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b);
+	nir_ssa_def *wg_id = nir_load_work_group_id(&b);
	nir_ssa_def *block_size = nir_imm_ivec4(&b,
						b.shader->info.cs.local_size[0],
						b.shader->info.cs.local_size[1],
@@ -135,15 +129,177 @@ build_resolve_compute_shader(struct radv_device *dev, bool is_integer, bool is_s
		outval = radv_meta_build_resolve_srgb_conversion(&b, outval);

	nir_ssa_def *coord = nir_iadd(&b, global_id, &dst_offset->dest.ssa);
-	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_store);
-	store->src[0] = nir_src_for_ssa(coord);
-	store->src[1] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
-	store->src[2] = nir_src_for_ssa(outval);
-	store->variables[0] = nir_deref_var_create(store, output_img);
+	nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_deref_store);
+	store->num_components = 4;
+	store->src[0] = nir_src_for_ssa(&nir_build_deref_var(&b, output_img)->dest.ssa);
+	store->src[1] = nir_src_for_ssa(coord);
+	store->src[2] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32));
+	store->src[3] = nir_src_for_ssa(outval);
+	store->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0));
	nir_builder_instr_insert(&b, &store->instr);
	return b.shader;
}

+enum {
+	DEPTH_RESOLVE,
+	STENCIL_RESOLVE,
+};
+
+static const char *
+get_resolve_mode_str(VkResolveModeFlagBits resolve_mode)
+{
+	switch (resolve_mode) {
+	case VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR:
+		return "zero";
+	case VK_RESOLVE_MODE_AVERAGE_BIT_KHR:
+		return "average";
+	case VK_RESOLVE_MODE_MIN_BIT_KHR:
+		return "min";
+	case VK_RESOLVE_MODE_MAX_BIT_KHR:
+		return "max";
+	default:
+		unreachable("invalid resolve mode");
+	}
+}
+
+static nir_shader *
+build_depth_stencil_resolve_compute_shader(struct radv_device *dev, int samples, + int index, + VkResolveModeFlagBits resolve_mode) +{ + nir_builder b; + char name[64]; + const struct glsl_type *sampler_type = glsl_sampler_type(GLSL_SAMPLER_DIM_MS, + false, + false, + GLSL_TYPE_FLOAT); + const struct glsl_type *img_type = glsl_sampler_type(GLSL_SAMPLER_DIM_2D, + false, + false, + GLSL_TYPE_FLOAT); + snprintf(name, 64, "meta_resolve_cs_%s-%s-%d", + index == DEPTH_RESOLVE ? "depth" : "stencil", + get_resolve_mode_str(resolve_mode), samples); + + nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_COMPUTE, NULL); + b.shader->info.name = ralloc_strdup(b.shader, name); + b.shader->info.cs.local_size[0] = 16; + b.shader->info.cs.local_size[1] = 16; + b.shader->info.cs.local_size[2] = 1; + + nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, + sampler_type, "s_tex"); + input_img->data.descriptor_set = 0; + input_img->data.binding = 0; + + nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform, + img_type, "out_img"); + output_img->data.descriptor_set = 0; + output_img->data.binding = 1; + nir_ssa_def *invoc_id = nir_load_local_invocation_id(&b); + nir_ssa_def *wg_id = nir_load_work_group_id(&b); + nir_ssa_def *block_size = nir_imm_ivec4(&b, + b.shader->info.cs.local_size[0], + b.shader->info.cs.local_size[1], + b.shader->info.cs.local_size[2], 0); + + nir_ssa_def *global_id = nir_iadd(&b, nir_imul(&b, wg_id, block_size), invoc_id); + + nir_intrinsic_instr *src_offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant); + nir_intrinsic_set_base(src_offset, 0); + nir_intrinsic_set_range(src_offset, 16); + src_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 0)); + src_offset->num_components = 2; + nir_ssa_dest_init(&src_offset->instr, &src_offset->dest, 2, 32, "src_offset"); + nir_builder_instr_insert(&b, &src_offset->instr); + + nir_intrinsic_instr *dst_offset = nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_push_constant); + nir_intrinsic_set_base(dst_offset, 0); + nir_intrinsic_set_range(dst_offset, 16); + dst_offset->src[0] = nir_src_for_ssa(nir_imm_int(&b, 8)); + dst_offset->num_components = 2; + nir_ssa_dest_init(&dst_offset->instr, &dst_offset->dest, 2, 32, "dst_offset"); + nir_builder_instr_insert(&b, &dst_offset->instr); + + nir_ssa_def *img_coord = nir_channels(&b, nir_iadd(&b, global_id, &src_offset->dest.ssa), 0x3); + + nir_ssa_def *input_img_deref = &nir_build_deref_var(&b, input_img)->dest.ssa; + + nir_alu_type type = index == DEPTH_RESOLVE ? 
nir_type_float : nir_type_uint; + + nir_tex_instr *tex = nir_tex_instr_create(b.shader, 3); + tex->sampler_dim = GLSL_SAMPLER_DIM_MS; + tex->op = nir_texop_txf_ms; + tex->src[0].src_type = nir_tex_src_coord; + tex->src[0].src = nir_src_for_ssa(img_coord); + tex->src[1].src_type = nir_tex_src_ms_index; + tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, 0)); + tex->src[2].src_type = nir_tex_src_texture_deref; + tex->src[2].src = nir_src_for_ssa(input_img_deref); + tex->dest_type = type; + tex->is_array = false; + tex->coord_components = 2; + + nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex"); + nir_builder_instr_insert(&b, &tex->instr); + + nir_ssa_def *outval = &tex->dest.ssa; + + if (resolve_mode != VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR) { + for (int i = 1; i < samples; i++) { + nir_tex_instr *tex_add = nir_tex_instr_create(b.shader, 3); + tex_add->sampler_dim = GLSL_SAMPLER_DIM_MS; + tex_add->op = nir_texop_txf_ms; + tex_add->src[0].src_type = nir_tex_src_coord; + tex_add->src[0].src = nir_src_for_ssa(img_coord); + tex_add->src[1].src_type = nir_tex_src_ms_index; + tex_add->src[1].src = nir_src_for_ssa(nir_imm_int(&b, i)); + tex_add->src[2].src_type = nir_tex_src_texture_deref; + tex_add->src[2].src = nir_src_for_ssa(input_img_deref); + tex_add->dest_type = type; + tex_add->is_array = false; + tex_add->coord_components = 2; + + nir_ssa_dest_init(&tex_add->instr, &tex_add->dest, 4, 32, "tex"); + nir_builder_instr_insert(&b, &tex_add->instr); + + switch (resolve_mode) { + case VK_RESOLVE_MODE_AVERAGE_BIT_KHR: + assert(index == DEPTH_RESOLVE); + outval = nir_fadd(&b, outval, &tex_add->dest.ssa); + break; + case VK_RESOLVE_MODE_MIN_BIT_KHR: + if (index == DEPTH_RESOLVE) + outval = nir_fmin(&b, outval, &tex_add->dest.ssa); + else + outval = nir_umin(&b, outval, &tex_add->dest.ssa); + break; + case VK_RESOLVE_MODE_MAX_BIT_KHR: + if (index == DEPTH_RESOLVE) + outval = nir_fmax(&b, outval, &tex_add->dest.ssa); + else + outval = nir_umax(&b, outval, &tex_add->dest.ssa); + break; + default: + unreachable("invalid resolve mode"); + } + } + + if (resolve_mode == VK_RESOLVE_MODE_AVERAGE_BIT_KHR) + outval = nir_fdiv(&b, outval, nir_imm_float(&b, samples)); + } + + nir_ssa_def *coord = nir_iadd(&b, global_id, &dst_offset->dest.ssa); + nir_intrinsic_instr *store = nir_intrinsic_instr_create(b.shader, nir_intrinsic_image_deref_store); + store->num_components = 4; + store->src[0] = nir_src_for_ssa(&nir_build_deref_var(&b, output_img)->dest.ssa); + store->src[1] = nir_src_for_ssa(coord); + store->src[2] = nir_src_for_ssa(nir_ssa_undef(&b, 1, 32)); + store->src[3] = nir_src_for_ssa(outval); + store->src[4] = nir_src_for_ssa(nir_imm_int(&b, 0)); + nir_builder_instr_insert(&b, &store->instr); + return b.shader; +} static VkResult create_layout(struct radv_device *device) @@ -212,6 +368,12 @@ create_resolve_pipeline(struct radv_device *device, VkResult result; struct radv_shader_module cs = { .nir = NULL }; + mtx_lock(&device->meta_state.mtx); + if (*pipeline) { + mtx_unlock(&device->meta_state.mtx); + return VK_SUCCESS; + } + cs.nir = build_resolve_compute_shader(device, is_integer, is_srgb, samples); /* compute shader */ @@ -239,37 +401,149 @@ create_resolve_pipeline(struct radv_device *device, goto fail; ralloc_free(cs.nir); + mtx_unlock(&device->meta_state.mtx); + return VK_SUCCESS; +fail: + ralloc_free(cs.nir); + mtx_unlock(&device->meta_state.mtx); + return result; +} + +static VkResult +create_depth_stencil_resolve_pipeline(struct radv_device *device, + int samples, + int index, + 
VkResolveModeFlagBits resolve_mode, + VkPipeline *pipeline) +{ + VkResult result; + struct radv_shader_module cs = { .nir = NULL }; + + mtx_lock(&device->meta_state.mtx); + if (*pipeline) { + mtx_unlock(&device->meta_state.mtx); + return VK_SUCCESS; + } + + cs.nir = build_depth_stencil_resolve_compute_shader(device, samples, + index, resolve_mode); + + /* compute shader */ + VkPipelineShaderStageCreateInfo pipeline_shader_stage = { + .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, + .stage = VK_SHADER_STAGE_COMPUTE_BIT, + .module = radv_shader_module_to_handle(&cs), + .pName = "main", + .pSpecializationInfo = NULL, + }; + + VkComputePipelineCreateInfo vk_pipeline_info = { + .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + .stage = pipeline_shader_stage, + .flags = 0, + .layout = device->meta_state.resolve_compute.p_layout, + }; + + result = radv_CreateComputePipelines(radv_device_to_handle(device), + radv_pipeline_cache_to_handle(&device->meta_state.cache), + 1, &vk_pipeline_info, NULL, + pipeline); + if (result != VK_SUCCESS) + goto fail; + + ralloc_free(cs.nir); + mtx_unlock(&device->meta_state.mtx); return VK_SUCCESS; fail: ralloc_free(cs.nir); + mtx_unlock(&device->meta_state.mtx); return result; } VkResult -radv_device_init_meta_resolve_compute_state(struct radv_device *device) +radv_device_init_meta_resolve_compute_state(struct radv_device *device, bool on_demand) { struct radv_meta_state *state = &device->meta_state; VkResult res; - memset(&device->meta_state.resolve_compute, 0, sizeof(device->meta_state.resolve_compute)); res = create_layout(device); if (res != VK_SUCCESS) - return res; + goto fail; + + if (on_demand) + return VK_SUCCESS; for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; ++i) { uint32_t samples = 1 << i; res = create_resolve_pipeline(device, samples, false, false, &state->resolve_compute.rc[i].pipeline); + if (res != VK_SUCCESS) + goto fail; res = create_resolve_pipeline(device, samples, true, false, &state->resolve_compute.rc[i].i_pipeline); + if (res != VK_SUCCESS) + goto fail; res = create_resolve_pipeline(device, samples, false, true, &state->resolve_compute.rc[i].srgb_pipeline); - + if (res != VK_SUCCESS) + goto fail; + + res = create_depth_stencil_resolve_pipeline(device, samples, + DEPTH_RESOLVE, + VK_RESOLVE_MODE_AVERAGE_BIT_KHR, + &state->resolve_compute.depth[i].average_pipeline); + if (res != VK_SUCCESS) + goto fail; + + res = create_depth_stencil_resolve_pipeline(device, samples, + DEPTH_RESOLVE, + VK_RESOLVE_MODE_MAX_BIT_KHR, + &state->resolve_compute.depth[i].max_pipeline); + if (res != VK_SUCCESS) + goto fail; + + res = create_depth_stencil_resolve_pipeline(device, samples, + DEPTH_RESOLVE, + VK_RESOLVE_MODE_MIN_BIT_KHR, + &state->resolve_compute.depth[i].min_pipeline); + if (res != VK_SUCCESS) + goto fail; + + res = create_depth_stencil_resolve_pipeline(device, samples, + STENCIL_RESOLVE, + VK_RESOLVE_MODE_MAX_BIT_KHR, + &state->resolve_compute.stencil[i].max_pipeline); + if (res != VK_SUCCESS) + goto fail; + + res = create_depth_stencil_resolve_pipeline(device, samples, + STENCIL_RESOLVE, + VK_RESOLVE_MODE_MIN_BIT_KHR, + &state->resolve_compute.stencil[i].min_pipeline); + if (res != VK_SUCCESS) + goto fail; } + res = create_depth_stencil_resolve_pipeline(device, 0, + DEPTH_RESOLVE, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR, + &state->resolve_compute.depth_zero_pipeline); + if (res != VK_SUCCESS) + goto fail; + + res = create_depth_stencil_resolve_pipeline(device, 0, + STENCIL_RESOLVE, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR, + 
&state->resolve_compute.stencil_zero_pipeline); + if (res != VK_SUCCESS) + goto fail; + + return VK_SUCCESS; +fail: + radv_device_finish_meta_resolve_compute_state(device); return res; } @@ -289,8 +563,36 @@ radv_device_finish_meta_resolve_compute_state(struct radv_device *device) radv_DestroyPipeline(radv_device_to_handle(device), state->resolve_compute.rc[i].srgb_pipeline, &state->alloc); + + radv_DestroyPipeline(radv_device_to_handle(device), + state->resolve_compute.depth[i].average_pipeline, + &state->alloc); + + radv_DestroyPipeline(radv_device_to_handle(device), + state->resolve_compute.depth[i].max_pipeline, + &state->alloc); + + radv_DestroyPipeline(radv_device_to_handle(device), + state->resolve_compute.depth[i].min_pipeline, + &state->alloc); + + radv_DestroyPipeline(radv_device_to_handle(device), + state->resolve_compute.stencil[i].max_pipeline, + &state->alloc); + + radv_DestroyPipeline(radv_device_to_handle(device), + state->resolve_compute.stencil[i].min_pipeline, + &state->alloc); } + radv_DestroyPipeline(radv_device_to_handle(device), + state->resolve_compute.depth_zero_pipeline, + &state->alloc); + + radv_DestroyPipeline(radv_device_to_handle(device), + state->resolve_compute.stencil_zero_pipeline, + &state->alloc); + radv_DestroyDescriptorSetLayout(radv_device_to_handle(device), state->resolve_compute.ds_layout, &state->alloc); @@ -299,6 +601,39 @@ radv_device_finish_meta_resolve_compute_state(struct radv_device *device) &state->alloc); } +static VkPipeline * +radv_get_resolve_pipeline(struct radv_cmd_buffer *cmd_buffer, + struct radv_image_view *src_iview) +{ + struct radv_device *device = cmd_buffer->device; + struct radv_meta_state *state = &device->meta_state; + uint32_t samples = src_iview->image->info.samples; + uint32_t samples_log2 = ffs(samples) - 1; + VkPipeline *pipeline; + + if (vk_format_is_int(src_iview->vk_format)) + pipeline = &state->resolve_compute.rc[samples_log2].i_pipeline; + else if (vk_format_is_srgb(src_iview->vk_format)) + pipeline = &state->resolve_compute.rc[samples_log2].srgb_pipeline; + else + pipeline = &state->resolve_compute.rc[samples_log2].pipeline; + + if (!*pipeline) { + VkResult ret; + + ret = create_resolve_pipeline(device, samples, + vk_format_is_int(src_iview->vk_format), + vk_format_is_srgb(src_iview->vk_format), + pipeline); + if (ret != VK_SUCCESS) { + cmd_buffer->record_result = ret; + return NULL; + } + } + + return pipeline; +} + static void emit_resolve(struct radv_cmd_buffer *cmd_buffer, struct radv_image_view *src_iview, @@ -306,10 +641,79 @@ emit_resolve(struct radv_cmd_buffer *cmd_buffer, const VkOffset2D *src_offset, const VkOffset2D *dest_offset, const VkExtent2D *resolve_extent) +{ + struct radv_device *device = cmd_buffer->device; + VkPipeline *pipeline; + + radv_meta_push_descriptor_set(cmd_buffer, + VK_PIPELINE_BIND_POINT_COMPUTE, + device->meta_state.resolve_compute.p_layout, + 0, /* set */ + 2, /* descriptorWriteCount */ + (VkWriteDescriptorSet[]) { + { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, + .pImageInfo = (VkDescriptorImageInfo[]) { + { + .sampler = VK_NULL_HANDLE, + .imageView = radv_image_view_to_handle(src_iview), + .imageLayout = VK_IMAGE_LAYOUT_GENERAL }, + } + }, + { + .sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + .pImageInfo = 
(VkDescriptorImageInfo[]) { + { + .sampler = VK_NULL_HANDLE, + .imageView = radv_image_view_to_handle(dest_iview), + .imageLayout = VK_IMAGE_LAYOUT_GENERAL, + }, + } + } + }); + + pipeline = radv_get_resolve_pipeline(cmd_buffer, src_iview); + + radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), + VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline); + + unsigned push_constants[4] = { + src_offset->x, + src_offset->y, + dest_offset->x, + dest_offset->y, + }; + radv_CmdPushConstants(radv_cmd_buffer_to_handle(cmd_buffer), + device->meta_state.resolve_compute.p_layout, + VK_SHADER_STAGE_COMPUTE_BIT, 0, 16, + push_constants); + radv_unaligned_dispatch(cmd_buffer, resolve_extent->width, resolve_extent->height, 1); + +} + +static void +emit_depth_stencil_resolve(struct radv_cmd_buffer *cmd_buffer, + struct radv_image_view *src_iview, + struct radv_image_view *dest_iview, + const VkOffset2D *src_offset, + const VkOffset2D *dest_offset, + const VkExtent2D *resolve_extent, + VkImageAspectFlags aspects, + VkResolveModeFlagBits resolve_mode) { struct radv_device *device = cmd_buffer->device; const uint32_t samples = src_iview->image->info.samples; const uint32_t samples_log2 = ffs(samples) - 1; + VkPipeline *pipeline; + radv_meta_push_descriptor_set(cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, device->meta_state.resolve_compute.p_layout, @@ -345,18 +749,49 @@ emit_resolve(struct radv_cmd_buffer *cmd_buffer, } }); - VkPipeline pipeline; - if (vk_format_is_int(src_iview->image->vk_format)) - pipeline = device->meta_state.resolve_compute.rc[samples_log2].i_pipeline; - else if (vk_format_is_srgb(src_iview->image->vk_format)) - pipeline = device->meta_state.resolve_compute.rc[samples_log2].srgb_pipeline; - else - pipeline = device->meta_state.resolve_compute.rc[samples_log2].pipeline; - if (cmd_buffer->state.compute_pipeline != radv_pipeline_from_handle(pipeline)) { - radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), - VK_PIPELINE_BIND_POINT_COMPUTE, pipeline); + switch (resolve_mode) { + case VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR: + if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) + pipeline = &device->meta_state.resolve_compute.depth_zero_pipeline; + else + pipeline = &device->meta_state.resolve_compute.stencil_zero_pipeline; + break; + case VK_RESOLVE_MODE_AVERAGE_BIT_KHR: + assert(aspects == VK_IMAGE_ASPECT_DEPTH_BIT); + pipeline = &device->meta_state.resolve_compute.depth[samples_log2].average_pipeline; + break; + case VK_RESOLVE_MODE_MIN_BIT_KHR: + if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) + pipeline = &device->meta_state.resolve_compute.depth[samples_log2].min_pipeline; + else + pipeline = &device->meta_state.resolve_compute.stencil[samples_log2].min_pipeline; + break; + case VK_RESOLVE_MODE_MAX_BIT_KHR: + if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) + pipeline = &device->meta_state.resolve_compute.depth[samples_log2].max_pipeline; + else + pipeline = &device->meta_state.resolve_compute.stencil[samples_log2].max_pipeline; + break; + default: + unreachable("invalid resolve mode"); + } + + if (!*pipeline) { + int index = aspects == VK_IMAGE_ASPECT_DEPTH_BIT ? 
DEPTH_RESOLVE : STENCIL_RESOLVE; + VkResult ret; + + ret = create_depth_stencil_resolve_pipeline(device, samples, + index, resolve_mode, + pipeline); + if (ret != VK_SUCCESS) { + cmd_buffer->record_result = ret; + return; + } } + radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), + VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline); + unsigned push_constants[4] = { src_offset->x, src_offset->y, @@ -373,29 +808,23 @@ emit_resolve(struct radv_cmd_buffer *cmd_buffer, void radv_meta_resolve_compute_image(struct radv_cmd_buffer *cmd_buffer, struct radv_image *src_image, + VkFormat src_format, VkImageLayout src_image_layout, struct radv_image *dest_image, + VkFormat dest_format, VkImageLayout dest_image_layout, uint32_t region_count, const VkImageResolve *regions) { - struct radv_meta_saved_compute_state saved_state; + struct radv_meta_saved_state saved_state; - for (uint32_t r = 0; r < region_count; ++r) { - const VkImageResolve *region = ®ions[r]; - const uint32_t src_base_layer = - radv_meta_get_iview_layer(src_image, ®ion->srcSubresource, - ®ion->srcOffset); - VkImageSubresourceRange range; - range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - range.baseMipLevel = region->srcSubresource.mipLevel; - range.levelCount = 1; - range.baseArrayLayer = src_base_layer; - range.layerCount = region->srcSubresource.layerCount; - radv_fast_clear_flush_image_inplace(cmd_buffer, src_image, &range); - } + radv_decompress_resolve_src(cmd_buffer, src_image, src_image_layout, + region_count, regions); - radv_meta_save_compute(&saved_state, cmd_buffer, 16); + radv_meta_save(&saved_state, cmd_buffer, + RADV_META_SAVE_COMPUTE_PIPELINE | + RADV_META_SAVE_CONSTANTS | + RADV_META_SAVE_DESCRIPTORS); for (uint32_t r = 0; r < region_count; ++r) { const VkImageResolve *region = ®ions[r]; @@ -428,7 +857,7 @@ void radv_meta_resolve_compute_image(struct radv_cmd_buffer *cmd_buffer, .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, .image = radv_image_to_handle(src_image), .viewType = radv_meta_get_view_type(src_image), - .format = src_image->vk_format, + .format = src_format, .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = region->srcSubresource.mipLevel, @@ -436,7 +865,7 @@ void radv_meta_resolve_compute_image(struct radv_cmd_buffer *cmd_buffer, .baseArrayLayer = src_base_layer + layer, .layerCount = 1, }, - }); + }, NULL); struct radv_image_view dest_iview; radv_image_view_init(&dest_iview, cmd_buffer->device, @@ -444,7 +873,7 @@ void radv_meta_resolve_compute_image(struct radv_cmd_buffer *cmd_buffer, .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, .image = radv_image_to_handle(dest_image), .viewType = radv_meta_get_view_type(dest_image), - .format = vk_to_non_srgb_format(dest_image->vk_format), + .format = vk_to_non_srgb_format(dest_format), .subresourceRange = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, .baseMipLevel = region->dstSubresource.mipLevel, @@ -452,7 +881,7 @@ void radv_meta_resolve_compute_image(struct radv_cmd_buffer *cmd_buffer, .baseArrayLayer = dest_base_layer + layer, .layerCount = 1, }, - }); + }, NULL); emit_resolve(cmd_buffer, &src_iview, @@ -462,7 +891,7 @@ void radv_meta_resolve_compute_image(struct radv_cmd_buffer *cmd_buffer, &(VkExtent2D) {extent.width, extent.height }); } } - radv_meta_restore_compute(&saved_state, cmd_buffer); + radv_meta_restore(&saved_state, cmd_buffer); } /** @@ -473,75 +902,171 @@ radv_cmd_buffer_resolve_subpass_cs(struct radv_cmd_buffer *cmd_buffer) { struct radv_framebuffer *fb = cmd_buffer->state.framebuffer; const struct radv_subpass 
*subpass = cmd_buffer->state.subpass; - struct radv_meta_saved_compute_state saved_state; - /* FINISHME(perf): Skip clears for resolve attachments. - * - * From the Vulkan 1.0 spec: - * - * If the first use of an attachment in a render pass is as a resolve - * attachment, then the loadOp is effectively ignored as the resolve is - * guaranteed to overwrite all pixels in the render area. - */ + struct radv_subpass_barrier barrier; + uint32_t layer_count = fb->layers; + + if (subpass->view_mask) + layer_count = util_last_bit(subpass->view_mask); - if (!subpass->has_resolve) - return; + /* Resolves happen before the end-of-subpass barriers get executed, so + * we have to make the attachment shader-readable. + */ + barrier.src_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + barrier.src_access_mask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; + barrier.dst_access_mask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; + radv_subpass_barrier(cmd_buffer, &barrier); for (uint32_t i = 0; i < subpass->color_count; ++i) { - VkAttachmentReference src_att = subpass->color_attachments[i]; - VkAttachmentReference dest_att = subpass->resolve_attachments[i]; + struct radv_subpass_attachment src_att = subpass->color_attachments[i]; + struct radv_subpass_attachment dst_att = subpass->resolve_attachments[i]; - if (src_att.attachment == VK_ATTACHMENT_UNUSED || - dest_att.attachment == VK_ATTACHMENT_UNUSED) + if (dst_att.attachment == VK_ATTACHMENT_UNUSED) continue; - struct radv_image *dst_img = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment->image; - struct radv_image_view *src_iview = cmd_buffer->state.framebuffer->attachments[src_att.attachment].attachment; + struct radv_image_view *src_iview = cmd_buffer->state.attachments[src_att.attachment].iview; + struct radv_image_view *dst_iview = cmd_buffer->state.attachments[dst_att.attachment].iview; - if (dst_img->surface.dcc_size) { - radv_initialize_dcc(cmd_buffer, dst_img, 0xffffffff); - cmd_buffer->state.attachments[dest_att.attachment].current_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; - } - - VkImageSubresourceRange range; - range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - range.baseMipLevel = 0; - range.levelCount = 1; - range.baseArrayLayer = 0; - range.layerCount = 1; - radv_fast_clear_flush_image_inplace(cmd_buffer, src_iview->image, &range); + VkImageResolve region = { + .extent = (VkExtent3D){ fb->width, fb->height, 0 }, + .srcSubresource = (VkImageSubresourceLayers) { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = src_iview->base_mip, + .baseArrayLayer = src_iview->base_layer, + .layerCount = layer_count, + }, + .dstSubresource = (VkImageSubresourceLayers) { + .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, + .mipLevel = dst_iview->base_mip, + .baseArrayLayer = dst_iview->base_layer, + .layerCount = layer_count, + }, + .srcOffset = (VkOffset3D){ 0, 0, 0 }, + .dstOffset = (VkOffset3D){ 0, 0, 0 }, + }; + + radv_meta_resolve_compute_image(cmd_buffer, + src_iview->image, + src_iview->vk_format, + src_att.layout, + dst_iview->image, + dst_iview->vk_format, + dst_att.layout, + 1, ®ion); } - radv_meta_save_compute(&saved_state, cmd_buffer, 16); + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | + RADV_CMD_FLAG_INV_VCACHE; +} - for (uint32_t i = 0; i < subpass->color_count; ++i) { - VkAttachmentReference src_att = subpass->color_attachments[i]; - VkAttachmentReference dest_att = subpass->resolve_attachments[i]; - struct radv_image_view *src_iview = 
cmd_buffer->state.framebuffer->attachments[src_att.attachment].attachment; - struct radv_image_view *dst_iview = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment; - if (dest_att.attachment == VK_ATTACHMENT_UNUSED) - continue; +void +radv_depth_stencil_resolve_subpass_cs(struct radv_cmd_buffer *cmd_buffer, + VkImageAspectFlags aspects, + VkResolveModeFlagBits resolve_mode) +{ + struct radv_framebuffer *fb = cmd_buffer->state.framebuffer; + const struct radv_subpass *subpass = cmd_buffer->state.subpass; + struct radv_meta_saved_state saved_state; + struct radv_subpass_barrier barrier; + uint32_t layer_count = fb->layers; - emit_resolve(cmd_buffer, - src_iview, - dst_iview, - &(VkOffset2D) { 0, 0 }, - &(VkOffset2D) { 0, 0 }, - &(VkExtent2D) { fb->width, fb->height }); - } + if (subpass->view_mask) + layer_count = util_last_bit(subpass->view_mask); - radv_meta_restore_compute(&saved_state, cmd_buffer); + /* Resolves happen before the end-of-subpass barriers get executed, so + * we have to make the attachment shader-readable. + */ + barrier.src_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; + barrier.src_access_mask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; + barrier.dst_access_mask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT; + radv_subpass_barrier(cmd_buffer, &barrier); + + radv_decompress_resolve_subpass_src(cmd_buffer); + + radv_meta_save(&saved_state, cmd_buffer, + RADV_META_SAVE_COMPUTE_PIPELINE | + RADV_META_SAVE_CONSTANTS | + RADV_META_SAVE_DESCRIPTORS); + + struct radv_subpass_attachment src_att = *subpass->depth_stencil_attachment; + struct radv_subpass_attachment dest_att = *subpass->ds_resolve_attachment; + + struct radv_image_view *src_iview = + cmd_buffer->state.attachments[src_att.attachment].iview; + struct radv_image_view *dst_iview = + cmd_buffer->state.attachments[dest_att.attachment].iview; + + struct radv_image *src_image = src_iview->image; + struct radv_image *dst_image = dst_iview->image; + + for (uint32_t layer = 0; layer < layer_count; layer++) { + struct radv_image_view tsrc_iview; + radv_image_view_init(&tsrc_iview, cmd_buffer->device, + &(VkImageViewCreateInfo) { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = radv_image_to_handle(src_image), + .viewType = radv_meta_get_view_type(src_image), + .format = src_iview->vk_format, + .subresourceRange = { + .aspectMask = aspects, + .baseMipLevel = src_iview->base_mip, + .levelCount = 1, + .baseArrayLayer = src_iview->base_layer + layer, + .layerCount = 1, + }, + }, NULL); + + struct radv_image_view tdst_iview; + radv_image_view_init(&tdst_iview, cmd_buffer->device, + &(VkImageViewCreateInfo) { + .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + .image = radv_image_to_handle(dst_image), + .viewType = radv_meta_get_view_type(dst_image), + .format = dst_iview->vk_format, + .subresourceRange = { + .aspectMask = aspects, + .baseMipLevel = dst_iview->base_mip, + .levelCount = 1, + .baseArrayLayer = dst_iview->base_layer + layer, + .layerCount = 1, + }, + }, NULL); + + emit_depth_stencil_resolve(cmd_buffer, &tsrc_iview, &tdst_iview, + &(VkOffset2D) { 0, 0 }, + &(VkOffset2D) { 0, 0 }, + &(VkExtent2D) { fb->width, fb->height }, + aspects, + resolve_mode); + } - for (uint32_t i = 0; i < subpass->color_count; ++i) { - VkAttachmentReference dest_att = subpass->resolve_attachments[i]; - struct radv_image *dst_img = cmd_buffer->state.framebuffer->attachments[dest_att.attachment].attachment->image; - if (dest_att.attachment == VK_ATTACHMENT_UNUSED) - continue; - 
VkImageSubresourceRange range; - range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; - range.baseMipLevel = 0; - range.levelCount = 1; - range.baseArrayLayer = 0; - range.layerCount = 1; - radv_fast_clear_flush_image_inplace(cmd_buffer, dst_img, &range); + cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | + RADV_CMD_FLAG_INV_VCACHE; + + if (radv_image_has_htile(dst_image)) { + if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) { + VkImageSubresourceRange range = {}; + range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + range.baseMipLevel = dst_iview->base_mip; + range.levelCount = 1; + range.baseArrayLayer = dst_iview->base_layer; + range.layerCount = layer_count; + + uint32_t clear_value = 0xfffc000f; + + if (vk_format_is_stencil(dst_image->vk_format) && + subpass->stencil_resolve_mode != VK_RESOLVE_MODE_NONE_KHR) { + /* Only clear the stencil part of the HTILE + * buffer if it's resolved, otherwise this + * might break if the stencil has been cleared. + */ + clear_value = 0xfffff30f; + } + + cmd_buffer->state.flush_bits |= + radv_clear_htile(cmd_buffer, dst_image, &range, + clear_value); + } } + + radv_meta_restore(&saved_state, cmd_buffer); }
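
Notes on the patch:

The first hunk only swaps the nir_const_value plumbing for the nir_imm_int()/nir_imm_float() builder helpers; the constants themselves are unchanged and implement the standard linear-to-sRGB encoding (0x3b4d2e1c is the IEEE-754 bit pattern of the 0.0031308 cutoff, as the old comment noted). The shader computes both segments and then selects per channel using the cmp[] results. As scalar C, one channel of the transform looks like this (a reference sketch for review, not radv code):

	#include <math.h>

	/* Linear -> sRGB encode for one channel: linear segment below the
	 * cutoff, gamma segment above it. */
	static float
	linear_to_srgb(float c)
	{
		if (c < 0.0031308f)	/* 0x3b4d2e1c as a float */
			return 12.92f * c;
		return 1.055f * powf(c, 1.0f / 2.4f) - 0.055f;
	}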
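
Both resolve shaders read their coordinate offsets from a single 16-byte push-constant block: the two load_push_constant intrinsics use range 16 with constant offsets 0 and 8, which matches the push_constants[4] array that emit_resolve() and emit_depth_stencil_resolve() upload via radv_CmdPushConstants(..., 0, 16, ...). Seen as a struct (illustrative only, no such type exists in the tree):

	#include <stdint.h>

	/* Hypothetical view of the 16 bytes of push constants. */
	struct resolve_push_consts {
		int32_t src_offset[2];	/* byte 0: added to the texel fetch coord */
		int32_t dst_offset[2];	/* byte 8: added to the image store coord */
	};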
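
The sample loop in build_depth_stencil_resolve_compute_shader() implements the VK_KHR_depth_stencil_resolve modes as a plain reduction: sample 0 is always fetched, SAMPLE_ZERO stops there, MIN/MAX fold in the remaining samples (nir_fmin/nir_fmax for depth, nir_umin/nir_umax for stencil), and AVERAGE, which the assert restricts to depth, sums and then divides. The depth case as scalar C (a sketch; sample[] stands in for the txf_ms fetches):

	#include <math.h>
	#include <vulkan/vulkan.h>

	static float
	resolve_depth_pixel(const float *sample, int samples,
			    VkResolveModeFlagBits mode)
	{
		float v = sample[0];

		if (mode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR)
			return v;

		for (int i = 1; i < samples; i++) {
			switch (mode) {
			case VK_RESOLVE_MODE_AVERAGE_BIT_KHR:
				v += sample[i];
				break;
			case VK_RESOLVE_MODE_MIN_BIT_KHR:
				v = fminf(v, sample[i]);
				break;
			default: /* VK_RESOLVE_MODE_MAX_BIT_KHR */
				v = fmaxf(v, sample[i]);
				break;
			}
		}

		if (mode == VK_RESOLVE_MODE_AVERAGE_BIT_KHR)
			v /= (float)samples;
		return v;
	}

The stencil variant has the same shape with unsigned min/max and no AVERAGE case.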
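
Finally, pipeline creation is now on demand: with on_demand set, radv_device_init_meta_resolve_compute_state() only creates the layouts, and the first resolve that needs a given pipeline builds it. The unlocked check in radv_get_resolve_pipeline() and emit_depth_stencil_resolve(), plus the re-check under meta_state.mtx at the top of the create_*_pipeline() helpers, form a double-checked creation pattern, roughly this shape (self-contained sketch with hypothetical names, not radv code):

	#include <threads.h>	/* C11 mtx_t; mesa uses its c11 wrappers */

	struct lazy_pipeline {
		mtx_t mtx;
		void *handle;	/* stands in for the cached VkPipeline */
	};

	/* Double-checked creation: unlocked fast path, locked re-check, so
	 * racing threads build the pipeline at most once. */
	static int
	get_or_create(struct lazy_pipeline *p, int (*build)(void **out))
	{
		int ret = 0;

		if (p->handle)		/* fast path: already built */
			return 0;

		mtx_lock(&p->mtx);
		if (!p->handle)		/* re-check: someone may have won the race */
			ret = build(&p->handle);
		mtx_unlock(&p->mtx);
		return ret;
	}

The unlocked read mirrors what the patch does; strictly portable C11 would make it an atomic load.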