X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Famd%2Fvulkan%2Fradv_meta.c;h=ec4fc4a6d4bf03fa248edda20496a421ecdbb95d;hb=4f18c43d1df64135e8968a7d4fbfd2c9918b76ae;hp=3584396b727573680ead7259f96b3ce0f98a8e19;hpb=dd17e4ceb40efdd8895b24eb3940771bf64208f1;p=mesa.git

diff --git a/src/amd/vulkan/radv_meta.c b/src/amd/vulkan/radv_meta.c
index 3584396b727..ec4fc4a6d4b 100644
--- a/src/amd/vulkan/radv_meta.c
+++ b/src/amd/vulkan/radv_meta.c
@@ -30,103 +30,136 @@
 #include <pwd.h>
 #include <sys/stat.h>
 
-void
-radv_meta_save_novertex(struct radv_meta_saved_state *state,
- const struct radv_cmd_buffer *cmd_buffer,
- uint32_t dynamic_mask)
-{
- state->old_pipeline = cmd_buffer->state.pipeline;
-
- state->dynamic_mask = dynamic_mask;
- radv_dynamic_state_copy(&state->dynamic, &cmd_buffer->state.dynamic,
- dynamic_mask);
-
- memcpy(state->push_constants, cmd_buffer->push_constants, MAX_PUSH_CONSTANTS_SIZE);
- state->vertex_saved = false;
-}
-
 void
 radv_meta_save(struct radv_meta_saved_state *state,
- const struct radv_cmd_buffer *cmd_buffer,
- uint32_t dynamic_mask)
+ struct radv_cmd_buffer *cmd_buffer, uint32_t flags)
 {
- radv_meta_save_novertex(state, cmd_buffer, dynamic_mask);
- state->old_descriptor_set0 = cmd_buffer->state.descriptors[0];
- memcpy(state->old_vertex_bindings, cmd_buffer->state.vertex_bindings,
- sizeof(state->old_vertex_bindings));
- state->vertex_saved = true;
-}
-
-void
-radv_meta_restore(const struct radv_meta_saved_state *state,
- struct radv_cmd_buffer *cmd_buffer)
-{
- cmd_buffer->state.pipeline = state->old_pipeline;
- if (state->vertex_saved) {
- radv_bind_descriptor_set(cmd_buffer, state->old_descriptor_set0, 0);
- memcpy(cmd_buffer->state.vertex_bindings, state->old_vertex_bindings,
- sizeof(state->old_vertex_bindings));
- cmd_buffer->state.vb_dirty |= (1 << RADV_META_VERTEX_BINDING_COUNT) - 1;
+ VkPipelineBindPoint bind_point =
+ flags & RADV_META_SAVE_GRAPHICS_PIPELINE ?
+ VK_PIPELINE_BIND_POINT_GRAPHICS :
+ VK_PIPELINE_BIND_POINT_COMPUTE;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
+
+ assert(flags & (RADV_META_SAVE_GRAPHICS_PIPELINE |
+ RADV_META_SAVE_COMPUTE_PIPELINE));
+
+ state->flags = flags;
+
+ if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) {
+ assert(!(state->flags & RADV_META_SAVE_COMPUTE_PIPELINE));
+
+ state->old_pipeline = cmd_buffer->state.pipeline;
+
+ /* Save all viewports. */
+ state->viewport.count = cmd_buffer->state.dynamic.viewport.count;
+ typed_memcpy(state->viewport.viewports,
+ cmd_buffer->state.dynamic.viewport.viewports,
+ MAX_VIEWPORTS);
+
+ /* Save all scissors. */
+ state->scissor.count = cmd_buffer->state.dynamic.scissor.count;
+ typed_memcpy(state->scissor.scissors,
+ cmd_buffer->state.dynamic.scissor.scissors,
+ MAX_SCISSORS);
+
+ /* The most common meta operations all want to have the
+ * viewport reset and any scissors disabled. The rest of the
+ * dynamic state should have no effect.
+ */ + cmd_buffer->state.dynamic.viewport.count = 0; + cmd_buffer->state.dynamic.scissor.count = 0; + cmd_buffer->state.dirty |= 1 << VK_DYNAMIC_STATE_VIEWPORT | + 1 << VK_DYNAMIC_STATE_SCISSOR; } - cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE; + if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) { + assert(!(state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE)); - radv_dynamic_state_copy(&cmd_buffer->state.dynamic, &state->dynamic, - state->dynamic_mask); - cmd_buffer->state.dirty |= state->dynamic_mask; + state->old_pipeline = cmd_buffer->state.compute_pipeline; + } - memcpy(cmd_buffer->push_constants, state->push_constants, MAX_PUSH_CONSTANTS_SIZE); - cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_ALL_GRAPHICS | VK_SHADER_STAGE_COMPUTE_BIT; -} + if (state->flags & RADV_META_SAVE_DESCRIPTORS) { + state->old_descriptor_set0 = descriptors_state->sets[0]; + if (!state->old_descriptor_set0) + state->flags &= ~RADV_META_SAVE_DESCRIPTORS; + } -void -radv_meta_save_pass(struct radv_meta_saved_pass_state *state, - const struct radv_cmd_buffer *cmd_buffer) -{ - state->pass = cmd_buffer->state.pass; - state->subpass = cmd_buffer->state.subpass; - state->framebuffer = cmd_buffer->state.framebuffer; - state->attachments = cmd_buffer->state.attachments; - state->render_area = cmd_buffer->state.render_area; -} + if (state->flags & RADV_META_SAVE_CONSTANTS) { + memcpy(state->push_constants, cmd_buffer->push_constants, + MAX_PUSH_CONSTANTS_SIZE); + } -void -radv_meta_restore_pass(const struct radv_meta_saved_pass_state *state, - struct radv_cmd_buffer *cmd_buffer) -{ - cmd_buffer->state.pass = state->pass; - cmd_buffer->state.subpass = state->subpass; - cmd_buffer->state.framebuffer = state->framebuffer; - cmd_buffer->state.attachments = state->attachments; - cmd_buffer->state.render_area = state->render_area; - if (state->subpass) - radv_emit_framebuffer_state(cmd_buffer); + if (state->flags & RADV_META_SAVE_PASS) { + state->pass = cmd_buffer->state.pass; + state->subpass = cmd_buffer->state.subpass; + state->framebuffer = cmd_buffer->state.framebuffer; + state->attachments = cmd_buffer->state.attachments; + state->render_area = cmd_buffer->state.render_area; + } } void -radv_meta_save_compute(struct radv_meta_saved_compute_state *state, - const struct radv_cmd_buffer *cmd_buffer, - unsigned push_constant_size) +radv_meta_restore(const struct radv_meta_saved_state *state, + struct radv_cmd_buffer *cmd_buffer) { - state->old_pipeline = cmd_buffer->state.compute_pipeline; - state->old_descriptor_set0 = cmd_buffer->state.descriptors[0]; + VkPipelineBindPoint bind_point = + state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE ? + VK_PIPELINE_BIND_POINT_GRAPHICS : + VK_PIPELINE_BIND_POINT_COMPUTE; + + if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) { + radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), + VK_PIPELINE_BIND_POINT_GRAPHICS, + radv_pipeline_to_handle(state->old_pipeline)); + + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE; + + /* Restore all viewports. */ + cmd_buffer->state.dynamic.viewport.count = state->viewport.count; + typed_memcpy(cmd_buffer->state.dynamic.viewport.viewports, + state->viewport.viewports, + MAX_VIEWPORTS); + + /* Restore all scissors. 
*/ + cmd_buffer->state.dynamic.scissor.count = state->scissor.count; + typed_memcpy(cmd_buffer->state.dynamic.scissor.scissors, + state->scissor.scissors, + MAX_SCISSORS); + + cmd_buffer->state.dirty |= 1 << VK_DYNAMIC_STATE_VIEWPORT | + 1 << VK_DYNAMIC_STATE_SCISSOR; + } - if (push_constant_size) - memcpy(state->push_constants, cmd_buffer->push_constants, push_constant_size); -} + if (state->flags & RADV_META_SAVE_COMPUTE_PIPELINE) { + radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), + VK_PIPELINE_BIND_POINT_COMPUTE, + radv_pipeline_to_handle(state->old_pipeline)); + } -void -radv_meta_restore_compute(const struct radv_meta_saved_compute_state *state, - struct radv_cmd_buffer *cmd_buffer, - unsigned push_constant_size) -{ - radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE, - radv_pipeline_to_handle(state->old_pipeline)); - radv_bind_descriptor_set(cmd_buffer, state->old_descriptor_set0, 0); + if (state->flags & RADV_META_SAVE_DESCRIPTORS) { + radv_set_descriptor_set(cmd_buffer, bind_point, + state->old_descriptor_set0, 0); + } - if (push_constant_size) { - memcpy(cmd_buffer->push_constants, state->push_constants, push_constant_size); + if (state->flags & RADV_META_SAVE_CONSTANTS) { + memcpy(cmd_buffer->push_constants, state->push_constants, + MAX_PUSH_CONSTANTS_SIZE); cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT; + + if (state->flags & RADV_META_SAVE_GRAPHICS_PIPELINE) { + cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_ALL_GRAPHICS; + } + } + + if (state->flags & RADV_META_SAVE_PASS) { + cmd_buffer->state.pass = state->pass; + cmd_buffer->state.subpass = state->subpass; + cmd_buffer->state.framebuffer = state->framebuffer; + cmd_buffer->state.attachments = state->attachments; + cmd_buffer->state.render_area = state->render_area; + if (state->subpass) + cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER; } } @@ -201,15 +234,12 @@ radv_builtin_cache_path(char *path) const char *suffix2 = "/.cache/radv_builtin_shaders"; struct passwd pwd, *result; char path2[PATH_MAX + 1]; /* PATH_MAX is not a real max,but suffices here. 
*/ + int ret; if (xdg_cache_home) { - - if (strlen(xdg_cache_home) + strlen(suffix) > PATH_MAX) - return false; - - strcpy(path, xdg_cache_home); - strcat(path, suffix); - return true; + ret = snprintf(path, PATH_MAX + 1, "%s%s%zd", + xdg_cache_home, suffix, sizeof(void *) * 8); + return ret > 0 && ret < PATH_MAX + 1; } getpwuid_r(getuid(), &pwd, path2, PATH_MAX - strlen(suffix2), &result); @@ -220,23 +250,25 @@ radv_builtin_cache_path(char *path) strcat(path, "/.cache"); mkdir(path, 0755); - strcat(path, suffix); - return true; + ret = snprintf(path, PATH_MAX + 1, "%s%s%zd", + pwd.pw_dir, suffix2, sizeof(void *) * 8); + return ret > 0 && ret < PATH_MAX + 1; } -static void +static bool radv_load_meta_pipeline(struct radv_device *device) { char path[PATH_MAX + 1]; struct stat st; void *data = NULL; + bool ret = false; if (!radv_builtin_cache_path(path)) - return; + return false; int fd = open(path, O_RDONLY); if (fd < 0) - return; + return false; if (fstat(fd, &st)) goto fail; data = malloc(st.st_size); @@ -245,10 +277,11 @@ radv_load_meta_pipeline(struct radv_device *device) if(read(fd, data, st.st_size) == -1) goto fail; - radv_pipeline_cache_load(&device->meta_state.cache, data, st.st_size); + ret = radv_pipeline_cache_load(&device->meta_state.cache, data, st.st_size); fail: free(data); close(fd); + return ret; } static void @@ -297,6 +330,8 @@ radv_device_init_meta(struct radv_device *device) { VkResult result; + memset(&device->meta_state, 0, sizeof(device->meta_state)); + device->meta_state.alloc = (VkAllocationCallbacks) { .pUserData = device, .pfnAllocation = meta_alloc, @@ -306,21 +341,24 @@ radv_device_init_meta(struct radv_device *device) device->meta_state.cache.alloc = device->meta_state.alloc; radv_pipeline_cache_init(&device->meta_state.cache, device); - radv_load_meta_pipeline(device); + bool loaded_cache = radv_load_meta_pipeline(device); + bool on_demand = !loaded_cache; + + mtx_init(&device->meta_state.mtx, mtx_plain); - result = radv_device_init_meta_clear_state(device); + result = radv_device_init_meta_clear_state(device, on_demand); if (result != VK_SUCCESS) goto fail_clear; - result = radv_device_init_meta_resolve_state(device); + result = radv_device_init_meta_resolve_state(device, on_demand); if (result != VK_SUCCESS) goto fail_resolve; - result = radv_device_init_meta_blit_state(device); + result = radv_device_init_meta_blit_state(device, on_demand); if (result != VK_SUCCESS) goto fail_blit; - result = radv_device_init_meta_blit2d_state(device); + result = radv_device_init_meta_blit2d_state(device, on_demand); if (result != VK_SUCCESS) goto fail_blit2d; @@ -328,7 +366,7 @@ radv_device_init_meta(struct radv_device *device) if (result != VK_SUCCESS) goto fail_bufimage; - result = radv_device_init_meta_depth_decomp_state(device); + result = radv_device_init_meta_depth_decomp_state(device, on_demand); if (result != VK_SUCCESS) goto fail_depth_decomp; @@ -336,25 +374,38 @@ radv_device_init_meta(struct radv_device *device) if (result != VK_SUCCESS) goto fail_buffer; - result = radv_device_init_meta_query_state(device); + result = radv_device_init_meta_query_state(device, on_demand); if (result != VK_SUCCESS) goto fail_query; - result = radv_device_init_meta_fast_clear_flush_state(device); + result = radv_device_init_meta_fast_clear_flush_state(device, on_demand); if (result != VK_SUCCESS) goto fail_fast_clear; - result = radv_device_init_meta_resolve_compute_state(device); + result = radv_device_init_meta_resolve_compute_state(device, on_demand); if (result != 
VK_SUCCESS) goto fail_resolve_compute; + + result = radv_device_init_meta_resolve_fragment_state(device, on_demand); + if (result != VK_SUCCESS) + goto fail_resolve_fragment; + + result = radv_device_init_meta_fmask_expand_state(device); + if (result != VK_SUCCESS) + goto fail_fmask_expand; + return VK_SUCCESS; +fail_fmask_expand: + radv_device_finish_meta_resolve_fragment_state(device); +fail_resolve_fragment: + radv_device_finish_meta_resolve_compute_state(device); fail_resolve_compute: radv_device_finish_meta_fast_clear_flush_state(device); fail_fast_clear: - radv_device_finish_meta_buffer_state(device); -fail_query: radv_device_finish_meta_query_state(device); +fail_query: + radv_device_finish_meta_buffer_state(device); fail_buffer: radv_device_finish_meta_depth_decomp_state(device); fail_depth_decomp: @@ -368,6 +419,7 @@ fail_blit: fail_resolve: radv_device_finish_meta_clear_state(device); fail_clear: + mtx_destroy(&device->meta_state.mtx); radv_pipeline_cache_finish(&device->meta_state.cache); return result; } @@ -385,36 +437,12 @@ radv_device_finish_meta(struct radv_device *device) radv_device_finish_meta_buffer_state(device); radv_device_finish_meta_fast_clear_flush_state(device); radv_device_finish_meta_resolve_compute_state(device); + radv_device_finish_meta_resolve_fragment_state(device); + radv_device_finish_meta_fmask_expand_state(device); radv_store_meta_pipeline(device); radv_pipeline_cache_finish(&device->meta_state.cache); -} - -/* - * The most common meta operations all want to have the viewport - * reset and any scissors disabled. The rest of the dynamic state - * should have no effect. - */ -void -radv_meta_save_graphics_reset_vport_scissor(struct radv_meta_saved_state *saved_state, - struct radv_cmd_buffer *cmd_buffer) -{ - uint32_t dirty_state = (1 << VK_DYNAMIC_STATE_VIEWPORT) | (1 << VK_DYNAMIC_STATE_SCISSOR); - radv_meta_save(saved_state, cmd_buffer, dirty_state); - cmd_buffer->state.dynamic.viewport.count = 0; - cmd_buffer->state.dynamic.scissor.count = 0; - cmd_buffer->state.dirty |= dirty_state; -} - -void -radv_meta_save_graphics_reset_vport_scissor_novertex(struct radv_meta_saved_state *saved_state, - struct radv_cmd_buffer *cmd_buffer) -{ - uint32_t dirty_state = (1 << VK_DYNAMIC_STATE_VIEWPORT) | (1 << VK_DYNAMIC_STATE_SCISSOR); - radv_meta_save_novertex(saved_state, cmd_buffer, dirty_state); - cmd_buffer->state.dynamic.viewport.count = 0; - cmd_buffer->state.dynamic.scissor.count = 0; - cmd_buffer->state.dirty |= dirty_state; + mtx_destroy(&device->meta_state.mtx); } nir_ssa_def *radv_meta_gen_rect_vertices_comp2(nir_builder *vs_b, nir_ssa_def *comp2) @@ -454,3 +482,121 @@ nir_ssa_def *radv_meta_gen_rect_vertices(nir_builder *vs_b) { return radv_meta_gen_rect_vertices_comp2(vs_b, nir_imm_float(vs_b, 0.0)); } + +/* vertex shader that generates vertices */ +nir_shader * +radv_meta_build_nir_vs_generate_vertices(void) +{ + const struct glsl_type *vec4 = glsl_vec4_type(); + + nir_builder b; + nir_variable *v_position; + + nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, NULL); + b.shader->info.name = ralloc_strdup(b.shader, "meta_vs_gen_verts"); + + nir_ssa_def *outvec = radv_meta_gen_rect_vertices(&b); + + v_position = nir_variable_create(b.shader, nir_var_shader_out, vec4, + "gl_Position"); + v_position->data.location = VARYING_SLOT_POS; + + nir_store_var(&b, v_position, outvec, 0xf); + + return b.shader; +} + +nir_shader * +radv_meta_build_nir_fs_noop(void) +{ + nir_builder b; + + nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, 
NULL); + b.shader->info.name = ralloc_asprintf(b.shader, + "meta_noop_fs"); + + return b.shader; +} + +void radv_meta_build_resolve_shader_core(nir_builder *b, + bool is_integer, + int samples, + nir_variable *input_img, + nir_variable *color, + nir_ssa_def *img_coord) +{ + /* do a txf_ms on each sample */ + nir_ssa_def *tmp; + nir_if *outer_if = NULL; + + nir_ssa_def *input_img_deref = &nir_build_deref_var(b, input_img)->dest.ssa; + + nir_tex_instr *tex = nir_tex_instr_create(b->shader, 3); + tex->sampler_dim = GLSL_SAMPLER_DIM_MS; + tex->op = nir_texop_txf_ms; + tex->src[0].src_type = nir_tex_src_coord; + tex->src[0].src = nir_src_for_ssa(img_coord); + tex->src[1].src_type = nir_tex_src_ms_index; + tex->src[1].src = nir_src_for_ssa(nir_imm_int(b, 0)); + tex->src[2].src_type = nir_tex_src_texture_deref; + tex->src[2].src = nir_src_for_ssa(input_img_deref); + tex->dest_type = nir_type_float; + tex->is_array = false; + tex->coord_components = 2; + + nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex"); + nir_builder_instr_insert(b, &tex->instr); + + tmp = &tex->dest.ssa; + + if (!is_integer && samples > 1) { + nir_tex_instr *tex_all_same = nir_tex_instr_create(b->shader, 2); + tex_all_same->sampler_dim = GLSL_SAMPLER_DIM_MS; + tex_all_same->op = nir_texop_samples_identical; + tex_all_same->src[0].src_type = nir_tex_src_coord; + tex_all_same->src[0].src = nir_src_for_ssa(img_coord); + tex_all_same->src[1].src_type = nir_tex_src_texture_deref; + tex_all_same->src[1].src = nir_src_for_ssa(input_img_deref); + tex_all_same->dest_type = nir_type_float; + tex_all_same->is_array = false; + tex_all_same->coord_components = 2; + + nir_ssa_dest_init(&tex_all_same->instr, &tex_all_same->dest, 1, 32, "tex"); + nir_builder_instr_insert(b, &tex_all_same->instr); + + nir_ssa_def *all_same = nir_ieq(b, &tex_all_same->dest.ssa, nir_imm_int(b, 0)); + nir_if *if_stmt = nir_if_create(b->shader); + if_stmt->condition = nir_src_for_ssa(all_same); + nir_cf_node_insert(b->cursor, &if_stmt->cf_node); + + b->cursor = nir_after_cf_list(&if_stmt->then_list); + for (int i = 1; i < samples; i++) { + nir_tex_instr *tex_add = nir_tex_instr_create(b->shader, 3); + tex_add->sampler_dim = GLSL_SAMPLER_DIM_MS; + tex_add->op = nir_texop_txf_ms; + tex_add->src[0].src_type = nir_tex_src_coord; + tex_add->src[0].src = nir_src_for_ssa(img_coord); + tex_add->src[1].src_type = nir_tex_src_ms_index; + tex_add->src[1].src = nir_src_for_ssa(nir_imm_int(b, i)); + tex_add->src[2].src_type = nir_tex_src_texture_deref; + tex_add->src[2].src = nir_src_for_ssa(input_img_deref); + tex_add->dest_type = nir_type_float; + tex_add->is_array = false; + tex_add->coord_components = 2; + + nir_ssa_dest_init(&tex_add->instr, &tex_add->dest, 4, 32, "tex"); + nir_builder_instr_insert(b, &tex_add->instr); + + tmp = nir_fadd(b, tmp, &tex_add->dest.ssa); + } + + tmp = nir_fdiv(b, tmp, nir_imm_float(b, samples)); + nir_store_var(b, color, tmp, 0xf); + b->cursor = nir_after_cf_list(&if_stmt->else_list); + outer_if = if_stmt; + } + nir_store_var(b, color, &tex->dest.ssa, 0xf); + + if (outer_if) + b->cursor = nir_after_cf_node(&outer_if->cf_node); +}
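
The rework above replaces the old radv_meta_save_novertex()/radv_meta_save_pass()/radv_meta_save_compute() helpers with a single pair of functions driven by RADV_META_SAVE_* flags. The sketch below is illustrative only (radv_meta_example_op() and its example_pipeline parameter are hypothetical, not part of this patch); it shows the calling pattern the in-tree meta operations follow after this change: save exactly the state the operation will clobber, bind the meta pipeline, record the work, then restore.

/* Hypothetical usage sketch of the flags-based save/restore API added by
 * this patch; only radv_meta_save()/radv_meta_restore(), the
 * RADV_META_SAVE_* flags and the radv_Cmd*/radv_*_to_handle() helpers are
 * real, the rest is illustrative. */
static void
radv_meta_example_op(struct radv_cmd_buffer *cmd_buffer,
                     VkPipeline example_pipeline)
{
	struct radv_meta_saved_state saved_state;

	/* Save only what this meta operation clobbers: the bound graphics
	 * pipeline (which also saves and resets viewport/scissor state),
	 * descriptor set 0 and the push constants. */
	radv_meta_save(&saved_state, cmd_buffer,
		       RADV_META_SAVE_GRAPHICS_PIPELINE |
		       RADV_META_SAVE_DESCRIPTORS |
		       RADV_META_SAVE_CONSTANTS);

	radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer),
			     VK_PIPELINE_BIND_POINT_GRAPHICS,
			     example_pipeline);

	/* ... bind descriptor set 0, push constants and emit the draw ... */

	/* Put the application's pipeline, dynamic state, descriptors and
	 * push constants back exactly as they were saved. */
	radv_meta_restore(&saved_state, cmd_buffer);
}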
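Two smaller behavioural points in the patch are worth spelling out. First, radv_builtin_cache_path() now appends the pointer width in bits (sizeof(void *) * 8) to the cache file name, so 32-bit and 64-bit builds keep separate on-disk caches; assuming the unchanged suffix string is "/radv_builtin_shaders" (it is defined outside the visible hunks), the XDG path comes out as, for example, $XDG_CACHE_HOME/radv_builtin_shaders64 on a 64-bit build. Second, radv_load_meta_pipeline() now reports whether the cache was actually loaded, and that result drives the new on_demand argument: when the cache is missing, the per-feature init functions are expected to defer pipeline creation until first use, serialized by the meta_state.mtx mutex introduced here. The helper below is a hypothetical sketch of that lazy-creation pattern (get_example_pipeline() and create_example_pipeline() are illustrative names, not functions from this patch).

/* Hypothetical sketch of the on-demand creation pattern that the new
 * on_demand flag and meta_state.mtx enable; both function names are
 * illustrative only. */
static VkResult
create_example_pipeline(struct radv_device *device, VkPipeline *pipeline);

static VkResult
get_example_pipeline(struct radv_device *device, VkPipeline *pipeline)
{
	VkResult result = VK_SUCCESS;

	if (*pipeline != VK_NULL_HANDLE)
		return VK_SUCCESS;

	/* meta_state.mtx keeps two threads that record command buffers
	 * concurrently from building the same meta pipeline twice. */
	mtx_lock(&device->meta_state.mtx);
	if (*pipeline == VK_NULL_HANDLE)
		result = create_example_pipeline(device, pipeline);
	mtx_unlock(&device->meta_state.mtx);

	return result;
}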