diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.c b/src/gallium/drivers/radeonsi/si_state_shaders.c
index dbaa2dcd5cb..d33008cdda7 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.c
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.c
@@ -19,10 +19,6 @@
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *      Christian König
- *      Marek Olšák
  */
 
 #include "si_pipe.h"
@@ -34,12 +30,14 @@
 #include "tgsi/tgsi_ureg.h"
 #include "util/hash_table.h"
 #include "util/crc32.h"
+#include "util/u_async_debug.h"
 #include "util/u_memory.h"
 #include "util/u_prim.h"
 #include "util/disk_cache.h"
 #include "util/mesa-sha1.h"
 
 #include "ac_exp_param.h"
+#include "ac_shader_util.h"
 
 /* SHADER_CACHE */
 
@@ -209,10 +207,10 @@ static bool si_shader_cache_insert_shader(struct si_screen *sscreen,
 		return false;
 	}
 
-	if (sscreen->b.disk_shader_cache && insert_into_disk_cache) {
-		disk_cache_compute_key(sscreen->b.disk_shader_cache, tgsi_binary,
+	if (sscreen->disk_shader_cache && insert_into_disk_cache) {
+		disk_cache_compute_key(sscreen->disk_shader_cache, tgsi_binary,
 				       *((uint32_t *)tgsi_binary), key);
-		disk_cache_put(sscreen->b.disk_shader_cache, key, hw_binary,
+		disk_cache_put(sscreen->disk_shader_cache, key, hw_binary,
 			       *((uint32_t *) hw_binary), NULL);
 	}
 
@@ -226,16 +224,16 @@ static bool si_shader_cache_load_shader(struct si_screen *sscreen,
 	struct hash_entry *entry =
 		_mesa_hash_table_search(sscreen->shader_cache, tgsi_binary);
 	if (!entry) {
-		if (sscreen->b.disk_shader_cache) {
+		if (sscreen->disk_shader_cache) {
 			unsigned char sha1[CACHE_KEY_SIZE];
 			size_t tg_size = *((uint32_t *) tgsi_binary);
 
-			disk_cache_compute_key(sscreen->b.disk_shader_cache,
+			disk_cache_compute_key(sscreen->disk_shader_cache,
 					       tgsi_binary, tg_size, sha1);
 
 			size_t binary_size;
 			uint8_t *buffer =
-				disk_cache_get(sscreen->b.disk_shader_cache,
+				disk_cache_get(sscreen->disk_shader_cache,
 					       sha1, &binary_size);
 			if (!buffer)
 				return false;
@@ -249,7 +247,7 @@ static bool si_shader_cache_load_shader(struct si_screen *sscreen,
 				assert(!"Invalid radeonsi shader disk cache "
 				       "item!");
 
-				disk_cache_remove(sscreen->b.disk_shader_cache,
+				disk_cache_remove(sscreen->disk_shader_cache,
 						  sha1);
 				free(buffer);
 
@@ -274,7 +272,7 @@ static bool si_shader_cache_load_shader(struct si_screen *sscreen,
 		else
 			return false;
 	}
 
-	p_atomic_inc(&sscreen->b.num_shader_cache_hits);
+	p_atomic_inc(&sscreen->num_shader_cache_hits);
 	return true;
 }
 
@@ -375,8 +373,8 @@ static void si_set_tesseval_regs(struct si_screen *sscreen,
 		topology = V_028B6C_OUTPUT_TRIANGLE_CW;
 
 	if (sscreen->has_distributed_tess) {
-		if (sscreen->b.family == CHIP_FIJI ||
-		    sscreen->b.family >= CHIP_POLARIS10)
+		if (sscreen->info.family == CHIP_FIJI ||
+		    sscreen->info.family >= CHIP_POLARIS10)
 			distribution_mode = V_028B6C_DISTRIBUTION_MODE_TRAPEZOIDS;
 		else
 			distribution_mode = V_028B6C_DISTRIBUTION_MODE_DONUTS;
@@ -411,7 +409,7 @@ static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen,
 {
 	unsigned type = sel->type;
 
-	if (sscreen->b.family < CHIP_POLARIS10)
+	if (sscreen->info.family < CHIP_POLARIS10)
 		return;
 
 	/* VS as VS, or VS as ES: */
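/* The cache hunks above only flatten sscreen->b.* into sscreen->*; the lookup
 * scheme itself is unchanged: probe the in-process hash table first, then fall
 * back to the on-disk cache keyed by a hash of the TGSI binary, and promote a
 * disk hit into memory. A minimal self-contained sketch of that order follows;
 * toy_cache, toy_memory_get and toy_disk_get are hypothetical stand-ins, not
 * Mesa APIs. */
#include <stddef.h>
#include <string.h>

struct toy_cache {
	const char *mem_key;    /* stand-in for the _mesa_hash_table entry */
	const char *mem_value;
	const char *disk_key;   /* stand-in for the disk_cache_get() path */
	const char *disk_value;
};

static const char *toy_memory_get(struct toy_cache *c, const char *key)
{
	return (c->mem_key && !strcmp(c->mem_key, key)) ? c->mem_value : NULL;
}

static const char *toy_disk_get(struct toy_cache *c, const char *key)
{
	return (c->disk_key && !strcmp(c->disk_key, key)) ? c->disk_value : NULL;
}

/* Mirrors the shape of si_shader_cache_load_shader: memory first, then disk;
 * a disk hit is inserted into the memory table so later lookups stay cheap. */
static const char *toy_cache_load(struct toy_cache *c, const char *key)
{
	const char *hit = toy_memory_get(c, key);
	if (hit)
		return hit;

	hit = toy_disk_get(c, key);
	if (hit) {
		c->mem_key = key;   /* promote, like the in-memory insert */
		c->mem_value = hit;
	}
	return hit;
}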
@@ -448,7 +446,7 @@ static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
 	unsigned vgpr_comp_cnt;
 	uint64_t va;
 
-	assert(sscreen->b.chip_class <= VI);
+	assert(sscreen->info.chip_class <= VI);
 
 	pm4 = si_get_shader_pm4_state(shader);
 	if (!pm4)
@@ -488,7 +486,7 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
 	va = shader->bo->gpu_address;
 	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
 
-	if (sscreen->b.chip_class >= GFX9) {
+	if (sscreen->info.chip_class >= GFX9) {
 		si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
 		si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS, va >> 40);
 
@@ -519,7 +517,7 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
 		       S_00B428_FLOAT_MODE(shader->config.float_mode) |
 		       S_00B428_LS_VGPR_COMP_CNT(ls_vgpr_comp_cnt));
 
-	if (sscreen->b.chip_class <= VI) {
+	if (sscreen->info.chip_class <= VI) {
 		si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS,
 			       shader->config.rsrc2);
 	}
@@ -533,7 +531,7 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
 	uint64_t va;
 	unsigned oc_lds_en;
 
-	assert(sscreen->b.chip_class <= VI);
+	assert(sscreen->info.chip_class <= VI);
 
 	pm4 = si_get_shader_pm4_state(shader);
 	if (!pm4)
@@ -581,7 +579,7 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
  */
 static uint32_t si_vgt_gs_mode(struct si_shader_selector *sel)
 {
-	enum chip_class chip_class = sel->screen->b.chip_class;
+	enum chip_class chip_class = sel->screen->info.chip_class;
 	unsigned gs_max_vert_out = sel->gs_max_out_vertices;
 	unsigned cut_mode;
 
@@ -754,7 +752,7 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
 	va = shader->bo->gpu_address;
 	si_pm4_add_bo(pm4, shader->bo, RADEON_USAGE_READ, RADEON_PRIO_SHADER_BINARY);
 
-	if (sscreen->b.chip_class >= GFX9) {
+	if (sscreen->info.chip_class >= GFX9) {
 		unsigned input_prim = sel->info.properties[TGSI_PROPERTY_GS_INPUT_PRIM];
 		unsigned es_type = shader->key.part.gs.es->type;
 		unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
@@ -873,7 +871,7 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
 		si_pm4_set_reg(pm4, R_028A84_VGT_PRIMITIVEID_EN, 0);
 	}
 
-	if (sscreen->b.chip_class <= VI) {
+	if (sscreen->info.chip_class <= VI) {
 		/* Reuse needs to be set off if we write oViewport. */
 		si_pm4_set_reg(pm4, R_028AB4_VGT_REUSE_OFF,
 			       S_028AB4_REUSE_OFF(info->writes_viewport_index));
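/* The si_shader_hs/gs hunks above program the shader's GPU virtual address as
 * a register pair: SPI_SHADER_PGM_LO takes va >> 8 (the binary is 256-byte
 * aligned) and SPI_SHADER_PGM_HI takes va >> 40. A small sketch of that split,
 * with a function name of our own choosing: */
#include <assert.h>
#include <stdint.h>

static void split_shader_va(uint64_t va, uint32_t *pgm_lo, uint32_t *pgm_hi)
{
	assert((va & 0xff) == 0);       /* binary must be 256-byte aligned */
	*pgm_lo = (uint32_t)(va >> 8);  /* address bits 8..39 */
	*pgm_hi = (uint32_t)(va >> 40); /* address bits 40..63 */
}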
@@ -983,38 +981,6 @@ static unsigned si_get_spi_shader_col_format(struct si_shader *shader)
 	return value;
 }
 
-static unsigned si_get_cb_shader_mask(unsigned spi_shader_col_format)
-{
-	unsigned i, cb_shader_mask = 0;
-
-	for (i = 0; i < 8; i++) {
-		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
-		case V_028714_SPI_SHADER_ZERO:
-			break;
-		case V_028714_SPI_SHADER_32_R:
-			cb_shader_mask |= 0x1 << (i * 4);
-			break;
-		case V_028714_SPI_SHADER_32_GR:
-			cb_shader_mask |= 0x3 << (i * 4);
-			break;
-		case V_028714_SPI_SHADER_32_AR:
-			cb_shader_mask |= 0x9 << (i * 4);
-			break;
-		case V_028714_SPI_SHADER_FP16_ABGR:
-		case V_028714_SPI_SHADER_UNORM16_ABGR:
-		case V_028714_SPI_SHADER_SNORM16_ABGR:
-		case V_028714_SPI_SHADER_UINT16_ABGR:
-		case V_028714_SPI_SHADER_SINT16_ABGR:
-		case V_028714_SPI_SHADER_32_ABGR:
-			cb_shader_mask |= 0xf << (i * 4);
-			break;
-		default:
-			assert(0);
-		}
-	}
-	return cb_shader_mask;
-}
-
 static void si_shader_ps(struct si_shader *shader)
 {
 	struct tgsi_shader_info *info = &shader->selector->info;
@@ -1097,7 +1063,7 @@ static void si_shader_ps(struct si_shader *shader)
 		spi_baryc_cntl |= S_0286E0_POS_FLOAT_ULC(1);
 
 	spi_shader_col_format = si_get_spi_shader_col_format(shader);
-	cb_shader_mask = si_get_cb_shader_mask(spi_shader_col_format);
+	cb_shader_mask = ac_get_cb_shader_mask(spi_shader_col_format);
 
 	/* Ensure that some export memory is always allocated, for two reasons:
 	 *
@@ -1126,7 +1092,7 @@ static void si_shader_ps(struct si_shader *shader)
 	si_pm4_set_reg(pm4, R_0286D8_SPI_PS_IN_CONTROL, spi_ps_in_control);
 
 	si_pm4_set_reg(pm4, R_028710_SPI_SHADER_Z_FORMAT,
-		       si_get_spi_shader_z_format(info->writes_z,
+		       ac_get_spi_shader_z_format(info->writes_z,
 						  info->writes_stencil,
 						  info->writes_samplemask));
 
@@ -1483,7 +1449,7 @@ static inline void si_shader_selector_key(struct pipe_context *ctx,
 		assert(0);
 	}
 
-	if (unlikely(sctx->screen->b.debug_flags & DBG(NO_OPT_VARIANT)))
+	if (unlikely(sctx->screen->debug_flags & DBG(NO_OPT_VARIANT)))
 		memset(&key->opt, 0, sizeof(key->opt));
 }
 
@@ -1556,6 +1522,11 @@ static bool si_check_missing_main_part(struct si_screen *sscreen,
 		if (!main_part)
 			return false;
 
+		/* We can leave the fence as permanently signaled because the
+		 * main part becomes visible globally only after it has been
+		 * compiled. */
+		util_queue_fence_init(&main_part->ready);
+
 		main_part->selector = sel;
 		main_part->key.as_es = key->as_es;
 		main_part->key.as_ls = key->as_ls;
@@ -1589,10 +1560,19 @@ again:
 	 * variants, it will cost just a computation of the key and this
 	 * test. */
 	if (likely(current &&
-		   memcmp(&current->key, key, sizeof(*key)) == 0 &&
-		   (!current->is_optimized ||
-		    util_queue_fence_is_signalled(&current->optimized_ready))))
+		   memcmp(&current->key, key, sizeof(*key)) == 0)) {
+		if (unlikely(!util_queue_fence_is_signalled(&current->ready))) {
+			if (current->is_optimized) {
+				memset(&key->opt, 0, sizeof(key->opt));
+				goto current_not_ready;
+			}
+
+			util_queue_fence_wait(&current->ready);
+		}
+
 		return current->compilation_failed ? -1 : 0;
+	}
+current_not_ready:
 
 	/* This must be done before the mutex is locked, because async GS
 	 * compilation calls this function too, and therefore must enter
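/* The deleted si_get_cb_shader_mask above did not go away; per the hunk that
 * now calls ac_get_cb_shader_mask, it moved into the shared AMD common code
 * (ac_shader_util) so radeonsi and other drivers can share it. The logic,
 * restated standalone: each color buffer has a 4-bit export-format field, and
 * the mask enables the channels that format exports (R=0x1, RG=0x3, AR=0x9,
 * four-channel=0xf). The numeric format values below mirror the
 * V_028714_SPI_SHADER_* defines as we understand them; treat them as an
 * assumption, not a register reference. */
#include <stdint.h>

static uint32_t cb_shader_mask_from_col_format(uint32_t spi_shader_col_format)
{
	uint32_t mask = 0;

	for (unsigned i = 0; i < 8; i++) {
		/* 0 = ZERO, 1 = 32_R, 2 = 32_GR, 3 = 32_AR,
		 * 4..9 = the four-channel ABGR formats. */
		switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
		case 0: break;                          /* no channels */
		case 1: mask |= 0x1u << (i * 4); break; /* R */
		case 2: mask |= 0x3u << (i * 4); break; /* RG */
		case 3: mask |= 0x9u << (i * 4); break; /* R + A */
		default: mask |= 0xfu << (i * 4); break; /* RGBA */
		}
	}
	return mask;
}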
@@ -1611,24 +1591,26 @@ again:
 		/* Don't check the "current" shader. We checked it above. */
 		if (current != iter &&
 		    memcmp(&iter->key, key, sizeof(*key)) == 0) {
-			/* If it's an optimized shader and its compilation has
-			 * been started but isn't done, use the unoptimized
-			 * shader so as not to cause a stall due to compilation.
-			 */
-			if (iter->is_optimized &&
-			    !util_queue_fence_is_signalled(&iter->optimized_ready)) {
-				memset(&key->opt, 0, sizeof(key->opt));
-				mtx_unlock(&sel->mutex);
-				goto again;
+			mtx_unlock(&sel->mutex);
+
+			if (unlikely(!util_queue_fence_is_signalled(&iter->ready))) {
+				/* If it's an optimized shader and its compilation has
+				 * been started but isn't done, use the unoptimized
+				 * shader so as not to cause a stall due to compilation.
+				 */
+				if (iter->is_optimized) {
+					memset(&key->opt, 0, sizeof(key->opt));
+					goto again;
+				}
+
+				util_queue_fence_wait(&iter->ready);
 			}
 
 			if (iter->compilation_failed) {
-				mtx_unlock(&sel->mutex);
 				return -1; /* skip the draw call */
 			}
 
 			state->current = iter;
-			mtx_unlock(&sel->mutex);
 			return 0;
 		}
 	}
@@ -1639,12 +1621,15 @@ again:
 		mtx_unlock(&sel->mutex);
 		return -ENOMEM;
 	}
+
+	util_queue_fence_init(&shader->ready);
+
 	shader->selector = sel;
 	shader->key = *key;
 	shader->compiler_ctx_state = *compiler_state;
 
 	/* If this is a merged shader, get the first shader's selector. */
-	if (sscreen->b.chip_class >= GFX9) {
+	if (sscreen->info.chip_class >= GFX9) {
 		if (sel->type == PIPE_SHADER_TESS_CTRL)
 			previous_stage_sel = key->part.tcs.ls;
 		else if (sel->type == PIPE_SHADER_GEOMETRY)
@@ -1715,16 +1700,6 @@ again:
 	shader->is_optimized =
 		!is_pure_monolithic &&
 		memcmp(&key->opt, &zeroed.opt, sizeof(key->opt)) != 0;
-	if (shader->is_optimized)
-		util_queue_fence_init(&shader->optimized_ready);
-
-	if (!sel->last_variant) {
-		sel->first_variant = shader;
-		sel->last_variant = shader;
-	} else {
-		sel->last_variant->next_variant = shader;
-		sel->last_variant = shader;
-	}
 
 	/* If it's an optimized shader, compile it asynchronously. */
 	if (shader->is_optimized &&
@@ -1732,22 +1707,46 @@ again:
 	    thread_index < 0) {
 		/* Compile it asynchronously. */
 		util_queue_add_job(&sscreen->shader_compiler_queue_low_priority,
-				   shader, &shader->optimized_ready,
+				   shader, &shader->ready,
 				   si_build_shader_variant_low_priority, NULL);
 
+		/* Add only after the ready fence was reset, to guard against a
+		 * race with si_bind_XX_shader. */
+		if (!sel->last_variant) {
+			sel->first_variant = shader;
+			sel->last_variant = shader;
+		} else {
+			sel->last_variant->next_variant = shader;
+			sel->last_variant = shader;
+		}
+
 		/* Use the default (unoptimized) shader for now. */
 		memset(&key->opt, 0, sizeof(key->opt));
 		mtx_unlock(&sel->mutex);
 		goto again;
 	}
 
+	/* Reset the fence before adding to the variant list. */
+	util_queue_fence_reset(&shader->ready);
+
+	if (!sel->last_variant) {
+		sel->first_variant = shader;
+		sel->last_variant = shader;
+	} else {
+		sel->last_variant->next_variant = shader;
+		sel->last_variant = shader;
+	}
+
+	mtx_unlock(&sel->mutex);
+
 	assert(!shader->is_optimized);
 	si_build_shader_variant(shader, thread_index, false);
 
+	util_queue_fence_signal(&shader->ready);
+
 	if (!shader->compilation_failed)
 		state->current = shader;
 
-	mtx_unlock(&sel->mutex);
 	return shader->compilation_failed ? -1 : 0;
 }
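/* The hunks above collapse the old per-variant optimized_ready fence into a
 * single ready fence: a variant enters the list only with its fence reset (or
 * left signaled for already-built parts), and a reader that finds an
 * unsignaled fence either falls back to the unoptimized key or blocks. A
 * condensed sketch of the reader side, using the same util_queue_fence API the
 * diff uses; si_variant and select_variant are hypothetical stand-ins: */
#include "util/u_queue.h"

struct si_variant {
	struct util_queue_fence ready;
	bool is_optimized;
	bool compilation_failed;
};

/* Returns 0 on success, -1 to skip the draw, 1 to retry with key->opt
 * zeroed (i.e. fall back to the unoptimized variant). */
static int select_variant(struct si_variant *v)
{
	if (!util_queue_fence_is_signalled(&v->ready)) {
		if (v->is_optimized)
			return 1;                 /* don't stall the draw */
		util_queue_fence_wait(&v->ready); /* main part: must wait */
	}
	return v->compilation_failed ? -1 : 0;
}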
@@ -1811,14 +1810,10 @@ static void si_init_shader_selector_async(void *job, int thread_index)
 	struct pipe_debug_callback *debug = &sel->compiler_ctx_state.debug;
 	unsigned i;
 
-	if (thread_index >= 0) {
-		assert(thread_index < ARRAY_SIZE(sscreen->tm));
-		tm = sscreen->tm[thread_index];
-		if (!debug->async)
-			debug = NULL;
-	} else {
-		tm = sel->compiler_ctx_state.tm;
-	}
+	assert(!debug->debug_message || debug->async);
+	assert(thread_index >= 0);
+	assert(thread_index < ARRAY_SIZE(sscreen->tm));
+	tm = sscreen->tm[thread_index];
 
 	/* Compile the main shader part for use with a prolog and/or epilog.
 	 * If this fails, the driver will try to compile a monolithic shader
@@ -1833,6 +1828,10 @@ static void si_init_shader_selector_async(void *job, int thread_index)
 			return;
 		}
 
+		/* We can leave the fence signaled because use of the default
+		 * main part is guarded by the selector's ready fence. */
+		util_queue_fence_init(&shader->ready);
+
 		shader->selector = sel;
 		si_parse_next_shader_property(&sel->info,
 					      sel->so.num_outputs != 0,
@@ -1913,9 +1912,9 @@ static void si_init_shader_selector_async(void *job, int thread_index)
 	}
 
 	/* Pre-compilation. */
-	if (sscreen->b.debug_flags & DBG(PRECOMPILE) &&
+	if (sscreen->debug_flags & DBG(PRECOMPILE) &&
 	    /* GFX9 needs LS or ES for compilation, which we don't have here. */
-	    (sscreen->b.chip_class <= VI ||
+	    (sscreen->info.chip_class <= VI ||
 	     (sel->type != PIPE_SHADER_TESS_CTRL &&
 	      sel->type != PIPE_SHADER_GEOMETRY))) {
 		struct si_shader_ctx_state state = {sel};
@@ -1927,7 +1926,7 @@ static void si_init_shader_selector_async(void *job, int thread_index)
 					      &key);
 
 		/* GFX9 doesn't have LS and ES. */
-		if (sscreen->b.chip_class >= GFX9) {
+		if (sscreen->info.chip_class >= GFX9) {
 			key.as_ls = 0;
 			key.as_es = 0;
 		}
@@ -2009,8 +2008,7 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
 
 	pipe_reference_init(&sel->reference, 1);
 	sel->screen = sscreen;
-	sel->compiler_ctx_state.tm = sctx->tm;
-	sel->compiler_ctx_state.debug = sctx->b.debug;
+	sel->compiler_ctx_state.debug = sctx->debug;
 	sel->compiler_ctx_state.is_debug_context = sctx->is_debug;
 	sel->so = state->stream_output;
 
@@ -2035,7 +2033,7 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
 	}
 
 	sel->type = sel->info.processor;
-	p_atomic_inc(&sscreen->b.num_shaders_created);
+	p_atomic_inc(&sscreen->num_shaders_created);
 	si_get_active_slot_masks(&sel->info,
 				 &sel->active_const_and_shader_buffers,
 				 &sel->active_samplers_and_images);
@@ -2052,6 +2050,12 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
 		sel->info.num_inputs &&
 		!sel->info.properties[TGSI_PROPERTY_VS_BLIT_SGPRS];
 
+	sel->force_correct_derivs_after_kill =
+		sel->type == PIPE_SHADER_FRAGMENT &&
+		sel->info.uses_derivatives &&
+		sel->info.uses_kill &&
+		sctx->screen->debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL);
+
 	/* Set which opcode uses which (i,j) pair. */
 	if (sel->info.uses_persp_opcode_interp_centroid)
 		sel->info.uses_persp_centroid = true;
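/* The last hunk above adds an opt-in correctness flag: when a fragment shader
 * both computes derivatives and can kill pixels, killed/helper pixels must
 * keep executing for the derivatives of the surviving pixels in the same quad
 * to stay well defined (that is our reading of the flag's purpose). The
 * condition itself, restated as a standalone predicate; the struct is a
 * hypothetical stand-in for tgsi_shader_info: */
#include <stdbool.h>

struct shader_info_sketch {
	bool is_fragment;
	bool uses_derivatives;
	bool uses_kill;
};

static bool needs_correct_derivs_after_kill(const struct shader_info_sketch *info,
                                            bool debug_flag_enabled)
{
	/* Only meaningful for fragment shaders, and only when requested via
	 * the FS_CORRECT_DERIVS_AFTER_KILL debug flag. */
	return info->is_fragment &&
	       info->uses_derivatives &&
	       info->uses_kill &&
	       debug_flag_enabled;
}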
@@ -2233,14 +2237,26 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
 	(void) mtx_init(&sel->mutex, mtx_plain);
 	util_queue_fence_init(&sel->ready);
 
-	if ((sctx->b.debug.debug_message && !sctx->b.debug.async) ||
-	    sctx->is_debug ||
-	    si_can_dump_shader(&sscreen->b, sel->info.processor))
-		si_init_shader_selector_async(sel, -1);
-	else
-		util_queue_add_job(&sscreen->shader_compiler_queue, sel,
-				   &sel->ready, si_init_shader_selector_async,
-				   NULL);
+	struct util_async_debug_callback async_debug;
+	bool wait =
+		(sctx->debug.debug_message && !sctx->debug.async) ||
+		sctx->is_debug ||
+		si_can_dump_shader(sscreen, sel->info.processor);
+
+	if (wait) {
+		u_async_debug_init(&async_debug);
+		sel->compiler_ctx_state.debug = async_debug.base;
+	}
+
+	util_queue_add_job(&sscreen->shader_compiler_queue, sel,
+			   &sel->ready, si_init_shader_selector_async,
+			   NULL);
+
+	if (wait) {
+		util_queue_fence_wait(&sel->ready);
+		u_async_debug_drain(&async_debug, &sctx->debug);
+		u_async_debug_cleanup(&async_debug);
+	}
 
 	return sel;
 }
@@ -2252,9 +2268,9 @@ static void si_update_streamout_state(struct si_context *sctx)
 	if (!shader_with_so)
 		return;
 
-	sctx->b.streamout.enabled_stream_buffers_mask =
+	sctx->streamout.enabled_stream_buffers_mask =
 		shader_with_so->enabled_streamout_buffer_mask;
-	sctx->b.streamout.stride_in_dw = shader_with_so->so.stride;
+	sctx->streamout.stride_in_dw = shader_with_so->so.stride;
 }
 
 static void si_update_clip_regs(struct si_context *sctx,
@@ -2446,10 +2462,11 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
 {
 	if (shader->is_optimized) {
 		util_queue_drop_job(&sctx->screen->shader_compiler_queue_low_priority,
-				    &shader->optimized_ready);
-		util_queue_fence_destroy(&shader->optimized_ready);
+				    &shader->ready);
 	}
 
+	util_queue_fence_destroy(&shader->ready);
+
 	if (shader->pm4) {
 		switch (shader->selector->type) {
 		case PIPE_SHADER_VERTEX:
@@ -2673,7 +2690,7 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
 	struct si_pm4_state *pm4;
 
 	/* Chip constants. */
-	unsigned num_se = sctx->screen->b.info.max_se;
+	unsigned num_se = sctx->screen->info.max_se;
 	unsigned wave_size = 64;
 	unsigned max_gs_waves = 32 * num_se; /* max 32 per SE on GCN */
 	/* On SI-CI, the value comes from VGT_GS_VERTEX_REUSE = 16.
@@ -2774,7 +2791,7 @@ static bool si_update_gs_ring_buffers(struct si_context *sctx)
 
 	/* Flush the context to re-emit both init_config states. */
 	sctx->b.initial_gfx_cs_size = 0; /* force flush */
-	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+	si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 
 	/* Set ring bindings. */
 	if (sctx->esgs_ring) {
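/* The si_create_shader_selector hunk above stops compiling synchronously in
 * debug contexts; instead it always queues the job and, when the caller needs
 * the result or the messages immediately, it routes the debug callback through
 * util_async_debug, waits on the fence, then replays the captured messages. A
 * condensed sketch of that shape; queue_job and struct job are hypothetical
 * stand-ins, while the u_async_debug and util_queue_fence calls are the ones
 * the diff itself uses: */
#include "pipe/p_state.h"
#include "util/u_async_debug.h"
#include "util/u_queue.h"

struct job {
	struct util_queue_fence ready;
	struct pipe_debug_callback debug; /* what the worker reports into */
};

extern void queue_job(struct job *j); /* hypothetical: enqueue, then signal ready */

static void run_job_maybe_blocking(struct job *j,
                                   struct pipe_debug_callback *dst,
                                   bool need_result_now)
{
	struct util_async_debug_callback async_debug;

	if (need_result_now) {
		/* Capture messages emitted from the worker thread safely. */
		u_async_debug_init(&async_debug);
		j->debug = async_debug.base;
	}

	queue_job(j);

	if (need_result_now) {
		util_queue_fence_wait(&j->ready);
		/* Replay the captured messages into the real callback. */
		u_async_debug_drain(&async_debug, dst);
		u_async_debug_cleanup(&async_debug);
	}
}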
@@ -2973,7 +2990,7 @@ static bool si_update_spi_tmpring_size(struct si_context *sctx)
 		r600_resource_reference(&sctx->scratch_buffer, NULL);
 
 		sctx->scratch_buffer = (struct r600_resource*)
-			si_aligned_buffer_create(&sctx->screen->b.b,
+			si_aligned_buffer_create(&sctx->screen->b,
 						 R600_RESOURCE_FLAG_UNMAPPABLE,
 						 PIPE_USAGE_DEFAULT,
 						 scratch_needed_size, 256);
@@ -2981,8 +2998,8 @@ static bool si_update_spi_tmpring_size(struct si_context *sctx)
 			return false;
 
 		si_mark_atom_dirty(sctx, &sctx->scratch_state);
-		r600_context_add_resource_size(&sctx->b.b,
-					       &sctx->scratch_buffer->b.b);
+		si_context_add_resource_size(&sctx->b.b,
+					     &sctx->scratch_buffer->b.b);
 	}
 
 	if (!si_update_scratch_relocs(sctx))
@@ -3012,7 +3029,7 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
 	 */
 	unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;
 	unsigned max_offchip_buffers = max_offchip_buffers_per_se *
-				       sctx->screen->b.info.max_se;
+				       sctx->screen->info.max_se;
 	unsigned offchip_granularity;
 
 	switch (sctx->screen->tess_offchip_block_dw_size) {
@@ -3034,7 +3051,7 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
 	sctx->tf_ring = si_aligned_buffer_create(sctx->b.b.screen,
 						 R600_RESOURCE_FLAG_UNMAPPABLE,
 						 PIPE_USAGE_DEFAULT,
-						 32768 * sctx->screen->b.info.max_se,
+						 32768 * sctx->screen->info.max_se,
 						 64 * 1024);
 	if (!sctx->tf_ring)
 		return;
@@ -3113,7 +3130,7 @@ static void si_init_tess_factor_ring(struct si_context *sctx)
 	 */
 	si_pm4_upload_indirect_buffer(sctx, sctx->init_config);
 	sctx->b.initial_gfx_cs_size = 0; /* force flush */
-	si_context_gfx_flush(sctx, RADEON_FLUSH_ASYNC, NULL);
+	si_context_gfx_flush(sctx, PIPE_FLUSH_ASYNC, NULL);
 }
 
 /**
@@ -3197,7 +3214,7 @@ bool si_update_shaders(struct si_context *sctx)
 	int r;
 
 	compiler_state.tm = sctx->tm;
-	compiler_state.debug = sctx->b.debug;
+	compiler_state.debug = sctx->debug;
 	compiler_state.is_debug_context = sctx->is_debug;
 
 	/* Update stages before GS. */
@@ -3318,7 +3335,7 @@ bool si_update_shaders(struct si_context *sctx)
 		si_mark_atom_dirty(sctx, &sctx->spi_map);
 	}
 
-	if (sctx->screen->b.rbplus_allowed &&
+	if (sctx->screen->rbplus_allowed &&
 	    si_pm4_state_changed(sctx, ps) &&
 	    (!old_ps ||
 	     old_spi_shader_col_format !=
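/* si_init_tess_factor_ring above sizes the tessellation off-chip resources per
 * shader engine: 127 off-chip buffers per SE when double off-chip buffering is
 * available, 63 otherwise, multiplied by the SE count, and a tess-factor ring
 * of 32768 bytes per SE. Restated as plain arithmetic, with values taken
 * straight from the hunks; the function name is ours: */
#include <stdbool.h>

static unsigned max_offchip_tess_buffers(bool double_offchip_buffers,
                                         unsigned max_se)
{
	unsigned per_se = double_offchip_buffers ? 127 : 63;
	return per_se * max_se;
}

/* Example: a 4-SE chip with double off-chip buffering gets 127 * 4 = 508
 * buffers and a 32768 * 4 = 128 KiB tess-factor ring. */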