X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fgallium%2Fdrivers%2Fradeonsi%2Fsi_compute_blit.c;h=e07042adf3fe5e7951e12e95a02e781b6da4aa21;hb=1909bc526dd0e51d49252a1fc1c2aab0a8a51fb5;hp=31f5261ad8d0508a6902464478e948a19cffe6d0;hpb=1b25d340b791ad8350bdfb27f1a91ac79fa17748;p=mesa.git diff --git a/src/gallium/drivers/radeonsi/si_compute_blit.c b/src/gallium/drivers/radeonsi/si_compute_blit.c index 31f5261ad8d..e07042adf3f 100644 --- a/src/gallium/drivers/radeonsi/si_compute_blit.c +++ b/src/gallium/drivers/radeonsi/si_compute_blit.c @@ -25,6 +25,7 @@ #include "si_pipe.h" #include "util/u_format.h" +#include "util/format_srgb.h" /* Note: Compute shaders always use SI_COMPUTE_DST_CACHE_POLICY for dst * and L2_STREAM for src. @@ -35,7 +36,7 @@ static enum si_cache_policy get_cache_policy(struct si_context *sctx, { if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META || coher == SI_COHERENCY_CP)) || - (sctx->chip_class >= CIK && coher == SI_COHERENCY_SHADER)) + (sctx->chip_class >= GFX7 && coher == SI_COHERENCY_SHADER)) return size <= 256 * 1024 ? L2_LRU : L2_STREAM; return L2_BYPASS; @@ -50,14 +51,28 @@ unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher, case SI_COHERENCY_CP: return 0; case SI_COHERENCY_SHADER: - return SI_CONTEXT_INV_SMEM_L1 | - SI_CONTEXT_INV_VMEM_L1 | - (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_GLOBAL_L2 : 0); + return SI_CONTEXT_INV_SCACHE | + SI_CONTEXT_INV_VCACHE | + (cache_policy == L2_BYPASS ? SI_CONTEXT_INV_L2 : 0); case SI_COHERENCY_CB_META: return SI_CONTEXT_FLUSH_AND_INV_CB; } } +static void si_compute_internal_begin(struct si_context *sctx) +{ + sctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS; + sctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS; + sctx->render_cond_force_off = true; +} + +static void si_compute_internal_end(struct si_context *sctx) +{ + sctx->flags &= ~SI_CONTEXT_STOP_PIPELINE_STATS; + sctx->flags |= SI_CONTEXT_START_PIPELINE_STATS; + sctx->render_cond_force_off = false; +} + static void si_compute_do_clear_or_copy(struct si_context *sctx, struct pipe_resource *dst, unsigned dst_offset, @@ -77,6 +92,7 @@ static void si_compute_do_clear_or_copy(struct si_context *sctx, assert(dst->target != PIPE_BUFFER || dst_offset + size <= dst->width0); assert(!src || src_offset + size <= src->width0); + si_compute_internal_begin(sctx); sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH | si_get_flush_flags(sctx, coher, SI_COMPUTE_DST_CACHE_POLICY); @@ -86,6 +102,13 @@ static void si_compute_do_clear_or_copy(struct si_context *sctx, struct pipe_shader_buffer saved_sb[2] = {}; si_get_shader_buffers(sctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb); + unsigned saved_writable_mask = 0; + for (unsigned i = 0; i < (src ? 2 : 1); i++) { + if (sctx->const_and_shader_buffers[PIPE_SHADER_COMPUTE].writable_mask & + (1u << si_get_shaderbuf_slot(i))) + saved_writable_mask |= 1 << i; + } + /* The memory accesses are coalesced, meaning that the 1st instruction writes * the 1st contiguous block of data for the whole wave, the 2nd instruction * writes the 2nd contiguous block of data, etc. 
@@ -94,13 +117,14 @@ static void si_compute_do_clear_or_copy(struct si_context *sctx, SI_COMPUTE_CLEAR_DW_PER_THREAD; unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4); unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread; - unsigned dwords_per_wave = dwords_per_thread * 64; + unsigned wave_size = sctx->screen->compute_wave_size; + unsigned dwords_per_wave = dwords_per_thread * wave_size; unsigned num_dwords = size / 4; unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction); struct pipe_grid_info info = {}; - info.block[0] = MIN2(64, num_instructions); + info.block[0] = MIN2(wave_size, num_instructions); info.block[1] = 1; info.block[2] = 1; info.grid[0] = DIV_ROUND_UP(num_dwords, dwords_per_wave); @@ -119,7 +143,7 @@ static void si_compute_do_clear_or_copy(struct si_context *sctx, sb[1].buffer_offset = src_offset; sb[1].buffer_size = size; - ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 2, sb); + ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 2, sb, 0x1); if (!sctx->cs_copy_buffer) { sctx->cs_copy_buffer = si_create_dma_compute_shader(&sctx->b, @@ -135,7 +159,7 @@ static void si_compute_do_clear_or_copy(struct si_context *sctx, for (unsigned i = 0; i < 4; i++) sctx->cs_user_data[i] = clear_value[i % (clear_value_size / 4)]; - ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, sb); + ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, sb, 0x1); if (!sctx->cs_clear_buffer) { sctx->cs_clear_buffer = si_create_dma_compute_shader(&sctx->b, @@ -149,24 +173,27 @@ static void si_compute_do_clear_or_copy(struct si_context *sctx, enum si_cache_policy cache_policy = get_cache_policy(sctx, coher, size); sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | - (cache_policy == L2_BYPASS ? SI_CONTEXT_WRITEBACK_GLOBAL_L2 : 0); + (cache_policy == L2_BYPASS ? SI_CONTEXT_WB_L2 : 0); if (cache_policy != L2_BYPASS) - r600_resource(dst)->TC_L2_dirty = true; + si_resource(dst)->TC_L2_dirty = true; /* Restore states. */ ctx->bind_compute_state(ctx, saved_cs); - ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb); + ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, src ? 2 : 1, saved_sb, + saved_writable_mask); + si_compute_internal_end(sctx); } void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst, uint64_t offset, uint64_t size, uint32_t *clear_value, - uint32_t clear_value_size, enum si_coherency coher) + uint32_t clear_value_size, enum si_coherency coher, + bool force_cpdma) { if (!size) return; - unsigned clear_alignment = MIN2(clear_value_size, 4); + ASSERTED unsigned clear_alignment = MIN2(clear_value_size, 4); assert(clear_value_size != 3 && clear_value_size != 6); /* 12 is allowed. */ assert(offset % clear_alignment == 0); @@ -225,9 +252,10 @@ void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst, * about buffer placements. 
*/ if (clear_value_size > 4 || - (clear_value_size == 4 && + (!force_cpdma && + clear_value_size == 4 && offset % 4 == 0 && - (size > 32*1024 || sctx->chip_class <= VI))) { + (size > 32*1024 || sctx->chip_class <= GFX8))) { si_compute_do_clear_or_copy(sctx, dst, offset, NULL, 0, aligned_size, clear_value, clear_value_size, coher); @@ -258,15 +286,8 @@ static void si_pipe_clear_buffer(struct pipe_context *ctx, const void *clear_value, int clear_value_size) { - enum si_coherency coher; - - if (dst->flags & SI_RESOURCE_FLAG_SO_FILLED_SIZE) - coher = SI_COHERENCY_CP; - else - coher = SI_COHERENCY_SHADER; - si_clear_buffer((struct si_context*)ctx, dst, offset, size, (uint32_t*)clear_value, - clear_value_size, coher); + clear_value_size, SI_COHERENCY_SHADER, false); } void si_copy_buffer(struct si_context *sctx, @@ -281,8 +302,8 @@ void si_copy_buffer(struct si_context *sctx, /* Only use compute for VRAM copies on dGPUs. */ if (sctx->screen->info.has_dedicated_vram && - r600_resource(dst)->domains & RADEON_DOMAIN_VRAM && - r600_resource(src)->domains & RADEON_DOMAIN_VRAM && + si_resource(dst)->domains & RADEON_DOMAIN_VRAM && + si_resource(src)->domains & RADEON_DOMAIN_VRAM && size > 32 * 1024 && dst_offset % 4 == 0 && src_offset % 4 == 0 && size % 4 == 0) { si_compute_do_clear_or_copy(sctx, dst, dst_offset, src, src_offset, @@ -311,9 +332,14 @@ void si_compute_copy_image(struct si_context *sctx, if (width == 0 || height == 0) return; + si_compute_internal_begin(sctx); sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM); - si_make_CB_shader_coherent(sctx, dst->nr_samples, true); + + /* src and dst have the same number of samples. */ + si_make_CB_shader_coherent(sctx, src->nr_samples, true, + /* Only src can have DCC.*/ + ((struct si_texture*)src)->surface.u.gfx9.dcc.pipe_aligned); struct pipe_constant_buffer saved_cb = {}; si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb); @@ -370,7 +396,7 @@ void si_compute_copy_image(struct si_context *sctx, si_create_copy_image_compute_shader_1d_array(ctx); ctx->bind_compute_state(ctx, sctx->cs_copy_image_1d_array); info.block[0] = 64; - sctx->compute_last_block[0] = width % 64; + info.last_block[0] = width % 64; info.block[1] = 1; info.block[2] = 1; info.grid[0] = DIV_ROUND_UP(width, 64); @@ -381,9 +407,9 @@ void si_compute_copy_image(struct si_context *sctx, sctx->cs_copy_image = si_create_copy_image_compute_shader(ctx); ctx->bind_compute_state(ctx, sctx->cs_copy_image); info.block[0] = 8; - sctx->compute_last_block[0] = width % 8; + info.last_block[0] = width % 8; info.block[1] = 8; - sctx->compute_last_block[1] = height % 8; + info.last_block[1] = height % 8; info.block[2] = 1; info.grid[0] = DIV_ROUND_UP(width, 8); info.grid[1] = DIV_ROUND_UP(height, 8); @@ -392,18 +418,275 @@ void si_compute_copy_image(struct si_context *sctx, ctx->launch_grid(ctx, &info); - sctx->compute_last_block[0] = 0; - sctx->compute_last_block[1] = 0; - sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | - (sctx->chip_class <= VI ? SI_CONTEXT_WRITEBACK_GLOBAL_L2 : 0) | + (sctx->chip_class <= GFX8 ? 
SI_CONTEXT_WB_L2 : 0) | si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM); ctx->bind_compute_state(ctx, saved_cs); ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, saved_image); ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb); + si_compute_internal_end(sctx); +} + +void si_retile_dcc(struct si_context *sctx, struct si_texture *tex) +{ + struct pipe_context *ctx = &sctx->b; + + sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | + SI_CONTEXT_CS_PARTIAL_FLUSH | + si_get_flush_flags(sctx, SI_COHERENCY_CB_META, L2_LRU) | + si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_LRU); + sctx->emit_cache_flush(sctx); + + /* Save states. */ + void *saved_cs = sctx->cs_shader_state.program; + struct pipe_image_view saved_img[3] = {}; + + for (unsigned i = 0; i < 3; i++) { + util_copy_image_view(&saved_img[i], + &sctx->images[PIPE_SHADER_COMPUTE].views[i]); + } + + /* Set images. */ + bool use_uint16 = tex->surface.u.gfx9.dcc_retile_use_uint16; + unsigned num_elements = tex->surface.u.gfx9.dcc_retile_num_elements; + struct pipe_image_view img[3]; + + assert(tex->surface.dcc_retile_map_offset && tex->surface.dcc_retile_map_offset <= UINT_MAX); + assert(tex->surface.dcc_offset && tex->surface.dcc_offset <= UINT_MAX); + assert(tex->surface.display_dcc_offset && tex->surface.display_dcc_offset <= UINT_MAX); + + for (unsigned i = 0; i < 3; i++) { + img[i].resource = &tex->buffer.b.b; + img[i].access = i == 2 ? PIPE_IMAGE_ACCESS_WRITE : PIPE_IMAGE_ACCESS_READ; + img[i].shader_access = SI_IMAGE_ACCESS_AS_BUFFER; + } + + img[0].format = use_uint16 ? PIPE_FORMAT_R16G16B16A16_UINT : + PIPE_FORMAT_R32G32B32A32_UINT; + img[0].u.buf.offset = tex->surface.dcc_retile_map_offset; + img[0].u.buf.size = num_elements * (use_uint16 ? 2 : 4); + + img[1].format = PIPE_FORMAT_R8_UINT; + img[1].u.buf.offset = tex->surface.dcc_offset; + img[1].u.buf.size = tex->surface.dcc_size; + + img[2].format = PIPE_FORMAT_R8_UINT; + img[2].u.buf.offset = tex->surface.display_dcc_offset; + img[2].u.buf.size = tex->surface.u.gfx9.display_dcc_size; + + ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 3, img); + + /* Bind the compute shader. */ + if (!sctx->cs_dcc_retile) + sctx->cs_dcc_retile = si_create_dcc_retile_cs(ctx); + ctx->bind_compute_state(ctx, sctx->cs_dcc_retile); + + /* Dispatch compute. */ + /* img[0] has 4 channels per element containing 2 pairs of DCC offsets. */ + unsigned num_threads = num_elements / 4; + + struct pipe_grid_info info = {}; + info.block[0] = 64; + info.block[1] = 1; + info.block[2] = 1; + info.grid[0] = DIV_ROUND_UP(num_threads, 64); /* includes the partial block */ + info.grid[1] = 1; + info.grid[2] = 1; + info.last_block[0] = num_threads % 64; + + ctx->launch_grid(ctx, &info); + + /* Don't flush caches or wait. The driver will wait at the end of this IB, + * and L2 will be flushed by the kernel fence. + */ + + /* Restore states. */ + ctx->bind_compute_state(ctx, saved_cs); + ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 3, saved_img); +} + +/* Expand FMASK to make it identity, so that image stores can ignore it. */ +void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex) +{ + struct si_context *sctx = (struct si_context *)ctx; + bool is_array = tex->target == PIPE_TEXTURE_2D_ARRAY; + unsigned log_fragments = util_logbase2(tex->nr_storage_samples); + unsigned log_samples = util_logbase2(tex->nr_samples); + assert(tex->nr_samples >= 2); + + /* EQAA FMASK expansion is unimplemented. 
*/ + if (tex->nr_samples != tex->nr_storage_samples) + return; + + si_compute_internal_begin(sctx); + + /* Flush caches and sync engines. */ + sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | + si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM); + si_make_CB_shader_coherent(sctx, tex->nr_samples, true, + true /* DCC is not possible with image stores */); + + /* Save states. */ + void *saved_cs = sctx->cs_shader_state.program; + struct pipe_image_view saved_image = {0}; + util_copy_image_view(&saved_image, &sctx->images[PIPE_SHADER_COMPUTE].views[0]); + + /* Bind the image. */ + struct pipe_image_view image = {0}; + image.resource = tex; + /* Don't set WRITE so as not to trigger FMASK expansion, causing + * an infinite loop. */ + image.shader_access = image.access = PIPE_IMAGE_ACCESS_READ; + image.format = util_format_linear(tex->format); + if (is_array) + image.u.tex.last_layer = tex->array_size - 1; + + ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &image); + + /* Bind the shader. */ + void **shader = &sctx->cs_fmask_expand[log_samples - 1][is_array]; + if (!*shader) + *shader = si_create_fmask_expand_cs(ctx, tex->nr_samples, is_array); + ctx->bind_compute_state(ctx, *shader); + + /* Dispatch compute. */ + struct pipe_grid_info info = {0}; + info.block[0] = 8; + info.last_block[0] = tex->width0 % 8; + info.block[1] = 8; + info.last_block[1] = tex->height0 % 8; + info.block[2] = 1; + info.grid[0] = DIV_ROUND_UP(tex->width0, 8); + info.grid[1] = DIV_ROUND_UP(tex->height0, 8); + info.grid[2] = is_array ? tex->array_size : 1; + + ctx->launch_grid(ctx, &info); + + /* Flush caches and sync engines. */ + sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | + (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) | + si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM); + + /* Restore previous states. */ + ctx->bind_compute_state(ctx, saved_cs); + ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image); + si_compute_internal_end(sctx); + + /* Array of fully expanded FMASK values, arranged by [log2(fragments)][log2(samples)-1]. */ +#define INVALID 0 /* never used */ + static const uint64_t fmask_expand_values[][4] = { + /* samples */ + /* 2 (8 bpp) 4 (8 bpp) 8 (8-32bpp) 16 (16-64bpp) fragments */ + {0x02020202, 0x0E0E0E0E, 0xFEFEFEFE, 0xFFFEFFFE}, /* 1 */ + {0x02020202, 0xA4A4A4A4, 0xAAA4AAA4, 0xAAAAAAA4}, /* 2 */ + {INVALID, 0xE4E4E4E4, 0x44443210, 0x4444444444443210}, /* 4 */ + {INVALID, INVALID, 0x76543210, 0x8888888876543210}, /* 8 */ + }; + + /* Clear FMASK to identity. */ + struct si_texture *stex = (struct si_texture*)tex; + si_clear_buffer(sctx, tex, stex->surface.fmask_offset, stex->surface.fmask_size, + (uint32_t*)&fmask_expand_values[log_fragments][log_samples - 1], + 4, SI_COHERENCY_SHADER, false); } void si_init_compute_blit_functions(struct si_context *sctx) { sctx->b.clear_buffer = si_pipe_clear_buffer; } + +/* Clear a region of a color surface to a constant value. 
*/ +void si_compute_clear_render_target(struct pipe_context *ctx, + struct pipe_surface *dstsurf, + const union pipe_color_union *color, + unsigned dstx, unsigned dsty, + unsigned width, unsigned height, + bool render_condition_enabled) +{ + struct si_context *sctx = (struct si_context *)ctx; + unsigned num_layers = dstsurf->u.tex.last_layer - dstsurf->u.tex.first_layer + 1; + unsigned data[4 + sizeof(color->ui)] = {dstx, dsty, dstsurf->u.tex.first_layer, 0}; + + if (width == 0 || height == 0) + return; + + if (util_format_is_srgb(dstsurf->format)) { + union pipe_color_union color_srgb; + for (int i = 0; i < 3; i++) + color_srgb.f[i] = util_format_linear_to_srgb_float(color->f[i]); + color_srgb.f[3] = color->f[3]; + memcpy(data + 4, color_srgb.ui, sizeof(color->ui)); + } else { + memcpy(data + 4, color->ui, sizeof(color->ui)); + } + + si_compute_internal_begin(sctx); + sctx->render_cond_force_off = !render_condition_enabled; + + sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | + si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM); + si_make_CB_shader_coherent(sctx, dstsurf->texture->nr_samples, true, + true /* DCC is not possible with image stores */); + + struct pipe_constant_buffer saved_cb = {}; + si_get_pipe_constant_buffer(sctx, PIPE_SHADER_COMPUTE, 0, &saved_cb); + + struct si_images *images = &sctx->images[PIPE_SHADER_COMPUTE]; + struct pipe_image_view saved_image = {0}; + util_copy_image_view(&saved_image, &images->views[0]); + + void *saved_cs = sctx->cs_shader_state.program; + + struct pipe_constant_buffer cb = {}; + cb.buffer_size = sizeof(data); + cb.user_buffer = data; + ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb); + + struct pipe_image_view image = {0}; + image.resource = dstsurf->texture; + image.shader_access = image.access = PIPE_IMAGE_ACCESS_WRITE; + image.format = util_format_linear(dstsurf->format); + image.u.tex.level = dstsurf->u.tex.level; + image.u.tex.first_layer = 0; /* 3D images ignore first_layer (BASE_ARRAY) */ + image.u.tex.last_layer = dstsurf->u.tex.last_layer; + + ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &image); + + struct pipe_grid_info info = {0}; + + if (dstsurf->texture->target != PIPE_TEXTURE_1D_ARRAY) { + if (!sctx->cs_clear_render_target) + sctx->cs_clear_render_target = si_clear_render_target_shader(ctx); + ctx->bind_compute_state(ctx, sctx->cs_clear_render_target); + info.block[0] = 8; + info.last_block[0] = width % 8; + info.block[1] = 8; + info.last_block[1] = height % 8; + info.block[2] = 1; + info.grid[0] = DIV_ROUND_UP(width, 8); + info.grid[1] = DIV_ROUND_UP(height, 8); + info.grid[2] = num_layers; + } else { + if (!sctx->cs_clear_render_target_1d_array) + sctx->cs_clear_render_target_1d_array = + si_clear_render_target_shader_1d_array(ctx); + ctx->bind_compute_state(ctx, sctx->cs_clear_render_target_1d_array); + info.block[0] = 64; + info.last_block[0] = width % 64; + info.block[1] = 1; + info.block[2] = 1; + info.grid[0] = DIV_ROUND_UP(width, 64); + info.grid[1] = num_layers; + info.grid[2] = 1; + } + + ctx->launch_grid(ctx, &info); + + sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH | + (sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0) | + si_get_flush_flags(sctx, SI_COHERENCY_SHADER, L2_STREAM); + ctx->bind_compute_state(ctx, saved_cs); + ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image); + ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb); + si_compute_internal_end(sctx); +}
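
The hunks above that parameterize the DMA clear/copy dispatch by wave size are easier to follow with the arithmetic spelled out. Below is a minimal standalone sketch (not driver code) of the sizing in si_compute_do_clear_or_copy(); dwords_per_thread and wave_size are illustrative stand-ins for SI_COMPUTE_{CLEAR,COPY}_DW_PER_THREAD and sctx->screen->compute_wave_size, and the helper macros are redefined locally so the sketch compiles on its own:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MAX2(a, b)         ((a) > (b) ? (a) : (b))
#define MIN2(a, b)         ((a) < (b) ? (a) : (b))

/* Mirrors the sizing logic in si_compute_do_clear_or_copy(). */
static void size_dma_dispatch(unsigned size_bytes, unsigned dwords_per_thread,
                              unsigned wave_size)
{
   /* Stores are split into vector instructions of at most 4 dwords, so
    * instruction N of every thread in a wave writes the N-th contiguous
    * block of memory (the coalescing described in the code comment). */
   unsigned instructions_per_thread = MAX2(1, dwords_per_thread / 4);
   unsigned dwords_per_instruction = dwords_per_thread / instructions_per_thread;
   unsigned dwords_per_wave = dwords_per_thread * wave_size;

   unsigned num_dwords = size_bytes / 4; /* this path works in whole dwords */
   unsigned num_instructions = DIV_ROUND_UP(num_dwords, dwords_per_instruction);

   unsigned block_x = MIN2(wave_size, num_instructions);       /* threads per group */
   unsigned grid_x = DIV_ROUND_UP(num_dwords, dwords_per_wave); /* groups */

   printf("%8u bytes -> block[0]=%u, grid[0]=%u (%u dwords/instruction)\n",
          size_bytes, block_x, grid_x, dwords_per_instruction);
}

int main(void)
{
   size_dma_dispatch(1024 * 1024, 4, 64); /* 1 MiB clear, Wave64 */
   size_dma_dispatch(256, 4, 64);         /* small clear: partial wave */
   return 0;
}

The image paths round the same way via pipe_grid_info::last_block, which this commit switches to from the old sctx->compute_last_block hack: grid[] is rounded up to whole blocks and a non-zero last_block[i] (e.g. width % 8) gives the reduced size of the trailing block, so threads past the surface edge are simply not launched.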
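
The buffer-clear path passes the clear pattern through sctx->cs_user_data, repeating it to fill a 16-byte vector store. A sketch of that loop, with replicate_clear_value() as a hypothetical helper name (the driver inlines it):

#include <stdint.h>

static void replicate_clear_value(const uint32_t *clear_value,
                                  unsigned clear_value_size, /* bytes: 4, 8, 12, 16 */
                                  uint32_t user_data[4])
{
   unsigned num_dwords = clear_value_size / 4;

   /* The shader stores 16-byte vectors, so the pattern must tile into
    * four dwords; this is why the asserts reject 3- and 6-byte sizes
    * while noting that 12 is allowed. */
   for (unsigned i = 0; i < 4; i++)
      user_data[i] = clear_value[i % num_dwords];
}

An 8-byte pattern {A, B} thus becomes {A, B, A, B}, and a 12-byte pattern {A, B, C} becomes {A, B, C, A}.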
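
Finally, the new util/format_srgb.h include supports the color conversion in si_compute_clear_render_target(): the clear shader writes through a linear view of the surface (image.format = util_format_linear(dstsurf->format)), so for an sRGB destination the RGB channels are pre-encoded on the CPU. A self-contained sketch of the mapping that util_format_linear_to_srgb_float() computes, written here as the analytic IEC 61966-2-1 curve (Mesa's helper is a close equivalent of this):

#include <math.h>

static float linear_to_srgb(float c)
{
   if (c <= 0.0f)
      return 0.0f;
   if (c >= 1.0f)
      return 1.0f;
   return c <= 0.0031308f ? 12.92f * c
                          : 1.055f * powf(c, 1.0f / 2.4f) - 0.055f;
}

/* Alpha stays linear, matching the i < 3 loop in
 * si_compute_clear_render_target(). */
static void encode_srgb_clear_color(const float in[4], float out[4])
{
   for (int i = 0; i < 3; i++)
      out[i] = linear_to_srgb(in[i]);
   out[3] = in[3];
}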