#include "si_pipe.h"
#include "si_public.h"
#include "si_shader_internal.h"
+#include "si_compute.h"
#include "sid.h"
#include "ac_llvm_util.h"
#include "radeon/radeon_uvd.h"
-#include "gallivm/lp_bld_misc.h"
#include "util/disk_cache.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "vl/vl_decoder.h"
#include "driver_ddebug/dd_util.h"
+#include "gallium/winsys/radeon/drm/radeon_drm_public.h"
+#include "gallium/winsys/amdgpu/drm/amdgpu_public.h"
+#include <xf86drm.h>
+
+#include <llvm/Config/llvm-config.h>
+
+static struct pipe_context *si_create_context(struct pipe_screen *screen,
+ unsigned flags);
+
static const struct debug_named_value debug_options[] = {
/* Shader logging options: */
{ "vs", DBG(VS), "Print vertex shaders" },
{ "preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations" },
/* Shader compiler options the shader cache should be aware of: */
- { "unsafemath", DBG(UNSAFE_MATH), "Enable unsafe math shader optimizations" },
{ "sisched", DBG(SI_SCHED), "Enable LLVM SI Machine Instruction Scheduler." },
{ "gisel", DBG(GISEL), "Enable LLVM global instruction selector." },
+ { "w32ge", DBG(W32_GE), "Use Wave32 for vertex, tessellation, and geometry shaders." },
+ { "w32ps", DBG(W32_PS), "Use Wave32 for pixel shaders." },
+ { "w32cs", DBG(W32_CS), "Use Wave32 for computes shaders." },
+ { "w64ge", DBG(W64_GE), "Use Wave64 for vertex, tessellation, and geometry shaders." },
+ { "w64ps", DBG(W64_PS), "Use Wave64 for pixel shaders." },
+ { "w64cs", DBG(W64_CS), "Use Wave64 for computes shaders." },
/* Shader compiler options (with no effect on the shader cache): */
{ "checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR" },
- { "nir", DBG(NIR), "Enable experimental NIR shaders" },
{ "mono", DBG(MONOLITHIC_SHADERS), "Use old-style monolithic shaders compiled on demand" },
{ "nooptvariant", DBG(NO_OPT_VARIANT), "Disable compiling optimized shader variants." },
{ "zerovram", DBG(ZERO_VRAM), "Clear VRAM allocations." },
/* 3D engine options: */
+ { "nogfx", DBG(NO_GFX), "Disable graphics. Only multimedia compute paths can be used." },
+ { "nongg", DBG(NO_NGG), "Disable NGG and use the legacy pipeline." },
+ { "alwayspd", DBG(ALWAYS_PD), "Always enable the primitive discard compute shader." },
+ { "pd", DBG(PD), "Enable the primitive discard compute shader for large draw calls." },
+ { "nopd", DBG(NO_PD), "Disable the primitive discard compute shader." },
{ "switch_on_eop", DBG(SWITCH_ON_EOP), "Program WD/IA to switch on end-of-packet." },
{ "nooutoforder", DBG(NO_OUT_OF_ORDER), "Disable out-of-order rasterization" },
{ "nodpbb", DBG(NO_DPBB), "Disable DPBB." },
DEBUG_NAMED_VALUE_END /* must be last */
};
-static void si_init_compiler(struct si_screen *sscreen,
- struct ac_llvm_compiler *compiler)
+void si_init_compiler(struct si_screen *sscreen, struct ac_llvm_compiler *compiler)
{
/* Only create the less-optimizing version of the compiler on APUs
* predating Ryzen (Raven). */
bool create_low_opt_compiler = !sscreen->info.has_dedicated_vram &&
- sscreen->info.chip_class <= VI;
+ sscreen->info.chip_class <= GFX8;
enum ac_target_machine_options tm_options =
(sscreen->debug_flags & DBG(SI_SCHED) ? AC_TM_SISCHED : 0) |
ac_init_llvm_compiler(compiler, sscreen->info.family, tm_options);
compiler->passes = ac_create_llvm_passes(compiler->tm);
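+ /* The Wave32 target machine, if present, gets its own pass manager. */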
+ if (compiler->tm_wave32)
+ compiler->passes_wave32 = ac_create_llvm_passes(compiler->tm_wave32);
if (compiler->low_opt_tm)
compiler->low_opt_passes = ac_create_llvm_passes(compiler->low_opt_tm);
}
static void si_destroy_compiler(struct ac_llvm_compiler *compiler)
{
- ac_destroy_llvm_passes(compiler->passes);
- ac_destroy_llvm_passes(compiler->low_opt_passes);
ac_destroy_llvm_compiler(compiler);
}
struct si_context *sctx = (struct si_context *)context;
int i;
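+ /* Wait for queued shader compile jobs to finish; they may still be using context state. */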
+ util_queue_finish(&sctx->screen->shader_compiler_queue);
+ util_queue_finish(&sctx->screen->shader_compiler_queue_low_priority);
+
/* Unreference the framebuffer normally to disable related logic
* properly.
*/
si_release_all_descriptors(sctx);
+ if (sctx->chip_class >= GFX10 && sctx->has_graphics)
+ gfx10_destroy_query(sctx);
+
pipe_resource_reference(&sctx->esgs_ring, NULL);
pipe_resource_reference(&sctx->gsvs_ring, NULL);
pipe_resource_reference(&sctx->tess_rings, NULL);
sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_render_target);
if (sctx->cs_clear_render_target_1d_array)
sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_render_target_1d_array);
+ if (sctx->cs_dcc_retile)
+ sctx->b.delete_compute_state(&sctx->b, sctx->cs_dcc_retile);
+
+ for (unsigned i = 0; i < ARRAY_SIZE(sctx->cs_fmask_expand); i++) {
+ for (unsigned j = 0; j < ARRAY_SIZE(sctx->cs_fmask_expand[i]); j++) {
+ if (sctx->cs_fmask_expand[i][j]) {
+ sctx->b.delete_compute_state(&sctx->b,
+ sctx->cs_fmask_expand[i][j]);
+ }
+ }
+ }
if (sctx->blitter)
util_blitter_destroy(sctx->blitter);
if (sctx->query_result_shader)
sctx->b.delete_compute_state(&sctx->b, sctx->query_result_shader);
+ if (sctx->sh_query_result_shader)
+ sctx->b.delete_compute_state(&sctx->b, sctx->sh_query_result_shader);
if (sctx->gfx_cs)
sctx->ws->cs_destroy(sctx->gfx_cs);
sctx->ws->fence_reference(&sctx->last_gfx_fence, NULL);
sctx->ws->fence_reference(&sctx->last_sdma_fence, NULL);
+ sctx->ws->fence_reference(&sctx->last_ib_barrier_fence, NULL);
si_resource_reference(&sctx->eop_bug_scratch, NULL);
+ si_resource_reference(&sctx->index_ring, NULL);
+ si_resource_reference(&sctx->barrier_buf, NULL);
+ si_resource_reference(&sctx->last_ib_barrier_buf, NULL);
+ pb_reference(&sctx->gds, NULL);
+ pb_reference(&sctx->gds_oa, NULL);
si_destroy_compiler(&sctx->compiler);
util_dynarray_fini(&sctx->resident_tex_needs_color_decompress);
util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
+ si_unref_sdma_uploads(sctx);
+ free(sctx->sdma_uploads);
FREE(sctx);
}
static enum pipe_reset_status si_get_reset_status(struct pipe_context *ctx)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_screen *sscreen = sctx->screen;
+ enum pipe_reset_status status = sctx->ws->ctx_query_reset_status(sctx->ctx);
+
+ if (status != PIPE_NO_RESET) {
+ /* Call the state tracker to set a no-op API dispatch. */
+ if (sctx->device_reset_callback.reset) {
+ sctx->device_reset_callback.reset(sctx->device_reset_callback.data,
+ status);
+ }
- if (sctx->screen->info.has_gpu_reset_status_query)
- return sctx->ws->ctx_query_reset_status(sctx->ctx);
-
- if (sctx->screen->info.has_gpu_reset_counter_query) {
- unsigned latest = sctx->ws->query_value(sctx->ws,
- RADEON_GPU_RESET_COUNTER);
+ /* Re-create the auxiliary context, because it won't submit
+ * any new IBs due to a GPU reset.
+ */
+ simple_mtx_lock(&sscreen->aux_context_lock);
- if (sctx->gpu_reset_counter == latest)
- return PIPE_NO_RESET;
+ struct u_log_context *aux_log = ((struct si_context *)sscreen->aux_context)->log;
+ sscreen->aux_context->set_log_context(sscreen->aux_context, NULL);
+ sscreen->aux_context->destroy(sscreen->aux_context);
- sctx->gpu_reset_counter = latest;
- return PIPE_UNKNOWN_CONTEXT_RESET;
+ sscreen->aux_context = si_create_context(&sscreen->b,
+ (sscreen->options.aux_debug ? PIPE_CONTEXT_DEBUG : 0) |
+ (sscreen->info.has_graphics ? 0 : PIPE_CONTEXT_COMPUTE_ONLY));
+ sscreen->aux_context->set_log_context(sscreen->aux_context, aux_log);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
}
-
- return PIPE_NO_RESET;
+ return status;
}
static void si_set_device_reset_callback(struct pipe_context *ctx,
sizeof(sctx->device_reset_callback));
}
-bool si_check_device_reset(struct si_context *sctx)
-{
- enum pipe_reset_status status;
-
- if (!sctx->device_reset_callback.reset)
- return false;
-
- if (!sctx->b.get_device_reset_status)
- return false;
-
- status = sctx->b.get_device_reset_status(&sctx->b);
- if (status == PIPE_NO_RESET)
- return false;
-
- sctx->device_reset_callback.reset(sctx->device_reset_callback.data, status);
- return true;
-}
-
/* Apitrace profiling:
* 1) qapitrace : Tools -> Profile: Measure CPU & GPU times
* 2) In the middle panel, zoom in (mouse wheel) on some bad draw call
static struct pipe_context *si_create_context(struct pipe_screen *screen,
unsigned flags)
{
- struct si_context *sctx = CALLOC_STRUCT(si_context);
struct si_screen* sscreen = (struct si_screen *)screen;
+
+ /* Don't create a context if it's not compute-only and hw is compute-only. */
+ if (!sscreen->info.has_graphics &&
+ !(flags & PIPE_CONTEXT_COMPUTE_ONLY))
+ return NULL;
+
+ struct si_context *sctx = CALLOC_STRUCT(si_context);
struct radeon_winsys *ws = sscreen->ws;
int shader, i;
bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;
if (!sctx)
return NULL;
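+ /* GFX6 always keeps graphics enabled; newer chips honor PIPE_CONTEXT_COMPUTE_ONLY. */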
+ sctx->has_graphics = sscreen->info.chip_class == GFX6 ||
+ !(flags & PIPE_CONTEXT_COMPUTE_ONLY);
+
if (flags & PIPE_CONTEXT_DEBUG)
sscreen->record_llvm_ir = true; /* racy but not critical */
sctx->b.screen = screen; /* this must be set first */
sctx->b.priv = NULL;
sctx->b.destroy = si_destroy_context;
- sctx->b.emit_string_marker = si_emit_string_marker;
- sctx->b.set_debug_callback = si_set_debug_callback;
- sctx->b.set_log_context = si_set_log_context;
- sctx->b.set_context_param = si_set_context_param;
sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;
sctx->family = sscreen->info.family;
sctx->chip_class = sscreen->info.chip_class;
- if (sscreen->info.has_gpu_reset_counter_query) {
- sctx->gpu_reset_counter =
- sctx->ws->query_value(sctx->ws, RADEON_GPU_RESET_COUNTER);
- }
-
- sctx->b.get_device_reset_status = si_get_reset_status;
- sctx->b.set_device_reset_callback = si_set_device_reset_callback;
-
- si_init_context_texture_functions(sctx);
- si_init_query_functions(sctx);
-
- if (sctx->chip_class == CIK ||
- sctx->chip_class == VI ||
+ if (sctx->chip_class == GFX7 ||
+ sctx->chip_class == GFX8 ||
sctx->chip_class == GFX9) {
sctx->eop_bug_scratch = si_resource(
pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
goto fail;
}
+ /* Initialize context allocators. */
sctx->allocator_zeroed_memory =
u_suballocator_create(&sctx->b, 128 * 1024,
0, PIPE_USAGE_DEFAULT,
if (!sctx->b.stream_uploader)
goto fail;
- sctx->b.const_uploader = u_upload_create(&sctx->b, 128 * 1024,
- 0, PIPE_USAGE_DEFAULT,
- SI_RESOURCE_FLAG_32BIT |
- (sscreen->cpdma_prefetch_writes_memory ?
- 0 : SI_RESOURCE_FLAG_READ_ONLY));
- if (!sctx->b.const_uploader)
- goto fail;
-
sctx->cached_gtt_allocator = u_upload_create(&sctx->b, 16 * 1024,
0, PIPE_USAGE_STAGING, 0);
if (!sctx->cached_gtt_allocator)
if (!sctx->ctx)
goto fail;
- if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
+ if (sscreen->info.num_sdma_rings &&
+ !(sscreen->debug_flags & DBG(NO_ASYNC_DMA)) &&
+ /* SDMA sometimes times out on gfx10, so disable it for now. See:
+ * https://bugs.freedesktop.org/show_bug.cgi?id=111481
+ * https://gitlab.freedesktop.org/mesa/mesa/issues/1907
+ */
+ (sctx->chip_class != GFX10 || sscreen->debug_flags & DBG(FORCE_DMA))) {
sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
(void*)si_flush_dma_cs,
sctx, stop_exec_on_failure);
}
- si_init_buffer_functions(sctx);
- si_init_clear_functions(sctx);
- si_init_blit_functions(sctx);
- si_init_compute_functions(sctx);
- si_init_compute_blit_functions(sctx);
- si_init_debug_functions(sctx);
- si_init_msaa_functions(sctx);
- si_init_streamout_functions(sctx);
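+ /* Constants are uploaded through SDMA only on dedicated-VRAM GPUs with an SDMA IB. */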
+ bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->dma_cs;
+ sctx->b.const_uploader = u_upload_create(&sctx->b, 256 * 1024,
+ 0, PIPE_USAGE_DEFAULT,
+ SI_RESOURCE_FLAG_32BIT |
+ (use_sdma_upload ?
+ SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA : 0));
+ if (!sctx->b.const_uploader)
+ goto fail;
- if (sscreen->info.has_hw_decode) {
- sctx->b.create_video_codec = si_uvd_create_decoder;
- sctx->b.create_video_buffer = si_video_buffer_create;
- } else {
- sctx->b.create_video_codec = vl_create_decoder;
- sctx->b.create_video_buffer = vl_video_buffer_create;
- }
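+ /* SDMA uploads need explicit flushes from the const uploader (see the flag above). */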
+ if (use_sdma_upload)
+ u_upload_enable_flush_explicit(sctx->b.const_uploader);
- sctx->gfx_cs = ws->cs_create(sctx->ctx, RING_GFX,
+ sctx->gfx_cs = ws->cs_create(sctx->ctx,
+ sctx->has_graphics ? RING_GFX : RING_COMPUTE,
(void*)si_flush_gfx_cs, sctx, stop_exec_on_failure);
/* Border colors. */
if (!sctx->border_color_map)
goto fail;
+ sctx->ngg = sscreen->use_ngg;
+
+ /* Initialize context functions used by graphics and compute. */
+ if (sctx->chip_class >= GFX10)
+ sctx->emit_cache_flush = gfx10_emit_cache_flush;
+ else
+ sctx->emit_cache_flush = si_emit_cache_flush;
+
+ sctx->b.emit_string_marker = si_emit_string_marker;
+ sctx->b.set_debug_callback = si_set_debug_callback;
+ sctx->b.set_log_context = si_set_log_context;
+ sctx->b.set_context_param = si_set_context_param;
+ sctx->b.get_device_reset_status = si_get_reset_status;
+ sctx->b.set_device_reset_callback = si_set_device_reset_callback;
+
si_init_all_descriptors(sctx);
+ si_init_buffer_functions(sctx);
+ si_init_clear_functions(sctx);
+ si_init_blit_functions(sctx);
+ si_init_compute_functions(sctx);
+ si_init_compute_blit_functions(sctx);
+ si_init_debug_functions(sctx);
si_init_fence_functions(sctx);
- si_init_state_functions(sctx);
- si_init_shader_functions(sctx);
- si_init_viewport_functions(sctx);
+ si_init_query_functions(sctx);
+ si_init_state_compute_functions(sctx);
+ si_init_context_texture_functions(sctx);
+
+ /* Initialize graphics-only context functions. */
+ if (sctx->has_graphics) {
+ if (sctx->chip_class >= GFX10)
+ gfx10_init_query(sctx);
+ si_init_msaa_functions(sctx);
+ si_init_shader_functions(sctx);
+ si_init_state_functions(sctx);
+ si_init_streamout_functions(sctx);
+ si_init_viewport_functions(sctx);
+
+ sctx->blitter = util_blitter_create(&sctx->b);
+ if (sctx->blitter == NULL)
+ goto fail;
+ sctx->blitter->skip_viewport_restore = true;
+
+ /* Some states are expected to be always non-NULL. */
+ sctx->noop_blend = util_blitter_get_noop_blend_state(sctx->blitter);
+ sctx->queued.named.blend = sctx->noop_blend;
+
+ sctx->noop_dsa = util_blitter_get_noop_dsa_state(sctx->blitter);
+ sctx->queued.named.dsa = sctx->noop_dsa;
+
+ sctx->discard_rasterizer_state =
+ util_blitter_get_discard_rasterizer_state(sctx->blitter);
+ sctx->queued.named.rasterizer = sctx->discard_rasterizer_state;
- if (sctx->chip_class >= CIK)
+ si_init_draw_functions(sctx);
+ si_initialize_prim_discard_tunables(sctx);
+ }
+
+ /* Initialize SDMA functions. */
+ if (sctx->chip_class >= GFX7)
cik_init_sdma_functions(sctx);
else
si_init_dma_functions(sctx);
if (sscreen->debug_flags & DBG(FORCE_DMA))
sctx->b.resource_copy_region = sctx->dma_copy;
- sctx->blitter = util_blitter_create(&sctx->b);
- if (sctx->blitter == NULL)
- goto fail;
- sctx->blitter->skip_viewport_restore = true;
-
- si_init_draw_functions(sctx);
-
sctx->sample_mask = 0xffff;
+ /* Initialize multimedia functions. */
+ if (sscreen->info.has_hw_decode) {
+ sctx->b.create_video_codec = si_uvd_create_decoder;
+ sctx->b.create_video_buffer = si_video_buffer_create;
+ } else {
+ sctx->b.create_video_codec = vl_create_decoder;
+ sctx->b.create_video_buffer = vl_video_buffer_create;
+ }
+
if (sctx->chip_class >= GFX9) {
sctx->wait_mem_scratch = si_resource(
- pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
+ pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 8));
if (!sctx->wait_mem_scratch)
goto fail;
V_370_MEM, V_370_ME, &sctx->wait_mem_number);
}
- /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
+ /* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
* if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
- if (sctx->chip_class == CIK) {
+ if (sctx->chip_class == GFX7) {
sctx->null_const_buf.buffer =
pipe_aligned_buffer_create(screen,
SI_RESOURCE_FLAG_32BIT,
goto fail;
sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;
- for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
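+ /* Compute-only contexts only need the null constant buffer bound for the compute stage. */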
+ unsigned start_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
+ for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
sctx->b.set_constant_buffer(&sctx->b, shader, i,
&sctx->null_const_buf);
/* this must be last */
si_begin_new_gfx_cs(sctx);
- if (sctx->chip_class == CIK) {
- /* Clear the NULL constant buffer, because loads should return zeros. */
+ if (sctx->chip_class == GFX7) {
+ /* Clear the NULL constant buffer, because loads should return zeros.
+ * Note that this forces CP DMA to be used, because clover deadlocks
+ * for some reason when the compute codepath is used.
+ */
uint32_t clear_value = 0;
si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0,
sctx->null_const_buf.buffer->width0,
- &clear_value, 4, SI_COHERENCY_SHADER);
+ &clear_value, 4, SI_COHERENCY_SHADER, true);
}
return &sctx->b;
fail:
* implementation for fence_server_sync is incomplete. */
return threaded_context_create(ctx, &sscreen->pool_transfers,
si_replace_buffer_storage,
- sscreen->info.drm_major >= 3 ? si_create_fence : NULL,
+ sscreen->info.is_amdgpu ? si_create_fence : NULL,
&((struct si_context*)ctx)->tc);
}
if (!sscreen->ws->unref(sscreen->ws))
return;
+ simple_mtx_destroy(&sscreen->aux_context_lock);
+
+ struct u_log_context *aux_log = ((struct si_context *)sscreen->aux_context)->log;
+ if (aux_log) {
+ sscreen->aux_context->set_log_context(sscreen->aux_context, NULL);
+ u_log_context_destroy(aux_log);
+ FREE(aux_log);
+ }
+
+ sscreen->aux_context->destroy(sscreen->aux_context);
+
util_queue_destroy(&sscreen->shader_compiler_queue);
util_queue_destroy(&sscreen->shader_compiler_queue_low_priority);
+ /* Release the reference on glsl types of the compiler threads. */
+ glsl_type_singleton_decref();
+
for (i = 0; i < ARRAY_SIZE(sscreen->compiler); i++)
si_destroy_compiler(&sscreen->compiler[i]);
struct si_shader_part *part = parts[i];
parts[i] = part->next;
- ac_shader_binary_clean(&part->binary);
+ si_shader_binary_clean(&part->binary);
FREE(part);
}
}
- mtx_destroy(&sscreen->shader_parts_mutex);
+ simple_mtx_destroy(&sscreen->shader_parts_mutex);
si_destroy_shader_cache(sscreen);
si_destroy_perfcounters(sscreen);
si_gpu_load_kill_thread(sscreen);
- mtx_destroy(&sscreen->gpu_load_mutex);
- mtx_destroy(&sscreen->aux_context_lock);
- sscreen->aux_context->destroy(sscreen->aux_context);
+ simple_mtx_destroy(&sscreen->gpu_load_mutex);
slab_destroy_parent(&sscreen->pool_transfers);
disk_cache_format_hex_id(cache_id, sha1, 20 * 2);
/* These flags affect shader compilation. */
- #define ALL_FLAGS (DBG(FS_CORRECT_DERIVS_AFTER_KILL) | \
- DBG(SI_SCHED) | \
- DBG(GISEL) | \
- DBG(UNSAFE_MATH) | \
- DBG(NIR))
- uint64_t shader_debug_flags = sscreen->debug_flags &
- ALL_FLAGS;
+ #define ALL_FLAGS (DBG(SI_SCHED) | DBG(GISEL))
+ uint64_t shader_debug_flags = sscreen->debug_flags & ALL_FLAGS;
+ /* Reserve left-most bit for tgsi/nir selector */
+ assert(!(shader_debug_flags & (1u << 31)));
+ shader_debug_flags |= (uint32_t)
+ ((sscreen->options.enable_nir & 0x1) << 31);
/* Add the high bits of 32-bit addresses, which affects
* how 32-bit addresses are expanded to 64 bits.
*/
STATIC_ASSERT(ALL_FLAGS <= UINT_MAX);
- shader_debug_flags |= (uint64_t)sscreen->info.address32_hi << 32;
+ assert((int16_t)sscreen->info.address32_hi == (int32_t)sscreen->info.address32_hi);
+ shader_debug_flags |= (uint64_t)(sscreen->info.address32_hi & 0xffff) << 32;
sscreen->disk_shader_cache =
disk_cache_create(sscreen->info.name,
shader_debug_flags);
}
-struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
- const struct pipe_screen_config *config)
+static void si_set_max_shader_compiler_threads(struct pipe_screen *screen,
+ unsigned max_threads)
+{
+ struct si_screen *sscreen = (struct si_screen *)screen;
+
+ /* This function doesn't allow a greater number of threads than
+ * the queue had at its creation. */
+ util_queue_adjust_num_threads(&sscreen->shader_compiler_queue,
+ max_threads);
+ /* Don't change the number of threads on the low priority queue. */
+}
+
+static bool si_is_parallel_shader_compilation_finished(struct pipe_screen *screen,
+ void *shader,
+ enum pipe_shader_type shader_type)
+{
+ struct si_shader_selector *sel = (struct si_shader_selector *)shader;
+
+ return util_queue_fence_is_signalled(&sel->ready);
+}
+
+static struct pipe_screen *
+radeonsi_screen_create_impl(struct radeon_winsys *ws,
+ const struct pipe_screen_config *config)
{
struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
- unsigned hw_threads, num_comp_hi_threads, num_comp_lo_threads, i;
+ unsigned hw_threads, num_comp_hi_threads, num_comp_lo_threads;
if (!sscreen) {
return NULL;
sscreen->ws = ws;
ws->query_info(ws, &sscreen->info);
+ if (sscreen->info.chip_class == GFX10 && LLVM_VERSION_MAJOR < 9) {
+ fprintf(stderr, "radeonsi: Navi family support requires LLVM 9 or higher\n");
+ FREE(sscreen);
+ return NULL;
+ }
+
if (sscreen->info.chip_class >= GFX9) {
sscreen->se_tile_repeat = 32 * sscreen->info.max_se;
} else {
sscreen->debug_flags |= debug_get_flags_option("AMD_DEBUG",
debug_options, 0);
+ if (sscreen->debug_flags & DBG(NO_GFX))
+ sscreen->info.has_graphics = false;
+
/* Set functions first. */
sscreen->b.context_create = si_pipe_create_context;
sscreen->b.destroy = si_destroy_screen;
+ sscreen->b.set_max_shader_compiler_threads =
+ si_set_max_shader_compiler_threads;
+ sscreen->b.is_parallel_shader_compilation_finished =
+ si_is_parallel_shader_compilation_finished;
+ sscreen->b.finalize_nir = si_finalize_nir;
si_init_screen_get_functions(sscreen);
si_init_screen_buffer_functions(sscreen);
if (driQueryOptionb(config->options, "radeonsi_enable_sisched"))
sscreen->debug_flags |= DBG(SI_SCHED);
-
if (sscreen->debug_flags & DBG(INFO))
ac_print_gpu_info(&sscreen->info);
sizeof(struct si_transfer), 64);
sscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
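+ /* Fall back to the AMD_TEX_ANISO environment variable if R600_TEX_ANISO is unset. */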
+ if (sscreen->force_aniso == -1) {
+ sscreen->force_aniso = MIN2(16, debug_get_num_option("AMD_TEX_ANISO", -1));
+ }
+
if (sscreen->force_aniso >= 0) {
printf("radeonsi: Forcing anisotropy filter to %ix\n",
/* round down to a power of two */
1 << util_logbase2(sscreen->force_aniso));
}
- (void) mtx_init(&sscreen->aux_context_lock, mtx_plain);
- (void) mtx_init(&sscreen->gpu_load_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->aux_context_lock, mtx_plain);
+ (void) simple_mtx_init(&sscreen->gpu_load_mutex, mtx_plain);
si_init_gs_info(sscreen);
if (!si_init_shader_cache(sscreen)) {
return NULL;
}
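+ /* Read the radeonsi_* boolean driconf options listed in si_debug_options.h. */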
+ {
+#define OPT_BOOL(name, dflt, description) \
+ sscreen->options.name = \
+ driQueryOptionb(config->options, "radeonsi_"#name);
+#include "si_debug_options.h"
+ }
+
si_disk_cache_create(sscreen);
/* Determine the number of shader compiler threads. */
num_comp_lo_threads = MIN2(num_comp_lo_threads,
ARRAY_SIZE(sscreen->compiler_lowp));
+ /* Take a reference on the glsl types for the compiler threads. */
+ glsl_type_singleton_init_or_ref();
+
if (!util_queue_init(&sscreen->shader_compiler_queue, "sh",
64, num_comp_hi_threads,
UTIL_QUEUE_INIT_RESIZE_IF_FULL |
UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY)) {
si_destroy_shader_cache(sscreen);
FREE(sscreen);
+ glsl_type_singleton_decref();
return NULL;
}
UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
si_destroy_shader_cache(sscreen);
FREE(sscreen);
+ glsl_type_singleton_decref();
return NULL;
}
si_init_perfcounters(sscreen);
/* Determine tessellation ring info. */
- bool double_offchip_buffers = sscreen->info.chip_class >= CIK &&
+ bool double_offchip_buffers = sscreen->info.chip_class >= GFX7 &&
sscreen->info.family != CHIP_CARRIZO &&
sscreen->info.family != CHIP_STONEY;
/* This must be one less than the maximum number due to a hw limitation.
- * Various hardware bugs in SI, CIK, and GFX9 need this.
+ * Various hardware bugs need this.
*/
unsigned max_offchip_buffers_per_se;
+ if (sscreen->info.chip_class >= GFX10)
+ max_offchip_buffers_per_se = 256;
/* Only certain chips can use the maximum value. */
- if (sscreen->info.family == CHIP_VEGA12 ||
- sscreen->info.family == CHIP_VEGA20)
+ else if (sscreen->info.family == CHIP_VEGA12 ||
+ sscreen->info.family == CHIP_VEGA20)
max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
else
max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;
}
sscreen->tess_factor_ring_size = 32768 * sscreen->info.max_se;
- assert(((sscreen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
sscreen->tess_offchip_ring_size = max_offchip_buffers *
sscreen->tess_offchip_block_dw_size * 4;
- if (sscreen->info.chip_class >= CIK) {
- if (sscreen->info.chip_class >= VI)
+ if (sscreen->info.chip_class >= GFX7) {
+ if (sscreen->info.chip_class >= GFX8)
--max_offchip_buffers;
sscreen->vgt_hs_offchip_param =
S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
}
- /* The mere presense of CLEAR_STATE in the IB causes random GPU hangs
- * on SI. Some CLEAR_STATE cause asic hang on radeon kernel, etc.
- * SPI_VS_OUT_CONFIG. So only enable CI CLEAR_STATE on amdgpu kernel.*/
- sscreen->has_clear_state = sscreen->info.chip_class >= CIK &&
- sscreen->info.drm_major == 3;
-
- sscreen->has_distributed_tess =
- sscreen->info.chip_class >= VI &&
- sscreen->info.max_se >= 2;
-
sscreen->has_draw_indirect_multi =
(sscreen->info.family >= CHIP_POLARIS10) ||
- (sscreen->info.chip_class == VI &&
+ (sscreen->info.chip_class == GFX8 &&
sscreen->info.pfp_fw_version >= 121 &&
sscreen->info.me_fw_version >= 87) ||
- (sscreen->info.chip_class == CIK &&
+ (sscreen->info.chip_class == GFX7 &&
sscreen->info.pfp_fw_version >= 211 &&
sscreen->info.me_fw_version >= 173) ||
- (sscreen->info.chip_class == SI &&
+ (sscreen->info.chip_class == GFX6 &&
sscreen->info.pfp_fw_version >= 79 &&
sscreen->info.me_fw_version >= 142);
- sscreen->has_out_of_order_rast = sscreen->info.chip_class >= VI &&
- sscreen->info.max_se >= 2 &&
+ sscreen->has_out_of_order_rast = sscreen->info.has_out_of_order_rast &&
!(sscreen->debug_flags & DBG(NO_OUT_OF_ORDER));
sscreen->assume_no_z_fights =
driQueryOptionb(config->options, "radeonsi_assume_no_z_fights");
sscreen->commutative_blend_add =
driQueryOptionb(config->options, "radeonsi_commutative_blend_add");
- sscreen->clear_db_cache_before_clear =
- driQueryOptionb(config->options, "radeonsi_clear_db_cache_before_clear");
- sscreen->has_msaa_sample_loc_bug = (sscreen->info.family >= CHIP_POLARIS10 &&
- sscreen->info.family <= CHIP_POLARIS12) ||
- sscreen->info.family == CHIP_VEGA10 ||
- sscreen->info.family == CHIP_RAVEN;
- sscreen->has_ls_vgpr_init_bug = sscreen->info.family == CHIP_VEGA10 ||
- sscreen->info.family == CHIP_RAVEN;
- sscreen->has_dcc_constant_encode = sscreen->info.family == CHIP_RAVEN2;
- /* Only enable primitive binning on APUs by default. */
- sscreen->dpbb_allowed = sscreen->info.family == CHIP_RAVEN ||
- sscreen->info.family == CHIP_RAVEN2;
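+ /* NGG is the default geometry pipeline on gfx10 except Navi14; "nongg" selects the legacy path. */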
+ sscreen->use_ngg = sscreen->info.chip_class >= GFX10 &&
+ sscreen->info.family != CHIP_NAVI14 &&
+ !(sscreen->debug_flags & DBG(NO_NGG));
+ sscreen->use_ngg_streamout = false;
- sscreen->dfsm_allowed = sscreen->info.family == CHIP_RAVEN ||
- sscreen->info.family == CHIP_RAVEN2;
+ /* Primitive binning: always allowed on gfx10, APU-only on GFX9; DFSM is APU-only on both. */
+ if (sscreen->info.chip_class >= GFX10) {
+ sscreen->dpbb_allowed = true;
+ sscreen->dfsm_allowed = !sscreen->info.has_dedicated_vram;
+ } else if (sscreen->info.chip_class == GFX9) {
+ sscreen->dpbb_allowed = !sscreen->info.has_dedicated_vram;
+ sscreen->dfsm_allowed = !sscreen->info.has_dedicated_vram;
+ }
/* Process DPBB enable flags. */
if (sscreen->debug_flags & DBG(DPBB)) {
}
/* While it would be nice not to have this flag, we are constrained
- * by the reality that LLVM 5.0 doesn't have working VGPR indexing
- * on GFX9.
+ * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
*/
- sscreen->llvm_has_working_vgpr_indexing = sscreen->info.chip_class <= VI;
-
- /* Some chips have RB+ registers, but don't support RB+. Those must
- * always disable it.
- */
- if (sscreen->info.family == CHIP_STONEY ||
- sscreen->info.chip_class >= GFX9) {
- sscreen->has_rbplus = true;
-
- sscreen->rbplus_allowed =
- !(sscreen->debug_flags & DBG(NO_RB_PLUS)) &&
- (sscreen->info.family == CHIP_STONEY ||
- sscreen->info.family == CHIP_VEGA12 ||
- sscreen->info.family == CHIP_RAVEN ||
- sscreen->info.family == CHIP_RAVEN2);
- }
+ sscreen->llvm_has_working_vgpr_indexing = sscreen->info.chip_class != GFX9;
sscreen->dcc_msaa_allowed =
!(sscreen->debug_flags & DBG(NO_DCC_MSAA));
- sscreen->cpdma_prefetch_writes_memory = sscreen->info.chip_class <= VI;
-
- (void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
sscreen->use_monolithic_shaders =
(sscreen->debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;
- sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
- SI_CONTEXT_INV_VMEM_L1;
- if (sscreen->info.chip_class <= VI) {
- sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_GLOBAL_L2;
- sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE;
+ if (sscreen->info.chip_class <= GFX8) {
+ sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_L2;
+ sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WB_L2;
}
if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
}
}
- for (i = 0; i < num_comp_hi_threads; i++)
- si_init_compiler(sscreen, &sscreen->compiler[i]);
- for (i = 0; i < num_comp_lo_threads; i++)
- si_init_compiler(sscreen, &sscreen->compiler_lowp[i]);
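+ /* Default every shader stage to Wave64; gfx10 may switch stages to Wave32 below. */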
+ sscreen->ge_wave_size = 64;
+ sscreen->ps_wave_size = 64;
+ sscreen->compute_wave_size = 64;
+
+ if (sscreen->info.chip_class >= GFX10) {
+ /* Pixel shaders: Wave64 is recommended.
+ * Compute shaders: There are piglit failures with Wave32.
+ */
+ sscreen->ge_wave_size = 32;
+
+ if (sscreen->debug_flags & DBG(W32_GE))
+ sscreen->ge_wave_size = 32;
+ if (sscreen->debug_flags & DBG(W32_PS))
+ sscreen->ps_wave_size = 32;
+ if (sscreen->debug_flags & DBG(W32_CS))
+ sscreen->compute_wave_size = 32;
+
+ if (sscreen->debug_flags & DBG(W64_GE))
+ sscreen->ge_wave_size = 64;
+ if (sscreen->debug_flags & DBG(W64_PS))
+ sscreen->ps_wave_size = 64;
+ if (sscreen->debug_flags & DBG(W64_CS))
+ sscreen->compute_wave_size = 64;
+ }
/* Create the auxiliary context. This must be done last. */
- sscreen->aux_context = si_create_context(&sscreen->b, 0);
+ sscreen->aux_context = si_create_context(&sscreen->b,
+ (sscreen->options.aux_debug ? PIPE_CONTEXT_DEBUG : 0) |
+ (sscreen->info.has_graphics ? 0 : PIPE_CONTEXT_COMPUTE_ONLY));
+ if (sscreen->options.aux_debug) {
+ struct u_log_context *log = CALLOC_STRUCT(u_log_context);
+ u_log_context_init(log);
+ sscreen->aux_context->set_log_context(sscreen->aux_context, log);
+ }
if (sscreen->debug_flags & DBG(TEST_DMA))
si_test_dma(sscreen);
return &sscreen->b;
}
+
+struct pipe_screen *radeonsi_screen_create(int fd, const struct pipe_screen_config *config)
+{
+ drmVersionPtr version = drmGetVersion(fd);
+ struct radeon_winsys *rw = NULL;
+
+ if (!version)
+ return NULL;
+
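+ /* DRM major version 2 is the legacy radeon kernel driver; major 3 is amdgpu. */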
+ switch (version->version_major) {
+ case 2:
+ rw = radeon_drm_winsys_create(fd, config, radeonsi_screen_create_impl);
+ break;
+ case 3:
+ rw = amdgpu_winsys_create(fd, config, radeonsi_screen_create_impl);
+ break;
+ }
+
+ drmFreeVersion(version);
+ return rw ? rw->screen : NULL;
+}