#include "ac_llvm_util.h"
#include "radeon/radeon_uvd.h"
-#include "gallivm/lp_bld_misc.h"
#include "util/disk_cache.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "gallium/winsys/amdgpu/drm/amdgpu_public.h"
#include <xf86drm.h>
+#include <llvm/Config/llvm-config.h>
+
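+/* si_get_reset_status re-creates the auxiliary context after a GPU reset,
+ * so the context constructor needs a forward declaration. */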
+static struct pipe_context *si_create_context(struct pipe_screen *screen,
+ unsigned flags);
+
static const struct debug_named_value debug_options[] = {
/* Shader logging options: */
{ "vs", DBG(VS), "Print vertex shaders" },
{ "preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations" },
/* Shader compiler options the shader cache should be aware of: */
- { "unsafemath", DBG(UNSAFE_MATH), "Enable unsafe math shader optimizations" },
{ "sisched", DBG(SI_SCHED), "Enable LLVM SI Machine Instruction Scheduler." },
{ "gisel", DBG(GISEL), "Enable LLVM global instruction selector." },
+ { "w32ge", DBG(W32_GE), "Use Wave32 for vertex, tessellation, and geometry shaders." },
+ { "w32ps", DBG(W32_PS), "Use Wave32 for pixel shaders." },
+ { "w32cs", DBG(W32_CS), "Use Wave32 for computes shaders." },
+ { "w64ge", DBG(W64_GE), "Use Wave64 for vertex, tessellation, and geometry shaders." },
+ { "w64ps", DBG(W64_PS), "Use Wave64 for pixel shaders." },
+ { "w64cs", DBG(W64_CS), "Use Wave64 for computes shaders." },
/* Shader compiler options (with no effect on the shader cache): */
{ "checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR" },
{ "zerovram", DBG(ZERO_VRAM), "Clear VRAM allocations." },
/* 3D engine options: */
+ { "nogfx", DBG(NO_GFX), "Disable graphics. Only multimedia compute paths can be used." },
+ { "nongg", DBG(NO_NGG), "Disable NGG and use the legacy pipeline." },
{ "alwayspd", DBG(ALWAYS_PD), "Always enable the primitive discard compute shader." },
{ "pd", DBG(PD), "Enable the primitive discard compute shader for large draw calls." },
{ "nopd", DBG(NO_PD), "Disable the primitive discard compute shader." },
DEBUG_NAMED_VALUE_END /* must be last */
};
-static void si_init_compiler(struct si_screen *sscreen,
- struct ac_llvm_compiler *compiler)
+void si_init_compiler(struct si_screen *sscreen, struct ac_llvm_compiler *compiler)
{
/* Only create the less-optimizing version of the compiler on APUs
* predating Ryzen (Raven). */
ac_init_llvm_compiler(compiler, sscreen->info.family, tm_options);
compiler->passes = ac_create_llvm_passes(compiler->tm);
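+ /* Wave32 uses a separate target machine, so it gets its own pass manager. */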
+ if (compiler->tm_wave32)
+ compiler->passes_wave32 = ac_create_llvm_passes(compiler->tm_wave32);
if (compiler->low_opt_tm)
compiler->low_opt_passes = ac_create_llvm_passes(compiler->low_opt_tm);
}
static void si_destroy_compiler(struct ac_llvm_compiler *compiler)
{
- ac_destroy_llvm_passes(compiler->passes);
- ac_destroy_llvm_passes(compiler->low_opt_passes);
ac_destroy_llvm_compiler(compiler);
}
si_release_all_descriptors(sctx);
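+ /* Destroy the GFX10-specific query state. */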
+ if (sctx->chip_class >= GFX10 && sctx->has_graphics)
+ gfx10_destroy_query(sctx);
+
pipe_resource_reference(&sctx->esgs_ring, NULL);
pipe_resource_reference(&sctx->gsvs_ring, NULL);
pipe_resource_reference(&sctx->tess_rings, NULL);
if (sctx->cs_dcc_retile)
sctx->b.delete_compute_state(&sctx->b, sctx->cs_dcc_retile);
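+ /* Destroy all variants of the FMASK expand compute shader. */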
+ for (unsigned i = 0; i < ARRAY_SIZE(sctx->cs_fmask_expand); i++) {
+ for (unsigned j = 0; j < ARRAY_SIZE(sctx->cs_fmask_expand[i]); j++) {
+ if (sctx->cs_fmask_expand[i][j]) {
+ sctx->b.delete_compute_state(&sctx->b,
+ sctx->cs_fmask_expand[i][j]);
+ }
+ }
+ }
+
if (sctx->blitter)
util_blitter_destroy(sctx->blitter);
if (sctx->query_result_shader)
sctx->b.delete_compute_state(&sctx->b, sctx->query_result_shader);
+ if (sctx->sh_query_result_shader)
+ sctx->b.delete_compute_state(&sctx->b, sctx->sh_query_result_shader);
if (sctx->gfx_cs)
sctx->ws->cs_destroy(sctx->gfx_cs);
util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
si_unref_sdma_uploads(sctx);
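+ /* The uploads were unreferenced above; free the array itself. */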
+ free(sctx->sdma_uploads);
FREE(sctx);
}
static enum pipe_reset_status si_get_reset_status(struct pipe_context *ctx)
{
struct si_context *sctx = (struct si_context *)ctx;
+ struct si_screen *sscreen = sctx->screen;
+ enum pipe_reset_status status = sctx->ws->ctx_query_reset_status(sctx->ctx);
+
+ if (status != PIPE_NO_RESET) {
+ /* Call the state tracker to set a no-op API dispatch. */
+ if (sctx->device_reset_callback.reset) {
+ sctx->device_reset_callback.reset(sctx->device_reset_callback.data,
+ status);
+ }
+
+ /* Re-create the auxiliary context, because it won't submit
+ * any new IBs due to a GPU reset.
+ */
+ simple_mtx_lock(&sscreen->aux_context_lock);
- return sctx->ws->ctx_query_reset_status(sctx->ctx);
+ struct u_log_context *aux_log = ((struct si_context *)sscreen->aux_context)->log;
+ sscreen->aux_context->set_log_context(sscreen->aux_context, NULL);
+ sscreen->aux_context->destroy(sscreen->aux_context);
+
+ sscreen->aux_context = si_create_context(&sscreen->b,
+ (sscreen->options.aux_debug ? PIPE_CONTEXT_DEBUG : 0) |
+ (sscreen->info.has_graphics ? 0 : PIPE_CONTEXT_COMPUTE_ONLY));
+ sscreen->aux_context->set_log_context(sscreen->aux_context, aux_log);
+ simple_mtx_unlock(&sscreen->aux_context_lock);
+ }
+ return status;
}
static void si_set_device_reset_callback(struct pipe_context *ctx,
sizeof(sctx->device_reset_callback));
}
-bool si_check_device_reset(struct si_context *sctx)
-{
- enum pipe_reset_status status;
-
- if (!sctx->device_reset_callback.reset)
- return false;
-
- status = sctx->ws->ctx_query_reset_status(sctx->ctx);
- if (status == PIPE_NO_RESET)
- return false;
-
- sctx->device_reset_callback.reset(sctx->device_reset_callback.data, status);
- return true;
-}
-
/* Apitrace profiling:
* 1) qapitrace : Tools -> Profile: Measure CPU & GPU times
* 2) In the middle panel, zoom in (mouse wheel) on some bad draw call
static struct pipe_context *si_create_context(struct pipe_screen *screen,
unsigned flags)
{
- struct si_context *sctx = CALLOC_STRUCT(si_context);
struct si_screen* sscreen = (struct si_screen *)screen;
+
+ /* Don't create a context if it's not compute-only and hw is compute-only. */
+ if (!sscreen->info.has_graphics &&
+ !(flags & PIPE_CONTEXT_COMPUTE_ONLY))
+ return NULL;
+
+ struct si_context *sctx = CALLOC_STRUCT(si_context);
struct radeon_winsys *ws = sscreen->ws;
int shader, i;
bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;
if (!sctx->ctx)
goto fail;
- if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
+ if (sscreen->info.num_sdma_rings &&
+ !(sscreen->debug_flags & DBG(NO_ASYNC_DMA)) &&
+ /* SDMA sometimes times out on gfx10, so disable it for now. See:
+ * https://bugs.freedesktop.org/show_bug.cgi?id=111481
+ * https://gitlab.freedesktop.org/mesa/mesa/issues/1907
+ */
+ (sctx->chip_class != GFX10 || sscreen->debug_flags & DBG(FORCE_DMA))) {
sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
(void*)si_flush_dma_cs,
sctx, stop_exec_on_failure);
0, PIPE_USAGE_DEFAULT,
SI_RESOURCE_FLAG_32BIT |
(use_sdma_upload ?
- SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA :
- (sscreen->cpdma_prefetch_writes_memory ?
- 0 : SI_RESOURCE_FLAG_READ_ONLY)));
+ SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA : 0));
if (!sctx->b.const_uploader)
goto fail;
if (!sctx->border_color_map)
goto fail;
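+ /* NGG is a screen-wide decision; cache it in the context. */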
+ sctx->ngg = sscreen->use_ngg;
+
/* Initialize context functions used by graphics and compute. */
+ if (sctx->chip_class >= GFX10)
+ sctx->emit_cache_flush = gfx10_emit_cache_flush;
+ else
+ sctx->emit_cache_flush = si_emit_cache_flush;
+
sctx->b.emit_string_marker = si_emit_string_marker;
sctx->b.set_debug_callback = si_set_debug_callback;
sctx->b.set_log_context = si_set_log_context;
si_init_fence_functions(sctx);
si_init_query_functions(sctx);
si_init_state_compute_functions(sctx);
+ si_init_context_texture_functions(sctx);
/* Initialize graphics-only context functions. */
if (sctx->has_graphics) {
- si_init_context_texture_functions(sctx);
+ if (sctx->chip_class >= GFX10)
+ gfx10_init_query(sctx);
si_init_msaa_functions(sctx);
si_init_shader_functions(sctx);
si_init_state_functions(sctx);
goto fail;
sctx->blitter->skip_viewport_restore = true;
+ /* Some states are expected to always be non-NULL. */
+ sctx->noop_blend = util_blitter_get_noop_blend_state(sctx->blitter);
+ sctx->queued.named.blend = sctx->noop_blend;
+
+ sctx->noop_dsa = util_blitter_get_noop_dsa_state(sctx->blitter);
+ sctx->queued.named.dsa = sctx->noop_dsa;
+
+ sctx->discard_rasterizer_state =
+ util_blitter_get_discard_rasterizer_state(sctx->blitter);
+ sctx->queued.named.rasterizer = sctx->discard_rasterizer_state;
+
si_init_draw_functions(sctx);
si_initialize_prim_discard_tunables(sctx);
}
if (!sscreen->ws->unref(sscreen->ws))
return;
- mtx_destroy(&sscreen->aux_context_lock);
+ simple_mtx_destroy(&sscreen->aux_context_lock);
struct u_log_context *aux_log = ((struct si_context *)sscreen->aux_context)->log;
if (aux_log) {
util_queue_destroy(&sscreen->shader_compiler_queue);
util_queue_destroy(&sscreen->shader_compiler_queue_low_priority);
+ /* Release the reference on the glsl types taken for the compiler threads. */
+ glsl_type_singleton_decref();
+
for (i = 0; i < ARRAY_SIZE(sscreen->compiler); i++)
si_destroy_compiler(&sscreen->compiler[i]);
FREE(part);
}
}
- mtx_destroy(&sscreen->shader_parts_mutex);
+ simple_mtx_destroy(&sscreen->shader_parts_mutex);
si_destroy_shader_cache(sscreen);
si_destroy_perfcounters(sscreen);
si_gpu_load_kill_thread(sscreen);
- mtx_destroy(&sscreen->gpu_load_mutex);
+ simple_mtx_destroy(&sscreen->gpu_load_mutex);
slab_destroy_parent(&sscreen->pool_transfers);
disk_cache_format_hex_id(cache_id, sha1, 20 * 2);
/* These flags affect shader compilation. */
- #define ALL_FLAGS (DBG(FS_CORRECT_DERIVS_AFTER_KILL) | \
- DBG(SI_SCHED) | \
- DBG(GISEL) | \
- DBG(UNSAFE_MATH))
- uint64_t shader_debug_flags = sscreen->debug_flags &
- ALL_FLAGS;
+ #define ALL_FLAGS (DBG(SI_SCHED) | DBG(GISEL))
+ uint64_t shader_debug_flags = sscreen->debug_flags & ALL_FLAGS;
/* Add the high bits of 32-bit addresses, which affects
* how 32-bit addresses are expanded to 64 bits.
assert((int16_t)sscreen->info.address32_hi == (int32_t)sscreen->info.address32_hi);
shader_debug_flags |= (uint64_t)(sscreen->info.address32_hi & 0xffff) << 32;
- if (sscreen->options.enable_nir)
- shader_debug_flags |= 1ull << 48;
-
sscreen->disk_shader_cache =
disk_cache_create(sscreen->info.name,
cache_id,
static bool si_is_parallel_shader_compilation_finished(struct pipe_screen *screen,
void *shader,
- unsigned shader_type)
+ enum pipe_shader_type shader_type)
{
- if (shader_type == PIPE_SHADER_COMPUTE) {
- struct si_compute *cs = (struct si_compute*)shader;
-
- return util_queue_fence_is_signalled(&cs->ready);
- }
struct si_shader_selector *sel = (struct si_shader_selector *)shader;
return util_queue_fence_is_signalled(&sel->ready);
const struct pipe_screen_config *config)
{
struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
- unsigned hw_threads, num_comp_hi_threads, num_comp_lo_threads, i;
+ unsigned hw_threads, num_comp_hi_threads, num_comp_lo_threads;
if (!sscreen) {
return NULL;
sscreen->ws = ws;
ws->query_info(ws, &sscreen->info);
+ if (sscreen->info.chip_class == GFX10 && LLVM_VERSION_MAJOR < 9) {
+ fprintf(stderr, "radeonsi: Navi family support requires LLVM 9 or higher\n");
+ FREE(sscreen);
+ return NULL;
+ }
+
if (sscreen->info.chip_class >= GFX9) {
sscreen->se_tile_repeat = 32 * sscreen->info.max_se;
} else {
sscreen->debug_flags |= debug_get_flags_option("AMD_DEBUG",
debug_options, 0);
+ if (sscreen->debug_flags & DBG(NO_GFX))
+ sscreen->info.has_graphics = false;
+
/* Set functions first. */
sscreen->b.context_create = si_pipe_create_context;
sscreen->b.destroy = si_destroy_screen;
si_set_max_shader_compiler_threads;
sscreen->b.is_parallel_shader_compilation_finished =
si_is_parallel_shader_compilation_finished;
+ sscreen->b.finalize_nir = si_finalize_nir;
si_init_screen_get_functions(sscreen);
si_init_screen_buffer_functions(sscreen);
1 << util_logbase2(sscreen->force_aniso));
}
- (void) mtx_init(&sscreen->aux_context_lock, mtx_plain);
- (void) mtx_init(&sscreen->gpu_load_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->aux_context_lock, mtx_plain);
+ (void) simple_mtx_init(&sscreen->gpu_load_mutex, mtx_plain);
si_init_gs_info(sscreen);
if (!si_init_shader_cache(sscreen)) {
num_comp_lo_threads = MIN2(num_comp_lo_threads,
ARRAY_SIZE(sscreen->compiler_lowp));
+ /* Take a reference on the glsl types for the compiler threads. */
+ glsl_type_singleton_init_or_ref();
+
if (!util_queue_init(&sscreen->shader_compiler_queue, "sh",
64, num_comp_hi_threads,
UTIL_QUEUE_INIT_RESIZE_IF_FULL |
UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY)) {
si_destroy_shader_cache(sscreen);
FREE(sscreen);
+ glsl_type_singleton_decref();
return NULL;
}
UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
si_destroy_shader_cache(sscreen);
FREE(sscreen);
+ glsl_type_singleton_decref();
return NULL;
}
*/
unsigned max_offchip_buffers_per_se;
+ if (sscreen->info.chip_class >= GFX10)
+ max_offchip_buffers_per_se = 256;
/* Only certain chips can use the maximum value. */
- if (sscreen->info.family == CHIP_VEGA12 ||
- sscreen->info.family == CHIP_VEGA20)
+ else if (sscreen->info.family == CHIP_VEGA12 ||
+ sscreen->info.family == CHIP_VEGA20)
max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
else
max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;
}
sscreen->tess_factor_ring_size = 32768 * sscreen->info.max_se;
- assert(((sscreen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
sscreen->tess_offchip_ring_size = max_offchip_buffers *
sscreen->tess_offchip_block_dw_size * 4;
S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
}
- /* The mere presense of CLEAR_STATE in the IB causes random GPU hangs
- * on GFX6. Some CLEAR_STATE cause asic hang on radeon kernel, etc.
- * SPI_VS_OUT_CONFIG. So only enable GFX7 CLEAR_STATE on amdgpu kernel.*/
- sscreen->has_clear_state = sscreen->info.chip_class >= GFX7 &&
- sscreen->info.is_amdgpu;
-
- sscreen->has_distributed_tess =
- sscreen->info.chip_class >= GFX8 &&
- sscreen->info.max_se >= 2;
-
sscreen->has_draw_indirect_multi =
(sscreen->info.family >= CHIP_POLARIS10) ||
(sscreen->info.chip_class == GFX8 &&
sscreen->info.pfp_fw_version >= 79 &&
sscreen->info.me_fw_version >= 142);
- sscreen->has_out_of_order_rast = sscreen->info.chip_class >= GFX8 &&
- sscreen->info.max_se >= 2 &&
+ sscreen->has_out_of_order_rast = sscreen->info.has_out_of_order_rast &&
!(sscreen->debug_flags & DBG(NO_OUT_OF_ORDER));
sscreen->assume_no_z_fights =
driQueryOptionb(config->options, "radeonsi_assume_no_z_fights");
#include "si_debug_options.h"
}
- sscreen->has_gfx9_scissor_bug = sscreen->info.family == CHIP_VEGA10 ||
- sscreen->info.family == CHIP_RAVEN;
- sscreen->has_msaa_sample_loc_bug = (sscreen->info.family >= CHIP_POLARIS10 &&
- sscreen->info.family <= CHIP_POLARIS12) ||
- sscreen->info.family == CHIP_VEGA10 ||
- sscreen->info.family == CHIP_RAVEN;
- sscreen->has_ls_vgpr_init_bug = sscreen->info.family == CHIP_VEGA10 ||
- sscreen->info.family == CHIP_RAVEN;
- sscreen->has_dcc_constant_encode = sscreen->info.family == CHIP_RAVEN2;
+ sscreen->use_ngg = sscreen->info.chip_class >= GFX10 &&
+ sscreen->info.family != CHIP_NAVI14 &&
+ !(sscreen->debug_flags & DBG(NO_NGG));
+ sscreen->use_ngg_streamout = false;
/* Only enable primitive binning on APUs by default. */
- sscreen->dpbb_allowed = sscreen->info.family == CHIP_RAVEN ||
- sscreen->info.family == CHIP_RAVEN2;
-
- sscreen->dfsm_allowed = sscreen->info.family == CHIP_RAVEN ||
- sscreen->info.family == CHIP_RAVEN2;
+ if (sscreen->info.chip_class >= GFX10) {
+ sscreen->dpbb_allowed = true;
+ sscreen->dfsm_allowed = !sscreen->info.has_dedicated_vram;
+ } else if (sscreen->info.chip_class == GFX9) {
+ sscreen->dpbb_allowed = !sscreen->info.has_dedicated_vram;
+ sscreen->dfsm_allowed = !sscreen->info.has_dedicated_vram;
+ }
/* Process DPBB enable flags. */
if (sscreen->debug_flags & DBG(DPBB)) {
}
/* While it would be nice not to have this flag, we are constrained
- * by the reality that LLVM 5.0 doesn't have working VGPR indexing
- * on GFX9.
+ * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
*/
- sscreen->llvm_has_working_vgpr_indexing = sscreen->info.chip_class <= GFX8;
-
- /* Some chips have RB+ registers, but don't support RB+. Those must
- * always disable it.
- */
- if (sscreen->info.family == CHIP_STONEY ||
- sscreen->info.chip_class >= GFX9) {
- sscreen->has_rbplus = true;
-
- sscreen->rbplus_allowed =
- !(sscreen->debug_flags & DBG(NO_RB_PLUS)) &&
- (sscreen->info.family == CHIP_STONEY ||
- sscreen->info.family == CHIP_VEGA12 ||
- sscreen->info.family == CHIP_RAVEN ||
- sscreen->info.family == CHIP_RAVEN2);
- }
+ sscreen->llvm_has_working_vgpr_indexing = sscreen->info.chip_class != GFX9;
sscreen->dcc_msaa_allowed =
!(sscreen->debug_flags & DBG(NO_DCC_MSAA));
- sscreen->cpdma_prefetch_writes_memory = sscreen->info.chip_class <= GFX8;
-
- (void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
+ (void) simple_mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
sscreen->use_monolithic_shaders =
(sscreen->debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;
- sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
- SI_CONTEXT_INV_VMEM_L1;
+ sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SCACHE |
+ SI_CONTEXT_INV_VCACHE;
if (sscreen->info.chip_class <= GFX8) {
- sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_GLOBAL_L2;
- sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
+ sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_L2;
+ sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WB_L2;
}
if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
}
}
- for (i = 0; i < num_comp_hi_threads; i++)
- si_init_compiler(sscreen, &sscreen->compiler[i]);
- for (i = 0; i < num_comp_lo_threads; i++)
- si_init_compiler(sscreen, &sscreen->compiler_lowp[i]);
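+ /* Wave sizes default to 64; GFX10 may use Wave32, subject to the debug overrides. */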
+ sscreen->ge_wave_size = 64;
+ sscreen->ps_wave_size = 64;
+ sscreen->compute_wave_size = 64;
+
+ if (sscreen->info.chip_class >= GFX10) {
+ /* Pixel shaders: Wave64 is recommended.
+ * Compute shaders: There are piglit failures with Wave32.
+ */
+ sscreen->ge_wave_size = 32;
+
+ if (sscreen->debug_flags & DBG(W32_GE))
+ sscreen->ge_wave_size = 32;
+ if (sscreen->debug_flags & DBG(W32_PS))
+ sscreen->ps_wave_size = 32;
+ if (sscreen->debug_flags & DBG(W32_CS))
+ sscreen->compute_wave_size = 32;
+
+ if (sscreen->debug_flags & DBG(W64_GE))
+ sscreen->ge_wave_size = 64;
+ if (sscreen->debug_flags & DBG(W64_PS))
+ sscreen->ps_wave_size = 64;
+ if (sscreen->debug_flags & DBG(W64_CS))
+ sscreen->compute_wave_size = 64;
+ }
/* Create the auxiliary context. This must be done last. */
- sscreen->aux_context = si_create_context(
- &sscreen->b, sscreen->options.aux_debug ? PIPE_CONTEXT_DEBUG : 0);
+ sscreen->aux_context = si_create_context(&sscreen->b,
+ (sscreen->options.aux_debug ? PIPE_CONTEXT_DEBUG : 0) |
+ (sscreen->info.has_graphics ? 0 : PIPE_CONTEXT_COMPUTE_ONLY));
if (sscreen->options.aux_debug) {
struct u_log_context *log = CALLOC_STRUCT(u_log_context);
u_log_context_init(log);