res->flags &= ~RADEON_FLAG_NO_CPU_ACCESS; /* disallowed with VRAM_GTT */
}
- if (rscreen->debug_flags & DBG_NO_WC)
+ if (rscreen->debug_flags & DBG(NO_WC))
res->flags &= ~RADEON_FLAG_GTT_WC;
/* Set expected VRAM and GART usage for the buffer. */
res->TC_L2_dirty = false;
/* Print debug information. */
- if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
+ if (rscreen->debug_flags & DBG(VM) && res->b.b.target == PIPE_BUFFER) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
res->gpu_address, res->gpu_address + res->buf->size,
res->buf->size);
}
if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
- !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
+ !(rscreen->debug_flags & DBG(NO_DISCARD_RANGE)) &&
((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
PIPE_TRANSFER_PERSISTENT)) &&
r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
struct radeon_winsys_cs *cs = rctx->dma.cs;
struct radeon_saved_cs saved;
bool check_vm =
- (rctx->screen->debug_flags & DBG_CHECK_VM) &&
+ (rctx->screen->debug_flags & DBG(CHECK_VM)) &&
rctx->check_vm_faults;
if (!radeon_emitted(cs, 0)) {
if (!rctx->ctx)
return false;
- if (rscreen->info.num_sdma_rings && !(rscreen->debug_flags & DBG_NO_ASYNC_DMA)) {
+ if (rscreen->info.num_sdma_rings && !(rscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
rctx->dma.cs = rctx->ws->cs_create(rctx->ctx, RING_DMA,
r600_flush_dma_ring,
rctx);
static const struct debug_named_value common_debug_options[] = {
/* logging */
- { "tex", DBG_TEX, "Print texture info" },
- { "nir", DBG_NIR, "Enable experimental NIR shaders" },
- { "compute", DBG_COMPUTE, "Print compute info" },
- { "vm", DBG_VM, "Print virtual addresses when creating resources" },
- { "info", DBG_INFO, "Print driver information" },
+ { "tex", DBG(TEX), "Print texture info" },
+ { "nir", DBG(NIR), "Enable experimental NIR shaders" },
+ { "compute", DBG(COMPUTE), "Print compute info" },
+ { "vm", DBG(VM), "Print virtual addresses when creating resources" },
+ { "info", DBG(INFO), "Print driver information" },
/* shaders */
- { "fs", DBG_FS, "Print fetch shaders" },
- { "vs", DBG_VS, "Print vertex shaders" },
- { "gs", DBG_GS, "Print geometry shaders" },
- { "ps", DBG_PS, "Print pixel shaders" },
- { "cs", DBG_CS, "Print compute shaders" },
- { "tcs", DBG_TCS, "Print tessellation control shaders" },
- { "tes", DBG_TES, "Print tessellation evaluation shaders" },
- { "noir", DBG_NO_IR, "Don't print the LLVM IR"},
- { "notgsi", DBG_NO_TGSI, "Don't print the TGSI"},
- { "noasm", DBG_NO_ASM, "Don't print disassembled shaders"},
- { "preoptir", DBG_PREOPT_IR, "Print the LLVM IR before initial optimizations" },
- { "checkir", DBG_CHECK_IR, "Enable additional sanity checks on shader IR" },
- { "nooptvariant", DBG_NO_OPT_VARIANT, "Disable compiling optimized shader variants." },
-
- { "testdma", DBG_TEST_DMA, "Invoke SDMA tests and exit." },
- { "testvmfaultcp", DBG_TEST_VMFAULT_CP, "Invoke a CP VM fault test and exit." },
- { "testvmfaultsdma", DBG_TEST_VMFAULT_SDMA, "Invoke a SDMA VM fault test and exit." },
- { "testvmfaultshader", DBG_TEST_VMFAULT_SHADER, "Invoke a shader VM fault test and exit." },
+ { "vs", DBG(VS), "Print vertex shaders" },
+ { "gs", DBG(GS), "Print geometry shaders" },
+ { "ps", DBG(PS), "Print pixel shaders" },
+ { "cs", DBG(CS), "Print compute shaders" },
+ { "tcs", DBG(TCS), "Print tessellation control shaders" },
+ { "tes", DBG(TES), "Print tessellation evaluation shaders" },
+ { "noir", DBG(NO_IR), "Don't print the LLVM IR"},
+ { "notgsi", DBG(NO_TGSI), "Don't print the TGSI"},
+ { "noasm", DBG(NO_ASM), "Don't print disassembled shaders"},
+ { "preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations" },
+ { "checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR" },
+ { "nooptvariant", DBG(NO_OPT_VARIANT), "Disable compiling optimized shader variants." },
+
+ { "testdma", DBG(TEST_DMA), "Invoke SDMA tests and exit." },
+ { "testvmfaultcp", DBG(TEST_VMFAULT_CP), "Invoke a CP VM fault test and exit." },
+ { "testvmfaultsdma", DBG(TEST_VMFAULT_SDMA), "Invoke a SDMA VM fault test and exit." },
+ { "testvmfaultshader", DBG(TEST_VMFAULT_SHADER), "Invoke a shader VM fault test and exit." },
/* features */
- { "nodma", DBG_NO_ASYNC_DMA, "Disable asynchronous DMA" },
- { "nohyperz", DBG_NO_HYPERZ, "Disable Hyper-Z" },
+ { "nodma", DBG(NO_ASYNC_DMA), "Disable asynchronous DMA" },
+ { "nohyperz", DBG(NO_HYPERZ), "Disable Hyper-Z" },
/* GL uses the word INVALIDATE, gallium uses the word DISCARD */
- { "noinvalrange", DBG_NO_DISCARD_RANGE, "Disable handling of INVALIDATE_RANGE map flags" },
- { "no2d", DBG_NO_2D_TILING, "Disable 2D tiling" },
- { "notiling", DBG_NO_TILING, "Disable tiling" },
- { "switch_on_eop", DBG_SWITCH_ON_EOP, "Program WD/IA to switch on end-of-packet." },
- { "forcedma", DBG_FORCE_DMA, "Use asynchronous DMA for all operations when possible." },
- { "precompile", DBG_PRECOMPILE, "Compile one shader variant at shader creation." },
- { "nowc", DBG_NO_WC, "Disable GTT write combining" },
- { "check_vm", DBG_CHECK_VM, "Check VM faults and dump debug info." },
- { "nodcc", DBG_NO_DCC, "Disable DCC." },
- { "nodccclear", DBG_NO_DCC_CLEAR, "Disable DCC fast clear." },
- { "norbplus", DBG_NO_RB_PLUS, "Disable RB+." },
- { "sisched", DBG_SI_SCHED, "Enable LLVM SI Machine Instruction Scheduler." },
- { "mono", DBG_MONOLITHIC_SHADERS, "Use old-style monolithic shaders compiled on demand" },
- { "unsafemath", DBG_UNSAFE_MATH, "Enable unsafe math shader optimizations" },
- { "nodccfb", DBG_NO_DCC_FB, "Disable separate DCC on the main framebuffer" },
- { "nodpbb", DBG_NO_DPBB, "Disable DPBB." },
- { "nodfsm", DBG_NO_DFSM, "Disable DFSM." },
- { "nooutoforder", DBG_NO_OUT_OF_ORDER, "Disable out-of-order rasterization" },
+ { "noinvalrange", DBG(NO_DISCARD_RANGE), "Disable handling of INVALIDATE_RANGE map flags" },
+ { "no2d", DBG(NO_2D_TILING), "Disable 2D tiling" },
+ { "notiling", DBG(NO_TILING), "Disable tiling" },
+ { "switch_on_eop", DBG(SWITCH_ON_EOP), "Program WD/IA to switch on end-of-packet." },
+ { "forcedma", DBG(FORCE_DMA), "Use asynchronous DMA for all operations when possible." },
+ { "precompile", DBG(PRECOMPILE), "Compile one shader variant at shader creation." },
+ { "nowc", DBG(NO_WC), "Disable GTT write combining" },
+ { "check_vm", DBG(CHECK_VM), "Check VM faults and dump debug info." },
+ { "nodcc", DBG(NO_DCC), "Disable DCC." },
+ { "nodccclear", DBG(NO_DCC_CLEAR), "Disable DCC fast clear." },
+ { "norbplus", DBG(NO_RB_PLUS), "Disable RB+." },
+ { "sisched", DBG(SI_SCHED), "Enable LLVM SI Machine Instruction Scheduler." },
+ { "mono", DBG(MONOLITHIC_SHADERS), "Use old-style monolithic shaders compiled on demand" },
+ { "unsafemath", DBG(UNSAFE_MATH), "Enable unsafe math shader optimizations" },
+ { "nodccfb", DBG(NO_DCC_FB), "Disable separate DCC on the main framebuffer" },
+ { "nodpbb", DBG(NO_DPBB), "Disable DPBB." },
+ { "nodfsm", DBG(NO_DFSM), "Disable DFSM." },
+ { "nooutoforder", DBG(NO_OUT_OF_ORDER), "Disable out-of-order rasterization" },
DEBUG_NAMED_VALUE_END /* must be last */
};
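
Note: the rename is invisible to users; the table above is still handed to Mesa's u_debug parser at screen creation, so the same R600_DEBUG option names keep working. A minimal sketch of the consuming call, assuming the existing debug_get_flags_option() helper from util/u_debug.h:

    rscreen->debug_flags = debug_get_flags_option("R600_DEBUG",
                                                  common_debug_options, 0);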
/* These flags affect shader compilation. */
uint64_t shader_debug_flags =
rscreen->debug_flags &
- (DBG_FS_CORRECT_DERIVS_AFTER_KILL |
- DBG_SI_SCHED |
- DBG_UNSAFE_MATH);
+ (DBG(FS_CORRECT_DERIVS_AFTER_KILL) |
+ DBG(SI_SCHED) |
+ DBG(UNSAFE_MATH));
rscreen->disk_shader_cache =
disk_cache_create(r600_get_family_name(rscreen),
(void) mtx_init(&rscreen->aux_context_lock, mtx_plain);
(void) mtx_init(&rscreen->gpu_load_mutex, mtx_plain);
- if (rscreen->debug_flags & DBG_INFO) {
+ if (rscreen->debug_flags & DBG(INFO)) {
printf("pci (domain:bus:dev.func): %04x:%02x:%02x.%x\n",
rscreen->info.pci_domain, rscreen->info.pci_bus,
rscreen->info.pci_dev, rscreen->info.pci_func);
bool si_extra_shader_checks(struct r600_common_screen *rscreen, unsigned processor)
{
- return (rscreen->debug_flags & DBG_CHECK_IR) ||
+ return (rscreen->debug_flags & DBG(CHECK_IR)) ||
si_can_dump_shader(rscreen, processor);
}
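
This is also where the processor-indexed enumerators pay off: DBG_VS..DBG_CS equal the PIPE_SHADER_* indices, so the per-stage dump check can reduce to a single shift. A plausible shape for the helper given the enum layout below (the actual body may differ):

    bool si_can_dump_shader(struct r600_common_screen *rscreen,
                            unsigned processor)
    {
        return rscreen->debug_flags & (1 << processor);
    }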
#define R600_NOT_QUERY 0xffffffff
/* Debug flags. */
-#define DBG_VS (1 << PIPE_SHADER_VERTEX)
-#define DBG_PS (1 << PIPE_SHADER_FRAGMENT)
-#define DBG_GS (1 << PIPE_SHADER_GEOMETRY)
-#define DBG_TCS (1 << PIPE_SHADER_TESS_CTRL)
-#define DBG_TES (1 << PIPE_SHADER_TESS_EVAL)
-#define DBG_CS (1 << PIPE_SHADER_COMPUTE)
-#define DBG_ALL_SHADERS (DBG_FS - 1)
-#define DBG_FS (1 << 6) /* fetch shader */
-#define DBG_TEX (1 << 7)
-#define DBG_NIR (1 << 8)
-#define DBG_COMPUTE (1 << 9)
-/* gap */
-#define DBG_VM (1 << 11)
-#define DBG_NO_IR (1 << 12)
-#define DBG_NO_TGSI (1 << 13)
-#define DBG_NO_ASM (1 << 14)
-#define DBG_PREOPT_IR (1 << 15)
-#define DBG_CHECK_IR (1 << 16)
-#define DBG_NO_OPT_VARIANT (1 << 17)
-#define DBG_FS_CORRECT_DERIVS_AFTER_KILL (1 << 18)
-/* gaps */
-#define DBG_TEST_DMA (1 << 20)
-/* Bits 21-31 are reserved for the r600g driver. */
-/* features */
-#define DBG_NO_ASYNC_DMA (1ull << 32)
-#define DBG_NO_HYPERZ (1ull << 33)
-#define DBG_NO_DISCARD_RANGE (1ull << 34)
-#define DBG_NO_2D_TILING (1ull << 35)
-#define DBG_NO_TILING (1ull << 36)
-#define DBG_SWITCH_ON_EOP (1ull << 37)
-#define DBG_FORCE_DMA (1ull << 38)
-#define DBG_PRECOMPILE (1ull << 39)
-#define DBG_INFO (1ull << 40)
-#define DBG_NO_WC (1ull << 41)
-#define DBG_CHECK_VM (1ull << 42)
-#define DBG_NO_DCC (1ull << 43)
-#define DBG_NO_DCC_CLEAR (1ull << 44)
-#define DBG_NO_RB_PLUS (1ull << 45)
-#define DBG_SI_SCHED (1ull << 46)
-#define DBG_MONOLITHIC_SHADERS (1ull << 47)
-#define DBG_NO_OUT_OF_ORDER (1ull << 48)
-#define DBG_UNSAFE_MATH (1ull << 49)
-#define DBG_NO_DCC_FB (1ull << 50)
-#define DBG_TEST_VMFAULT_CP (1ull << 51)
-#define DBG_TEST_VMFAULT_SDMA (1ull << 52)
-#define DBG_TEST_VMFAULT_SHADER (1ull << 53)
-#define DBG_NO_DPBB (1ull << 54)
-#define DBG_NO_DFSM (1ull << 55)
+enum {
+ /* Shader logging options: */
+ DBG_VS = PIPE_SHADER_VERTEX,
+ DBG_PS = PIPE_SHADER_FRAGMENT,
+ DBG_GS = PIPE_SHADER_GEOMETRY,
+ DBG_TCS = PIPE_SHADER_TESS_CTRL,
+ DBG_TES = PIPE_SHADER_TESS_EVAL,
+ DBG_CS = PIPE_SHADER_COMPUTE,
+ DBG_NO_IR,
+ DBG_NO_TGSI,
+ DBG_NO_ASM,
+ DBG_PREOPT_IR,
+
+ /* Shader compiler options the shader cache should be aware of: */
+ DBG_FS_CORRECT_DERIVS_AFTER_KILL,
+ DBG_UNSAFE_MATH,
+ DBG_SI_SCHED,
+
+ /* Shader compiler options (with no effect on the shader cache): */
+ DBG_CHECK_IR,
+ DBG_PRECOMPILE,
+ DBG_NIR,
+ DBG_MONOLITHIC_SHADERS,
+ DBG_NO_OPT_VARIANT,
+
+ /* Information logging options: */
+ DBG_INFO,
+ DBG_TEX,
+ DBG_COMPUTE,
+ DBG_VM,
+
+ /* Driver options: */
+ DBG_FORCE_DMA,
+ DBG_NO_ASYNC_DMA,
+ DBG_NO_DISCARD_RANGE,
+ DBG_NO_WC,
+ DBG_CHECK_VM,
+
+ /* 3D engine options: */
+ DBG_SWITCH_ON_EOP,
+ DBG_NO_OUT_OF_ORDER,
+ DBG_NO_DPBB,
+ DBG_NO_DFSM,
+ DBG_NO_HYPERZ,
+ DBG_NO_RB_PLUS,
+ DBG_NO_2D_TILING,
+ DBG_NO_TILING,
+ DBG_NO_DCC,
+ DBG_NO_DCC_CLEAR,
+ DBG_NO_DCC_FB,
+
+ /* Tests: */
+ DBG_TEST_DMA,
+ DBG_TEST_VMFAULT_CP,
+ DBG_TEST_VMFAULT_SDMA,
+ DBG_TEST_VMFAULT_SHADER,
+};
+
+#define DBG_ALL_SHADERS (((1 << (DBG_CS + 1)) - 1))
+#define DBG(name) (1ull << DBG_##name)
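
The enum/DBG() pair replaces hand-assigned bit positions: enumerators are bit indices, and DBG(name) builds the 64-bit mask (1ull << DBG_name), so flags past bit 31 need no special casing and new flags can be inserted without renumbering. For example, from the values implied above:

    DBG(CS)          /* 1ull << DBG_CS == 1ull << PIPE_SHADER_COMPUTE == 0x20 */
    DBG_ALL_SHADERS  /* (1 << (DBG_CS + 1)) - 1 == 0x3f, i.e. VS..CS */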
#define R600_MAP_BUFFER_ALIGNMENT 64
#define COMPUTE_DBG(rscreen, fmt, args...) \
do { \
- if ((rscreen->b.debug_flags & DBG_COMPUTE)) fprintf(stderr, fmt, ##args); \
+ if ((rscreen->b.debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
} while (0);
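
Call sites are unchanged; an illustrative (hypothetical) trace in the compute path:

    COMPUTE_DBG(rscreen, "launch_grid: block %ux%ux%u\n",
                grid->block[0], grid->block[1], grid->block[2]);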
#define R600_ERR(fmt, args...) \
R600_RESOURCE_FLAG_FLUSHED_DEPTH))) {
rtex->db_compatible = true;
- if (!(rscreen->debug_flags & DBG_NO_HYPERZ))
+ if (!(rscreen->debug_flags & DBG(NO_HYPERZ)))
r600_texture_allocate_htile(rscreen, rtex);
}
} else {
* apply_opaque_metadata later.
*/
if (rtex->surface.dcc_size &&
- (buf || !(rscreen->debug_flags & DBG_NO_DCC)) &&
+ (buf || !(rscreen->debug_flags & DBG(NO_DCC))) &&
!(rtex->surface.flags & RADEON_SURF_SCANOUT)) {
/* Reserve space for the DCC buffer. */
rtex->dcc_offset = align64(rtex->size, rtex->surface.dcc_alignment);
rtex->cmask.base_address_reg =
(rtex->resource.gpu_address + rtex->cmask.offset) >> 8;
- if (rscreen->debug_flags & DBG_VM) {
+ if (rscreen->debug_flags & DBG(VM)) {
fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n",
rtex->resource.gpu_address,
rtex->resource.gpu_address + rtex->resource.buf->size,
base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format));
}
- if (rscreen->debug_flags & DBG_TEX) {
+ if (rscreen->debug_flags & DBG(TEX)) {
puts("Texture:");
struct u_log_context log;
u_log_context_init(&log);
if (!force_tiling &&
!is_depth_stencil &&
!util_format_is_compressed(templ->format)) {
- if (rscreen->debug_flags & DBG_NO_TILING)
+ if (rscreen->debug_flags & DBG(NO_TILING))
return RADEON_SURF_MODE_LINEAR_ALIGNED;
/* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */
/* Make small textures 1D tiled. */
if (templ->width0 <= 16 || templ->height0 <= 16 ||
- (rscreen->debug_flags & DBG_NO_2D_TILING))
+ (rscreen->debug_flags & DBG(NO_2D_TILING)))
return RADEON_SURF_MODE_1D;
/* The allocator will switch to 1D if needed. */
bool tc_compatible_htile =
rscreen->chip_class >= VI &&
(templ->flags & PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY) &&
- !(rscreen->debug_flags & DBG_NO_HYPERZ) &&
+ !(rscreen->debug_flags & DBG(NO_HYPERZ)) &&
!is_flushed_depth &&
templ->nr_samples <= 1 && /* TC-compat HTILE is less efficient with MSAA */
util_format_is_depth_or_stencil(templ->format);
* displayable surfaces.
*/
if (rctx->chip_class >= VI &&
- !(rctx->screen->debug_flags & DBG_NO_DCC_FB)) {
+ !(rctx->screen->debug_flags & DBG(NO_DCC_FB))) {
vi_separate_dcc_try_enable(rctx, tex);
/* RB+ isn't supported with a CMASK clear only on Stoney,
uint32_t reset_value;
bool clear_words_needed;
- if (rctx->screen->debug_flags & DBG_NO_DCC_CLEAR)
+ if (rctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
continue;
if (!vi_get_fast_clear_parameters(fb->cbufs[i]->format,
info->src.box.z,
info->src.box.z + info->src.box.depth - 1);
- if (sctx->screen->b.debug_flags & DBG_FORCE_DMA &&
+ if (sctx->screen->b.debug_flags & DBG(FORCE_DMA) &&
util_try_blit_via_copy_region(ctx, info))
return;
/* Set the initial dmesg timestamp for this context, so that
* only new messages will be checked for VM faults.
*/
- if (sctx->screen->b.debug_flags & DBG_CHECK_VM)
+ if (sctx->screen->b.debug_flags & DBG(CHECK_VM))
ac_vm_fault_occured(sctx->b.chip_class,
&sctx->dmesg_timestamp, NULL);
}
if (si_check_device_reset(&ctx->b))
return;
- if (ctx->screen->b.debug_flags & DBG_CHECK_VM)
+ if (ctx->screen->b.debug_flags & DBG(CHECK_VM))
flags &= ~RADEON_FLUSH_ASYNC;
/* If the state tracker is flushing the GFX IB, r600_flush_from_st is
ctx->b.num_gfx_cs_flushes++;
/* Check VM faults if needed. */
- if (ctx->screen->b.debug_flags & DBG_CHECK_VM) {
+ if (ctx->screen->b.debug_flags & DBG(CHECK_VM)) {
/* Use conservative timeout 800ms, after which we won't wait any
* longer and assume the GPU is hung.
*/
"+DumpCode,+vgpr-spilling,-fp32-denormals,+fp64-denormals%s%s%s",
sscreen->b.chip_class >= GFX9 ? ",+xnack" : ",-xnack",
sscreen->llvm_has_working_vgpr_indexing ? "" : ",-promote-alloca",
- sscreen->b.debug_flags & DBG_SI_SCHED ? ",+si-scheduler" : "");
+ sscreen->b.debug_flags & DBG(SI_SCHED) ? ",+si-scheduler" : "");
return LLVMCreateTargetMachine(ac_get_llvm_target(triple), triple,
si_get_llvm_processor_name(sscreen->b.family),
else
si_init_dma_functions(sctx);
- if (sscreen->b.debug_flags & DBG_FORCE_DMA)
+ if (sscreen->b.debug_flags & DBG(FORCE_DMA))
sctx->b.b.resource_copy_region = sctx->b.dma_copy;
sctx->blitter = util_blitter_create(&sctx->b.b);
struct si_screen *sscreen = (struct si_screen *)screen;
struct pipe_context *ctx;
- if (sscreen->b.debug_flags & DBG_CHECK_VM)
+ if (sscreen->b.debug_flags & DBG(CHECK_VM))
flags |= PIPE_CONTEXT_DEBUG;
ctx = si_create_context(screen, flags);
return 4;
case PIPE_CAP_GLSL_FEATURE_LEVEL:
- if (sscreen->b.debug_flags & DBG_NIR)
+ if (sscreen->b.debug_flags & DBG(NIR))
return 140; /* no geometry and tessellation shaders yet */
if (si_have_tgsi_compute(sscreen))
return 450;
case PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT:
return 32;
case PIPE_SHADER_CAP_PREFERRED_IR:
- if (sscreen->b.debug_flags & DBG_NIR &&
+ if (sscreen->b.debug_flags & DBG(NIR) &&
(shader == PIPE_SHADER_VERTEX ||
shader == PIPE_SHADER_FRAGMENT))
return PIPE_SHADER_IR_NIR;
r600_resource(buf)->gpu_address = 0; /* cause a VM fault */
- if (sscreen->b.debug_flags & DBG_TEST_VMFAULT_CP) {
+ if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_CP)) {
si_copy_buffer(sctx, buf, buf, 0, 4, 4, 0);
ctx->flush(ctx, NULL, 0);
puts("VM fault test: CP - done.");
}
- if (sscreen->b.debug_flags & DBG_TEST_VMFAULT_SDMA) {
+ if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_SDMA)) {
sctx->b.dma_clear_buffer(ctx, buf, 0, 4, 0);
ctx->flush(ctx, NULL, 0);
puts("VM fault test: SDMA - done.");
}
- if (sscreen->b.debug_flags & DBG_TEST_VMFAULT_SHADER) {
+ if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_SHADER)) {
util_test_constant_buffer(ctx, buf);
puts("VM fault test: Shader - done.");
}
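
These three tests are opt-in and reached via the environment, e.g. R600_DEBUG=testvmfaultshader; si_screen_create() checks the combined TEST_VMFAULT bits and dispatches to si_test_vmfault(), as in the screen-creation hunk further below.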
*/
if (driQueryOptionb(config->options,
"glsl_correct_derivatives_after_discard"))
- sscreen->b.debug_flags |= DBG_FS_CORRECT_DERIVS_AFTER_KILL;
+ sscreen->b.debug_flags |= DBG(FS_CORRECT_DERIVS_AFTER_KILL);
if (driQueryOptionb(config->options, "radeonsi_enable_sisched"))
- sscreen->b.debug_flags |= DBG_SI_SCHED;
+ sscreen->b.debug_flags |= DBG(SI_SCHED);
if (!si_common_screen_init(&sscreen->b, ws) ||
!si_init_gs_info(sscreen) ||
sscreen->has_out_of_order_rast = sscreen->b.chip_class >= VI &&
sscreen->b.info.max_se >= 2 &&
- !(sscreen->b.debug_flags & DBG_NO_OUT_OF_ORDER);
+ !(sscreen->b.debug_flags & DBG(NO_OUT_OF_ORDER));
sscreen->assume_no_z_fights =
driQueryOptionb(config->options, "radeonsi_assume_no_z_fights");
sscreen->commutative_blend_add =
sscreen->b.family == CHIP_VEGA10 ||
sscreen->b.family == CHIP_RAVEN;
sscreen->dpbb_allowed = sscreen->b.chip_class >= GFX9 &&
- !(sscreen->b.debug_flags & DBG_NO_DPBB);
+ !(sscreen->b.debug_flags & DBG(NO_DPBB));
sscreen->dfsm_allowed = sscreen->dpbb_allowed &&
- !(sscreen->b.debug_flags & DBG_NO_DFSM);
+ !(sscreen->b.debug_flags & DBG(NO_DFSM));
/* While it would be nice not to have this flag, we are constrained
* by the reality that LLVM 5.0 doesn't have working VGPR indexing
sscreen->b.has_rbplus = true;
sscreen->b.rbplus_allowed =
- !(sscreen->b.debug_flags & DBG_NO_RB_PLUS) &&
+ !(sscreen->b.debug_flags & DBG(NO_RB_PLUS)) &&
(sscreen->b.family == CHIP_STONEY ||
sscreen->b.family == CHIP_RAVEN);
}
(void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
sscreen->use_monolithic_shaders =
- (sscreen->b.debug_flags & DBG_MONOLITHIC_SHADERS) != 0;
+ (sscreen->b.debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;
sscreen->b.barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
SI_CONTEXT_INV_VMEM_L1;
/* Create the auxiliary context. This must be done last. */
sscreen->b.aux_context = si_create_context(&sscreen->b.b, 0);
- if (sscreen->b.debug_flags & DBG_TEST_DMA)
+ if (sscreen->b.debug_flags & DBG(TEST_DMA))
si_test_dma(&sscreen->b);
- if (sscreen->b.debug_flags & (DBG_TEST_VMFAULT_CP |
- DBG_TEST_VMFAULT_SDMA |
- DBG_TEST_VMFAULT_SHADER))
+ if (sscreen->b.debug_flags & (DBG(TEST_VMFAULT_CP) |
+ DBG(TEST_VMFAULT_SDMA) |
+ DBG(TEST_VMFAULT_SHADER)))
si_test_vmfault(sscreen);
return &sscreen->b.b;
"no-signed-zeros-fp-math",
"true");
- if (ctx->screen->b.debug_flags & DBG_UNSAFE_MATH) {
+ if (ctx->screen->b.debug_flags & DBG(UNSAFE_MATH)) {
/* These were copied from some LLVM test. */
LLVMAddTargetDependentFunctionAttr(ctx->main_fn,
"less-precise-fpmad",
if (!check_debug_option ||
(si_can_dump_shader(&sscreen->b, processor) &&
- !(sscreen->b.debug_flags & DBG_NO_ASM))) {
+ !(sscreen->b.debug_flags & DBG(NO_ASM)))) {
fprintf(file, "\n%s:\n", si_get_shader_name(shader, processor));
if (shader->prolog)
if (si_can_dump_shader(&sscreen->b, processor)) {
fprintf(stderr, "radeonsi: Compiling shader %d\n", count);
- if (!(sscreen->b.debug_flags & (DBG_NO_IR | DBG_PREOPT_IR))) {
+ if (!(sscreen->b.debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
fprintf(stderr, "%s LLVM IR:\n\n", name);
ac_dump_module(mod);
fprintf(stderr, "\n");
}
if (ctx->type == PIPE_SHADER_FRAGMENT && sel->info.uses_kill &&
- ctx->screen->b.debug_flags & DBG_FS_CORRECT_DERIVS_AFTER_KILL) {
+ ctx->screen->b.debug_flags & DBG(FS_CORRECT_DERIVS_AFTER_KILL)) {
/* This is initialized to 0.0 = not kill. */
ctx->postponed_kill = lp_build_alloca(&ctx->gallivm, ctx->f32, "");
}
/* Dump TGSI code before doing TGSI->LLVM conversion in case the
* conversion fails. */
if (si_can_dump_shader(&sscreen->b, sel->info.processor) &&
- !(sscreen->b.debug_flags & DBG_NO_TGSI)) {
+ !(sscreen->b.debug_flags & DBG(NO_TGSI))) {
if (sel->tokens)
tgsi_dump(sel->tokens, 0);
else
LLVMDisposeTargetData(data_layout);
LLVMDisposeMessage(data_layout_str);
- bool unsafe_fpmath = (sscreen->b.debug_flags & DBG_UNSAFE_MATH) != 0;
+ bool unsafe_fpmath = (sscreen->b.debug_flags & DBG(UNSAFE_MATH)) != 0;
enum lp_float_mode float_mode =
unsafe_fpmath ? LP_FLOAT_MODE_UNSAFE_FP_MATH :
LP_FLOAT_MODE_NO_SIGNED_ZEROS_FP_MATH;
LLVMTargetLibraryInfoRef target_library_info;
/* Dump LLVM IR before any optimization passes */
- if (ctx->screen->b.debug_flags & DBG_PREOPT_IR &&
+ if (ctx->screen->b.debug_flags & DBG(PREOPT_IR) &&
si_can_dump_shader(&ctx->screen->b, ctx->type))
LLVMDumpModule(ctx->gallivm.module);
/* This is a hardware requirement. */
if (key->u.line_stipple_enabled ||
- (sscreen->b.debug_flags & DBG_SWITCH_ON_EOP)) {
+ (sscreen->b.debug_flags & DBG(SWITCH_ON_EOP))) {
ia_switch_on_eop = true;
wd_switch_on_eop = true;
}
assert(0);
}
- if (unlikely(sctx->screen->b.debug_flags & DBG_NO_OPT_VARIANT))
+ if (unlikely(sctx->screen->b.debug_flags & DBG(NO_OPT_VARIANT)))
memset(&key->opt, 0, sizeof(key->opt));
}
}
/* Pre-compilation. */
- if (sscreen->b.debug_flags & DBG_PRECOMPILE &&
+ if (sscreen->b.debug_flags & DBG(PRECOMPILE) &&
/* GFX9 needs LS or ES for compilation, which we don't have here. */
(sscreen->b.chip_class <= VI ||
(sel->type != PIPE_SHADER_TESS_CTRL &&