/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_public.h"
#include "si_shader_internal.h"

#include "ac_llvm_util.h"
#include "radeon/radeon_uvd.h"
#include "gallivm/lp_bld_misc.h"
#include "util/disk_cache.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_suballoc.h"
#include "util/u_tests.h"
#include "util/u_upload_mgr.h"
#include "util/xmlconfig.h"
#include "vl/vl_decoder.h"
#include "driver_ddebug/dd_util.h"

static const struct debug_named_value debug_options[] = {
	/* Shader logging options: */
	{ "vs", DBG(VS), "Print vertex shaders" },
	{ "ps", DBG(PS), "Print pixel shaders" },
	{ "gs", DBG(GS), "Print geometry shaders" },
	{ "tcs", DBG(TCS), "Print tessellation control shaders" },
	{ "tes", DBG(TES), "Print tessellation evaluation shaders" },
	{ "cs", DBG(CS), "Print compute shaders" },
	{ "noir", DBG(NO_IR), "Don't print the LLVM IR" },
	{ "notgsi", DBG(NO_TGSI), "Don't print the TGSI" },
	{ "noasm", DBG(NO_ASM), "Don't print disassembled shaders" },
	{ "preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations" },

	/* Shader compiler options the shader cache should be aware of: */
	{ "unsafemath", DBG(UNSAFE_MATH), "Enable unsafe math shader optimizations" },
	{ "sisched", DBG(SI_SCHED), "Enable LLVM SI Machine Instruction Scheduler." },
	{ "gisel", DBG(GISEL), "Enable LLVM global instruction selector." },

	/* Shader compiler options (with no effect on the shader cache): */
	{ "checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR" },
	{ "nir", DBG(NIR), "Enable experimental NIR shaders" },
	{ "mono", DBG(MONOLITHIC_SHADERS), "Use old-style monolithic shaders compiled on demand" },
	{ "nooptvariant", DBG(NO_OPT_VARIANT), "Disable compiling optimized shader variants." },

	/* Information logging options: */
	{ "info", DBG(INFO), "Print driver information" },
	{ "tex", DBG(TEX), "Print texture info" },
	{ "compute", DBG(COMPUTE), "Print compute info" },
	{ "vm", DBG(VM), "Print virtual addresses when creating resources" },

	/* Driver options: */
	{ "forcedma", DBG(FORCE_DMA), "Use asynchronous DMA for all operations when possible." },
	{ "nodma", DBG(NO_ASYNC_DMA), "Disable asynchronous DMA" },
	{ "nowc", DBG(NO_WC), "Disable GTT write combining" },
	{ "check_vm", DBG(CHECK_VM), "Check VM faults and dump debug info." },
	{ "reserve_vmid", DBG(RESERVE_VMID), "Force VMID reservation per context." },
	{ "zerovram", DBG(ZERO_VRAM), "Clear VRAM allocations." },

	/* 3D engine options: */
	{ "switch_on_eop", DBG(SWITCH_ON_EOP), "Program WD/IA to switch on end-of-packet." },
	{ "nooutoforder", DBG(NO_OUT_OF_ORDER), "Disable out-of-order rasterization" },
	{ "nodpbb", DBG(NO_DPBB), "Disable DPBB." },
	{ "nodfsm", DBG(NO_DFSM), "Disable DFSM." },
	{ "dpbb", DBG(DPBB), "Enable DPBB." },
	{ "dfsm", DBG(DFSM), "Enable DFSM." },
	{ "nohyperz", DBG(NO_HYPERZ), "Disable Hyper-Z" },
	{ "norbplus", DBG(NO_RB_PLUS), "Disable RB+." },
	{ "no2d", DBG(NO_2D_TILING), "Disable 2D tiling" },
	{ "notiling", DBG(NO_TILING), "Disable tiling" },
	{ "nodcc", DBG(NO_DCC), "Disable DCC." },
	{ "nodccclear", DBG(NO_DCC_CLEAR), "Disable DCC fast clear." },
	{ "nodccfb", DBG(NO_DCC_FB), "Disable separate DCC on the main framebuffer" },
	{ "nodccmsaa", DBG(NO_DCC_MSAA), "Disable DCC for MSAA" },
	{ "nofmask", DBG(NO_FMASK), "Disable MSAA compression" },

	/* Tests: */
	{ "testdma", DBG(TEST_DMA), "Invoke SDMA tests and exit." },
	{ "testvmfaultcp", DBG(TEST_VMFAULT_CP), "Invoke a CP VM fault test and exit." },
	{ "testvmfaultsdma", DBG(TEST_VMFAULT_SDMA), "Invoke a SDMA VM fault test and exit." },
	{ "testvmfaultshader", DBG(TEST_VMFAULT_SHADER), "Invoke a shader VM fault test and exit." },
	{ "testdmaperf", DBG(TEST_DMA_PERF), "Test DMA performance" },
	{ "testgds", DBG(TEST_GDS), "Test GDS." },
	{ "testgdsmm", DBG(TEST_GDS_MM), "Test GDS memory management." },
	{ "testgdsoamm", DBG(TEST_GDS_OA_MM), "Test GDS OA memory management." },

	DEBUG_NAMED_VALUE_END /* must be last */
};
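
/* Usage note (editor's sketch, not in the original source): these names are
 * parsed from the comma-separated R600_DEBUG environment variable by
 * debug_get_flags_option() in radeonsi_screen_create() below, e.g.:
 *
 *     R600_DEBUG=vs,ps,noir <some GL application>
 *
 * The flag names come from the table above; the example invocation is
 * illustrative only.
 */
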
static void si_init_compiler(struct si_screen *sscreen,
			     struct ac_llvm_compiler *compiler)
{
	/* Only create the less-optimizing version of the compiler on APUs
	 * predating Ryzen (Raven). */
	bool create_low_opt_compiler = !sscreen->info.has_dedicated_vram &&
				       sscreen->info.chip_class <= VI;

	enum ac_target_machine_options tm_options =
		(sscreen->debug_flags & DBG(SI_SCHED) ? AC_TM_SISCHED : 0) |
		(sscreen->debug_flags & DBG(GISEL) ? AC_TM_ENABLE_GLOBAL_ISEL : 0) |
		(sscreen->info.chip_class >= GFX9 ? AC_TM_FORCE_ENABLE_XNACK : 0) |
		(sscreen->info.chip_class < GFX9 ? AC_TM_FORCE_DISABLE_XNACK : 0) |
		(!sscreen->llvm_has_working_vgpr_indexing ? AC_TM_PROMOTE_ALLOCA_TO_SCRATCH : 0) |
		(sscreen->debug_flags & DBG(CHECK_IR) ? AC_TM_CHECK_IR : 0) |
		(create_low_opt_compiler ? AC_TM_CREATE_LOW_OPT : 0);

	ac_init_llvm_compiler(compiler, sscreen->info.family, tm_options);
	compiler->passes = ac_create_llvm_passes(compiler->tm);

	if (compiler->low_opt_tm)
		compiler->low_opt_passes = ac_create_llvm_passes(compiler->low_opt_tm);
}

static void si_destroy_compiler(struct ac_llvm_compiler *compiler)
{
	ac_destroy_llvm_passes(compiler->passes);
	ac_destroy_llvm_passes(compiler->low_opt_passes);
	ac_destroy_llvm_compiler(compiler);
}

/*
 * pipe_context
 */
static void si_destroy_context(struct pipe_context *context)
{
	struct si_context *sctx = (struct si_context *)context;
	int i;

	/* Unreference the framebuffer normally to disable related logic
	 * properly.
	 */
	struct pipe_framebuffer_state fb = {};
	if (context->set_framebuffer_state)
		context->set_framebuffer_state(context, &fb);

	si_release_all_descriptors(sctx);

	pipe_resource_reference(&sctx->esgs_ring, NULL);
	pipe_resource_reference(&sctx->gsvs_ring, NULL);
	pipe_resource_reference(&sctx->tess_rings, NULL);
	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
	pipe_resource_reference(&sctx->sample_pos_buffer, NULL);
	r600_resource_reference(&sctx->border_color_buffer, NULL);
	free(sctx->border_color_table);
	r600_resource_reference(&sctx->scratch_buffer, NULL);
	r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
	r600_resource_reference(&sctx->wait_mem_scratch, NULL);

	si_pm4_free_state(sctx, sctx->init_config, ~0);
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	for (i = 0; i < ARRAY_SIZE(sctx->vgt_shader_config); i++)
		si_pm4_delete_state(sctx, vgt_shader_config, sctx->vgt_shader_config[i]);

	if (sctx->fixed_func_tcs_shader.cso)
		sctx->b.delete_tcs_state(&sctx->b, sctx->fixed_func_tcs_shader.cso);
	if (sctx->custom_dsa_flush)
		sctx->b.delete_depth_stencil_alpha_state(&sctx->b, sctx->custom_dsa_flush);
	if (sctx->custom_blend_resolve)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_resolve);
	if (sctx->custom_blend_fmask_decompress)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_fmask_decompress);
	if (sctx->custom_blend_eliminate_fastclear)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_eliminate_fastclear);
	if (sctx->custom_blend_dcc_decompress)
		sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_dcc_decompress);
	if (sctx->vs_blit_pos)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_pos);
	if (sctx->vs_blit_pos_layered)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_pos_layered);
	if (sctx->vs_blit_color)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_color);
	if (sctx->vs_blit_color_layered)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_color_layered);
	if (sctx->vs_blit_texcoord)
		sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_texcoord);
	if (sctx->cs_clear_buffer)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_buffer);
	if (sctx->cs_copy_buffer)
		sctx->b.delete_compute_state(&sctx->b, sctx->cs_copy_buffer);

	if (sctx->blitter)
		util_blitter_destroy(sctx->blitter);

	/* Release DCC stats. */
	for (int i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
		assert(!sctx->dcc_stats[i].query_active);

		for (int j = 0; j < ARRAY_SIZE(sctx->dcc_stats[i].ps_stats); j++)
			if (sctx->dcc_stats[i].ps_stats[j])
				sctx->b.destroy_query(&sctx->b,
						      sctx->dcc_stats[i].ps_stats[j]);

		si_texture_reference(&sctx->dcc_stats[i].tex, NULL);
	}

	if (sctx->query_result_shader)
		sctx->b.delete_compute_state(&sctx->b, sctx->query_result_shader);

	if (sctx->gfx_cs)
		sctx->ws->cs_destroy(sctx->gfx_cs);
	if (sctx->dma_cs)
		sctx->ws->cs_destroy(sctx->dma_cs);
	if (sctx->ctx)
		sctx->ws->ctx_destroy(sctx->ctx);

	if (sctx->b.stream_uploader)
		u_upload_destroy(sctx->b.stream_uploader);
	if (sctx->b.const_uploader)
		u_upload_destroy(sctx->b.const_uploader);
	if (sctx->cached_gtt_allocator)
		u_upload_destroy(sctx->cached_gtt_allocator);

	slab_destroy_child(&sctx->pool_transfers);
	slab_destroy_child(&sctx->pool_transfers_unsync);

	if (sctx->allocator_zeroed_memory)
		u_suballocator_destroy(sctx->allocator_zeroed_memory);

	sctx->ws->fence_reference(&sctx->last_gfx_fence, NULL);
	sctx->ws->fence_reference(&sctx->last_sdma_fence, NULL);
	r600_resource_reference(&sctx->eop_bug_scratch, NULL);

	si_destroy_compiler(&sctx->compiler);

	si_saved_cs_reference(&sctx->current_saved_cs, NULL);

	_mesa_hash_table_destroy(sctx->tex_handles, NULL);
	_mesa_hash_table_destroy(sctx->img_handles, NULL);

	util_dynarray_fini(&sctx->resident_tex_handles);
	util_dynarray_fini(&sctx->resident_img_handles);
	util_dynarray_fini(&sctx->resident_tex_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
	FREE(sctx);
}

static enum pipe_reset_status si_get_reset_status(struct pipe_context *ctx)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (sctx->screen->info.has_gpu_reset_status_query)
		return sctx->ws->ctx_query_reset_status(sctx->ctx);

	if (sctx->screen->info.has_gpu_reset_counter_query) {
		unsigned latest = sctx->ws->query_value(sctx->ws,
							RADEON_GPU_RESET_COUNTER);

		if (sctx->gpu_reset_counter == latest)
			return PIPE_NO_RESET;

		sctx->gpu_reset_counter = latest;
		return PIPE_UNKNOWN_CONTEXT_RESET;
	}

	return PIPE_NO_RESET;
}

static void si_set_device_reset_callback(struct pipe_context *ctx,
					 const struct pipe_device_reset_callback *cb)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (cb)
		sctx->device_reset_callback = *cb;
	else
		memset(&sctx->device_reset_callback, 0,
		       sizeof(sctx->device_reset_callback));
}

bool si_check_device_reset(struct si_context *sctx)
{
	enum pipe_reset_status status;

	if (!sctx->device_reset_callback.reset)
		return false;

	if (!sctx->b.get_device_reset_status)
		return false;

	status = sctx->b.get_device_reset_status(&sctx->b);
	if (status == PIPE_NO_RESET)
		return false;

	sctx->device_reset_callback.reset(sctx->device_reset_callback.data, status);
	return true;
}

/* Apitrace profiling:
 *   1) qapitrace : Tools -> Profile: Measure CPU & GPU times
 *   2) In the middle panel, zoom in (mouse wheel) on some bad draw call
 *      and remember its number.
 *   3) In Mesa, enable queries and performance counters around that draw
 *      call and print the results.
 *   4) glretrace --benchmark --markers ..
 */
static void si_emit_string_marker(struct pipe_context *ctx,
				  const char *string, int len)
{
	struct si_context *sctx = (struct si_context *)ctx;

	dd_parse_apitrace_marker(string, len, &sctx->apitrace_call_number);

	if (sctx->log)
		u_log_printf(sctx->log, "\nString marker: %*s\n", len, string);
}

static void si_set_debug_callback(struct pipe_context *ctx,
				  const struct pipe_debug_callback *cb)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *screen = sctx->screen;

	util_queue_finish(&screen->shader_compiler_queue);
	util_queue_finish(&screen->shader_compiler_queue_low_priority);

	if (cb)
		sctx->debug = *cb;
	else
		memset(&sctx->debug, 0, sizeof(sctx->debug));
}

static void si_set_log_context(struct pipe_context *ctx,
			       struct u_log_context *log)
{
	struct si_context *sctx = (struct si_context *)ctx;
	sctx->log = log;

	if (log)
		u_log_add_auto_logger(log, si_auto_log_cs, sctx);
}

static void si_set_context_param(struct pipe_context *ctx,
				 enum pipe_context_param param,
				 unsigned value)
{
	struct radeon_winsys *ws = ((struct si_context *)ctx)->ws;

	switch (param) {
	case PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE:
		ws->pin_threads_to_L3_cache(ws, value);
		break;
	default:;
	}
}

static struct pipe_context *si_create_context(struct pipe_screen *screen,
					      unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	int shader, i;
	bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;

	if (!sctx)
		return NULL;

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.screen = screen; /* this must be set first */
	sctx->b.priv = NULL;
	sctx->b.destroy = si_destroy_context;
	sctx->b.emit_string_marker = si_emit_string_marker;
	sctx->b.set_debug_callback = si_set_debug_callback;
	sctx->b.set_log_context = si_set_log_context;
	sctx->b.set_context_param = si_set_context_param;
	sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	slab_create_child(&sctx->pool_transfers, &sscreen->pool_transfers);
	slab_create_child(&sctx->pool_transfers_unsync, &sscreen->pool_transfers);

	sctx->ws = sscreen->ws;
	sctx->family = sscreen->info.family;
	sctx->chip_class = sscreen->info.chip_class;

	if (sscreen->info.has_gpu_reset_counter_query) {
		sctx->gpu_reset_counter =
			sctx->ws->query_value(sctx->ws, RADEON_GPU_RESET_COUNTER);
	}

	sctx->b.get_device_reset_status = si_get_reset_status;
	sctx->b.set_device_reset_callback = si_set_device_reset_callback;

	si_init_context_texture_functions(sctx);
	si_init_query_functions(sctx);

	if (sctx->chip_class == CIK ||
	    sctx->chip_class == VI ||
	    sctx->chip_class == GFX9) {
		sctx->eop_bug_scratch = r600_resource(
			pipe_buffer_create(&sscreen->b, 0, PIPE_USAGE_DEFAULT,
					   16 * sscreen->info.num_render_backends));
		if (!sctx->eop_bug_scratch)
			goto fail;
	}

	sctx->allocator_zeroed_memory =
		u_suballocator_create(&sctx->b, sscreen->info.gart_page_size,
				      0, PIPE_USAGE_DEFAULT,
				      SI_RESOURCE_FLAG_SO_FILLED_SIZE, true);
	if (!sctx->allocator_zeroed_memory)
		goto fail;

	sctx->b.stream_uploader = u_upload_create(&sctx->b, 1024 * 1024,
						  0, PIPE_USAGE_STREAM,
						  SI_RESOURCE_FLAG_READ_ONLY);
	if (!sctx->b.stream_uploader)
		goto fail;

	sctx->b.const_uploader = u_upload_create(&sctx->b, 128 * 1024,
						 0, PIPE_USAGE_DEFAULT,
						 SI_RESOURCE_FLAG_32BIT |
						 (sscreen->cpdma_prefetch_writes_memory ?
						  0 : SI_RESOURCE_FLAG_READ_ONLY));
	if (!sctx->b.const_uploader)
		goto fail;

	sctx->cached_gtt_allocator = u_upload_create(&sctx->b, 16 * 1024,
						     0, PIPE_USAGE_STAGING, 0);
	if (!sctx->cached_gtt_allocator)
		goto fail;

	sctx->ctx = sctx->ws->ctx_create(sctx->ws);
	if (!sctx->ctx)
		goto fail;

	if (sscreen->info.num_sdma_rings && !(sscreen->debug_flags & DBG(NO_ASYNC_DMA))) {
		sctx->dma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA,
						   (void*)si_flush_dma_cs,
						   sctx, stop_exec_on_failure);
	}

	si_init_buffer_functions(sctx);
	si_init_clear_functions(sctx);
	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_compute_blit_functions(sctx);
	si_init_debug_functions(sctx);
	si_init_msaa_functions(sctx);
	si_init_streamout_functions(sctx);

	if (sscreen->info.has_hw_decode) {
		sctx->b.create_video_codec = si_uvd_create_decoder;
		sctx->b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.create_video_codec = vl_create_decoder;
		sctx->b.create_video_buffer = vl_video_buffer_create;
	}

	sctx->gfx_cs = ws->cs_create(sctx->ctx, RING_GFX,
				     (void*)si_flush_gfx_cs, sctx, stop_exec_on_failure);

	/* Border colors. */
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = r600_resource(
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table)));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;

	si_init_all_descriptors(sctx);
	si_init_fence_functions(sctx);
	si_init_state_functions(sctx);
	si_init_shader_functions(sctx);
	si_init_viewport_functions(sctx);

	if (sctx->chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	if (sscreen->debug_flags & DBG(FORCE_DMA))
		sctx->b.resource_copy_region = sctx->dma_copy;

	sctx->blitter = util_blitter_create(&sctx->b);
	if (sctx->blitter == NULL)
		goto fail;
	sctx->blitter->skip_viewport_restore = true;

	si_init_draw_functions(sctx);

	sctx->sample_mask = 0xffff;

	if (sctx->chip_class >= GFX9) {
		sctx->wait_mem_scratch = r600_resource(
			pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4));
		if (!sctx->wait_mem_scratch)
			goto fail;

		/* Initialize the memory. */
		si_cp_write_data(sctx, sctx->wait_mem_scratch, 0, 4,
				 V_370_MEM, V_370_ME, &sctx->wait_mem_number);
	}

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->chip_class == CIK) {
		sctx->null_const_buf.buffer =
			pipe_aligned_buffer_create(screen,
						   SI_RESOURCE_FLAG_32BIT,
						   PIPE_USAGE_DEFAULT, 16,
						   sctx->screen->info.tcc_cache_line_size);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.set_constant_buffer(&sctx->b, shader, i,
							    &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);
	}

	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
	sctx->scratch_waves = MAX2(32 * sscreen->info.num_good_compute_units,
				   max_threads_per_block / 64);
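	/* Worked example (editor's illustration, not from the source): with
	 * 64 compute units and a reported maximum of 1024 threads per block,
	 * this is MAX2(32 * 64, 1024 / 64) = MAX2(2048, 16) = 2048 waves,
	 * i.e. the per-CU term dominates on any realistic configuration. */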

	si_init_compiler(sscreen, &sctx->compiler);

	/* Bindless handles. */
	sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);
	sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);

	util_dynarray_init(&sctx->resident_tex_handles, NULL);
	util_dynarray_init(&sctx->resident_img_handles, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

	sctx->sample_pos_buffer =
		pipe_buffer_create(sctx->b.screen, 0, PIPE_USAGE_DEFAULT,
				   sizeof(sctx->sample_positions));
	pipe_buffer_write(&sctx->b, sctx->sample_pos_buffer, 0,
			  sizeof(sctx->sample_positions), &sctx->sample_positions);

	/* this must be last */
	si_begin_new_gfx_cs(sctx);

	if (sctx->chip_class == CIK) {
		/* Clear the NULL constant buffer, because loads should return zeros. */
		uint32_t clear_value = 0;
		si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0,
				sctx->null_const_buf.buffer->width0,
				&clear_value, 4, SI_COHERENCY_SHADER);
	}
	return &sctx->b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b);
	return NULL;
}

static struct pipe_context *si_pipe_create_context(struct pipe_screen *screen,
						   void *priv, unsigned flags)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct pipe_context *ctx;

	if (sscreen->debug_flags & DBG(CHECK_VM))
		flags |= PIPE_CONTEXT_DEBUG;

	ctx = si_create_context(screen, flags);

	if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
		return ctx;

	/* Clover (compute-only) is unsupported. */
	if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
		return ctx;

	/* When shaders are logged to stderr, asynchronous compilation is
	 * disabled too. */
	if (sscreen->debug_flags & DBG_ALL_SHADERS)
		return ctx;

	/* Use asynchronous flushes only on amdgpu, since the radeon
	 * implementation for fence_server_sync is incomplete. */
	return threaded_context_create(ctx, &sscreen->pool_transfers,
				       si_replace_buffer_storage,
				       sscreen->info.drm_major >= 3 ? si_create_fence : NULL,
				       &((struct si_context *)ctx)->tc);
}

/*
 * pipe_screen
 */
static void si_destroy_screen(struct pipe_screen *pscreen)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;
	struct si_shader_part *parts[] = {
		sscreen->vs_prologs,
		sscreen->tcs_epilogs,
		sscreen->gs_prologs,
		sscreen->ps_prologs,
		sscreen->ps_epilogs
	};
	unsigned i;

	if (!sscreen->ws->unref(sscreen->ws))
		return;

	util_queue_destroy(&sscreen->shader_compiler_queue);
	util_queue_destroy(&sscreen->shader_compiler_queue_low_priority);

	for (i = 0; i < ARRAY_SIZE(sscreen->compiler); i++)
		si_destroy_compiler(&sscreen->compiler[i]);

	for (i = 0; i < ARRAY_SIZE(sscreen->compiler_lowp); i++)
		si_destroy_compiler(&sscreen->compiler_lowp[i]);

	/* Free shader parts. */
	for (i = 0; i < ARRAY_SIZE(parts); i++) {
		while (parts[i]) {
			struct si_shader_part *part = parts[i];

			parts[i] = part->next;
			ac_shader_binary_clean(&part->binary);
			FREE(part);
		}
	}
	mtx_destroy(&sscreen->shader_parts_mutex);
	si_destroy_shader_cache(sscreen);

	si_destroy_perfcounters(sscreen);
	si_gpu_load_kill_thread(sscreen);

	mtx_destroy(&sscreen->gpu_load_mutex);
	mtx_destroy(&sscreen->aux_context_lock);
	sscreen->aux_context->destroy(sscreen->aux_context);

	slab_destroy_parent(&sscreen->pool_transfers);

	disk_cache_destroy(sscreen->disk_shader_cache);
	sscreen->ws->destroy(sscreen->ws);
	FREE(sscreen);
}

static void si_init_gs_info(struct si_screen *sscreen)
{
	sscreen->gs_table_depth = ac_get_gs_table_depth(sscreen->info.chip_class,
							sscreen->info.family);
}

static void si_test_vmfault(struct si_screen *sscreen)
{
	struct pipe_context *ctx = sscreen->aux_context;
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_resource *buf =
		pipe_buffer_create_const0(&sscreen->b, 0, PIPE_USAGE_DEFAULT, 64);

	if (!buf) {
		puts("Buffer allocation failed.");
		exit(1);
	}

	r600_resource(buf)->gpu_address = 0; /* cause a VM fault */

	if (sscreen->debug_flags & DBG(TEST_VMFAULT_CP)) {
		si_cp_dma_copy_buffer(sctx, buf, buf, 0, 4, 4, 0,
				      SI_COHERENCY_NONE, L2_BYPASS);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: CP - done.");
	}
	if (sscreen->debug_flags & DBG(TEST_VMFAULT_SDMA)) {
		si_sdma_clear_buffer(sctx, buf, 0, 4, 0);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: SDMA - done.");
	}
	if (sscreen->debug_flags & DBG(TEST_VMFAULT_SHADER)) {
		util_test_constant_buffer(ctx, buf);
		puts("VM fault test: Shader - done.");
	}
	exit(0);
}

static void si_test_gds_memory_management(struct si_context *sctx,
					  unsigned alloc_size, unsigned alignment,
					  enum radeon_bo_domain domain)
{
	struct radeon_winsys *ws = sctx->ws;
	struct radeon_cmdbuf *cs[8];
	struct pb_buffer *gds_bo[ARRAY_SIZE(cs)];

	for (unsigned i = 0; i < ARRAY_SIZE(cs); i++) {
		cs[i] = ws->cs_create(sctx->ctx, RING_COMPUTE,
				      NULL, NULL, false);
		gds_bo[i] = ws->buffer_create(ws, alloc_size, alignment, domain, 0);
		assert(gds_bo[i]);
	}

	for (unsigned iterations = 0; iterations < 20000; iterations++) {
		for (unsigned i = 0; i < ARRAY_SIZE(cs); i++) {
			/* This clears GDS with CP DMA.
			 *
			 * We don't care if GDS is present. Just add some packet
			 * to make the GPU busy for a moment.
			 */
			si_cp_dma_clear_buffer(sctx, cs[i], NULL, 0, alloc_size, 0,
					       SI_CPDMA_SKIP_BO_LIST_UPDATE |
					       SI_CPDMA_SKIP_CHECK_CS_SPACE |
					       SI_CPDMA_SKIP_GFX_SYNC, 0, 0);

			ws->cs_add_buffer(cs[i], gds_bo[i], domain,
					  RADEON_USAGE_READWRITE, 0);
			ws->cs_flush(cs[i], PIPE_FLUSH_ASYNC, NULL);
		}
	}
	exit(0);
}

static void si_disk_cache_create(struct si_screen *sscreen)
{
	/* Don't use the cache if shader dumping is enabled. */
	if (sscreen->debug_flags & DBG_ALL_SHADERS)
		return;

	struct mesa_sha1 ctx;
	unsigned char sha1[20];
	char cache_id[20 * 2 + 1];

	_mesa_sha1_init(&ctx);

	if (!disk_cache_get_function_identifier(si_disk_cache_create, &ctx) ||
	    !disk_cache_get_function_identifier(LLVMInitializeAMDGPUTargetInfo,
						&ctx))
		return;

	_mesa_sha1_final(&ctx, sha1);
	disk_cache_format_hex_id(cache_id, sha1, 20 * 2);

	/* These flags affect shader compilation. */
	#define ALL_FLAGS (DBG(FS_CORRECT_DERIVS_AFTER_KILL) | \
			   DBG(SI_SCHED) | \
			   DBG(GISEL) | \
			   DBG(UNSAFE_MATH) | \
			   DBG(NIR))
	uint64_t shader_debug_flags = sscreen->debug_flags &
				      ALL_FLAGS;

	/* Add the high bits of 32-bit addresses, which affects
	 * how 32-bit addresses are expanded to 64 bits.
	 */
	STATIC_ASSERT(ALL_FLAGS <= UINT_MAX);
	shader_debug_flags |= (uint64_t)sscreen->info.address32_hi << 32;

	sscreen->disk_shader_cache =
		disk_cache_create(sscreen->info.name,
				  cache_id,
				  shader_debug_flags);
}

*radeonsi_screen_create(struct radeon_winsys
*ws
,
821 const struct pipe_screen_config
*config
)
823 struct si_screen
*sscreen
= CALLOC_STRUCT(si_screen
);
824 unsigned hw_threads
, num_comp_hi_threads
, num_comp_lo_threads
, i
;
831 ws
->query_info(ws
, &sscreen
->info
);
833 if (sscreen
->info
.chip_class
>= GFX9
) {
834 sscreen
->se_tile_repeat
= 32 * sscreen
->info
.max_se
;
836 ac_get_raster_config(&sscreen
->info
,
837 &sscreen
->pa_sc_raster_config
,
838 &sscreen
->pa_sc_raster_config_1
,
839 &sscreen
->se_tile_repeat
);
842 sscreen
->debug_flags
= debug_get_flags_option("R600_DEBUG",
845 /* Set functions first. */
846 sscreen
->b
.context_create
= si_pipe_create_context
;
847 sscreen
->b
.destroy
= si_destroy_screen
;
849 si_init_screen_get_functions(sscreen
);
850 si_init_screen_buffer_functions(sscreen
);
851 si_init_screen_fence_functions(sscreen
);
852 si_init_screen_state_functions(sscreen
);
853 si_init_screen_texture_functions(sscreen
);
854 si_init_screen_query_functions(sscreen
);
856 /* Set these flags in debug_flags early, so that the shader cache takes
859 if (driQueryOptionb(config
->options
,
860 "glsl_correct_derivatives_after_discard"))
861 sscreen
->debug_flags
|= DBG(FS_CORRECT_DERIVS_AFTER_KILL
);
862 if (driQueryOptionb(config
->options
, "radeonsi_enable_sisched"))
863 sscreen
->debug_flags
|= DBG(SI_SCHED
);
866 if (sscreen
->debug_flags
& DBG(INFO
))
867 ac_print_gpu_info(&sscreen
->info
);
869 slab_create_parent(&sscreen
->pool_transfers
,
870 sizeof(struct si_transfer
), 64);
872 sscreen
->force_aniso
= MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
873 if (sscreen
->force_aniso
>= 0) {
874 printf("radeonsi: Forcing anisotropy filter to %ix\n",
875 /* round down to a power of two */
876 1 << util_logbase2(sscreen
->force_aniso
));
879 (void) mtx_init(&sscreen
->aux_context_lock
, mtx_plain
);
880 (void) mtx_init(&sscreen
->gpu_load_mutex
, mtx_plain
);
882 si_init_gs_info(sscreen
);
883 if (!si_init_shader_cache(sscreen
)) {
888 si_disk_cache_create(sscreen
);
890 /* Determine the number of shader compiler threads. */
891 hw_threads
= sysconf(_SC_NPROCESSORS_ONLN
);
893 if (hw_threads
>= 12) {
894 num_comp_hi_threads
= hw_threads
* 3 / 4;
895 num_comp_lo_threads
= hw_threads
/ 3;
896 } else if (hw_threads
>= 6) {
897 num_comp_hi_threads
= hw_threads
- 2;
898 num_comp_lo_threads
= hw_threads
/ 2;
899 } else if (hw_threads
>= 2) {
900 num_comp_hi_threads
= hw_threads
- 1;
901 num_comp_lo_threads
= hw_threads
/ 2;
903 num_comp_hi_threads
= 1;
904 num_comp_lo_threads
= 1;
907 num_comp_hi_threads
= MIN2(num_comp_hi_threads
,
908 ARRAY_SIZE(sscreen
->compiler
));
909 num_comp_lo_threads
= MIN2(num_comp_lo_threads
,
910 ARRAY_SIZE(sscreen
->compiler_lowp
));
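	/* Worked example (editor's illustration, not from the source): on a
	 * 16-thread CPU, hw_threads = 16 takes the first branch, giving
	 * num_comp_hi_threads = 16 * 3 / 4 = 12 and num_comp_lo_threads =
	 * 16 / 3 = 5, before the clamps to the compiler array sizes above. */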

	if (!util_queue_init(&sscreen->shader_compiler_queue, "sh",
			     64, num_comp_hi_threads,
			     UTIL_QUEUE_INIT_RESIZE_IF_FULL |
			     UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY)) {
		si_destroy_shader_cache(sscreen);
		FREE(sscreen);
		return NULL;
	}

	if (!util_queue_init(&sscreen->shader_compiler_queue_low_priority,
			     "shlo",
			     64, num_comp_lo_threads,
			     UTIL_QUEUE_INIT_RESIZE_IF_FULL |
			     UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY |
			     UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
		si_destroy_shader_cache(sscreen);
		FREE(sscreen);
		return NULL;
	}

	if (!debug_get_bool_option("RADEON_DISABLE_PERFCOUNTERS", false))
		si_init_perfcounters(sscreen);

	/* Determine tessellation ring info. */
	bool double_offchip_buffers = sscreen->info.chip_class >= CIK &&
				      sscreen->info.family != CHIP_CARRIZO &&
				      sscreen->info.family != CHIP_STONEY;
	/* This must be one less than the maximum number due to a hw limitation.
	 * Various hardware bugs in SI, CIK, and GFX9 need this.
	 */
	unsigned max_offchip_buffers_per_se;

	/* Only certain chips can use the maximum value. */
	if (sscreen->info.family == CHIP_VEGA12 ||
	    sscreen->info.family == CHIP_VEGA20)
		max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
	else
		max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;

	unsigned max_offchip_buffers = max_offchip_buffers_per_se *
				       sscreen->info.max_se;
	unsigned offchip_granularity;

	/* Hawaii has a bug with offchip buffers > 256 that can be worked
	 * around by setting 4K granularity.
	 */
	if (sscreen->info.family == CHIP_HAWAII) {
		sscreen->tess_offchip_block_dw_size = 4096;
		offchip_granularity = V_03093C_X_4K_DWORDS;
	} else {
		sscreen->tess_offchip_block_dw_size = 8192;
		offchip_granularity = V_03093C_X_8K_DWORDS;
	}

	sscreen->tess_factor_ring_size = 32768 * sscreen->info.max_se;
	assert(((sscreen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
	sscreen->tess_offchip_ring_size = max_offchip_buffers *
					  sscreen->tess_offchip_block_dw_size * 4;

	if (sscreen->info.chip_class >= CIK) {
		if (sscreen->info.chip_class >= VI)
			--max_offchip_buffers;
		sscreen->vgt_hs_offchip_param =
			S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
			S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
	} else {
		assert(offchip_granularity == V_03093C_X_8K_DWORDS);
		sscreen->vgt_hs_offchip_param =
			S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
	}
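	/* Worked example (editor's illustration, not from the source): a
	 * 4-SE chip with double_offchip_buffers gets 127 * 4 = 508 offchip
	 * buffers, a 32768 * 4 = 128 KB tess factor ring, and 508 * 8192 * 4
	 * bytes (~15.9 MB) of offchip ring; on VI and newer the register
	 * field is then programmed with 508 - 1 = 507. */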

	/* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
	 * on SI. Some CLEAR_STATE cause asic hang on radeon kernel, etc.
	 * SPI_VS_OUT_CONFIG. So only enable CI CLEAR_STATE on amdgpu kernel. */
	sscreen->has_clear_state = sscreen->info.chip_class >= CIK &&
				   sscreen->info.drm_major == 3;

	sscreen->has_distributed_tess =
		sscreen->info.chip_class >= VI &&
		sscreen->info.max_se >= 2;

	sscreen->has_draw_indirect_multi =
		(sscreen->info.family >= CHIP_POLARIS10) ||
		(sscreen->info.chip_class == VI &&
		 sscreen->info.pfp_fw_version >= 121 &&
		 sscreen->info.me_fw_version >= 87) ||
		(sscreen->info.chip_class == CIK &&
		 sscreen->info.pfp_fw_version >= 211 &&
		 sscreen->info.me_fw_version >= 173) ||
		(sscreen->info.chip_class == SI &&
		 sscreen->info.pfp_fw_version >= 79 &&
		 sscreen->info.me_fw_version >= 142);

	sscreen->has_out_of_order_rast = sscreen->info.chip_class >= VI &&
					 sscreen->info.max_se >= 2 &&
					 !(sscreen->debug_flags & DBG(NO_OUT_OF_ORDER));
	sscreen->assume_no_z_fights =
		driQueryOptionb(config->options, "radeonsi_assume_no_z_fights");
	sscreen->commutative_blend_add =
		driQueryOptionb(config->options, "radeonsi_commutative_blend_add");
	sscreen->clear_db_cache_before_clear =
		driQueryOptionb(config->options, "radeonsi_clear_db_cache_before_clear");
	sscreen->has_msaa_sample_loc_bug = (sscreen->info.family >= CHIP_POLARIS10 &&
					    sscreen->info.family <= CHIP_POLARIS12) ||
					   sscreen->info.family == CHIP_VEGA10 ||
					   sscreen->info.family == CHIP_RAVEN;
	sscreen->has_ls_vgpr_init_bug = sscreen->info.family == CHIP_VEGA10 ||
					sscreen->info.family == CHIP_RAVEN;
	sscreen->has_dcc_constant_encode = sscreen->info.family == CHIP_RAVEN2;

	/* Only enable primitive binning on APUs by default. */
	sscreen->dpbb_allowed = sscreen->info.family == CHIP_RAVEN ||
				sscreen->info.family == CHIP_RAVEN2;

	sscreen->dfsm_allowed = sscreen->info.family == CHIP_RAVEN ||
				sscreen->info.family == CHIP_RAVEN2;

	/* Process DPBB enable flags. */
	if (sscreen->debug_flags & DBG(DPBB)) {
		sscreen->dpbb_allowed = true;
		if (sscreen->debug_flags & DBG(DFSM))
			sscreen->dfsm_allowed = true;
	}

	/* Process DPBB disable flags. */
	if (sscreen->debug_flags & DBG(NO_DPBB)) {
		sscreen->dpbb_allowed = false;
		sscreen->dfsm_allowed = false;
	} else if (sscreen->debug_flags & DBG(NO_DFSM)) {
		sscreen->dfsm_allowed = false;
	}

	/* While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 5.0 doesn't have working VGPR indexing
	 * on GFX9.
	 */
	sscreen->llvm_has_working_vgpr_indexing = sscreen->info.chip_class <= VI;

	/* Some chips have RB+ registers, but don't support RB+. Those must
	 * always disable it.
	 */
	if (sscreen->info.family == CHIP_STONEY ||
	    sscreen->info.chip_class >= GFX9) {
		sscreen->has_rbplus = true;

		sscreen->rbplus_allowed =
			!(sscreen->debug_flags & DBG(NO_RB_PLUS)) &&
			(sscreen->info.family == CHIP_STONEY ||
			 sscreen->info.family == CHIP_VEGA12 ||
			 sscreen->info.family == CHIP_RAVEN ||
			 sscreen->info.family == CHIP_RAVEN2);
	}

	sscreen->dcc_msaa_allowed =
		!(sscreen->debug_flags & DBG(NO_DCC_MSAA));

	sscreen->cpdma_prefetch_writes_memory = sscreen->info.chip_class <= VI;

	(void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
	sscreen->use_monolithic_shaders =
		(sscreen->debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;

	sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
					  SI_CONTEXT_INV_VMEM_L1;
	if (sscreen->info.chip_class <= VI) {
		sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_GLOBAL_L2;
		sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
	}

	if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
		sscreen->debug_flags |= DBG_ALL_SHADERS;

	/* Syntax:
	 *     EQAA=s,z,c
	 * Example:
	 *     EQAA=8,4,2
	 *
	 * That means 8 coverage samples, 4 Z/S samples, and 2 color samples.
	 * Constraints:
	 *     s >= z >= c (ignoring this only wastes memory)
	 *
	 * Only MSAA color and depth buffers are overridden.
	 */
	if (sscreen->info.has_eqaa_surface_allocator) {
		const char *eqaa = debug_get_option("EQAA", NULL);
		unsigned s, z, f;

		if (eqaa && sscanf(eqaa, "%u,%u,%u", &s, &z, &f) == 3 && s && z && f) {
			sscreen->eqaa_force_coverage_samples = s;
			sscreen->eqaa_force_z_samples = z;
			sscreen->eqaa_force_color_samples = f;
		}
	}

	for (i = 0; i < num_comp_hi_threads; i++)
		si_init_compiler(sscreen, &sscreen->compiler[i]);
	for (i = 0; i < num_comp_lo_threads; i++)
		si_init_compiler(sscreen, &sscreen->compiler_lowp[i]);

	/* Create the auxiliary context. This must be done last. */
	sscreen->aux_context = si_create_context(&sscreen->b, 0);

	if (sscreen->debug_flags & DBG(TEST_DMA))
		si_test_dma(sscreen);

	if (sscreen->debug_flags & DBG(TEST_DMA_PERF)) {
		si_test_dma_perf(sscreen);
	}

	if (sscreen->debug_flags & (DBG(TEST_VMFAULT_CP) |
				    DBG(TEST_VMFAULT_SDMA) |
				    DBG(TEST_VMFAULT_SHADER)))
		si_test_vmfault(sscreen);

	if (sscreen->debug_flags & DBG(TEST_GDS))
		si_test_gds((struct si_context *)sscreen->aux_context);

	if (sscreen->debug_flags & DBG(TEST_GDS_MM)) {
		si_test_gds_memory_management((struct si_context *)sscreen->aux_context,
					      32 * 1024, 4, RADEON_DOMAIN_GDS);
	}
	if (sscreen->debug_flags & DBG(TEST_GDS_OA_MM)) {
		si_test_gds_memory_management((struct si_context *)sscreen->aux_context,
					      4, 1, RADEON_DOMAIN_OA);
	}

	return &sscreen->b;
}