/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"

#include "driver_ddebug/dd_util.h"
#include "gallium/winsys/amdgpu/drm/amdgpu_public.h"
#include "gallium/winsys/radeon/drm/radeon_drm_public.h"
#include "radeon/radeon_uvd.h"
#include "si_compute.h"
#include "si_public.h"
#include "si_shader_internal.h"
#include "sid.h"
#include "ac_shadowed_regs.h"
#include "util/disk_cache.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_suballoc.h"
#include "util/u_tests.h"
#include "util/u_upload_mgr.h"
#include "util/xmlconfig.h"
#include "vl/vl_decoder.h"

#include <xf86drm.h>

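/* si_get_reset_status() re-creates the auxiliary context after a GPU reset,
 * so si_create_context() needs a forward declaration.
 */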
static struct pipe_context *si_create_context(struct pipe_screen *screen, unsigned flags);

static const struct debug_named_value debug_options[] = {
   /* Shader logging options: */
   {"vs", DBG(VS), "Print vertex shaders"},
   {"ps", DBG(PS), "Print pixel shaders"},
   {"gs", DBG(GS), "Print geometry shaders"},
   {"tcs", DBG(TCS), "Print tessellation control shaders"},
   {"tes", DBG(TES), "Print tessellation evaluation shaders"},
   {"cs", DBG(CS), "Print compute shaders"},
   {"noir", DBG(NO_IR), "Don't print the LLVM IR"},
   {"nonir", DBG(NO_NIR), "Don't print NIR when printing shaders"},
   {"noasm", DBG(NO_ASM), "Don't print disassembled shaders"},
   {"preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations"},

   /* Shader compiler options the shader cache should be aware of: */
   {"gisel", DBG(GISEL), "Enable LLVM global instruction selector."},
   {"w32ge", DBG(W32_GE), "Use Wave32 for vertex, tessellation, and geometry shaders."},
   {"w32ps", DBG(W32_PS), "Use Wave32 for pixel shaders."},
   {"w32cs", DBG(W32_CS), "Use Wave32 for compute shaders."},
   {"w64ge", DBG(W64_GE), "Use Wave64 for vertex, tessellation, and geometry shaders."},
   {"w64ps", DBG(W64_PS), "Use Wave64 for pixel shaders."},
   {"w64cs", DBG(W64_CS), "Use Wave64 for compute shaders."},
   {"noinfinterp", DBG(KILL_PS_INF_INTERP), "Kill PS with infinite interp coeff"},

   /* Shader compiler options (with no effect on the shader cache): */
   {"checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR"},
   {"mono", DBG(MONOLITHIC_SHADERS), "Use old-style monolithic shaders compiled on demand"},
   {"nooptvariant", DBG(NO_OPT_VARIANT), "Disable compiling optimized shader variants."},

   /* Information logging options: */
   {"info", DBG(INFO), "Print driver information"},
   {"tex", DBG(TEX), "Print texture info"},
   {"compute", DBG(COMPUTE), "Print compute info"},
   {"vm", DBG(VM), "Print virtual addresses when creating resources"},
   {"cache_stats", DBG(CACHE_STATS), "Print shader cache statistics."},

   /* Driver options: */
   {"forcedma", DBG(FORCE_SDMA), "Use SDMA for all operations when possible."},
   {"nodma", DBG(NO_SDMA), "Disable SDMA"},
   {"nodmaclear", DBG(NO_SDMA_CLEARS), "Disable SDMA clears"},
   {"nodmacopyimage", DBG(NO_SDMA_COPY_IMAGE), "Disable SDMA image copies"},
   {"nowc", DBG(NO_WC), "Disable GTT write combining"},
   {"check_vm", DBG(CHECK_VM), "Check VM faults and dump debug info."},
   {"reserve_vmid", DBG(RESERVE_VMID), "Force VMID reservation per context."},
   {"zerovram", DBG(ZERO_VRAM), "Clear VRAM allocations."},
   {"shadowregs", DBG(SHADOW_REGS), "Enable CP register shadowing."},

   /* 3D engine options: */
   {"nogfx", DBG(NO_GFX), "Disable graphics. Only multimedia compute paths can be used."},
   {"nongg", DBG(NO_NGG), "Disable NGG and use the legacy pipeline."},
   {"nggc", DBG(ALWAYS_NGG_CULLING_ALL), "Always use NGG culling even when it can hurt."},
   {"nggctess", DBG(ALWAYS_NGG_CULLING_TESS), "Always use NGG culling for tessellation."},
   {"nonggc", DBG(NO_NGG_CULLING), "Disable NGG culling."},
   {"alwayspd", DBG(ALWAYS_PD), "Always enable the primitive discard compute shader."},
   {"pd", DBG(PD), "Enable the primitive discard compute shader for large draw calls."},
   {"nopd", DBG(NO_PD), "Disable the primitive discard compute shader."},
   {"switch_on_eop", DBG(SWITCH_ON_EOP), "Program WD/IA to switch on end-of-packet."},
   {"nooutoforder", DBG(NO_OUT_OF_ORDER), "Disable out-of-order rasterization"},
   {"nodpbb", DBG(NO_DPBB), "Disable DPBB."},
   {"nodfsm", DBG(NO_DFSM), "Disable DFSM."},
   {"dpbb", DBG(DPBB), "Enable DPBB."},
   {"dfsm", DBG(DFSM), "Enable DFSM."},
   {"nohyperz", DBG(NO_HYPERZ), "Disable Hyper-Z"},
   {"norbplus", DBG(NO_RB_PLUS), "Disable RB+."},
   {"no2d", DBG(NO_2D_TILING), "Disable 2D tiling"},
   {"notiling", DBG(NO_TILING), "Disable tiling"},
   {"nodcc", DBG(NO_DCC), "Disable DCC."},
   {"nodccclear", DBG(NO_DCC_CLEAR), "Disable DCC fast clear."},
   {"nodccfb", DBG(NO_DCC_FB), "Disable separate DCC on the main framebuffer"},
   {"nodccmsaa", DBG(NO_DCC_MSAA), "Disable DCC for MSAA"},
   {"nofmask", DBG(NO_FMASK), "Disable MSAA compression"},

   DEBUG_NAMED_VALUE_END /* must be last */
};

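/* The table above is matched against the R600_DEBUG and AMD_DEBUG environment
 * variables, and the test table below against AMD_TEST, both in
 * radeonsi_screen_create_impl() via debug_get_flags_option().
 */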
static const struct debug_named_value test_options[] = {
   {"testdma", DBG(TEST_DMA), "Invoke SDMA tests and exit."},
   {"testvmfaultcp", DBG(TEST_VMFAULT_CP), "Invoke a CP VM fault test and exit."},
   {"testvmfaultsdma", DBG(TEST_VMFAULT_SDMA), "Invoke a SDMA VM fault test and exit."},
   {"testvmfaultshader", DBG(TEST_VMFAULT_SHADER), "Invoke a shader VM fault test and exit."},
   {"testdmaperf", DBG(TEST_DMA_PERF), "Test DMA performance"},
   {"testgds", DBG(TEST_GDS), "Test GDS."},
   {"testgdsmm", DBG(TEST_GDS_MM), "Test GDS memory management."},
   {"testgdsoamm", DBG(TEST_GDS_OA_MM), "Test GDS OA memory management."},

   DEBUG_NAMED_VALUE_END /* must be last */
};

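/* Create the LLVM target machines and pass managers for this screen. Pass
 * managers for the Wave32 and low-optimization target machines are created
 * only when those target machines exist.
 */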
void si_init_compiler(struct si_screen *sscreen, struct ac_llvm_compiler *compiler)
{
   /* Only create the less-optimizing version of the compiler on APUs
    * predating Ryzen (Raven). */
   bool create_low_opt_compiler =
      !sscreen->info.has_dedicated_vram && sscreen->info.chip_class <= GFX8;

   enum ac_target_machine_options tm_options =
      (sscreen->debug_flags & DBG(GISEL) ? AC_TM_ENABLE_GLOBAL_ISEL : 0) |
      (sscreen->info.chip_class <= GFX8 ? AC_TM_FORCE_DISABLE_XNACK :
       sscreen->info.chip_class <= GFX10 ? AC_TM_FORCE_ENABLE_XNACK : 0) |
      (!sscreen->llvm_has_working_vgpr_indexing ? AC_TM_PROMOTE_ALLOCA_TO_SCRATCH : 0) |
      (sscreen->debug_flags & DBG(CHECK_IR) ? AC_TM_CHECK_IR : 0) |
      (create_low_opt_compiler ? AC_TM_CREATE_LOW_OPT : 0);

   ac_init_llvm_once();
   ac_init_llvm_compiler(compiler, sscreen->info.family, tm_options);
   compiler->passes = ac_create_llvm_passes(compiler->tm);

   if (compiler->tm_wave32)
      compiler->passes_wave32 = ac_create_llvm_passes(compiler->tm_wave32);
   if (compiler->low_opt_tm)
      compiler->low_opt_passes = ac_create_llvm_passes(compiler->low_opt_tm);
}

static void si_destroy_compiler(struct ac_llvm_compiler *compiler)
{
   ac_destroy_llvm_compiler(compiler);
}

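/* Context destruction. This is also the failure path of si_create_context(),
 * so everything released below must tolerate a partially initialized context.
 */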
static void si_destroy_context(struct pipe_context *context)
{
   struct si_context *sctx = (struct si_context *)context;
   int i;

   /* Unreference the framebuffer normally to disable related logic
    * properly.
    */
   struct pipe_framebuffer_state fb = {};
   if (context->set_framebuffer_state)
      context->set_framebuffer_state(context, &fb);

   si_release_all_descriptors(sctx);

   if (sctx->chip_class >= GFX10 && sctx->has_graphics)
      gfx10_destroy_query(sctx);

   pipe_resource_reference(&sctx->esgs_ring, NULL);
   pipe_resource_reference(&sctx->gsvs_ring, NULL);
   pipe_resource_reference(&sctx->tess_rings, NULL);
   pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
   pipe_resource_reference(&sctx->sample_pos_buffer, NULL);
   si_resource_reference(&sctx->border_color_buffer, NULL);
   free(sctx->border_color_table);
   si_resource_reference(&sctx->scratch_buffer, NULL);
   si_resource_reference(&sctx->compute_scratch_buffer, NULL);
   si_resource_reference(&sctx->wait_mem_scratch, NULL);
   si_resource_reference(&sctx->small_prim_cull_info_buf, NULL);

   if (sctx->cs_preamble_state)
      si_pm4_free_state(sctx, sctx->cs_preamble_state, ~0);
   if (sctx->cs_preamble_gs_rings)
      si_pm4_free_state(sctx, sctx->cs_preamble_gs_rings, ~0);
   for (i = 0; i < ARRAY_SIZE(sctx->vgt_shader_config); i++)
      si_pm4_delete_state(sctx, vgt_shader_config, sctx->vgt_shader_config[i]);

   if (sctx->fixed_func_tcs_shader.cso)
      sctx->b.delete_tcs_state(&sctx->b, sctx->fixed_func_tcs_shader.cso);
   if (sctx->custom_dsa_flush)
      sctx->b.delete_depth_stencil_alpha_state(&sctx->b, sctx->custom_dsa_flush);
   if (sctx->custom_blend_resolve)
      sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_resolve);
   if (sctx->custom_blend_fmask_decompress)
      sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_fmask_decompress);
   if (sctx->custom_blend_eliminate_fastclear)
      sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_eliminate_fastclear);
   if (sctx->custom_blend_dcc_decompress)
      sctx->b.delete_blend_state(&sctx->b, sctx->custom_blend_dcc_decompress);
   if (sctx->vs_blit_pos)
      sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_pos);
   if (sctx->vs_blit_pos_layered)
      sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_pos_layered);
   if (sctx->vs_blit_color)
      sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_color);
   if (sctx->vs_blit_color_layered)
      sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_color_layered);
   if (sctx->vs_blit_texcoord)
      sctx->b.delete_vs_state(&sctx->b, sctx->vs_blit_texcoord);
   if (sctx->cs_clear_buffer)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_buffer);
   if (sctx->cs_copy_buffer)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_copy_buffer);
   if (sctx->cs_copy_image)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_copy_image);
   if (sctx->cs_copy_image_1d_array)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_copy_image_1d_array);
   if (sctx->cs_clear_render_target)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_render_target);
   if (sctx->cs_clear_render_target_1d_array)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_render_target_1d_array);
   if (sctx->cs_clear_12bytes_buffer)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_clear_12bytes_buffer);
   if (sctx->cs_dcc_decompress)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_dcc_decompress);
   if (sctx->cs_dcc_retile)
      sctx->b.delete_compute_state(&sctx->b, sctx->cs_dcc_retile);

   for (unsigned i = 0; i < ARRAY_SIZE(sctx->cs_fmask_expand); i++) {
      for (unsigned j = 0; j < ARRAY_SIZE(sctx->cs_fmask_expand[i]); j++) {
         if (sctx->cs_fmask_expand[i][j]) {
            sctx->b.delete_compute_state(&sctx->b, sctx->cs_fmask_expand[i][j]);
         }
      }
   }

   if (sctx->blitter)
      util_blitter_destroy(sctx->blitter);

   /* Release DCC stats. */
   for (int i = 0; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
      assert(!sctx->dcc_stats[i].query_active);

      for (int j = 0; j < ARRAY_SIZE(sctx->dcc_stats[i].ps_stats); j++)
         if (sctx->dcc_stats[i].ps_stats[j])
            sctx->b.destroy_query(&sctx->b, sctx->dcc_stats[i].ps_stats[j]);

      si_texture_reference(&sctx->dcc_stats[i].tex, NULL);
   }

   if (sctx->query_result_shader)
      sctx->b.delete_compute_state(&sctx->b, sctx->query_result_shader);
   if (sctx->sh_query_result_shader)
      sctx->b.delete_compute_state(&sctx->b, sctx->sh_query_result_shader);

   if (sctx->gfx_cs)
      sctx->ws->cs_destroy(sctx->gfx_cs);
   if (sctx->sdma_cs)
      sctx->ws->cs_destroy(sctx->sdma_cs);
   if (sctx->ctx)
      sctx->ws->ctx_destroy(sctx->ctx);

   if (sctx->b.stream_uploader)
      u_upload_destroy(sctx->b.stream_uploader);
   if (sctx->b.const_uploader)
      u_upload_destroy(sctx->b.const_uploader);
   if (sctx->cached_gtt_allocator)
      u_upload_destroy(sctx->cached_gtt_allocator);

   slab_destroy_child(&sctx->pool_transfers);
   slab_destroy_child(&sctx->pool_transfers_unsync);

   if (sctx->allocator_zeroed_memory)
      u_suballocator_destroy(sctx->allocator_zeroed_memory);

   sctx->ws->fence_reference(&sctx->last_gfx_fence, NULL);
   sctx->ws->fence_reference(&sctx->last_sdma_fence, NULL);
   sctx->ws->fence_reference(&sctx->last_ib_barrier_fence, NULL);
   si_resource_reference(&sctx->eop_bug_scratch, NULL);
   si_resource_reference(&sctx->index_ring, NULL);
   si_resource_reference(&sctx->barrier_buf, NULL);
   si_resource_reference(&sctx->last_ib_barrier_buf, NULL);
   si_resource_reference(&sctx->shadowed_regs, NULL);
   pb_reference(&sctx->gds, NULL);
   pb_reference(&sctx->gds_oa, NULL);

   si_destroy_compiler(&sctx->compiler);

   si_saved_cs_reference(&sctx->current_saved_cs, NULL);

   _mesa_hash_table_destroy(sctx->tex_handles, NULL);
   _mesa_hash_table_destroy(sctx->img_handles, NULL);

   util_dynarray_fini(&sctx->resident_tex_handles);
   util_dynarray_fini(&sctx->resident_img_handles);
   util_dynarray_fini(&sctx->resident_tex_needs_color_decompress);
   util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
   util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
   si_unref_sdma_uploads(sctx);
   free(sctx->sdma_uploads);
   FREE(sctx);
}

static enum pipe_reset_status si_get_reset_status(struct pipe_context *ctx)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_screen *sscreen = sctx->screen;
   enum pipe_reset_status status = sctx->ws->ctx_query_reset_status(sctx->ctx);

   if (status != PIPE_NO_RESET) {
      /* Call the gallium frontend to set a no-op API dispatch. */
      if (sctx->device_reset_callback.reset) {
         sctx->device_reset_callback.reset(sctx->device_reset_callback.data, status);
      }

      /* Re-create the auxiliary context, because it won't submit
       * any new IBs due to a GPU reset.
       */
      simple_mtx_lock(&sscreen->aux_context_lock);

      struct u_log_context *aux_log = ((struct si_context *)sscreen->aux_context)->log;
      sscreen->aux_context->set_log_context(sscreen->aux_context, NULL);
      sscreen->aux_context->destroy(sscreen->aux_context);

      sscreen->aux_context = si_create_context(
         &sscreen->b, (sscreen->options.aux_debug ? PIPE_CONTEXT_DEBUG : 0) |
                         (sscreen->info.has_graphics ? 0 : PIPE_CONTEXT_COMPUTE_ONLY));
      sscreen->aux_context->set_log_context(sscreen->aux_context, aux_log);
      simple_mtx_unlock(&sscreen->aux_context_lock);
   }

   return status;
}

static void si_set_device_reset_callback(struct pipe_context *ctx,
                                         const struct pipe_device_reset_callback *cb)
{
   struct si_context *sctx = (struct si_context *)ctx;

   if (cb)
      sctx->device_reset_callback = *cb;
   else
      memset(&sctx->device_reset_callback, 0, sizeof(sctx->device_reset_callback));
}

/* Apitrace profiling:
 *   1) qapitrace : Tools -> Profile: Measure CPU & GPU times
 *   2) In the middle panel, zoom in (mouse wheel) on some bad draw call
 *      and remember its number.
 *   3) In Mesa, enable queries and performance counters around that draw
 *      call and print the results.
 *   4) glretrace --benchmark --markers ..
 */
static void si_emit_string_marker(struct pipe_context *ctx, const char *string, int len)
{
   struct si_context *sctx = (struct si_context *)ctx;

   dd_parse_apitrace_marker(string, len, &sctx->apitrace_call_number);

   if (sctx->log)
      u_log_printf(sctx->log, "\nString marker: %*s\n", len, string);
}

static void si_set_debug_callback(struct pipe_context *ctx, const struct pipe_debug_callback *cb)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_screen *screen = sctx->screen;

   util_queue_finish(&screen->shader_compiler_queue);
   util_queue_finish(&screen->shader_compiler_queue_low_priority);

   if (cb)
      sctx->debug = *cb;
   else
      memset(&sctx->debug, 0, sizeof(sctx->debug));
}

static void si_set_log_context(struct pipe_context *ctx, struct u_log_context *log)
{
   struct si_context *sctx = (struct si_context *)ctx;
   sctx->log = log;

   if (log)
      u_log_add_auto_logger(log, si_auto_log_cs, sctx);
}

static void si_set_context_param(struct pipe_context *ctx, enum pipe_context_param param,
                                 unsigned value)
{
   struct radeon_winsys *ws = ((struct si_context *)ctx)->ws;

   switch (param) {
   case PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE:
      ws->pin_threads_to_L3_cache(ws, value);
      break;
   default:;
   }
}

static struct pipe_context *si_create_context(struct pipe_screen *screen, unsigned flags)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   STATIC_ASSERT(DBG_COUNT <= 64);

   /* Don't create a context if it's not compute-only and hw is compute-only. */
   if (!sscreen->info.has_graphics && !(flags & PIPE_CONTEXT_COMPUTE_ONLY))
      return NULL;

   struct si_context *sctx = CALLOC_STRUCT(si_context);
   struct radeon_winsys *ws = sscreen->ws;
   int shader, i;
   bool stop_exec_on_failure = (flags & PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET) != 0;

   if (!sctx)
      return NULL;

   sctx->has_graphics = sscreen->info.chip_class == GFX6 || !(flags & PIPE_CONTEXT_COMPUTE_ONLY);

   if (flags & PIPE_CONTEXT_DEBUG)
      sscreen->record_llvm_ir = true; /* racy but not critical */

   sctx->b.screen = screen; /* this must be set first */
   sctx->b.priv = NULL;
   sctx->b.destroy = si_destroy_context;
   sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
   sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

   slab_create_child(&sctx->pool_transfers, &sscreen->pool_transfers);
   slab_create_child(&sctx->pool_transfers_unsync, &sscreen->pool_transfers);

   sctx->ws = sscreen->ws;
   sctx->family = sscreen->info.family;
   sctx->chip_class = sscreen->info.chip_class;

   if (sctx->chip_class == GFX7 || sctx->chip_class == GFX8 || sctx->chip_class == GFX9) {
      sctx->eop_bug_scratch = si_resource(pipe_buffer_create(
         &sscreen->b, 0, PIPE_USAGE_DEFAULT, 16 * sscreen->info.num_render_backends));
      if (!sctx->eop_bug_scratch)
         goto fail;
   }

   /* Initialize context allocators. */
   sctx->allocator_zeroed_memory =
      u_suballocator_create(&sctx->b, 128 * 1024, 0, PIPE_USAGE_DEFAULT,
                            SI_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_CLEAR, false);
   if (!sctx->allocator_zeroed_memory)
      goto fail;

   sctx->b.stream_uploader =
      u_upload_create(&sctx->b, 1024 * 1024, 0, PIPE_USAGE_STREAM, SI_RESOURCE_FLAG_READ_ONLY);
   if (!sctx->b.stream_uploader)
      goto fail;

   sctx->cached_gtt_allocator = u_upload_create(&sctx->b, 16 * 1024, 0, PIPE_USAGE_STAGING, 0);
   if (!sctx->cached_gtt_allocator)
      goto fail;

   sctx->ctx = sctx->ws->ctx_create(sctx->ws);
   if (!sctx->ctx)
      goto fail;

   if (sscreen->info.num_rings[RING_DMA] && !(sscreen->debug_flags & DBG(NO_SDMA)) &&
       /* SDMA causes corruption on RX 580:
        *    https://gitlab.freedesktop.org/mesa/mesa/-/issues/1399
        *    https://gitlab.freedesktop.org/mesa/mesa/-/issues/1889
        */
       (sctx->chip_class != GFX8 || sscreen->debug_flags & DBG(FORCE_SDMA)) &&
       /* SDMA causes corruption on gfx9 APUs:
        *    https://gitlab.freedesktop.org/mesa/mesa/-/issues/2814
        *
        * While we could keep buffer copies and clears enabled, let's disable
        * everything, because neither gfx8 nor gfx10 enable SDMA, and it's not
        * easy to test.
        */
       (sctx->chip_class != GFX9 || sscreen->debug_flags & DBG(FORCE_SDMA)) &&
       /* SDMA timeouts sometimes on gfx10 so disable it for now. See:
        *    https://bugs.freedesktop.org/show_bug.cgi?id=111481
        *    https://gitlab.freedesktop.org/mesa/mesa/-/issues/1907
        */
       (sctx->chip_class != GFX10 || sscreen->debug_flags & DBG(FORCE_SDMA))) {
      sctx->sdma_cs = sctx->ws->cs_create(sctx->ctx, RING_DMA, (void *)si_flush_dma_cs, sctx,
                                          stop_exec_on_failure);
   }

   bool use_sdma_upload = sscreen->info.has_dedicated_vram && sctx->sdma_cs;
   sctx->b.const_uploader =
      u_upload_create(&sctx->b, 256 * 1024, 0, PIPE_USAGE_DEFAULT,
                      SI_RESOURCE_FLAG_32BIT |
                         (use_sdma_upload ? SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA : 0));
   if (!sctx->b.const_uploader)
      goto fail;

   if (use_sdma_upload)
      u_upload_enable_flush_explicit(sctx->b.const_uploader);

   sctx->gfx_cs = ws->cs_create(sctx->ctx, sctx->has_graphics ? RING_GFX : RING_COMPUTE,
                                (void *)si_flush_gfx_cs, sctx, stop_exec_on_failure);

   /* Border colors. */
   sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS * sizeof(*sctx->border_color_table));
   if (!sctx->border_color_table)
      goto fail;

   sctx->border_color_buffer = si_resource(pipe_buffer_create(
      screen, 0, PIPE_USAGE_DEFAULT, SI_MAX_BORDER_COLORS * sizeof(*sctx->border_color_table)));
   if (!sctx->border_color_buffer)
      goto fail;

   sctx->border_color_map =
      ws->buffer_map(sctx->border_color_buffer->buf, NULL, PIPE_TRANSFER_WRITE);
   if (!sctx->border_color_map)
      goto fail;

   sctx->ngg = sscreen->use_ngg;

   /* Initialize context functions used by graphics and compute. */
   if (sctx->chip_class >= GFX10)
      sctx->emit_cache_flush = gfx10_emit_cache_flush;
   else
      sctx->emit_cache_flush = si_emit_cache_flush;

   sctx->b.emit_string_marker = si_emit_string_marker;
   sctx->b.set_debug_callback = si_set_debug_callback;
   sctx->b.set_log_context = si_set_log_context;
   sctx->b.set_context_param = si_set_context_param;
   sctx->b.get_device_reset_status = si_get_reset_status;
   sctx->b.set_device_reset_callback = si_set_device_reset_callback;

   si_init_all_descriptors(sctx);
   si_init_buffer_functions(sctx);
   si_init_clear_functions(sctx);
   si_init_blit_functions(sctx);
   si_init_compute_functions(sctx);
   si_init_compute_blit_functions(sctx);
   si_init_debug_functions(sctx);
   si_init_fence_functions(sctx);
   si_init_query_functions(sctx);
   si_init_state_compute_functions(sctx);
   si_init_context_texture_functions(sctx);

   /* Initialize graphics-only context functions. */
   if (sctx->has_graphics) {
      if (sctx->chip_class >= GFX10)
         gfx10_init_query(sctx);
      si_init_msaa_functions(sctx);
      si_init_shader_functions(sctx);
      si_init_state_functions(sctx);
      si_init_streamout_functions(sctx);
      si_init_viewport_functions(sctx);

      sctx->blitter = util_blitter_create(&sctx->b);
      if (sctx->blitter == NULL)
         goto fail;
      sctx->blitter->skip_viewport_restore = true;

      /* Some states are expected to be always non-NULL. */
      sctx->noop_blend = util_blitter_get_noop_blend_state(sctx->blitter);
      sctx->queued.named.blend = sctx->noop_blend;

      sctx->noop_dsa = util_blitter_get_noop_dsa_state(sctx->blitter);
      sctx->queued.named.dsa = sctx->noop_dsa;

      sctx->discard_rasterizer_state = util_blitter_get_discard_rasterizer_state(sctx->blitter);
      sctx->queued.named.rasterizer = sctx->discard_rasterizer_state;

      si_init_draw_functions(sctx);

      /* If aux_context == NULL, we are initializing aux_context right now. */
      bool is_aux_context = !sscreen->aux_context;
      si_initialize_prim_discard_tunables(sscreen, is_aux_context,
                                          &sctx->prim_discard_vertex_count_threshold,
                                          &sctx->index_ring_size_per_ib);
   }

   /* Initialize SDMA functions. */
   if (sctx->chip_class >= GFX7)
      cik_init_sdma_functions(sctx);
   else
      sctx->dma_copy = si_resource_copy_region;

   if (sscreen->debug_flags & DBG(FORCE_SDMA))
      sctx->b.resource_copy_region = sctx->dma_copy;

   sctx->sample_mask = 0xffff;

   /* Initialize multimedia functions. */
   if (sscreen->info.has_hw_decode) {
      sctx->b.create_video_codec = si_uvd_create_decoder;
      sctx->b.create_video_buffer = si_video_buffer_create;
   } else {
      sctx->b.create_video_codec = vl_create_decoder;
      sctx->b.create_video_buffer = vl_video_buffer_create;
   }

   if (sctx->chip_class >= GFX9 || si_compute_prim_discard_enabled(sctx)) {
      sctx->wait_mem_scratch =
         si_aligned_buffer_create(screen, SI_RESOURCE_FLAG_UNMAPPABLE,
                                  PIPE_USAGE_DEFAULT, 8,
                                  sscreen->info.tcc_cache_line_size);
      if (!sctx->wait_mem_scratch)
         goto fail;
   }

   /* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
    * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
   if (sctx->chip_class == GFX7) {
      sctx->null_const_buf.buffer =
         pipe_aligned_buffer_create(screen, SI_RESOURCE_FLAG_32BIT, PIPE_USAGE_DEFAULT, 16,
                                    sctx->screen->info.tcc_cache_line_size);
      if (!sctx->null_const_buf.buffer)
         goto fail;
      sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

      unsigned start_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
      for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
         for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
            sctx->b.set_constant_buffer(&sctx->b, shader, i, &sctx->null_const_buf);
         }
      }

      si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS, &sctx->null_const_buf);
      si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS, &sctx->null_const_buf);
      si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES, &sctx->null_const_buf);
      si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &sctx->null_const_buf);
      si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS, &sctx->null_const_buf);
   }

   uint64_t max_threads_per_block;
   screen->get_compute_param(screen, PIPE_SHADER_IR_NIR, PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
                             &max_threads_per_block);

   /* The maximum number of scratch waves. Scratch space isn't divided
    * evenly between CUs. The number is only a function of the number of CUs.
    * We can decrease the constant to decrease the scratch buffer size.
    *
    * sctx->scratch_waves must be >= the maximum possible size of
    * 1 threadgroup, so that the hw doesn't hang from being unable
    * to start any.
    *
    * The recommended value is 4 per CU at most. Higher numbers don't
    * bring much benefit, but they still occupy chip resources (think
    * async compute). I've seen ~2% performance difference between 4 and 32.
    */
   sctx->scratch_waves =
      MAX2(32 * sscreen->info.num_good_compute_units, max_threads_per_block / 64);

   /* Bindless handles. */
   sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
   sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);

   util_dynarray_init(&sctx->resident_tex_handles, NULL);
   util_dynarray_init(&sctx->resident_img_handles, NULL);
   util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
   util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
   util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

   sctx->sample_pos_buffer =
      pipe_buffer_create(sctx->b.screen, 0, PIPE_USAGE_DEFAULT, sizeof(sctx->sample_positions));
   pipe_buffer_write(&sctx->b, sctx->sample_pos_buffer, 0, sizeof(sctx->sample_positions),
                     &sctx->sample_positions);

   /* The remainder of this function initializes the gfx CS and must be last. */
   assert(sctx->gfx_cs->current.cdw == 0);

   if (sctx->has_graphics) {
      si_init_cp_reg_shadowing(sctx);
   }

   si_begin_new_gfx_cs(sctx, true);
   assert(sctx->gfx_cs->current.cdw == sctx->initial_gfx_cs_size);

   /* Initialize per-context buffers. */
   if (sctx->wait_mem_scratch) {
      si_cp_write_data(sctx, sctx->wait_mem_scratch, 0, 4, V_370_MEM, V_370_ME,
                       &sctx->wait_mem_number);
   }

   if (sctx->chip_class == GFX7) {
      /* Clear the NULL constant buffer, because loads should return zeros.
       * Note that this forces CP DMA to be used, because clover deadlocks
       * for some reason when the compute codepath is used.
       */
      uint32_t clear_value = 0;
      si_clear_buffer(sctx, sctx->null_const_buf.buffer, 0, sctx->null_const_buf.buffer->width0,
                      &clear_value, 4, SI_COHERENCY_SHADER, true);
   }

   sctx->initial_gfx_cs_size = sctx->gfx_cs->current.cdw;
   return &sctx->b;

fail:
   fprintf(stderr, "radeonsi: Failed to create a context.\n");
   si_destroy_context(&sctx->b);
   return NULL;
}

static struct pipe_context *si_pipe_create_context(struct pipe_screen *screen, void *priv,
                                                   unsigned flags)
{
   struct si_screen *sscreen = (struct si_screen *)screen;
   struct pipe_context *ctx;
   uint64_t total_ram;

   if (sscreen->debug_flags & DBG(CHECK_VM))
      flags |= PIPE_CONTEXT_DEBUG;

   ctx = si_create_context(screen, flags);

   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
      return ctx;

   /* Clover (compute-only) is unsupported. */
   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
      return ctx;

   /* When shaders are logged to stderr, asynchronous compilation is
    * disabled too. */
   if (sscreen->debug_flags & DBG_ALL_SHADERS)
      return ctx;

   /* Use asynchronous flushes only on amdgpu, since the radeon
    * implementation for fence_server_sync is incomplete. */
   struct pipe_context *tc = threaded_context_create(
      ctx, &sscreen->pool_transfers, si_replace_buffer_storage,
      sscreen->info.is_amdgpu ? si_create_fence : NULL,
      &((struct si_context *)ctx)->tc);

   if (tc && tc != ctx && os_get_total_physical_memory(&total_ram)) {
      ((struct threaded_context *)tc)->bytes_mapped_limit = total_ram / 4;
   }

   return tc;
}

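/* Screen destruction. The winsys is reference-counted; teardown only
 * proceeds once the last screen reference is gone.
 */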
static void si_destroy_screen(struct pipe_screen *pscreen)
{
   struct si_screen *sscreen = (struct si_screen *)pscreen;
   struct si_shader_part *parts[] = {sscreen->vs_prologs, sscreen->tcs_epilogs, sscreen->gs_prologs,
                                     sscreen->ps_prologs, sscreen->ps_epilogs};
   unsigned i;

   if (!sscreen->ws->unref(sscreen->ws))
      return;

   if (sscreen->debug_flags & DBG(CACHE_STATS)) {
      printf("live shader cache: hits = %u, misses = %u\n", sscreen->live_shader_cache.hits,
             sscreen->live_shader_cache.misses);
      printf("memory shader cache: hits = %u, misses = %u\n", sscreen->num_memory_shader_cache_hits,
             sscreen->num_memory_shader_cache_misses);
      printf("disk shader cache: hits = %u, misses = %u\n", sscreen->num_disk_shader_cache_hits,
             sscreen->num_disk_shader_cache_misses);
   }

   simple_mtx_destroy(&sscreen->aux_context_lock);

   struct u_log_context *aux_log = ((struct si_context *)sscreen->aux_context)->log;
   if (aux_log) {
      sscreen->aux_context->set_log_context(sscreen->aux_context, NULL);
      u_log_context_destroy(aux_log);
      FREE(aux_log);
   }

   sscreen->aux_context->destroy(sscreen->aux_context);

   util_queue_destroy(&sscreen->shader_compiler_queue);
   util_queue_destroy(&sscreen->shader_compiler_queue_low_priority);

   /* Release the reference on glsl types of the compiler threads. */
   glsl_type_singleton_decref();

   for (i = 0; i < ARRAY_SIZE(sscreen->compiler); i++)
      si_destroy_compiler(&sscreen->compiler[i]);

   for (i = 0; i < ARRAY_SIZE(sscreen->compiler_lowp); i++)
      si_destroy_compiler(&sscreen->compiler_lowp[i]);

   /* Free shader parts. */
   for (i = 0; i < ARRAY_SIZE(parts); i++) {
      while (parts[i]) {
         struct si_shader_part *part = parts[i];

         parts[i] = part->next;
         si_shader_binary_clean(&part->binary);
         FREE(part);
      }
   }
   simple_mtx_destroy(&sscreen->shader_parts_mutex);
   si_destroy_shader_cache(sscreen);

   si_destroy_perfcounters(sscreen);
   si_gpu_load_kill_thread(sscreen);

   simple_mtx_destroy(&sscreen->gpu_load_mutex);

   slab_destroy_parent(&sscreen->pool_transfers);

   disk_cache_destroy(sscreen->disk_shader_cache);
   util_live_shader_cache_deinit(&sscreen->live_shader_cache);
   sscreen->ws->destroy(sscreen->ws);
   FREE(sscreen);
}

static void si_init_gs_info(struct si_screen *sscreen)
{
   sscreen->gs_table_depth = ac_get_gs_table_depth(sscreen->info.chip_class, sscreen->info.family);
}

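/* Deliberately provoke VM faults on the CP, SDMA, and shader paths by
 * zeroing a buffer's GPU address; used to test GPU fault handling.
 */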
static void si_test_vmfault(struct si_screen *sscreen, uint64_t test_flags)
{
   struct pipe_context *ctx = sscreen->aux_context;
   struct si_context *sctx = (struct si_context *)ctx;
   struct pipe_resource *buf = pipe_buffer_create_const0(&sscreen->b, 0, PIPE_USAGE_DEFAULT, 64);

   if (!buf) {
      puts("Buffer allocation failed.");
      exit(1);
   }

   si_resource(buf)->gpu_address = 0; /* cause a VM fault */

   if (test_flags & DBG(TEST_VMFAULT_CP)) {
      si_cp_dma_copy_buffer(sctx, buf, buf, 0, 4, 4, 0, SI_COHERENCY_NONE, L2_BYPASS);
      ctx->flush(ctx, NULL, 0);
      puts("VM fault test: CP - done.");
   }
   if (test_flags & DBG(TEST_VMFAULT_SDMA)) {
      si_sdma_clear_buffer(sctx, buf, 0, 4, 0);
      ctx->flush(ctx, NULL, 0);
      puts("VM fault test: SDMA - done.");
   }
   if (test_flags & DBG(TEST_VMFAULT_SHADER)) {
      util_test_constant_buffer(ctx, buf);
      puts("VM fault test: Shader - done.");
   }
   exit(0);
}

static void si_test_gds_memory_management(struct si_context *sctx, unsigned alloc_size,
                                          unsigned alignment, enum radeon_bo_domain domain)
{
   struct radeon_winsys *ws = sctx->ws;
   struct radeon_cmdbuf *cs[8];
   struct pb_buffer *gds_bo[ARRAY_SIZE(cs)];

   for (unsigned i = 0; i < ARRAY_SIZE(cs); i++) {
      cs[i] = ws->cs_create(sctx->ctx, RING_COMPUTE, NULL, NULL, false);
      gds_bo[i] = ws->buffer_create(ws, alloc_size, alignment, domain, 0);
      assert(gds_bo[i]);
   }

   for (unsigned iterations = 0; iterations < 20000; iterations++) {
      for (unsigned i = 0; i < ARRAY_SIZE(cs); i++) {
         /* This clears GDS with CP DMA.
          *
          * We don't care if GDS is present. Just add some packet
          * to make the GPU busy for a moment.
          */
         si_cp_dma_clear_buffer(
            sctx, cs[i], NULL, 0, alloc_size, 0,
            SI_CPDMA_SKIP_BO_LIST_UPDATE | SI_CPDMA_SKIP_CHECK_CS_SPACE | SI_CPDMA_SKIP_GFX_SYNC, 0,
            0);

         ws->cs_add_buffer(cs[i], gds_bo[i], RADEON_USAGE_READWRITE, domain, 0);
         ws->cs_flush(cs[i], PIPE_FLUSH_ASYNC, NULL);
      }
   }
   exit(0);
}

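/* The disk shader cache key mixes the build identifiers of the driver and of
 * LLVM with the shader-affecting debug flags and the 32-bit address high
 * bits, so a change to any of them invalidates previously cached binaries.
 */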
static void si_disk_cache_create(struct si_screen *sscreen)
{
   /* Don't use the cache if shader dumping is enabled. */
   if (sscreen->debug_flags & DBG_ALL_SHADERS)
      return;

   struct mesa_sha1 ctx;
   unsigned char sha1[20];
   char cache_id[20 * 2 + 1];

   _mesa_sha1_init(&ctx);

   if (!disk_cache_get_function_identifier(si_disk_cache_create, &ctx) ||
       !disk_cache_get_function_identifier(LLVMInitializeAMDGPUTargetInfo, &ctx))
      return;

   _mesa_sha1_final(&ctx, sha1);
   disk_cache_format_hex_id(cache_id, sha1, 20 * 2);

   /* These flags affect shader compilation. */
#define ALL_FLAGS (DBG(GISEL) | DBG(KILL_PS_INF_INTERP) | DBG(CLAMP_DIV_BY_ZERO))
   uint64_t shader_debug_flags = sscreen->debug_flags & ALL_FLAGS;

   /* Add the high bits of 32-bit addresses, which affects
    * how 32-bit addresses are expanded to 64 bits.
    */
   STATIC_ASSERT(ALL_FLAGS <= UINT_MAX);
   assert((int16_t)sscreen->info.address32_hi == (int32_t)sscreen->info.address32_hi);
   shader_debug_flags |= (uint64_t)(sscreen->info.address32_hi & 0xffff) << 32;

   sscreen->disk_shader_cache = disk_cache_create(sscreen->info.name, cache_id, shader_debug_flags);
}

static void si_set_max_shader_compiler_threads(struct pipe_screen *screen, unsigned max_threads)
{
   struct si_screen *sscreen = (struct si_screen *)screen;

   /* This function doesn't allow a greater number of threads than
    * the queue had at its creation. */
   util_queue_adjust_num_threads(&sscreen->shader_compiler_queue, max_threads);

   /* Don't change the number of threads on the low priority queue. */
}

static bool si_is_parallel_shader_compilation_finished(struct pipe_screen *screen, void *shader,
                                                       enum pipe_shader_type shader_type)
{
   struct si_shader_selector *sel = (struct si_shader_selector *)shader;

   return util_queue_fence_is_signalled(&sel->ready);
}

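/* The main screen constructor. It is passed as a callback to the winsys
 * (see radeonsi_screen_create at the end of this file) and runs once the
 * winsys itself has been created.
 */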
static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
                                                       const struct pipe_screen_config *config)
{
   struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
   unsigned hw_threads, num_comp_hi_threads, num_comp_lo_threads;
   uint64_t test_flags;

   if (!sscreen) {
      return NULL;
   }

   sscreen->ws = ws;
   ws->query_info(ws, &sscreen->info);

   if (sscreen->info.chip_class == GFX10_3 && LLVM_VERSION_MAJOR < 11) {
      fprintf(stderr, "radeonsi: GFX 10.3 requires LLVM 11 or higher\n");
      FREE(sscreen);
      return NULL;
   }

   if (sscreen->info.chip_class == GFX10 && LLVM_VERSION_MAJOR < 9) {
      fprintf(stderr, "radeonsi: Navi family support requires LLVM 9 or higher\n");
      FREE(sscreen);
      return NULL;
   }

   if (sscreen->info.chip_class >= GFX9) {
      sscreen->se_tile_repeat = 32 * sscreen->info.max_se;
   } else {
      ac_get_raster_config(&sscreen->info, &sscreen->pa_sc_raster_config,
                           &sscreen->pa_sc_raster_config_1, &sscreen->se_tile_repeat);
   }

   sscreen->debug_flags = debug_get_flags_option("R600_DEBUG", debug_options, 0);
   sscreen->debug_flags |= debug_get_flags_option("AMD_DEBUG", debug_options, 0);
   test_flags = debug_get_flags_option("AMD_TEST", test_options, 0);

   if (sscreen->debug_flags & DBG(NO_GFX))
      sscreen->info.has_graphics = false;

   /* Set functions first. */
   sscreen->b.context_create = si_pipe_create_context;
   sscreen->b.destroy = si_destroy_screen;
   sscreen->b.set_max_shader_compiler_threads = si_set_max_shader_compiler_threads;
   sscreen->b.is_parallel_shader_compilation_finished = si_is_parallel_shader_compilation_finished;
   sscreen->b.finalize_nir = si_finalize_nir;

   si_init_screen_get_functions(sscreen);
   si_init_screen_buffer_functions(sscreen);
   si_init_screen_fence_functions(sscreen);
   si_init_screen_state_functions(sscreen);
   si_init_screen_texture_functions(sscreen);
   si_init_screen_query_functions(sscreen);
   si_init_screen_live_shader_cache(sscreen);

   /* Set these flags in debug_flags early, so that the shader cache takes
    * them into account.
    */
   if (driQueryOptionb(config->options, "glsl_correct_derivatives_after_discard"))
      sscreen->debug_flags |= DBG(FS_CORRECT_DERIVS_AFTER_KILL);

   if (sscreen->debug_flags & DBG(INFO))
      ac_print_gpu_info(&sscreen->info);

   slab_create_parent(&sscreen->pool_transfers, sizeof(struct si_transfer), 64);

   sscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
   if (sscreen->force_aniso == -1) {
      sscreen->force_aniso = MIN2(16, debug_get_num_option("AMD_TEX_ANISO", -1));
   }

   if (sscreen->force_aniso >= 0) {
      printf("radeonsi: Forcing anisotropy filter to %ix\n",
             /* round down to a power of two */
             1 << util_logbase2(sscreen->force_aniso));
   }

   (void)simple_mtx_init(&sscreen->aux_context_lock, mtx_plain);
   (void)simple_mtx_init(&sscreen->gpu_load_mutex, mtx_plain);

   si_init_gs_info(sscreen);
   if (!si_init_shader_cache(sscreen)) {
      FREE(sscreen);
      return NULL;
   }

#define OPT_BOOL(name, dflt, description)                                                          \
   sscreen->options.name = driQueryOptionb(config->options, "radeonsi_" #name);
#include "si_debug_options.h"

   if (sscreen->options.no_infinite_interp)
      sscreen->debug_flags |= DBG(KILL_PS_INF_INTERP);
   if (sscreen->options.clamp_div_by_zero)
      sscreen->debug_flags |= DBG(CLAMP_DIV_BY_ZERO);

   si_disk_cache_create(sscreen);

   /* Determine the number of shader compiler threads. */
   hw_threads = sysconf(_SC_NPROCESSORS_ONLN);

   if (hw_threads >= 12) {
      num_comp_hi_threads = hw_threads * 3 / 4;
      num_comp_lo_threads = hw_threads / 3;
   } else if (hw_threads >= 6) {
      num_comp_hi_threads = hw_threads - 2;
      num_comp_lo_threads = hw_threads / 2;
   } else if (hw_threads >= 2) {
      num_comp_hi_threads = hw_threads - 1;
      num_comp_lo_threads = hw_threads / 2;
   } else {
      num_comp_hi_threads = 1;
      num_comp_lo_threads = 1;
   }

   num_comp_hi_threads = MIN2(num_comp_hi_threads, ARRAY_SIZE(sscreen->compiler));
   num_comp_lo_threads = MIN2(num_comp_lo_threads, ARRAY_SIZE(sscreen->compiler_lowp));

   /* Take a reference on the glsl types for the compiler threads. */
   glsl_type_singleton_init_or_ref();

   if (!util_queue_init(
          &sscreen->shader_compiler_queue, "sh", 64, num_comp_hi_threads,
          UTIL_QUEUE_INIT_RESIZE_IF_FULL | UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY)) {
      si_destroy_shader_cache(sscreen);
      FREE(sscreen);
      glsl_type_singleton_decref();
      return NULL;
   }

   if (!util_queue_init(&sscreen->shader_compiler_queue_low_priority, "shlo", 64,
                        num_comp_lo_threads,
                        UTIL_QUEUE_INIT_RESIZE_IF_FULL | UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY |
                           UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
      si_destroy_shader_cache(sscreen);
      FREE(sscreen);
      glsl_type_singleton_decref();
      return NULL;
   }

   if (!debug_get_bool_option("RADEON_DISABLE_PERFCOUNTERS", false))
      si_init_perfcounters(sscreen);

   unsigned prim_discard_vertex_count_threshold, tmp;
   si_initialize_prim_discard_tunables(sscreen, false, &prim_discard_vertex_count_threshold, &tmp);
   /* Compute-shader-based culling doesn't support VBOs in user SGPRs. */
   if (prim_discard_vertex_count_threshold == UINT_MAX)
      sscreen->num_vbos_in_user_sgprs = sscreen->info.chip_class >= GFX9 ? 5 : 1;

   /* Determine tessellation ring info. */
   bool double_offchip_buffers = sscreen->info.chip_class >= GFX7 &&
                                 sscreen->info.family != CHIP_CARRIZO &&
                                 sscreen->info.family != CHIP_STONEY;
   /* This must be one less than the maximum number due to a hw limitation.
    * Various hardware bugs need this.
    */
   unsigned max_offchip_buffers_per_se;

   if (sscreen->info.chip_class >= GFX10)
      max_offchip_buffers_per_se = 128;
   /* Only certain chips can use the maximum value. */
   else if (sscreen->info.family == CHIP_VEGA12 || sscreen->info.family == CHIP_VEGA20)
      max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
   else
      max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;

   unsigned max_offchip_buffers = max_offchip_buffers_per_se * sscreen->info.max_se;
   unsigned offchip_granularity;

   /* Hawaii has a bug with offchip buffers > 256 that can be worked
    * around by setting 4K granularity.
    */
   if (sscreen->info.family == CHIP_HAWAII) {
      sscreen->tess_offchip_block_dw_size = 4096;
      offchip_granularity = V_03093C_X_4K_DWORDS;
   } else {
      sscreen->tess_offchip_block_dw_size = 8192;
      offchip_granularity = V_03093C_X_8K_DWORDS;
   }

   sscreen->tess_factor_ring_size = 32768 * sscreen->info.max_se;
   sscreen->tess_offchip_ring_size = max_offchip_buffers * sscreen->tess_offchip_block_dw_size * 4;

   if (sscreen->info.chip_class >= GFX10_3) {
      sscreen->vgt_hs_offchip_param =
         S_03093C_OFFCHIP_BUFFERING_GFX103(max_offchip_buffers - 1) |
         S_03093C_OFFCHIP_GRANULARITY_GFX103(offchip_granularity);
   } else if (sscreen->info.chip_class >= GFX7) {
      if (sscreen->info.chip_class >= GFX8)
         --max_offchip_buffers;
      sscreen->vgt_hs_offchip_param = S_03093C_OFFCHIP_BUFFERING_GFX7(max_offchip_buffers) |
                                      S_03093C_OFFCHIP_GRANULARITY_GFX7(offchip_granularity);
   } else {
      assert(offchip_granularity == V_03093C_X_8K_DWORDS);
      sscreen->vgt_hs_offchip_param = S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
   }

   sscreen->has_draw_indirect_multi =
      (sscreen->info.family >= CHIP_POLARIS10) ||
      (sscreen->info.chip_class == GFX8 && sscreen->info.pfp_fw_version >= 121 &&
       sscreen->info.me_fw_version >= 87) ||
      (sscreen->info.chip_class == GFX7 && sscreen->info.pfp_fw_version >= 211 &&
       sscreen->info.me_fw_version >= 173) ||
      (sscreen->info.chip_class == GFX6 && sscreen->info.pfp_fw_version >= 79 &&
       sscreen->info.me_fw_version >= 142);

   sscreen->has_out_of_order_rast =
      sscreen->info.has_out_of_order_rast && !(sscreen->debug_flags & DBG(NO_OUT_OF_ORDER));
   sscreen->assume_no_z_fights = driQueryOptionb(config->options, "radeonsi_assume_no_z_fights") ||
                                 driQueryOptionb(config->options, "allow_draw_out_of_order");
   sscreen->commutative_blend_add =
      driQueryOptionb(config->options, "radeonsi_commutative_blend_add") ||
      driQueryOptionb(config->options, "allow_draw_out_of_order");

   /* TODO: Find out why NGG culling hangs on gfx10.3 */
   if (sscreen->info.chip_class == GFX10_3 &&
       !(sscreen->debug_flags & (DBG(ALWAYS_NGG_CULLING_ALL) | DBG(ALWAYS_NGG_CULLING_TESS))))
      sscreen->debug_flags |= DBG(NO_NGG_CULLING);

   sscreen->use_ngg = sscreen->info.chip_class >= GFX10 && sscreen->info.family != CHIP_NAVI14 &&
                      !(sscreen->debug_flags & DBG(NO_NGG));
   sscreen->use_ngg_culling = sscreen->use_ngg && !(sscreen->debug_flags & DBG(NO_NGG_CULLING));
   sscreen->always_use_ngg_culling_all =
      sscreen->use_ngg_culling && sscreen->debug_flags & DBG(ALWAYS_NGG_CULLING_ALL);
   sscreen->always_use_ngg_culling_tess =
      sscreen->use_ngg_culling && sscreen->debug_flags & DBG(ALWAYS_NGG_CULLING_TESS);
   sscreen->use_ngg_streamout = false;

   /* Only enable primitive binning on APUs by default. */
   if (sscreen->info.chip_class >= GFX10) {
      sscreen->dpbb_allowed = true;
      /* DFSM is not supported on GFX 10.3 and not beneficial on Navi1x. */
   } else if (sscreen->info.chip_class == GFX9) {
      sscreen->dpbb_allowed = !sscreen->info.has_dedicated_vram;
      sscreen->dfsm_allowed = !sscreen->info.has_dedicated_vram;
   }

   /* Process DPBB enable flags. */
   if (sscreen->debug_flags & DBG(DPBB)) {
      sscreen->dpbb_allowed = true;
      if (sscreen->debug_flags & DBG(DFSM))
         sscreen->dfsm_allowed = true;
   }

   /* Process DPBB disable flags. */
   if (sscreen->debug_flags & DBG(NO_DPBB)) {
      sscreen->dpbb_allowed = false;
      sscreen->dfsm_allowed = false;
   } else if (sscreen->debug_flags & DBG(NO_DFSM)) {
      sscreen->dfsm_allowed = false;
   }

   /* While it would be nice not to have this flag, we are constrained
    * by the reality that LLVM 9.0 has buggy VGPR indexing on GFX9.
    */
   sscreen->llvm_has_working_vgpr_indexing = sscreen->info.chip_class != GFX9;

   sscreen->dcc_msaa_allowed = !(sscreen->debug_flags & DBG(NO_DCC_MSAA));

   (void)simple_mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
   sscreen->use_monolithic_shaders = (sscreen->debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;

   sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE;
   if (sscreen->info.chip_class <= GFX8) {
      sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_L2;
      sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WB_L2;
   }

   if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
      sscreen->debug_flags |= DBG_ALL_SHADERS;

   /* Syntax:
    *     EQAA=s,z,c
    * Example:
    *     EQAA=8,4,2
    *
    * That means 8 coverage samples, 4 Z/S samples, and 2 color samples.
    * Constraints:
    *     s >= z >= c (ignoring this only wastes memory)
    *     s = [2..16]
    *     z = [2..8]
    *     c = [2..8]
    *
    * Only MSAA color and depth buffers are overridden.
    */
   if (sscreen->info.has_eqaa_surface_allocator) {
      const char *eqaa = debug_get_option("EQAA", NULL);
      unsigned s, z, f;

      if (eqaa && sscanf(eqaa, "%u,%u,%u", &s, &z, &f) == 3 && s && z && f) {
         sscreen->eqaa_force_coverage_samples = s;
         sscreen->eqaa_force_z_samples = z;
         sscreen->eqaa_force_color_samples = f;
      }
   }

   sscreen->ge_wave_size = 64;
   sscreen->ps_wave_size = 64;
   sscreen->compute_wave_size = 64;

   if (sscreen->info.chip_class >= GFX10) {
      /* Pixel shaders: Wave64 is always fastest.
       * Vertex shaders: Wave64 is probably better, because:
       * - greater chance of L0 cache hits, because more threads are assigned
       *   to the same primitive and vertex
       * - scalar instructions are only executed once for 64 threads instead of twice
       * - VGPR allocation granularity is half of Wave32, so 1 Wave64 can
       *   sometimes use fewer VGPRs than 2 Wave32
       * - TessMark X64 with NGG culling is faster with Wave64
       */
      if (sscreen->debug_flags & DBG(W32_GE))
         sscreen->ge_wave_size = 32;
      if (sscreen->debug_flags & DBG(W32_PS))
         sscreen->ps_wave_size = 32;
      if (sscreen->debug_flags & DBG(W32_CS))
         sscreen->compute_wave_size = 32;

      if (sscreen->debug_flags & DBG(W64_GE))
         sscreen->ge_wave_size = 64;
      if (sscreen->debug_flags & DBG(W64_PS))
         sscreen->ps_wave_size = 64;
      if (sscreen->debug_flags & DBG(W64_CS))
         sscreen->compute_wave_size = 64;
   }

   /* Create the auxiliary context. This must be done last. */
   sscreen->aux_context = si_create_context(
      &sscreen->b, (sscreen->options.aux_debug ? PIPE_CONTEXT_DEBUG : 0) |
                      (sscreen->info.has_graphics ? 0 : PIPE_CONTEXT_COMPUTE_ONLY));
   if (sscreen->options.aux_debug) {
      struct u_log_context *log = CALLOC_STRUCT(u_log_context);
      u_log_context_init(log);
      sscreen->aux_context->set_log_context(sscreen->aux_context, log);
   }

   if (test_flags & DBG(TEST_DMA))
      si_test_dma(sscreen);

   if (test_flags & DBG(TEST_DMA_PERF)) {
      si_test_dma_perf(sscreen);
   }

   if (test_flags & (DBG(TEST_VMFAULT_CP) | DBG(TEST_VMFAULT_SDMA) | DBG(TEST_VMFAULT_SHADER)))
      si_test_vmfault(sscreen, test_flags);

   if (test_flags & DBG(TEST_GDS))
      si_test_gds((struct si_context *)sscreen->aux_context);

   if (test_flags & DBG(TEST_GDS_MM)) {
      si_test_gds_memory_management((struct si_context *)sscreen->aux_context, 32 * 1024, 4,
                                    RADEON_DOMAIN_GDS);
   }
   if (test_flags & DBG(TEST_GDS_OA_MM)) {
      si_test_gds_memory_management((struct si_context *)sscreen->aux_context, 4, 1,
                                    RADEON_DOMAIN_OA);
   }

   ac_print_shadowed_regs(&sscreen->info);

   STATIC_ASSERT(sizeof(union si_vgt_stages_key) == 4);
   return &sscreen->b;
}

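/* Public entry point: pick the winsys by the DRM driver version reported for
 * the fd. DRM major version 2 is the radeon kernel driver, major version 3
 * is amdgpu.
 */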
struct pipe_screen *radeonsi_screen_create(int fd, const struct pipe_screen_config *config)
{
   drmVersionPtr version = drmGetVersion(fd);
   struct radeon_winsys *rw = NULL;

   switch (version->version_major) {
   case 2:
      rw = radeon_drm_winsys_create(fd, config, radeonsi_screen_create_impl);
      break;
   case 3:
      rw = amdgpu_winsys_create(fd, config, radeonsi_screen_create_impl);
      break;
   }

   drmFreeVersion(version);
   return rw ? rw->screen : NULL;
}