/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_pipe.h"
#include "si_public.h"
#include "si_shader_internal.h"
#include "sid.h"

#include "radeon/radeon_uvd.h"
#include "util/hash_table.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_suballoc.h"
#include "util/u_tests.h"
#include "util/xmlconfig.h"
#include "vl/vl_decoder.h"
#include "../ddebug/dd_util.h"
static const struct debug_named_value debug_options[] = {
	/* Shader logging options: */
	{ "vs", DBG(VS), "Print vertex shaders" },
	{ "ps", DBG(PS), "Print pixel shaders" },
	{ "gs", DBG(GS), "Print geometry shaders" },
	{ "tcs", DBG(TCS), "Print tessellation control shaders" },
	{ "tes", DBG(TES), "Print tessellation evaluation shaders" },
	{ "cs", DBG(CS), "Print compute shaders" },
	{ "noir", DBG(NO_IR), "Don't print the LLVM IR" },
	{ "notgsi", DBG(NO_TGSI), "Don't print the TGSI" },
	{ "noasm", DBG(NO_ASM), "Don't print disassembled shaders" },
	{ "preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations" },

	/* Shader compiler options the shader cache should be aware of: */
	{ "unsafemath", DBG(UNSAFE_MATH), "Enable unsafe math shader optimizations" },
	{ "sisched", DBG(SI_SCHED), "Enable LLVM SI Machine Instruction Scheduler." },

	/* Shader compiler options (with no effect on the shader cache): */
	{ "checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR" },
	{ "nir", DBG(NIR), "Enable experimental NIR shaders" },
	{ "mono", DBG(MONOLITHIC_SHADERS), "Use old-style monolithic shaders compiled on demand" },
	{ "nooptvariant", DBG(NO_OPT_VARIANT), "Disable compiling optimized shader variants." },

	/* Information logging options: */
	{ "info", DBG(INFO), "Print driver information" },
	{ "tex", DBG(TEX), "Print texture info" },
	{ "compute", DBG(COMPUTE), "Print compute info" },
	{ "vm", DBG(VM), "Print virtual addresses when creating resources" },

	/* Driver options: */
	{ "forcedma", DBG(FORCE_DMA), "Use asynchronous DMA for all operations when possible." },
	{ "nodma", DBG(NO_ASYNC_DMA), "Disable asynchronous DMA" },
	{ "nowc", DBG(NO_WC), "Disable GTT write combining" },
	{ "check_vm", DBG(CHECK_VM), "Check VM faults and dump debug info." },
	{ "reserve_vmid", DBG(RESERVE_VMID), "Force VMID reservation per context." },

	/* 3D engine options: */
	{ "switch_on_eop", DBG(SWITCH_ON_EOP), "Program WD/IA to switch on end-of-packet." },
	{ "nooutoforder", DBG(NO_OUT_OF_ORDER), "Disable out-of-order rasterization" },
	{ "nodpbb", DBG(NO_DPBB), "Disable DPBB." },
	{ "nodfsm", DBG(NO_DFSM), "Disable DFSM." },
	{ "dpbb", DBG(DPBB), "Enable DPBB." },
	{ "dfsm", DBG(DFSM), "Enable DFSM." },
	{ "nohyperz", DBG(NO_HYPERZ), "Disable Hyper-Z" },
	{ "norbplus", DBG(NO_RB_PLUS), "Disable RB+." },
	{ "no2d", DBG(NO_2D_TILING), "Disable 2D tiling" },
	{ "notiling", DBG(NO_TILING), "Disable tiling" },
	{ "nodcc", DBG(NO_DCC), "Disable DCC." },
	{ "nodccclear", DBG(NO_DCC_CLEAR), "Disable DCC fast clear." },
	{ "nodccfb", DBG(NO_DCC_FB), "Disable separate DCC on the main framebuffer" },
	{ "nodccmsaa", DBG(NO_DCC_MSAA), "Disable DCC for MSAA" },
	{ "dccmsaa", DBG(DCC_MSAA), "Enable DCC for MSAA" },
	{ "nofmask", DBG(NO_FMASK), "Disable MSAA compression" },

	/* Tests: */
	{ "testdma", DBG(TEST_DMA), "Invoke SDMA tests and exit." },
	{ "testvmfaultcp", DBG(TEST_VMFAULT_CP), "Invoke a CP VM fault test and exit." },
	{ "testvmfaultsdma", DBG(TEST_VMFAULT_SDMA), "Invoke a SDMA VM fault test and exit." },
	{ "testvmfaultshader", DBG(TEST_VMFAULT_SHADER), "Invoke a shader VM fault test and exit." },

	DEBUG_NAMED_VALUE_END /* must be last */
};
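/* These flags are read from the comma-separated R600_DEBUG environment
 * variable via debug_get_flags_option() in radeonsi_screen_create() below,
 * e.g. R600_DEBUG=vs,ps,info.
 */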
static void si_destroy_context(struct pipe_context *context)
{
	struct si_context *sctx = (struct si_context *)context;
	int i;

	/* Unreference the framebuffer normally to disable related logic
	 * properly.
	 */
	struct pipe_framebuffer_state fb = {};
	if (context->set_framebuffer_state)
		context->set_framebuffer_state(context, &fb);

	si_release_all_descriptors(sctx);

	pipe_resource_reference(&sctx->esgs_ring, NULL);
	pipe_resource_reference(&sctx->gsvs_ring, NULL);
	pipe_resource_reference(&sctx->tess_rings, NULL);
	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
	r600_resource_reference(&sctx->border_color_buffer, NULL);
	free(sctx->border_color_table);
	r600_resource_reference(&sctx->scratch_buffer, NULL);
	r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
	r600_resource_reference(&sctx->wait_mem_scratch, NULL);

	si_pm4_free_state(sctx, sctx->init_config, ~0);
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	for (i = 0; i < ARRAY_SIZE(sctx->vgt_shader_config); i++)
		si_pm4_delete_state(sctx, vgt_shader_config, sctx->vgt_shader_config[i]);

	if (sctx->fixed_func_tcs_shader.cso)
		sctx->b.b.delete_tcs_state(&sctx->b.b, sctx->fixed_func_tcs_shader.cso);
	if (sctx->custom_dsa_flush)
		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush);
	if (sctx->custom_blend_resolve)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
	if (sctx->custom_blend_fmask_decompress)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fmask_decompress);
	if (sctx->custom_blend_eliminate_fastclear)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_eliminate_fastclear);
	if (sctx->custom_blend_dcc_decompress)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_dcc_decompress);
	if (sctx->vs_blit_pos)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_pos);
	if (sctx->vs_blit_pos_layered)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_pos_layered);
	if (sctx->vs_blit_color)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_color);
	if (sctx->vs_blit_color_layered)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_color_layered);
	if (sctx->vs_blit_texcoord)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_texcoord);

	if (sctx->blitter)
		util_blitter_destroy(sctx->blitter);

	si_common_context_cleanup(&sctx->b);

	LLVMDisposeTargetMachine(sctx->tm);

	si_saved_cs_reference(&sctx->current_saved_cs, NULL);

	_mesa_hash_table_destroy(sctx->tex_handles, NULL);
	_mesa_hash_table_destroy(sctx->img_handles, NULL);

	util_dynarray_fini(&sctx->resident_tex_handles);
	util_dynarray_fini(&sctx->resident_img_handles);
	util_dynarray_fini(&sctx->resident_tex_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
	FREE(sctx);
}
static enum pipe_reset_status
si_amdgpu_get_reset_status(struct pipe_context *ctx)
{
	struct si_context *sctx = (struct si_context *)ctx;

	return sctx->b.ws->ctx_query_reset_status(sctx->b.ctx);
}
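/* This is only installed as get_device_reset_status when running on the
 * amdgpu kernel driver (drm_major == 3); see si_create_context().
 */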
/* Apitrace profiling:
 *   1) qapitrace : Tools -> Profile: Measure CPU & GPU times
 *   2) In the middle panel, zoom in (mouse wheel) on some bad draw call
 *      and remember its number.
 *   3) In Mesa, enable queries and performance counters around that draw
 *      call and print the results.
 *   4) glretrace --benchmark --markers ..
 */
static void si_emit_string_marker(struct pipe_context *ctx,
				  const char *string, int len)
{
	struct si_context *sctx = (struct si_context *)ctx;

	dd_parse_apitrace_marker(string, len, &sctx->apitrace_call_number);

	if (sctx->b.log)
		u_log_printf(sctx->b.log, "\nString marker: %*s\n", len, string);
}
static LLVMTargetMachineRef
si_create_llvm_target_machine(struct si_screen *sscreen)
{
	enum ac_target_machine_options tm_options =
		(sscreen->debug_flags & DBG(SI_SCHED) ? AC_TM_SISCHED : 0) |
		(sscreen->info.chip_class >= GFX9 ? AC_TM_FORCE_ENABLE_XNACK : 0) |
		(sscreen->info.chip_class < GFX9 ? AC_TM_FORCE_DISABLE_XNACK : 0) |
		(!sscreen->llvm_has_working_vgpr_indexing ? AC_TM_PROMOTE_ALLOCA_TO_SCRATCH : 0);

	return ac_create_target_machine(sscreen->info.family, tm_options);
}
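/* A target machine is created per context (sctx->tm), and
 * radeonsi_screen_create() additionally fills the per-thread arrays
 * sscreen->tm and sscreen->tm_low_priority for the compiler queues.
 */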
static void si_set_debug_callback(struct pipe_context *ctx,
				  const struct pipe_debug_callback *cb)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *screen = sctx->screen;

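	/* Drain both compiler queues first, presumably so that no in-flight
	 * compile job keeps using the previous debug callback.
	 */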
	util_queue_finish(&screen->shader_compiler_queue);
	util_queue_finish(&screen->shader_compiler_queue_low_priority);

	if (cb)
		sctx->debug = *cb;
	else
		memset(&sctx->debug, 0, sizeof(sctx->debug));
}
static void si_set_log_context(struct pipe_context *ctx,
			       struct u_log_context *log)
{
	struct si_context *sctx = (struct si_context *)ctx;
	sctx->b.log = log;

	if (log)
		u_log_add_auto_logger(log, si_auto_log_cs, sctx);
}
static struct pipe_context *si_create_context(struct pipe_screen *screen,
					      unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen* sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->ws;
	int shader, i;

	if (!sctx)
		return NULL;

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.b.screen = screen; /* this must be set first */
	sctx->b.b.priv = NULL;
	sctx->b.b.destroy = si_destroy_context;
	sctx->b.b.emit_string_marker = si_emit_string_marker;
	sctx->b.b.set_debug_callback = si_set_debug_callback;
	sctx->b.b.set_log_context = si_set_log_context;
	sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	if (!si_common_context_init(&sctx->b, sscreen, flags))
		goto fail;

	if (sscreen->info.drm_major == 3)
		sctx->b.b.get_device_reset_status = si_amdgpu_get_reset_status;

	si_init_buffer_functions(sctx);
	si_init_clear_functions(sctx);
	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_cp_dma_functions(sctx);
	si_init_debug_functions(sctx);
	si_init_msaa_functions(sctx);
	si_init_streamout_functions(sctx);

	if (sscreen->info.has_hw_decode) {
		sctx->b.b.create_video_codec = si_uvd_create_decoder;
		sctx->b.b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.b.create_video_codec = vl_create_decoder;
		sctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
				       si_flush_gfx_cs, sctx);
	sctx->b.gfx.flush = si_flush_gfx_cs;

	/* Border colors. */
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = (struct r600_resource*)
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;

	si_init_all_descriptors(sctx);
	si_init_fence_functions(sctx);
	si_init_state_functions(sctx);
	si_init_shader_functions(sctx);
	si_init_viewport_functions(sctx);
	si_init_ia_multi_vgt_param_table(sctx);

	if (sctx->b.chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	if (sscreen->debug_flags & DBG(FORCE_DMA))
		sctx->b.b.resource_copy_region = sctx->b.dma_copy;

	sctx->blitter = util_blitter_create(&sctx->b.b);
	if (sctx->blitter == NULL)
		goto fail;
	sctx->blitter->draw_rectangle = si_draw_rectangle;
	sctx->blitter->skip_viewport_restore = true;

	sctx->sample_mask.sample_mask = 0xffff;

	/* these must be last */
	si_begin_new_gfx_cs(sctx);

	if (sctx->b.chip_class >= GFX9) {
		sctx->wait_mem_scratch = (struct r600_resource*)
			pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4);
		if (!sctx->wait_mem_scratch)
			goto fail;

		/* Initialize the memory. */
		struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
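		/* The five dwords below form one WRITE_DATA packet: the
		 * header, a control dword (memory destination, ME engine,
		 * write confirmation), the 64-bit GPU address, and the
		 * value to store (wait_mem_number).
		 */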
		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
			    S_370_WR_CONFIRM(1) |
			    S_370_ENGINE_SEL(V_370_ME));
		radeon_emit(cs, sctx->wait_mem_scratch->gpu_address);
		radeon_emit(cs, sctx->wait_mem_scratch->gpu_address >> 32);
		radeon_emit(cs, sctx->wait_mem_number);
	}

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->b.chip_class == CIK) {
		sctx->null_const_buf.buffer =
			si_aligned_buffer_create(screen,
						 R600_RESOURCE_FLAG_32BIT,
						 PIPE_USAGE_DEFAULT, 16,
						 sctx->screen->info.tcc_cache_line_size);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.b.set_constant_buffer(&sctx->b.b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);

		/* Clear the NULL constant buffer, because loads should return zeros. */
		si_clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
				sctx->null_const_buf.buffer->width0, 0,
				R600_COHERENCY_SHADER);
	}

	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
	sctx->scratch_waves = MAX2(32 * sscreen->info.num_good_compute_units,
				   max_threads_per_block / 64);
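	/* Example: with 64 CUs and a 1024-thread block limit this is
	 * MAX2(32 * 64, 1024 / 64) = 2048 waves.
	 */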
	sctx->tm = si_create_llvm_target_machine(sscreen);

	/* Bindless handles. */
	sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);
	sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);

	util_dynarray_init(&sctx->resident_tex_handles, NULL);
	util_dynarray_init(&sctx->resident_img_handles, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

	return &sctx->b.b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b.b);
	return NULL;
}
static struct pipe_context *si_pipe_create_context(struct pipe_screen *screen,
						   void *priv, unsigned flags)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct pipe_context *ctx;

	if (sscreen->debug_flags & DBG(CHECK_VM))
		flags |= PIPE_CONTEXT_DEBUG;

	ctx = si_create_context(screen, flags);

	if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
		return ctx;

	/* Clover (compute-only) is unsupported. */
	if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
		return ctx;

	/* When shaders are logged to stderr, asynchronous compilation is
	 * disabled too. */
	if (sscreen->debug_flags & DBG_ALL_SHADERS)
		return ctx;

	/* Use asynchronous flushes only on amdgpu, since the radeon
	 * implementation for fence_server_sync is incomplete. */
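	/* The last argument is an out-parameter: threaded_context_create()
	 * stores the new threaded-context pointer in b.tc so the driver can
	 * synchronize with it later.
	 */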
	return threaded_context_create(ctx, &sscreen->pool_transfers,
				       si_replace_buffer_storage,
				       sscreen->info.drm_major >= 3 ? si_create_fence : NULL,
				       &((struct si_context*)ctx)->b.tc);
}
static void si_destroy_screen(struct pipe_screen* pscreen)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;
	struct si_shader_part *parts[] = {
		sscreen->vs_prologs,
		sscreen->tcs_epilogs,
		sscreen->gs_prologs,
		sscreen->ps_prologs,
		sscreen->ps_epilogs
	};
	unsigned i;

	if (!sscreen->ws->unref(sscreen->ws))
		return;

	util_queue_destroy(&sscreen->shader_compiler_queue);
	util_queue_destroy(&sscreen->shader_compiler_queue_low_priority);

	for (i = 0; i < ARRAY_SIZE(sscreen->tm); i++)
		if (sscreen->tm[i])
			LLVMDisposeTargetMachine(sscreen->tm[i]);

	for (i = 0; i < ARRAY_SIZE(sscreen->tm_low_priority); i++)
		if (sscreen->tm_low_priority[i])
			LLVMDisposeTargetMachine(sscreen->tm_low_priority[i]);

	/* Free shader parts. */
	for (i = 0; i < ARRAY_SIZE(parts); i++) {
		while (parts[i]) {
			struct si_shader_part *part = parts[i];

			parts[i] = part->next;
			ac_shader_binary_clean(&part->binary);
			FREE(part);
		}
	}
	mtx_destroy(&sscreen->shader_parts_mutex);
	si_destroy_shader_cache(sscreen);

	si_perfcounters_destroy(sscreen);
	si_gpu_load_kill_thread(sscreen);

	mtx_destroy(&sscreen->gpu_load_mutex);
	mtx_destroy(&sscreen->aux_context_lock);
	sscreen->aux_context->destroy(sscreen->aux_context);

	slab_destroy_parent(&sscreen->pool_transfers);

	disk_cache_destroy(sscreen->disk_shader_cache);
	sscreen->ws->destroy(sscreen->ws);
	FREE(sscreen);
}
static bool si_init_gs_info(struct si_screen *sscreen)
{
	/* gs_table_depth is not used by GFX9 */
	if (sscreen->info.chip_class >= GFX9)
		return true;

	switch (sscreen->info.family) {
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_ICELAND:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sscreen->gs_table_depth = 16;
		return true;
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		sscreen->gs_table_depth = 32;
		return true;
	default:
		return false;
	}
}
static void si_handle_env_var_force_family(struct si_screen *sscreen)
{
	const char *family = debug_get_option("SI_FORCE_FAMILY", NULL);
	unsigned i;

	if (!family)
		return;

	for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
		if (!strcmp(family, ac_get_llvm_processor_name(i))) {
			/* Override family and chip_class. */
			sscreen->info.family = i;

			if (i >= CHIP_VEGA10)
				sscreen->info.chip_class = GFX9;
			else if (i >= CHIP_TONGA)
				sscreen->info.chip_class = VI;
			else if (i >= CHIP_BONAIRE)
				sscreen->info.chip_class = CIK;
			else
				sscreen->info.chip_class = SI;

			/* Don't submit any IBs. */
			setenv("RADEON_NOOP", "1", 1);
			return;
		}
	}

	fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
	exit(1);
}
static void si_test_vmfault(struct si_screen *sscreen)
{
	struct pipe_context *ctx = sscreen->aux_context;
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_resource *buf =
		pipe_buffer_create_const0(&sscreen->b, 0, PIPE_USAGE_DEFAULT, 64);

	if (!buf) {
		puts("Buffer allocation failed.");
		exit(1);
	}

	r600_resource(buf)->gpu_address = 0; /* cause a VM fault */

	if (sscreen->debug_flags & DBG(TEST_VMFAULT_CP)) {
		si_copy_buffer(sctx, buf, buf, 0, 4, 4, 0);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: CP - done.");
	}
	if (sscreen->debug_flags & DBG(TEST_VMFAULT_SDMA)) {
		sctx->b.dma_clear_buffer(ctx, buf, 0, 4, 0);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: SDMA - done.");
	}
	if (sscreen->debug_flags & DBG(TEST_VMFAULT_SHADER)) {
		util_test_constant_buffer(ctx, buf);
		puts("VM fault test: Shader - done.");
	}
	exit(0);
}
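/* Reached from radeonsi_screen_create() when one of the testvmfault* flags
 * is set; note the exit() calls above, so the process never returns to the
 * application.
 */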
static void si_disk_cache_create(struct si_screen *sscreen)
{
	/* Don't use the cache if shader dumping is enabled. */
	if (sscreen->debug_flags & DBG_ALL_SHADERS)
		return;

	uint32_t mesa_timestamp;
	if (disk_cache_get_function_timestamp(si_disk_cache_create,
					      &mesa_timestamp)) {
		char *timestamp_str;
		int res = -1;
		uint32_t llvm_timestamp;

		if (disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo,
						      &llvm_timestamp)) {
			res = asprintf(&timestamp_str, "%u_%u",
				       mesa_timestamp, llvm_timestamp);
		}

		if (res != -1) {
			/* These flags affect shader compilation. */
			#define ALL_FLAGS (DBG(FS_CORRECT_DERIVS_AFTER_KILL) | \
					   DBG(SI_SCHED) | \
					   DBG(UNSAFE_MATH) | \
					   DBG(NIR))
			uint64_t shader_debug_flags = sscreen->debug_flags &
						      ALL_FLAGS;

			/* Add the high bits of 32-bit addresses, which affects
			 * how 32-bit addresses are expanded to 64 bits.
			 */
			STATIC_ASSERT(ALL_FLAGS <= UINT_MAX);
			shader_debug_flags |= (uint64_t)sscreen->info.address32_hi << 32;

			sscreen->disk_shader_cache =
				disk_cache_create(si_get_family_name(sscreen),
						  timestamp_str,
						  shader_debug_flags);
			free(timestamp_str);
		}
	}
}
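/* The cache key therefore combines the GPU family name, the Mesa and LLVM
 * build timestamps, and the shader-affecting debug flags (plus the high
 * address bits), so any of these changing invalidates the cache.
 */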
struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
					   const struct pipe_screen_config *config)
{
	struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
	unsigned num_threads, num_compiler_threads, num_compiler_threads_lowprio, i;

	if (!sscreen) {
		return NULL;
	}

	sscreen->ws = ws;
	ws->query_info(ws, &sscreen->info);

	sscreen->debug_flags = debug_get_flags_option("R600_DEBUG",
						      debug_options, 0);

	/* Set functions first. */
	sscreen->b.context_create = si_pipe_create_context;
	sscreen->b.destroy = si_destroy_screen;

	si_init_screen_get_functions(sscreen);
	si_init_screen_buffer_functions(sscreen);
	si_init_screen_fence_functions(sscreen);
	si_init_screen_state_functions(sscreen);
	si_init_screen_texture_functions(sscreen);
	si_init_screen_query_functions(sscreen);

	/* Set these flags in debug_flags early, so that the shader cache takes
	 * them into account.
	 */
	if (driQueryOptionb(config->options,
			    "glsl_correct_derivatives_after_discard"))
		sscreen->debug_flags |= DBG(FS_CORRECT_DERIVS_AFTER_KILL);
	if (driQueryOptionb(config->options, "radeonsi_enable_sisched"))
		sscreen->debug_flags |= DBG(SI_SCHED);

	if (sscreen->debug_flags & DBG(INFO))
		ac_print_gpu_info(&sscreen->info);

	slab_create_parent(&sscreen->pool_transfers,
			   sizeof(struct r600_transfer), 64);

	sscreen->force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
	if (sscreen->force_aniso >= 0) {
		printf("radeonsi: Forcing anisotropy filter to %ix\n",
		       /* round down to a power of two */
		       1 << util_logbase2(sscreen->force_aniso));
	}

	(void) mtx_init(&sscreen->aux_context_lock, mtx_plain);
	(void) mtx_init(&sscreen->gpu_load_mutex, mtx_plain);

	if (!si_init_gs_info(sscreen) ||
	    !si_init_shader_cache(sscreen)) {
		FREE(sscreen);
		return NULL;
	}

	si_disk_cache_create(sscreen);

	/* Only enable as many threads as we have target machines, but at most
	 * the number of CPUs - 1 if there is more than one.
	 */
	num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	num_threads = MAX2(1, num_threads - 1);
	num_compiler_threads = MIN2(num_threads, ARRAY_SIZE(sscreen->tm));
	num_compiler_threads_lowprio =
		MIN2(num_threads, ARRAY_SIZE(sscreen->tm_low_priority));

	if (!util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
			     32, num_compiler_threads,
			     UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
		si_destroy_shader_cache(sscreen);
		FREE(sscreen);
		return NULL;
	}

	if (!util_queue_init(&sscreen->shader_compiler_queue_low_priority,
			     "si_shader_low",
			     32, num_compiler_threads_lowprio,
			     UTIL_QUEUE_INIT_RESIZE_IF_FULL |
			     UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
		si_destroy_shader_cache(sscreen);
		FREE(sscreen);
		return NULL;
	}

	si_handle_env_var_force_family(sscreen);

	if (!debug_get_bool_option("RADEON_DISABLE_PERFCOUNTERS", false))
		si_init_perfcounters(sscreen);

	/* Determine tessellation ring info. */
	bool double_offchip_buffers = sscreen->info.chip_class >= CIK &&
				      sscreen->info.family != CHIP_CARRIZO &&
				      sscreen->info.family != CHIP_STONEY;
	/* This must be one less than the maximum number due to a hw limitation.
	 * Various hardware bugs in SI, CIK, and GFX9 need this.
	 */
	unsigned max_offchip_buffers_per_se;

	/* Only certain chips can use the maximum value. */
	if (sscreen->info.family == CHIP_VEGA12)
		max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
	else
		max_offchip_buffers_per_se = double_offchip_buffers ? 127 : 63;

	unsigned max_offchip_buffers = max_offchip_buffers_per_se *
				       sscreen->info.max_se;
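	/* Example: a 4-SE chip with doubled offchip buffers gets
	 * 127 * 4 = 508 offchip buffers in total.
	 */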
	unsigned offchip_granularity;

	/* Hawaii has a bug with offchip buffers > 256 that can be worked
	 * around by setting 4K granularity.
	 */
	if (sscreen->info.family == CHIP_HAWAII) {
		sscreen->tess_offchip_block_dw_size = 4096;
		offchip_granularity = V_03093C_X_4K_DWORDS;
	} else {
		sscreen->tess_offchip_block_dw_size = 8192;
		offchip_granularity = V_03093C_X_8K_DWORDS;
	}

	sscreen->tess_factor_ring_size = 32768 * sscreen->info.max_se;
	assert(((sscreen->tess_factor_ring_size / 4) & C_030938_SIZE) == 0);
	sscreen->tess_offchip_ring_size = max_offchip_buffers *
					  sscreen->tess_offchip_block_dw_size * 4;
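	/* Example: with 4 SEs the tess factor ring is 32768 * 4 = 128 KB,
	 * and with 508 offchip buffers of 8K dwords each the offchip ring is
	 * 508 * 8192 * 4 bytes (~16 MB).
	 */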
	if (sscreen->info.chip_class >= CIK) {
		if (sscreen->info.chip_class >= VI)
			--max_offchip_buffers;
		sscreen->vgt_hs_offchip_param =
			S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
			S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
	} else {
		assert(offchip_granularity == V_03093C_X_8K_DWORDS);
		sscreen->vgt_hs_offchip_param =
			S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
	}

	/* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
	 * on SI. */
	sscreen->has_clear_state = sscreen->info.chip_class >= CIK;

	sscreen->has_distributed_tess =
		sscreen->info.chip_class >= VI &&
		sscreen->info.max_se >= 2;

	sscreen->has_draw_indirect_multi =
		(sscreen->info.family >= CHIP_POLARIS10) ||
		(sscreen->info.chip_class == VI &&
		 sscreen->info.pfp_fw_version >= 121 &&
		 sscreen->info.me_fw_version >= 87) ||
		(sscreen->info.chip_class == CIK &&
		 sscreen->info.pfp_fw_version >= 211 &&
		 sscreen->info.me_fw_version >= 173) ||
		(sscreen->info.chip_class == SI &&
		 sscreen->info.pfp_fw_version >= 79 &&
		 sscreen->info.me_fw_version >= 142);

	sscreen->has_out_of_order_rast = sscreen->info.chip_class >= VI &&
					 sscreen->info.max_se >= 2 &&
					 !(sscreen->debug_flags & DBG(NO_OUT_OF_ORDER));
	sscreen->assume_no_z_fights =
		driQueryOptionb(config->options, "radeonsi_assume_no_z_fights");
	sscreen->commutative_blend_add =
		driQueryOptionb(config->options, "radeonsi_commutative_blend_add");
	sscreen->clear_db_cache_before_clear =
		driQueryOptionb(config->options, "radeonsi_clear_db_cache_before_clear");
	sscreen->has_msaa_sample_loc_bug = (sscreen->info.family >= CHIP_POLARIS10 &&
					    sscreen->info.family <= CHIP_POLARIS12) ||
					   sscreen->info.family == CHIP_VEGA10 ||
					   sscreen->info.family == CHIP_RAVEN;
	sscreen->has_ls_vgpr_init_bug = sscreen->info.family == CHIP_VEGA10 ||
					sscreen->info.family == CHIP_RAVEN;

	if (sscreen->debug_flags & DBG(DPBB)) {
		sscreen->dpbb_allowed = true;
	} else {
		/* Only enable primitive binning on Raven by default. */
		/* TODO: Investigate if binning is profitable on Vega12. */
		sscreen->dpbb_allowed = sscreen->info.family == CHIP_RAVEN &&
					!(sscreen->debug_flags & DBG(NO_DPBB));
	}

	if (sscreen->debug_flags & DBG(DFSM)) {
		sscreen->dfsm_allowed = sscreen->dpbb_allowed;
	} else {
		sscreen->dfsm_allowed = sscreen->dpbb_allowed &&
					!(sscreen->debug_flags & DBG(NO_DFSM));
	}

	/* While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 5.0 doesn't have working VGPR indexing
	 * on GFX9.
	 */
	sscreen->llvm_has_working_vgpr_indexing = sscreen->info.chip_class <= VI;

	/* Some chips have RB+ registers, but don't support RB+. Those must
	 * always disable it.
	 */
	if (sscreen->info.family == CHIP_STONEY ||
	    sscreen->info.chip_class >= GFX9) {
		sscreen->has_rbplus = true;

		sscreen->rbplus_allowed =
			!(sscreen->debug_flags & DBG(NO_RB_PLUS)) &&
			(sscreen->info.family == CHIP_STONEY ||
			 sscreen->info.family == CHIP_VEGA12 ||
			 sscreen->info.family == CHIP_RAVEN);
	}

	sscreen->dcc_msaa_allowed =
		!(sscreen->debug_flags & DBG(NO_DCC_MSAA)) &&
		(sscreen->debug_flags & DBG(DCC_MSAA) ||
		 sscreen->info.chip_class == VI);

	sscreen->cpdma_prefetch_writes_memory = sscreen->info.chip_class <= VI;

	(void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
	sscreen->use_monolithic_shaders =
		(sscreen->debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;

	sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
					  SI_CONTEXT_INV_VMEM_L1;
	if (sscreen->info.chip_class <= VI) {
		sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_GLOBAL_L2;
		sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
	}

	if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
		sscreen->debug_flags |= DBG_ALL_SHADERS;

	for (i = 0; i < num_compiler_threads; i++)
		sscreen->tm[i] = si_create_llvm_target_machine(sscreen);
	for (i = 0; i < num_compiler_threads_lowprio; i++)
		sscreen->tm_low_priority[i] = si_create_llvm_target_machine(sscreen);

	/* Create the auxiliary context. This must be done last. */
	sscreen->aux_context = si_create_context(&sscreen->b, 0);

	if (sscreen->debug_flags & DBG(TEST_DMA))
		si_test_dma(sscreen);

	if (sscreen->debug_flags & (DBG(TEST_VMFAULT_CP) |
				    DBG(TEST_VMFAULT_SDMA) |
				    DBG(TEST_VMFAULT_SHADER)))
		si_test_vmfault(sscreen);

	return &sscreen->b;
}