/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "si_pipe.h"
#include "si_public.h"
#include "si_shader_internal.h"
#include "sid.h"

#include "radeon/radeon_uvd.h"
#include "util/hash_table.h"
#include "util/u_log.h"
#include "util/u_memory.h"
#include "util/u_suballoc.h"
#include "util/u_tests.h"
#include "util/xmlconfig.h"
#include "vl/vl_decoder.h"
#include "../ddebug/dd_util.h"
static const struct debug_named_value debug_options[] = {
	/* Shader logging options: */
	{ "vs", DBG(VS), "Print vertex shaders" },
	{ "ps", DBG(PS), "Print pixel shaders" },
	{ "gs", DBG(GS), "Print geometry shaders" },
	{ "tcs", DBG(TCS), "Print tessellation control shaders" },
	{ "tes", DBG(TES), "Print tessellation evaluation shaders" },
	{ "cs", DBG(CS), "Print compute shaders" },
	{ "noir", DBG(NO_IR), "Don't print the LLVM IR" },
	{ "notgsi", DBG(NO_TGSI), "Don't print the TGSI" },
	{ "noasm", DBG(NO_ASM), "Don't print disassembled shaders" },
	{ "preoptir", DBG(PREOPT_IR), "Print the LLVM IR before initial optimizations" },

	/* Shader compiler options the shader cache should be aware of: */
	{ "unsafemath", DBG(UNSAFE_MATH), "Enable unsafe math shader optimizations" },
	{ "sisched", DBG(SI_SCHED), "Enable LLVM SI Machine Instruction Scheduler." },

	/* Shader compiler options (with no effect on the shader cache): */
	{ "checkir", DBG(CHECK_IR), "Enable additional sanity checks on shader IR" },
	{ "precompile", DBG(PRECOMPILE), "Compile one shader variant at shader creation." },
	{ "nir", DBG(NIR), "Enable experimental NIR shaders" },
	{ "mono", DBG(MONOLITHIC_SHADERS), "Use old-style monolithic shaders compiled on demand" },
	{ "nooptvariant", DBG(NO_OPT_VARIANT), "Disable compiling optimized shader variants." },

	/* Information logging options: */
	{ "info", DBG(INFO), "Print driver information" },
	{ "tex", DBG(TEX), "Print texture info" },
	{ "compute", DBG(COMPUTE), "Print compute info" },
	{ "vm", DBG(VM), "Print virtual addresses when creating resources" },

	/* Driver options: */
	{ "forcedma", DBG(FORCE_DMA), "Use asynchronous DMA for all operations when possible." },
	{ "nodma", DBG(NO_ASYNC_DMA), "Disable asynchronous DMA" },
	{ "nowc", DBG(NO_WC), "Disable GTT write combining" },
	{ "check_vm", DBG(CHECK_VM), "Check VM faults and dump debug info." },
	{ "reserve_vmid", DBG(RESERVE_VMID), "Force VMID reservation per context." },

	/* 3D engine options: */
	{ "switch_on_eop", DBG(SWITCH_ON_EOP), "Program WD/IA to switch on end-of-packet." },
	{ "nooutoforder", DBG(NO_OUT_OF_ORDER), "Disable out-of-order rasterization" },
	{ "nodpbb", DBG(NO_DPBB), "Disable DPBB." },
	{ "nodfsm", DBG(NO_DFSM), "Disable DFSM." },
	{ "dpbb", DBG(DPBB), "Enable DPBB." },
	{ "dfsm", DBG(DFSM), "Enable DFSM." },
	{ "nohyperz", DBG(NO_HYPERZ), "Disable Hyper-Z" },
	{ "norbplus", DBG(NO_RB_PLUS), "Disable RB+." },
	{ "no2d", DBG(NO_2D_TILING), "Disable 2D tiling" },
	{ "notiling", DBG(NO_TILING), "Disable tiling" },
	{ "nodcc", DBG(NO_DCC), "Disable DCC." },
	{ "nodccclear", DBG(NO_DCC_CLEAR), "Disable DCC fast clear." },
	{ "nodccfb", DBG(NO_DCC_FB), "Disable separate DCC on the main framebuffer" },
	{ "nodccmsaa", DBG(NO_DCC_MSAA), "Disable DCC for MSAA" },
	{ "dccmsaa", DBG(DCC_MSAA), "Enable DCC for MSAA" },

	/* Tests: */
	{ "testdma", DBG(TEST_DMA), "Invoke SDMA tests and exit." },
	{ "testvmfaultcp", DBG(TEST_VMFAULT_CP), "Invoke a CP VM fault test and exit." },
	{ "testvmfaultsdma", DBG(TEST_VMFAULT_SDMA), "Invoke an SDMA VM fault test and exit." },
	{ "testvmfaultshader", DBG(TEST_VMFAULT_SHADER), "Invoke a shader VM fault test and exit." },

	DEBUG_NAMED_VALUE_END /* must be last */
};
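
/* Usage sketch (annotation, not part of the original file): these flags are
 * parsed from the R600_DEBUG environment variable by debug_get_flags_option()
 * in radeonsi_screen_create() below, as a comma-separated list, e.g.:
 *
 *   R600_DEBUG=vs,ps,checkir <GL application>
 */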
static void si_destroy_context(struct pipe_context *context)
{
	struct si_context *sctx = (struct si_context *)context;
	int i;

	/* Unreference the framebuffer normally to disable related logic
	 * properly.
	 */
	struct pipe_framebuffer_state fb = {};
	if (context->set_framebuffer_state)
		context->set_framebuffer_state(context, &fb);

	si_release_all_descriptors(sctx);

	pipe_resource_reference(&sctx->esgs_ring, NULL);
	pipe_resource_reference(&sctx->gsvs_ring, NULL);
	pipe_resource_reference(&sctx->tf_ring, NULL);
	pipe_resource_reference(&sctx->tess_offchip_ring, NULL);
	pipe_resource_reference(&sctx->null_const_buf.buffer, NULL);
	r600_resource_reference(&sctx->border_color_buffer, NULL);
	free(sctx->border_color_table);
	r600_resource_reference(&sctx->scratch_buffer, NULL);
	r600_resource_reference(&sctx->compute_scratch_buffer, NULL);
	r600_resource_reference(&sctx->wait_mem_scratch, NULL);

	si_pm4_free_state(sctx, sctx->init_config, ~0);
	if (sctx->init_config_gs_rings)
		si_pm4_free_state(sctx, sctx->init_config_gs_rings, ~0);
	for (i = 0; i < ARRAY_SIZE(sctx->vgt_shader_config); i++)
		si_pm4_delete_state(sctx, vgt_shader_config, sctx->vgt_shader_config[i]);

	if (sctx->fixed_func_tcs_shader.cso)
		sctx->b.b.delete_tcs_state(&sctx->b.b, sctx->fixed_func_tcs_shader.cso);
	if (sctx->custom_dsa_flush)
		sctx->b.b.delete_depth_stencil_alpha_state(&sctx->b.b, sctx->custom_dsa_flush);
	if (sctx->custom_blend_resolve)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_resolve);
	if (sctx->custom_blend_fmask_decompress)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_fmask_decompress);
	if (sctx->custom_blend_eliminate_fastclear)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_eliminate_fastclear);
	if (sctx->custom_blend_dcc_decompress)
		sctx->b.b.delete_blend_state(&sctx->b.b, sctx->custom_blend_dcc_decompress);
	if (sctx->vs_blit_pos)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_pos);
	if (sctx->vs_blit_pos_layered)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_pos_layered);
	if (sctx->vs_blit_color)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_color);
	if (sctx->vs_blit_color_layered)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_color_layered);
	if (sctx->vs_blit_texcoord)
		sctx->b.b.delete_vs_state(&sctx->b.b, sctx->vs_blit_texcoord);

	if (sctx->blitter)
		util_blitter_destroy(sctx->blitter);

	si_common_context_cleanup(&sctx->b);

	LLVMDisposeTargetMachine(sctx->tm);

	si_saved_cs_reference(&sctx->current_saved_cs, NULL);

	_mesa_hash_table_destroy(sctx->tex_handles, NULL);
	_mesa_hash_table_destroy(sctx->img_handles, NULL);

	util_dynarray_fini(&sctx->resident_tex_handles);
	util_dynarray_fini(&sctx->resident_img_handles);
	util_dynarray_fini(&sctx->resident_tex_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_img_needs_color_decompress);
	util_dynarray_fini(&sctx->resident_tex_needs_depth_decompress);
	FREE(sctx);
}
static enum pipe_reset_status
si_amdgpu_get_reset_status(struct pipe_context *ctx)
{
	struct si_context *sctx = (struct si_context *)ctx;

	return sctx->b.ws->ctx_query_reset_status(sctx->b.ctx);
}
/* Apitrace profiling:
 *   1) qapitrace : Tools -> Profile: Measure CPU & GPU times
 *   2) In the middle panel, zoom in (mouse wheel) on some bad draw call
 *      and remember its number.
 *   3) In Mesa, enable queries and performance counters around that draw
 *      call and print the results.
 *   4) glretrace --benchmark --markers ..
 */
static void si_emit_string_marker(struct pipe_context *ctx,
				  const char *string, int len)
{
	struct si_context *sctx = (struct si_context *)ctx;

	dd_parse_apitrace_marker(string, len, &sctx->apitrace_call_number);

	if (sctx->b.log)
		u_log_printf(sctx->b.log, "\nString marker: %*s\n", len, string);
}
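
/* Note (annotation, not part of the original file): this hook is reached when
 * the application emits string markers (e.g. via GL debug-marker extensions);
 * in "glretrace --markers" runs each marker carries the apitrace call number,
 * which dd_parse_apitrace_marker() extracts above so a draw can be correlated
 * with its call in the trace.
 */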
static LLVMTargetMachineRef
si_create_llvm_target_machine(struct si_screen *sscreen)
{
	enum ac_target_machine_options tm_options =
		(sscreen->b.debug_flags & DBG(SI_SCHED) ? AC_TM_SISCHED : 0) |
		(sscreen->b.chip_class >= GFX9 ? AC_TM_FORCE_ENABLE_XNACK : 0) |
		(sscreen->b.chip_class < GFX9 ? AC_TM_FORCE_DISABLE_XNACK : 0) |
		(!sscreen->llvm_has_working_vgpr_indexing ? AC_TM_PROMOTE_ALLOCA_TO_SCRATCH : 0);

	return ac_create_target_machine(sscreen->b.family, tm_options);
}
static void si_set_debug_callback(struct pipe_context *ctx,
				  const struct pipe_debug_callback *cb)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_screen *screen = sctx->screen;

	util_queue_finish(&screen->shader_compiler_queue);
	util_queue_finish(&screen->shader_compiler_queue_low_priority);

	if (cb)
		sctx->debug = *cb;
	else
		memset(&sctx->debug, 0, sizeof(sctx->debug));
}
static void si_set_log_context(struct pipe_context *ctx,
			       struct u_log_context *log)
{
	struct si_context *sctx = (struct si_context *)ctx;
	sctx->b.log = log;

	if (log)
		u_log_add_auto_logger(log, si_auto_log_cs, sctx);
}
static struct pipe_context *si_create_context(struct pipe_screen *screen,
					      unsigned flags)
{
	struct si_context *sctx = CALLOC_STRUCT(si_context);
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct radeon_winsys *ws = sscreen->b.ws;
	int shader, i;

	if (!sctx)
		return NULL;

	if (flags & PIPE_CONTEXT_DEBUG)
		sscreen->record_llvm_ir = true; /* racy but not critical */

	sctx->b.b.screen = screen; /* this must be set first */
	sctx->b.b.priv = NULL;
	sctx->b.b.destroy = si_destroy_context;
	sctx->b.b.emit_string_marker = si_emit_string_marker;
	sctx->b.b.set_debug_callback = si_set_debug_callback;
	sctx->b.b.set_log_context = si_set_log_context;
	sctx->b.set_atom_dirty = (void *)si_set_atom_dirty;
	sctx->screen = sscreen; /* Easy accessing of screen/winsys. */
	sctx->is_debug = (flags & PIPE_CONTEXT_DEBUG) != 0;

	if (!si_common_context_init(&sctx->b, &sscreen->b, flags))
		goto fail;

	if (sscreen->b.info.drm_major == 3)
		sctx->b.b.get_device_reset_status = si_amdgpu_get_reset_status;

	si_init_buffer_functions(sctx);
	si_init_clear_functions(sctx);
	si_init_blit_functions(sctx);
	si_init_compute_functions(sctx);
	si_init_cp_dma_functions(sctx);
	si_init_debug_functions(sctx);
	si_init_msaa_functions(sctx);
	si_init_streamout_functions(sctx);
	if (sscreen->b.info.has_hw_decode) {
		sctx->b.b.create_video_codec = si_uvd_create_decoder;
		sctx->b.b.create_video_buffer = si_video_buffer_create;
	} else {
		sctx->b.b.create_video_codec = vl_create_decoder;
		sctx->b.b.create_video_buffer = vl_video_buffer_create;
	}

	sctx->b.gfx.cs = ws->cs_create(sctx->b.ctx, RING_GFX,
				       si_context_gfx_flush, sctx);
	sctx->b.gfx.flush = si_context_gfx_flush;
	sctx->border_color_table = malloc(SI_MAX_BORDER_COLORS *
					  sizeof(*sctx->border_color_table));
	if (!sctx->border_color_table)
		goto fail;

	sctx->border_color_buffer = (struct r600_resource*)
		pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT,
				   SI_MAX_BORDER_COLORS *
				   sizeof(*sctx->border_color_table));
	if (!sctx->border_color_buffer)
		goto fail;

	sctx->border_color_map =
		ws->buffer_map(sctx->border_color_buffer->buf,
			       NULL, PIPE_TRANSFER_WRITE);
	if (!sctx->border_color_map)
		goto fail;
	si_init_all_descriptors(sctx);
	si_init_fence_functions(sctx);
	si_init_state_functions(sctx);
	si_init_shader_functions(sctx);
	si_init_viewport_functions(sctx);
	si_init_ia_multi_vgt_param_table(sctx);

	if (sctx->b.chip_class >= CIK)
		cik_init_sdma_functions(sctx);
	else
		si_init_dma_functions(sctx);

	if (sscreen->b.debug_flags & DBG(FORCE_DMA))
		sctx->b.b.resource_copy_region = sctx->b.dma_copy;

	sctx->blitter = util_blitter_create(&sctx->b.b);
	if (sctx->blitter == NULL)
		goto fail;
	sctx->blitter->draw_rectangle = si_draw_rectangle;
	sctx->blitter->skip_viewport_restore = true;
	sctx->sample_mask.sample_mask = 0xffff;

	/* these must be last */
	si_begin_new_cs(sctx);

	if (sctx->b.chip_class >= GFX9) {
		sctx->wait_mem_scratch = (struct r600_resource*)
			pipe_buffer_create(screen, 0, PIPE_USAGE_DEFAULT, 4);
		if (!sctx->wait_mem_scratch)
			goto fail;

		/* Initialize the memory. */
		struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
			    S_370_WR_CONFIRM(1) |
			    S_370_ENGINE_SEL(V_370_ME));
		radeon_emit(cs, sctx->wait_mem_scratch->gpu_address);
		radeon_emit(cs, sctx->wait_mem_scratch->gpu_address >> 32);
		radeon_emit(cs, sctx->wait_mem_number);
	}
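
	/* Note (annotation, not part of the original file): the five dwords
	 * emitted above form a single WRITE_DATA packet: the PKT3 header (with
	 * a count of 3, i.e. four dwords follow), a control dword selecting a
	 * memory destination, write confirmation and the ME engine, the 64-bit
	 * GPU address split into low/high dwords, and one data dword.
	 */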
	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
	 * if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
	if (sctx->b.chip_class == CIK) {
		sctx->null_const_buf.buffer =
			si_aligned_buffer_create(screen,
						 R600_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT, 16,
						 sctx->screen->b.info.tcc_cache_line_size);
		if (!sctx->null_const_buf.buffer)
			goto fail;
		sctx->null_const_buf.buffer_size = sctx->null_const_buf.buffer->width0;

		for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
			for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
				sctx->b.b.set_constant_buffer(&sctx->b.b, shader, i,
							      &sctx->null_const_buf);
			}
		}

		si_set_rw_buffer(sctx, SI_HS_CONST_DEFAULT_TESS_LEVELS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_INSTANCE_DIVISORS,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_VS_CONST_CLIP_PLANES,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE,
				 &sctx->null_const_buf);
		si_set_rw_buffer(sctx, SI_PS_CONST_SAMPLE_POSITIONS,
				 &sctx->null_const_buf);

		/* Clear the NULL constant buffer, because loads should return zeros. */
		si_clear_buffer(&sctx->b.b, sctx->null_const_buf.buffer, 0,
				sctx->null_const_buf.buffer->width0, 0,
				R600_COHERENCY_SHADER);
	}
	uint64_t max_threads_per_block;
	screen->get_compute_param(screen, PIPE_SHADER_IR_TGSI,
				  PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
				  &max_threads_per_block);

	/* The maximum number of scratch waves. Scratch space isn't divided
	 * evenly between CUs. The number is only a function of the number of CUs.
	 * We can decrease the constant to decrease the scratch buffer size.
	 *
	 * sctx->scratch_waves must be >= the maximum possible size of
	 * 1 threadgroup, so that the hw doesn't hang from being unable
	 * to start any.
	 *
	 * The recommended value is 4 per CU at most. Higher numbers don't
	 * bring much benefit, but they still occupy chip resources (think
	 * async compute). I've seen ~2% performance difference between 4 and 32.
	 */
	sctx->scratch_waves = MAX2(32 * sscreen->b.info.num_good_compute_units,
				   max_threads_per_block / 64);
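
	/* Worked example (annotation, not part of the original file): on a
	 * 64-CU chip with max_threads_per_block = 2048, this evaluates to
	 * MAX2(32 * 64, 2048 / 64) = MAX2(2048, 32) = 2048 waves; the
	 * threadgroup term only dominates on parts with very few CUs.
	 */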
	sctx->tm = si_create_llvm_target_machine(sscreen);

	/* Bindless handles. */
	sctx->tex_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);
	sctx->img_handles = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
						    _mesa_key_pointer_equal);

	util_dynarray_init(&sctx->resident_tex_handles, NULL);
	util_dynarray_init(&sctx->resident_img_handles, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_img_needs_color_decompress, NULL);
	util_dynarray_init(&sctx->resident_tex_needs_depth_decompress, NULL);

	return &sctx->b.b;
fail:
	fprintf(stderr, "radeonsi: Failed to create a context.\n");
	si_destroy_context(&sctx->b.b);
	return NULL;
}
static struct pipe_context *si_pipe_create_context(struct pipe_screen *screen,
						   void *priv, unsigned flags)
{
	struct si_screen *sscreen = (struct si_screen *)screen;
	struct pipe_context *ctx;

	if (sscreen->b.debug_flags & DBG(CHECK_VM))
		flags |= PIPE_CONTEXT_DEBUG;

	ctx = si_create_context(screen, flags);

	if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
		return ctx;

	/* Clover (compute-only) is unsupported. */
	if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
		return ctx;

	/* When shaders are logged to stderr, asynchronous compilation is
	 * disabled too. */
	if (sscreen->b.debug_flags & DBG_ALL_SHADERS)
		return ctx;

	/* Use asynchronous flushes only on amdgpu, since the radeon
	 * implementation for fence_server_sync is incomplete. */
	return threaded_context_create(ctx, &sscreen->b.pool_transfers,
				       si_replace_buffer_storage,
				       sscreen->b.info.drm_major >= 3 ? si_create_fence : NULL,
				       &((struct si_context*)ctx)->b.tc);
}
static void si_destroy_screen(struct pipe_screen* pscreen)
{
	struct si_screen *sscreen = (struct si_screen *)pscreen;
	struct si_shader_part *parts[] = {
		sscreen->vs_prologs,
		sscreen->tcs_epilogs,
		sscreen->gs_prologs,
		sscreen->ps_prologs,
		sscreen->ps_epilogs
	};
	unsigned i;

	if (!sscreen->b.ws->unref(sscreen->b.ws))
		return;

	util_queue_destroy(&sscreen->shader_compiler_queue);
	util_queue_destroy(&sscreen->shader_compiler_queue_low_priority);

	for (i = 0; i < ARRAY_SIZE(sscreen->tm); i++)
		if (sscreen->tm[i])
			LLVMDisposeTargetMachine(sscreen->tm[i]);

	for (i = 0; i < ARRAY_SIZE(sscreen->tm_low_priority); i++)
		if (sscreen->tm_low_priority[i])
			LLVMDisposeTargetMachine(sscreen->tm_low_priority[i]);

	/* Free shader parts. */
	for (i = 0; i < ARRAY_SIZE(parts); i++) {
		while (parts[i]) {
			struct si_shader_part *part = parts[i];

			parts[i] = part->next;
			ac_shader_binary_clean(&part->binary);
			FREE(part);
		}
	}
	mtx_destroy(&sscreen->shader_parts_mutex);
	si_destroy_shader_cache(sscreen);

	si_perfcounters_destroy(&sscreen->b);
	si_gpu_load_kill_thread(&sscreen->b);

	mtx_destroy(&sscreen->b.gpu_load_mutex);
	mtx_destroy(&sscreen->b.aux_context_lock);
	sscreen->b.aux_context->destroy(sscreen->b.aux_context);

	slab_destroy_parent(&sscreen->b.pool_transfers);

	disk_cache_destroy(sscreen->b.disk_shader_cache);
	sscreen->b.ws->destroy(sscreen->b.ws);
	FREE(sscreen);
}
static bool si_init_gs_info(struct si_screen *sscreen)
{
	/* gs_table_depth is not used by GFX9 */
	if (sscreen->b.chip_class >= GFX9)
		return true;

	switch (sscreen->b.family) {
	case CHIP_OLAND:
	case CHIP_HAINAN:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	case CHIP_ICELAND:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sscreen->gs_table_depth = 16;
		return true;
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		sscreen->gs_table_depth = 32;
		return true;
	default:
		return false;
	}
}
static void si_handle_env_var_force_family(struct si_screen *sscreen)
{
	const char *family = debug_get_option("SI_FORCE_FAMILY", NULL);
	unsigned i;

	if (!family)
		return;

	for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
		if (!strcmp(family, ac_get_llvm_processor_name(i))) {
			/* Override family and chip_class. */
			sscreen->b.family = sscreen->b.info.family = i;

			if (i >= CHIP_VEGA10)
				sscreen->b.chip_class = sscreen->b.info.chip_class = GFX9;
			else if (i >= CHIP_TONGA)
				sscreen->b.chip_class = sscreen->b.info.chip_class = VI;
			else if (i >= CHIP_BONAIRE)
				sscreen->b.chip_class = sscreen->b.info.chip_class = CIK;
			else
				sscreen->b.chip_class = sscreen->b.info.chip_class = SI;

			/* Don't submit any IBs. */
			setenv("RADEON_NOOP", "1", 1);
			return;
		}
	}

	fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
	exit(1);
}
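
/* Usage sketch (annotation, not part of the original file): SI_FORCE_FAMILY
 * takes an LLVM processor name, e.g.:
 *
 *   SI_FORCE_FAMILY=gfx900 <GL application>
 *
 * Since the real hardware doesn't match, RADEON_NOOP=1 is set above so no
 * command buffers are actually submitted; this is useful for inspecting the
 * shaders that would be compiled for another chip.
 */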
static void si_test_vmfault(struct si_screen *sscreen)
{
	struct pipe_context *ctx = sscreen->b.aux_context;
	struct si_context *sctx = (struct si_context *)ctx;
	struct pipe_resource *buf =
		pipe_buffer_create(&sscreen->b.b, 0, PIPE_USAGE_DEFAULT, 64);

	if (!buf) {
		puts("Buffer allocation failed.");
		exit(1);
	}

	r600_resource(buf)->gpu_address = 0; /* cause a VM fault */

	if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_CP)) {
		si_copy_buffer(sctx, buf, buf, 0, 4, 4, 0);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: CP - done.");
	}
	if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_SDMA)) {
		sctx->b.dma_clear_buffer(ctx, buf, 0, 4, 0);
		ctx->flush(ctx, NULL, 0);
		puts("VM fault test: SDMA - done.");
	}
	if (sscreen->b.debug_flags & DBG(TEST_VMFAULT_SHADER)) {
		util_test_constant_buffer(ctx, buf);
		puts("VM fault test: Shader - done.");
	}
	exit(0);
}
static void si_disk_cache_create(struct si_screen *sscreen)
{
	/* Don't use the cache if shader dumping is enabled. */
	if (sscreen->b.debug_flags & DBG_ALL_SHADERS)
		return;

	/* TODO: remove this once gallium supports a nir cache */
	if (sscreen->b.debug_flags & DBG(NIR))
		return;

	uint32_t mesa_timestamp;
	if (disk_cache_get_function_timestamp(si_disk_cache_create,
					      &mesa_timestamp)) {
		char *timestamp_str;
		int res = -1;
		uint32_t llvm_timestamp;

		if (disk_cache_get_function_timestamp(LLVMInitializeAMDGPUTargetInfo,
						      &llvm_timestamp)) {
			res = asprintf(&timestamp_str, "%u_%u",
				       mesa_timestamp, llvm_timestamp);
		}

		if (res != -1) {
			/* These flags affect shader compilation. */
			uint64_t shader_debug_flags =
				sscreen->b.debug_flags &
				(DBG(FS_CORRECT_DERIVS_AFTER_KILL) |
				 DBG(SI_SCHED) |
				 DBG(UNSAFE_MATH) |
				 DBG(NIR));

			sscreen->b.disk_shader_cache =
				disk_cache_create(si_get_family_name(sscreen),
						  timestamp_str,
						  shader_debug_flags);
			free(timestamp_str);
		}
	}
}
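
/* Note (annotation, not part of the original file): the on-disk cache is keyed
 * by GPU family plus a "<mesa>_<llvm>" build-timestamp string, and
 * shader_debug_flags is mixed in so binaries compiled under different
 * compiler-visible debug options never collide.
 */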
struct pipe_screen *radeonsi_screen_create(struct radeon_winsys *ws,
					   const struct pipe_screen_config *config)
{
	struct si_screen *sscreen = CALLOC_STRUCT(si_screen);
	unsigned num_threads, num_compiler_threads, num_compiler_threads_lowprio, i;

	if (!sscreen)
		return NULL;

	sscreen->b.ws = ws;
	ws->query_info(ws, &sscreen->b.info);

	sscreen->b.family = sscreen->b.info.family;
	sscreen->b.chip_class = sscreen->b.info.chip_class;
	sscreen->b.debug_flags = debug_get_flags_option("R600_DEBUG",
							debug_options, 0);

	/* Set functions first. */
	sscreen->b.b.context_create = si_pipe_create_context;
	sscreen->b.b.destroy = si_destroy_screen;

	si_init_screen_get_functions(sscreen);
	si_init_screen_buffer_functions(sscreen);
	si_init_screen_fence_functions(sscreen);
	si_init_screen_state_functions(sscreen);
	si_init_screen_texture_functions(&sscreen->b);
	si_init_screen_query_functions(&sscreen->b);

	/* Set these flags in debug_flags early, so that the shader cache takes
	 * them into account.
	 */
	if (driQueryOptionb(config->options,
			    "glsl_correct_derivatives_after_discard"))
		sscreen->b.debug_flags |= DBG(FS_CORRECT_DERIVS_AFTER_KILL);
	if (driQueryOptionb(config->options, "radeonsi_enable_sisched"))
		sscreen->b.debug_flags |= DBG(SI_SCHED);

	if (sscreen->b.debug_flags & DBG(INFO))
		ac_print_gpu_info(&sscreen->b.info);

	slab_create_parent(&sscreen->b.pool_transfers,
			   sizeof(struct r600_transfer), 64);
	sscreen->b.force_aniso = MIN2(16, debug_get_num_option("R600_TEX_ANISO", -1));
	if (sscreen->b.force_aniso >= 0) {
		printf("radeonsi: Forcing anisotropy filter to %ix\n",
		       /* round down to a power of two */
		       1 << util_logbase2(sscreen->b.force_aniso));
	}
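
	/* Usage sketch (annotation, not part of the original file):
	 *
	 *   R600_TEX_ANISO=16 <GL application>
	 *
	 * forces 16x anisotropic filtering; values that aren't powers of two
	 * are rounded down by the util_logbase2() above.
	 */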
	(void) mtx_init(&sscreen->b.aux_context_lock, mtx_plain);
	(void) mtx_init(&sscreen->b.gpu_load_mutex, mtx_plain);

	if (!si_init_gs_info(sscreen) ||
	    !si_init_shader_cache(sscreen)) {
		FREE(sscreen);
		return NULL;
	}

	si_disk_cache_create(sscreen);

	/* Only enable as many threads as we have target machines, but at most
	 * the number of CPUs - 1 if there is more than one.
	 */
	num_threads = sysconf(_SC_NPROCESSORS_ONLN);
	num_threads = MAX2(1, num_threads - 1);
	num_compiler_threads = MIN2(num_threads, ARRAY_SIZE(sscreen->tm));
	num_compiler_threads_lowprio =
		MIN2(num_threads, ARRAY_SIZE(sscreen->tm_low_priority));
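
	/* Worked example (annotation, not part of the original file): on an
	 * 8-CPU machine, num_threads = MAX2(1, 8 - 1) = 7, and each queue is
	 * further capped by the number of preallocated target machines,
	 * ARRAY_SIZE(sscreen->tm) and ARRAY_SIZE(sscreen->tm_low_priority).
	 */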
	if (!util_queue_init(&sscreen->shader_compiler_queue, "si_shader",
			     32, num_compiler_threads,
			     UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
		si_destroy_shader_cache(sscreen);
		FREE(sscreen);
		return NULL;
	}

	if (!util_queue_init(&sscreen->shader_compiler_queue_low_priority,
			     "si_shader_low",
			     32, num_compiler_threads_lowprio,
			     UTIL_QUEUE_INIT_RESIZE_IF_FULL |
			     UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
		si_destroy_shader_cache(sscreen);
		FREE(sscreen);
		return NULL;
	}

	si_handle_env_var_force_family(sscreen);

	if (!debug_get_bool_option("RADEON_DISABLE_PERFCOUNTERS", false))
		si_init_perfcounters(sscreen);

	/* Hawaii has a bug with offchip buffers > 256 that can be worked
	 * around by setting 4K granularity.
	 */
	sscreen->tess_offchip_block_dw_size =
		sscreen->b.family == CHIP_HAWAII ? 4096 : 8192;

	/* The mere presence of CLEAR_STATE in the IB causes random GPU hangs
	 * on SI.
	 */
	sscreen->has_clear_state = sscreen->b.chip_class >= CIK;

	sscreen->has_distributed_tess =
		sscreen->b.chip_class >= VI &&
		sscreen->b.info.max_se >= 2;

	sscreen->has_draw_indirect_multi =
		(sscreen->b.family >= CHIP_POLARIS10) ||
		(sscreen->b.chip_class == VI &&
		 sscreen->b.info.pfp_fw_version >= 121 &&
		 sscreen->b.info.me_fw_version >= 87) ||
		(sscreen->b.chip_class == CIK &&
		 sscreen->b.info.pfp_fw_version >= 211 &&
		 sscreen->b.info.me_fw_version >= 173) ||
		(sscreen->b.chip_class == SI &&
		 sscreen->b.info.pfp_fw_version >= 79 &&
		 sscreen->b.info.me_fw_version >= 142);

	sscreen->has_out_of_order_rast = sscreen->b.chip_class >= VI &&
					 sscreen->b.info.max_se >= 2 &&
					 !(sscreen->b.debug_flags & DBG(NO_OUT_OF_ORDER));
	sscreen->assume_no_z_fights =
		driQueryOptionb(config->options, "radeonsi_assume_no_z_fights");
	sscreen->commutative_blend_add =
		driQueryOptionb(config->options, "radeonsi_commutative_blend_add");
	sscreen->clear_db_cache_before_clear =
		driQueryOptionb(config->options, "radeonsi_clear_db_cache_before_clear");
	sscreen->has_msaa_sample_loc_bug = (sscreen->b.family >= CHIP_POLARIS10 &&
					    sscreen->b.family <= CHIP_POLARIS12) ||
					   sscreen->b.family == CHIP_VEGA10 ||
					   sscreen->b.family == CHIP_RAVEN;
	sscreen->has_ls_vgpr_init_bug = sscreen->b.family == CHIP_VEGA10 ||
					sscreen->b.family == CHIP_RAVEN;

	if (sscreen->b.debug_flags & DBG(DPBB)) {
		sscreen->dpbb_allowed = true;
	} else {
		/* Only enable primitive binning on Raven by default. */
		sscreen->dpbb_allowed = sscreen->b.family == CHIP_RAVEN &&
					!(sscreen->b.debug_flags & DBG(NO_DPBB));
	}

	if (sscreen->b.debug_flags & DBG(DFSM)) {
		sscreen->dfsm_allowed = sscreen->dpbb_allowed;
	} else {
		sscreen->dfsm_allowed = sscreen->dpbb_allowed &&
					!(sscreen->b.debug_flags & DBG(NO_DFSM));
	}

	/* While it would be nice not to have this flag, we are constrained
	 * by the reality that LLVM 5.0 doesn't have working VGPR indexing
	 * on GFX9.
	 */
	sscreen->llvm_has_working_vgpr_indexing = sscreen->b.chip_class <= VI;

	/* Some chips have RB+ registers, but don't support RB+. Those must
	 * always disable it.
	 */
	if (sscreen->b.family == CHIP_STONEY ||
	    sscreen->b.chip_class >= GFX9) {
		sscreen->b.has_rbplus = true;

		sscreen->b.rbplus_allowed =
			!(sscreen->b.debug_flags & DBG(NO_RB_PLUS)) &&
			(sscreen->b.family == CHIP_STONEY ||
			 sscreen->b.family == CHIP_RAVEN);
	}

	sscreen->b.dcc_msaa_allowed =
		!(sscreen->b.debug_flags & DBG(NO_DCC_MSAA)) &&
		(sscreen->b.debug_flags & DBG(DCC_MSAA) ||
		 sscreen->b.chip_class == VI);

	(void) mtx_init(&sscreen->shader_parts_mutex, mtx_plain);
	sscreen->use_monolithic_shaders =
		(sscreen->b.debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;

	sscreen->b.barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SMEM_L1 |
					    SI_CONTEXT_INV_VMEM_L1;
	if (sscreen->b.chip_class <= VI) {
		sscreen->b.barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_GLOBAL_L2;
		sscreen->b.barrier_flags.L2_to_cp |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
	}

	sscreen->b.barrier_flags.compute_to_L2 = SI_CONTEXT_CS_PARTIAL_FLUSH;
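
	/* Note (annotation, not part of the original file): on chips up to VI
	 * the command processor bypasses the L2 cache, hence the extra
	 * GLOBAL_L2 invalidate/writeback above; on newer chips the CP reads
	 * and writes through L2, so only the SMEM/VMEM L1s need invalidation.
	 */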
	if (debug_get_bool_option("RADEON_DUMP_SHADERS", false))
		sscreen->b.debug_flags |= DBG_ALL_SHADERS;

	for (i = 0; i < num_compiler_threads; i++)
		sscreen->tm[i] = si_create_llvm_target_machine(sscreen);
	for (i = 0; i < num_compiler_threads_lowprio; i++)
		sscreen->tm_low_priority[i] = si_create_llvm_target_machine(sscreen);

	/* Create the auxiliary context. This must be done last. */
	sscreen->b.aux_context = si_create_context(&sscreen->b.b, 0);

	if (sscreen->b.debug_flags & DBG(TEST_DMA))
		si_test_dma(sscreen);

	if (sscreen->b.debug_flags & (DBG(TEST_VMFAULT_CP) |
				      DBG(TEST_VMFAULT_SDMA) |
				      DBG(TEST_VMFAULT_SHADER)))
		si_test_vmfault(sscreen);

	return &sscreen->b.b;
}