radeonsi: don't set BO metadata for non-zero planes
[mesa.git] src/gallium/drivers/radeonsi/si_pipe.h
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2018 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * on the rights to use, copy, modify, merge, publish, distribute, sub
10 * license, and/or sell copies of the Software, and to permit persons to whom
11 * the Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25 #ifndef SI_PIPE_H
26 #define SI_PIPE_H
27
28 #include "si_shader.h"
29 #include "si_state.h"
30
31 #include "util/u_dynarray.h"
32 #include "util/u_idalloc.h"
33 #include "util/u_threaded_context.h"
34
35 #ifdef PIPE_ARCH_BIG_ENDIAN
36 #define SI_BIG_ENDIAN 1
37 #else
38 #define SI_BIG_ENDIAN 0
39 #endif
40
41 #define ATI_VENDOR_ID 0x1002
42 #define SI_PRIM_DISCARD_DEBUG 0
43 #define SI_NOT_QUERY 0xffffffff
44
45 /* The base vertex and primitive restart can be any number, but we must pick
46 * one that means "unknown" for the purpose of state tracking, and
47 * the number shouldn't be a commonly-used one. */
48 #define SI_BASE_VERTEX_UNKNOWN INT_MIN
49 #define SI_RESTART_INDEX_UNKNOWN INT_MIN
50 #define SI_INSTANCE_COUNT_UNKNOWN INT_MIN
51 #define SI_NUM_SMOOTH_AA_SAMPLES 8
52 #define SI_MAX_POINT_SIZE 2048
53 #define SI_GS_PER_ES 128
54 /* Alignment for optimal CP DMA performance. */
55 #define SI_CPDMA_ALIGNMENT 32
56
57 /* Tunables for compute-based clear_buffer and copy_buffer: */
58 #define SI_COMPUTE_CLEAR_DW_PER_THREAD 4
59 #define SI_COMPUTE_COPY_DW_PER_THREAD 4
60 #define SI_COMPUTE_DST_CACHE_POLICY L2_STREAM
61
62 /* Pipeline & streamout query controls. */
63 #define SI_CONTEXT_START_PIPELINE_STATS (1 << 0)
64 #define SI_CONTEXT_STOP_PIPELINE_STATS (1 << 1)
65 #define SI_CONTEXT_FLUSH_FOR_RENDER_COND (1 << 2)
66 /* Instruction cache. */
67 #define SI_CONTEXT_INV_ICACHE (1 << 3)
68 /* Scalar cache. (GFX6-9: scalar L1; GFX10: scalar L0)
69 * GFX10: This also invalidates the L1 shader array cache. */
70 #define SI_CONTEXT_INV_SCACHE (1 << 4)
71 /* Vector cache. (GFX6-9: vector L1; GFX10: vector L0)
72 * GFX10: This also invalidates the L1 shader array cache. */
73 #define SI_CONTEXT_INV_VCACHE (1 << 5)
74 /* L2 cache + L2 metadata cache writeback & invalidate.
75 * GFX6-8: Used by shaders only. GFX9-10: Used by everything. */
76 #define SI_CONTEXT_INV_L2 (1 << 6)
77 /* L2 writeback (write dirty L2 lines to memory for non-L2 clients).
78 * Only used for coherency with non-L2 clients like CB, DB, CP on GFX6-8.
79 * GFX6-7 will do complete invalidation, because the writeback is unsupported. */
80 #define SI_CONTEXT_WB_L2 (1 << 7)
81 /* Writeback & invalidate the L2 metadata cache only. It can only be coupled with
82 * a CB or DB flush. */
83 #define SI_CONTEXT_INV_L2_METADATA (1 << 8)
84 /* Framebuffer caches. */
85 #define SI_CONTEXT_FLUSH_AND_INV_DB (1 << 9)
86 #define SI_CONTEXT_FLUSH_AND_INV_DB_META (1 << 10)
87 #define SI_CONTEXT_FLUSH_AND_INV_CB (1 << 11)
88 /* Engine synchronization. */
89 #define SI_CONTEXT_VS_PARTIAL_FLUSH (1 << 12)
90 #define SI_CONTEXT_PS_PARTIAL_FLUSH (1 << 13)
91 #define SI_CONTEXT_CS_PARTIAL_FLUSH (1 << 14)
92 #define SI_CONTEXT_VGT_FLUSH (1 << 15)
93 #define SI_CONTEXT_VGT_STREAMOUT_SYNC (1 << 16)
94
95 #define SI_PREFETCH_VBO_DESCRIPTORS (1 << 0)
96 #define SI_PREFETCH_LS (1 << 1)
97 #define SI_PREFETCH_HS (1 << 2)
98 #define SI_PREFETCH_ES (1 << 3)
99 #define SI_PREFETCH_GS (1 << 4)
100 #define SI_PREFETCH_VS (1 << 5)
101 #define SI_PREFETCH_PS (1 << 6)
102
103 #define SI_MAX_BORDER_COLORS 4096
104 #define SI_MAX_VIEWPORTS 16
105 #define SIX_BITS 0x3F
106 #define SI_MAP_BUFFER_ALIGNMENT 64
107 #define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024
108
109 #define SI_RESOURCE_FLAG_TRANSFER (PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
110 #define SI_RESOURCE_FLAG_FLUSHED_DEPTH (PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
111 #define SI_RESOURCE_FLAG_FORCE_MSAA_TILING (PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
112 #define SI_RESOURCE_FLAG_DISABLE_DCC (PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
113 #define SI_RESOURCE_FLAG_UNMAPPABLE (PIPE_RESOURCE_FLAG_DRV_PRIV << 4)
114 #define SI_RESOURCE_FLAG_READ_ONLY (PIPE_RESOURCE_FLAG_DRV_PRIV << 5)
115 #define SI_RESOURCE_FLAG_32BIT (PIPE_RESOURCE_FLAG_DRV_PRIV << 6)
116 #define SI_RESOURCE_FLAG_CLEAR (PIPE_RESOURCE_FLAG_DRV_PRIV << 7)
117 /* For const_uploader, upload data via GTT and copy to VRAM on context flush via SDMA. */
118 #define SI_RESOURCE_FLAG_UPLOAD_FLUSH_EXPLICIT_VIA_SDMA (PIPE_RESOURCE_FLAG_DRV_PRIV << 8)
119
120 enum si_clear_code
121 {
122 DCC_CLEAR_COLOR_0000 = 0x00000000,
123 DCC_CLEAR_COLOR_0001 = 0x40404040,
124 DCC_CLEAR_COLOR_1110 = 0x80808080,
125 DCC_CLEAR_COLOR_1111 = 0xC0C0C0C0,
126 DCC_CLEAR_COLOR_REG = 0x20202020,
127 DCC_UNCOMPRESSED = 0xFFFFFFFF,
128 };
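/* Illustrative note (not in the original header): the enum names encode the
 * per-channel RGBA clear pattern, so a fast clear to opaque black (0,0,0,1)
 * would use DCC_CLEAR_COLOR_0001, DCC_CLEAR_COLOR_REG means "take the color
 * from the clear-color register", and DCC_UNCOMPRESSED leaves the blocks
 * uncompressed. A hedged sketch of the selection logic:
 *
 *    uint32_t code = color_is_0000 ? DCC_CLEAR_COLOR_0000 :
 *                    color_is_0001 ? DCC_CLEAR_COLOR_0001 :
 *                                    DCC_UNCOMPRESSED;
 */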
129
130 #define SI_IMAGE_ACCESS_AS_BUFFER (1 << 7)
131
132 /* Debug flags. */
133 enum {
134 /* Shader logging options: */
135 DBG_VS = PIPE_SHADER_VERTEX,
136 DBG_PS = PIPE_SHADER_FRAGMENT,
137 DBG_GS = PIPE_SHADER_GEOMETRY,
138 DBG_TCS = PIPE_SHADER_TESS_CTRL,
139 DBG_TES = PIPE_SHADER_TESS_EVAL,
140 DBG_CS = PIPE_SHADER_COMPUTE,
141 DBG_NO_IR,
142 DBG_NO_TGSI,
143 DBG_NO_ASM,
144 DBG_PREOPT_IR,
145
146 /* Shader compiler options the shader cache should be aware of: */
147 DBG_FS_CORRECT_DERIVS_AFTER_KILL,
148 DBG_SI_SCHED,
149 DBG_GISEL,
150 DBG_W32_GE,
151 DBG_W32_PS,
152 DBG_W32_CS,
153 DBG_W64_GE,
154 DBG_W64_PS,
155 DBG_W64_CS,
156
157 /* Shader compiler options (with no effect on the shader cache): */
158 DBG_CHECK_IR,
159 DBG_MONOLITHIC_SHADERS,
160 DBG_NO_OPT_VARIANT,
161
162 /* Information logging options: */
163 DBG_INFO,
164 DBG_TEX,
165 DBG_COMPUTE,
166 DBG_VM,
167
168 /* Driver options: */
169 DBG_FORCE_DMA,
170 DBG_NO_ASYNC_DMA,
171 DBG_NO_WC,
172 DBG_CHECK_VM,
173 DBG_RESERVE_VMID,
174 DBG_ZERO_VRAM,
175
176 /* 3D engine options: */
177 DBG_NO_GFX,
178 DBG_NO_NGG,
179 DBG_ALWAYS_PD,
180 DBG_PD,
181 DBG_NO_PD,
182 DBG_SWITCH_ON_EOP,
183 DBG_NO_OUT_OF_ORDER,
184 DBG_NO_DPBB,
185 DBG_NO_DFSM,
186 DBG_DPBB,
187 DBG_DFSM,
188 DBG_NO_HYPERZ,
189 DBG_NO_RB_PLUS,
190 DBG_NO_2D_TILING,
191 DBG_NO_TILING,
192 DBG_NO_DCC,
193 DBG_NO_DCC_CLEAR,
194 DBG_NO_DCC_FB,
195 DBG_NO_DCC_MSAA,
196 DBG_NO_FMASK,
197
198 /* Tests: */
199 DBG_TEST_DMA,
200 DBG_TEST_VMFAULT_CP,
201 DBG_TEST_VMFAULT_SDMA,
202 DBG_TEST_VMFAULT_SHADER,
203 DBG_TEST_DMA_PERF,
204 DBG_TEST_GDS,
205 DBG_TEST_GDS_MM,
206 DBG_TEST_GDS_OA_MM,
207 };
208
209 #define DBG_ALL_SHADERS (((1 << (DBG_CS + 1)) - 1))
210 #define DBG(name) (1ull << DBG_##name)
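/* Usage sketch: debug_flags in si_screen (declared below) is a 64-bit mask
 * indexed by the DBG_* enum above, so a typical check looks like:
 *
 *    if (sscreen->debug_flags & DBG(NO_DCC))
 *       return;   // DCC disabled via the debug option
 */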
211
212 enum si_cache_policy {
213 L2_BYPASS,
214 L2_STREAM, /* same as SLC=1 */
215 L2_LRU, /* same as SLC=0 */
216 };
217
218 enum si_coherency {
219 SI_COHERENCY_NONE, /* no cache flushes needed */
220 SI_COHERENCY_SHADER,
221 SI_COHERENCY_CB_META,
222 SI_COHERENCY_CP,
223 };
224
225 struct si_compute;
226 struct si_shader_context;
227 struct hash_table;
228 struct u_suballocator;
229
230 /* Only 32-bit buffer allocations are supported, gallium doesn't support more
231 * at the moment.
232 */
233 struct si_resource {
234 struct threaded_resource b;
235
236 /* Winsys objects. */
237 struct pb_buffer *buf;
238 uint64_t gpu_address;
239 /* Memory usage if the buffer placement is optimal. */
240 uint64_t vram_usage;
241 uint64_t gart_usage;
242
243 /* Resource properties. */
244 uint64_t bo_size;
245 unsigned bo_alignment;
246 enum radeon_bo_domain domains;
247 enum radeon_bo_flag flags;
248 unsigned bind_history;
249 int max_forced_staging_uploads;
250
251 /* The buffer range which is initialized (with a write transfer,
252 * streamout, DMA, or as a random access target). The rest of
253 * the buffer is considered invalid and can be mapped unsynchronized.
254 *
255 * This allows unsynchronized mapping of a buffer range which hasn't
256 * been used yet. It's for applications which forget to use
257 * the unsynchronized map flag and expect the driver to figure it out.
258 */
259 struct util_range valid_buffer_range;
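/* Illustrative sketch (assuming util_ranges_intersect from util/u_range.h):
 * a buffer transfer_map path can promote a map to unsynchronized when the
 * requested range was never written:
 *
 *    if (!util_ranges_intersect(&res->valid_buffer_range,
 *                               box->x, box->x + box->width))
 *       usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
 */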
260
261 /* For buffers only. This indicates that a write operation has been
262 * performed by TC L2, but the cache hasn't been flushed.
263 * Any hw block which doesn't use or bypasses TC L2 should check this
264 * flag and flush the cache before using the buffer.
265 *
266 * For example, TC L2 must be flushed if a buffer which has been
267 * modified by a shader store instruction is about to be used as
268 * an index buffer. The reason is that VGT DMA index fetching doesn't
269 * use TC L2.
270 */
271 bool TC_L2_dirty;
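/* Illustrative sketch: a draw path about to bind this buffer as an index
 * buffer (VGT DMA index fetching bypasses TC L2) would do roughly:
 *
 *    if (si_resource(indexbuf)->TC_L2_dirty) {
 *       sctx->flags |= SI_CONTEXT_WB_L2;
 *       si_resource(indexbuf)->TC_L2_dirty = false;
 *    }
 */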
272
273 /* Whether this resource is referenced by bindless handles. */
274 bool texture_handle_allocated;
275 bool image_handle_allocated;
276
277 /* Whether the resource has been exported via resource_get_handle. */
278 unsigned external_usage; /* PIPE_HANDLE_USAGE_* */
279 };
280
281 struct si_transfer {
282 struct threaded_transfer b;
283 struct si_resource *staging;
284 unsigned offset;
285 };
286
287 struct si_texture {
288 struct si_resource buffer;
289
290 struct radeon_surf surface;
291 struct si_texture *flushed_depth_texture;
292
293 /* One texture allocation can contain these buffers:
294 * - image (pixel data)
295 * - FMASK buffer (MSAA compression)
296 * - CMASK buffer (MSAA compression and/or legacy fast color clear)
297 * - HTILE buffer (Z/S compression and fast Z/S clear)
298 * - DCC buffer (color compression and new fast color clear)
299 * - displayable DCC buffer (if the DCC buffer is not displayable)
300 * - DCC retile mapping buffer (if the DCC buffer is not displayable)
301 */
302 uint64_t cmask_base_address_reg;
303 struct si_resource *cmask_buffer;
304 unsigned cb_color_info; /* fast clear enable bit */
305 unsigned color_clear_value[2];
306 unsigned last_msaa_resolve_target_micro_mode;
307 unsigned num_level0_transfers;
308 unsigned plane_index; /* other planes are different pipe_resources */
309 unsigned num_planes;
310
311 /* Depth buffer compression and fast clear. */
312 float depth_clear_value;
313 uint16_t dirty_level_mask; /* each bit says if that mipmap is compressed */
314 uint16_t stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
315 enum pipe_format db_render_format:16;
316 uint8_t stencil_clear_value;
317 bool tc_compatible_htile:1;
318 bool htile_stencil_disabled:1;
319 bool depth_cleared:1; /* if it was cleared at least once */
320 bool stencil_cleared:1; /* if it was cleared at least once */
321 bool upgraded_depth:1; /* upgraded from unorm to Z32_FLOAT */
322 bool is_depth:1;
323 bool db_compatible:1;
324 bool can_sample_z:1;
325 bool can_sample_s:1;
326
327 /* We need to track DCC dirtiness, because st/dri usually calls
328 * flush_resource twice per frame (not a bug) and we don't want to
329 * decompress DCC twice. Also, the dirty tracking must be done even
330 * if DCC isn't used, because the DCC usage analysis requires it
331 * in case DCC is enabled later.
332 */
333 bool separate_dcc_dirty:1;
334 /* Statistics gathering for the DCC enablement heuristic. */
335 bool dcc_gather_statistics:1;
336 /* Counter that should be non-zero if the texture is bound to a
337 * framebuffer.
338 */
339 unsigned framebuffers_bound;
340 /* Whether the texture is a displayable back buffer and needs DCC
341 * decompression, which is expensive. Therefore, it's enabled only
342 * if statistics suggest that it will pay off and it's allocated
343 * separately. It can't be bound as a sampler by apps. Limited to
344 * target == 2D and last_level == 0. If enabled, dcc_offset contains
345 * the absolute GPUVM address, not the relative one.
346 */
347 struct si_resource *dcc_separate_buffer;
348 /* When DCC is temporarily disabled, the separate buffer is here. */
349 struct si_resource *last_dcc_separate_buffer;
350 /* Estimate of how much this color buffer is written to in units of
351 * full-screen draws: ps_invocations / (width * height)
352 * Shader kills, late Z, and blending with trivial discards make it
353 * inaccurate (we need to count CB updates, not PS invocations).
354 */
355 unsigned ps_draw_ratio;
356 /* The number of clears since the last DCC usage analysis. */
357 unsigned num_slow_clears;
358 };
359
360 struct si_surface {
361 struct pipe_surface base;
362
363 /* These can vary with block-compressed textures. */
364 uint16_t width0;
365 uint16_t height0;
366
367 bool color_initialized:1;
368 bool depth_initialized:1;
369
370 /* Misc. color flags. */
371 bool color_is_int8:1;
372 bool color_is_int10:1;
373 bool dcc_incompatible:1;
374
375 /* Color registers. */
376 unsigned cb_color_info;
377 unsigned cb_color_view;
378 unsigned cb_color_attrib;
379 unsigned cb_color_attrib2; /* GFX9 and later */
380 unsigned cb_color_attrib3; /* GFX10 and later */
381 unsigned cb_dcc_control; /* GFX8 and later */
382 unsigned spi_shader_col_format:8; /* no blending, no alpha-to-coverage. */
383 unsigned spi_shader_col_format_alpha:8; /* alpha-to-coverage */
384 unsigned spi_shader_col_format_blend:8; /* blending without alpha. */
385 unsigned spi_shader_col_format_blend_alpha:8; /* blending with alpha. */
386
387 /* DB registers. */
388 uint64_t db_depth_base; /* DB_Z_READ/WRITE_BASE */
389 uint64_t db_stencil_base;
390 uint64_t db_htile_data_base;
391 unsigned db_depth_info;
392 unsigned db_z_info;
393 unsigned db_z_info2; /* GFX9 only */
394 unsigned db_depth_view;
395 unsigned db_depth_size;
396 unsigned db_depth_slice;
397 unsigned db_stencil_info;
398 unsigned db_stencil_info2; /* GFX9 only */
399 unsigned db_htile_surface;
400 };
401
402 struct si_mmio_counter {
403 unsigned busy;
404 unsigned idle;
405 };
406
407 union si_mmio_counters {
408 struct {
409 /* For global GPU load including SDMA. */
410 struct si_mmio_counter gpu;
411
412 /* GRBM_STATUS */
413 struct si_mmio_counter spi;
414 struct si_mmio_counter gui;
415 struct si_mmio_counter ta;
416 struct si_mmio_counter gds;
417 struct si_mmio_counter vgt;
418 struct si_mmio_counter ia;
419 struct si_mmio_counter sx;
420 struct si_mmio_counter wd;
421 struct si_mmio_counter bci;
422 struct si_mmio_counter sc;
423 struct si_mmio_counter pa;
424 struct si_mmio_counter db;
425 struct si_mmio_counter cp;
426 struct si_mmio_counter cb;
427
428 /* SRBM_STATUS2 */
429 struct si_mmio_counter sdma;
430
431 /* CP_STAT */
432 struct si_mmio_counter pfp;
433 struct si_mmio_counter meq;
434 struct si_mmio_counter me;
435 struct si_mmio_counter surf_sync;
436 struct si_mmio_counter cp_dma;
437 struct si_mmio_counter scratch_ram;
438 } named;
439 unsigned array[0];
440 };
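/* Illustrative sketch: "array" aliases the named counters so the GPU-load
 * thread can update all of them generically; a load percentage for any
 * block is then derived from its busy/idle pair, e.g. for the whole GPU:
 *
 *    unsigned busy = counters.named.gpu.busy;
 *    unsigned idle = counters.named.gpu.idle;
 *    unsigned load_pct = (busy + idle) ? busy * 100 / (busy + idle) : 0;
 */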
441
442 struct si_memory_object {
443 struct pipe_memory_object b;
444 struct pb_buffer *buf;
445 uint32_t stride;
446 };
447
448 /* Saved CS data for debugging features. */
449 struct radeon_saved_cs {
450 uint32_t *ib;
451 unsigned num_dw;
452
453 struct radeon_bo_list_item *bo_list;
454 unsigned bo_count;
455 };
456
457 struct si_screen {
458 struct pipe_screen b;
459 struct radeon_winsys *ws;
460 struct disk_cache *disk_shader_cache;
461
462 struct radeon_info info;
463 uint64_t debug_flags;
464 char renderer_string[183];
465
466 void (*make_texture_descriptor)(
467 struct si_screen *screen,
468 struct si_texture *tex,
469 bool sampler,
470 enum pipe_texture_target target,
471 enum pipe_format pipe_format,
472 const unsigned char state_swizzle[4],
473 unsigned first_level, unsigned last_level,
474 unsigned first_layer, unsigned last_layer,
475 unsigned width, unsigned height, unsigned depth,
476 uint32_t *state,
477 uint32_t *fmask_state);
478
479 unsigned pa_sc_raster_config;
480 unsigned pa_sc_raster_config_1;
481 unsigned se_tile_repeat;
482 unsigned gs_table_depth;
483 unsigned tess_offchip_block_dw_size;
484 unsigned tess_offchip_ring_size;
485 unsigned tess_factor_ring_size;
486 unsigned vgt_hs_offchip_param;
487 unsigned eqaa_force_coverage_samples;
488 unsigned eqaa_force_z_samples;
489 unsigned eqaa_force_color_samples;
490 bool has_draw_indirect_multi;
491 bool has_out_of_order_rast;
492 bool assume_no_z_fights;
493 bool commutative_blend_add;
494 bool dpbb_allowed;
495 bool dfsm_allowed;
496 bool llvm_has_working_vgpr_indexing;
497 bool use_ngg;
498 bool use_ngg_streamout;
499
500 struct {
501 #define OPT_BOOL(name, dflt, description) bool name:1;
502 #include "si_debug_options.h"
503 } options;
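/* How the X-macro above works: each OPT_BOOL(name, default, description)
 * line in si_debug_options.h expands to a one-bit field of this struct.
 * For example, a hypothetical entry
 *
 *    OPT_BOOL(example_opt, false, "Example description")
 *
 * would expand to "bool example_opt:1;" here, and the same header can be
 * re-included elsewhere with a different OPT_BOOL definition to generate
 * option parsing or documentation from the same list.
 */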
504
505 /* Whether shaders are monolithic (1-part) or separate (3-part). */
506 bool use_monolithic_shaders;
507 bool record_llvm_ir;
508 bool dcc_msaa_allowed;
509
510 struct slab_parent_pool pool_transfers;
511
512 /* Texture filter settings. */
513 int force_aniso; /* -1 = disabled */
514
515 /* Auxiliary context. Mainly used to initialize resources.
516 * It must be locked prior to use and flushed before unlocking. */
517 struct pipe_context *aux_context;
518 simple_mtx_t aux_context_lock;
519
520 /* This must be in the screen, because UE4 uses one context for
521 * compilation and another one for rendering.
522 */
523 unsigned num_compilations;
524 /* Along with ST_DEBUG=precompile, this should show if applications
525 * are loading shaders on demand. This is a monotonic counter.
526 */
527 unsigned num_shaders_created;
528 unsigned num_shader_cache_hits;
529
530 /* GPU load thread. */
531 simple_mtx_t gpu_load_mutex;
532 thrd_t gpu_load_thread;
533 union si_mmio_counters mmio_counters;
534 volatile unsigned gpu_load_stop_thread; /* bool */
535
536 /* Performance counters. */
537 struct si_perfcounters *perfcounters;
538
539 /* If pipe_screen wants to recompute and re-emit the framebuffer,
540 * sampler, and image states of all contexts, it should atomically
541 * increment this.
542 *
543 * Each context will compare this with its own last known value of
544 * the counter before drawing and re-emit the states accordingly.
545 */
546 unsigned dirty_tex_counter;
547 unsigned dirty_buf_counter;
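/* Illustrative sketch (p_atomic_read is from util/u_atomic.h; the
 * descriptor-update helper name is assumed): each context polls the
 * counter before drawing and re-emits state when it changed:
 *
 *    unsigned counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
 *    if (counter != sctx->last_dirty_tex_counter) {
 *       sctx->last_dirty_tex_counter = counter;
 *       si_update_all_texture_descriptors(sctx); // assumed helper
 *    }
 */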
548
549 /* Atomically increment this counter when an existing texture's
550 * metadata is enabled or disabled in a way that requires changing
551 * contexts' compressed texture binding masks.
552 */
553 unsigned compressed_colortex_counter;
554
555 struct {
556 /* Context flags to set so that all writes from earlier jobs
557 * in the CP are seen by L2 clients.
558 */
559 unsigned cp_to_L2;
560
561 /* Context flags to set so that all writes from earlier jobs
562 * that end in L2 are seen by CP.
563 */
564 unsigned L2_to_cp;
565 } barrier_flags;
566
567 simple_mtx_t shader_parts_mutex;
568 struct si_shader_part *vs_prologs;
569 struct si_shader_part *tcs_epilogs;
570 struct si_shader_part *gs_prologs;
571 struct si_shader_part *ps_prologs;
572 struct si_shader_part *ps_epilogs;
573
574 /* Shader cache in memory.
575 *
576 * Design & limitations:
577 * - The shader cache is per screen (= per process), never saved to
578 * disk, and skips redundant shader compilations from TGSI to bytecode.
579 * - It can only be used with one-variant-per-shader support, in which
580 * case only the main (typically middle) part of shaders is cached.
581 * - Only VS, TCS, TES, PS are cached, out of which only the hw VS
582 * variants of VS and TES are cached, so LS and ES aren't.
583 * - GS and CS aren't cached, but it's certainly possible to cache
584 * those as well.
585 */
586 simple_mtx_t shader_cache_mutex;
587 struct hash_table *shader_cache;
588
589 /* Shader compiler queue for multithreaded compilation. */
590 struct util_queue shader_compiler_queue;
591 /* Use at most 3 normal compiler threads on quadcore and better.
592 * Hyperthreaded CPUs report the number of threads, but we want
593 * the number of cores. We only need this many threads for shader-db. */
594 struct ac_llvm_compiler compiler[24]; /* used by the queue only */
595
596 struct util_queue shader_compiler_queue_low_priority;
597 /* Use at most 2 low priority threads on quadcore and better.
598 * We want to minimize the impact on multithreaded Mesa. */
599 struct ac_llvm_compiler compiler_lowp[10];
600
601 unsigned compute_wave_size;
602 unsigned ps_wave_size;
603 unsigned ge_wave_size;
604 };
605
606 struct si_blend_color {
607 struct pipe_blend_color state;
608 bool any_nonzeros;
609 };
610
611 struct si_sampler_view {
612 struct pipe_sampler_view base;
613 /* [0..7] = image descriptor
614 * [4..7] = buffer descriptor */
615 uint32_t state[8];
616 uint32_t fmask_state[8];
617 const struct legacy_surf_level *base_level_info;
618 ubyte base_level;
619 ubyte block_width;
620 bool is_stencil_sampler;
621 bool is_integer;
622 bool dcc_incompatible;
623 };
624
625 #define SI_SAMPLER_STATE_MAGIC 0x34f1c35a
626
627 struct si_sampler_state {
628 #ifndef NDEBUG
629 unsigned magic;
630 #endif
631 uint32_t val[4];
632 uint32_t integer_val[4];
633 uint32_t upgraded_depth_val[4];
634 };
635
636 struct si_cs_shader_state {
637 struct si_compute *program;
638 struct si_compute *emitted_program;
639 unsigned offset;
640 bool initialized;
641 bool uses_scratch;
642 };
643
644 struct si_samplers {
645 struct pipe_sampler_view *views[SI_NUM_SAMPLERS];
646 struct si_sampler_state *sampler_states[SI_NUM_SAMPLERS];
647
648 /* The i-th bit is set if that element is enabled (non-NULL resource). */
649 unsigned enabled_mask;
650 uint32_t needs_depth_decompress_mask;
651 uint32_t needs_color_decompress_mask;
652 };
653
654 struct si_images {
655 struct pipe_image_view views[SI_NUM_IMAGES];
656 uint32_t needs_color_decompress_mask;
657 unsigned enabled_mask;
658 };
659
660 struct si_framebuffer {
661 struct pipe_framebuffer_state state;
662 unsigned colorbuf_enabled_4bit;
663 unsigned spi_shader_col_format;
664 unsigned spi_shader_col_format_alpha;
665 unsigned spi_shader_col_format_blend;
666 unsigned spi_shader_col_format_blend_alpha;
667 ubyte nr_samples:5; /* at most 16xAA */
668 ubyte log_samples:3; /* at most 4 = 16xAA */
669 ubyte nr_color_samples; /* at most 8xAA */
670 ubyte compressed_cb_mask;
671 ubyte uncompressed_cb_mask;
672 ubyte color_is_int8;
673 ubyte color_is_int10;
674 ubyte dirty_cbufs;
675 ubyte dcc_overwrite_combiner_watermark;
676 ubyte min_bytes_per_pixel;
677 bool dirty_zsbuf;
678 bool any_dst_linear;
679 bool CB_has_shader_readable_metadata;
680 bool DB_has_shader_readable_metadata;
681 bool all_DCC_pipe_aligned;
682 };
683
684 enum si_quant_mode {
685 /* This is the list we want to support. */
686 SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH,
687 SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH,
688 SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH,
689 };
690
691 struct si_signed_scissor {
692 int minx;
693 int miny;
694 int maxx;
695 int maxy;
696 enum si_quant_mode quant_mode;
697 };
698
699 struct si_viewports {
700 struct pipe_viewport_state states[SI_MAX_VIEWPORTS];
701 struct si_signed_scissor as_scissor[SI_MAX_VIEWPORTS];
702 bool y_inverted;
703 };
704
705 struct si_clip_state {
706 struct pipe_clip_state state;
707 bool any_nonzeros;
708 };
709
710 struct si_streamout_target {
711 struct pipe_stream_output_target b;
712
713 /* The buffer where BUFFER_FILLED_SIZE is stored. */
714 struct si_resource *buf_filled_size;
715 unsigned buf_filled_size_offset;
716 bool buf_filled_size_valid;
717
718 unsigned stride_in_dw;
719 };
720
721 struct si_streamout {
722 bool begin_emitted;
723
724 unsigned enabled_mask;
725 unsigned num_targets;
726 struct si_streamout_target *targets[PIPE_MAX_SO_BUFFERS];
727
728 unsigned append_bitmask;
729 bool suspended;
730
731 /* External state which comes from the vertex shader,
732 * it must be set explicitly when binding a shader. */
733 uint16_t *stride_in_dw;
734 unsigned enabled_stream_buffers_mask; /* stream0 buffers 0-3 in 4 LSB */
735
736 /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
737 unsigned hw_enabled_mask;
738
739 /* The state of VGT_STRMOUT_(CONFIG|EN). */
740 bool streamout_enabled;
741 bool prims_gen_query_enabled;
742 int num_prims_gen_queries;
743 };
744
745 /* A shader state consists of the shader selector, which is a constant state
746 * object shared by multiple contexts and shouldn't be modified, and
747 * the current shader variant selected for this context.
748 */
749 struct si_shader_ctx_state {
750 struct si_shader_selector *cso;
751 struct si_shader *current;
752 };
753
754 #define SI_NUM_VGT_PARAM_KEY_BITS 12
755 #define SI_NUM_VGT_PARAM_STATES (1 << SI_NUM_VGT_PARAM_KEY_BITS)
756
757 /* The IA_MULTI_VGT_PARAM key used to index the table of precomputed values.
758 * Some fields are set by state-change calls, most are set by draw_vbo.
759 */
760 union si_vgt_param_key {
761 struct {
762 #ifdef PIPE_ARCH_LITTLE_ENDIAN
763 unsigned prim:4;
764 unsigned uses_instancing:1;
765 unsigned multi_instances_smaller_than_primgroup:1;
766 unsigned primitive_restart:1;
767 unsigned count_from_stream_output:1;
768 unsigned line_stipple_enabled:1;
769 unsigned uses_tess:1;
770 unsigned tess_uses_prim_id:1;
771 unsigned uses_gs:1;
772 unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
773 #else /* PIPE_ARCH_BIG_ENDIAN */
774 unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
775 unsigned uses_gs:1;
776 unsigned tess_uses_prim_id:1;
777 unsigned uses_tess:1;
778 unsigned line_stipple_enabled:1;
779 unsigned count_from_stream_output:1;
780 unsigned primitive_restart:1;
781 unsigned multi_instances_smaller_than_primgroup:1;
782 unsigned uses_instancing:1;
783 unsigned prim:4;
784 #endif
785 } u;
786 uint32_t index;
787 };
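/* Usage sketch: because the pad bits are zero, the packed key can index
 * the precomputed table directly (see ia_multi_vgt_param in si_context):
 *
 *    union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
 *    key.u.primitive_restart = info->primitive_restart;
 *    unsigned vgt_param = sctx->ia_multi_vgt_param[key.index];
 */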
788
789 #define SI_NUM_VGT_STAGES_KEY_BITS 4
790 #define SI_NUM_VGT_STAGES_STATES (1 << SI_NUM_VGT_STAGES_KEY_BITS)
791
792 /* The VGT_SHADER_STAGES key used to index the table of precomputed values.
793 * Some fields are set by state-change calls, most are set by draw_vbo.
794 */
795 union si_vgt_stages_key {
796 struct {
797 #ifdef PIPE_ARCH_LITTLE_ENDIAN
798 unsigned tess:1;
799 unsigned gs:1;
800 unsigned ngg:1; /* gfx10+ */
801 unsigned streamout:1; /* only used with NGG */
802 unsigned _pad:32 - SI_NUM_VGT_STAGES_KEY_BITS;
803 #else /* PIPE_ARCH_BIG_ENDIAN */
804 unsigned _pad:32 - SI_NUM_VGT_STAGES_KEY_BITS;
805 unsigned streamout:1;
806 unsigned ngg:1;
807 unsigned gs:1;
808 unsigned tess:1;
809 #endif
810 } u;
811 uint32_t index;
812 };
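/* Usage sketch, mirroring the pattern above: the shader-stage key selects
 * a precomputed PM4 state from vgt_shader_config in si_context:
 *
 *    union si_vgt_stages_key key = {0};
 *    key.u.tess = sctx->tes_shader.cso != NULL;
 *    key.u.gs = sctx->gs_shader.cso != NULL;
 *    struct si_pm4_state *pm4 = sctx->vgt_shader_config[key.index];
 */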
813
814 struct si_texture_handle
815 {
816 unsigned desc_slot;
817 bool desc_dirty;
818 struct pipe_sampler_view *view;
819 struct si_sampler_state sstate;
820 };
821
822 struct si_image_handle
823 {
824 unsigned desc_slot;
825 bool desc_dirty;
826 struct pipe_image_view view;
827 };
828
829 struct si_saved_cs {
830 struct pipe_reference reference;
831 struct si_context *ctx;
832 struct radeon_saved_cs gfx;
833 struct radeon_saved_cs compute;
834 struct si_resource *trace_buf;
835 unsigned trace_id;
836
837 unsigned gfx_last_dw;
838 unsigned compute_last_dw;
839 bool flushed;
840 int64_t time_flush;
841 };
842
843 struct si_sdma_upload {
844 struct si_resource *dst;
845 struct si_resource *src;
846 unsigned src_offset;
847 unsigned dst_offset;
848 unsigned size;
849 };
850
851 struct si_context {
852 struct pipe_context b; /* base class */
853
854 enum radeon_family family;
855 enum chip_class chip_class;
856
857 struct radeon_winsys *ws;
858 struct radeon_winsys_ctx *ctx;
859 struct radeon_cmdbuf *gfx_cs; /* compute IB if graphics is disabled */
860 struct radeon_cmdbuf *dma_cs;
861 struct pipe_fence_handle *last_gfx_fence;
862 struct pipe_fence_handle *last_sdma_fence;
863 struct si_resource *eop_bug_scratch;
864 struct u_upload_mgr *cached_gtt_allocator;
865 struct threaded_context *tc;
866 struct u_suballocator *allocator_zeroed_memory;
867 struct slab_child_pool pool_transfers;
868 struct slab_child_pool pool_transfers_unsync; /* for threaded_context */
869 struct pipe_device_reset_callback device_reset_callback;
870 struct u_log_context *log;
871 void *query_result_shader;
872 void *sh_query_result_shader;
873
874 void (*emit_cache_flush)(struct si_context *ctx);
875
876 struct blitter_context *blitter;
877 void *noop_blend;
878 void *noop_dsa;
879 void *discard_rasterizer_state;
880 void *custom_dsa_flush;
881 void *custom_blend_resolve;
882 void *custom_blend_fmask_decompress;
883 void *custom_blend_eliminate_fastclear;
884 void *custom_blend_dcc_decompress;
885 void *vs_blit_pos;
886 void *vs_blit_pos_layered;
887 void *vs_blit_color;
888 void *vs_blit_color_layered;
889 void *vs_blit_texcoord;
890 void *cs_clear_buffer;
891 void *cs_copy_buffer;
892 void *cs_copy_image;
893 void *cs_copy_image_1d_array;
894 void *cs_clear_render_target;
895 void *cs_clear_render_target_1d_array;
896 void *cs_dcc_retile;
897 struct si_screen *screen;
898 struct pipe_debug_callback debug;
899 struct ac_llvm_compiler compiler; /* only non-threaded compilation */
900 struct si_shader_ctx_state fixed_func_tcs_shader;
901 /* Offset 0: EOP flush number; Offset 4: GDS prim restart counter */
902 struct si_resource *wait_mem_scratch;
903 unsigned wait_mem_number;
904 uint16_t prefetch_L2_mask;
905
906 bool has_graphics;
907 bool gfx_flush_in_progress:1;
908 bool gfx_last_ib_is_busy:1;
909 bool compute_is_busy:1;
910
911 unsigned num_gfx_cs_flushes;
912 unsigned initial_gfx_cs_size;
913 unsigned last_dirty_tex_counter;
914 unsigned last_dirty_buf_counter;
915 unsigned last_compressed_colortex_counter;
916 unsigned last_num_draw_calls;
917 unsigned flags; /* flush flags */
918 /* Current unaccounted memory usage. */
919 uint64_t vram;
920 uint64_t gtt;
921
922 /* Compute-based primitive discard. */
923 unsigned prim_discard_vertex_count_threshold;
924 struct pb_buffer *gds;
925 struct pb_buffer *gds_oa;
926 struct radeon_cmdbuf *prim_discard_compute_cs;
927 unsigned compute_gds_offset;
928 struct si_shader *compute_ib_last_shader;
929 uint32_t compute_rewind_va;
930 unsigned compute_num_prims_in_batch;
931 bool preserve_prim_restart_gds_at_flush;
932 /* index_ring is divided into 2 halves for doublebuffering. */
933 struct si_resource *index_ring;
934 unsigned index_ring_base; /* offset of a per-IB portion */
935 unsigned index_ring_offset; /* offset within a per-IB portion */
936 unsigned index_ring_size_per_ib; /* max available size per IB */
937 bool prim_discard_compute_ib_initialized;
938 /* For tracking the last execution barrier - it can be either
939 * a WRITE_DATA packet or a fence. */
940 uint32_t *last_pkt3_write_data;
941 struct si_resource *barrier_buf;
942 unsigned barrier_buf_offset;
943 struct pipe_fence_handle *last_ib_barrier_fence;
944 struct si_resource *last_ib_barrier_buf;
945 unsigned last_ib_barrier_buf_offset;
946
947 /* Atoms (direct states). */
948 union si_state_atoms atoms;
949 unsigned dirty_atoms; /* mask */
950 /* PM4 states (precomputed immutable states) */
951 unsigned dirty_states;
952 union si_state queued;
953 union si_state emitted;
954
955 /* Atom declarations. */
956 struct si_framebuffer framebuffer;
957 unsigned sample_locs_num_samples;
958 uint16_t sample_mask;
959 unsigned last_cb_target_mask;
960 struct si_blend_color blend_color;
961 struct si_clip_state clip_state;
962 struct si_shader_data shader_pointers;
963 struct si_stencil_ref stencil_ref;
964 struct pipe_scissor_state scissors[SI_MAX_VIEWPORTS];
965 struct si_streamout streamout;
966 struct si_viewports viewports;
967 unsigned num_window_rectangles;
968 bool window_rectangles_include;
969 struct pipe_scissor_state window_rectangles[4];
970
971 /* Precomputed states. */
972 struct si_pm4_state *init_config;
973 struct si_pm4_state *init_config_gs_rings;
974 bool init_config_has_vgt_flush;
975 struct si_pm4_state *vgt_shader_config[SI_NUM_VGT_STAGES_STATES];
976
977 /* shaders */
978 struct si_shader_ctx_state ps_shader;
979 struct si_shader_ctx_state gs_shader;
980 struct si_shader_ctx_state vs_shader;
981 struct si_shader_ctx_state tcs_shader;
982 struct si_shader_ctx_state tes_shader;
983 struct si_shader_ctx_state cs_prim_discard_state;
984 struct si_cs_shader_state cs_shader_state;
985
986 /* shader information */
987 struct si_vertex_elements *vertex_elements;
988 unsigned sprite_coord_enable;
989 unsigned cs_max_waves_per_sh;
990 bool flatshade;
991 bool do_update_shaders;
992
993 /* vertex buffer descriptors */
994 uint32_t *vb_descriptors_gpu_list;
995 struct si_resource *vb_descriptors_buffer;
996 unsigned vb_descriptors_offset;
997
998 /* shader descriptors */
999 struct si_descriptors descriptors[SI_NUM_DESCS];
1000 unsigned descriptors_dirty;
1001 unsigned shader_pointers_dirty;
1002 unsigned shader_needs_decompress_mask;
1003 struct si_buffer_resources rw_buffers;
1004 struct si_buffer_resources const_and_shader_buffers[SI_NUM_SHADERS];
1005 struct si_samplers samplers[SI_NUM_SHADERS];
1006 struct si_images images[SI_NUM_SHADERS];
1007 bool bo_list_add_all_resident_resources;
1008 bool bo_list_add_all_gfx_resources;
1009 bool bo_list_add_all_compute_resources;
1010
1011 /* other shader resources */
1012 struct pipe_constant_buffer null_const_buf; /* used for set_constant_buffer(NULL) on GFX7 */
1013 struct pipe_resource *esgs_ring;
1014 struct pipe_resource *gsvs_ring;
1015 struct pipe_resource *tess_rings;
1016 union pipe_color_union *border_color_table; /* in CPU memory, any endian */
1017 struct si_resource *border_color_buffer;
1018 union pipe_color_union *border_color_map; /* in VRAM (slow access), little endian */
1019 unsigned border_color_count;
1020 unsigned num_vs_blit_sgprs;
1021 uint32_t vs_blit_sh_data[SI_VS_BLIT_SGPRS_POS_TEXCOORD];
1022 uint32_t cs_user_data[4];
1023
1024 /* Vertex and index buffers. */
1025 bool vertex_buffers_dirty;
1026 bool vertex_buffer_pointer_dirty;
1027 struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
1028 uint16_t vertex_buffer_unaligned; /* bitmask of not dword-aligned buffers */
1029
1030 /* MSAA config state. */
1031 int ps_iter_samples;
1032 bool ps_uses_fbfetch;
1033 bool smoothing_enabled;
1034
1035 /* DB render state. */
1036 unsigned ps_db_shader_control;
1037 unsigned dbcb_copy_sample;
1038 bool dbcb_depth_copy_enabled:1;
1039 bool dbcb_stencil_copy_enabled:1;
1040 bool db_flush_depth_inplace:1;
1041 bool db_flush_stencil_inplace:1;
1042 bool db_depth_clear:1;
1043 bool db_depth_disable_expclear:1;
1044 bool db_stencil_clear:1;
1045 bool db_stencil_disable_expclear:1;
1046 bool occlusion_queries_disabled:1;
1047 bool generate_mipmap_for_depth:1;
1048
1049 /* Emitted draw state. */
1050 bool gs_tri_strip_adj_fix:1;
1051 bool ls_vgpr_fix:1;
1052 bool prim_discard_cs_instancing:1;
1053 bool ngg:1;
1054 int last_index_size;
1055 int last_base_vertex;
1056 int last_start_instance;
1057 int last_instance_count;
1058 int last_drawid;
1059 int last_sh_base_reg;
1060 int last_primitive_restart_en;
1061 int last_restart_index;
1062 int last_prim;
1063 int last_multi_vgt_param;
1064 int last_rast_prim;
1065 int last_flatshade_first;
1066 int last_binning_enabled;
1067 unsigned last_sc_line_stipple;
1068 unsigned current_vs_state;
1069 unsigned last_vs_state;
1070 enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
1071
1072 /* Scratch buffer */
1073 struct si_resource *scratch_buffer;
1074 unsigned scratch_waves;
1075 unsigned spi_tmpring_size;
1076 unsigned max_seen_scratch_bytes_per_wave;
1077 unsigned max_seen_compute_scratch_bytes_per_wave;
1078
1079 struct si_resource *compute_scratch_buffer;
1080
1081 /* Emitted derived tessellation state. */
1082 /* Local shader (VS), or HS if LS-HS are merged. */
1083 struct si_shader *last_ls;
1084 struct si_shader_selector *last_tcs;
1085 int last_num_tcs_input_cp;
1086 int last_tes_sh_base;
1087 bool last_tess_uses_primid;
1088 unsigned last_num_patches;
1089 int last_ls_hs_config;
1090
1091 /* Debug state. */
1092 bool is_debug;
1093 struct si_saved_cs *current_saved_cs;
1094 uint64_t dmesg_timestamp;
1095 unsigned apitrace_call_number;
1096
1097 /* Other state */
1098 bool need_check_render_feedback;
1099 bool decompression_enabled;
1100 bool dpbb_force_off;
1101 bool vs_writes_viewport_index;
1102 bool vs_disables_clipping_viewport;
1103
1104 /* Precomputed IA_MULTI_VGT_PARAM */
1105 union si_vgt_param_key ia_multi_vgt_param_key;
1106 unsigned ia_multi_vgt_param[SI_NUM_VGT_PARAM_STATES];
1107
1108 /* Bindless descriptors. */
1109 struct si_descriptors bindless_descriptors;
1110 struct util_idalloc bindless_used_slots;
1111 unsigned num_bindless_descriptors;
1112 bool bindless_descriptors_dirty;
1113 bool graphics_bindless_pointer_dirty;
1114 bool compute_bindless_pointer_dirty;
1115
1116 /* Allocated bindless handles */
1117 struct hash_table *tex_handles;
1118 struct hash_table *img_handles;
1119
1120 /* Resident bindless handles */
1121 struct util_dynarray resident_tex_handles;
1122 struct util_dynarray resident_img_handles;
1123
1124 /* Resident bindless handles which need decompression */
1125 struct util_dynarray resident_tex_needs_color_decompress;
1126 struct util_dynarray resident_img_needs_color_decompress;
1127 struct util_dynarray resident_tex_needs_depth_decompress;
1128
1129 /* Bindless state */
1130 bool uses_bindless_samplers;
1131 bool uses_bindless_images;
1132
1133 /* MSAA sample locations.
1134 * The first index is the sample index.
1135 * The second index is the coordinate: X, Y. */
1136 struct {
1137 float x1[1][2];
1138 float x2[2][2];
1139 float x4[4][2];
1140 float x8[8][2];
1141 float x16[16][2];
1142 } sample_positions;
1143 struct pipe_resource *sample_pos_buffer;
1144
1145 /* Misc stats. */
1146 unsigned num_draw_calls;
1147 unsigned num_decompress_calls;
1148 unsigned num_mrt_draw_calls;
1149 unsigned num_prim_restart_calls;
1150 unsigned num_spill_draw_calls;
1151 unsigned num_compute_calls;
1152 unsigned num_spill_compute_calls;
1153 unsigned num_dma_calls;
1154 unsigned num_cp_dma_calls;
1155 unsigned num_vs_flushes;
1156 unsigned num_ps_flushes;
1157 unsigned num_cs_flushes;
1158 unsigned num_cb_cache_flushes;
1159 unsigned num_db_cache_flushes;
1160 unsigned num_L2_invalidates;
1161 unsigned num_L2_writebacks;
1162 unsigned num_resident_handles;
1163 uint64_t num_alloc_tex_transfer_bytes;
1164 unsigned last_tex_ps_draw_ratio; /* for query */
1165 unsigned compute_num_verts_accepted;
1166 unsigned compute_num_verts_rejected;
1167 unsigned compute_num_verts_ineligible; /* due to low vertex count */
1168 unsigned context_roll;
1169
1170 /* Queries. */
1171 /* Maintain the list of active queries for pausing between IBs. */
1172 int num_occlusion_queries;
1173 int num_perfect_occlusion_queries;
1174 int num_pipeline_stat_queries;
1175 struct list_head active_queries;
1176 unsigned num_cs_dw_queries_suspend;
1177
1178 /* Render condition. */
1179 struct pipe_query *render_cond;
1180 unsigned render_cond_mode;
1181 bool render_cond_invert;
1182 bool render_cond_force_off; /* for u_blitter */
1183
1184 /* For uploading data via GTT and copy to VRAM on context flush via SDMA. */
1185 bool sdma_uploads_in_progress;
1186 struct si_sdma_upload *sdma_uploads;
1187 unsigned num_sdma_uploads;
1188 unsigned max_sdma_uploads;
1189
1190 /* Shader-based queries. */
1191 struct list_head shader_query_buffers;
1192 unsigned num_active_shader_queries;
1193
1194 /* Statistics gathering for the DCC enablement heuristic. It can't be
1195 * in si_texture because si_texture can be shared by multiple
1196 * contexts. This is for back buffers only. We shouldn't get too many
1197 * of those.
1198 *
1199 * X11 DRI3 rotates among a finite set of back buffers. They should
1200 * all fit in this array. If they don't, separate DCC might never be
1201 * enabled by DCC stat gathering.
1202 */
1203 struct {
1204 struct si_texture *tex;
1205 /* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
1206 struct pipe_query *ps_stats[3];
1207 /* If all slots are used and another slot is needed,
1208 * the least recently used slot is evicted based on this. */
1209 int64_t last_use_timestamp;
1210 bool query_active;
1211 } dcc_stats[5];
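/* Illustrative sketch (ARRAY_SIZE is from util/macros.h): when no slot
 * matches the texture and all slots are in use, evict the least recently
 * used one:
 *
 *    int lru = 0;
 *    for (int i = 1; i < ARRAY_SIZE(sctx->dcc_stats); i++) {
 *       if (sctx->dcc_stats[i].last_use_timestamp <
 *           sctx->dcc_stats[lru].last_use_timestamp)
 *          lru = i;
 *    }
 */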
1212
1213 /* Copy one resource to another using async DMA. */
1214 void (*dma_copy)(struct pipe_context *ctx,
1215 struct pipe_resource *dst,
1216 unsigned dst_level,
1217 unsigned dst_x, unsigned dst_y, unsigned dst_z,
1218 struct pipe_resource *src,
1219 unsigned src_level,
1220 const struct pipe_box *src_box);
1221
1222 struct si_tracked_regs tracked_regs;
1223 };
1224
1225 /* cik_sdma.c */
1226 void cik_init_sdma_functions(struct si_context *sctx);
1227
1228 /* si_blit.c */
1229 enum si_blitter_op /* bitmask */
1230 {
1231 SI_SAVE_TEXTURES = 1,
1232 SI_SAVE_FRAMEBUFFER = 2,
1233 SI_SAVE_FRAGMENT_STATE = 4,
1234 SI_DISABLE_RENDER_COND = 8,
1235 };
1236
1237 void si_blitter_begin(struct si_context *sctx, enum si_blitter_op op);
1238 void si_blitter_end(struct si_context *sctx);
1239 void si_init_blit_functions(struct si_context *sctx);
1240 void si_decompress_textures(struct si_context *sctx, unsigned shader_mask);
1241 void si_resource_copy_region(struct pipe_context *ctx,
1242 struct pipe_resource *dst,
1243 unsigned dst_level,
1244 unsigned dstx, unsigned dsty, unsigned dstz,
1245 struct pipe_resource *src,
1246 unsigned src_level,
1247 const struct pipe_box *src_box);
1248 void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex);
1249
1250 /* si_buffer.c */
1251 bool si_rings_is_buffer_referenced(struct si_context *sctx,
1252 struct pb_buffer *buf,
1253 enum radeon_bo_usage usage);
1254 void *si_buffer_map_sync_with_rings(struct si_context *sctx,
1255 struct si_resource *resource,
1256 unsigned usage);
1257 void si_init_resource_fields(struct si_screen *sscreen,
1258 struct si_resource *res,
1259 uint64_t size, unsigned alignment);
1260 bool si_alloc_resource(struct si_screen *sscreen,
1261 struct si_resource *res);
1262 struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
1263 unsigned flags, unsigned usage,
1264 unsigned size, unsigned alignment);
1265 struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen,
1266 unsigned flags, unsigned usage,
1267 unsigned size, unsigned alignment);
1268 void si_replace_buffer_storage(struct pipe_context *ctx,
1269 struct pipe_resource *dst,
1270 struct pipe_resource *src);
1271 void si_init_screen_buffer_functions(struct si_screen *sscreen);
1272 void si_init_buffer_functions(struct si_context *sctx);
1273
1274 /* si_clear.c */
1275 enum pipe_format si_simplify_cb_format(enum pipe_format format);
1276 bool vi_alpha_is_on_msb(struct si_screen *sscreen, enum pipe_format format);
1277 bool vi_dcc_clear_level(struct si_context *sctx,
1278 struct si_texture *tex,
1279 unsigned level, unsigned clear_value);
1280 void si_init_clear_functions(struct si_context *sctx);
1281
1282 /* si_compute_blit.c */
1283 unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
1284 enum si_cache_policy cache_policy);
1285 void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
1286 uint64_t offset, uint64_t size, uint32_t *clear_value,
1287 uint32_t clear_value_size, enum si_coherency coher,
1288 bool force_cpdma);
1289 void si_copy_buffer(struct si_context *sctx,
1290 struct pipe_resource *dst, struct pipe_resource *src,
1291 uint64_t dst_offset, uint64_t src_offset, unsigned size);
1292 void si_compute_copy_image(struct si_context *sctx,
1293 struct pipe_resource *dst,
1294 unsigned dst_level,
1295 struct pipe_resource *src,
1296 unsigned src_level,
1297 unsigned dstx, unsigned dsty, unsigned dstz,
1298 const struct pipe_box *src_box);
1299 void si_compute_clear_render_target(struct pipe_context *ctx,
1300 struct pipe_surface *dstsurf,
1301 const union pipe_color_union *color,
1302 unsigned dstx, unsigned dsty,
1303 unsigned width, unsigned height,
1304 bool render_condition_enabled);
1305 void si_retile_dcc(struct si_context *sctx, struct si_texture *tex);
1306 void si_init_compute_blit_functions(struct si_context *sctx);
1307
1308 /* si_cp_dma.c */
1309 #define SI_CPDMA_SKIP_CHECK_CS_SPACE (1 << 0) /* don't call need_cs_space */
1310 #define SI_CPDMA_SKIP_SYNC_AFTER (1 << 1) /* don't wait for DMA after the copy */
1311 #define SI_CPDMA_SKIP_SYNC_BEFORE (1 << 2) /* don't wait for DMA before the copy (RAW hazards) */
1312 #define SI_CPDMA_SKIP_GFX_SYNC (1 << 3) /* don't flush caches and don't wait for PS/CS */
1313 #define SI_CPDMA_SKIP_BO_LIST_UPDATE (1 << 4) /* don't update the BO list */
1314 #define SI_CPDMA_SKIP_ALL (SI_CPDMA_SKIP_CHECK_CS_SPACE | \
1315 SI_CPDMA_SKIP_SYNC_AFTER | \
1316 SI_CPDMA_SKIP_SYNC_BEFORE | \
1317 SI_CPDMA_SKIP_GFX_SYNC | \
1318 SI_CPDMA_SKIP_BO_LIST_UPDATE)
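/* Usage sketch: a caller that manages synchronization itself (e.g. a
 * prefetch, which is pure warm-up work) can pass all skip flags at once
 * to si_cp_dma_copy_buffer, declared below:
 *
 *    si_cp_dma_copy_buffer(sctx, dst, src, 0, 0, size,
 *                          SI_CPDMA_SKIP_ALL, SI_COHERENCY_NONE, L2_LRU);
 */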
1319
1320 void si_cp_dma_wait_for_idle(struct si_context *sctx);
1321 void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
1322 struct pipe_resource *dst, uint64_t offset,
1323 uint64_t size, unsigned value, unsigned user_flags,
1324 enum si_coherency coher, enum si_cache_policy cache_policy);
1325 void si_cp_dma_copy_buffer(struct si_context *sctx,
1326 struct pipe_resource *dst, struct pipe_resource *src,
1327 uint64_t dst_offset, uint64_t src_offset, unsigned size,
1328 unsigned user_flags, enum si_coherency coher,
1329 enum si_cache_policy cache_policy);
1330 void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
1331 uint64_t offset, unsigned size);
1332 void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only);
1333 void si_test_gds(struct si_context *sctx);
1334 void si_cp_write_data(struct si_context *sctx, struct si_resource *buf,
1335 unsigned offset, unsigned size, unsigned dst_sel,
1336 unsigned engine, const void *data);
1337 void si_cp_copy_data(struct si_context *sctx, struct radeon_cmdbuf *cs,
1338 unsigned dst_sel, struct si_resource *dst, unsigned dst_offset,
1339 unsigned src_sel, struct si_resource *src, unsigned src_offset);
1340
1341 /* si_debug.c */
1342 void si_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
1343 struct radeon_saved_cs *saved, bool get_buffer_list);
1344 void si_clear_saved_cs(struct radeon_saved_cs *saved);
1345 void si_destroy_saved_cs(struct si_saved_cs *scs);
1346 void si_auto_log_cs(void *data, struct u_log_context *log);
1347 void si_log_hw_flush(struct si_context *sctx);
1348 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log);
1349 void si_log_compute_state(struct si_context *sctx, struct u_log_context *log);
1350 void si_init_debug_functions(struct si_context *sctx);
1351 void si_check_vm_faults(struct si_context *sctx,
1352 struct radeon_saved_cs *saved, enum ring_type ring);
1353 bool si_replace_shader(unsigned num, struct si_shader_binary *binary);
1354
1355 /* si_dma.c */
1356 void si_init_dma_functions(struct si_context *sctx);
1357
1358 /* si_dma_cs.c */
1359 void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
1360 uint64_t offset);
1361 void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
1362 uint64_t offset, uint64_t size, unsigned clear_value);
1363 void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
1364 struct si_resource *dst, struct si_resource *src);
1365 void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
1366 struct pipe_fence_handle **fence);
1367 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
1368 uint64_t offset, uint64_t size, unsigned value);
1369
1370 /* si_fence.c */
1371 void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
1372 unsigned event, unsigned event_flags,
1373 unsigned dst_sel, unsigned int_sel, unsigned data_sel,
1374 struct si_resource *buf, uint64_t va,
1375 uint32_t new_fence, unsigned query_type);
1376 unsigned si_cp_write_fence_dwords(struct si_screen *screen);
1377 void si_cp_wait_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
1378 uint64_t va, uint32_t ref, uint32_t mask, unsigned flags);
1379 void si_init_fence_functions(struct si_context *ctx);
1380 void si_init_screen_fence_functions(struct si_screen *screen);
1381 struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
1382 struct tc_unflushed_batch_token *tc_token);
1383
1384 /* si_get.c */
1385 void si_init_screen_get_functions(struct si_screen *sscreen);
1386
1387 /* si_gfx_cs.c */
1388 void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
1389 struct pipe_fence_handle **fence);
1390 void si_allocate_gds(struct si_context *ctx);
1391 void si_begin_new_gfx_cs(struct si_context *ctx);
1392 void si_need_gfx_cs_space(struct si_context *ctx);
1393 void si_unref_sdma_uploads(struct si_context *sctx);
1394
1395 /* si_gpu_load.c */
1396 void si_gpu_load_kill_thread(struct si_screen *sscreen);
1397 uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type);
1398 unsigned si_end_counter(struct si_screen *sscreen, unsigned type,
1399 uint64_t begin);
1400
1401 /* si_compute.c */
1402 void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf *cs);
1403 void si_init_compute_functions(struct si_context *sctx);
1404
1405 /* si_compute_prim_discard.c */
1406 enum si_prim_discard_outcome {
1407 SI_PRIM_DISCARD_ENABLED,
1408 SI_PRIM_DISCARD_DISABLED,
1409 SI_PRIM_DISCARD_DRAW_SPLIT,
1410 };
1411
1412 void si_build_prim_discard_compute_shader(struct si_shader_context *ctx);
1413 enum si_prim_discard_outcome
1414 si_prepare_prim_discard_or_split_draw(struct si_context *sctx,
1415 const struct pipe_draw_info *info,
1416 bool primitive_restart);
1417 void si_compute_signal_gfx(struct si_context *sctx);
1418 void si_dispatch_prim_discard_cs_and_draw(struct si_context *sctx,
1419 const struct pipe_draw_info *info,
1420 unsigned index_size,
1421 unsigned base_vertex,
1422 uint64_t input_indexbuf_va,
1423 unsigned input_indexbuf_max_elements);
1424 void si_initialize_prim_discard_tunables(struct si_context *sctx);
1425
1426 /* si_perfcounters.c */
1427 void si_init_perfcounters(struct si_screen *screen);
1428 void si_destroy_perfcounters(struct si_screen *screen);
1429
1430 /* si_pipe.c */
1431 bool si_check_device_reset(struct si_context *sctx);
1432
1433 /* si_query.c */
1434 void si_init_screen_query_functions(struct si_screen *sscreen);
1435 void si_init_query_functions(struct si_context *sctx);
1436 void si_suspend_queries(struct si_context *sctx);
1437 void si_resume_queries(struct si_context *sctx);
1438
1439 /* si_shaderlib_tgsi.c */
1440 void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type,
1441 unsigned num_layers);
1442 void *si_create_fixed_func_tcs(struct si_context *sctx);
1443 void *si_create_dma_compute_shader(struct pipe_context *ctx,
1444 unsigned num_dwords_per_thread,
1445 bool dst_stream_cache_policy, bool is_copy);
1446 void *si_create_copy_image_compute_shader(struct pipe_context *ctx);
1447 void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx);
1448 void *si_clear_render_target_shader(struct pipe_context *ctx);
1449 void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx);
1450 void *si_create_dcc_retile_cs(struct pipe_context *ctx);
1451 void *si_create_query_result_cs(struct si_context *sctx);
1452 void *gfx10_create_sh_query_result_cs(struct si_context *sctx);
1453
1454 /* gfx10_query.c */
1455 void gfx10_init_query(struct si_context *sctx);
1456 void gfx10_destroy_query(struct si_context *sctx);
1457
1458 /* si_test_dma.c */
1459 void si_test_dma(struct si_screen *sscreen);
1460
1461 /* si_test_clearbuffer.c */
1462 void si_test_dma_perf(struct si_screen *sscreen);
1463
1464 /* si_uvd.c */
1465 struct pipe_video_codec *si_uvd_create_decoder(struct pipe_context *context,
1466 const struct pipe_video_codec *templ);
1467
1468 struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe,
1469 const struct pipe_video_buffer *tmpl);
1470
1471 /* si_viewport.c */
1472 void si_update_vs_viewport_state(struct si_context *ctx);
1473 void si_init_viewport_functions(struct si_context *ctx);
1474
1475 /* si_texture.c */
1476 bool si_prepare_for_dma_blit(struct si_context *sctx,
1477 struct si_texture *dst,
1478 unsigned dst_level, unsigned dstx,
1479 unsigned dsty, unsigned dstz,
1480 struct si_texture *src,
1481 unsigned src_level,
1482 const struct pipe_box *src_box);
1483 void si_eliminate_fast_color_clear(struct si_context *sctx,
1484 struct si_texture *tex);
1485 void si_texture_discard_cmask(struct si_screen *sscreen,
1486 struct si_texture *tex);
1487 bool si_init_flushed_depth_texture(struct pipe_context *ctx,
1488 struct pipe_resource *texture);
1489 void si_print_texture_info(struct si_screen *sscreen,
1490 struct si_texture *tex, struct u_log_context *log);
1491 struct pipe_resource *si_texture_create(struct pipe_screen *screen,
1492 const struct pipe_resource *templ);
1493 bool vi_dcc_formats_compatible(struct si_screen *sscreen,
1494 enum pipe_format format1,
1495 enum pipe_format format2);
1496 bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
1497 unsigned level,
1498 enum pipe_format view_format);
1499 void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
1500 struct pipe_resource *tex,
1501 unsigned level,
1502 enum pipe_format view_format);
1503 struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
1504 struct pipe_resource *texture,
1505 const struct pipe_surface *templ,
1506 unsigned width0, unsigned height0,
1507 unsigned width, unsigned height);
1508 unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
1509 void vi_separate_dcc_try_enable(struct si_context *sctx,
1510 struct si_texture *tex);
1511 void vi_separate_dcc_start_query(struct si_context *sctx,
1512 struct si_texture *tex);
1513 void vi_separate_dcc_stop_query(struct si_context *sctx,
1514 struct si_texture *tex);
1515 void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
1516 struct si_texture *tex);
1517 bool si_texture_disable_dcc(struct si_context *sctx,
1518 struct si_texture *tex);
1519 void si_init_screen_texture_functions(struct si_screen *sscreen);
1520 void si_init_context_texture_functions(struct si_context *sctx);
1521
1522
1523 /*
1524 * common helpers
1525 */
1526
1527 static inline struct si_resource *si_resource(struct pipe_resource *r)
1528 {
1529 return (struct si_resource*)r;
1530 }
1531
1532 static inline void
1533 si_resource_reference(struct si_resource **ptr, struct si_resource *res)
1534 {
1535 pipe_resource_reference((struct pipe_resource **)ptr,
1536 (struct pipe_resource *)res);
1537 }
1538
1539 static inline void
1540 si_texture_reference(struct si_texture **ptr, struct si_texture *res)
1541 {
1542 pipe_resource_reference((struct pipe_resource **)ptr, &res->buffer.b.b);
1543 }
1544
1545 static inline bool
1546 vi_dcc_enabled(struct si_texture *tex, unsigned level)
1547 {
1548 return tex->surface.dcc_offset && level < tex->surface.num_dcc_levels;
1549 }
1550
1551 static inline unsigned
1552 si_tile_mode_index(struct si_texture *tex, unsigned level, bool stencil)
1553 {
1554 if (stencil)
1555 return tex->surface.u.legacy.stencil_tiling_index[level];
1556 else
1557 return tex->surface.u.legacy.tiling_index[level];
1558 }
1559
1560 static inline unsigned
1561 si_get_minimum_num_gfx_cs_dwords(struct si_context *sctx)
1562 {
1563 /* Don't count the needed CS space exactly and just use an upper bound.
1564 *
1565 * Also reserve space for stopping queries at the end of IB, because
1566 * the number of active queries is unlimited in theory.
1567 */
1568 return 2048 + sctx->num_cs_dw_queries_suspend;
1569 }
1570
1571 static inline void
1572 si_context_add_resource_size(struct si_context *sctx, struct pipe_resource *r)
1573 {
1574 if (r) {
1575 /* Add memory usage for need_gfx_cs_space */
1576 sctx->vram += si_resource(r)->vram_usage;
1577 sctx->gtt += si_resource(r)->gart_usage;
1578 }
1579 }

static inline void
si_invalidate_draw_sh_constants(struct si_context *sctx)
{
        sctx->last_base_vertex = SI_BASE_VERTEX_UNKNOWN;
        sctx->last_instance_count = SI_INSTANCE_COUNT_UNKNOWN;
}

static inline unsigned
si_get_atom_bit(struct si_context *sctx, struct si_atom *atom)
{
        return 1 << (atom - sctx->atoms.array);
}

static inline void
si_set_atom_dirty(struct si_context *sctx, struct si_atom *atom, bool dirty)
{
        unsigned bit = si_get_atom_bit(sctx, atom);

        if (dirty)
                sctx->dirty_atoms |= bit;
        else
                sctx->dirty_atoms &= ~bit;
}

static inline bool
si_is_atom_dirty(struct si_context *sctx, struct si_atom *atom)
{
        return (sctx->dirty_atoms & si_get_atom_bit(sctx, atom)) != 0;
}

static inline void
si_mark_atom_dirty(struct si_context *sctx, struct si_atom *atom)
{
        si_set_atom_dirty(sctx, atom, true);
}
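
/* Usage sketch (illustrative only): each state atom's dirty status is one
 * bit in dirty_atoms, derived from the atom's index within sctx->atoms.array,
 * so marking and testing are O(1) bit operations:
 *
 *    si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
 *    if (si_is_atom_dirty(sctx, &sctx->atoms.s.cache_flush)) {
 *            // the atom will be re-emitted with the next draw
 *    }
 */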

static inline struct si_shader_ctx_state *si_get_vs(struct si_context *sctx)
{
        if (sctx->gs_shader.cso)
                return &sctx->gs_shader;
        if (sctx->tes_shader.cso)
                return &sctx->tes_shader;

        return &sctx->vs_shader;
}

static inline struct tgsi_shader_info *si_get_vs_info(struct si_context *sctx)
{
        struct si_shader_ctx_state *vs = si_get_vs(sctx);

        return vs->cso ? &vs->cso->info : NULL;
}

static inline struct si_shader* si_get_vs_state(struct si_context *sctx)
{
        if (sctx->gs_shader.cso &&
            sctx->gs_shader.current &&
            !sctx->gs_shader.current->key.as_ngg)
                return sctx->gs_shader.cso->gs_copy_shader;

        struct si_shader_ctx_state *vs = si_get_vs(sctx);
        return vs->current;
}

static inline bool si_can_dump_shader(struct si_screen *sscreen,
                                      unsigned processor)
{
        return sscreen->debug_flags & (1 << processor);
}

static inline bool si_get_strmout_en(struct si_context *sctx)
{
        return sctx->streamout.streamout_enabled ||
               sctx->streamout.prims_gen_query_enabled;
}

static inline unsigned
si_optimal_tcc_alignment(struct si_context *sctx, unsigned upload_size)
{
        unsigned alignment, tcc_cache_line_size;

        /* If the upload size is less than the cache line size (e.g. 16 or
         * 32 bytes), the whole upload will fit into one cache line if we
         * align it to its own size. The idea is that multiple small uploads
         * can then share a cache line. If the upload size is greater,
         * align it to the cache line size.
         */
        alignment = util_next_power_of_two(upload_size);
        tcc_cache_line_size = sctx->screen->info.tcc_cache_line_size;
        return MIN2(alignment, tcc_cache_line_size);
}
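
/* Worked example (illustrative): with a 64-byte TCC cache line, a 24-byte
 * upload is aligned to util_next_power_of_two(24) = 32, so two such uploads
 * can share one cache line; a 200-byte upload gets
 * MIN2(util_next_power_of_two(200) = 256, 64) = 64, i.e. plain cache-line
 * alignment.
 */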

static inline void
si_saved_cs_reference(struct si_saved_cs **dst, struct si_saved_cs *src)
{
        if (pipe_reference(&(*dst)->reference, &src->reference))
                si_destroy_saved_cs(*dst);

        *dst = src;
}

static inline void
si_make_CB_shader_coherent(struct si_context *sctx, unsigned num_samples,
                           bool shaders_read_metadata, bool dcc_pipe_aligned)
{
        sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_CB |
                       SI_CONTEXT_INV_VCACHE;

        if (sctx->chip_class >= GFX10) {
                if (sctx->screen->info.tcc_harvested)
                        sctx->flags |= SI_CONTEXT_INV_L2;
                else if (shaders_read_metadata)
                        sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
        } else if (sctx->chip_class == GFX9) {
                /* Single-sample color is coherent with shaders on GFX9, but
                 * L2 metadata must be flushed if shaders read metadata
                 * (DCC, CMASK).
                 */
                if (num_samples >= 2 ||
                    (shaders_read_metadata && !dcc_pipe_aligned))
                        sctx->flags |= SI_CONTEXT_INV_L2;
                else if (shaders_read_metadata)
                        sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
        } else {
                /* GFX6-GFX8 */
                sctx->flags |= SI_CONTEXT_INV_L2;
        }
}
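
/* Usage sketch (illustrative only): after rendering to a color buffer that
 * a shader will subsequently sample, request CB->shader coherency. Whether
 * the shader also reads DCC/CMASK metadata and whether DCC is pipe-aligned
 * decide how much of L2 must be invalidated:
 *
 *    si_make_CB_shader_coherent(sctx, tex->buffer.b.b.nr_samples,
 *                               vi_dcc_enabled(tex, 0),
 *                               tex->surface.u.gfx9.dcc.pipe_aligned);
 */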

static inline void
si_make_DB_shader_coherent(struct si_context *sctx, unsigned num_samples,
                           bool include_stencil, bool shaders_read_metadata)
{
        sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB |
                       SI_CONTEXT_INV_VCACHE;

        if (sctx->chip_class >= GFX10) {
                if (sctx->screen->info.tcc_harvested)
                        sctx->flags |= SI_CONTEXT_INV_L2;
                else if (shaders_read_metadata)
                        sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
        } else if (sctx->chip_class == GFX9) {
                /* Single-sample depth (not stencil) is coherent with shaders
                 * on GFX9, but L2 metadata must be flushed if shaders read
                 * metadata.
                 */
                if (num_samples >= 2 || include_stencil)
                        sctx->flags |= SI_CONTEXT_INV_L2;
                else if (shaders_read_metadata)
                        sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
        } else {
                /* GFX6-GFX8 */
                sctx->flags |= SI_CONTEXT_INV_L2;
        }
}

static inline bool
si_can_sample_zs(struct si_texture *tex, bool stencil_sampler)
{
        return (stencil_sampler && tex->can_sample_s) ||
               (!stencil_sampler && tex->can_sample_z);
}

static inline bool
si_htile_enabled(struct si_texture *tex, unsigned level, unsigned zs_mask)
{
        if (zs_mask == PIPE_MASK_S && tex->htile_stencil_disabled)
                return false;

        return tex->surface.htile_offset && level == 0;
}

static inline bool
vi_tc_compat_htile_enabled(struct si_texture *tex, unsigned level, unsigned zs_mask)
{
        assert(!tex->tc_compatible_htile || tex->surface.htile_offset);
        return tex->tc_compatible_htile && si_htile_enabled(tex, level, zs_mask);
}

static inline unsigned si_get_ps_iter_samples(struct si_context *sctx)
{
        if (sctx->ps_uses_fbfetch)
                return sctx->framebuffer.nr_color_samples;

        return MIN2(sctx->ps_iter_samples, sctx->framebuffer.nr_color_samples);
}

static inline unsigned si_get_total_colormask(struct si_context *sctx)
{
        if (sctx->queued.named.rasterizer->rasterizer_discard)
                return 0;

        struct si_shader_selector *ps = sctx->ps_shader.cso;
        if (!ps)
                return 0;

        unsigned colormask = sctx->framebuffer.colorbuf_enabled_4bit &
                             sctx->queued.named.blend->cb_target_mask;

        if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
                colormask &= ps->colors_written_4bit;
        else if (!ps->colors_written_4bit)
                colormask = 0; /* color0 writes all cbufs, but it's not written */

        return colormask;
}

#define UTIL_ALL_PRIM_LINE_MODES ((1 << PIPE_PRIM_LINES) | \
                                  (1 << PIPE_PRIM_LINE_LOOP) | \
                                  (1 << PIPE_PRIM_LINE_STRIP) | \
                                  (1 << PIPE_PRIM_LINES_ADJACENCY) | \
                                  (1 << PIPE_PRIM_LINE_STRIP_ADJACENCY))

static inline bool util_prim_is_lines(unsigned prim)
{
        return ((1 << prim) & UTIL_ALL_PRIM_LINE_MODES) != 0;
}

static inline bool util_prim_is_points_or_lines(unsigned prim)
{
        return ((1 << prim) & (UTIL_ALL_PRIM_LINE_MODES |
                               (1 << PIPE_PRIM_POINTS))) != 0;
}

static inline bool util_rast_prim_is_triangles(unsigned prim)
{
        return ((1 << prim) & ((1 << PIPE_PRIM_TRIANGLES) |
                               (1 << PIPE_PRIM_TRIANGLE_STRIP) |
                               (1 << PIPE_PRIM_TRIANGLE_FAN) |
                               (1 << PIPE_PRIM_QUADS) |
                               (1 << PIPE_PRIM_QUAD_STRIP) |
                               (1 << PIPE_PRIM_POLYGON) |
                               (1 << PIPE_PRIM_TRIANGLES_ADJACENCY) |
                               (1 << PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY))) != 0;
}
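
/* Usage sketch (illustrative only): these classify a pipe primitive type by
 * testing its bit in a mask, so each check is a single AND:
 *
 *    util_prim_is_lines(PIPE_PRIM_LINE_STRIP);           // true
 *    util_prim_is_points_or_lines(PIPE_PRIM_TRIANGLES);  // false
 *    util_rast_prim_is_triangles(PIPE_PRIM_QUADS);       // true: quads are
 *                                                        // rasterized as tris
 */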

/**
 * Return true if there is enough memory in VRAM and GTT for the buffers
 * added so far.
 *
 * \param vram VRAM memory size not added to the buffer list yet
 * \param gtt  GTT memory size not added to the buffer list yet
 */
static inline bool
radeon_cs_memory_below_limit(struct si_screen *screen,
                             struct radeon_cmdbuf *cs,
                             uint64_t vram, uint64_t gtt)
{
        vram += cs->used_vram;
        gtt += cs->used_gart;

        /* Anything that goes above the VRAM size should go to GTT. */
        if (vram > screen->info.vram_size)
                gtt += vram - screen->info.vram_size;

        /* Now we just need to check if we have enough GTT. */
        return gtt < screen->info.gart_size * 0.7;
}
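
/* Worked example (illustrative numbers): with 4 GiB of VRAM and 8 GiB of
 * GTT, a CS already using 3.5 GiB of VRAM that wants to add 1 GiB more
 * overflows VRAM by 0.5 GiB; that spill is charged to GTT, and the check
 * passes only while total GTT use stays below 0.7 * 8 GiB = 5.6 GiB. The
 * 0.7 headroom factor leaves room for other processes and the kernel.
 */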

/**
 * Add a buffer to the buffer list for the given command stream (CS).
 *
 * All buffers used by a CS must be added to the list. This tells the kernel
 * driver which buffers are used by GPU commands. Other buffers can
 * be swapped out (not accessible) during execution.
 *
 * The buffer list becomes empty after every context flush and must be
 * rebuilt.
 */
static inline void radeon_add_to_buffer_list(struct si_context *sctx,
                                             struct radeon_cmdbuf *cs,
                                             struct si_resource *bo,
                                             enum radeon_bo_usage usage,
                                             enum radeon_bo_priority priority)
{
        assert(usage);
        sctx->ws->cs_add_buffer(
                cs, bo->buf,
                (enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
                bo->domains, priority);
}
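
/* Usage sketch (illustrative only): register a buffer that shaders will
 * read, e.g. a constant buffer. RADEON_USAGE_SYNCHRONIZED is OR'ed in
 * automatically, so callers only state the access direction and priority:
 *
 *    radeon_add_to_buffer_list(sctx, sctx->gfx_cs, buf,
 *                              RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
 */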

/**
 * Same as above, but also checks memory usage and flushes the context
 * accordingly.
 *
 * When this SHOULD NOT be used:
 *
 * - if si_context_add_resource_size has been called for the buffer
 *   followed by *_need_cs_space for checking the memory usage
 *
 * - if si_need_dma_space has been called for the buffer
 *
 * - when emitting state packets and draw packets (because preceding packets
 *   can't be re-emitted at that point)
 *
 * - if shader resource "enabled_mask" is not up-to-date or there is
 *   a different constraint disallowing a context flush
 */
static inline void
radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
                                        struct si_resource *bo,
                                        enum radeon_bo_usage usage,
                                        enum radeon_bo_priority priority,
                                        bool check_mem)
{
        if (check_mem &&
            !radeon_cs_memory_below_limit(sctx->screen, sctx->gfx_cs,
                                          sctx->vram + bo->vram_usage,
                                          sctx->gtt + bo->gart_usage))
                si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);

        radeon_add_to_buffer_list(sctx, sctx->gfx_cs, bo, usage, priority);
}
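
/* Usage sketch (illustrative only): safe at bind time, when a flush can
 * still happen before any packets reference the buffer. check_mem=true
 * enables the budget check; pass false when the caller has already
 * accounted for the buffer:
 *
 *    radeon_add_to_gfx_buffer_list_check_mem(sctx, buf, RADEON_USAGE_WRITE,
 *                                            RADEON_PRIO_SHADER_RW_BUFFER,
 *                                            true);
 */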

static inline bool si_compute_prim_discard_enabled(struct si_context *sctx)
{
        return sctx->prim_discard_vertex_count_threshold != UINT_MAX;
}

static inline unsigned si_get_wave_size(struct si_screen *sscreen,
                                        enum pipe_shader_type shader_type,
                                        bool ngg, bool es)
{
        if (shader_type == PIPE_SHADER_COMPUTE)
                return sscreen->compute_wave_size;
        else if (shader_type == PIPE_SHADER_FRAGMENT)
                return sscreen->ps_wave_size;
        else if ((shader_type == PIPE_SHADER_VERTEX && es && !ngg) ||
                 (shader_type == PIPE_SHADER_TESS_EVAL && es && !ngg) ||
                 (shader_type == PIPE_SHADER_GEOMETRY && !ngg)) /* legacy GS only supports Wave64 */
                return 64;
        else
                return sscreen->ge_wave_size;
}
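
/* Example outcomes (illustrative): a vertex shader compiled as a legacy ES
 * stage feeding GS (es=true, ngg=false) always gets Wave64, because the
 * legacy geometry path only supports Wave64, while the same shader on the
 * NGG path (ngg=true) gets the screen's configured ge_wave_size.
 */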

static inline unsigned si_get_shader_wave_size(struct si_shader *shader)
{
        return si_get_wave_size(shader->selector->screen, shader->selector->type,
                                shader->key.as_ngg, shader->key.as_es);
}

#define PRINT_ERR(fmt, args...) \
        fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)
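
/* Usage sketch (illustrative only): PRINT_ERR is a GNU-style variadic macro;
 * ##args swallows the trailing comma when no arguments follow the format:
 *
 *    PRINT_ERR("failed to map buffer\n");
 *    PRINT_ERR("invalid wave size: %u\n", wave_size);
 */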

#endif