radeonsi: use compute for clear_render_target when possible
[mesa.git] / src / gallium / drivers / radeonsi / si_pipe.h
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2018 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * on the rights to use, copy, modify, merge, publish, distribute, sub
10 * license, and/or sell copies of the Software, and to permit persons to whom
11 * the Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25 #ifndef SI_PIPE_H
26 #define SI_PIPE_H
27
28 #include "si_shader.h"
29 #include "si_state.h"
30
31 #include "util/u_dynarray.h"
32 #include "util/u_idalloc.h"
33 #include "util/u_threaded_context.h"
34
35 #ifdef PIPE_ARCH_BIG_ENDIAN
36 #define SI_BIG_ENDIAN 1
37 #else
38 #define SI_BIG_ENDIAN 0
39 #endif
40
41 #define ATI_VENDOR_ID 0x1002
42
43 #define SI_NOT_QUERY 0xffffffff
44
45 /* The base vertex and primitive restart can be any number, but we must pick
46 * one which will mean "unknown" for the purpose of state tracking and
47 * the number shouldn't be a commonly-used one. */
48 #define SI_BASE_VERTEX_UNKNOWN INT_MIN
49 #define SI_RESTART_INDEX_UNKNOWN INT_MIN
50 #define SI_INSTANCE_COUNT_UNKNOWN INT_MIN
51 #define SI_NUM_SMOOTH_AA_SAMPLES 8
52 #define SI_MAX_POINT_SIZE 2048
53 #define SI_GS_PER_ES 128
54 /* Alignment for optimal CP DMA performance. */
55 #define SI_CPDMA_ALIGNMENT 32
56
57 /* Tunables for compute-based clear_buffer and copy_buffer: */
58 #define SI_COMPUTE_CLEAR_DW_PER_THREAD 4
59 #define SI_COMPUTE_COPY_DW_PER_THREAD 4
60 #define SI_COMPUTE_DST_CACHE_POLICY L2_STREAM
61
62 /* Pipeline & streamout query controls. */
63 #define SI_CONTEXT_START_PIPELINE_STATS (1 << 0)
64 #define SI_CONTEXT_STOP_PIPELINE_STATS (1 << 1)
65 #define SI_CONTEXT_FLUSH_FOR_RENDER_COND (1 << 2)
66 /* Instruction cache. */
67 #define SI_CONTEXT_INV_ICACHE (1 << 3)
68 /* SMEM L1, other names: KCACHE, constant cache, DCACHE, data cache */
69 #define SI_CONTEXT_INV_SMEM_L1 (1 << 4)
70 /* VMEM L1 can optionally be bypassed (GLC=1). Other names: TC L1 */
71 #define SI_CONTEXT_INV_VMEM_L1 (1 << 5)
72 /* Used by everything except CB/DB, can be bypassed (SLC=1). Other names: TC L2 */
73 #define SI_CONTEXT_INV_GLOBAL_L2 (1 << 6)
74 /* Write dirty L2 lines back to memory (shader and CP DMA stores), but don't
75 * invalidate L2. SI-CIK can't do it, so they will do complete invalidation. */
76 #define SI_CONTEXT_WRITEBACK_GLOBAL_L2 (1 << 7)
77 /* Writeback & invalidate the L2 metadata cache. It can only be coupled with
78 * a CB or DB flush. */
79 #define SI_CONTEXT_INV_L2_METADATA (1 << 8)
80 /* Framebuffer caches. */
81 #define SI_CONTEXT_FLUSH_AND_INV_DB (1 << 9)
82 #define SI_CONTEXT_FLUSH_AND_INV_DB_META (1 << 10)
83 #define SI_CONTEXT_FLUSH_AND_INV_CB (1 << 11)
84 /* Engine synchronization. */
85 #define SI_CONTEXT_VS_PARTIAL_FLUSH (1 << 12)
86 #define SI_CONTEXT_PS_PARTIAL_FLUSH (1 << 13)
87 #define SI_CONTEXT_CS_PARTIAL_FLUSH (1 << 14)
88 #define SI_CONTEXT_VGT_FLUSH (1 << 15)
89 #define SI_CONTEXT_VGT_STREAMOUT_SYNC (1 << 16)
90
91 #define SI_PREFETCH_VBO_DESCRIPTORS (1 << 0)
92 #define SI_PREFETCH_LS (1 << 1)
93 #define SI_PREFETCH_HS (1 << 2)
94 #define SI_PREFETCH_ES (1 << 3)
95 #define SI_PREFETCH_GS (1 << 4)
96 #define SI_PREFETCH_VS (1 << 5)
97 #define SI_PREFETCH_PS (1 << 6)
98
99 #define SI_MAX_BORDER_COLORS 4096
100 #define SI_MAX_VIEWPORTS 16
101 #define SIX_BITS 0x3F
102 #define SI_MAP_BUFFER_ALIGNMENT 64
103 #define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024
104
105 #define SI_RESOURCE_FLAG_TRANSFER (PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
106 #define SI_RESOURCE_FLAG_FLUSHED_DEPTH (PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
107 #define SI_RESOURCE_FLAG_FORCE_MSAA_TILING (PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
108 #define SI_RESOURCE_FLAG_DISABLE_DCC (PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
109 #define SI_RESOURCE_FLAG_UNMAPPABLE (PIPE_RESOURCE_FLAG_DRV_PRIV << 4)
110 #define SI_RESOURCE_FLAG_READ_ONLY (PIPE_RESOURCE_FLAG_DRV_PRIV << 5)
111 #define SI_RESOURCE_FLAG_32BIT (PIPE_RESOURCE_FLAG_DRV_PRIV << 6)
112 #define SI_RESOURCE_FLAG_SO_FILLED_SIZE (PIPE_RESOURCE_FLAG_DRV_PRIV << 7)
113
114 /* Debug flags. */
115 enum {
116 /* Shader logging options: */
117 DBG_VS = PIPE_SHADER_VERTEX,
118 DBG_PS = PIPE_SHADER_FRAGMENT,
119 DBG_GS = PIPE_SHADER_GEOMETRY,
120 DBG_TCS = PIPE_SHADER_TESS_CTRL,
121 DBG_TES = PIPE_SHADER_TESS_EVAL,
122 DBG_CS = PIPE_SHADER_COMPUTE,
123 DBG_NO_IR,
124 DBG_NO_TGSI,
125 DBG_NO_ASM,
126 DBG_PREOPT_IR,
127
128 /* Shader compiler options the shader cache should be aware of: */
129 DBG_FS_CORRECT_DERIVS_AFTER_KILL,
130 DBG_UNSAFE_MATH,
131 DBG_SI_SCHED,
132 DBG_GISEL,
133
134 /* Shader compiler options (with no effect on the shader cache): */
135 DBG_CHECK_IR,
136 DBG_NIR,
137 DBG_MONOLITHIC_SHADERS,
138 DBG_NO_OPT_VARIANT,
139
140 /* Information logging options: */
141 DBG_INFO,
142 DBG_TEX,
143 DBG_COMPUTE,
144 DBG_VM,
145
146 /* Driver options: */
147 DBG_FORCE_DMA,
148 DBG_NO_ASYNC_DMA,
149 DBG_NO_WC,
150 DBG_CHECK_VM,
151 DBG_RESERVE_VMID,
152 DBG_ZERO_VRAM,
153
154 /* 3D engine options: */
155 DBG_SWITCH_ON_EOP,
156 DBG_NO_OUT_OF_ORDER,
157 DBG_NO_DPBB,
158 DBG_NO_DFSM,
159 DBG_DPBB,
160 DBG_DFSM,
161 DBG_NO_HYPERZ,
162 DBG_NO_RB_PLUS,
163 DBG_NO_2D_TILING,
164 DBG_NO_TILING,
165 DBG_NO_DCC,
166 DBG_NO_DCC_CLEAR,
167 DBG_NO_DCC_FB,
168 DBG_NO_DCC_MSAA,
169 DBG_NO_FMASK,
170
171 /* Tests: */
172 DBG_TEST_DMA,
173 DBG_TEST_VMFAULT_CP,
174 DBG_TEST_VMFAULT_SDMA,
175 DBG_TEST_VMFAULT_SHADER,
176 DBG_TEST_DMA_PERF,
177 DBG_TEST_GDS,
178 DBG_TEST_GDS_MM,
179 DBG_TEST_GDS_OA_MM,
180 };
181
182 #define DBG_ALL_SHADERS (((1 << (DBG_CS + 1)) - 1))
183 #define DBG(name) (1ull << DBG_##name)
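
/* Usage sketch (illustrative, not a new API): a debug option is tested by
 * masking debug_flags with DBG(); e.g. skipping DCC when the corresponding
 * option is set. "sscreen" stands for any struct si_screen pointer.
 *
 *    if (sscreen->debug_flags & DBG(NO_DCC))
 *       return;
 */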
184
185 enum si_cache_policy {
186 L2_BYPASS,
187 L2_STREAM, /* same as SLC=1 */
188 L2_LRU, /* same as SLC=0 */
189 };
190
191 enum si_coherency {
192 SI_COHERENCY_NONE, /* no cache flushes needed */
193 SI_COHERENCY_SHADER,
194 SI_COHERENCY_CB_META,
195 SI_COHERENCY_CP,
196 };
197
198 struct si_compute;
199 struct hash_table;
200 struct u_suballocator;
201
202 /* Only 32-bit buffer allocations are supported; gallium doesn't support more
203 * at the moment.
204 */
205 struct si_resource {
206 struct threaded_resource b;
207
208 /* Winsys objects. */
209 struct pb_buffer *buf;
210 uint64_t gpu_address;
211 /* Memory usage if the buffer placement is optimal. */
212 uint64_t vram_usage;
213 uint64_t gart_usage;
214
215 /* Resource properties. */
216 uint64_t bo_size;
217 unsigned bo_alignment;
218 enum radeon_bo_domain domains;
219 enum radeon_bo_flag flags;
220 unsigned bind_history;
221 int max_forced_staging_uploads;
222
223 /* The buffer range which is initialized (with a write transfer,
224 * streamout, DMA, or as a random access target). The rest of
225 * the buffer is considered invalid and can be mapped unsynchronized.
226 *
227 * This allows unsynchronized mapping of a buffer range which hasn't
228 * been used yet. It's for applications which forget to use
229 * the unsynchronized map flag and expect the driver to figure it out.
230 */
231 struct util_range valid_buffer_range;
232
233 /* For buffers only. This indicates that a write operation has been
234 * performed by TC L2, but the cache hasn't been flushed.
235 * Any hw block which doesn't use or bypasses TC L2 should check this
236 * flag and flush the cache before using the buffer.
237 *
238 * For example, TC L2 must be flushed if a buffer which has been
239 * modified by a shader store instruction is about to be used as
240 * an index buffer. The reason is that VGT DMA index fetching doesn't
241 * use TC L2.
242 */
243 bool TC_L2_dirty;
244
245 /* Whether this resource is referenced by bindless handles. */
246 bool texture_handle_allocated;
247 bool image_handle_allocated;
248
249 /* Whether the resource has been exported via resource_get_handle. */
250 unsigned external_usage; /* PIPE_HANDLE_USAGE_* */
251 };
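
/* Illustrative sketch (hypothetical helper, not part of this header): a
 * caller that feeds a buffer to a block which bypasses TC L2 (e.g. VGT DMA
 * index fetch) is expected to honor TC_L2_dirty using the writeback flag
 * defined above:
 *
 *    static inline void si_writeback_if_L2_dirty(struct si_context *sctx,
 *                                                struct si_resource *buf)
 *    {
 *       if (buf->TC_L2_dirty) {
 *          sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
 *          buf->TC_L2_dirty = false;
 *       }
 *    }
 */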
252
253 struct si_transfer {
254 struct threaded_transfer b;
255 struct si_resource *staging;
256 unsigned offset;
257 };
258
259 struct si_texture {
260 struct si_resource buffer;
261
262 struct radeon_surf surface;
263 uint64_t size;
264 struct si_texture *flushed_depth_texture;
265
266 /* Colorbuffer compression and fast clear. */
267 uint64_t fmask_offset;
268 uint64_t cmask_offset;
269 uint64_t cmask_base_address_reg;
270 struct si_resource *cmask_buffer;
271 uint64_t dcc_offset; /* 0 = disabled */
272 unsigned cb_color_info; /* fast clear enable bit */
273 unsigned color_clear_value[2];
274 unsigned last_msaa_resolve_target_micro_mode;
275 unsigned num_level0_transfers;
276
277 /* Depth buffer compression and fast clear. */
278 uint64_t htile_offset;
279 float depth_clear_value;
280 uint16_t dirty_level_mask; /* each bit says if that mipmap is compressed */
281 uint16_t stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
282 enum pipe_format db_render_format:16;
283 uint8_t stencil_clear_value;
284 bool tc_compatible_htile:1;
285 bool depth_cleared:1; /* if it was cleared at least once */
286 bool stencil_cleared:1; /* if it was cleared at least once */
287 bool upgraded_depth:1; /* upgraded from unorm to Z32_FLOAT */
288 bool is_depth:1;
289 bool db_compatible:1;
290 bool can_sample_z:1;
291 bool can_sample_s:1;
292
293 /* We need to track DCC dirtiness, because st/dri usually calls
294 * flush_resource twice per frame (not a bug) and we don't want to
295 * decompress DCC twice. Also, the dirty tracking must be done even
296 * if DCC isn't used, because it's required by the DCC usage analysis
297 * for a possible future enablement.
298 */
299 bool separate_dcc_dirty:1;
300 /* Statistics gathering for the DCC enablement heuristic. */
301 bool dcc_gather_statistics:1;
302 /* Counter that should be non-zero if the texture is bound to a
303 * framebuffer.
304 */
305 unsigned framebuffers_bound;
306 /* Whether the texture is a displayable back buffer and needs DCC
307 * decompression, which is expensive. Therefore, it's enabled only
308 * if statistics suggest that it will pay off and it's allocated
309 * separately. It can't be bound as a sampler by apps. Limited to
310 * target == 2D and last_level == 0. If enabled, dcc_offset contains
311 * the absolute GPUVM address, not the relative one.
312 */
313 struct si_resource *dcc_separate_buffer;
314 /* When DCC is temporarily disabled, the separate buffer is here. */
315 struct si_resource *last_dcc_separate_buffer;
316 /* Estimate of how much this color buffer is written to in units of
317 * full-screen draws: ps_invocations / (width * height)
318 * Shader kills, late Z, and blending with trivial discards make it
319 * inaccurate (we need to count CB updates, not PS invocations).
320 */
321 unsigned ps_draw_ratio;
322 /* The number of clears since the last DCC usage analysis. */
323 unsigned num_slow_clears;
324 };
325
326 struct si_surface {
327 struct pipe_surface base;
328
329 /* These can vary with block-compressed textures. */
330 uint16_t width0;
331 uint16_t height0;
332
333 bool color_initialized:1;
334 bool depth_initialized:1;
335
336 /* Misc. color flags. */
337 bool color_is_int8:1;
338 bool color_is_int10:1;
339 bool dcc_incompatible:1;
340
341 /* Color registers. */
342 unsigned cb_color_info;
343 unsigned cb_color_view;
344 unsigned cb_color_attrib;
345 unsigned cb_color_attrib2; /* GFX9 and later */
346 unsigned cb_dcc_control; /* VI and later */
347 unsigned spi_shader_col_format:8; /* no blending, no alpha-to-coverage. */
348 unsigned spi_shader_col_format_alpha:8; /* alpha-to-coverage */
349 unsigned spi_shader_col_format_blend:8; /* blending without alpha. */
350 unsigned spi_shader_col_format_blend_alpha:8; /* blending with alpha. */
351
352 /* DB registers. */
353 uint64_t db_depth_base; /* DB_Z_READ/WRITE_BASE */
354 uint64_t db_stencil_base;
355 uint64_t db_htile_data_base;
356 unsigned db_depth_info;
357 unsigned db_z_info;
358 unsigned db_z_info2; /* GFX9+ */
359 unsigned db_depth_view;
360 unsigned db_depth_size;
361 unsigned db_depth_slice;
362 unsigned db_stencil_info;
363 unsigned db_stencil_info2; /* GFX9+ */
364 unsigned db_htile_surface;
365 };
366
367 struct si_mmio_counter {
368 unsigned busy;
369 unsigned idle;
370 };
371
372 union si_mmio_counters {
373 struct {
374 /* For global GPU load including SDMA. */
375 struct si_mmio_counter gpu;
376
377 /* GRBM_STATUS */
378 struct si_mmio_counter spi;
379 struct si_mmio_counter gui;
380 struct si_mmio_counter ta;
381 struct si_mmio_counter gds;
382 struct si_mmio_counter vgt;
383 struct si_mmio_counter ia;
384 struct si_mmio_counter sx;
385 struct si_mmio_counter wd;
386 struct si_mmio_counter bci;
387 struct si_mmio_counter sc;
388 struct si_mmio_counter pa;
389 struct si_mmio_counter db;
390 struct si_mmio_counter cp;
391 struct si_mmio_counter cb;
392
393 /* SRBM_STATUS2 */
394 struct si_mmio_counter sdma;
395
396 /* CP_STAT */
397 struct si_mmio_counter pfp;
398 struct si_mmio_counter meq;
399 struct si_mmio_counter me;
400 struct si_mmio_counter surf_sync;
401 struct si_mmio_counter cp_dma;
402 struct si_mmio_counter scratch_ram;
403 } named;
404 unsigned array[0];
405 };
406
407 struct si_memory_object {
408 struct pipe_memory_object b;
409 struct pb_buffer *buf;
410 uint32_t stride;
411 };
412
413 /* Saved CS data for debugging features. */
414 struct radeon_saved_cs {
415 uint32_t *ib;
416 unsigned num_dw;
417
418 struct radeon_bo_list_item *bo_list;
419 unsigned bo_count;
420 };
421
422 struct si_screen {
423 struct pipe_screen b;
424 struct radeon_winsys *ws;
425 struct disk_cache *disk_shader_cache;
426
427 struct radeon_info info;
428 uint64_t debug_flags;
429 char renderer_string[183];
430
431 unsigned pa_sc_raster_config;
432 unsigned pa_sc_raster_config_1;
433 unsigned se_tile_repeat;
434 unsigned gs_table_depth;
435 unsigned tess_offchip_block_dw_size;
436 unsigned tess_offchip_ring_size;
437 unsigned tess_factor_ring_size;
438 unsigned vgt_hs_offchip_param;
439 unsigned eqaa_force_coverage_samples;
440 unsigned eqaa_force_z_samples;
441 unsigned eqaa_force_color_samples;
442 bool has_clear_state;
443 bool has_distributed_tess;
444 bool has_draw_indirect_multi;
445 bool has_out_of_order_rast;
446 bool assume_no_z_fights;
447 bool commutative_blend_add;
448 bool clear_db_cache_before_clear;
449 bool has_msaa_sample_loc_bug;
450 bool has_ls_vgpr_init_bug;
451 bool has_dcc_constant_encode;
452 bool dpbb_allowed;
453 bool dfsm_allowed;
454 bool llvm_has_working_vgpr_indexing;
455
456 /* Whether shaders are monolithic (1-part) or separate (3-part). */
457 bool use_monolithic_shaders;
458 bool record_llvm_ir;
459 bool has_rbplus; /* if RB+ registers exist */
460 bool rbplus_allowed; /* if RB+ is allowed */
461 bool dcc_msaa_allowed;
462 bool cpdma_prefetch_writes_memory;
463
464 struct slab_parent_pool pool_transfers;
465
466 /* Texture filter settings. */
467 int force_aniso; /* -1 = disabled */
468
469 /* Auxiliary context. Mainly used to initialize resources.
470 * It must be locked prior to use and flushed before unlocking. */
471 struct pipe_context *aux_context;
472 mtx_t aux_context_lock;
473
474 /* This must be in the screen, because UE4 uses one context for
475 * compilation and another one for rendering.
476 */
477 unsigned num_compilations;
478 /* Along with ST_DEBUG=precompile, this should show if applications
479 * are loading shaders on demand. This is a monotonic counter.
480 */
481 unsigned num_shaders_created;
482 unsigned num_shader_cache_hits;
483
484 /* GPU load thread. */
485 mtx_t gpu_load_mutex;
486 thrd_t gpu_load_thread;
487 union si_mmio_counters mmio_counters;
488 volatile unsigned gpu_load_stop_thread; /* bool */
489
490 /* Performance counters. */
491 struct si_perfcounters *perfcounters;
492
493 /* If pipe_screen wants to recompute and re-emit the framebuffer,
494 * sampler, and image states of all contexts, it should atomically
495 * increment this.
496 *
497 * Each context will compare this with its own last known value of
498 * the counter before drawing and re-emit the states accordingly.
499 */
500 unsigned dirty_tex_counter;
501
502 /* Atomically increment this counter when an existing texture's
503 * metadata is enabled or disabled in a way that requires changing
504 * contexts' compressed texture binding masks.
505 */
506 unsigned compressed_colortex_counter;
507
508 struct {
509 /* Context flags to set so that all writes from earlier jobs
510 * in the CP are seen by L2 clients.
511 */
512 unsigned cp_to_L2;
513
514 /* Context flags to set so that all writes from earlier jobs
515 * that end in L2 are seen by CP.
516 */
517 unsigned L2_to_cp;
518 } barrier_flags;
519
520 mtx_t shader_parts_mutex;
521 struct si_shader_part *vs_prologs;
522 struct si_shader_part *tcs_epilogs;
523 struct si_shader_part *gs_prologs;
524 struct si_shader_part *ps_prologs;
525 struct si_shader_part *ps_epilogs;
526
527 /* Shader cache in memory.
528 *
529 * Design & limitations:
530 * - The shader cache is per screen (= per process), never saved to
531 * disk, and skips redundant shader compilations from TGSI to bytecode.
532 * - It can only be used with one-variant-per-shader support, in which
533 * case only the main (typically middle) part of shaders is cached.
534 * - Only VS, TCS, TES, PS are cached, out of which only the hw VS
535 * variants of VS and TES are cached, so LS and ES aren't.
536 * - GS and CS aren't cached, but it's certainly possible to cache
537 * those as well.
538 */
539 mtx_t shader_cache_mutex;
540 struct hash_table *shader_cache;
541
542 /* Shader compiler queue for multithreaded compilation. */
543 struct util_queue shader_compiler_queue;
544 /* Use at most 3 normal compiler threads on quadcore and better.
545 * Hyperthreaded CPUs report the number of threads, but we want
546 * the number of cores. We only need this many threads for shader-db. */
547 struct ac_llvm_compiler compiler[24]; /* used by the queue only */
548
549 struct util_queue shader_compiler_queue_low_priority;
550 /* Use at most 2 low priority threads on quadcore and better.
551 * We want to minimize the impact on multithreaded Mesa. */
552 struct ac_llvm_compiler compiler_lowp[10];
553 };
554
555 struct si_blend_color {
556 struct pipe_blend_color state;
557 bool any_nonzeros;
558 };
559
560 struct si_sampler_view {
561 struct pipe_sampler_view base;
562 /* [0..7] = image descriptor
563 * [4..7] = buffer descriptor */
564 uint32_t state[8];
565 uint32_t fmask_state[8];
566 const struct legacy_surf_level *base_level_info;
567 ubyte base_level;
568 ubyte block_width;
569 bool is_stencil_sampler;
570 bool is_integer;
571 bool dcc_incompatible;
572 };
573
574 #define SI_SAMPLER_STATE_MAGIC 0x34f1c35a
575
576 struct si_sampler_state {
577 #ifdef DEBUG
578 unsigned magic;
579 #endif
580 uint32_t val[4];
581 uint32_t integer_val[4];
582 uint32_t upgraded_depth_val[4];
583 };
584
585 struct si_cs_shader_state {
586 struct si_compute *program;
587 struct si_compute *emitted_program;
588 unsigned offset;
589 bool initialized;
590 bool uses_scratch;
591 };
592
593 struct si_samplers {
594 struct pipe_sampler_view *views[SI_NUM_SAMPLERS];
595 struct si_sampler_state *sampler_states[SI_NUM_SAMPLERS];
596
597 /* The i-th bit is set if that element is enabled (non-NULL resource). */
598 unsigned enabled_mask;
599 uint32_t needs_depth_decompress_mask;
600 uint32_t needs_color_decompress_mask;
601 };
602
603 struct si_images {
604 struct pipe_image_view views[SI_NUM_IMAGES];
605 uint32_t needs_color_decompress_mask;
606 unsigned enabled_mask;
607 };
608
609 struct si_framebuffer {
610 struct pipe_framebuffer_state state;
611 unsigned colorbuf_enabled_4bit;
612 unsigned spi_shader_col_format;
613 unsigned spi_shader_col_format_alpha;
614 unsigned spi_shader_col_format_blend;
615 unsigned spi_shader_col_format_blend_alpha;
616 ubyte nr_samples:5; /* at most 16xAA */
617 ubyte log_samples:3; /* at most 4 = 16xAA */
618 ubyte nr_color_samples; /* at most 8xAA */
619 ubyte compressed_cb_mask;
620 ubyte uncompressed_cb_mask;
621 ubyte color_is_int8;
622 ubyte color_is_int10;
623 ubyte dirty_cbufs;
624 ubyte dcc_overwrite_combiner_watermark;
625 bool dirty_zsbuf;
626 bool any_dst_linear;
627 bool CB_has_shader_readable_metadata;
628 bool DB_has_shader_readable_metadata;
629 };
630
631 enum si_quant_mode {
632 /* This is the list we want to support. */
633 SI_QUANT_MODE_16_8_FIXED_POINT_1_256TH,
634 SI_QUANT_MODE_14_10_FIXED_POINT_1_1024TH,
635 SI_QUANT_MODE_12_12_FIXED_POINT_1_4096TH,
636 };
637
638 struct si_signed_scissor {
639 int minx;
640 int miny;
641 int maxx;
642 int maxy;
643 enum si_quant_mode quant_mode;
644 };
645
646 struct si_scissors {
647 unsigned dirty_mask;
648 struct pipe_scissor_state states[SI_MAX_VIEWPORTS];
649 };
650
651 struct si_viewports {
652 unsigned dirty_mask;
653 unsigned depth_range_dirty_mask;
654 struct pipe_viewport_state states[SI_MAX_VIEWPORTS];
655 struct si_signed_scissor as_scissor[SI_MAX_VIEWPORTS];
656 };
657
658 struct si_clip_state {
659 struct pipe_clip_state state;
660 bool any_nonzeros;
661 };
662
663 struct si_streamout_target {
664 struct pipe_stream_output_target b;
665
666 /* The buffer where BUFFER_FILLED_SIZE is stored. */
667 struct si_resource *buf_filled_size;
668 unsigned buf_filled_size_offset;
669 bool buf_filled_size_valid;
670
671 unsigned stride_in_dw;
672 };
673
674 struct si_streamout {
675 bool begin_emitted;
676
677 unsigned enabled_mask;
678 unsigned num_targets;
679 struct si_streamout_target *targets[PIPE_MAX_SO_BUFFERS];
680
681 unsigned append_bitmask;
682 bool suspended;
683
684 /* External state which comes from the vertex shader;
685 * it must be set explicitly when binding a shader. */
686 uint16_t *stride_in_dw;
687 unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
688
689 /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
690 unsigned hw_enabled_mask;
691
692 /* The state of VGT_STRMOUT_(CONFIG|EN). */
693 bool streamout_enabled;
694 bool prims_gen_query_enabled;
695 int num_prims_gen_queries;
696 };
697
698 /* A shader state consists of the shader selector, which is a constant state
699 * object shared by multiple contexts and shouldn't be modified, and
700 * the current shader variant selected for this context.
701 */
702 struct si_shader_ctx_state {
703 struct si_shader_selector *cso;
704 struct si_shader *current;
705 };
706
707 #define SI_NUM_VGT_PARAM_KEY_BITS 12
708 #define SI_NUM_VGT_PARAM_STATES (1 << SI_NUM_VGT_PARAM_KEY_BITS)
709
710 /* The IA_MULTI_VGT_PARAM key used to index the table of precomputed values.
711 * Some fields are set by state-change calls, most are set by draw_vbo.
712 */
713 union si_vgt_param_key {
714 struct {
715 #ifdef PIPE_ARCH_LITTLE_ENDIAN
716 unsigned prim:4;
717 unsigned uses_instancing:1;
718 unsigned multi_instances_smaller_than_primgroup:1;
719 unsigned primitive_restart:1;
720 unsigned count_from_stream_output:1;
721 unsigned line_stipple_enabled:1;
722 unsigned uses_tess:1;
723 unsigned tess_uses_prim_id:1;
724 unsigned uses_gs:1;
725 unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
726 #else /* PIPE_ARCH_BIG_ENDIAN */
727 unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
728 unsigned uses_gs:1;
729 unsigned tess_uses_prim_id:1;
730 unsigned uses_tess:1;
731 unsigned line_stipple_enabled:1;
732 unsigned count_from_stream_output:1;
733 unsigned primitive_restart:1;
734 unsigned multi_instances_smaller_than_primgroup:1;
735 unsigned uses_instancing:1;
736 unsigned prim:4;
737 #endif
738 } u;
739 uint32_t index;
740 };
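
/* Illustrative sketch (assumed caller pattern, not the actual draw code):
 * draw_vbo fills in the per-draw bitfields, then key.index selects the
 * precomputed value from sctx->ia_multi_vgt_param[]. "prim" and "restart"
 * are hypothetical locals.
 *
 *    union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
 *    key.u.prim = prim;
 *    key.u.primitive_restart = restart;
 *    unsigned ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index];
 */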
741
742 struct si_texture_handle
743 {
744 unsigned desc_slot;
745 bool desc_dirty;
746 struct pipe_sampler_view *view;
747 struct si_sampler_state sstate;
748 };
749
750 struct si_image_handle
751 {
752 unsigned desc_slot;
753 bool desc_dirty;
754 struct pipe_image_view view;
755 };
756
757 struct si_saved_cs {
758 struct pipe_reference reference;
759 struct si_context *ctx;
760 struct radeon_saved_cs gfx;
761 struct si_resource *trace_buf;
762 unsigned trace_id;
763
764 unsigned gfx_last_dw;
765 bool flushed;
766 int64_t time_flush;
767 };
768
769 struct si_context {
770 struct pipe_context b; /* base class */
771
772 enum radeon_family family;
773 enum chip_class chip_class;
774
775 struct radeon_winsys *ws;
776 struct radeon_winsys_ctx *ctx;
777 struct radeon_cmdbuf *gfx_cs;
778 struct radeon_cmdbuf *dma_cs;
779 struct pipe_fence_handle *last_gfx_fence;
780 struct pipe_fence_handle *last_sdma_fence;
781 struct si_resource *eop_bug_scratch;
782 struct u_upload_mgr *cached_gtt_allocator;
783 struct threaded_context *tc;
784 struct u_suballocator *allocator_zeroed_memory;
785 struct slab_child_pool pool_transfers;
786 struct slab_child_pool pool_transfers_unsync; /* for threaded_context */
787 struct pipe_device_reset_callback device_reset_callback;
788 struct u_log_context *log;
789 void *query_result_shader;
790 struct blitter_context *blitter;
791 void *custom_dsa_flush;
792 void *custom_blend_resolve;
793 void *custom_blend_fmask_decompress;
794 void *custom_blend_eliminate_fastclear;
795 void *custom_blend_dcc_decompress;
796 void *vs_blit_pos;
797 void *vs_blit_pos_layered;
798 void *vs_blit_color;
799 void *vs_blit_color_layered;
800 void *vs_blit_texcoord;
801 void *cs_clear_buffer;
802 void *cs_copy_buffer;
803 void *cs_copy_image;
804 void *cs_copy_image_1d_array;
805 void *cs_clear_render_target;
806 void *cs_clear_render_target_1d_array;
807 struct si_screen *screen;
808 struct pipe_debug_callback debug;
809 struct ac_llvm_compiler compiler; /* only non-threaded compilation */
810 struct si_shader_ctx_state fixed_func_tcs_shader;
811 struct si_resource *wait_mem_scratch;
812 unsigned wait_mem_number;
813 uint16_t prefetch_L2_mask;
814
815 bool gfx_flush_in_progress:1;
816 bool gfx_last_ib_is_busy:1;
817 bool compute_is_busy:1;
818
819 unsigned num_gfx_cs_flushes;
820 unsigned initial_gfx_cs_size;
821 unsigned gpu_reset_counter;
822 unsigned last_dirty_tex_counter;
823 unsigned last_compressed_colortex_counter;
824 unsigned last_num_draw_calls;
825 unsigned flags; /* flush flags */
826 /* Current unaccounted memory usage. */
827 uint64_t vram;
828 uint64_t gtt;
829
830 /* Atoms (direct states). */
831 union si_state_atoms atoms;
832 unsigned dirty_atoms; /* mask */
833 /* PM4 states (precomputed immutable states) */
834 unsigned dirty_states;
835 union si_state queued;
836 union si_state emitted;
837
838 /* Atom declarations. */
839 struct si_framebuffer framebuffer;
840 unsigned sample_locs_num_samples;
841 uint16_t sample_mask;
842 unsigned last_cb_target_mask;
843 struct si_blend_color blend_color;
844 struct si_clip_state clip_state;
845 struct si_shader_data shader_pointers;
846 struct si_stencil_ref stencil_ref;
847 struct si_scissors scissors;
848 struct si_streamout streamout;
849 struct si_viewports viewports;
850 unsigned num_window_rectangles;
851 bool window_rectangles_include;
852 struct pipe_scissor_state window_rectangles[4];
853
854 /* Precomputed states. */
855 struct si_pm4_state *init_config;
856 struct si_pm4_state *init_config_gs_rings;
857 bool init_config_has_vgt_flush;
858 struct si_pm4_state *vgt_shader_config[4];
859
860 /* shaders */
861 struct si_shader_ctx_state ps_shader;
862 struct si_shader_ctx_state gs_shader;
863 struct si_shader_ctx_state vs_shader;
864 struct si_shader_ctx_state tcs_shader;
865 struct si_shader_ctx_state tes_shader;
866 struct si_cs_shader_state cs_shader_state;
867
868 /* shader information */
869 struct si_vertex_elements *vertex_elements;
870 unsigned sprite_coord_enable;
871 unsigned cs_max_waves_per_sh;
872 bool flatshade;
873 bool do_update_shaders;
874
875 /* vertex buffer descriptors */
876 uint32_t *vb_descriptors_gpu_list;
877 struct si_resource *vb_descriptors_buffer;
878 unsigned vb_descriptors_offset;
879
880 /* shader descriptors */
881 struct si_descriptors descriptors[SI_NUM_DESCS];
882 unsigned descriptors_dirty;
883 unsigned shader_pointers_dirty;
884 unsigned shader_needs_decompress_mask;
885 struct si_buffer_resources rw_buffers;
886 struct si_buffer_resources const_and_shader_buffers[SI_NUM_SHADERS];
887 struct si_samplers samplers[SI_NUM_SHADERS];
888 struct si_images images[SI_NUM_SHADERS];
889
890 /* other shader resources */
891 struct pipe_constant_buffer null_const_buf; /* used for set_constant_buffer(NULL) on CIK */
892 struct pipe_resource *esgs_ring;
893 struct pipe_resource *gsvs_ring;
894 struct pipe_resource *tess_rings;
895 union pipe_color_union *border_color_table; /* in CPU memory, any endian */
896 struct si_resource *border_color_buffer;
897 union pipe_color_union *border_color_map; /* in VRAM (slow access), little endian */
898 unsigned border_color_count;
899 unsigned num_vs_blit_sgprs;
900 uint32_t vs_blit_sh_data[SI_VS_BLIT_SGPRS_POS_TEXCOORD];
901 uint32_t cs_user_data[4];
902
903 /**
904 * last_block allows disabling threads at the farthermost grid boundary.
905 * Full blocks as specified by "block" are launched, but the threads
906 * outside of "last_block" dimensions are disabled.
907 *
908 * If a block touches the grid boundary in the i-th axis, threads with
909 * THREAD_ID[i] >= last_block[i] are disabled.
910 *
911 * If last_block[i] is 0, it has the same behavior as last_block[i] = block[i],
912 * meaning no effect.
913 *
914 * It's equivalent to doing this at the beginning of the compute shader:
915 *
916 * for (i = 0; i < 3; i++) {
917 * if (block_id[i] == grid[i] - 1 &&
918 * last_block[i] && thread_id[i] >= last_block[i])
919 * return;
920 * }
921 * (this could be moved into pipe_grid_info)
922 */
923 uint compute_last_block[3];
924
925 /* Vertex and index buffers. */
926 bool vertex_buffers_dirty;
927 bool vertex_buffer_pointer_dirty;
928 struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
929
930 /* MSAA config state. */
931 int ps_iter_samples;
932 bool ps_uses_fbfetch;
933 bool smoothing_enabled;
934
935 /* DB render state. */
936 unsigned ps_db_shader_control;
937 unsigned dbcb_copy_sample;
938 bool dbcb_depth_copy_enabled:1;
939 bool dbcb_stencil_copy_enabled:1;
940 bool db_flush_depth_inplace:1;
941 bool db_flush_stencil_inplace:1;
942 bool db_depth_clear:1;
943 bool db_depth_disable_expclear:1;
944 bool db_stencil_clear:1;
945 bool db_stencil_disable_expclear:1;
946 bool occlusion_queries_disabled:1;
947 bool generate_mipmap_for_depth:1;
948
949 /* Emitted draw state. */
950 bool gs_tri_strip_adj_fix:1;
951 bool ls_vgpr_fix:1;
952 int last_index_size;
953 int last_base_vertex;
954 int last_start_instance;
955 int last_instance_count;
956 int last_drawid;
957 int last_sh_base_reg;
958 int last_primitive_restart_en;
959 int last_restart_index;
960 int last_prim;
961 int last_multi_vgt_param;
962 int last_rast_prim;
963 unsigned last_sc_line_stipple;
964 unsigned current_vs_state;
965 unsigned last_vs_state;
966 enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
967
968 /* Scratch buffer */
969 struct si_resource *scratch_buffer;
970 unsigned scratch_waves;
971 unsigned spi_tmpring_size;
972
973 struct si_resource *compute_scratch_buffer;
974
975 /* Emitted derived tessellation state. */
976 /* Local shader (VS), or HS if LS-HS are merged. */
977 struct si_shader *last_ls;
978 struct si_shader_selector *last_tcs;
979 int last_num_tcs_input_cp;
980 int last_tes_sh_base;
981 bool last_tess_uses_primid;
982 unsigned last_num_patches;
983 int last_ls_hs_config;
984
985 /* Debug state. */
986 bool is_debug;
987 struct si_saved_cs *current_saved_cs;
988 uint64_t dmesg_timestamp;
989 unsigned apitrace_call_number;
990
991 /* Other state */
992 bool need_check_render_feedback;
993 bool decompression_enabled;
994 bool dpbb_force_off;
995 bool vs_writes_viewport_index;
996 bool vs_disables_clipping_viewport;
997
998 /* Precomputed IA_MULTI_VGT_PARAM */
999 union si_vgt_param_key ia_multi_vgt_param_key;
1000 unsigned ia_multi_vgt_param[SI_NUM_VGT_PARAM_STATES];
1001
1002 /* Bindless descriptors. */
1003 struct si_descriptors bindless_descriptors;
1004 struct util_idalloc bindless_used_slots;
1005 unsigned num_bindless_descriptors;
1006 bool bindless_descriptors_dirty;
1007 bool graphics_bindless_pointer_dirty;
1008 bool compute_bindless_pointer_dirty;
1009
1010 /* Allocated bindless handles */
1011 struct hash_table *tex_handles;
1012 struct hash_table *img_handles;
1013
1014 /* Resident bindless handles */
1015 struct util_dynarray resident_tex_handles;
1016 struct util_dynarray resident_img_handles;
1017
1018 /* Resident bindless handles which need decompression */
1019 struct util_dynarray resident_tex_needs_color_decompress;
1020 struct util_dynarray resident_img_needs_color_decompress;
1021 struct util_dynarray resident_tex_needs_depth_decompress;
1022
1023 /* Bindless state */
1024 bool uses_bindless_samplers;
1025 bool uses_bindless_images;
1026
1027 /* MSAA sample locations.
1028 * The first index is the sample index.
1029 * The second index is the coordinate: X, Y. */
1030 struct {
1031 float x1[1][2];
1032 float x2[2][2];
1033 float x4[4][2];
1034 float x8[8][2];
1035 float x16[16][2];
1036 } sample_positions;
1037 struct pipe_resource *sample_pos_buffer;
1038
1039 /* Misc stats. */
1040 unsigned num_draw_calls;
1041 unsigned num_decompress_calls;
1042 unsigned num_mrt_draw_calls;
1043 unsigned num_prim_restart_calls;
1044 unsigned num_spill_draw_calls;
1045 unsigned num_compute_calls;
1046 unsigned num_spill_compute_calls;
1047 unsigned num_dma_calls;
1048 unsigned num_cp_dma_calls;
1049 unsigned num_vs_flushes;
1050 unsigned num_ps_flushes;
1051 unsigned num_cs_flushes;
1052 unsigned num_cb_cache_flushes;
1053 unsigned num_db_cache_flushes;
1054 unsigned num_L2_invalidates;
1055 unsigned num_L2_writebacks;
1056 unsigned num_resident_handles;
1057 uint64_t num_alloc_tex_transfer_bytes;
1058 unsigned last_tex_ps_draw_ratio; /* for query */
1059 unsigned context_roll_counter;
1060
1061 /* Queries. */
1062 /* Maintain the list of active queries for pausing between IBs. */
1063 int num_occlusion_queries;
1064 int num_perfect_occlusion_queries;
1065 struct list_head active_queries;
1066 unsigned num_cs_dw_queries_suspend;
1067
1068 /* Render condition. */
1069 struct pipe_query *render_cond;
1070 unsigned render_cond_mode;
1071 bool render_cond_invert;
1072 bool render_cond_force_off; /* for u_blitter */
1073
1074 /* Statistics gathering for the DCC enablement heuristic. It can't be
1075 * in si_texture because si_texture can be shared by multiple
1076 * contexts. This is for back buffers only. We shouldn't get too many
1077 * of those.
1078 *
1079 * X11 DRI3 rotates among a finite set of back buffers. They should
1080 * all fit in this array. If they don't, separate DCC might never be
1081 * enabled by DCC stat gathering.
1082 */
1083 struct {
1084 struct si_texture *tex;
1085 /* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
1086 struct pipe_query *ps_stats[3];
1087 /* If all slots are used and another slot is needed,
1088 * the least recently used slot is evicted based on this. */
1089 int64_t last_use_timestamp;
1090 bool query_active;
1091 } dcc_stats[5];
1092
1093 /* Copy one resource to another using async DMA. */
1094 void (*dma_copy)(struct pipe_context *ctx,
1095 struct pipe_resource *dst,
1096 unsigned dst_level,
1097 unsigned dst_x, unsigned dst_y, unsigned dst_z,
1098 struct pipe_resource *src,
1099 unsigned src_level,
1100 const struct pipe_box *src_box);
1101
1102 struct si_tracked_regs tracked_regs;
1103 };
1104
1105 /* cik_sdma.c */
1106 void cik_init_sdma_functions(struct si_context *sctx);
1107
1108 /* si_blit.c */
1109 enum si_blitter_op /* bitmask */
1110 {
1111 SI_SAVE_TEXTURES = 1,
1112 SI_SAVE_FRAMEBUFFER = 2,
1113 SI_SAVE_FRAGMENT_STATE = 4,
1114 SI_DISABLE_RENDER_COND = 8,
1115 };
1116
1117 void si_blitter_begin(struct si_context *sctx, enum si_blitter_op op);
1118 void si_blitter_end(struct si_context *sctx);
1119 void si_init_blit_functions(struct si_context *sctx);
1120 void si_decompress_textures(struct si_context *sctx, unsigned shader_mask);
1121 void si_resource_copy_region(struct pipe_context *ctx,
1122 struct pipe_resource *dst,
1123 unsigned dst_level,
1124 unsigned dstx, unsigned dsty, unsigned dstz,
1125 struct pipe_resource *src,
1126 unsigned src_level,
1127 const struct pipe_box *src_box);
1128 void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex);
1129 void si_blit_decompress_depth(struct pipe_context *ctx,
1130 struct si_texture *texture,
1131 struct si_texture *staging,
1132 unsigned first_level, unsigned last_level,
1133 unsigned first_layer, unsigned last_layer,
1134 unsigned first_sample, unsigned last_sample);
1135
1136 /* si_buffer.c */
1137 bool si_rings_is_buffer_referenced(struct si_context *sctx,
1138 struct pb_buffer *buf,
1139 enum radeon_bo_usage usage);
1140 void *si_buffer_map_sync_with_rings(struct si_context *sctx,
1141 struct si_resource *resource,
1142 unsigned usage);
1143 void si_init_resource_fields(struct si_screen *sscreen,
1144 struct si_resource *res,
1145 uint64_t size, unsigned alignment);
1146 bool si_alloc_resource(struct si_screen *sscreen,
1147 struct si_resource *res);
1148 struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
1149 unsigned flags, unsigned usage,
1150 unsigned size, unsigned alignment);
1151 struct si_resource *si_aligned_buffer_create(struct pipe_screen *screen,
1152 unsigned flags, unsigned usage,
1153 unsigned size, unsigned alignment);
1154 void si_replace_buffer_storage(struct pipe_context *ctx,
1155 struct pipe_resource *dst,
1156 struct pipe_resource *src);
1157 void si_init_screen_buffer_functions(struct si_screen *sscreen);
1158 void si_init_buffer_functions(struct si_context *sctx);
1159
1160 /* si_clear.c */
1161 enum pipe_format si_simplify_cb_format(enum pipe_format format);
1162 bool vi_alpha_is_on_msb(enum pipe_format format);
1163 void vi_dcc_clear_level(struct si_context *sctx,
1164 struct si_texture *tex,
1165 unsigned level, unsigned clear_value);
1166 void si_init_clear_functions(struct si_context *sctx);
1167
1168 /* si_compute_blit.c */
1169 unsigned si_get_flush_flags(struct si_context *sctx, enum si_coherency coher,
1170 enum si_cache_policy cache_policy);
1171 void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
1172 uint64_t offset, uint64_t size, uint32_t *clear_value,
1173 uint32_t clear_value_size, enum si_coherency coher);
1174 void si_copy_buffer(struct si_context *sctx,
1175 struct pipe_resource *dst, struct pipe_resource *src,
1176 uint64_t dst_offset, uint64_t src_offset, unsigned size);
1177 void si_compute_copy_image(struct si_context *sctx,
1178 struct pipe_resource *dst,
1179 unsigned dst_level,
1180 struct pipe_resource *src,
1181 unsigned src_level,
1182 unsigned dstx, unsigned dsty, unsigned dstz,
1183 const struct pipe_box *src_box);
1184 void si_compute_clear_render_target(struct pipe_context *ctx,
1185 struct pipe_surface *dstsurf,
1186 const union pipe_color_union *color,
1187 unsigned dstx, unsigned dsty,
1188 unsigned width, unsigned height);
1189 void si_init_compute_blit_functions(struct si_context *sctx);
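
/* Usage sketch (assumed caller, not a prescribed contract): clearing a whole
 * buffer with a 32-bit pattern through the functions declared above. "sctx"
 * and "dst" are hypothetical; SI_COHERENCY_SHADER requests the cache flushes
 * needed before later shader access.
 *
 *    uint32_t zero = 0;
 *    si_clear_buffer(sctx, dst, 0, dst->width0, &zero, 4,
 *                    SI_COHERENCY_SHADER);
 */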
1190
1191 /* si_cp_dma.c */
1192 #define SI_CPDMA_SKIP_CHECK_CS_SPACE (1 << 0) /* don't call need_cs_space */
1193 #define SI_CPDMA_SKIP_SYNC_AFTER (1 << 1) /* don't wait for DMA after the copy */
1194 #define SI_CPDMA_SKIP_SYNC_BEFORE (1 << 2) /* don't wait for DMA before the copy (RAW hazards) */
1195 #define SI_CPDMA_SKIP_GFX_SYNC (1 << 3) /* don't flush caches and don't wait for PS/CS */
1196 #define SI_CPDMA_SKIP_BO_LIST_UPDATE (1 << 4) /* don't update the BO list */
1197 #define SI_CPDMA_SKIP_ALL (SI_CPDMA_SKIP_CHECK_CS_SPACE | \
1198 SI_CPDMA_SKIP_SYNC_AFTER | \
1199 SI_CPDMA_SKIP_SYNC_BEFORE | \
1200 SI_CPDMA_SKIP_GFX_SYNC | \
1201 SI_CPDMA_SKIP_BO_LIST_UPDATE)
1202
1203 void si_cp_dma_wait_for_idle(struct si_context *sctx);
1204 void si_cp_dma_clear_buffer(struct si_context *sctx, struct radeon_cmdbuf *cs,
1205 struct pipe_resource *dst, uint64_t offset,
1206 uint64_t size, unsigned value, unsigned user_flags,
1207 enum si_coherency coher, enum si_cache_policy cache_policy);
1208 void si_cp_dma_copy_buffer(struct si_context *sctx,
1209 struct pipe_resource *dst, struct pipe_resource *src,
1210 uint64_t dst_offset, uint64_t src_offset, unsigned size,
1211 unsigned user_flags, enum si_coherency coher,
1212 enum si_cache_policy cache_policy);
1213 void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
1214 uint64_t offset, unsigned size);
1215 void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only);
1216 void si_test_gds(struct si_context *sctx);
1217 void si_cp_write_data(struct si_context *sctx, struct si_resource *buf,
1218 unsigned offset, unsigned size, unsigned dst_sel,
1219 unsigned engine, const void *data);
1220
1221 /* si_debug.c */
1222 void si_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
1223 struct radeon_saved_cs *saved, bool get_buffer_list);
1224 void si_clear_saved_cs(struct radeon_saved_cs *saved);
1225 void si_destroy_saved_cs(struct si_saved_cs *scs);
1226 void si_auto_log_cs(void *data, struct u_log_context *log);
1227 void si_log_hw_flush(struct si_context *sctx);
1228 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log);
1229 void si_log_compute_state(struct si_context *sctx, struct u_log_context *log);
1230 void si_init_debug_functions(struct si_context *sctx);
1231 void si_check_vm_faults(struct si_context *sctx,
1232 struct radeon_saved_cs *saved, enum ring_type ring);
1233 bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
1234
1235 /* si_dma.c */
1236 void si_init_dma_functions(struct si_context *sctx);
1237
1238 /* si_dma_cs.c */
1239 void si_dma_emit_timestamp(struct si_context *sctx, struct si_resource *dst,
1240 uint64_t offset);
1241 void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
1242 uint64_t offset, uint64_t size, unsigned clear_value);
1243 void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
1244 struct si_resource *dst, struct si_resource *src);
1245 void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
1246 struct pipe_fence_handle **fence);
1247 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
1248 uint64_t offset, uint64_t size, unsigned value);
1249
1250 /* si_fence.c */
1251 void si_cp_release_mem(struct si_context *ctx,
1252 unsigned event, unsigned event_flags,
1253 unsigned dst_sel, unsigned int_sel, unsigned data_sel,
1254 struct si_resource *buf, uint64_t va,
1255 uint32_t new_fence, unsigned query_type);
1256 unsigned si_cp_write_fence_dwords(struct si_screen *screen);
1257 void si_cp_wait_mem(struct si_context *ctx, struct radeon_cmdbuf *cs,
1258 uint64_t va, uint32_t ref, uint32_t mask, unsigned flags);
1259 void si_init_fence_functions(struct si_context *ctx);
1260 void si_init_screen_fence_functions(struct si_screen *screen);
1261 struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
1262 struct tc_unflushed_batch_token *tc_token);
1263
1264 /* si_get.c */
1265 void si_init_screen_get_functions(struct si_screen *sscreen);
1266
1267 /* si_gfx_cs.c */
1268 void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
1269 struct pipe_fence_handle **fence);
1270 void si_begin_new_gfx_cs(struct si_context *ctx);
1271 void si_need_gfx_cs_space(struct si_context *ctx);
1272
1273 /* si_gpu_load.c */
1274 void si_gpu_load_kill_thread(struct si_screen *sscreen);
1275 uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type);
1276 unsigned si_end_counter(struct si_screen *sscreen, unsigned type,
1277 uint64_t begin);
1278
1279 /* si_compute.c */
1280 void si_init_compute_functions(struct si_context *sctx);
1281
1282 /* si_perfcounters.c */
1283 void si_init_perfcounters(struct si_screen *screen);
1284 void si_destroy_perfcounters(struct si_screen *screen);
1285
1286 /* si_pipe.c */
1287 bool si_check_device_reset(struct si_context *sctx);
1288
1289 /* si_query.c */
1290 void si_init_screen_query_functions(struct si_screen *sscreen);
1291 void si_init_query_functions(struct si_context *sctx);
1292 void si_suspend_queries(struct si_context *sctx);
1293 void si_resume_queries(struct si_context *sctx);
1294
1295 /* si_shaderlib_tgsi.c */
1296 void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type,
1297 unsigned num_layers);
1298 void *si_create_fixed_func_tcs(struct si_context *sctx);
1299 void *si_create_dma_compute_shader(struct pipe_context *ctx,
1300 unsigned num_dwords_per_thread,
1301 bool dst_stream_cache_policy, bool is_copy);
1302 void *si_create_copy_image_compute_shader(struct pipe_context *ctx);
1303 void *si_create_copy_image_compute_shader_1d_array(struct pipe_context *ctx);
1304 void *si_clear_render_target_shader(struct pipe_context *ctx);
1305 void *si_clear_render_target_shader_1d_array(struct pipe_context *ctx);
1306 void *si_create_query_result_cs(struct si_context *sctx);
1307
1308 /* si_test_dma.c */
1309 void si_test_dma(struct si_screen *sscreen);
1310
1311 /* si_test_clearbuffer.c */
1312 void si_test_dma_perf(struct si_screen *sscreen);
1313
1314 /* si_uvd.c */
1315 struct pipe_video_codec *si_uvd_create_decoder(struct pipe_context *context,
1316 const struct pipe_video_codec *templ);
1317
1318 struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe,
1319 const struct pipe_video_buffer *tmpl);
1320
1321 /* si_viewport.c */
1322 void si_update_vs_viewport_state(struct si_context *ctx);
1323 void si_init_viewport_functions(struct si_context *ctx);
1324
1325 /* si_texture.c */
1326 bool si_prepare_for_dma_blit(struct si_context *sctx,
1327 struct si_texture *dst,
1328 unsigned dst_level, unsigned dstx,
1329 unsigned dsty, unsigned dstz,
1330 struct si_texture *src,
1331 unsigned src_level,
1332 const struct pipe_box *src_box);
1333 void si_eliminate_fast_color_clear(struct si_context *sctx,
1334 struct si_texture *tex);
1335 void si_texture_discard_cmask(struct si_screen *sscreen,
1336 struct si_texture *tex);
1337 bool si_init_flushed_depth_texture(struct pipe_context *ctx,
1338 struct pipe_resource *texture,
1339 struct si_texture **staging);
1340 void si_print_texture_info(struct si_screen *sscreen,
1341 struct si_texture *tex, struct u_log_context *log);
1342 struct pipe_resource *si_texture_create(struct pipe_screen *screen,
1343 const struct pipe_resource *templ);
1344 bool vi_dcc_formats_compatible(enum pipe_format format1,
1345 enum pipe_format format2);
1346 bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
1347 unsigned level,
1348 enum pipe_format view_format);
1349 void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
1350 struct pipe_resource *tex,
1351 unsigned level,
1352 enum pipe_format view_format);
1353 struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
1354 struct pipe_resource *texture,
1355 const struct pipe_surface *templ,
1356 unsigned width0, unsigned height0,
1357 unsigned width, unsigned height);
1358 unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
1359 void vi_separate_dcc_try_enable(struct si_context *sctx,
1360 struct si_texture *tex);
1361 void vi_separate_dcc_start_query(struct si_context *sctx,
1362 struct si_texture *tex);
1363 void vi_separate_dcc_stop_query(struct si_context *sctx,
1364 struct si_texture *tex);
1365 void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
1366 struct si_texture *tex);
1367 bool si_texture_disable_dcc(struct si_context *sctx,
1368 struct si_texture *tex);
1369 void si_init_screen_texture_functions(struct si_screen *sscreen);
1370 void si_init_context_texture_functions(struct si_context *sctx);
1371
1372
1373 /*
1374 * common helpers
1375 */
1376
1377 static inline struct si_resource *si_resource(struct pipe_resource *r)
1378 {
1379 return (struct si_resource*)r;
1380 }
1381
1382 static inline void
1383 si_resource_reference(struct si_resource **ptr, struct si_resource *res)
1384 {
1385 pipe_resource_reference((struct pipe_resource **)ptr,
1386 (struct pipe_resource *)res);
1387 }
1388
1389 static inline void
1390 si_texture_reference(struct si_texture **ptr, struct si_texture *res)
1391 {
1392 pipe_resource_reference((struct pipe_resource **)ptr, &res->buffer.b.b);
1393 }
1394
1395 static inline bool
1396 vi_dcc_enabled(struct si_texture *tex, unsigned level)
1397 {
1398 return tex->dcc_offset && level < tex->surface.num_dcc_levels;
1399 }
1400
1401 static inline unsigned
1402 si_tile_mode_index(struct si_texture *tex, unsigned level, bool stencil)
1403 {
1404 if (stencil)
1405 return tex->surface.u.legacy.stencil_tiling_index[level];
1406 else
1407 return tex->surface.u.legacy.tiling_index[level];
1408 }
1409
1410 static inline void
1411 si_context_add_resource_size(struct si_context *sctx, struct pipe_resource *r)
1412 {
1413 if (r) {
1414 /* Add memory usage for need_gfx_cs_space */
1415 sctx->vram += si_resource(r)->vram_usage;
1416 sctx->gtt += si_resource(r)->gart_usage;
1417 }
1418 }
1419
1420 static inline void
1421 si_invalidate_draw_sh_constants(struct si_context *sctx)
1422 {
1423 sctx->last_base_vertex = SI_BASE_VERTEX_UNKNOWN;
1424 sctx->last_instance_count = SI_INSTANCE_COUNT_UNKNOWN;
1425 }
1426
1427 static inline unsigned
1428 si_get_atom_bit(struct si_context *sctx, struct si_atom *atom)
1429 {
1430 return 1 << (atom - sctx->atoms.array);
1431 }
1432
1433 static inline void
1434 si_set_atom_dirty(struct si_context *sctx, struct si_atom *atom, bool dirty)
1435 {
1436 unsigned bit = si_get_atom_bit(sctx, atom);
1437
1438 if (dirty)
1439 sctx->dirty_atoms |= bit;
1440 else
1441 sctx->dirty_atoms &= ~bit;
1442 }
1443
1444 static inline bool
1445 si_is_atom_dirty(struct si_context *sctx, struct si_atom *atom)
1446 {
1447 return (sctx->dirty_atoms & si_get_atom_bit(sctx, atom)) != 0;
1448 }
1449
1450 static inline void
1451 si_mark_atom_dirty(struct si_context *sctx, struct si_atom *atom)
1452 {
1453 si_set_atom_dirty(sctx, atom, true);
1454 }
1455
1456 static inline struct si_shader_ctx_state *si_get_vs(struct si_context *sctx)
1457 {
1458 if (sctx->gs_shader.cso)
1459 return &sctx->gs_shader;
1460 if (sctx->tes_shader.cso)
1461 return &sctx->tes_shader;
1462
1463 return &sctx->vs_shader;
1464 }
1465
1466 static inline struct tgsi_shader_info *si_get_vs_info(struct si_context *sctx)
1467 {
1468 struct si_shader_ctx_state *vs = si_get_vs(sctx);
1469
1470 return vs->cso ? &vs->cso->info : NULL;
1471 }
1472
1473 static inline struct si_shader* si_get_vs_state(struct si_context *sctx)
1474 {
1475 if (sctx->gs_shader.cso)
1476 return sctx->gs_shader.cso->gs_copy_shader;
1477
1478 struct si_shader_ctx_state *vs = si_get_vs(sctx);
1479 return vs->current ? vs->current : NULL;
1480 }
1481
1482 static inline bool si_can_dump_shader(struct si_screen *sscreen,
1483 unsigned processor)
1484 {
1485 return sscreen->debug_flags & (1 << processor);
1486 }
1487
1488 static inline bool si_get_strmout_en(struct si_context *sctx)
1489 {
1490 return sctx->streamout.streamout_enabled ||
1491 sctx->streamout.prims_gen_query_enabled;
1492 }
1493
1494 static inline unsigned
1495 si_optimal_tcc_alignment(struct si_context *sctx, unsigned upload_size)
1496 {
1497 unsigned alignment, tcc_cache_line_size;
1498
1499 /* If the upload size is less than the cache line size (e.g. 16, 32),
1500 * the whole thing will fit into a cache line if we align it to its size.
1501 * The idea is that multiple small uploads can share a cache line.
1502 * If the upload size is greater, align it to the cache line size.
1503 */
1504 alignment = util_next_power_of_two(upload_size);
1505 tcc_cache_line_size = sctx->screen->info.tcc_cache_line_size;
1506 return MIN2(alignment, tcc_cache_line_size);
1507 }
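
/* Worked example for the helper above (illustrative numbers): a 24-byte
 * upload gives util_next_power_of_two(24) = 32, so the result is
 * MIN2(32, 64) = 32 with a 64-byte TCC cache line; a 100-byte upload rounds
 * up to 128 and is clamped to the 64-byte line size. */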
1508
1509 static inline void
1510 si_saved_cs_reference(struct si_saved_cs **dst, struct si_saved_cs *src)
1511 {
1512 if (pipe_reference(&(*dst)->reference, &src->reference))
1513 si_destroy_saved_cs(*dst);
1514
1515 *dst = src;
1516 }
1517
1518 static inline void
1519 si_make_CB_shader_coherent(struct si_context *sctx, unsigned num_samples,
1520 bool shaders_read_metadata)
1521 {
1522 sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_CB |
1523 SI_CONTEXT_INV_VMEM_L1;
1524
1525 if (sctx->chip_class >= GFX9) {
1526 /* Single-sample color is coherent with shaders on GFX9, but
1527 * L2 metadata must be flushed if shaders read metadata.
1528 * (DCC, CMASK).
1529 */
1530 if (num_samples >= 2)
1531 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1532 else if (shaders_read_metadata)
1533 sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
1534 } else {
1535 /* SI-CI-VI */
1536 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1537 }
1538 }
1539
1540 static inline void
1541 si_make_DB_shader_coherent(struct si_context *sctx, unsigned num_samples,
1542 bool include_stencil, bool shaders_read_metadata)
1543 {
1544 sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB |
1545 SI_CONTEXT_INV_VMEM_L1;
1546
1547 if (sctx->chip_class >= GFX9) {
1548 /* Single-sample depth (not stencil) is coherent with shaders
1549 * on GFX9, but L2 metadata must be flushed if shaders read
1550 * metadata.
1551 */
1552 if (num_samples >= 2 || include_stencil)
1553 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1554 else if (shaders_read_metadata)
1555 sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
1556 } else {
1557 /* SI-CI-VI */
1558 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1559 }
1560 }
1561
1562 static inline bool
1563 si_can_sample_zs(struct si_texture *tex, bool stencil_sampler)
1564 {
1565 return (stencil_sampler && tex->can_sample_s) ||
1566 (!stencil_sampler && tex->can_sample_z);
1567 }
1568
1569 static inline bool
1570 si_htile_enabled(struct si_texture *tex, unsigned level)
1571 {
1572 return tex->htile_offset && level == 0;
1573 }
1574
1575 static inline bool
1576 vi_tc_compat_htile_enabled(struct si_texture *tex, unsigned level)
1577 {
1578 assert(!tex->tc_compatible_htile || tex->htile_offset);
1579 return tex->tc_compatible_htile && level == 0;
1580 }
1581
1582 static inline unsigned si_get_ps_iter_samples(struct si_context *sctx)
1583 {
1584 if (sctx->ps_uses_fbfetch)
1585 return sctx->framebuffer.nr_color_samples;
1586
1587 return MIN2(sctx->ps_iter_samples, sctx->framebuffer.nr_color_samples);
1588 }
1589
1590 static inline unsigned si_get_total_colormask(struct si_context *sctx)
1591 {
1592 if (sctx->queued.named.rasterizer->rasterizer_discard)
1593 return 0;
1594
1595 struct si_shader_selector *ps = sctx->ps_shader.cso;
1596 if (!ps)
1597 return 0;
1598
1599 unsigned colormask = sctx->framebuffer.colorbuf_enabled_4bit &
1600 sctx->queued.named.blend->cb_target_mask;
1601
1602 if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
1603 colormask &= ps->colors_written_4bit;
1604 else if (!ps->colors_written_4bit)
1605 colormask = 0; /* color0 writes all cbufs, but it's not written */
1606
1607 return colormask;
1608 }
1609
1610 #define UTIL_ALL_PRIM_LINE_MODES ((1 << PIPE_PRIM_LINES) | \
1611 (1 << PIPE_PRIM_LINE_LOOP) | \
1612 (1 << PIPE_PRIM_LINE_STRIP) | \
1613 (1 << PIPE_PRIM_LINES_ADJACENCY) | \
1614 (1 << PIPE_PRIM_LINE_STRIP_ADJACENCY))
1615
1616 static inline bool util_prim_is_lines(unsigned prim)
1617 {
1618 return ((1 << prim) & UTIL_ALL_PRIM_LINE_MODES) != 0;
1619 }
1620
1621 static inline bool util_prim_is_points_or_lines(unsigned prim)
1622 {
1623 return ((1 << prim) & (UTIL_ALL_PRIM_LINE_MODES |
1624 (1 << PIPE_PRIM_POINTS))) != 0;
1625 }
1626
1627 /**
1628 * Return true if there is enough memory in VRAM and GTT for the buffers
1629 * added so far.
1630 *
1631 * \param vram VRAM memory size not added to the buffer list yet
1632 * \param gtt GTT memory size not added to the buffer list yet
1633 */
1634 static inline bool
1635 radeon_cs_memory_below_limit(struct si_screen *screen,
1636 struct radeon_cmdbuf *cs,
1637 uint64_t vram, uint64_t gtt)
1638 {
1639 vram += cs->used_vram;
1640 gtt += cs->used_gart;
1641
1642 /* Anything that goes above the VRAM size should go to GTT. */
1643 if (vram > screen->info.vram_size)
1644 gtt += vram - screen->info.vram_size;
1645
1646 /* Now we just need to check if we have enough GTT. */
1647 return gtt < screen->info.gart_size * 0.7;
1648 }
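
/* Worked example for the check above (illustrative numbers): with 4 GiB of
 * VRAM and 8 GiB of GTT, a CS needing vram = 5 GiB and gtt = 1 GiB is
 * evaluated as gtt = 1 + (5 - 4) = 2 GiB, which passes because 2 GiB is
 * below the 0.7 * 8 GiB = 5.6 GiB limit. */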
1649
1650 /**
1651 * Add a buffer to the buffer list for the given command stream (CS).
1652 *
1653 * All buffers used by a CS must be added to the list. This tells the kernel
1654 * driver which buffers are used by GPU commands. Other buffers can
1655 * be swapped out (not accessible) during execution.
1656 *
1657 * The buffer list becomes empty after every context flush and must be
1658 * rebuilt.
1659 */
1660 static inline void radeon_add_to_buffer_list(struct si_context *sctx,
1661 struct radeon_cmdbuf *cs,
1662 struct si_resource *bo,
1663 enum radeon_bo_usage usage,
1664 enum radeon_bo_priority priority)
1665 {
1666 assert(usage);
1667 sctx->ws->cs_add_buffer(
1668 cs, bo->buf,
1669 (enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
1670 bo->domains, priority);
1671 }
1672
1673 /**
1674 * Same as above, but also checks memory usage and flushes the context
1675 * accordingly.
1676 *
1677 * When this SHOULD NOT be used:
1678 *
1679 * - if si_context_add_resource_size has been called for the buffer
1680 * followed by *_need_cs_space for checking the memory usage
1681 *
1682 * - if si_need_dma_space has been called for the buffer
1683 *
1684 * - when emitting state packets and draw packets (because preceding packets
1685 * can't be re-emitted at that point)
1686 *
1687 * - if shader resource "enabled_mask" is not up-to-date or there is
1688 * a different constraint disallowing a context flush
1689 */
1690 static inline void
1691 radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
1692 struct si_resource *bo,
1693 enum radeon_bo_usage usage,
1694 enum radeon_bo_priority priority,
1695 bool check_mem)
1696 {
1697 if (check_mem &&
1698 !radeon_cs_memory_below_limit(sctx->screen, sctx->gfx_cs,
1699 sctx->vram + bo->vram_usage,
1700 sctx->gtt + bo->gart_usage))
1701 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
1702
1703 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, bo, usage, priority);
1704 }
1705
1706 #define PRINT_ERR(fmt, args...) \
1707 fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)
1708
1709 #endif