src/gallium/drivers/radeonsi/si_pipe.h (mesa.git, 93082e262d6afd1ea3510d2d6ab4eaa92f9b0e44)
1 /*
2 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3 * Copyright 2018 Advanced Micro Devices, Inc.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * on the rights to use, copy, modify, merge, publish, distribute, sub
10 * license, and/or sell copies of the Software, and to permit persons to whom
11 * the Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25 #ifndef SI_PIPE_H
26 #define SI_PIPE_H
27
28 #include "si_shader.h"
29 #include "si_state.h"
30
31 #include "util/u_dynarray.h"
32 #include "util/u_idalloc.h"
33 #include "util/u_threaded_context.h"
34
35 #ifdef PIPE_ARCH_BIG_ENDIAN
36 #define SI_BIG_ENDIAN 1
37 #else
38 #define SI_BIG_ENDIAN 0
39 #endif
40
41 #define ATI_VENDOR_ID 0x1002
42
43 #define SI_NOT_QUERY 0xffffffff
44
45 /* The base vertex and primitive restart can be any number, but we must pick
46  * one that means "unknown" for the purpose of state tracking, and
47  * it shouldn't be a commonly-used number. */
48 #define SI_BASE_VERTEX_UNKNOWN INT_MIN
49 #define SI_RESTART_INDEX_UNKNOWN INT_MIN
50 #define SI_NUM_SMOOTH_AA_SAMPLES 8
51 #define SI_GS_PER_ES 128
52 /* Alignment for optimal CP DMA performance. */
53 #define SI_CPDMA_ALIGNMENT 32
54
55 /* Pipeline & streamout query controls. */
56 #define SI_CONTEXT_START_PIPELINE_STATS (1 << 0)
57 #define SI_CONTEXT_STOP_PIPELINE_STATS (1 << 1)
58 #define SI_CONTEXT_FLUSH_FOR_RENDER_COND (1 << 2)
59 /* Instruction cache. */
60 #define SI_CONTEXT_INV_ICACHE (1 << 3)
61 /* SMEM L1, other names: KCACHE, constant cache, DCACHE, data cache */
62 #define SI_CONTEXT_INV_SMEM_L1 (1 << 4)
63 /* VMEM L1 can optionally be bypassed (GLC=1). Other names: TC L1 */
64 #define SI_CONTEXT_INV_VMEM_L1 (1 << 5)
65 /* Used by everything except CB/DB, can be bypassed (SLC=1). Other names: TC L2 */
66 #define SI_CONTEXT_INV_GLOBAL_L2 (1 << 6)
67 /* Write dirty L2 lines back to memory (shader and CP DMA stores), but don't
68 * invalidate L2. SI-CIK can't do it, so they will do complete invalidation. */
69 #define SI_CONTEXT_WRITEBACK_GLOBAL_L2 (1 << 7)
70 /* Writeback & invalidate the L2 metadata cache. It can only be coupled with
71 * a CB or DB flush. */
72 #define SI_CONTEXT_INV_L2_METADATA (1 << 8)
73 /* Framebuffer caches. */
74 #define SI_CONTEXT_FLUSH_AND_INV_DB (1 << 9)
75 #define SI_CONTEXT_FLUSH_AND_INV_DB_META (1 << 10)
76 #define SI_CONTEXT_FLUSH_AND_INV_CB (1 << 11)
77 /* Engine synchronization. */
78 #define SI_CONTEXT_VS_PARTIAL_FLUSH (1 << 12)
79 #define SI_CONTEXT_PS_PARTIAL_FLUSH (1 << 13)
80 #define SI_CONTEXT_CS_PARTIAL_FLUSH (1 << 14)
81 #define SI_CONTEXT_VGT_FLUSH (1 << 15)
82 #define SI_CONTEXT_VGT_STREAMOUT_SYNC (1 << 16)
83
84 #define SI_PREFETCH_VBO_DESCRIPTORS (1 << 0)
85 #define SI_PREFETCH_LS (1 << 1)
86 #define SI_PREFETCH_HS (1 << 2)
87 #define SI_PREFETCH_ES (1 << 3)
88 #define SI_PREFETCH_GS (1 << 4)
89 #define SI_PREFETCH_VS (1 << 5)
90 #define SI_PREFETCH_PS (1 << 6)
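
/* Illustrative sketch (not part of the original header): the SI_PREFETCH_*
 * bits are presumably accumulated in si_context::prefetch_L2_mask (declared
 * below) when shaders or vertex buffers change, and later consumed by
 * cik_emit_prefetch_L2().  The exact call sites here are assumptions.
 */
#if 0
   sctx->prefetch_L2_mask |= SI_PREFETCH_VS | SI_PREFETCH_VBO_DESCRIPTORS;

   /* ...and on the next draw: */
   if (sctx->prefetch_L2_mask)
      cik_emit_prefetch_L2(sctx, false);
#endif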
91
92 #define SI_MAX_BORDER_COLORS 4096
93 #define SI_MAX_VIEWPORTS 16
94 #define SIX_BITS 0x3F
95 #define SI_MAP_BUFFER_ALIGNMENT 64
96 #define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024
97
98 #define SI_RESOURCE_FLAG_TRANSFER (PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
99 #define SI_RESOURCE_FLAG_FLUSHED_DEPTH (PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
100 #define SI_RESOURCE_FLAG_FORCE_TILING (PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
101 #define SI_RESOURCE_FLAG_DISABLE_DCC (PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
102 #define SI_RESOURCE_FLAG_UNMAPPABLE (PIPE_RESOURCE_FLAG_DRV_PRIV << 4)
103 #define SI_RESOURCE_FLAG_READ_ONLY (PIPE_RESOURCE_FLAG_DRV_PRIV << 5)
104 #define SI_RESOURCE_FLAG_32BIT (PIPE_RESOURCE_FLAG_DRV_PRIV << 6)
105
106 /* Debug flags. */
107 enum {
108 /* Shader logging options: */
109 DBG_VS = PIPE_SHADER_VERTEX,
110 DBG_PS = PIPE_SHADER_FRAGMENT,
111 DBG_GS = PIPE_SHADER_GEOMETRY,
112 DBG_TCS = PIPE_SHADER_TESS_CTRL,
113 DBG_TES = PIPE_SHADER_TESS_EVAL,
114 DBG_CS = PIPE_SHADER_COMPUTE,
115 DBG_NO_IR,
116 DBG_NO_TGSI,
117 DBG_NO_ASM,
118 DBG_PREOPT_IR,
119
120 /* Shader compiler options the shader cache should be aware of: */
121 DBG_FS_CORRECT_DERIVS_AFTER_KILL,
122 DBG_UNSAFE_MATH,
123 DBG_SI_SCHED,
124 DBG_GISEL,
125
126 /* Shader compiler options (with no effect on the shader cache): */
127 DBG_CHECK_IR,
128 DBG_NIR,
129 DBG_MONOLITHIC_SHADERS,
130 DBG_NO_OPT_VARIANT,
131
132 /* Information logging options: */
133 DBG_INFO,
134 DBG_TEX,
135 DBG_COMPUTE,
136 DBG_VM,
137
138 /* Driver options: */
139 DBG_FORCE_DMA,
140 DBG_NO_ASYNC_DMA,
141 DBG_NO_WC,
142 DBG_CHECK_VM,
143 DBG_RESERVE_VMID,
144 DBG_ZERO_VRAM,
145
146 /* 3D engine options: */
147 DBG_SWITCH_ON_EOP,
148 DBG_NO_OUT_OF_ORDER,
149 DBG_NO_DPBB,
150 DBG_NO_DFSM,
151 DBG_DPBB,
152 DBG_DFSM,
153 DBG_NO_HYPERZ,
154 DBG_NO_RB_PLUS,
155 DBG_NO_2D_TILING,
156 DBG_NO_TILING,
157 DBG_NO_DCC,
158 DBG_NO_DCC_CLEAR,
159 DBG_NO_DCC_FB,
160 DBG_NO_DCC_MSAA,
161 DBG_NO_FMASK,
162
163 /* Tests: */
164 DBG_TEST_DMA,
165 DBG_TEST_VMFAULT_CP,
166 DBG_TEST_VMFAULT_SDMA,
167 DBG_TEST_VMFAULT_SHADER,
168 DBG_TEST_DMA_PERF,
169 DBG_TEST_GDS,
170 };
171
172 #define DBG_ALL_SHADERS (((1 << (DBG_CS + 1)) - 1))
173 #define DBG(name) (1ull << DBG_##name)
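
/* Illustrative sketch (assumed usage): si_screen::debug_flags is a 64-bit
 * mask of the enum values above, tested with the DBG() helper.
 */
#if 0
   if (sscreen->debug_flags & DBG(TEST_DMA))
      si_test_dma(sscreen);

   if (sscreen->debug_flags & DBG_ALL_SHADERS)
      ; /* at least one per-stage shader-dump flag is set */
#endif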
174
175 struct si_compute;
176 struct hash_table;
177 struct u_suballocator;
178
179 /* Only 32-bit buffer allocations are supported; gallium doesn't support more
180  * at the moment.
181 */
182 struct r600_resource {
183 struct threaded_resource b;
184
185 /* Winsys objects. */
186 struct pb_buffer *buf;
187 uint64_t gpu_address;
188 /* Memory usage if the buffer placement is optimal. */
189 uint64_t vram_usage;
190 uint64_t gart_usage;
191
192 /* Resource properties. */
193 uint64_t bo_size;
194 unsigned bo_alignment;
195 enum radeon_bo_domain domains;
196 enum radeon_bo_flag flags;
197 unsigned bind_history;
198 int max_forced_staging_uploads;
199
200 /* The buffer range which is initialized (with a write transfer,
201 * streamout, DMA, or as a random access target). The rest of
202 * the buffer is considered invalid and can be mapped unsynchronized.
203 *
204  * This allows unsynchronized mapping of a buffer range which hasn't
205  * been used yet. It's for applications which forget to use
206  * the unsynchronized map flag and expect the driver to figure it out.
207  * (See the illustrative sketch after this struct.) */
208 struct util_range valid_buffer_range;
209
210 /* For buffers only. This indicates that a write operation has been
211 * performed by TC L2, but the cache hasn't been flushed.
212 * Any hw block which doesn't use or bypasses TC L2 should check this
213 * flag and flush the cache before using the buffer.
214 *
215 * For example, TC L2 must be flushed if a buffer which has been
216 * modified by a shader store instruction is about to be used as
217 * an index buffer. The reason is that VGT DMA index fetching doesn't
218 * use TC L2.
219 */
220 bool TC_L2_dirty;
221
222 /* Whether this resource is referenced by bindless handles. */
223 bool texture_handle_allocated;
224 bool image_handle_allocated;
225
226 /* Whether the resource has been exported via resource_get_handle. */
227 unsigned external_usage; /* PIPE_HANDLE_USAGE_* */
228 };
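
/* Illustrative sketch (not part of the original header): how the tracking
 * fields above are typically consulted.  The helper names are made up;
 * util_ranges_intersect() comes from util/u_range.h.
 */
#if 0
/* VGT DMA index fetching bypasses TC L2, so write dirty lines back first: */
static inline void example_sync_index_buffer(struct si_context *sctx,
                                             struct r600_resource *indexbuf)
{
   if (indexbuf->TC_L2_dirty) {
      sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
      indexbuf->TC_L2_dirty = false;
   }
}

/* A mapping of a never-initialized range can skip synchronization: */
static inline bool example_can_map_unsynchronized(struct r600_resource *buf,
                                                  unsigned offset, unsigned size)
{
   return !util_ranges_intersect(&buf->valid_buffer_range,
                                 offset, offset + size);
}
#endif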
229
230 struct si_transfer {
231 struct threaded_transfer b;
232 struct r600_resource *staging;
233 unsigned offset;
234 };
235
236 struct si_texture {
237 struct r600_resource buffer;
238
239 struct radeon_surf surface;
240 uint64_t size;
241 struct si_texture *flushed_depth_texture;
242
243 /* Colorbuffer compression and fast clear. */
244 uint64_t fmask_offset;
245 uint64_t cmask_offset;
246 uint64_t cmask_base_address_reg;
247 struct r600_resource *cmask_buffer;
248 uint64_t dcc_offset; /* 0 = disabled */
249 unsigned cb_color_info; /* fast clear enable bit */
250 unsigned color_clear_value[2];
251 unsigned last_msaa_resolve_target_micro_mode;
252 unsigned num_level0_transfers;
253
254 /* Depth buffer compression and fast clear. */
255 uint64_t htile_offset;
256 float depth_clear_value;
257 uint16_t dirty_level_mask; /* each bit says if that mipmap is compressed */
258 uint16_t stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
259 enum pipe_format db_render_format:16;
260 uint8_t stencil_clear_value;
261 bool tc_compatible_htile:1;
262 bool depth_cleared:1; /* if it was cleared at least once */
263 bool stencil_cleared:1; /* if it was cleared at least once */
264 bool upgraded_depth:1; /* upgraded from unorm to Z32_FLOAT */
265 bool is_depth:1;
266 bool db_compatible:1;
267 bool can_sample_z:1;
268 bool can_sample_s:1;
269
270 /* We need to track DCC dirtiness, because st/dri usually calls
271  * flush_resource twice per frame (not a bug) and we don't want to
272  * decompress DCC twice. Also, the dirty tracking must be done even
273 * if DCC isn't used, because it's required by the DCC usage analysis
274 * for a possible future enablement.
275 */
276 bool separate_dcc_dirty:1;
277 /* Statistics gathering for the DCC enablement heuristic. */
278 bool dcc_gather_statistics:1;
279 /* Counter that should be non-zero if the texture is bound to a
280 * framebuffer.
281 */
282 unsigned framebuffers_bound;
283 /* Whether the texture is a displayable back buffer and needs DCC
284 * decompression, which is expensive. Therefore, it's enabled only
285 * if statistics suggest that it will pay off and it's allocated
286 * separately. It can't be bound as a sampler by apps. Limited to
287 * target == 2D and last_level == 0. If enabled, dcc_offset contains
288 * the absolute GPUVM address, not the relative one.
289 */
290 struct r600_resource *dcc_separate_buffer;
291 /* When DCC is temporarily disabled, the separate buffer is here. */
292 struct r600_resource *last_dcc_separate_buffer;
293 /* Estimate of how much this color buffer is written to in units of
294 * full-screen draws: ps_invocations / (width * height)
295 * Shader kills, late Z, and blending with trivial discards make it
296 * inaccurate (we need to count CB updates, not PS invocations).
297 */
298 unsigned ps_draw_ratio;
299 /* The number of clears since the last DCC usage analysis. */
300 unsigned num_slow_clears;
301 };
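
/* Illustrative sketch (made-up helpers): dirty_level_mask and
 * stencil_dirty_level_mask above are per-mip-level bitfields; a level is
 * tested before it is sampled and cleared once it has been decompressed.
 */
#if 0
static inline bool example_level_is_compressed(struct si_texture *tex,
                                               unsigned level)
{
   return (tex->dirty_level_mask & (1u << level)) != 0;
}

static inline void example_mark_level_decompressed(struct si_texture *tex,
                                                   unsigned level)
{
   tex->dirty_level_mask &= ~(1u << level);
}
#endif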
302
303 struct si_surface {
304 struct pipe_surface base;
305
306 /* These can vary with block-compressed textures. */
307 uint16_t width0;
308 uint16_t height0;
309
310 bool color_initialized:1;
311 bool depth_initialized:1;
312
313 /* Misc. color flags. */
314 bool color_is_int8:1;
315 bool color_is_int10:1;
316 bool dcc_incompatible:1;
317
318 /* Color registers. */
319 unsigned cb_color_info;
320 unsigned cb_color_view;
321 unsigned cb_color_attrib;
322 unsigned cb_color_attrib2; /* GFX9 and later */
323 unsigned cb_dcc_control; /* VI and later */
324 unsigned spi_shader_col_format:8; /* no blending, no alpha-to-coverage. */
325 unsigned spi_shader_col_format_alpha:8; /* alpha-to-coverage */
326 unsigned spi_shader_col_format_blend:8; /* blending without alpha. */
327 unsigned spi_shader_col_format_blend_alpha:8; /* blending with alpha. */
328
329 /* DB registers. */
330 uint64_t db_depth_base; /* DB_Z_READ/WRITE_BASE */
331 uint64_t db_stencil_base;
332 uint64_t db_htile_data_base;
333 unsigned db_depth_info;
334 unsigned db_z_info;
335 unsigned db_z_info2; /* GFX9+ */
336 unsigned db_depth_view;
337 unsigned db_depth_size;
338 unsigned db_depth_slice;
339 unsigned db_stencil_info;
340 unsigned db_stencil_info2; /* GFX9+ */
341 unsigned db_htile_surface;
342 };
343
344 struct si_mmio_counter {
345 unsigned busy;
346 unsigned idle;
347 };
348
349 union si_mmio_counters {
350 struct {
351 /* For global GPU load including SDMA. */
352 struct si_mmio_counter gpu;
353
354 /* GRBM_STATUS */
355 struct si_mmio_counter spi;
356 struct si_mmio_counter gui;
357 struct si_mmio_counter ta;
358 struct si_mmio_counter gds;
359 struct si_mmio_counter vgt;
360 struct si_mmio_counter ia;
361 struct si_mmio_counter sx;
362 struct si_mmio_counter wd;
363 struct si_mmio_counter bci;
364 struct si_mmio_counter sc;
365 struct si_mmio_counter pa;
366 struct si_mmio_counter db;
367 struct si_mmio_counter cp;
368 struct si_mmio_counter cb;
369
370 /* SRBM_STATUS2 */
371 struct si_mmio_counter sdma;
372
373 /* CP_STAT */
374 struct si_mmio_counter pfp;
375 struct si_mmio_counter meq;
376 struct si_mmio_counter me;
377 struct si_mmio_counter surf_sync;
378 struct si_mmio_counter cp_dma;
379 struct si_mmio_counter scratch_ram;
380 } named;
381 unsigned array[0];
382 };
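
/* Illustrative sketch (assumed usage): the zero-length "array" member lets
 * code address any named counter by a flat index, e.g. for atomic updates
 * from the GPU-load thread.  offsetof() is from <stddef.h> and p_atomic_inc()
 * from util/u_atomic.h, neither of which this header includes.
 */
#if 0
   unsigned busy_index = offsetof(union si_mmio_counters, named.gui.busy) /
                         sizeof(unsigned);

   p_atomic_inc(&sscreen->mmio_counters.array[busy_index]);
#endif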
383
384 struct si_memory_object {
385 struct pipe_memory_object b;
386 struct pb_buffer *buf;
387 uint32_t stride;
388 };
389
390 /* Saved CS data for debugging features. */
391 struct radeon_saved_cs {
392 uint32_t *ib;
393 unsigned num_dw;
394
395 struct radeon_bo_list_item *bo_list;
396 unsigned bo_count;
397 };
398
399 struct si_screen {
400 struct pipe_screen b;
401 struct radeon_winsys *ws;
402 struct disk_cache *disk_shader_cache;
403
404 struct radeon_info info;
405 uint64_t debug_flags;
406 char renderer_string[183];
407
408 unsigned pa_sc_raster_config;
409 unsigned pa_sc_raster_config_1;
410 unsigned se_tile_repeat;
411 unsigned gs_table_depth;
412 unsigned tess_offchip_block_dw_size;
413 unsigned tess_offchip_ring_size;
414 unsigned tess_factor_ring_size;
415 unsigned vgt_hs_offchip_param;
416 unsigned eqaa_force_coverage_samples;
417 unsigned eqaa_force_z_samples;
418 unsigned eqaa_force_color_samples;
419 bool has_clear_state;
420 bool has_distributed_tess;
421 bool has_draw_indirect_multi;
422 bool has_out_of_order_rast;
423 bool assume_no_z_fights;
424 bool commutative_blend_add;
425 bool clear_db_cache_before_clear;
426 bool has_msaa_sample_loc_bug;
427 bool has_ls_vgpr_init_bug;
428 bool dpbb_allowed;
429 bool dfsm_allowed;
430 bool llvm_has_working_vgpr_indexing;
431
432 /* Whether shaders are monolithic (1-part) or separate (3-part). */
433 bool use_monolithic_shaders;
434 bool record_llvm_ir;
435 bool has_rbplus; /* if RB+ registers exist */
436 bool rbplus_allowed; /* if RB+ is allowed */
437 bool dcc_msaa_allowed;
438 bool cpdma_prefetch_writes_memory;
439
440 struct slab_parent_pool pool_transfers;
441
442 /* Texture filter settings. */
443 int force_aniso; /* -1 = disabled */
444
445 /* Auxiliary context. Mainly used to initialize resources.
446  * It must be locked prior to use and flushed before unlocking. */
447 struct pipe_context *aux_context;
448 mtx_t aux_context_lock;
449
450 /* This must be in the screen, because UE4 uses one context for
451 * compilation and another one for rendering.
452 */
453 unsigned num_compilations;
454 /* Along with ST_DEBUG=precompile, this should show if applications
455 * are loading shaders on demand. This is a monotonic counter.
456 */
457 unsigned num_shaders_created;
458 unsigned num_shader_cache_hits;
459
460 /* GPU load thread. */
461 mtx_t gpu_load_mutex;
462 thrd_t gpu_load_thread;
463 union si_mmio_counters mmio_counters;
464 volatile unsigned gpu_load_stop_thread; /* bool */
465
466 /* Performance counters. */
467 struct si_perfcounters *perfcounters;
468
469 /* If pipe_screen wants to recompute and re-emit the framebuffer,
470 * sampler, and image states of all contexts, it should atomically
471 * increment this.
472 *
473 * Each context will compare this with its own last known value of
474 * the counter before drawing and re-emit the states accordingly.
475 */
476 unsigned dirty_tex_counter;
477
478 /* Atomically increment this counter when an existing texture's
479 * metadata is enabled or disabled in a way that requires changing
480 * contexts' compressed texture binding masks.
481 */
482 unsigned compressed_colortex_counter;
483
484 struct {
485 /* Context flags to set so that all writes from earlier jobs
486 * in the CP are seen by L2 clients.
487 */
488 unsigned cp_to_L2;
489
490 /* Context flags to set so that all writes from earlier jobs
491 * that end in L2 are seen by CP.
492 */
493 unsigned L2_to_cp;
494 } barrier_flags;
495
496 mtx_t shader_parts_mutex;
497 struct si_shader_part *vs_prologs;
498 struct si_shader_part *tcs_epilogs;
499 struct si_shader_part *gs_prologs;
500 struct si_shader_part *ps_prologs;
501 struct si_shader_part *ps_epilogs;
502
503 /* Shader cache in memory.
504 *
505 * Design & limitations:
506 * - The shader cache is per screen (= per process), never saved to
507 * disk, and skips redundant shader compilations from TGSI to bytecode.
508 * - It can only be used with one-variant-per-shader support, in which
509 * case only the main (typically middle) part of shaders is cached.
510 * - Only VS, TCS, TES, PS are cached, out of which only the hw VS
511 * variants of VS and TES are cached, so LS and ES aren't.
512 * - GS and CS aren't cached, but it's certainly possible to cache
513 * those as well.
514 */
515 mtx_t shader_cache_mutex;
516 struct hash_table *shader_cache;
517
518 /* Shader compiler queue for multithreaded compilation. */
519 struct util_queue shader_compiler_queue;
520 /* Use at most 3 normal compiler threads on quadcore and better.
521 * Hyperthreaded CPUs report the number of threads, but we want
522 * the number of cores. We only need this many threads for shader-db. */
523 struct ac_llvm_compiler compiler[24]; /* used by the queue only */
524
525 struct util_queue shader_compiler_queue_low_priority;
526 /* Use at most 2 low priority threads on quadcore and better.
527 * We want to minimize the impact on multithreaded Mesa. */
528 struct ac_llvm_compiler compiler_lowp[10];
529 };
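
/* Illustrative sketch (assumed usage, simplified): the dirty_tex_counter
 * protocol described in the struct above.  The screen side bumps the counter
 * atomically; each context compares it with its last known value before
 * drawing.  p_atomic_inc()/p_atomic_read() are from util/u_atomic.h.
 */
#if 0
   /* screen side, e.g. after reallocating a texture in place: */
   p_atomic_inc(&sscreen->dirty_tex_counter);

   /* context side, early in draw_vbo: */
   unsigned counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
   if (counter != sctx->last_dirty_tex_counter) {
      sctx->last_dirty_tex_counter = counter;
      /* re-validate framebuffer, sampler and image bindings here */
   }
#endif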
530
531 struct si_blend_color {
532 struct pipe_blend_color state;
533 bool any_nonzeros;
534 };
535
536 struct si_sampler_view {
537 struct pipe_sampler_view base;
538 /* [0..7] = image descriptor
539 * [4..7] = buffer descriptor */
540 uint32_t state[8];
541 uint32_t fmask_state[8];
542 const struct legacy_surf_level *base_level_info;
543 ubyte base_level;
544 ubyte block_width;
545 bool is_stencil_sampler;
546 bool is_integer;
547 bool dcc_incompatible;
548 };
549
550 #define SI_SAMPLER_STATE_MAGIC 0x34f1c35a
551
552 struct si_sampler_state {
553 #ifdef DEBUG
554 unsigned magic;
555 #endif
556 uint32_t val[4];
557 uint32_t integer_val[4];
558 uint32_t upgraded_depth_val[4];
559 };
560
561 struct si_cs_shader_state {
562 struct si_compute *program;
563 struct si_compute *emitted_program;
564 unsigned offset;
565 bool initialized;
566 bool uses_scratch;
567 };
568
569 struct si_samplers {
570 struct pipe_sampler_view *views[SI_NUM_SAMPLERS];
571 struct si_sampler_state *sampler_states[SI_NUM_SAMPLERS];
572
573 /* The i-th bit is set if that element is enabled (non-NULL resource). */
574 unsigned enabled_mask;
575 uint32_t needs_depth_decompress_mask;
576 uint32_t needs_color_decompress_mask;
577 };
578
579 struct si_images {
580 struct pipe_image_view views[SI_NUM_IMAGES];
581 uint32_t needs_color_decompress_mask;
582 unsigned enabled_mask;
583 };
584
585 struct si_framebuffer {
586 struct pipe_framebuffer_state state;
587 unsigned colorbuf_enabled_4bit;
588 unsigned spi_shader_col_format;
589 unsigned spi_shader_col_format_alpha;
590 unsigned spi_shader_col_format_blend;
591 unsigned spi_shader_col_format_blend_alpha;
592 ubyte nr_samples:5; /* at most 16xAA */
593 ubyte log_samples:3; /* at most 4 = 16xAA */
594 ubyte nr_color_samples; /* at most 8xAA */
595 ubyte compressed_cb_mask;
596 ubyte uncompressed_cb_mask;
597 ubyte color_is_int8;
598 ubyte color_is_int10;
599 ubyte dirty_cbufs;
600 bool dirty_zsbuf;
601 bool any_dst_linear;
602 bool CB_has_shader_readable_metadata;
603 bool DB_has_shader_readable_metadata;
604 };
605
606 struct si_signed_scissor {
607 int minx;
608 int miny;
609 int maxx;
610 int maxy;
611 };
612
613 struct si_scissors {
614 unsigned dirty_mask;
615 struct pipe_scissor_state states[SI_MAX_VIEWPORTS];
616 };
617
618 struct si_viewports {
619 unsigned dirty_mask;
620 unsigned depth_range_dirty_mask;
621 struct pipe_viewport_state states[SI_MAX_VIEWPORTS];
622 struct si_signed_scissor as_scissor[SI_MAX_VIEWPORTS];
623 };
624
625 struct si_clip_state {
626 struct pipe_clip_state state;
627 bool any_nonzeros;
628 };
629
630 struct si_streamout_target {
631 struct pipe_stream_output_target b;
632
633 /* The buffer where BUFFER_FILLED_SIZE is stored. */
634 struct r600_resource *buf_filled_size;
635 unsigned buf_filled_size_offset;
636 bool buf_filled_size_valid;
637
638 unsigned stride_in_dw;
639 };
640
641 struct si_streamout {
642 bool begin_emitted;
643
644 unsigned enabled_mask;
645 unsigned num_targets;
646 struct si_streamout_target *targets[PIPE_MAX_SO_BUFFERS];
647
648 unsigned append_bitmask;
649 bool suspended;
650
651 /* External state which comes from the vertex shader;
652  * it must be set explicitly when binding a shader. */
653 uint16_t *stride_in_dw;
654 unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
655
656 /* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
657 unsigned hw_enabled_mask;
658
659 /* The state of VGT_STRMOUT_(CONFIG|EN). */
660 bool streamout_enabled;
661 bool prims_gen_query_enabled;
662 int num_prims_gen_queries;
663 };
664
665 /* A shader state consists of the shader selector, which is a constant state
666 * object shared by multiple contexts and shouldn't be modified, and
667 * the current shader variant selected for this context.
668 */
669 struct si_shader_ctx_state {
670 struct si_shader_selector *cso;
671 struct si_shader *current;
672 };
673
674 #define SI_NUM_VGT_PARAM_KEY_BITS 12
675 #define SI_NUM_VGT_PARAM_STATES (1 << SI_NUM_VGT_PARAM_KEY_BITS)
676
677 /* The IA_MULTI_VGT_PARAM key used to index the table of precomputed values.
678 * Some fields are set by state-change calls, most are set by draw_vbo.
679 */
680 union si_vgt_param_key {
681 struct {
682 #ifdef PIPE_ARCH_LITTLE_ENDIAN
683 unsigned prim:4;
684 unsigned uses_instancing:1;
685 unsigned multi_instances_smaller_than_primgroup:1;
686 unsigned primitive_restart:1;
687 unsigned count_from_stream_output:1;
688 unsigned line_stipple_enabled:1;
689 unsigned uses_tess:1;
690 unsigned tess_uses_prim_id:1;
691 unsigned uses_gs:1;
692 unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
693 #else /* PIPE_ARCH_BIG_ENDIAN */
694 unsigned _pad:32 - SI_NUM_VGT_PARAM_KEY_BITS;
695 unsigned uses_gs:1;
696 unsigned tess_uses_prim_id:1;
697 unsigned uses_tess:1;
698 unsigned line_stipple_enabled:1;
699 unsigned count_from_stream_output:1;
700 unsigned primitive_restart:1;
701 unsigned multi_instances_smaller_than_primgroup:1;
702 unsigned uses_instancing:1;
703 unsigned prim:4;
704 #endif
705 } u;
706 uint32_t index;
707 };
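
/* Illustrative sketch (assumed usage): draw_vbo fills in the key bits for the
 * current draw and uses key.index to look up the precomputed value in
 * si_context::ia_multi_vgt_param (declared below).  The draw parameters here
 * are placeholders.
 */
#if 0
   union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;

   key.u.prim = prim;
   key.u.uses_instancing = instance_count > 1;
   key.u.primitive_restart = primitive_restart;

   unsigned ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index];
#endif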
708
709 struct si_texture_handle
710 {
711 unsigned desc_slot;
712 bool desc_dirty;
713 struct pipe_sampler_view *view;
714 struct si_sampler_state sstate;
715 };
716
717 struct si_image_handle
718 {
719 unsigned desc_slot;
720 bool desc_dirty;
721 struct pipe_image_view view;
722 };
723
724 struct si_saved_cs {
725 struct pipe_reference reference;
726 struct si_context *ctx;
727 struct radeon_saved_cs gfx;
728 struct r600_resource *trace_buf;
729 unsigned trace_id;
730
731 unsigned gfx_last_dw;
732 bool flushed;
733 int64_t time_flush;
734 };
735
736 struct si_context {
737 struct pipe_context b; /* base class */
738
739 enum radeon_family family;
740 enum chip_class chip_class;
741
742 struct radeon_winsys *ws;
743 struct radeon_winsys_ctx *ctx;
744 struct radeon_cmdbuf *gfx_cs;
745 struct radeon_cmdbuf *dma_cs;
746 struct pipe_fence_handle *last_gfx_fence;
747 struct pipe_fence_handle *last_sdma_fence;
748 struct r600_resource *eop_bug_scratch;
749 struct u_upload_mgr *cached_gtt_allocator;
750 struct threaded_context *tc;
751 struct u_suballocator *allocator_zeroed_memory;
752 struct slab_child_pool pool_transfers;
753 struct slab_child_pool pool_transfers_unsync; /* for threaded_context */
754 struct pipe_device_reset_callback device_reset_callback;
755 struct u_log_context *log;
756 void *query_result_shader;
757 struct blitter_context *blitter;
758 void *custom_dsa_flush;
759 void *custom_blend_resolve;
760 void *custom_blend_fmask_decompress;
761 void *custom_blend_eliminate_fastclear;
762 void *custom_blend_dcc_decompress;
763 void *vs_blit_pos;
764 void *vs_blit_pos_layered;
765 void *vs_blit_color;
766 void *vs_blit_color_layered;
767 void *vs_blit_texcoord;
768 struct si_screen *screen;
769 struct pipe_debug_callback debug;
770 struct ac_llvm_compiler compiler; /* only non-threaded compilation */
771 struct si_shader_ctx_state fixed_func_tcs_shader;
772 struct r600_resource *wait_mem_scratch;
773 unsigned wait_mem_number;
774 uint16_t prefetch_L2_mask;
775
776 bool gfx_flush_in_progress:1;
777 bool gfx_last_ib_is_busy:1;
778 bool compute_is_busy:1;
779
780 unsigned num_gfx_cs_flushes;
781 unsigned initial_gfx_cs_size;
782 unsigned gpu_reset_counter;
783 unsigned last_dirty_tex_counter;
784 unsigned last_compressed_colortex_counter;
785 unsigned last_num_draw_calls;
786 unsigned flags; /* flush flags */
787 /* Current unaccounted memory usage. */
788 uint64_t vram;
789 uint64_t gtt;
790
791 /* Atoms (direct states). */
792 union si_state_atoms atoms;
793 unsigned dirty_atoms; /* mask */
794 /* PM4 states (precomputed immutable states) */
795 unsigned dirty_states;
796 union si_state queued;
797 union si_state emitted;
798
799 /* Atom declarations. */
800 struct si_framebuffer framebuffer;
801 unsigned sample_locs_num_samples;
802 uint16_t sample_mask;
803 unsigned last_cb_target_mask;
804 struct si_blend_color blend_color;
805 struct si_clip_state clip_state;
806 struct si_shader_data shader_pointers;
807 struct si_stencil_ref stencil_ref;
808 struct si_scissors scissors;
809 struct si_streamout streamout;
810 struct si_viewports viewports;
811 unsigned num_window_rectangles;
812 bool window_rectangles_include;
813 struct pipe_scissor_state window_rectangles[4];
814
815 /* Precomputed states. */
816 struct si_pm4_state *init_config;
817 struct si_pm4_state *init_config_gs_rings;
818 bool init_config_has_vgt_flush;
819 struct si_pm4_state *vgt_shader_config[4];
820
821 /* shaders */
822 struct si_shader_ctx_state ps_shader;
823 struct si_shader_ctx_state gs_shader;
824 struct si_shader_ctx_state vs_shader;
825 struct si_shader_ctx_state tcs_shader;
826 struct si_shader_ctx_state tes_shader;
827 struct si_cs_shader_state cs_shader_state;
828
829 /* shader information */
830 struct si_vertex_elements *vertex_elements;
831 unsigned sprite_coord_enable;
832 unsigned cs_max_waves_per_sh;
833 bool flatshade;
834 bool do_update_shaders;
835
836 /* vertex buffer descriptors */
837 uint32_t *vb_descriptors_gpu_list;
838 struct r600_resource *vb_descriptors_buffer;
839 unsigned vb_descriptors_offset;
840
841 /* shader descriptors */
842 struct si_descriptors descriptors[SI_NUM_DESCS];
843 unsigned descriptors_dirty;
844 unsigned shader_pointers_dirty;
845 unsigned shader_needs_decompress_mask;
846 struct si_buffer_resources rw_buffers;
847 struct si_buffer_resources const_and_shader_buffers[SI_NUM_SHADERS];
848 struct si_samplers samplers[SI_NUM_SHADERS];
849 struct si_images images[SI_NUM_SHADERS];
850
851 /* other shader resources */
852 struct pipe_constant_buffer null_const_buf; /* used for set_constant_buffer(NULL) on CIK */
853 struct pipe_resource *esgs_ring;
854 struct pipe_resource *gsvs_ring;
855 struct pipe_resource *tess_rings;
856 union pipe_color_union *border_color_table; /* in CPU memory, any endian */
857 struct r600_resource *border_color_buffer;
858 union pipe_color_union *border_color_map; /* in VRAM (slow access), little endian */
859 unsigned border_color_count;
860 unsigned num_vs_blit_sgprs;
861 uint32_t vs_blit_sh_data[SI_VS_BLIT_SGPRS_POS_TEXCOORD];
862 uint32_t cs_user_data[4];
863
864 /* Vertex and index buffers. */
865 bool vertex_buffers_dirty;
866 bool vertex_buffer_pointer_dirty;
867 struct pipe_vertex_buffer vertex_buffer[SI_NUM_VERTEX_BUFFERS];
868
869 /* MSAA config state. */
870 int ps_iter_samples;
871 bool ps_uses_fbfetch;
872 bool smoothing_enabled;
873
874 /* DB render state. */
875 unsigned ps_db_shader_control;
876 unsigned dbcb_copy_sample;
877 bool dbcb_depth_copy_enabled:1;
878 bool dbcb_stencil_copy_enabled:1;
879 bool db_flush_depth_inplace:1;
880 bool db_flush_stencil_inplace:1;
881 bool db_depth_clear:1;
882 bool db_depth_disable_expclear:1;
883 bool db_stencil_clear:1;
884 bool db_stencil_disable_expclear:1;
885 bool occlusion_queries_disabled:1;
886 bool generate_mipmap_for_depth:1;
887
888 /* Emitted draw state. */
889 bool gs_tri_strip_adj_fix:1;
890 bool ls_vgpr_fix:1;
891 int last_index_size;
892 int last_base_vertex;
893 int last_start_instance;
894 int last_drawid;
895 int last_sh_base_reg;
896 int last_primitive_restart_en;
897 int last_restart_index;
898 int last_prim;
899 int last_multi_vgt_param;
900 int last_rast_prim;
901 unsigned last_sc_line_stipple;
902 unsigned current_vs_state;
903 unsigned last_vs_state;
904 enum pipe_prim_type current_rast_prim; /* primitive type after TES, GS */
905
906 /* Scratch buffer */
907 struct r600_resource *scratch_buffer;
908 unsigned scratch_waves;
909 unsigned spi_tmpring_size;
910
911 struct r600_resource *compute_scratch_buffer;
912
913 /* Emitted derived tessellation state. */
914 /* Local shader (VS), or HS if LS-HS are merged. */
915 struct si_shader *last_ls;
916 struct si_shader_selector *last_tcs;
917 int last_num_tcs_input_cp;
918 int last_tes_sh_base;
919 bool last_tess_uses_primid;
920 unsigned last_num_patches;
921 int last_ls_hs_config;
922
923 /* Debug state. */
924 bool is_debug;
925 struct si_saved_cs *current_saved_cs;
926 uint64_t dmesg_timestamp;
927 unsigned apitrace_call_number;
928
929 /* Other state */
930 bool need_check_render_feedback;
931 bool decompression_enabled;
932 bool dpbb_force_off;
933 bool vs_writes_viewport_index;
934 bool vs_disables_clipping_viewport;
935
936 /* Precomputed IA_MULTI_VGT_PARAM */
937 union si_vgt_param_key ia_multi_vgt_param_key;
938 unsigned ia_multi_vgt_param[SI_NUM_VGT_PARAM_STATES];
939
940 /* Bindless descriptors. */
941 struct si_descriptors bindless_descriptors;
942 struct util_idalloc bindless_used_slots;
943 unsigned num_bindless_descriptors;
944 bool bindless_descriptors_dirty;
945 bool graphics_bindless_pointer_dirty;
946 bool compute_bindless_pointer_dirty;
947
948 /* Allocated bindless handles */
949 struct hash_table *tex_handles;
950 struct hash_table *img_handles;
951
952 /* Resident bindless handles */
953 struct util_dynarray resident_tex_handles;
954 struct util_dynarray resident_img_handles;
955
956 /* Resident bindless handles which need decompression */
957 struct util_dynarray resident_tex_needs_color_decompress;
958 struct util_dynarray resident_img_needs_color_decompress;
959 struct util_dynarray resident_tex_needs_depth_decompress;
960
961 /* Bindless state */
962 bool uses_bindless_samplers;
963 bool uses_bindless_images;
964
965 /* MSAA sample locations.
966 * The first index is the sample index.
967 * The second index is the coordinate: X, Y. */
968 struct {
969 float x1[1][2];
970 float x2[2][2];
971 float x4[4][2];
972 float x8[8][2];
973 float x16[16][2];
974 } sample_positions;
975 struct pipe_resource *sample_pos_buffer;
976
977 /* Misc stats. */
978 unsigned num_draw_calls;
979 unsigned num_decompress_calls;
980 unsigned num_mrt_draw_calls;
981 unsigned num_prim_restart_calls;
982 unsigned num_spill_draw_calls;
983 unsigned num_compute_calls;
984 unsigned num_spill_compute_calls;
985 unsigned num_dma_calls;
986 unsigned num_cp_dma_calls;
987 unsigned num_vs_flushes;
988 unsigned num_ps_flushes;
989 unsigned num_cs_flushes;
990 unsigned num_cb_cache_flushes;
991 unsigned num_db_cache_flushes;
992 unsigned num_L2_invalidates;
993 unsigned num_L2_writebacks;
994 unsigned num_resident_handles;
995 uint64_t num_alloc_tex_transfer_bytes;
996 unsigned last_tex_ps_draw_ratio; /* for query */
997
998 /* Queries. */
999 /* Maintain the list of active queries for pausing between IBs. */
1000 int num_occlusion_queries;
1001 int num_perfect_occlusion_queries;
1002 struct list_head active_queries;
1003 unsigned num_cs_dw_queries_suspend;
1004
1005 /* Render condition. */
1006 struct pipe_query *render_cond;
1007 unsigned render_cond_mode;
1008 bool render_cond_invert;
1009 bool render_cond_force_off; /* for u_blitter */
1010
1011 /* Statistics gathering for the DCC enablement heuristic. It can't be
1012 * in si_texture because si_texture can be shared by multiple
1013 * contexts. This is for back buffers only. We shouldn't get too many
1014 * of those.
1015 *
1016 * X11 DRI3 rotates among a finite set of back buffers. They should
1017 * all fit in this array. If they don't, separate DCC might never be
1018 * enabled by DCC stat gathering.
1019 */
1020 struct {
1021 struct si_texture *tex;
1022 /* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
1023 struct pipe_query *ps_stats[3];
1024 /* If all slots are used and another slot is needed,
1025 * the least recently used slot is evicted based on this. */
1026 int64_t last_use_timestamp;
1027 bool query_active;
1028 } dcc_stats[5];
1029
1030 /* Copy one resource to another using async DMA. */
1031 void (*dma_copy)(struct pipe_context *ctx,
1032 struct pipe_resource *dst,
1033 unsigned dst_level,
1034 unsigned dst_x, unsigned dst_y, unsigned dst_z,
1035 struct pipe_resource *src,
1036 unsigned src_level,
1037 const struct pipe_box *src_box);
1038
1039 struct si_tracked_regs tracked_regs;
1040 };
1041
1042 /* cik_sdma.c */
1043 void cik_init_sdma_functions(struct si_context *sctx);
1044
1045 /* si_blit.c */
1046 enum si_blitter_op /* bitmask */
1047 {
1048 SI_SAVE_TEXTURES = 1,
1049 SI_SAVE_FRAMEBUFFER = 2,
1050 SI_SAVE_FRAGMENT_STATE = 4,
1051 SI_DISABLE_RENDER_COND = 8,
1052 };
1053
1054 void si_blitter_begin(struct si_context *sctx, enum si_blitter_op op);
1055 void si_blitter_end(struct si_context *sctx);
1056 void si_init_blit_functions(struct si_context *sctx);
1057 void si_decompress_textures(struct si_context *sctx, unsigned shader_mask);
1058 void si_resource_copy_region(struct pipe_context *ctx,
1059 struct pipe_resource *dst,
1060 unsigned dst_level,
1061 unsigned dstx, unsigned dsty, unsigned dstz,
1062 struct pipe_resource *src,
1063 unsigned src_level,
1064 const struct pipe_box *src_box);
1065 void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex);
1066 void si_blit_decompress_depth(struct pipe_context *ctx,
1067 struct si_texture *texture,
1068 struct si_texture *staging,
1069 unsigned first_level, unsigned last_level,
1070 unsigned first_layer, unsigned last_layer,
1071 unsigned first_sample, unsigned last_sample);
1072
1073 /* si_buffer.c */
1074 bool si_rings_is_buffer_referenced(struct si_context *sctx,
1075 struct pb_buffer *buf,
1076 enum radeon_bo_usage usage);
1077 void *si_buffer_map_sync_with_rings(struct si_context *sctx,
1078 struct r600_resource *resource,
1079 unsigned usage);
1080 void si_init_resource_fields(struct si_screen *sscreen,
1081 struct r600_resource *res,
1082 uint64_t size, unsigned alignment);
1083 bool si_alloc_resource(struct si_screen *sscreen,
1084 struct r600_resource *res);
1085 struct pipe_resource *pipe_aligned_buffer_create(struct pipe_screen *screen,
1086 unsigned flags, unsigned usage,
1087 unsigned size, unsigned alignment);
1088 struct r600_resource *si_aligned_buffer_create(struct pipe_screen *screen,
1089 unsigned flags, unsigned usage,
1090 unsigned size, unsigned alignment);
1091 void si_replace_buffer_storage(struct pipe_context *ctx,
1092 struct pipe_resource *dst,
1093 struct pipe_resource *src);
1094 void si_init_screen_buffer_functions(struct si_screen *sscreen);
1095 void si_init_buffer_functions(struct si_context *sctx);
1096
1097 /* si_clear.c */
1098 enum pipe_format si_simplify_cb_format(enum pipe_format format);
1099 bool vi_alpha_is_on_msb(enum pipe_format format);
1100 void vi_dcc_clear_level(struct si_context *sctx,
1101 struct si_texture *tex,
1102 unsigned level, unsigned clear_value);
1103 void si_init_clear_functions(struct si_context *sctx);
1104
1105 /* si_cp_dma.c */
1106 #define SI_CPDMA_SKIP_CHECK_CS_SPACE (1 << 0) /* don't call need_cs_space */
1107 #define SI_CPDMA_SKIP_SYNC_AFTER (1 << 1) /* don't wait for DMA after the copy */
1108 #define SI_CPDMA_SKIP_SYNC_BEFORE (1 << 2) /* don't wait for DMA before the copy (RAW hazards) */
1109 #define SI_CPDMA_SKIP_GFX_SYNC (1 << 3) /* don't flush caches and don't wait for PS/CS */
1110 #define SI_CPDMA_SKIP_BO_LIST_UPDATE (1 << 4) /* don't update the BO list */
1111 #define SI_CPDMA_SKIP_ALL (SI_CPDMA_SKIP_CHECK_CS_SPACE | \
1112 SI_CPDMA_SKIP_SYNC_AFTER | \
1113 SI_CPDMA_SKIP_SYNC_BEFORE | \
1114 SI_CPDMA_SKIP_GFX_SYNC | \
1115 SI_CPDMA_SKIP_BO_LIST_UPDATE)
1116
1117 enum si_cache_policy {
1118 L2_BYPASS,
1119 L2_STREAM, /* same as SLC=1 */
1120 L2_LRU, /* same as SLC=0 */
1121 };
1122
1123 enum si_coherency {
1124 SI_COHERENCY_NONE, /* no cache flushes needed */
1125 SI_COHERENCY_SHADER,
1126 SI_COHERENCY_CB_META,
1127 };
1128
1129 void si_cp_dma_wait_for_idle(struct si_context *sctx);
1130 void si_cp_dma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
1131 uint64_t offset, uint64_t size, unsigned value,
1132 enum si_coherency coher,
1133 enum si_cache_policy cache_policy);
1134 void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
1135 uint64_t offset, uint64_t size, unsigned value,
1136 enum si_coherency coher);
1137 void si_cp_dma_copy_buffer(struct si_context *sctx,
1138 struct pipe_resource *dst, struct pipe_resource *src,
1139 uint64_t dst_offset, uint64_t src_offset, unsigned size,
1140 unsigned user_flags, enum si_coherency coher,
1141 enum si_cache_policy cache_policy);
1142 void si_copy_buffer(struct si_context *sctx,
1143 struct pipe_resource *dst, struct pipe_resource *src,
1144 uint64_t dst_offset, uint64_t src_offset, unsigned size);
1145 void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
1146 uint64_t offset, unsigned size);
1147 void cik_emit_prefetch_L2(struct si_context *sctx, bool vertex_stage_only);
1148 void si_test_gds(struct si_context *sctx);
1149 void si_init_cp_dma_functions(struct si_context *sctx);
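
/* Illustrative sketch (assumed usage): a clear whose result must be visible
 * to shaders picks SI_COHERENCY_SHADER and keeps the data in L2 (L2_LRU);
 * a caller that manages its own synchronization can pass SI_CPDMA_SKIP_*
 * flags to the copy path.  dst/src/offsets/sizes are placeholders.
 */
#if 0
   si_cp_dma_clear_buffer(sctx, dst, offset, size, 0,
                          SI_COHERENCY_SHADER, L2_LRU);

   si_cp_dma_copy_buffer(sctx, dst, src, dst_offset, src_offset, size,
                         SI_CPDMA_SKIP_SYNC_BEFORE, SI_COHERENCY_SHADER,
                         L2_STREAM);
#endif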
1150
1151 /* si_debug.c */
1152 void si_save_cs(struct radeon_winsys *ws, struct radeon_cmdbuf *cs,
1153 struct radeon_saved_cs *saved, bool get_buffer_list);
1154 void si_clear_saved_cs(struct radeon_saved_cs *saved);
1155 void si_destroy_saved_cs(struct si_saved_cs *scs);
1156 void si_auto_log_cs(void *data, struct u_log_context *log);
1157 void si_log_hw_flush(struct si_context *sctx);
1158 void si_log_draw_state(struct si_context *sctx, struct u_log_context *log);
1159 void si_log_compute_state(struct si_context *sctx, struct u_log_context *log);
1160 void si_init_debug_functions(struct si_context *sctx);
1161 void si_check_vm_faults(struct si_context *sctx,
1162 struct radeon_saved_cs *saved, enum ring_type ring);
1163 bool si_replace_shader(unsigned num, struct ac_shader_binary *binary);
1164
1165 /* si_dma.c */
1166 void si_init_dma_functions(struct si_context *sctx);
1167
1168 /* si_dma_cs.c */
1169 void si_dma_emit_timestamp(struct si_context *sctx, struct r600_resource *dst,
1170 uint64_t offset);
1171 void si_sdma_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
1172 uint64_t offset, uint64_t size, unsigned clear_value);
1173 void si_need_dma_space(struct si_context *ctx, unsigned num_dw,
1174 struct r600_resource *dst, struct r600_resource *src);
1175 void si_flush_dma_cs(struct si_context *ctx, unsigned flags,
1176 struct pipe_fence_handle **fence);
1177 void si_screen_clear_buffer(struct si_screen *sscreen, struct pipe_resource *dst,
1178 uint64_t offset, uint64_t size, unsigned value);
1179
1180 /* si_fence.c */
1181 void si_cp_release_mem(struct si_context *ctx,
1182 unsigned event, unsigned event_flags,
1183 unsigned dst_sel, unsigned int_sel, unsigned data_sel,
1184 struct r600_resource *buf, uint64_t va,
1185 uint32_t new_fence, unsigned query_type);
1186 unsigned si_cp_write_fence_dwords(struct si_screen *screen);
1187 void si_cp_wait_mem(struct si_context *ctx,
1188 uint64_t va, uint32_t ref, uint32_t mask, unsigned flags);
1189 void si_init_fence_functions(struct si_context *ctx);
1190 void si_init_screen_fence_functions(struct si_screen *screen);
1191 struct pipe_fence_handle *si_create_fence(struct pipe_context *ctx,
1192 struct tc_unflushed_batch_token *tc_token);
1193
1194 /* si_get.c */
1195 void si_init_screen_get_functions(struct si_screen *sscreen);
1196
1197 /* si_gfx_cs.c */
1198 void si_flush_gfx_cs(struct si_context *ctx, unsigned flags,
1199 struct pipe_fence_handle **fence);
1200 void si_begin_new_gfx_cs(struct si_context *ctx);
1201 void si_need_gfx_cs_space(struct si_context *ctx);
1202
1203 /* r600_gpu_load.c */
1204 void si_gpu_load_kill_thread(struct si_screen *sscreen);
1205 uint64_t si_begin_counter(struct si_screen *sscreen, unsigned type);
1206 unsigned si_end_counter(struct si_screen *sscreen, unsigned type,
1207 uint64_t begin);
1208
1209 /* si_compute.c */
1210 void si_init_compute_functions(struct si_context *sctx);
1211
1212 /* r600_perfcounters.c */
1213 void si_perfcounters_destroy(struct si_screen *sscreen);
1214
1215 /* si_perfcounters.c */
1216 void si_init_perfcounters(struct si_screen *screen);
1217
1218 /* si_pipe.c */
1219 bool si_check_device_reset(struct si_context *sctx);
1220
1221 /* si_query.c */
1222 void si_init_screen_query_functions(struct si_screen *sscreen);
1223 void si_init_query_functions(struct si_context *sctx);
1224 void si_suspend_queries(struct si_context *sctx);
1225 void si_resume_queries(struct si_context *sctx);
1226
1227 /* si_shaderlib_tgsi.c */
1228 void *si_get_blitter_vs(struct si_context *sctx, enum blitter_attrib_type type,
1229 unsigned num_layers);
1230 void *si_create_fixed_func_tcs(struct si_context *sctx);
1231 void *si_create_dma_compute_shader(struct pipe_context *ctx,
1232 unsigned num_dwords_per_thread,
1233 bool dst_stream_cache_policy, bool is_copy);
1234 void *si_create_query_result_cs(struct si_context *sctx);
1235
1236 /* si_test_dma.c */
1237 void si_test_dma(struct si_screen *sscreen);
1238
1239 /* si_test_clearbuffer.c */
1240 void si_test_dma_perf(struct si_screen *sscreen);
1241
1242 /* si_uvd.c */
1243 struct pipe_video_codec *si_uvd_create_decoder(struct pipe_context *context,
1244 const struct pipe_video_codec *templ);
1245
1246 struct pipe_video_buffer *si_video_buffer_create(struct pipe_context *pipe,
1247 const struct pipe_video_buffer *tmpl);
1248
1249 /* si_viewport.c */
1250 void si_update_vs_viewport_state(struct si_context *ctx);
1251 void si_init_viewport_functions(struct si_context *ctx);
1252
1253 /* si_texture.c */
1254 bool si_prepare_for_dma_blit(struct si_context *sctx,
1255 struct si_texture *dst,
1256 unsigned dst_level, unsigned dstx,
1257 unsigned dsty, unsigned dstz,
1258 struct si_texture *src,
1259 unsigned src_level,
1260 const struct pipe_box *src_box);
1261 void si_eliminate_fast_color_clear(struct si_context *sctx,
1262 struct si_texture *tex);
1263 void si_texture_discard_cmask(struct si_screen *sscreen,
1264 struct si_texture *tex);
1265 bool si_init_flushed_depth_texture(struct pipe_context *ctx,
1266 struct pipe_resource *texture,
1267 struct si_texture **staging);
1268 void si_print_texture_info(struct si_screen *sscreen,
1269 struct si_texture *tex, struct u_log_context *log);
1270 struct pipe_resource *si_texture_create(struct pipe_screen *screen,
1271 const struct pipe_resource *templ);
1272 bool vi_dcc_formats_compatible(enum pipe_format format1,
1273 enum pipe_format format2);
1274 bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
1275 unsigned level,
1276 enum pipe_format view_format);
1277 void vi_disable_dcc_if_incompatible_format(struct si_context *sctx,
1278 struct pipe_resource *tex,
1279 unsigned level,
1280 enum pipe_format view_format);
1281 struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
1282 struct pipe_resource *texture,
1283 const struct pipe_surface *templ,
1284 unsigned width0, unsigned height0,
1285 unsigned width, unsigned height);
1286 unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
1287 void vi_separate_dcc_try_enable(struct si_context *sctx,
1288 struct si_texture *tex);
1289 void vi_separate_dcc_start_query(struct si_context *sctx,
1290 struct si_texture *tex);
1291 void vi_separate_dcc_stop_query(struct si_context *sctx,
1292 struct si_texture *tex);
1293 void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
1294 struct si_texture *tex);
1295 bool si_texture_disable_dcc(struct si_context *sctx,
1296 struct si_texture *tex);
1297 void si_init_screen_texture_functions(struct si_screen *sscreen);
1298 void si_init_context_texture_functions(struct si_context *sctx);
1299
1300
1301 /*
1302 * common helpers
1303 */
1304
1305 static inline struct r600_resource *r600_resource(struct pipe_resource *r)
1306 {
1307 return (struct r600_resource*)r;
1308 }
1309
1310 static inline void
1311 r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
1312 {
1313 pipe_resource_reference((struct pipe_resource **)ptr,
1314 (struct pipe_resource *)res);
1315 }
1316
1317 static inline void
1318 si_texture_reference(struct si_texture **ptr, struct si_texture *res)
1319 {
1320 pipe_resource_reference((struct pipe_resource **)ptr, &res->buffer.b.b);
1321 }
1322
1323 static inline bool
1324 vi_dcc_enabled(struct si_texture *tex, unsigned level)
1325 {
1326 return tex->dcc_offset && level < tex->surface.num_dcc_levels;
1327 }
1328
1329 static inline unsigned
1330 si_tile_mode_index(struct si_texture *tex, unsigned level, bool stencil)
1331 {
1332 if (stencil)
1333 return tex->surface.u.legacy.stencil_tiling_index[level];
1334 else
1335 return tex->surface.u.legacy.tiling_index[level];
1336 }
1337
1338 static inline void
1339 si_context_add_resource_size(struct si_context *sctx, struct pipe_resource *r)
1340 {
1341 if (r) {
1342 /* Add memory usage for need_gfx_cs_space */
1343 sctx->vram += r600_resource(r)->vram_usage;
1344 sctx->gtt += r600_resource(r)->gart_usage;
1345 }
1346 }
1347
1348 static inline void
1349 si_invalidate_draw_sh_constants(struct si_context *sctx)
1350 {
1351 sctx->last_base_vertex = SI_BASE_VERTEX_UNKNOWN;
1352 }
1353
1354 static inline unsigned
1355 si_get_atom_bit(struct si_context *sctx, struct si_atom *atom)
1356 {
1357 return 1 << (atom - sctx->atoms.array);
1358 }
1359
1360 static inline void
1361 si_set_atom_dirty(struct si_context *sctx, struct si_atom *atom, bool dirty)
1362 {
1363 unsigned bit = si_get_atom_bit(sctx, atom);
1364
1365 if (dirty)
1366 sctx->dirty_atoms |= bit;
1367 else
1368 sctx->dirty_atoms &= ~bit;
1369 }
1370
1371 static inline bool
1372 si_is_atom_dirty(struct si_context *sctx, struct si_atom *atom)
1373 {
1374 return (sctx->dirty_atoms & si_get_atom_bit(sctx, atom)) != 0;
1375 }
1376
1377 static inline void
1378 si_mark_atom_dirty(struct si_context *sctx, struct si_atom *atom)
1379 {
1380 si_set_atom_dirty(sctx, atom, true);
1381 }
1382
1383 static inline struct si_shader_ctx_state *si_get_vs(struct si_context *sctx)
1384 {
1385 if (sctx->gs_shader.cso)
1386 return &sctx->gs_shader;
1387 if (sctx->tes_shader.cso)
1388 return &sctx->tes_shader;
1389
1390 return &sctx->vs_shader;
1391 }
1392
1393 static inline struct tgsi_shader_info *si_get_vs_info(struct si_context *sctx)
1394 {
1395 struct si_shader_ctx_state *vs = si_get_vs(sctx);
1396
1397 return vs->cso ? &vs->cso->info : NULL;
1398 }
1399
1400 static inline struct si_shader* si_get_vs_state(struct si_context *sctx)
1401 {
1402 if (sctx->gs_shader.cso)
1403 return sctx->gs_shader.cso->gs_copy_shader;
1404
1405 struct si_shader_ctx_state *vs = si_get_vs(sctx);
1406 return vs->current ? vs->current : NULL;
1407 }
1408
1409 static inline bool si_can_dump_shader(struct si_screen *sscreen,
1410 unsigned processor)
1411 {
1412 return sscreen->debug_flags & (1 << processor);
1413 }
1414
1415 static inline bool si_get_strmout_en(struct si_context *sctx)
1416 {
1417 return sctx->streamout.streamout_enabled ||
1418 sctx->streamout.prims_gen_query_enabled;
1419 }
1420
1421 static inline unsigned
1422 si_optimal_tcc_alignment(struct si_context *sctx, unsigned upload_size)
1423 {
1424 unsigned alignment, tcc_cache_line_size;
1425
1426 /* If the upload size is less than the cache line size (e.g. 16, 32),
1427 * the whole thing will fit into a cache line if we align it to its size.
1428 * The idea is that multiple small uploads can share a cache line.
1429 * If the upload size is greater, align it to the cache line size.
1430 */
1431 alignment = util_next_power_of_two(upload_size);
1432 tcc_cache_line_size = sctx->screen->info.tcc_cache_line_size;
1433 return MIN2(alignment, tcc_cache_line_size);
1434 }
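
/* Illustrative sketch (assumed usage): upload helpers would pass the returned
 * alignment to u_upload_alloc() from util/u_upload_mgr.h; size/offset/buffer/
 * ptr are placeholders.
 */
#if 0
   u_upload_alloc(sctx->b.const_uploader, 0, size,
                  si_optimal_tcc_alignment(sctx, size),
                  &offset, (struct pipe_resource **)&buffer, &ptr);
#endif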
1435
1436 static inline void
1437 si_saved_cs_reference(struct si_saved_cs **dst, struct si_saved_cs *src)
1438 {
1439 if (pipe_reference(&(*dst)->reference, &src->reference))
1440 si_destroy_saved_cs(*dst);
1441
1442 *dst = src;
1443 }
1444
1445 static inline void
1446 si_make_CB_shader_coherent(struct si_context *sctx, unsigned num_samples,
1447 bool shaders_read_metadata)
1448 {
1449 sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_CB |
1450 SI_CONTEXT_INV_VMEM_L1;
1451
1452 if (sctx->chip_class >= GFX9) {
1453 /* Single-sample color is coherent with shaders on GFX9, but
1454 		 * L2 metadata must be flushed if shaders read metadata
1455 		 * (DCC, CMASK).
1456 */
1457 if (num_samples >= 2)
1458 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1459 else if (shaders_read_metadata)
1460 sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
1461 } else {
1462 /* SI-CI-VI */
1463 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1464 }
1465 }
1466
1467 static inline void
1468 si_make_DB_shader_coherent(struct si_context *sctx, unsigned num_samples,
1469 bool include_stencil, bool shaders_read_metadata)
1470 {
1471 sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB |
1472 SI_CONTEXT_INV_VMEM_L1;
1473
1474 if (sctx->chip_class >= GFX9) {
1475 /* Single-sample depth (not stencil) is coherent with shaders
1476 * on GFX9, but L2 metadata must be flushed if shaders read
1477 * metadata.
1478 */
1479 if (num_samples >= 2 || include_stencil)
1480 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1481 else if (shaders_read_metadata)
1482 sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
1483 } else {
1484 /* SI-CI-VI */
1485 sctx->flags |= SI_CONTEXT_INV_GLOBAL_L2;
1486 }
1487 }
1488
1489 static inline bool
1490 si_can_sample_zs(struct si_texture *tex, bool stencil_sampler)
1491 {
1492 return (stencil_sampler && tex->can_sample_s) ||
1493 (!stencil_sampler && tex->can_sample_z);
1494 }
1495
1496 static inline bool
1497 si_htile_enabled(struct si_texture *tex, unsigned level)
1498 {
1499 return tex->htile_offset && level == 0;
1500 }
1501
1502 static inline bool
1503 vi_tc_compat_htile_enabled(struct si_texture *tex, unsigned level)
1504 {
1505 assert(!tex->tc_compatible_htile || tex->htile_offset);
1506 return tex->tc_compatible_htile && level == 0;
1507 }
1508
1509 static inline unsigned si_get_ps_iter_samples(struct si_context *sctx)
1510 {
1511 if (sctx->ps_uses_fbfetch)
1512 return sctx->framebuffer.nr_color_samples;
1513
1514 return MIN2(sctx->ps_iter_samples, sctx->framebuffer.nr_color_samples);
1515 }
1516
1517 static inline unsigned si_get_total_colormask(struct si_context *sctx)
1518 {
1519 if (sctx->queued.named.rasterizer->rasterizer_discard)
1520 return 0;
1521
1522 struct si_shader_selector *ps = sctx->ps_shader.cso;
1523 if (!ps)
1524 return 0;
1525
1526 unsigned colormask = sctx->framebuffer.colorbuf_enabled_4bit &
1527 sctx->queued.named.blend->cb_target_mask;
1528
1529 if (!ps->info.properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
1530 colormask &= ps->colors_written_4bit;
1531 else if (!ps->colors_written_4bit)
1532 colormask = 0; /* color0 writes all cbufs, but it's not written */
1533
1534 return colormask;
1535 }
1536
1537 #define UTIL_ALL_PRIM_LINE_MODES ((1 << PIPE_PRIM_LINES) | \
1538 (1 << PIPE_PRIM_LINE_LOOP) | \
1539 (1 << PIPE_PRIM_LINE_STRIP) | \
1540 (1 << PIPE_PRIM_LINES_ADJACENCY) | \
1541 (1 << PIPE_PRIM_LINE_STRIP_ADJACENCY))
1542
1543 static inline bool util_prim_is_lines(unsigned prim)
1544 {
1545 return ((1 << prim) & UTIL_ALL_PRIM_LINE_MODES) != 0;
1546 }
1547
1548 static inline bool util_prim_is_points_or_lines(unsigned prim)
1549 {
1550 return ((1 << prim) & (UTIL_ALL_PRIM_LINE_MODES |
1551 (1 << PIPE_PRIM_POINTS))) != 0;
1552 }
1553
1554 /**
1555 * Return true if there is enough memory in VRAM and GTT for the buffers
1556 * added so far.
1557 *
1558 * \param vram VRAM memory size not added to the buffer list yet
1559 * \param gtt GTT memory size not added to the buffer list yet
1560 */
1561 static inline bool
1562 radeon_cs_memory_below_limit(struct si_screen *screen,
1563 struct radeon_cmdbuf *cs,
1564 uint64_t vram, uint64_t gtt)
1565 {
1566 vram += cs->used_vram;
1567 gtt += cs->used_gart;
1568
1569 /* Anything that goes above the VRAM size should go to GTT. */
1570 if (vram > screen->info.vram_size)
1571 gtt += vram - screen->info.vram_size;
1572
1573 /* Now we just need to check if we have enough GTT. */
1574 return gtt < screen->info.gart_size * 0.7;
1575 }
1576
1577 /**
1578 * Add a buffer to the buffer list for the given command stream (CS).
1579 *
1580 * All buffers used by a CS must be added to the list. This tells the kernel
1581 * driver which buffers are used by GPU commands. Other buffers can
1582 * be swapped out (not accessible) during execution.
1583 *
1584 * The buffer list becomes empty after every context flush and must be
1585 * rebuilt.
1586 */
1587 static inline void radeon_add_to_buffer_list(struct si_context *sctx,
1588 struct radeon_cmdbuf *cs,
1589 struct r600_resource *rbo,
1590 enum radeon_bo_usage usage,
1591 enum radeon_bo_priority priority)
1592 {
1593 assert(usage);
1594 sctx->ws->cs_add_buffer(
1595 cs, rbo->buf,
1596 (enum radeon_bo_usage)(usage | RADEON_USAGE_SYNCHRONIZED),
1597 rbo->domains, priority);
1598 }
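
/* Illustrative sketch (assumed usage): an index buffer read by the GPU would
 * be added to the gfx CS like this before the draw packet is emitted;
 * "indexbuf" is a placeholder pipe_resource.
 */
#if 0
   radeon_add_to_buffer_list(sctx, sctx->gfx_cs, r600_resource(indexbuf),
                             RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
#endif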
1599
1600 /**
1601 * Same as above, but also checks memory usage and flushes the context
1602 * accordingly.
1603 *
1604 * When this SHOULD NOT be used:
1605 *
1606 * - if si_context_add_resource_size has been called for the buffer
1607 * followed by *_need_cs_space for checking the memory usage
1608 *
1609 * - if si_need_dma_space has been called for the buffer
1610 *
1611 * - when emitting state packets and draw packets (because preceding packets
1612 * can't be re-emitted at that point)
1613 *
1614 * - if shader resource "enabled_mask" is not up-to-date or there is
1615 * a different constraint disallowing a context flush
1616 */
1617 static inline void
1618 radeon_add_to_gfx_buffer_list_check_mem(struct si_context *sctx,
1619 struct r600_resource *rbo,
1620 enum radeon_bo_usage usage,
1621 enum radeon_bo_priority priority,
1622 bool check_mem)
1623 {
1624 if (check_mem &&
1625 !radeon_cs_memory_below_limit(sctx->screen, sctx->gfx_cs,
1626 sctx->vram + rbo->vram_usage,
1627 sctx->gtt + rbo->gart_usage))
1628 si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
1629
1630 radeon_add_to_buffer_list(sctx, sctx->gfx_cs, rbo, usage, priority);
1631 }
1632
1633 #define PRINT_ERR(fmt, args...) \
1634 fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)
1635
1636 #endif