radeonsi: clean up r600_surface
[mesa.git] / src / gallium / drivers / radeon / r600_pipe_common.h
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/**
 * This file contains common screen and context structures and functions
 * for r600g and radeonsi.
 */

#ifndef R600_PIPE_COMMON_H
#define R600_PIPE_COMMON_H

#include <stdio.h>

#include "amd/common/ac_binary.h"

#include "radeon/radeon_winsys.h"

#include "util/disk_cache.h"
#include "util/u_blitter.h"
#include "util/list.h"
#include "util/u_range.h"
#include "util/slab.h"
#include "util/u_suballoc.h"
#include "util/u_transfer.h"
#include "util/u_threaded_context.h"

struct u_log_context;

#define ATI_VENDOR_ID 0x1002

#define R600_RESOURCE_FLAG_TRANSFER (PIPE_RESOURCE_FLAG_DRV_PRIV << 0)
#define R600_RESOURCE_FLAG_FLUSHED_DEPTH (PIPE_RESOURCE_FLAG_DRV_PRIV << 1)
#define R600_RESOURCE_FLAG_FORCE_TILING (PIPE_RESOURCE_FLAG_DRV_PRIV << 2)
#define R600_RESOURCE_FLAG_DISABLE_DCC (PIPE_RESOURCE_FLAG_DRV_PRIV << 3)
#define R600_RESOURCE_FLAG_UNMAPPABLE (PIPE_RESOURCE_FLAG_DRV_PRIV << 4)

#define R600_CONTEXT_STREAMOUT_FLUSH (1u << 0)
/* Pipeline & streamout query controls. */
#define R600_CONTEXT_START_PIPELINE_STATS (1u << 1)
#define R600_CONTEXT_STOP_PIPELINE_STATS (1u << 2)
#define R600_CONTEXT_FLUSH_FOR_RENDER_COND (1u << 3)
#define R600_CONTEXT_PRIVATE_FLAG (1u << 4)

/* special primitive types */
#define R600_PRIM_RECTANGLE_LIST PIPE_PRIM_MAX

#define R600_NOT_QUERY 0xffffffff

/* Debug flags. */
enum {
        /* Shader logging options: */
        DBG_VS = PIPE_SHADER_VERTEX,
        DBG_PS = PIPE_SHADER_FRAGMENT,
        DBG_GS = PIPE_SHADER_GEOMETRY,
        DBG_TCS = PIPE_SHADER_TESS_CTRL,
        DBG_TES = PIPE_SHADER_TESS_EVAL,
        DBG_CS = PIPE_SHADER_COMPUTE,
        DBG_NO_IR,
        DBG_NO_TGSI,
        DBG_NO_ASM,
        DBG_PREOPT_IR,

        /* Shader compiler options the shader cache should be aware of: */
        DBG_FS_CORRECT_DERIVS_AFTER_KILL,
        DBG_UNSAFE_MATH,
        DBG_SI_SCHED,

        /* Shader compiler options (with no effect on the shader cache): */
        DBG_CHECK_IR,
        DBG_PRECOMPILE,
        DBG_NIR,
        DBG_MONOLITHIC_SHADERS,
        DBG_NO_OPT_VARIANT,

        /* Information logging options: */
        DBG_INFO,
        DBG_TEX,
        DBG_COMPUTE,
        DBG_VM,

        /* Driver options: */
        DBG_FORCE_DMA,
        DBG_NO_ASYNC_DMA,
        DBG_NO_WC,
        DBG_CHECK_VM,
        DBG_RESERVE_VMID,

        /* 3D engine options: */
        DBG_SWITCH_ON_EOP,
        DBG_NO_OUT_OF_ORDER,
        DBG_NO_DPBB,
        DBG_NO_DFSM,
        DBG_DPBB,
        DBG_DFSM,
        DBG_NO_HYPERZ,
        DBG_NO_RB_PLUS,
        DBG_NO_2D_TILING,
        DBG_NO_TILING,
        DBG_NO_DCC,
        DBG_NO_DCC_CLEAR,
        DBG_NO_DCC_FB,

        /* Tests: */
        DBG_TEST_DMA,
        DBG_TEST_VMFAULT_CP,
        DBG_TEST_VMFAULT_SDMA,
        DBG_TEST_VMFAULT_SHADER,
};

#define DBG_ALL_SHADERS (((1 << (DBG_CS + 1)) - 1))
#define DBG(name) (1ull << DBG_##name)
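
/* DBG_ALL_SHADERS masks all of the per-stage bits above. Illustrative
 * usage (assumed variable names, not a specific call site): testing an
 * option set via R600_DEBUG, e.g. whether DCC is disabled:
 *
 *   if (rscreen->debug_flags & DBG(NO_DCC))
 *           return;
 */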

#define R600_MAP_BUFFER_ALIGNMENT 64

#define SI_MAX_VARIABLE_THREADS_PER_BLOCK 1024

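/* How coherent the result of a buffer clear/copy must be: SHADER makes
 * it visible to shader loads, CB_META also to the color block's metadata
 * caches (e.g. after clearing CMASK/DCC). The exact flush flags are
 * chosen by the clear_buffer implementations. */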
enum r600_coherency {
        R600_COHERENCY_NONE, /* no cache flushes needed */
        R600_COHERENCY_SHADER,
        R600_COHERENCY_CB_META,
};

#ifdef PIPE_ARCH_BIG_ENDIAN
#define R600_BIG_ENDIAN 1
#else
#define R600_BIG_ENDIAN 0
#endif

struct r600_common_context;
struct r600_perfcounters;
struct tgsi_shader_info;
struct r600_qbo_state;

void si_radeon_shader_binary_init(struct ac_shader_binary *b);
void si_radeon_shader_binary_clean(struct ac_shader_binary *b);

/* Only 32-bit buffer allocations are supported; gallium doesn't support
 * larger allocations at the moment.
 */
struct r600_resource {
        struct threaded_resource b;

        /* Winsys objects. */
        struct pb_buffer *buf;
        uint64_t gpu_address;
        /* Memory usage if the buffer placement is optimal. */
        uint64_t vram_usage;
        uint64_t gart_usage;

        /* Resource properties. */
        uint64_t bo_size;
        unsigned bo_alignment;
        enum radeon_bo_domain domains;
        enum radeon_bo_flag flags;
        unsigned bind_history;
        int max_forced_staging_uploads;

        /* The buffer range which is initialized (with a write transfer,
         * streamout, DMA, or as a random access target). The rest of
         * the buffer is considered invalid and can be mapped unsynchronized.
         *
         * This allows unsynchronized mapping of a buffer range which hasn't
         * been used yet. It's for applications which forget to use
         * the unsynchronized map flag and expect the driver to figure it out
         * (see the illustrative sketch after this struct).
         */
        struct util_range valid_buffer_range;

        /* For buffers only. This indicates that a write operation has been
         * performed by TC L2, but the cache hasn't been flushed.
         * Any hw block which doesn't use or bypasses TC L2 should check this
         * flag and flush the cache before using the buffer.
         *
         * For example, TC L2 must be flushed if a buffer which has been
         * modified by a shader store instruction is about to be used as
         * an index buffer. The reason is that VGT DMA index fetching doesn't
         * use TC L2.
         */
        bool TC_L2_dirty;

        /* Whether the resource has been exported via resource_get_handle. */
        unsigned external_usage; /* PIPE_HANDLE_USAGE_* */

        /* Whether this resource is referenced by bindless handles. */
        bool texture_handle_allocated;
        bool image_handle_allocated;
};
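
/* Illustrative sketch of how valid_buffer_range is used (assumed names,
 * simplified from the real transfer code): a map can skip synchronization
 * when the requested range has never been written:
 *
 *   if (!util_ranges_intersect(&rbuffer->valid_buffer_range,
 *                              box->x, box->x + box->width))
 *           usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
 */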

struct r600_transfer {
        struct threaded_transfer b;
        struct r600_resource *staging;
        unsigned offset;
};

struct r600_fmask_info {
        uint64_t offset;
        uint64_t size;
        unsigned alignment;
        unsigned pitch_in_pixels;
        unsigned bank_height;
        unsigned slice_tile_max;
        unsigned tile_mode_index;
        unsigned tile_swizzle;
};

struct r600_cmask_info {
        uint64_t offset;
        uint64_t size;
        unsigned alignment;
        unsigned slice_tile_max;
        uint64_t base_address_reg;
};

struct r600_texture {
        struct r600_resource resource;

        uint64_t size;
        unsigned num_level0_transfers;
        enum pipe_format db_render_format;
        bool is_depth;
        bool db_compatible;
        bool can_sample_z;
        bool can_sample_s;
        unsigned dirty_level_mask; /* each bit says if that mipmap is compressed */
        unsigned stencil_dirty_level_mask; /* each bit says if that mipmap is compressed */
        struct r600_texture *flushed_depth_texture;
        struct radeon_surf surface;

        /* Colorbuffer compression and fast clear. */
        struct r600_fmask_info fmask;
        struct r600_cmask_info cmask;
        struct r600_resource *cmask_buffer;
        uint64_t dcc_offset; /* 0 = disabled */
        unsigned cb_color_info; /* fast clear enable bit */
        unsigned color_clear_value[2];
        unsigned last_msaa_resolve_target_micro_mode;

        /* Depth buffer compression and fast clear. */
        uint64_t htile_offset;
        bool tc_compatible_htile;
        bool depth_cleared; /* if it was cleared at least once */
        float depth_clear_value;
        bool stencil_cleared; /* if it was cleared at least once */
        uint8_t stencil_clear_value;
        bool upgraded_depth; /* upgraded from unorm to Z32_FLOAT */

        /* Whether the texture is a displayable back buffer and needs DCC
         * decompression, which is expensive. Therefore, it's enabled only
         * if statistics suggest that it will pay off and it's allocated
         * separately. It can't be bound as a sampler by apps. Limited to
         * target == 2D and last_level == 0. If enabled, dcc_offset contains
         * the absolute GPUVM address, not the relative one.
         */
        struct r600_resource *dcc_separate_buffer;
        /* When DCC is temporarily disabled, the separate buffer is here. */
        struct r600_resource *last_dcc_separate_buffer;
        /* We need to track DCC dirtiness, because st/dri usually calls
         * flush_resource twice per frame (not a bug) and we don't want to
         * decompress DCC twice. Also, the dirty tracking must be done even
         * if DCC isn't used, because it's required by the DCC usage analysis
         * for a possible future enablement.
         */
        bool separate_dcc_dirty;
        /* Statistics gathering for the DCC enablement heuristic. */
        bool dcc_gather_statistics;
        /* Estimate of how much this color buffer is written to in units of
         * full-screen draws: ps_invocations / (width * height)
         * Shader kills, late Z, and blending with trivial discards make it
         * inaccurate (we need to count CB updates, not PS invocations).
         */
        unsigned ps_draw_ratio;
        /* The number of clears since the last DCC usage analysis. */
        unsigned num_slow_clears;

        /* Counter that should be non-zero if the texture is bound to a
         * framebuffer. Implemented in radeonsi only.
         */
        uint32_t framebuffers_bound;
};

struct r600_surface {
        struct pipe_surface base;

        /* These can vary with block-compressed textures. */
        unsigned width0;
        unsigned height0;

        bool color_initialized;
        bool depth_initialized;

        /* Misc. color flags. */
        bool color_is_int8;
        bool color_is_int10;
        bool dcc_incompatible;

        /* Color registers. */
        unsigned cb_color_info;
        unsigned cb_color_view;
        unsigned cb_color_attrib;
        unsigned cb_color_attrib2; /* GFX9 and later */
        unsigned cb_dcc_control; /* VI and later */
        unsigned spi_shader_col_format; /* no blending, no alpha-to-coverage. */
        unsigned spi_shader_col_format_alpha; /* alpha-to-coverage */
        unsigned spi_shader_col_format_blend; /* blending without alpha. */
        unsigned spi_shader_col_format_blend_alpha; /* blending with alpha. */

        /* DB registers. */
        uint64_t db_depth_base; /* DB_Z_READ/WRITE_BASE */
        uint64_t db_stencil_base;
        uint64_t db_htile_data_base;
        unsigned db_depth_info;
        unsigned db_z_info;
        unsigned db_z_info2; /* GFX9+ */
        unsigned db_depth_view;
        unsigned db_depth_size;
        unsigned db_depth_slice;
        unsigned db_stencil_info;
        unsigned db_stencil_info2; /* GFX9+ */
        unsigned db_htile_surface;
};

struct r600_mmio_counter {
        unsigned busy;
        unsigned idle;
};

union r600_mmio_counters {
        struct {
                /* For global GPU load including SDMA. */
                struct r600_mmio_counter gpu;

                /* GRBM_STATUS */
                struct r600_mmio_counter spi;
                struct r600_mmio_counter gui;
                struct r600_mmio_counter ta;
                struct r600_mmio_counter gds;
                struct r600_mmio_counter vgt;
                struct r600_mmio_counter ia;
                struct r600_mmio_counter sx;
                struct r600_mmio_counter wd;
                struct r600_mmio_counter bci;
                struct r600_mmio_counter sc;
                struct r600_mmio_counter pa;
                struct r600_mmio_counter db;
                struct r600_mmio_counter cp;
                struct r600_mmio_counter cb;

                /* SRBM_STATUS2 */
                struct r600_mmio_counter sdma;

                /* CP_STAT */
                struct r600_mmio_counter pfp;
                struct r600_mmio_counter meq;
                struct r600_mmio_counter me;
                struct r600_mmio_counter surf_sync;
                struct r600_mmio_counter cp_dma;
                struct r600_mmio_counter scratch_ram;
        } named;
        unsigned array[0];
};
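
/* The zero-sized "array" member aliases the named counters, which lets
 * code walk all of them generically. Illustrative example (assumed
 * variable names):
 *
 *   union r600_mmio_counters *c = &rscreen->mmio_counters;
 *   unsigned num = sizeof(c->named) / sizeof(unsigned);
 *
 *   for (unsigned i = 0; i < num; i++)
 *           c->array[i] = 0;
 */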

struct r600_memory_object {
        struct pipe_memory_object b;
        struct pb_buffer *buf;
        uint32_t stride;
        uint32_t offset;
};

struct r600_common_screen {
        struct pipe_screen b;
        struct radeon_winsys *ws;
        enum radeon_family family;
        enum chip_class chip_class;
        struct radeon_info info;
        uint64_t debug_flags;
        bool has_cp_dma;
        bool has_streamout;
        bool has_rbplus; /* if RB+ registers exist */
        bool rbplus_allowed; /* if RB+ is allowed */

        struct disk_cache *disk_shader_cache;

        struct slab_parent_pool pool_transfers;

        /* Texture filter settings. */
        int force_aniso; /* -1 = disabled */

        /* Auxiliary context. Mainly used to initialize resources.
         * It must be locked prior to use and flushed before unlocking. */
        struct pipe_context *aux_context;
        mtx_t aux_context_lock;

        /* This must be in the screen, because UE4 uses one context for
         * compilation and another one for rendering.
         */
        unsigned num_compilations;
        /* Along with ST_DEBUG=precompile, this should show if applications
         * are loading shaders on demand. This is a monotonic counter.
         */
        unsigned num_shaders_created;
        unsigned num_shader_cache_hits;

        /* GPU load thread. */
        mtx_t gpu_load_mutex;
        thrd_t gpu_load_thread;
        union r600_mmio_counters mmio_counters;
        volatile unsigned gpu_load_stop_thread; /* bool */

        char renderer_string[100];

        /* Performance counters. */
        struct r600_perfcounters *perfcounters;

        /* If pipe_screen wants to recompute and re-emit the framebuffer,
         * sampler, and image states of all contexts, it should atomically
         * increment this.
         *
         * Each context will compare this with its own last known value of
         * the counter before drawing and re-emit the states accordingly.
         */
        unsigned dirty_tex_counter;

        /* Atomically increment this counter when an existing texture's
         * metadata is enabled or disabled in a way that requires changing
         * contexts' compressed texture binding masks.
         */
        unsigned compressed_colortex_counter;

        struct {
                /* Context flags to set so that all writes from earlier jobs
                 * in the CP are seen by L2 clients.
                 */
                unsigned cp_to_L2;

                /* Context flags to set so that all writes from earlier jobs
                 * that end in L2 are seen by CP.
                 */
                unsigned L2_to_cp;

                /* Context flags to set so that all writes from earlier
                 * compute jobs are seen by L2 clients.
                 */
                unsigned compute_to_L2;
        } barrier_flags;

        void (*query_opaque_metadata)(struct r600_common_screen *rscreen,
                                      struct r600_texture *rtex,
                                      struct radeon_bo_metadata *md);

        void (*apply_opaque_metadata)(struct r600_common_screen *rscreen,
                                      struct r600_texture *rtex,
                                      struct radeon_bo_metadata *md);
};

/* This encapsulates a state or an operation which can be emitted into the
 * GPU command stream. */
struct r600_atom {
        void (*emit)(struct r600_common_context *ctx, struct r600_atom *state);
        unsigned short id;
};
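
/* Illustrative flow (assumed call sites): state code marks an atom dirty
 * and the draw path later invokes its emit hook:
 *
 *   rctx->set_atom_dirty(rctx, &rctx->render_cond_atom, true);
 *   ...
 *   atom->emit(rctx, atom);
 */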

struct r600_ring {
        struct radeon_winsys_cs *cs;
        void (*flush)(void *ctx, unsigned flags,
                      struct pipe_fence_handle **fence);
};

/* Saved CS data for debugging features. */
struct radeon_saved_cs {
        uint32_t *ib;
        unsigned num_dw;

        struct radeon_bo_list_item *bo_list;
        unsigned bo_count;
};

struct r600_common_context {
        struct pipe_context b; /* base class */

        struct r600_common_screen *screen;
        struct radeon_winsys *ws;
        struct radeon_winsys_ctx *ctx;
        enum radeon_family family;
        enum chip_class chip_class;
        struct r600_ring gfx;
        struct r600_ring dma;
        struct pipe_fence_handle *last_gfx_fence;
        struct pipe_fence_handle *last_sdma_fence;
        struct r600_resource *eop_bug_scratch;
        unsigned num_gfx_cs_flushes;
        unsigned initial_gfx_cs_size;
        unsigned gpu_reset_counter;
        unsigned last_dirty_tex_counter;
        unsigned last_compressed_colortex_counter;
        unsigned last_num_draw_calls;

        struct threaded_context *tc;
        struct u_suballocator *allocator_zeroed_memory;
        struct slab_child_pool pool_transfers;
        struct slab_child_pool pool_transfers_unsync; /* for threaded_context */

        /* Current unaccounted memory usage. */
        uint64_t vram;
        uint64_t gtt;

        /* Additional context states. */
        unsigned flags; /* flush flags */

        /* Queries. */
        /* Maintain the list of active queries for pausing between IBs. */
        int num_occlusion_queries;
        int num_perfect_occlusion_queries;
        struct list_head active_queries;
        unsigned num_cs_dw_queries_suspend;
        /* Misc stats. */
        unsigned num_draw_calls;
        unsigned num_decompress_calls;
        unsigned num_mrt_draw_calls;
        unsigned num_prim_restart_calls;
        unsigned num_spill_draw_calls;
        unsigned num_compute_calls;
        unsigned num_spill_compute_calls;
        unsigned num_dma_calls;
        unsigned num_cp_dma_calls;
        unsigned num_vs_flushes;
        unsigned num_ps_flushes;
        unsigned num_cs_flushes;
        unsigned num_cb_cache_flushes;
        unsigned num_db_cache_flushes;
        unsigned num_L2_invalidates;
        unsigned num_L2_writebacks;
        unsigned num_resident_handles;
        uint64_t num_alloc_tex_transfer_bytes;
        unsigned last_tex_ps_draw_ratio; /* for query */

        /* Render condition. */
        struct r600_atom render_cond_atom;
        struct pipe_query *render_cond;
        unsigned render_cond_mode;
        bool render_cond_invert;
        bool render_cond_force_off; /* for u_blitter */

        /* Statistics gathering for the DCC enablement heuristic. It can't be
         * in r600_texture because r600_texture can be shared by multiple
         * contexts. This is for back buffers only. We shouldn't get too many
         * of those.
         *
         * X11 DRI3 rotates among a finite set of back buffers. They should
         * all fit in this array. If they don't, separate DCC might never be
         * enabled by DCC stat gathering.
         */
        struct {
                struct r600_texture *tex;
                /* Query queue: 0 = usually active, 1 = waiting, 2 = readback. */
                struct pipe_query *ps_stats[3];
                /* If all slots are used and another slot is needed,
                 * the least recently used slot is evicted based on this. */
                int64_t last_use_timestamp;
                bool query_active;
        } dcc_stats[5];

        struct pipe_device_reset_callback device_reset_callback;
        struct u_log_context *log;

        void *query_result_shader;

        /* Copy one resource to another using async DMA. */
        void (*dma_copy)(struct pipe_context *ctx,
                         struct pipe_resource *dst,
                         unsigned dst_level,
                         unsigned dst_x, unsigned dst_y, unsigned dst_z,
                         struct pipe_resource *src,
                         unsigned src_level,
                         const struct pipe_box *src_box);

        void (*dma_clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
                                 uint64_t offset, uint64_t size, unsigned value);

        void (*clear_buffer)(struct pipe_context *ctx, struct pipe_resource *dst,
                             uint64_t offset, uint64_t size, unsigned value,
                             enum r600_coherency coher);

        void (*blit_decompress_depth)(struct pipe_context *ctx,
                                      struct r600_texture *texture,
                                      struct r600_texture *staging,
                                      unsigned first_level, unsigned last_level,
                                      unsigned first_layer, unsigned last_layer,
                                      unsigned first_sample, unsigned last_sample);

        void (*decompress_dcc)(struct pipe_context *ctx,
                               struct r600_texture *rtex);

        /* Reallocate the buffer and update all resource bindings where
         * the buffer is bound, including all resource descriptors. */
        void (*invalidate_buffer)(struct pipe_context *ctx, struct pipe_resource *buf);

        /* Update all resource bindings where the buffer is bound, including
         * all resource descriptors. This is invalidate_buffer without
         * the invalidation. */
        void (*rebind_buffer)(struct pipe_context *ctx, struct pipe_resource *buf,
                              uint64_t old_gpu_address);

        /* Enable or disable occlusion queries. */
        void (*set_occlusion_query_state)(struct pipe_context *ctx,
                                          bool old_enable,
                                          bool old_perfect_enable);

        void (*save_qbo_state)(struct pipe_context *ctx, struct r600_qbo_state *st);

        /* This ensures there is enough space in the command stream. */
        void (*need_gfx_cs_space)(struct pipe_context *ctx, unsigned num_dw,
                                  bool include_draw_vbo);

        void (*set_atom_dirty)(struct r600_common_context *ctx,
                               struct r600_atom *atom, bool dirty);

        void (*check_vm_faults)(struct r600_common_context *ctx,
                                struct radeon_saved_cs *saved,
                                enum ring_type ring);
};

/* r600_buffer_common.c */
bool si_rings_is_buffer_referenced(struct r600_common_context *ctx,
                                   struct pb_buffer *buf,
                                   enum radeon_bo_usage usage);
void *si_buffer_map_sync_with_rings(struct r600_common_context *ctx,
                                    struct r600_resource *resource,
                                    unsigned usage);
void si_buffer_subdata(struct pipe_context *ctx,
                       struct pipe_resource *buffer,
                       unsigned usage, unsigned offset,
                       unsigned size, const void *data);
void si_init_resource_fields(struct r600_common_screen *rscreen,
                             struct r600_resource *res,
                             uint64_t size, unsigned alignment);
bool si_alloc_resource(struct r600_common_screen *rscreen,
                       struct r600_resource *res);
struct pipe_resource *si_buffer_create(struct pipe_screen *screen,
                                       const struct pipe_resource *templ,
                                       unsigned alignment);
struct pipe_resource *si_aligned_buffer_create(struct pipe_screen *screen,
                                               unsigned flags,
                                               unsigned usage,
                                               unsigned size,
                                               unsigned alignment);
struct pipe_resource *
si_buffer_from_user_memory(struct pipe_screen *screen,
                           const struct pipe_resource *templ,
                           void *user_memory);
void si_invalidate_resource(struct pipe_context *ctx,
                            struct pipe_resource *resource);
void si_replace_buffer_storage(struct pipe_context *ctx,
                               struct pipe_resource *dst,
                               struct pipe_resource *src);

/* r600_common_pipe.c */
void si_gfx_write_event_eop(struct r600_common_context *ctx,
                            unsigned event, unsigned event_flags,
                            unsigned data_sel,
                            struct r600_resource *buf, uint64_t va,
                            uint32_t new_fence, unsigned query_type);
unsigned si_gfx_write_fence_dwords(struct r600_common_screen *screen);
void si_gfx_wait_fence(struct r600_common_context *ctx,
                       uint64_t va, uint32_t ref, uint32_t mask);
bool si_common_screen_init(struct r600_common_screen *rscreen,
                           struct radeon_winsys *ws);
void si_destroy_common_screen(struct r600_common_screen *rscreen);
void si_preflush_suspend_features(struct r600_common_context *ctx);
void si_postflush_resume_features(struct r600_common_context *ctx);
bool si_common_context_init(struct r600_common_context *rctx,
                            struct r600_common_screen *rscreen,
                            unsigned context_flags);
void si_common_context_cleanup(struct r600_common_context *rctx);
bool si_can_dump_shader(struct r600_common_screen *rscreen,
                        unsigned processor);
bool si_extra_shader_checks(struct r600_common_screen *rscreen,
                            unsigned processor);
void si_screen_clear_buffer(struct r600_common_screen *rscreen, struct pipe_resource *dst,
                            uint64_t offset, uint64_t size, unsigned value);
struct pipe_resource *si_resource_create_common(struct pipe_screen *screen,
                                                const struct pipe_resource *templ);
void si_need_dma_space(struct r600_common_context *ctx, unsigned num_dw,
                       struct r600_resource *dst, struct r600_resource *src);
void si_save_cs(struct radeon_winsys *ws, struct radeon_winsys_cs *cs,
                struct radeon_saved_cs *saved, bool get_buffer_list);
void si_clear_saved_cs(struct radeon_saved_cs *saved);
bool si_check_device_reset(struct r600_common_context *rctx);

/* r600_gpu_load.c */
void si_gpu_load_kill_thread(struct r600_common_screen *rscreen);
uint64_t si_begin_counter(struct r600_common_screen *rscreen, unsigned type);
unsigned si_end_counter(struct r600_common_screen *rscreen, unsigned type,
                        uint64_t begin);

/* r600_perfcounters.c */
void si_perfcounters_destroy(struct r600_common_screen *rscreen);

/* r600_query.c */
void si_init_screen_query_functions(struct r600_common_screen *rscreen);
void si_init_query_functions(struct r600_common_context *rctx);
void si_suspend_queries(struct r600_common_context *ctx);
void si_resume_queries(struct r600_common_context *ctx);

/* r600_test_dma.c */
void si_test_dma(struct r600_common_screen *rscreen);

/* r600_texture.c */
bool si_prepare_for_dma_blit(struct r600_common_context *rctx,
                             struct r600_texture *rdst,
                             unsigned dst_level, unsigned dstx,
                             unsigned dsty, unsigned dstz,
                             struct r600_texture *rsrc,
                             unsigned src_level,
                             const struct pipe_box *src_box);
void si_texture_get_fmask_info(struct r600_common_screen *rscreen,
                               struct r600_texture *rtex,
                               unsigned nr_samples,
                               struct r600_fmask_info *out);
bool si_init_flushed_depth_texture(struct pipe_context *ctx,
                                   struct pipe_resource *texture,
                                   struct r600_texture **staging);
void si_print_texture_info(struct r600_common_screen *rscreen,
                           struct r600_texture *rtex, struct u_log_context *log);
struct pipe_resource *si_texture_create(struct pipe_screen *screen,
                                        const struct pipe_resource *templ);
bool vi_dcc_formats_compatible(enum pipe_format format1,
                               enum pipe_format format2);
bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex,
                                     unsigned level,
                                     enum pipe_format view_format);
void vi_disable_dcc_if_incompatible_format(struct r600_common_context *rctx,
                                           struct pipe_resource *tex,
                                           unsigned level,
                                           enum pipe_format view_format);
struct pipe_surface *si_create_surface_custom(struct pipe_context *pipe,
                                              struct pipe_resource *texture,
                                              const struct pipe_surface *templ,
                                              unsigned width0, unsigned height0,
                                              unsigned width, unsigned height);
unsigned si_translate_colorswap(enum pipe_format format, bool do_endian_swap);
void vi_separate_dcc_start_query(struct pipe_context *ctx,
                                 struct r600_texture *tex);
void vi_separate_dcc_stop_query(struct pipe_context *ctx,
                                struct r600_texture *tex);
void vi_separate_dcc_process_and_reset_stats(struct pipe_context *ctx,
                                             struct r600_texture *tex);
void vi_dcc_clear_level(struct r600_common_context *rctx,
                        struct r600_texture *rtex,
                        unsigned level, unsigned clear_value);
void si_do_fast_color_clear(struct r600_common_context *rctx,
                            struct pipe_framebuffer_state *fb,
                            struct r600_atom *fb_state,
                            unsigned *buffers, ubyte *dirty_cbufs,
                            const union pipe_color_union *color);
bool si_texture_disable_dcc(struct r600_common_context *rctx,
                            struct r600_texture *rtex);
void si_init_screen_texture_functions(struct r600_common_screen *rscreen);
void si_init_context_texture_functions(struct r600_common_context *rctx);


/* Inline helpers. */

static inline struct r600_resource *r600_resource(struct pipe_resource *r)
{
        return (struct r600_resource*)r;
}

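/* Reference-counting helpers: point *ptr at res, releasing the previous
 * reference if any; pass res = NULL to just unreference. */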
static inline void
r600_resource_reference(struct r600_resource **ptr, struct r600_resource *res)
{
        pipe_resource_reference((struct pipe_resource **)ptr,
                                (struct pipe_resource *)res);
}

static inline void
r600_texture_reference(struct r600_texture **ptr, struct r600_texture *res)
{
        pipe_resource_reference((struct pipe_resource **)ptr, &res->resource.b.b);
}

static inline void
r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r)
{
        struct r600_common_context *rctx = (struct r600_common_context *)ctx;
        struct r600_resource *res = (struct r600_resource *)r;

        if (res) {
                /* Add memory usage for need_gfx_cs_space */
                rctx->vram += res->vram_usage;
                rctx->gtt += res->gart_usage;
        }
}

#define SQ_TEX_XY_FILTER_POINT 0x00
#define SQ_TEX_XY_FILTER_BILINEAR 0x01
#define SQ_TEX_XY_FILTER_ANISO_POINT 0x02
#define SQ_TEX_XY_FILTER_ANISO_BILINEAR 0x03

static inline unsigned eg_tex_filter(unsigned filter, unsigned max_aniso)
{
        if (filter == PIPE_TEX_FILTER_LINEAR)
                return max_aniso > 1 ? SQ_TEX_XY_FILTER_ANISO_BILINEAR
                                     : SQ_TEX_XY_FILTER_BILINEAR;
        else
                return max_aniso > 1 ? SQ_TEX_XY_FILTER_ANISO_POINT
                                     : SQ_TEX_XY_FILTER_POINT;
}

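/* Map the max-anisotropy sample count to its hw encoding: 0 = up to 1x,
 * 1 = 2x, 2 = 4x, 3 = 8x, 4 = 16x, i.e. floor(log2(filter)) clamped to 4. */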
static inline unsigned r600_tex_aniso_filter(unsigned filter)
{
        if (filter < 2)
                return 0;
        if (filter < 4)
                return 1;
        if (filter < 8)
                return 2;
        if (filter < 16)
                return 3;
        return 4;
}

static inline enum radeon_bo_priority
r600_get_sampler_view_priority(struct r600_resource *res)
{
        if (res->b.b.target == PIPE_BUFFER)
                return RADEON_PRIO_SAMPLER_BUFFER;

        if (res->b.b.nr_samples > 1)
                return RADEON_PRIO_SAMPLER_TEXTURE_MSAA;

        return RADEON_PRIO_SAMPLER_TEXTURE;
}

static inline bool
r600_can_sample_zs(struct r600_texture *tex, bool stencil_sampler)
{
        return (stencil_sampler && tex->can_sample_s) ||
               (!stencil_sampler && tex->can_sample_z);
}

static inline bool
vi_dcc_enabled(struct r600_texture *tex, unsigned level)
{
        return tex->dcc_offset && level < tex->surface.num_dcc_levels;
}

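/* HTILE (depth/stencil metadata) is only used for the base mip level
 * here, hence the level == 0 checks below. */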
static inline bool
r600_htile_enabled(struct r600_texture *tex, unsigned level)
{
        return tex->htile_offset && level == 0;
}

static inline bool
vi_tc_compat_htile_enabled(struct r600_texture *tex, unsigned level)
{
        assert(!tex->tc_compatible_htile || tex->htile_offset);
        return tex->tc_compatible_htile && level == 0;
}

#define COMPUTE_DBG(rscreen, fmt, args...) \
        do { \
                if ((rscreen->b.debug_flags & DBG(COMPUTE))) fprintf(stderr, fmt, ##args); \
        } while (0)

#define R600_ERR(fmt, args...) \
        fprintf(stderr, "EE %s:%d %s - " fmt, __FILE__, __LINE__, __func__, ##args)

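/* Convert a float to signed fixed point with frac_bits fractional bits,
 * e.g. S_FIXED(1.5f, 4) == 24, since 1.5 * 2^4 = 24. */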
static inline int S_FIXED(float value, unsigned frac_bits)
{
        return value * (1 << frac_bits);
}

#endif