/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef TU_PRIVATE_H
#define TU_PRIVATE_H

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_VALGRIND
#include <memcheck.h>
#include <valgrind.h>
#define VG(x) x
#else
#define VG(x) ((void)0)
#endif

#include "c11/threads.h"
#include "main/macros.h"
#include "util/list.h"
#include "util/macros.h"
#include "util/u_atomic.h"
#include "vk_alloc.h"
#include "vk_object.h"
#include "vk_debug_report.h"
#include "wsi_common.h"

#include "drm-uapi/msm_drm.h"
#include "ir3/ir3_compiler.h"
#include "ir3/ir3_shader.h"

#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#include "a6xx.xml.h"
#include "fdl/freedreno_layout.h"

#include "tu_descriptor_set.h"
#include "tu_extensions.h"
#include "tu_util.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

#include <vulkan/vk_android_native_buffer.h>
#include <vulkan/vk_icd.h>
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>

#include "tu_entrypoints.h"

#include "vk_format.h"

#define MAX_VBS 32
#define MAX_VERTEX_ATTRIBS 32
#define MAX_RTS 8
#define MAX_VSC_PIPES 32
#define MAX_VIEWPORTS 1
#define MAX_SCISSORS 16
#define MAX_DISCARD_RECTANGLES 4
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_PUSH_DESCRIPTORS 32
#define MAX_DYNAMIC_UNIFORM_BUFFERS 16
#define MAX_DYNAMIC_STORAGE_BUFFERS 8
#define MAX_DYNAMIC_BUFFERS                                                  \
   (MAX_DYNAMIC_UNIFORM_BUFFERS + MAX_DYNAMIC_STORAGE_BUFFERS)
#define TU_MAX_DRM_DEVICES 8
#define MAX_VIEWS 8
#define MAX_BIND_POINTS 2 /* compute + graphics */
/* The Qualcomm driver exposes 0x20000058 */
#define MAX_STORAGE_BUFFER_RANGE 0x20000000
/* We use ldc for uniform buffer loads, just like the Qualcomm driver, so
 * expose the same maximum range.
 * TODO: The SIZE bitfield is 15 bits, and in 4-dword units, so the actual
 * range might be higher.
 */
#define MAX_UNIFORM_BUFFER_RANGE 0x10000
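/* Worked example for the TODO above (a sketch, not a hardware guarantee):
 * a 15-bit SIZE field in units of 4 dwords (16 bytes) would allow up to
 * (2^15 - 1) * 16 = 0x7fff0 bytes, i.e. just under 512 KiB, so the 0x10000
 * limit above is conservative.
 */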

#define A6XX_TEX_CONST_DWORDS 16
#define A6XX_TEX_SAMP_DWORDS 4

#define tu_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

static inline uint32_t
tu_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
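
/* Example: for a 100-wide image, tu_minify(100, 2) == 25, and the result is
 * clamped so that tu_minify(100, 7) == 1 rather than 0.
 */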

#define for_each_bit(b, dword)                                               \
   for (uint32_t __dword = (dword);                                          \
        (b) = __builtin_ffs(__dword) - 1, __dword; __dword &= ~(1 << (b)))
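
/* Usage sketch (hypothetical caller): iterates the set bits of a mask,
 * lowest first; the macro copies the mask, so the argument is not modified:
 *
 *    uint32_t bit;
 *    for_each_bit(bit, cmd->state.dirty)
 *       handle_dirty_bit(bit);
 */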

#define typed_memcpy(dest, src, count)                                       \
   ({                                                                        \
      STATIC_ASSERT(sizeof(*src) == sizeof(*dest));                          \
      memcpy((dest), (src), (count) * sizeof(*(src)));                       \
   })
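
/* Example: copies `count` elements while statically asserting that source
 * and destination element types have the same size:
 *
 *    typed_memcpy(dst_viewports, src_viewports, viewport_count);
 */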

#define COND(bool, val) ((bool) ? (val) : 0)
#define BIT(bit) (1u << (bit))

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

struct tu_instance;

VkResult
__vk_errorf(struct tu_instance *instance,
            VkResult error,
            const char *file,
            int line,
            const char *format,
            ...);

#define vk_error(instance, error)                                            \
   __vk_errorf(instance, error, __FILE__, __LINE__, NULL);
#define vk_errorf(instance, error, format, ...)                              \
   __vk_errorf(instance, error, __FILE__, __LINE__, format, ##__VA_ARGS__);

void
__tu_finishme(const char *file, int line, const char *format, ...)
   tu_printflike(3, 4);
void
tu_loge(const char *format, ...) tu_printflike(1, 2);
void
tu_logi(const char *format, ...) tu_printflike(1, 2);

/**
 * Print a FINISHME message, including its source location.
 */
#define tu_finishme(format, ...)                                             \
   do {                                                                      \
      static bool reported = false;                                          \
      if (!reported) {                                                       \
         __tu_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);           \
         reported = true;                                                    \
      }                                                                      \
   } while (0)

#define tu_stub()                                                            \
   do {                                                                      \
      tu_finishme("stub %s", __func__);                                      \
   } while (0)

void *
tu_lookup_entrypoint_unchecked(const char *name);
void *
tu_lookup_entrypoint_checked(
   const char *name,
   uint32_t core_version,
   const struct tu_instance_extension_table *instance,
   const struct tu_device_extension_table *device);

struct tu_physical_device
{
   struct vk_object_base base;

   struct tu_instance *instance;

   char path[20];
   char name[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
   uint8_t driver_uuid[VK_UUID_SIZE];
   uint8_t device_uuid[VK_UUID_SIZE];
   uint8_t cache_uuid[VK_UUID_SIZE];

   struct wsi_device wsi_device;

   int local_fd;
   int master_fd;

   unsigned gpu_id;
   uint32_t gmem_size;
   uint64_t gmem_base;
   uint32_t ccu_offset_gmem;
   uint32_t ccu_offset_bypass;
   /* alignment for size of tiles */
   uint32_t tile_align_w;
#define TILE_ALIGN_H 16
   /* gmem store/load granularity */
#define GMEM_ALIGN_W 16
#define GMEM_ALIGN_H 4

   struct {
      uint32_t PC_UNKNOWN_9805;
      uint32_t SP_UNKNOWN_A0F8;
   } magic;

   int msm_major_version;
   int msm_minor_version;

   /* This is the driver's on-disk cache, used as a fallback as opposed to
    * the pipeline cache provided by apps.
    */
   struct disk_cache *disk_cache;

   struct tu_device_extension_table supported_extensions;
};

enum tu_debug_flags
{
   TU_DEBUG_STARTUP = 1 << 0,
   TU_DEBUG_NIR = 1 << 1,
   TU_DEBUG_IR3 = 1 << 2,
   TU_DEBUG_NOBIN = 1 << 3,
   TU_DEBUG_SYSMEM = 1 << 4,
   TU_DEBUG_FORCEBIN = 1 << 5,
   TU_DEBUG_NOUBWC = 1 << 6,
};

struct tu_instance
{
   struct vk_object_base base;

   VkAllocationCallbacks alloc;

   uint32_t api_version;
   int physical_device_count;
   struct tu_physical_device physical_devices[TU_MAX_DRM_DEVICES];

   enum tu_debug_flags debug_flags;

   struct vk_debug_report_instance debug_report_callbacks;

   struct tu_instance_extension_table enabled_extensions;
};

VkResult
tu_wsi_init(struct tu_physical_device *physical_device);
void
tu_wsi_finish(struct tu_physical_device *physical_device);

bool
tu_instance_extension_supported(const char *name);
uint32_t
tu_physical_device_api_version(struct tu_physical_device *dev);
bool
tu_physical_device_extension_supported(struct tu_physical_device *dev,
                                       const char *name);

struct cache_entry;

struct tu_pipeline_cache
{
   struct vk_object_base base;

   struct tu_device *device;
   pthread_mutex_t mutex;

   uint32_t total_size;
   uint32_t table_size;
   uint32_t kernel_count;
   struct cache_entry **hash_table;
   bool modified;

   VkAllocationCallbacks alloc;
};

struct tu_pipeline_key
{
};


/* queue types */
#define TU_QUEUE_GENERAL 0

#define TU_MAX_QUEUE_FAMILIES 1

struct tu_fence
{
   struct vk_object_base base;
   struct wsi_fence *fence_wsi;
   bool signaled;
   int fd;
};

void
tu_fence_init(struct tu_fence *fence, bool signaled);
void
tu_fence_finish(struct tu_fence *fence);
void
tu_fence_update_fd(struct tu_fence *fence, int fd);
void
tu_fence_copy(struct tu_fence *fence, const struct tu_fence *src);
void
tu_fence_signal(struct tu_fence *fence);
void
tu_fence_wait_idle(struct tu_fence *fence);

struct tu_queue
{
   struct vk_object_base base;

   struct tu_device *device;
   uint32_t queue_family_index;
   int queue_idx;
   VkDeviceQueueCreateFlags flags;

   uint32_t msm_queue_id;
   struct tu_fence submit_fence;
};

struct tu_bo
{
   uint32_t gem_handle;
   uint64_t size;
   uint64_t iova;
   void *map;
};

enum global_shader {
   GLOBAL_SH_VS,
   GLOBAL_SH_FS_BLIT,
   GLOBAL_SH_FS_CLEAR0,
   GLOBAL_SH_FS_CLEAR_MAX = GLOBAL_SH_FS_CLEAR0 + MAX_RTS,
   GLOBAL_SH_COUNT,
};

/* This struct defines the layout of the global_bo */
struct tu6_global
{
   /* 6 bcolor_entry entries, one for each VK_BORDER_COLOR */
   uint8_t border_color[128 * 6];

   /* clear/blit shaders, all <= 16 instrs (16 instr = 1 instrlen unit) */
   instr_t shaders[GLOBAL_SH_COUNT][16];

   uint32_t seqno_dummy; /* dummy seqno for CP_EVENT_WRITE */
   uint32_t _pad0;
   volatile uint32_t vsc_draw_overflow;
   uint32_t _pad1;
   volatile uint32_t vsc_prim_overflow;
   uint32_t _pad2[3];

   /* scratch space for VPC_SO[i].FLUSH_BASE_LO/HI, starts on a 32-byte boundary. */
   struct {
      uint32_t offset;
      uint32_t pad[7];
   } flush_base[4];
};
#define gb_offset(member) offsetof(struct tu6_global, member)
#define global_iova(cmd, member) ((cmd)->device->global_bo.iova + gb_offset(member))
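
/* Usage sketch (hypothetical caller): compute the device address of a
 * member of the global BO, e.g. as a CP_EVENT_WRITE destination:
 *
 *    uint64_t iova = global_iova(cmd, seqno_dummy);
 */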

void tu_init_clear_blit_shaders(struct tu6_global *global);

/* extra space in vsc draw/prim streams */
#define VSC_PAD 0x40

struct tu_device
{
   struct vk_device vk;
   struct tu_instance *instance;

   struct tu_queue *queues[TU_MAX_QUEUE_FAMILIES];
   int queue_count[TU_MAX_QUEUE_FAMILIES];

   struct tu_physical_device *physical_device;
   int _lost;

   struct ir3_compiler *compiler;

   /* Backup in-memory cache to be used if the app doesn't provide one */
   struct tu_pipeline_cache *mem_cache;

#define MIN_SCRATCH_BO_SIZE_LOG2 12 /* A page */

   /* Currently the kernel driver uses a 32-bit GPU address space, but it
    * should be impossible to go beyond 48 bits.
    */
   struct {
      struct tu_bo bo;
      mtx_t construct_mtx;
      bool initialized;
   } scratch_bos[48 - MIN_SCRATCH_BO_SIZE_LOG2];

   struct tu_bo global_bo;

   struct tu_device_extension_table enabled_extensions;

   uint32_t vsc_draw_strm_pitch;
   uint32_t vsc_prim_strm_pitch;
   mtx_t vsc_pitch_mtx;
};

VkResult _tu_device_set_lost(struct tu_device *device,
                             const char *file, int line,
                             const char *msg, ...) PRINTFLIKE(4, 5);
#define tu_device_set_lost(dev, ...) \
   _tu_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)

static inline bool
tu_device_is_lost(struct tu_device *device)
{
   return unlikely(p_atomic_read(&device->_lost));
}

VkResult
tu_bo_init_new(struct tu_device *dev, struct tu_bo *bo, uint64_t size);
VkResult
tu_bo_init_dmabuf(struct tu_device *dev,
                  struct tu_bo *bo,
                  uint64_t size,
                  int fd);
int
tu_bo_export_dmabuf(struct tu_device *dev, struct tu_bo *bo);
void
tu_bo_finish(struct tu_device *dev, struct tu_bo *bo);
VkResult
tu_bo_map(struct tu_device *dev, struct tu_bo *bo);

/* Get a scratch bo for use inside a command buffer. This will always return
 * the same bo given the same size or similar sizes, so only one scratch bo
 * can be used at the same time. It's meant for short-lived things where we
 * need to write to some piece of memory, read from it, and then immediately
 * discard it.
 */
VkResult
tu_get_scratch_bo(struct tu_device *dev, uint64_t size, struct tu_bo **bo);
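
/* Usage sketch (hypothetical caller, error handling abbreviated):
 *
 *    struct tu_bo *scratch;
 *    VkResult result = tu_get_scratch_bo(dev, size, &scratch);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    // ... emit packets that write to and then read from scratch->iova ...
 */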

struct tu_cs_entry
{
   /* No ownership */
   const struct tu_bo *bo;

   uint32_t size;
   uint32_t offset;
};

struct tu_cs_memory {
   uint32_t *map;
   uint64_t iova;
};

struct tu_draw_state {
   uint64_t iova : 48;
   uint32_t size : 16;
};
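
/* Note: the 48-bit iova bitfield matches the GPU address space limit
 * mentioned for scratch_bos above; the 16-bit size is assumed to be in
 * dwords, which is what makes 16 bits enough for a draw state group.
 */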

enum tu_dynamic_state
{
   /* re-use VK_DYNAMIC_STATE_ enums for non-extended dynamic states */
   TU_DYNAMIC_STATE_SAMPLE_LOCATIONS = VK_DYNAMIC_STATE_STENCIL_REFERENCE + 1,
   TU_DYNAMIC_STATE_COUNT,
};

enum tu_draw_state_group_id
{
   TU_DRAW_STATE_PROGRAM,
   TU_DRAW_STATE_PROGRAM_BINNING,
   TU_DRAW_STATE_TESS,
   TU_DRAW_STATE_VB,
   TU_DRAW_STATE_VI,
   TU_DRAW_STATE_VI_BINNING,
   TU_DRAW_STATE_RAST,
   TU_DRAW_STATE_DS,
   TU_DRAW_STATE_BLEND,
   TU_DRAW_STATE_VS_CONST,
   TU_DRAW_STATE_HS_CONST,
   TU_DRAW_STATE_DS_CONST,
   TU_DRAW_STATE_GS_CONST,
   TU_DRAW_STATE_FS_CONST,
   TU_DRAW_STATE_DESC_SETS,
   TU_DRAW_STATE_DESC_SETS_LOAD,
   TU_DRAW_STATE_VS_PARAMS,
   TU_DRAW_STATE_INPUT_ATTACHMENTS_GMEM,
   TU_DRAW_STATE_INPUT_ATTACHMENTS_SYSMEM,

   /* dynamic state related draw states */
   TU_DRAW_STATE_DYNAMIC,
   TU_DRAW_STATE_COUNT = TU_DRAW_STATE_DYNAMIC + TU_DYNAMIC_STATE_COUNT,
};

enum tu_cs_mode
{

   /*
    * A command stream in TU_CS_MODE_GROW mode grows automatically whenever it
    * is full. tu_cs_begin must be called before command packet emission and
    * tu_cs_end must be called after.
    *
    * This mode may create multiple entries internally. The entries must be
    * submitted together.
    */
   TU_CS_MODE_GROW,

   /*
    * A command stream in TU_CS_MODE_EXTERNAL mode wraps an external,
    * fixed-size buffer. tu_cs_begin and tu_cs_end are optional and have no
    * effect on it.
    *
    * This mode does not create any entry or any BO.
    */
   TU_CS_MODE_EXTERNAL,

   /*
    * A command stream in TU_CS_MODE_SUB_STREAM mode does not support direct
    * command packet emission. tu_cs_begin_sub_stream must be called to get a
    * sub-stream to emit command packets to. When done with the sub-stream,
    * tu_cs_end_sub_stream must be called.
    *
    * This mode does not create any entry internally.
    */
   TU_CS_MODE_SUB_STREAM,
};
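
/* Usage sketch for the modes above (hypothetical callers, error handling
 * elided; the exact signatures live in the tu_cs helpers):
 *
 *    // TU_CS_MODE_GROW: begin, emit, end, then submit the entries together
 *    tu_cs_begin(cs);
 *    // ... emit packets ...
 *    tu_cs_end(cs);
 *
 *    // TU_CS_MODE_SUB_STREAM: carve out a sub-stream, emit, get an IB entry
 *    struct tu_cs sub;
 *    tu_cs_begin_sub_stream(cs, dwords, &sub);
 *    // ... emit packets to &sub ...
 *    struct tu_cs_entry entry = tu_cs_end_sub_stream(cs, &sub);
 */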

struct tu_cs
{
   uint32_t *start;
   uint32_t *cur;
   uint32_t *reserved_end;
   uint32_t *end;

   struct tu_device *device;
   enum tu_cs_mode mode;
   uint32_t next_bo_size;

   struct tu_cs_entry *entries;
   uint32_t entry_count;
   uint32_t entry_capacity;

   struct tu_bo **bos;
   uint32_t bo_count;
   uint32_t bo_capacity;

   /* state for cond_exec_start/cond_exec_end */
   uint32_t cond_flags;
   uint32_t *cond_dwords;
};

struct tu_device_memory
{
   struct vk_object_base base;

   struct tu_bo bo;
   VkDeviceSize size;

   /* for dedicated allocations */
   struct tu_image *image;
   struct tu_buffer *buffer;

   uint32_t type_index;
   void *map;
   void *user_ptr;
};

struct tu_descriptor_range
{
   uint64_t va;
   uint32_t size;
};

struct tu_descriptor_set
{
   struct vk_object_base base;

   const struct tu_descriptor_set_layout *layout;
   struct tu_descriptor_pool *pool;
   uint32_t size;

   uint64_t va;
   uint32_t *mapped_ptr;

   uint32_t *dynamic_descriptors;

   struct tu_bo *buffers[0];
};

struct tu_push_descriptor_set
{
   struct tu_descriptor_set set;
   uint32_t capacity;
};

struct tu_descriptor_pool_entry
{
   uint32_t offset;
   uint32_t size;
   struct tu_descriptor_set *set;
};

struct tu_descriptor_pool
{
   struct vk_object_base base;

   struct tu_bo bo;
   uint64_t current_offset;
   uint64_t size;

   uint8_t *host_memory_base;
   uint8_t *host_memory_ptr;
   uint8_t *host_memory_end;

   uint32_t entry_count;
   uint32_t max_entry_count;
   struct tu_descriptor_pool_entry entries[0];
};

struct tu_descriptor_update_template_entry
{
   VkDescriptorType descriptor_type;

   /* The number of descriptors to update */
   uint32_t descriptor_count;

   /* Into mapped_ptr or dynamic_descriptors, in units of the respective array
    */
   uint32_t dst_offset;

   /* In dwords. Not valid/used for dynamic descriptors */
   uint32_t dst_stride;

   uint32_t buffer_offset;

   /* Only valid for combined image samplers and samplers */
   uint16_t has_sampler;

   /* In bytes */
   size_t src_offset;
   size_t src_stride;

   /* For push descriptors */
   const uint32_t *immutable_samplers;
};

struct tu_descriptor_update_template
{
   struct vk_object_base base;

   uint32_t entry_count;
   struct tu_descriptor_update_template_entry entry[0];
};

struct tu_buffer
{
   struct vk_object_base base;

   VkDeviceSize size;

   VkBufferUsageFlags usage;
   VkBufferCreateFlags flags;

   struct tu_bo *bo;
   VkDeviceSize bo_offset;
};

static inline uint64_t
tu_buffer_iova(struct tu_buffer *buffer)
{
   return buffer->bo->iova + buffer->bo_offset;
}

struct tu_vertex_binding
{
   struct tu_buffer *buffer;
   VkDeviceSize offset;
};

const char *
tu_get_debug_option_name(int id);

const char *
tu_get_perftest_option_name(int id);

struct tu_descriptor_state
{
   struct tu_descriptor_set *sets[MAX_SETS];
   uint32_t dynamic_descriptors[MAX_DYNAMIC_BUFFERS * A6XX_TEX_CONST_DWORDS];
};

enum tu_cmd_dirty_bits
{
   TU_CMD_DIRTY_VERTEX_BUFFERS = 1 << 2,
   TU_CMD_DIRTY_DESC_SETS_LOAD = 1 << 3,
   TU_CMD_DIRTY_COMPUTE_DESC_SETS_LOAD = 1 << 4,
   TU_CMD_DIRTY_SHADER_CONSTS = 1 << 5,
   /* all draw states were disabled and need to be re-enabled: */
   TU_CMD_DIRTY_DRAW_STATE = 1 << 7,
};

/* There are only three cache domains we have to care about: the CCU, or
 * color cache unit, which is used for color and depth/stencil attachments
 * and copy/blit destinations, and is split conceptually into color and depth,
 * and the universal cache or UCHE which is used for pretty much everything
 * else, except for the CP (uncached) and host. We need to flush whenever data
 * crosses these boundaries.
 */

enum tu_cmd_access_mask {
   TU_ACCESS_UCHE_READ = 1 << 0,
   TU_ACCESS_UCHE_WRITE = 1 << 1,
   TU_ACCESS_CCU_COLOR_READ = 1 << 2,
   TU_ACCESS_CCU_COLOR_WRITE = 1 << 3,
   TU_ACCESS_CCU_DEPTH_READ = 1 << 4,
   TU_ACCESS_CCU_DEPTH_WRITE = 1 << 5,

   /* Experiments have shown that while it's safe to avoid flushing the CCU
    * after each blit/renderpass, it's not safe to assume that subsequent
    * lookups with a different attachment state will hit unflushed cache
    * entries. That is, the CCU needs to be flushed and possibly invalidated
    * when accessing memory with a different attachment state. Writing to an
    * attachment under the following conditions after clearing using the
    * normal 2d engine path is known to have issues:
    *
    * - It isn't the 0'th layer.
    * - There is more than one attachment, and this isn't the 0'th attachment
    *   (this seems to also depend on the cpp of the attachments).
    *
    * Our best guess is that the layer/MRT state is used when computing
    * the location of a cache entry in CCU, to avoid conflicts. We assume that
    * any access in a renderpass after or before an access by a transfer needs
    * a flush/invalidate, and use the _INCOHERENT variants to represent access
    * by a transfer.
    */
   TU_ACCESS_CCU_COLOR_INCOHERENT_READ = 1 << 6,
   TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE = 1 << 7,
   TU_ACCESS_CCU_DEPTH_INCOHERENT_READ = 1 << 8,
   TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE = 1 << 9,

   /* Accesses by the host */
   TU_ACCESS_HOST_READ = 1 << 10,
   TU_ACCESS_HOST_WRITE = 1 << 11,

   /* Accesses by a GPU engine which bypasses any cache. e.g. writes via
    * CP_EVENT_WRITE::BLIT and the CP are SYSMEM_WRITE.
    */
   TU_ACCESS_SYSMEM_READ = 1 << 12,
   TU_ACCESS_SYSMEM_WRITE = 1 << 13,

   /* Set if a WFI is required. This can be required for:
    * - 2D engine which (on some models) doesn't wait for flushes to complete
    *   before starting
    * - CP draw indirect opcodes, where we need to wait for any flushes to
    *   complete but the CP implicitly waits for WFI's to complete and
    *   therefore we only need a WFI after the flushes.
    */
   TU_ACCESS_WFI_READ = 1 << 14,

   /* Set if a CP_WAIT_FOR_ME is required due to the data being read by the CP
    * without it waiting for any WFI.
    */
   TU_ACCESS_WFM_READ = 1 << 15,

   /* Memory writes from the CP start in-order with draws and event writes,
    * but execute asynchronously and hence need a CP_WAIT_MEM_WRITES if read.
    */
   TU_ACCESS_CP_WRITE = 1 << 16,

   TU_ACCESS_READ =
      TU_ACCESS_UCHE_READ |
      TU_ACCESS_CCU_COLOR_READ |
      TU_ACCESS_CCU_DEPTH_READ |
      TU_ACCESS_CCU_COLOR_INCOHERENT_READ |
      TU_ACCESS_CCU_DEPTH_INCOHERENT_READ |
      TU_ACCESS_HOST_READ |
      TU_ACCESS_SYSMEM_READ |
      TU_ACCESS_WFI_READ |
      TU_ACCESS_WFM_READ,

   TU_ACCESS_WRITE =
      TU_ACCESS_UCHE_WRITE |
      TU_ACCESS_CCU_COLOR_WRITE |
      TU_ACCESS_CCU_COLOR_INCOHERENT_WRITE |
      TU_ACCESS_CCU_DEPTH_WRITE |
      TU_ACCESS_CCU_DEPTH_INCOHERENT_WRITE |
      TU_ACCESS_HOST_WRITE |
      TU_ACCESS_SYSMEM_WRITE |
      TU_ACCESS_CP_WRITE,

   TU_ACCESS_ALL =
      TU_ACCESS_READ |
      TU_ACCESS_WRITE,
};
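
/* Sketch of how these masks are meant to be used (illustrative only, not
 * the driver's exact tracking logic): a CCU color-attachment write followed
 * by a UCHE texture read crosses a cache boundary, so the CCU color cache
 * must be flushed and the UCHE invalidated. `last_write`/`next_read` are
 * hypothetical locals:
 *
 *    if ((last_write & TU_ACCESS_CCU_COLOR_WRITE) &&
 *        (next_read & TU_ACCESS_UCHE_READ)) {
 *       flush_bits |= TU_CMD_FLAG_CCU_FLUSH_COLOR |
 *                     TU_CMD_FLAG_CACHE_INVALIDATE;
 *    }
 */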

enum tu_cmd_flush_bits {
   TU_CMD_FLAG_CCU_FLUSH_DEPTH = 1 << 0,
   TU_CMD_FLAG_CCU_FLUSH_COLOR = 1 << 1,
   TU_CMD_FLAG_CCU_INVALIDATE_DEPTH = 1 << 2,
   TU_CMD_FLAG_CCU_INVALIDATE_COLOR = 1 << 3,
   TU_CMD_FLAG_CACHE_FLUSH = 1 << 4,
   TU_CMD_FLAG_CACHE_INVALIDATE = 1 << 5,
   TU_CMD_FLAG_WAIT_MEM_WRITES = 1 << 6,
   TU_CMD_FLAG_WAIT_FOR_IDLE = 1 << 7,
   TU_CMD_FLAG_WAIT_FOR_ME = 1 << 8,

   TU_CMD_FLAG_ALL_FLUSH =
      TU_CMD_FLAG_CCU_FLUSH_DEPTH |
      TU_CMD_FLAG_CCU_FLUSH_COLOR |
      TU_CMD_FLAG_CACHE_FLUSH |
      /* Treat the CP as a sort of "cache" which may need to be "flushed" via
       * waiting for writes to land with CP_WAIT_MEM_WRITES.
       */
      TU_CMD_FLAG_WAIT_MEM_WRITES,

   TU_CMD_FLAG_GPU_INVALIDATE =
      TU_CMD_FLAG_CCU_INVALIDATE_DEPTH |
      TU_CMD_FLAG_CCU_INVALIDATE_COLOR |
      TU_CMD_FLAG_CACHE_INVALIDATE,

   TU_CMD_FLAG_ALL_INVALIDATE =
      TU_CMD_FLAG_GPU_INVALIDATE |
      /* Treat the CP as a sort of "cache" which may need to be "invalidated"
       * via waiting for UCHE/CCU flushes to land with WFI/WFM.
       */
      TU_CMD_FLAG_WAIT_FOR_IDLE |
      TU_CMD_FLAG_WAIT_FOR_ME,
};

/* Changing the CCU from sysmem mode to gmem mode or vice-versa is pretty
 * heavy, involving a CCU cache flush/invalidate and a WFI in order to change
 * which part of the gmem is used by the CCU. Here we keep track of the
 * current state of the CCU.
 */
enum tu_cmd_ccu_state {
   TU_CMD_CCU_SYSMEM,
   TU_CMD_CCU_GMEM,
   TU_CMD_CCU_UNKNOWN,
};

struct tu_cache_state {
   /* Caches which must be made available (flushed) eventually if there are
    * any users outside that cache domain, and caches which must be
    * invalidated eventually if there are any reads.
    */
   enum tu_cmd_flush_bits pending_flush_bits;
   /* Pending flushes */
   enum tu_cmd_flush_bits flush_bits;
};
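
/* Illustrative example of the split above (a sketch, not the exact driver
 * logic): a write makes its flush *pending*; only when a later access from
 * another domain needs the data does the relevant subset move into
 * flush_bits and actually get emitted. `needed` is a hypothetical mask:
 *
 *    cache->pending_flush_bits |= TU_CMD_FLAG_CCU_FLUSH_COLOR;  // on write
 *    ...
 *    cache->flush_bits |= cache->pending_flush_bits & needed;   // on read
 *    cache->pending_flush_bits &= ~needed;
 */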

struct tu_cmd_state
{
   uint32_t dirty;

   struct tu_pipeline *pipeline;
   struct tu_pipeline *compute_pipeline;

   /* Vertex buffers */
   struct
   {
      struct tu_buffer *buffers[MAX_VBS];
      VkDeviceSize offsets[MAX_VBS];
   } vb;

   /* for dynamic states that can't be emitted directly */
   uint32_t dynamic_stencil_mask;
   uint32_t dynamic_stencil_wrmask;
   uint32_t dynamic_stencil_ref;
   uint32_t dynamic_gras_su_cntl;

   /* saved states to re-emit in TU_CMD_DIRTY_DRAW_STATE case */
   struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];
   struct tu_draw_state vertex_buffers;
   struct tu_draw_state shader_const[MESA_SHADER_STAGES];
   struct tu_draw_state desc_sets;

   struct tu_draw_state vs_params;

   /* Index buffer */
   uint64_t index_va;
   uint32_t max_index_count;
   uint8_t index_size;

   /* because the streamout base has to be 32-byte aligned,
    * there is an extra offset to deal with when it is
    * unaligned
    */
   uint8_t streamout_offset[IR3_MAX_SO_BUFFERS];

   /* Renderpasses are tricky, because we may need to flush differently if
    * using sysmem vs. gmem and therefore we have to delay any flushing that
    * happens before a renderpass. So we have to have two copies of the flush
    * state, one for intra-renderpass flushes (i.e. renderpass dependencies)
    * and one for outside a renderpass.
    */
   struct tu_cache_state cache;
   struct tu_cache_state renderpass_cache;

   enum tu_cmd_ccu_state ccu_state;

   const struct tu_render_pass *pass;
   const struct tu_subpass *subpass;
   const struct tu_framebuffer *framebuffer;
   VkRect2D render_area;

   struct tu_cs_entry tile_store_ib;

   bool xfb_used;
};

struct tu_cmd_pool
{
   struct vk_object_base base;

   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
   struct list_head free_cmd_buffers;
   uint32_t queue_family_index;
};

struct tu_cmd_buffer_upload
{
   uint8_t *map;
   unsigned offset;
   uint64_t size;
   struct list_head list;
};

enum tu_cmd_buffer_status
{
   TU_CMD_BUFFER_STATUS_INVALID,
   TU_CMD_BUFFER_STATUS_INITIAL,
   TU_CMD_BUFFER_STATUS_RECORDING,
   TU_CMD_BUFFER_STATUS_EXECUTABLE,
   TU_CMD_BUFFER_STATUS_PENDING,
};

struct tu_bo_list
{
   uint32_t count;
   uint32_t capacity;
   struct drm_msm_gem_submit_bo *bo_infos;
};

#define TU_BO_LIST_FAILED (~0)

void
tu_bo_list_init(struct tu_bo_list *list);
void
tu_bo_list_destroy(struct tu_bo_list *list);
void
tu_bo_list_reset(struct tu_bo_list *list);
uint32_t
tu_bo_list_add(struct tu_bo_list *list,
               const struct tu_bo *bo,
               uint32_t flags);
VkResult
tu_bo_list_merge(struct tu_bo_list *list, const struct tu_bo_list *other);

struct tu_cmd_buffer
{
   struct vk_object_base base;

   struct tu_device *device;

   struct tu_cmd_pool *pool;
   struct list_head pool_link;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;
   enum tu_cmd_buffer_status status;

   struct tu_cmd_state state;
   struct tu_vertex_binding vertex_bindings[MAX_VBS];
   uint32_t vertex_bindings_set;
   uint32_t queue_family_index;

   uint32_t push_constants[MAX_PUSH_CONSTANTS_SIZE / 4];
   VkShaderStageFlags push_constant_stages;
   struct tu_descriptor_set meta_push_descriptors;

   struct tu_descriptor_state descriptors[MAX_BIND_POINTS];

   struct tu_cmd_buffer_upload upload;

   VkResult record_result;

   struct tu_bo_list bo_list;
   struct tu_cs cs;
   struct tu_cs draw_cs;
   struct tu_cs draw_epilogue_cs;
   struct tu_cs sub_cs;

   bool has_tess;

   uint32_t vsc_draw_strm_pitch;
   uint32_t vsc_prim_strm_pitch;
};

/* Temporary struct for tracking a register state to be written, used by
 * a6xx-pack.h and tu_cs_emit_regs()
 */
struct tu_reg_value {
   uint32_t reg;
   uint64_t value;
   bool is_address;
   struct tu_bo *bo;
   bool bo_write;
   uint32_t bo_offset;
   uint32_t bo_shift;
};


void tu_emit_cache_flush_renderpass(struct tu_cmd_buffer *cmd_buffer,
                                    struct tu_cs *cs);

void tu_emit_cache_flush_ccu(struct tu_cmd_buffer *cmd_buffer,
                             struct tu_cs *cs,
                             enum tu_cmd_ccu_state ccu_state);

void
tu6_emit_event_write(struct tu_cmd_buffer *cmd,
                     struct tu_cs *cs,
                     enum vgt_event_type event);

static inline struct tu_descriptor_state *
tu_get_descriptors_state(struct tu_cmd_buffer *cmd_buffer,
                         VkPipelineBindPoint bind_point)
{
   return &cmd_buffer->descriptors[bind_point];
}

struct tu_event
{
   struct vk_object_base base;
   struct tu_bo bo;
};

struct tu_shader_module
{
   struct vk_object_base base;

   unsigned char sha1[20];

   uint32_t code_size;
   const uint32_t *code[0];
};

struct tu_push_constant_range
{
   uint32_t lo;
   uint32_t count;
};

struct tu_shader
{
   struct ir3_shader *ir3_shader;

   struct tu_push_constant_range push_consts;
   uint8_t active_desc_sets;
};

struct tu_shader *
tu_shader_create(struct tu_device *dev,
                 gl_shader_stage stage,
                 const VkPipelineShaderStageCreateInfo *stage_info,
                 struct tu_pipeline_layout *layout,
                 const VkAllocationCallbacks *alloc);

void
tu_shader_destroy(struct tu_device *dev,
                  struct tu_shader *shader,
                  const VkAllocationCallbacks *alloc);

struct tu_program_descriptor_linkage
{
   struct ir3_const_state const_state;

   uint32_t constlen;

   struct tu_push_constant_range push_consts;
};

struct tu_pipeline
{
   struct vk_object_base base;

   struct tu_cs cs;

   struct tu_pipeline_layout *layout;

   bool need_indirect_descriptor_sets;
   VkShaderStageFlags active_stages;
   uint32_t active_desc_sets;

   /* mask of enabled dynamic states
    * if BIT(i) is set, pipeline->dynamic_state[i] is *NOT* used
    */
   uint32_t dynamic_state_mask;
   struct tu_draw_state dynamic_state[TU_DYNAMIC_STATE_COUNT];

   /* gras_su_cntl without line width, used for dynamic line width state */
   uint32_t gras_su_cntl;

   /* draw states for the pipeline */
   struct tu_draw_state load_state, rast_state, ds_state, blend_state;

   struct
   {
      struct tu_draw_state state;
      struct tu_draw_state binning_state;

      struct tu_program_descriptor_linkage link[MESA_SHADER_STAGES];
   } program;

   struct
   {
      struct tu_draw_state state;
      struct tu_draw_state binning_state;
      uint32_t bindings_used;
   } vi;

   struct
   {
      enum pc_di_primtype primtype;
      bool primitive_restart;
   } ia;

   struct
   {
      uint32_t patch_type;
      uint32_t param_stride;
      uint32_t hs_bo_regid;
      uint32_t ds_bo_regid;
      bool upper_left_domain_origin;
   } tess;

   struct
   {
      uint32_t local_size[3];
   } compute;
};

void
tu6_emit_viewport(struct tu_cs *cs, const VkViewport *viewport);

void
tu6_emit_scissor(struct tu_cs *cs, const VkRect2D *scissor);

void
tu6_emit_sample_locations(struct tu_cs *cs, const VkSampleLocationsInfoEXT *samp_loc);

void
tu6_emit_depth_bias(struct tu_cs *cs,
                    float constant_factor,
                    float clamp,
                    float slope_factor);

void tu6_emit_msaa(struct tu_cs *cs, VkSampleCountFlagBits samples);

void tu6_emit_window_scissor(struct tu_cs *cs, uint32_t x1, uint32_t y1, uint32_t x2, uint32_t y2);

void tu6_emit_window_offset(struct tu_cs *cs, uint32_t x1, uint32_t y1);

void
tu6_emit_xs_config(struct tu_cs *cs,
                   gl_shader_stage stage,
                   const struct ir3_shader_variant *xs,
                   uint64_t binary_iova);

void
tu6_emit_vpc(struct tu_cs *cs,
             const struct ir3_shader_variant *vs,
             const struct ir3_shader_variant *hs,
             const struct ir3_shader_variant *ds,
             const struct ir3_shader_variant *gs,
             const struct ir3_shader_variant *fs);

void
tu6_emit_fs_inputs(struct tu_cs *cs, const struct ir3_shader_variant *fs);

struct tu_image_view;

void
tu_resolve_sysmem(struct tu_cmd_buffer *cmd,
                  struct tu_cs *cs,
                  struct tu_image_view *src,
                  struct tu_image_view *dst,
                  uint32_t layers,
                  const VkRect2D *rect);

void
tu_clear_sysmem_attachment(struct tu_cmd_buffer *cmd,
                           struct tu_cs *cs,
                           uint32_t a,
                           const VkRenderPassBeginInfo *info);

void
tu_clear_gmem_attachment(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         const VkRenderPassBeginInfo *info);

void
tu_load_gmem_attachment(struct tu_cmd_buffer *cmd,
                        struct tu_cs *cs,
                        uint32_t a,
                        bool force_load);

/* exposed to allow emitting a load without checking LOAD_OP */
void
tu_emit_load_gmem_attachment(struct tu_cmd_buffer *cmd, struct tu_cs *cs, uint32_t a);

/* note: gmem store can also resolve */
void
tu_store_gmem_attachment(struct tu_cmd_buffer *cmd,
                         struct tu_cs *cs,
                         uint32_t a,
                         uint32_t gmem_a);

enum tu_supported_formats {
   FMT_VERTEX = 1,
   FMT_TEXTURE = 2,
   FMT_COLOR = 4,
};

struct tu_native_format
{
   enum a6xx_format fmt : 8;
   enum a3xx_color_swap swap : 8;
   enum a6xx_tile_mode tile_mode : 8;
   enum tu_supported_formats supported : 8;
};

struct tu_native_format tu6_format_vtx(VkFormat format);
struct tu_native_format tu6_format_color(VkFormat format, enum a6xx_tile_mode tile_mode);
struct tu_native_format tu6_format_texture(VkFormat format, enum a6xx_tile_mode tile_mode);

static inline enum a6xx_format
tu6_base_format(VkFormat format)
{
   /* note: tu6_format_color doesn't care about tiling for .fmt field */
   return tu6_format_color(format, TILE6_LINEAR).fmt;
}

struct tu_image
{
   struct vk_object_base base;

   VkImageType type;
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkImageUsageFlags usage;  /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling;     /**< VkImageCreateInfo::tiling */
   VkImageCreateFlags flags; /**< VkImageCreateInfo::flags */
   VkExtent3D extent;
   uint32_t level_count;
   uint32_t layer_count;
   VkSampleCountFlagBits samples;

   struct fdl_layout layout[3];
   uint32_t total_size;

   unsigned queue_family_mask;
   bool exclusive;
   bool shareable;

   /* For VK_ANDROID_native_buffer, the WSI image owns the memory. */
   VkDeviceMemory owned_memory;

   /* Set when bound */
   struct tu_bo *bo;
   VkDeviceSize bo_offset;
};

static inline uint32_t
tu_get_layerCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS
             ? image->layer_count - range->baseArrayLayer
             : range->layerCount;
}

static inline uint32_t
tu_get_levelCount(const struct tu_image *image,
                  const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS
             ? image->level_count - range->baseMipLevel
             : range->levelCount;
}

struct tu_image_view
{
   struct vk_object_base base;

   struct tu_image *image; /**< VkImageViewCreateInfo::image */

   uint64_t base_addr;
   uint64_t ubwc_addr;
   uint32_t layer_size;
   uint32_t ubwc_layer_size;

   /* used to determine if fast gmem store path can be used */
   VkExtent2D extent;
   bool need_y2_align;

   bool ubwc_enabled;

   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];

   /* Descriptor for use as a storage image as opposed to a sampled image.
    * This has a few differences for cube maps (e.g. type).
    */
   uint32_t storage_descriptor[A6XX_TEX_CONST_DWORDS];

   /* pre-filled register values */
   uint32_t PITCH;
   uint32_t FLAG_BUFFER_PITCH;

   uint32_t RB_MRT_BUF_INFO;
   uint32_t SP_FS_MRT_REG;

   uint32_t SP_PS_2D_SRC_INFO;
   uint32_t SP_PS_2D_SRC_SIZE;

   uint32_t RB_2D_DST_INFO;

   uint32_t RB_BLIT_DST_INFO;
};

struct tu_sampler_ycbcr_conversion {
   struct vk_object_base base;

   VkFormat format;
   VkSamplerYcbcrModelConversion ycbcr_model;
   VkSamplerYcbcrRange ycbcr_range;
   VkComponentMapping components;
   VkChromaLocation chroma_offsets[2];
   VkFilter chroma_filter;
};

struct tu_sampler {
   struct vk_object_base base;

   uint32_t descriptor[A6XX_TEX_SAMP_DWORDS];
   struct tu_sampler_ycbcr_conversion *ycbcr_sampler;
};

void
tu_cs_image_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);

void
tu_cs_image_ref_2d(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer, bool src);

void
tu_cs_image_flag_ref(struct tu_cs *cs, const struct tu_image_view *iview, uint32_t layer);

VkResult
tu_image_create(VkDevice _device,
                const VkImageCreateInfo *pCreateInfo,
                const VkAllocationCallbacks *alloc,
                VkImage *pImage,
                uint64_t modifier,
                const VkSubresourceLayout *plane_layouts);

VkResult
tu_image_from_gralloc(VkDevice device_h,
                      const VkImageCreateInfo *base_info,
                      const VkNativeBufferANDROID *gralloc_info,
                      const VkAllocationCallbacks *alloc,
                      VkImage *out_image_h);

void
tu_image_view_init(struct tu_image_view *view,
                   const VkImageViewCreateInfo *pCreateInfo);

struct tu_buffer_view
{
   struct vk_object_base base;

   uint32_t descriptor[A6XX_TEX_CONST_DWORDS];

   struct tu_buffer *buffer;
};
void
tu_buffer_view_init(struct tu_buffer_view *view,
                    struct tu_device *device,
                    const VkBufferViewCreateInfo *pCreateInfo);

struct tu_attachment_info
{
   struct tu_image_view *attachment;
};

struct tu_framebuffer
{
   struct vk_object_base base;

   uint32_t width;
   uint32_t height;
   uint32_t layers;

   /* size of the first tile */
   VkExtent2D tile0;
   /* number of tiles */
   VkExtent2D tile_count;

   /* size of the first VSC pipe */
   VkExtent2D pipe0;
   /* number of VSC pipes */
   VkExtent2D pipe_count;

   /* pipe register values */
   uint32_t pipe_config[MAX_VSC_PIPES];
   uint32_t pipe_sizes[MAX_VSC_PIPES];

   uint32_t attachment_count;
   struct tu_attachment_info attachments[0];
};

void
tu_framebuffer_tiling_config(struct tu_framebuffer *fb,
                             const struct tu_device *device,
                             const struct tu_render_pass *pass);

struct tu_subpass_barrier {
   VkPipelineStageFlags src_stage_mask;
   VkAccessFlags src_access_mask;
   VkAccessFlags dst_access_mask;
   bool incoherent_ccu_color, incoherent_ccu_depth;
};

struct tu_subpass_attachment
{
   uint32_t attachment;
};

struct tu_subpass
{
   uint32_t input_count;
   uint32_t color_count;
   struct tu_subpass_attachment *input_attachments;
   struct tu_subpass_attachment *color_attachments;
   struct tu_subpass_attachment *resolve_attachments;
   struct tu_subpass_attachment depth_stencil_attachment;

   VkSampleCountFlagBits samples;

   uint32_t srgb_cntl;

   struct tu_subpass_barrier start_barrier;
};

struct tu_render_pass_attachment
{
   VkFormat format;
   uint32_t samples;
   uint32_t cpp;
   VkImageAspectFlags clear_mask;
   bool load;
   bool store;
   int32_t gmem_offset;
};

struct tu_render_pass
{
   struct vk_object_base base;

   uint32_t attachment_count;
   uint32_t subpass_count;
   uint32_t gmem_pixels;
   uint32_t tile_align_w;
   struct tu_subpass_attachment *subpass_attachments;
   struct tu_render_pass_attachment *attachments;
   struct tu_subpass_barrier end_barrier;
   struct tu_subpass subpasses[0];
};

struct tu_query_pool
{
   struct vk_object_base base;

   VkQueryType type;
   uint32_t stride;
   uint64_t size;
   uint32_t pipeline_statistics;
   struct tu_bo bo;
};

enum tu_semaphore_kind
{
   TU_SEMAPHORE_NONE,
   TU_SEMAPHORE_SYNCOBJ,
};

struct tu_semaphore_part
{
   enum tu_semaphore_kind kind;
   union {
      uint32_t syncobj;
   };
};

struct tu_semaphore
{
   struct vk_object_base base;

   struct tu_semaphore_part permanent;
   struct tu_semaphore_part temporary;
};

void
tu_set_descriptor_set(struct tu_cmd_buffer *cmd_buffer,
                      VkPipelineBindPoint bind_point,
                      struct tu_descriptor_set *set,
                      unsigned idx);

void
tu_update_descriptor_sets(struct tu_device *device,
                          struct tu_cmd_buffer *cmd_buffer,
                          VkDescriptorSet overrideSet,
                          uint32_t descriptorWriteCount,
                          const VkWriteDescriptorSet *pDescriptorWrites,
                          uint32_t descriptorCopyCount,
                          const VkCopyDescriptorSet *pDescriptorCopies);

void
tu_update_descriptor_set_with_template(
   struct tu_device *device,
   struct tu_cmd_buffer *cmd_buffer,
   struct tu_descriptor_set *set,
   VkDescriptorUpdateTemplate descriptorUpdateTemplate,
   const void *pData);

int
tu_drm_get_gpu_id(const struct tu_physical_device *dev, uint32_t *id);

int
tu_drm_get_gmem_size(const struct tu_physical_device *dev, uint32_t *size);

int
tu_drm_get_gmem_base(const struct tu_physical_device *dev, uint64_t *base);

int
tu_drm_submitqueue_new(const struct tu_device *dev,
                       int priority,
                       uint32_t *queue_id);

void
tu_drm_submitqueue_close(const struct tu_device *dev, uint32_t queue_id);

uint32_t
tu_gem_new(const struct tu_device *dev, uint64_t size, uint32_t flags);
uint32_t
tu_gem_import_dmabuf(const struct tu_device *dev,
                     int prime_fd,
                     uint64_t size);
int
tu_gem_export_dmabuf(const struct tu_device *dev, uint32_t gem_handle);
void
tu_gem_close(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_offset(const struct tu_device *dev, uint32_t gem_handle);
uint64_t
tu_gem_info_iova(const struct tu_device *dev, uint32_t gem_handle);

#define TU_DEFINE_HANDLE_CASTS(__tu_type, __VkType)                          \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) _handle;                                   \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)     \
   {                                                                         \
      return (__VkType) _obj;                                                \
   }

#define TU_DEFINE_NONDISP_HANDLE_CASTS(__tu_type, __VkType)                  \
                                                                             \
   static inline struct __tu_type *__tu_type##_from_handle(__VkType _handle) \
   {                                                                         \
      return (struct __tu_type *) (uintptr_t) _handle;                       \
   }                                                                         \
                                                                             \
   static inline __VkType __tu_type##_to_handle(struct __tu_type *_obj)     \
   {                                                                         \
      return (__VkType)(uintptr_t) _obj;                                     \
   }

#define TU_FROM_HANDLE(__tu_type, __name, __handle)                          \
   struct __tu_type *__name = __tu_type##_from_handle(__handle)
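
/* Example: unwrap a dispatchable handle at the top of an entrypoint:
 *
 *    VkResult
 *    tu_BeginCommandBuffer(VkCommandBuffer commandBuffer, ...)
 *    {
 *       TU_FROM_HANDLE(tu_cmd_buffer, cmd_buffer, commandBuffer);
 *       ...
 *    }
 */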

TU_DEFINE_HANDLE_CASTS(tu_cmd_buffer, VkCommandBuffer)
TU_DEFINE_HANDLE_CASTS(tu_device, VkDevice)
TU_DEFINE_HANDLE_CASTS(tu_instance, VkInstance)
TU_DEFINE_HANDLE_CASTS(tu_physical_device, VkPhysicalDevice)
TU_DEFINE_HANDLE_CASTS(tu_queue, VkQueue)

TU_DEFINE_NONDISP_HANDLE_CASTS(tu_cmd_pool, VkCommandPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer, VkBuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_buffer_view, VkBufferView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_pool, VkDescriptorPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set, VkDescriptorSet)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_set_layout,
                               VkDescriptorSetLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_descriptor_update_template,
                               VkDescriptorUpdateTemplate)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_device_memory, VkDeviceMemory)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_fence, VkFence)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_event, VkEvent)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_framebuffer, VkFramebuffer)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image, VkImage)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_image_view, VkImageView)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_cache, VkPipelineCache)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline, VkPipeline)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_pipeline_layout, VkPipelineLayout)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_query_pool, VkQueryPool)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_render_pass, VkRenderPass)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler, VkSampler)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_sampler_ycbcr_conversion, VkSamplerYcbcrConversion)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_shader_module, VkShaderModule)
TU_DEFINE_NONDISP_HANDLE_CASTS(tu_semaphore, VkSemaphore)

#endif /* TU_PRIVATE_H */