anv: move BaseVertexID/BaseInstanceID vertex buffer index to 31
mesa.git: src/intel/vulkan/anv_private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef ANV_PRIVATE_H
25 #define ANV_PRIVATE_H
26
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdbool.h>
30 #include <pthread.h>
31 #include <assert.h>
32 #include <stdint.h>
33 #include <i915_drm.h>
34
35 #ifdef HAVE_VALGRIND
36 #include <valgrind.h>
37 #include <memcheck.h>
38 #define VG(x) x
39 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
40 #else
41 #define VG(x)
42 #endif
43
44 #include "common/gen_device_info.h"
45 #include "blorp/blorp.h"
46 #include "brw_compiler.h"
47 #include "util/macros.h"
48 #include "util/list.h"
49 #include "util/u_vector.h"
50 #include "util/vk_alloc.h"
51
52 /* Pre-declarations needed for WSI entrypoints */
53 struct wl_surface;
54 struct wl_display;
55 typedef struct xcb_connection_t xcb_connection_t;
56 typedef uint32_t xcb_visualid_t;
57 typedef uint32_t xcb_window_t;
58
59 struct gen_l3_config;
60
61 #include <vulkan/vulkan.h>
62 #include <vulkan/vulkan_intel.h>
63 #include <vulkan/vk_icd.h>
64
65 #include "anv_entrypoints.h"
66 #include "brw_context.h"
67 #include "isl/isl.h"
68
69 #include "wsi_common.h"
70
71 #ifdef __cplusplus
72 extern "C" {
73 #endif
74
75 /* Allowing different clear colors requires us to perform a depth resolve at
76 * the end of certain render passes. This is because while slow clears store
77 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
78 * See the PRMs for examples describing when additional resolves would be
79 * necessary. To enable fast clears without requiring extra resolves, we set
80 * the clear value to a globally-defined one. We could allow different values
 81 * if the user doesn't expect coherent data during or after a render pass
 82 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 83 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 84 * 1.0f seems to be the only value used. The only application that doesn't set
 85 * this value does so through the use of a seemingly uninitialized clear
 86 * value.
87 */
88 #define ANV_HZ_FC_VAL 1.0f
89
90 #define MAX_VBS 31
91 #define MAX_SETS 8
92 #define MAX_RTS 8
93 #define MAX_VIEWPORTS 16
94 #define MAX_SCISSORS 16
95 #define MAX_PUSH_CONSTANTS_SIZE 128
96 #define MAX_DYNAMIC_BUFFERS 16
97 #define MAX_IMAGES 8
98
99 #define ANV_SVGS_VB_INDEX MAX_VBS
100
101 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
102
103 static inline uint32_t
104 align_down_npot_u32(uint32_t v, uint32_t a)
105 {
106 return v - (v % a);
107 }
108
109 static inline uint32_t
110 align_u32(uint32_t v, uint32_t a)
111 {
112 assert(a != 0 && a == (a & -a));
113 return (v + a - 1) & ~(a - 1);
114 }
115
116 static inline uint64_t
117 align_u64(uint64_t v, uint64_t a)
118 {
119 assert(a != 0 && a == (a & -a));
120 return (v + a - 1) & ~(a - 1);
121 }
122
123 static inline int32_t
124 align_i32(int32_t v, int32_t a)
125 {
126 assert(a != 0 && a == (a & -a));
127 return (v + a - 1) & ~(a - 1);
128 }
129
130 /** Alignment must be a power of 2. */
131 static inline bool
132 anv_is_aligned(uintmax_t n, uintmax_t a)
133 {
134 assert(a == (a & -a));
135 return (n & (a - 1)) == 0;
136 }
137
138 static inline uint32_t
139 anv_minify(uint32_t n, uint32_t levels)
140 {
141 if (unlikely(n == 0))
142 return 0;
143 else
144 return MAX2(n >> levels, 1);
145 }
146
147 static inline float
148 anv_clamp_f(float f, float min, float max)
149 {
150 assert(min < max);
151
152 if (f > max)
153 return max;
154 else if (f < min)
155 return min;
156 else
157 return f;
158 }
159
160 static inline bool
161 anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
162 {
163 if (*inout_mask & clear_mask) {
164 *inout_mask &= ~clear_mask;
165 return true;
166 } else {
167 return false;
168 }
169 }
170
171 static inline union isl_color_value
172 vk_to_isl_color(VkClearColorValue color)
173 {
174 return (union isl_color_value) {
175 .u32 = {
176 color.uint32[0],
177 color.uint32[1],
178 color.uint32[2],
179 color.uint32[3],
180 },
181 };
182 }
183
184 #define for_each_bit(b, dword) \
185 for (uint32_t __dword = (dword); \
186 (b) = __builtin_ffs(__dword) - 1, __dword; \
187 __dword &= ~(1 << (b)))
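/* Usage sketch (illustrative, not part of this header): iterate the set bits
 * of a mask, lowest first. for_each_bit consumes a private copy of the mask,
 * so the original value is left untouched.
 */
#if 0
uint32_t b;
uint32_t dirty = 0x15;                      /* bits 0, 2 and 4 */
for_each_bit(b, dirty)
   fprintf(stderr, "bit %u is set\n", b);   /* prints 0, then 2, then 4 */
#endif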
188
189 #define typed_memcpy(dest, src, count) ({ \
 190 STATIC_ASSERT(sizeof(*(src)) == sizeof(*(dest))); \
191 memcpy((dest), (src), (count) * sizeof(*(src))); \
192 })
193
194 /* Whenever we generate an error, pass it through this function. Useful for
195 * debugging, where we can break on it. Only call at error site, not when
196 * propagating errors. Might be useful to plug in a stack trace here.
197 */
198
199 VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);
200
201 #ifdef DEBUG
 202 #define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL)
 203 #define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__)
204 #define anv_debug(format, ...) fprintf(stderr, "debug: " format, ##__VA_ARGS__)
205 #else
206 #define vk_error(error) error
207 #define vk_errorf(error, format, ...) error
208 #define anv_debug(format, ...)
209 #endif
210
211 /**
212 * Warn on ignored extension structs.
213 *
214 * The Vulkan spec requires us to ignore unsupported or unknown structs in
215 * a pNext chain. In debug mode, emitting warnings for ignored structs may
216 * help us discover structs that we should not have ignored.
217 *
218 *
219 * From the Vulkan 1.0.38 spec:
220 *
221 * Any component of the implementation (the loader, any enabled layers,
222 * and drivers) must skip over, without processing (other than reading the
223 * sType and pNext members) any chained structures with sType values not
224 * defined by extensions supported by that component.
225 */
226 #define anv_debug_ignored_stype(sType) \
 227 anv_debug("%s: ignored VkStructureType %u\n", __func__, (sType))
228
229 void __anv_finishme(const char *file, int line, const char *format, ...)
230 anv_printflike(3, 4);
231 void anv_loge(const char *format, ...) anv_printflike(1, 2);
232 void anv_loge_v(const char *format, va_list va);
233
234 /**
235 * Print a FINISHME message, including its source location.
236 */
237 #define anv_finishme(format, ...) \
238 do { \
239 static bool reported = false; \
240 if (!reported) { \
241 __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
242 reported = true; \
243 } \
244 } while (0)
245
246 /* A non-fatal assert. Useful for debugging. */
247 #ifdef DEBUG
248 #define anv_assert(x) ({ \
249 if (unlikely(!(x))) \
250 fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
251 })
252 #else
253 #define anv_assert(x)
254 #endif
255
256 /**
257 * If a block of code is annotated with anv_validate, then the block runs only
258 * in debug builds.
259 */
260 #ifdef DEBUG
261 #define anv_validate if (1)
262 #else
263 #define anv_validate if (0)
264 #endif
265
266 #define stub_return(v) \
267 do { \
268 anv_finishme("stub %s", __func__); \
269 return (v); \
270 } while (0)
271
272 #define stub() \
273 do { \
274 anv_finishme("stub %s", __func__); \
275 return; \
276 } while (0)
277
278 /**
279 * A dynamically growable, circular buffer. Elements are added at head and
280 * removed from tail. head and tail are free-running uint32_t indices and we
281 * only compute the modulo with size when accessing the array. This way,
282 * number of bytes in the queue is always head - tail, even in case of
283 * wraparound.
284 */
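/* Sketch (illustrative) of the free-running index trick described above:
 * because uint32_t arithmetic wraps modulo 2^32, head - tail is the element
 * count even after head has wrapped past zero.
 */
#if 0
uint32_t tail = UINT32_MAX - 2;   /* 3 slots before the wrap point */
uint32_t head = 5;                /* has since wrapped past zero */
uint32_t count = head - tail;     /* == 8, still correct */
#endif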
285
286 struct anv_bo {
287 uint32_t gem_handle;
288
289 /* Index into the current validation list. This is used by the
 290 * validation list building algorithm to track which buffers are already
291 * in the validation list so that we can ensure uniqueness.
292 */
293 uint32_t index;
294
295 /* Last known offset. This value is provided by the kernel when we
296 * execbuf and is used as the presumed offset for the next bunch of
297 * relocations.
298 */
299 uint64_t offset;
300
301 uint64_t size;
302 void *map;
303
304 /* We need to set the WRITE flag on winsys bos so GEM will know we're
 305 * writing to them and synchronize uses on other rings (e.g., if the display
306 * server uses the blitter ring).
307 */
308 bool is_winsys_bo;
309 };
310
311 static inline void
312 anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
313 {
314 bo->gem_handle = gem_handle;
315 bo->index = 0;
316 bo->offset = -1;
317 bo->size = size;
318 bo->map = NULL;
319 bo->is_winsys_bo = false;
320 }
321
322 /* Represents a lock-free linked list of "free" things. This is used by
323 * both the block pool and the state pools. Unfortunately, in order to
324 * solve the ABA problem, we can't use a single uint32_t head.
325 */
326 union anv_free_list {
327 struct {
328 int32_t offset;
329
330 /* A simple count that is incremented every time the head changes. */
331 uint32_t count;
332 };
333 uint64_t u64;
334 };
335
336 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
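/* Sketch (illustrative, not the pools' actual code) of why the counter is
 * needed: a pop is a 64-bit compare-and-swap on { offset, count }. Bumping
 * count on every head change keeps a stale head from matching after an
 * intervening pop/push pair, which is exactly the ABA case a bare 32-bit
 * offset would miss. next_offset_of() is a hypothetical helper that reads
 * the link stored in the free block itself.
 */
#if 0
union anv_free_list current, new_head;
current.u64 = __atomic_load_n(&list->u64, __ATOMIC_ACQUIRE);
do {
   new_head.offset = next_offset_of(current.offset);  /* hypothetical */
   new_head.count = current.count + 1;
} while (!__atomic_compare_exchange_n(&list->u64, &current.u64, new_head.u64,
                                      false, __ATOMIC_ACQUIRE,
                                      __ATOMIC_ACQUIRE));
#endif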
337
338 struct anv_block_state {
339 union {
340 struct {
341 uint32_t next;
342 uint32_t end;
343 };
344 uint64_t u64;
345 };
346 };
347
348 struct anv_block_pool {
349 struct anv_device *device;
350
351 struct anv_bo bo;
352
353 /* The offset from the start of the bo to the "center" of the block
354 * pool. Pointers to allocated blocks are given by
355 * bo.map + center_bo_offset + offsets.
356 */
357 uint32_t center_bo_offset;
358
359 /* Current memory map of the block pool. This pointer may or may not
360 * point to the actual beginning of the block pool memory. If
361 * anv_block_pool_alloc_back has ever been called, then this pointer
362 * will point to the "center" position of the buffer and all offsets
363 * (negative or positive) given out by the block pool alloc functions
364 * will be valid relative to this pointer.
365 *
366 * In particular, map == bo.map + center_offset
367 */
368 void *map;
369 int fd;
370
371 /**
372 * Array of mmaps and gem handles owned by the block pool, reclaimed when
373 * the block pool is destroyed.
374 */
375 struct u_vector mmap_cleanups;
376
377 uint32_t block_size;
378
379 union anv_free_list free_list;
380 struct anv_block_state state;
381
382 union anv_free_list back_free_list;
383 struct anv_block_state back_state;
384 };
385
 386 /* Block pools are backed by a fixed-size 4GB memfd */
387 #define BLOCK_POOL_MEMFD_SIZE (1ull << 32)
388
389 /* The center of the block pool is also the middle of the memfd. This may
390 * change in the future if we decide differently for some reason.
391 */
392 #define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
393
394 static inline uint32_t
395 anv_block_pool_size(struct anv_block_pool *pool)
396 {
397 return pool->state.end + pool->back_state.end;
398 }
399
400 struct anv_state {
401 int32_t offset;
402 uint32_t alloc_size;
403 void *map;
404 };
405
406 struct anv_fixed_size_state_pool {
407 size_t state_size;
408 union anv_free_list free_list;
409 struct anv_block_state block;
410 };
411
412 #define ANV_MIN_STATE_SIZE_LOG2 6
413 #define ANV_MAX_STATE_SIZE_LOG2 20
414
415 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
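/* Sketch (illustrative): mapping an allocation size to its bucket. Sizes are
 * rounded up to the next power of two and clamped to the minimum, so bucket 0
 * holds 64-byte states and the last bucket holds 1 MB states.
 */
#if 0
unsigned size_log2 = size <= 1 ? 0 : 32 - __builtin_clz((uint32_t)size - 1);
if (size_log2 < ANV_MIN_STATE_SIZE_LOG2)
   size_log2 = ANV_MIN_STATE_SIZE_LOG2;
assert(size_log2 <= ANV_MAX_STATE_SIZE_LOG2);
unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
#endif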
416
417 struct anv_state_pool {
418 struct anv_block_pool *block_pool;
419 struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
420 };
421
422 struct anv_state_stream_block;
423
424 struct anv_state_stream {
425 struct anv_block_pool *block_pool;
426
427 /* The current working block */
428 struct anv_state_stream_block *block;
429
430 /* Offset at which the current block starts */
431 uint32_t start;
432 /* Offset at which to allocate the next state */
433 uint32_t next;
434 /* Offset at which the current block ends */
435 uint32_t end;
436 };
437
438 #define CACHELINE_SIZE 64
439 #define CACHELINE_MASK 63
440
441 static inline void
442 anv_clflush_range(void *start, size_t size)
443 {
444 void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
445 void *end = start + size;
446
447 __builtin_ia32_mfence();
448 while (p < end) {
449 __builtin_ia32_clflush(p);
450 p += CACHELINE_SIZE;
451 }
452 }
453
 454 static inline void
455 anv_state_clflush(struct anv_state state)
456 {
457 anv_clflush_range(state.map, state.alloc_size);
458 }
459
460 VkResult anv_block_pool_init(struct anv_block_pool *pool,
461 struct anv_device *device, uint32_t block_size);
462 void anv_block_pool_finish(struct anv_block_pool *pool);
463 int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
464 int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
465 void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
466 void anv_state_pool_init(struct anv_state_pool *pool,
467 struct anv_block_pool *block_pool);
468 void anv_state_pool_finish(struct anv_state_pool *pool);
469 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
470 size_t state_size, size_t alignment);
471 void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
472 void anv_state_stream_init(struct anv_state_stream *stream,
473 struct anv_block_pool *block_pool);
474 void anv_state_stream_finish(struct anv_state_stream *stream);
475 struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
476 uint32_t size, uint32_t alignment);
477
478 /**
479 * Implements a pool of re-usable BOs. The interface is identical to that
480 * of block_pool except that each block is its own BO.
481 */
482 struct anv_bo_pool {
483 struct anv_device *device;
484
485 void *free_list[16];
486 };
487
488 void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
489 void anv_bo_pool_finish(struct anv_bo_pool *pool);
490 VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
491 uint32_t size);
492 void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
493
494 struct anv_scratch_bo {
495 bool exists;
496 struct anv_bo bo;
497 };
498
499 struct anv_scratch_pool {
500 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
501 struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
502 };
503
504 void anv_scratch_pool_init(struct anv_device *device,
505 struct anv_scratch_pool *pool);
506 void anv_scratch_pool_finish(struct anv_device *device,
507 struct anv_scratch_pool *pool);
508 struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
509 struct anv_scratch_pool *pool,
510 gl_shader_stage stage,
511 unsigned per_thread_scratch);
512
513 extern struct anv_dispatch_table dtable;
514
515 struct anv_physical_device {
516 VK_LOADER_DATA _loader_data;
517
518 struct anv_instance * instance;
519 uint32_t chipset_id;
520 char path[20];
521 const char * name;
522 struct gen_device_info info;
523 uint64_t aperture_size;
524 struct brw_compiler * compiler;
525 struct isl_device isl_dev;
526 int cmd_parser_version;
527
528 uint32_t eu_total;
529 uint32_t subslice_total;
530
531 uint8_t uuid[VK_UUID_SIZE];
532
533 struct wsi_device wsi_device;
534 };
535
536 struct anv_instance {
537 VK_LOADER_DATA _loader_data;
538
539 VkAllocationCallbacks alloc;
540
541 uint32_t apiVersion;
542 int physicalDeviceCount;
543 struct anv_physical_device physicalDevice;
544 };
545
546 VkResult anv_init_wsi(struct anv_physical_device *physical_device);
547 void anv_finish_wsi(struct anv_physical_device *physical_device);
548
549 struct anv_queue {
550 VK_LOADER_DATA _loader_data;
551
552 struct anv_device * device;
553
554 struct anv_state_pool * pool;
555 };
556
557 struct anv_pipeline_cache {
558 struct anv_device * device;
559 pthread_mutex_t mutex;
560
561 struct hash_table * cache;
562 };
563
564 struct anv_pipeline_bind_map;
565
566 void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
567 struct anv_device *device,
568 bool cache_enabled);
569 void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
570
571 struct anv_shader_bin *
572 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
573 const void *key, uint32_t key_size);
574 struct anv_shader_bin *
575 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
576 const void *key_data, uint32_t key_size,
577 const void *kernel_data, uint32_t kernel_size,
578 const struct brw_stage_prog_data *prog_data,
579 uint32_t prog_data_size,
580 const struct anv_pipeline_bind_map *bind_map);
581
582 struct anv_device {
583 VK_LOADER_DATA _loader_data;
584
585 VkAllocationCallbacks alloc;
586
587 struct anv_instance * instance;
588 uint32_t chipset_id;
589 struct gen_device_info info;
590 struct isl_device isl_dev;
591 int context_id;
592 int fd;
593 bool can_chain_batches;
594 bool robust_buffer_access;
595
596 struct anv_bo_pool batch_bo_pool;
597
598 struct anv_block_pool dynamic_state_block_pool;
599 struct anv_state_pool dynamic_state_pool;
600
601 struct anv_block_pool instruction_block_pool;
602 struct anv_state_pool instruction_state_pool;
603
604 struct anv_block_pool surface_state_block_pool;
605 struct anv_state_pool surface_state_pool;
606
607 struct anv_bo workaround_bo;
608
609 struct anv_pipeline_cache blorp_shader_cache;
610 struct blorp_context blorp;
611
612 struct anv_state border_colors;
613
614 struct anv_queue queue;
615
616 struct anv_scratch_pool scratch_pool;
617
618 uint32_t default_mocs;
619
620 pthread_mutex_t mutex;
621 pthread_cond_t queue_submit;
622 };
623
624 void anv_device_init_blorp(struct anv_device *device);
625 void anv_device_finish_blorp(struct anv_device *device);
626
627 VkResult anv_device_execbuf(struct anv_device *device,
628 struct drm_i915_gem_execbuffer2 *execbuf,
629 struct anv_bo **execbuf_bos);
630
631 void* anv_gem_mmap(struct anv_device *device,
632 uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
633 void anv_gem_munmap(void *p, uint64_t size);
634 uint32_t anv_gem_create(struct anv_device *device, size_t size);
635 void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
636 uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
637 int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
638 int anv_gem_execbuffer(struct anv_device *device,
639 struct drm_i915_gem_execbuffer2 *execbuf);
640 int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
641 uint32_t stride, uint32_t tiling);
642 int anv_gem_create_context(struct anv_device *device);
643 int anv_gem_destroy_context(struct anv_device *device, int context);
644 int anv_gem_get_param(int fd, uint32_t param);
645 bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
646 int anv_gem_get_aperture(int fd, uint64_t *size);
647 int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
648 uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
649 int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
650 int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
651 uint32_t read_domains, uint32_t write_domain);
652
653 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
654
655 struct anv_reloc_list {
656 size_t num_relocs;
657 size_t array_length;
658 struct drm_i915_gem_relocation_entry * relocs;
659 struct anv_bo ** reloc_bos;
660 };
661
662 VkResult anv_reloc_list_init(struct anv_reloc_list *list,
663 const VkAllocationCallbacks *alloc);
664 void anv_reloc_list_finish(struct anv_reloc_list *list,
665 const VkAllocationCallbacks *alloc);
666
667 uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
668 const VkAllocationCallbacks *alloc,
669 uint32_t offset, struct anv_bo *target_bo,
670 uint32_t delta);
671
672 struct anv_batch_bo {
673 /* Link in the anv_cmd_buffer.owned_batch_bos list */
674 struct list_head link;
675
676 struct anv_bo bo;
677
678 /* Bytes actually consumed in this batch BO */
679 size_t length;
680
681 struct anv_reloc_list relocs;
682 };
683
684 struct anv_batch {
685 const VkAllocationCallbacks * alloc;
686
687 void * start;
688 void * end;
689 void * next;
690
691 struct anv_reloc_list * relocs;
692
693 /* This callback is called (with the associated user data) in the event
694 * that the batch runs out of space.
695 */
696 VkResult (*extend_cb)(struct anv_batch *, void *);
697 void * user_data;
698 };
699
700 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
701 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
702 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
703 void *location, struct anv_bo *bo, uint32_t offset);
704 VkResult anv_device_submit_simple_batch(struct anv_device *device,
705 struct anv_batch *batch);
706
707 struct anv_address {
708 struct anv_bo *bo;
709 uint32_t offset;
710 };
711
712 static inline uint64_t
713 _anv_combine_address(struct anv_batch *batch, void *location,
714 const struct anv_address address, uint32_t delta)
715 {
716 if (address.bo == NULL) {
717 return address.offset + delta;
718 } else {
719 assert(batch->start <= location && location < batch->end);
720
721 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
722 }
723 }
724
725 #define __gen_address_type struct anv_address
726 #define __gen_user_data struct anv_batch
727 #define __gen_combine_address _anv_combine_address
728
729 /* Wrapper macros needed to work around preprocessor argument issues. In
730 * particular, arguments don't get pre-evaluated if they are concatenated.
731 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
732 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
733 * We can work around this easily enough with these helpers.
734 */
735 #define __anv_cmd_length(cmd) cmd ## _length
736 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
737 #define __anv_cmd_header(cmd) cmd ## _header
738 #define __anv_cmd_pack(cmd) cmd ## _pack
739 #define __anv_reg_num(reg) reg ## _num
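/* Illustration (just standard preprocessor behavior): ## suppresses macro
 * expansion of its operands, so a direct `cmd ## _length` would paste
 * _length onto the literal tokens of GENX(3DSTATE_PS) instead of its
 * expansion. Going through __anv_cmd_length(GENX(3DSTATE_PS)) lets the
 * argument expand first, producing e.g. GEN8_3DSTATE_PS_length as intended.
 */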
740
741 #define anv_pack_struct(dst, struc, ...) do { \
742 struct struc __template = { \
743 __VA_ARGS__ \
744 }; \
745 __anv_cmd_pack(struc)(NULL, dst, &__template); \
746 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
747 } while (0)
748
749 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
750 void *__dst = anv_batch_emit_dwords(batch, n); \
751 struct cmd __template = { \
752 __anv_cmd_header(cmd), \
753 .DWordLength = n - __anv_cmd_length_bias(cmd), \
754 __VA_ARGS__ \
755 }; \
756 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
757 __dst; \
758 })
759
760 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
761 do { \
762 uint32_t *dw; \
763 \
764 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
765 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
766 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
767 dw[i] = (dwords0)[i] | (dwords1)[i]; \
768 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
769 } while (0)
770
771 #define anv_batch_emit(batch, cmd, name) \
772 for (struct cmd name = { __anv_cmd_header(cmd) }, \
773 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
774 __builtin_expect(_dst != NULL, 1); \
775 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
776 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
777 _dst = NULL; \
778 }))
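/* Usage sketch (illustrative): the for-loop gives the caller a named template
 * struct, pre-seeded with the command's header, to fill in; the command is
 * packed into the batch when the block closes.
 */
#if 0
anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
   pc.CommandStreamerStallEnable = true;
   pc.StallAtPixelScoreboard     = true;
}
#endif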
779
780 #define anv_state_pool_emit(pool, cmd, align, ...) ({ \
781 const uint32_t __size = __anv_cmd_length(cmd) * 4; \
782 struct anv_state __state = \
783 anv_state_pool_alloc((pool), __size, align); \
784 struct cmd __template = { \
785 __VA_ARGS__ \
786 }; \
787 __anv_cmd_pack(cmd)(NULL, __state.map, &__template); \
788 VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
789 if (!(pool)->block_pool->device->info.has_llc) \
790 anv_state_clflush(__state); \
791 __state; \
792 })
793
794 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
795 .GraphicsDataTypeGFDT = 0, \
796 .LLCCacheabilityControlLLCCC = 0, \
797 .L3CacheabilityControlL3CC = 1, \
798 }
799
800 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
801 .LLCeLLCCacheabilityControlLLCCC = 0, \
802 .L3CacheabilityControlL3CC = 1, \
803 }
804
805 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
806 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
807 .TargetCache = L3DefertoPATforLLCeLLCselection, \
808 .AgeforQUADLRU = 0 \
809 }
810
811 /* Skylake: MOCS is now an index into an array of 62 different caching
812 * configurations programmed by the kernel.
813 */
814
815 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
816 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
817 .IndextoMOCSTables = 2 \
818 }
819
820 #define GEN9_MOCS_PTE { \
821 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
822 .IndextoMOCSTables = 1 \
823 }
824
825 struct anv_device_memory {
826 struct anv_bo bo;
827 uint32_t type_index;
828 VkDeviceSize map_size;
829 void * map;
830 };
831
832 /**
833 * Header for Vertex URB Entry (VUE)
834 */
835 struct anv_vue_header {
836 uint32_t Reserved;
837 uint32_t RTAIndex; /* RenderTargetArrayIndex */
838 uint32_t ViewportIndex;
839 float PointWidth;
840 };
841
842 struct anv_descriptor_set_binding_layout {
843 #ifndef NDEBUG
844 /* The type of the descriptors in this binding */
845 VkDescriptorType type;
846 #endif
847
848 /* Number of array elements in this binding */
849 uint16_t array_size;
850
 851 /* Index into the flattened descriptor set */
852 uint16_t descriptor_index;
853
854 /* Index into the dynamic state array for a dynamic buffer */
855 int16_t dynamic_offset_index;
856
857 /* Index into the descriptor set buffer views */
858 int16_t buffer_index;
859
860 struct {
861 /* Index into the binding table for the associated surface */
862 int16_t surface_index;
863
864 /* Index into the sampler table for the associated sampler */
865 int16_t sampler_index;
866
867 /* Index into the image table for the associated image */
868 int16_t image_index;
869 } stage[MESA_SHADER_STAGES];
870
871 /* Immutable samplers (or NULL if no immutable samplers) */
872 struct anv_sampler **immutable_samplers;
873 };
874
875 struct anv_descriptor_set_layout {
876 /* Number of bindings in this descriptor set */
877 uint16_t binding_count;
878
879 /* Total size of the descriptor set with room for all array entries */
880 uint16_t size;
881
882 /* Shader stages affected by this descriptor set */
883 uint16_t shader_stages;
884
885 /* Number of buffers in this descriptor set */
886 uint16_t buffer_count;
887
888 /* Number of dynamic offsets used by this descriptor set */
889 uint16_t dynamic_offset_count;
890
891 /* Bindings in this descriptor set */
892 struct anv_descriptor_set_binding_layout binding[0];
893 };
894
895 struct anv_descriptor {
896 VkDescriptorType type;
897
898 union {
899 struct {
900 struct anv_image_view *image_view;
901 struct anv_sampler *sampler;
902 };
903
904 struct anv_buffer_view *buffer_view;
905 };
906 };
907
908 struct anv_descriptor_set {
909 const struct anv_descriptor_set_layout *layout;
910 uint32_t size;
911 uint32_t buffer_count;
912 struct anv_buffer_view *buffer_views;
913 struct anv_descriptor descriptors[0];
914 };
915
916 struct anv_descriptor_pool {
917 uint32_t size;
918 uint32_t next;
919 uint32_t free_list;
920
921 struct anv_state_stream surface_state_stream;
922 void *surface_state_free_list;
923
924 char data[0];
925 };
926
927 VkResult
928 anv_descriptor_set_create(struct anv_device *device,
929 struct anv_descriptor_pool *pool,
930 const struct anv_descriptor_set_layout *layout,
931 struct anv_descriptor_set **out_set);
932
933 void
934 anv_descriptor_set_destroy(struct anv_device *device,
935 struct anv_descriptor_pool *pool,
936 struct anv_descriptor_set *set);
937
938 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
939
940 struct anv_pipeline_binding {
941 /* The descriptor set this surface corresponds to. The special value of
 942 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the index refers
 943 * to a color attachment and not a regular descriptor.
944 */
945 uint8_t set;
946
947 /* Binding in the descriptor set */
948 uint8_t binding;
949
950 /* Index in the binding */
951 uint8_t index;
952
953 /* Input attachment index (relative to the subpass) */
954 uint8_t input_attachment_index;
955 };
956
957 struct anv_pipeline_layout {
958 struct {
959 struct anv_descriptor_set_layout *layout;
960 uint32_t dynamic_offset_start;
961 } set[MAX_SETS];
962
963 uint32_t num_sets;
964
965 struct {
966 bool has_dynamic_offsets;
967 } stage[MESA_SHADER_STAGES];
968
969 unsigned char sha1[20];
970 };
971
972 struct anv_buffer {
973 struct anv_device * device;
974 VkDeviceSize size;
975
976 VkBufferUsageFlags usage;
977
978 /* Set when bound */
979 struct anv_bo * bo;
980 VkDeviceSize offset;
981 };
982
983 enum anv_cmd_dirty_bits {
984 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
985 ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
986 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
987 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
988 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
989 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
990 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
991 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
992 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
993 ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
994 ANV_CMD_DIRTY_PIPELINE = 1 << 9,
995 ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
996 ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
997 };
998 typedef uint32_t anv_cmd_dirty_mask_t;
999
1000 enum anv_pipe_bits {
1001 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
1002 ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
1003 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
1004 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
1005 ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
1006 ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
1007 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
1008 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
1009 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
1010 ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
1011 ANV_PIPE_CS_STALL_BIT = (1 << 20),
1012
1013 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1014 * a flush has happened but not a CS stall. The next time we do any sort
1015 * of invalidation we need to insert a CS stall at that time. Otherwise,
 1016 * we would have to CS stall on every flush, which could be needlessly
 1017 * expensive. A sketch of this rule follows the ANV_PIPE_* masks below. */
1018 ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
1019 };
1020
1021 #define ANV_PIPE_FLUSH_BITS ( \
1022 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1023 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1024 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1025
1026 #define ANV_PIPE_STALL_BITS ( \
1027 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1028 ANV_PIPE_DEPTH_STALL_BIT | \
1029 ANV_PIPE_CS_STALL_BIT)
1030
1031 #define ANV_PIPE_INVALIDATE_BITS ( \
1032 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1033 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1034 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1035 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1036 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1037 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
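/* Sketch (illustrative) of the deferred-stall rule described in
 * anv_pipe_bits: flushes only mark that a CS stall will be needed, and the
 * stall itself is emitted when an invalidate actually follows.
 */
#if 0
if (bits & ANV_PIPE_FLUSH_BITS)
   bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;
if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
    (bits & ANV_PIPE_NEEDS_CS_STALL_BIT))
   bits |= ANV_PIPE_CS_STALL_BIT;
#endif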
1038
1039 struct anv_vertex_binding {
1040 struct anv_buffer * buffer;
1041 VkDeviceSize offset;
1042 };
1043
1044 struct anv_push_constants {
1045 /* Current allocated size of this push constants data structure.
1046 * Because a decent chunk of it may not be used (images on SKL, for
1047 * instance), we won't actually allocate the entire structure up-front.
1048 */
1049 uint32_t size;
1050
1051 /* Push constant data provided by the client through vkPushConstants */
1052 uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
1053
 1054 /* Our hardware only provides zero-based vertex and instance IDs so, in
 1055 * order to satisfy the Vulkan requirements, we may have to push one or
 1056 * both of these into the shader (see the sketch after this struct).
1057 */
1058 uint32_t base_vertex;
1059 uint32_t base_instance;
1060
1061 /* Offsets and ranges for dynamically bound buffers */
1062 struct {
1063 uint32_t offset;
1064 uint32_t range;
1065 } dynamic[MAX_DYNAMIC_BUFFERS];
1066
1067 /* Image data for image_load_store on pre-SKL */
1068 struct brw_image_param images[MAX_IMAGES];
1069 };
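/* Sketch (illustrative) of the relationship the base_vertex/base_instance
 * comment above describes: the hardware's vertex/instance IDs start at zero,
 * while Vulkan's SPIR-V VertexIndex/InstanceIndex include the draw's
 * firstVertex and firstInstance, so the shader reconstructs them as
 *
 *    VertexIndex   = hw_vertex_id   + base_vertex;
 *    InstanceIndex = hw_instance_id + base_instance;
 */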
1070
1071 struct anv_dynamic_state {
1072 struct {
1073 uint32_t count;
1074 VkViewport viewports[MAX_VIEWPORTS];
1075 } viewport;
1076
1077 struct {
1078 uint32_t count;
1079 VkRect2D scissors[MAX_SCISSORS];
1080 } scissor;
1081
1082 float line_width;
1083
1084 struct {
1085 float bias;
1086 float clamp;
1087 float slope;
1088 } depth_bias;
1089
1090 float blend_constants[4];
1091
1092 struct {
1093 float min;
1094 float max;
1095 } depth_bounds;
1096
1097 struct {
1098 uint32_t front;
1099 uint32_t back;
1100 } stencil_compare_mask;
1101
1102 struct {
1103 uint32_t front;
1104 uint32_t back;
1105 } stencil_write_mask;
1106
1107 struct {
1108 uint32_t front;
1109 uint32_t back;
1110 } stencil_reference;
1111 };
1112
1113 extern const struct anv_dynamic_state default_dynamic_state;
1114
1115 void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
1116 const struct anv_dynamic_state *src,
1117 uint32_t copy_mask);
1118
1119 /**
1120 * Attachment state when recording a renderpass instance.
1121 *
1122 * The clear value is valid only if there exists a pending clear.
1123 */
1124 struct anv_attachment_state {
1125 enum isl_aux_usage aux_usage;
1126 enum isl_aux_usage input_aux_usage;
1127 struct anv_state color_rt_state;
1128 struct anv_state input_att_state;
1129
1130 VkImageLayout current_layout;
1131 VkImageAspectFlags pending_clear_aspects;
1132 bool fast_clear;
1133 VkClearValue clear_value;
1134 bool clear_color_is_zero_one;
1135 };
1136
1137 /** State required while building cmd buffer */
1138 struct anv_cmd_state {
1139 /* PIPELINE_SELECT.PipelineSelection */
1140 uint32_t current_pipeline;
1141 const struct gen_l3_config * current_l3_config;
1142 uint32_t vb_dirty;
1143 anv_cmd_dirty_mask_t dirty;
1144 anv_cmd_dirty_mask_t compute_dirty;
1145 enum anv_pipe_bits pending_pipe_bits;
1146 uint32_t num_workgroups_offset;
1147 struct anv_bo *num_workgroups_bo;
1148 VkShaderStageFlags descriptors_dirty;
1149 VkShaderStageFlags push_constants_dirty;
1150 uint32_t scratch_size;
1151 struct anv_pipeline * pipeline;
1152 struct anv_pipeline * compute_pipeline;
1153 struct anv_framebuffer * framebuffer;
1154 struct anv_render_pass * pass;
1155 struct anv_subpass * subpass;
1156 VkRect2D render_area;
1157 uint32_t restart_index;
1158 struct anv_vertex_binding vertex_bindings[MAX_VBS];
1159 struct anv_descriptor_set * descriptors[MAX_SETS];
1160 VkShaderStageFlags push_constant_stages;
1161 struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
1162 struct anv_state binding_tables[MESA_SHADER_STAGES];
1163 struct anv_state samplers[MESA_SHADER_STAGES];
1164 struct anv_dynamic_state dynamic;
1165 bool need_query_wa;
1166
1167 /**
1168 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1169 * valid only when recording a render pass instance.
1170 */
1171 struct anv_attachment_state * attachments;
1172
1173 /**
1174 * Surface states for color render targets. These are stored in a single
1175 * flat array. For depth-stencil attachments, the surface state is simply
1176 * left blank.
1177 */
1178 struct anv_state render_pass_states;
1179
1180 /**
1181 * A null surface state of the right size to match the framebuffer. This
1182 * is one of the states in render_pass_states.
1183 */
1184 struct anv_state null_surface_state;
1185
1186 struct {
1187 struct anv_buffer * index_buffer;
1188 uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1189 uint32_t index_offset;
1190 } gen7;
1191 };
1192
1193 struct anv_cmd_pool {
1194 VkAllocationCallbacks alloc;
1195 struct list_head cmd_buffers;
1196 };
1197
1198 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1199
1200 enum anv_cmd_buffer_exec_mode {
1201 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
1202 ANV_CMD_BUFFER_EXEC_MODE_EMIT,
1203 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
1204 ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
1205 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
1206 };
1207
1208 struct anv_cmd_buffer {
1209 VK_LOADER_DATA _loader_data;
1210
1211 struct anv_device * device;
1212
1213 struct anv_cmd_pool * pool;
1214 struct list_head pool_link;
1215
1216 struct anv_batch batch;
1217
1218 /* Fields required for the actual chain of anv_batch_bo's.
1219 *
1220 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1221 */
1222 struct list_head batch_bos;
1223 enum anv_cmd_buffer_exec_mode exec_mode;
1224
1225 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1226 * referenced by this command buffer
1227 *
1228 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1229 */
1230 struct u_vector seen_bbos;
1231
1232 /* A vector of int32_t's for every block of binding tables.
1233 *
1234 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1235 */
1236 struct u_vector bt_blocks;
1237 uint32_t bt_next;
1238
1239 struct anv_reloc_list surface_relocs;
1240 /** Last seen surface state block pool center bo offset */
1241 uint32_t last_ss_pool_center;
1242
1243 /* Serial for tracking buffer completion */
1244 uint32_t serial;
1245
1246 /* Stream objects for storing temporary data */
1247 struct anv_state_stream surface_state_stream;
1248 struct anv_state_stream dynamic_state_stream;
1249
1250 VkCommandBufferUsageFlags usage_flags;
1251 VkCommandBufferLevel level;
1252
1253 struct anv_cmd_state state;
1254 };
1255
1256 VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1257 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1258 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1259 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
1260 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1261 struct anv_cmd_buffer *secondary);
1262 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
1263 VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
1264 struct anv_cmd_buffer *cmd_buffer);
1265
1266 VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
1267
1268 VkResult
1269 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
1270 gl_shader_stage stage, uint32_t size);
1271 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
1272 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
1273 (offsetof(struct anv_push_constants, field) + \
1274 sizeof(cmd_buffer->state.push_constants[0]->field)))
1275
1276 struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
1277 const void *data, uint32_t size, uint32_t alignment);
1278 struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
1279 uint32_t *a, uint32_t *b,
1280 uint32_t dwords, uint32_t alignment);
1281
1282 struct anv_address
1283 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
1284 struct anv_state
1285 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
1286 uint32_t entries, uint32_t *state_offset);
1287 struct anv_state
1288 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
1289 struct anv_state
1290 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
1291 uint32_t size, uint32_t alignment);
1292
1293 VkResult
1294 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
1295
1296 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
1297 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
1298 bool depth_clamp_enable);
1299 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
1300
1301 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
1302 struct anv_render_pass *pass,
1303 struct anv_framebuffer *framebuffer,
1304 const VkClearValue *clear_values);
1305
1306 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
1307
1308 struct anv_state
1309 anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
1310 gl_shader_stage stage);
1311 struct anv_state
1312 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
1313
1314 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
1315 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
1316
1317 const struct anv_image_view *
1318 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
1319
1320 struct anv_state
1321 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
1322 uint32_t num_entries,
1323 uint32_t *state_offset);
1324
1325 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
1326
1327 enum anv_fence_state {
 1328 /** Indicates that this is a new (or newly reset) fence */
1329 ANV_FENCE_STATE_RESET,
1330
1331 /** Indicates that this fence has been submitted to the GPU but is still
1332 * (as far as we know) in use by the GPU.
1333 */
1334 ANV_FENCE_STATE_SUBMITTED,
1335
1336 ANV_FENCE_STATE_SIGNALED,
1337 };
1338
1339 struct anv_fence {
1340 struct anv_bo bo;
1341 struct drm_i915_gem_execbuffer2 execbuf;
1342 struct drm_i915_gem_exec_object2 exec2_objects[1];
1343 enum anv_fence_state state;
1344 };
1345
1346 struct anv_event {
1347 uint64_t semaphore;
1348 struct anv_state state;
1349 };
1350
1351 struct anv_shader_module {
1352 unsigned char sha1[20];
1353 uint32_t size;
1354 char data[0];
1355 };
1356
1357 void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
1358 struct anv_shader_module *module,
1359 const char *entrypoint,
1360 const struct anv_pipeline_layout *pipeline_layout,
1361 const VkSpecializationInfo *spec_info);
1362
1363 static inline gl_shader_stage
1364 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
1365 {
1366 assert(__builtin_popcount(vk_stage) == 1);
1367 return ffs(vk_stage) - 1;
1368 }
1369
1370 static inline VkShaderStageFlagBits
1371 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
1372 {
1373 return (1 << mesa_stage);
1374 }
1375
1376 #define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
1377
1378 #define anv_foreach_stage(stage, stage_bits) \
1379 for (gl_shader_stage stage, \
1380 __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
1381 stage = __builtin_ffs(__tmp) - 1, __tmp; \
1382 __tmp &= ~(1 << (stage)))
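/* Usage sketch (illustrative; flush_stage() is a hypothetical helper):
 * visit each shader stage present in a VkShaderStageFlags mask.
 */
#if 0
anv_foreach_stage(s, VK_SHADER_STAGE_VERTEX_BIT |
                     VK_SHADER_STAGE_FRAGMENT_BIT) {
   flush_stage(cmd_buffer, s);   /* runs for MESA_SHADER_VERTEX and _FRAGMENT */
}
#endif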
1383
1384 struct anv_pipeline_bind_map {
1385 uint32_t surface_count;
1386 uint32_t sampler_count;
1387 uint32_t image_count;
1388
1389 struct anv_pipeline_binding * surface_to_descriptor;
1390 struct anv_pipeline_binding * sampler_to_descriptor;
1391 };
1392
1393 struct anv_shader_bin_key {
1394 uint32_t size;
1395 uint8_t data[0];
1396 };
1397
1398 struct anv_shader_bin {
1399 uint32_t ref_cnt;
1400
1401 const struct anv_shader_bin_key *key;
1402
1403 struct anv_state kernel;
1404 uint32_t kernel_size;
1405
1406 const struct brw_stage_prog_data *prog_data;
1407 uint32_t prog_data_size;
1408
1409 struct anv_pipeline_bind_map bind_map;
1410
 1411 /* Prog data follows, then params, then the key, all aligned to 8 bytes */
1412 };
1413
1414 struct anv_shader_bin *
1415 anv_shader_bin_create(struct anv_device *device,
1416 const void *key, uint32_t key_size,
1417 const void *kernel, uint32_t kernel_size,
1418 const struct brw_stage_prog_data *prog_data,
1419 uint32_t prog_data_size, const void *prog_data_param,
1420 const struct anv_pipeline_bind_map *bind_map);
1421
1422 void
1423 anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
1424
1425 static inline void
1426 anv_shader_bin_ref(struct anv_shader_bin *shader)
1427 {
1428 assert(shader->ref_cnt >= 1);
1429 __sync_fetch_and_add(&shader->ref_cnt, 1);
1430 }
1431
1432 static inline void
1433 anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
1434 {
1435 assert(shader->ref_cnt >= 1);
1436 if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
1437 anv_shader_bin_destroy(device, shader);
1438 }
1439
1440 struct anv_pipeline {
1441 struct anv_device * device;
1442 struct anv_batch batch;
1443 uint32_t batch_data[512];
1444 struct anv_reloc_list batch_relocs;
1445 uint32_t dynamic_state_mask;
1446 struct anv_dynamic_state dynamic_state;
1447
1448 struct anv_pipeline_layout * layout;
1449
1450 bool needs_data_cache;
1451
1452 struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
1453
1454 struct {
1455 const struct gen_l3_config * l3_config;
1456 uint32_t total_size;
1457 } urb;
1458
1459 VkShaderStageFlags active_stages;
1460 struct anv_state blend_state;
1461
1462 uint32_t vb_used;
1463 uint32_t binding_stride[MAX_VBS];
1464 bool instancing_enable[MAX_VBS];
1465 bool primitive_restart;
1466 uint32_t topology;
1467
1468 uint32_t cs_right_mask;
1469
1470 bool depth_clamp_enable;
1471
1472 struct {
1473 uint32_t sf[7];
1474 uint32_t depth_stencil_state[3];
1475 } gen7;
1476
1477 struct {
1478 uint32_t sf[4];
1479 uint32_t raster[5];
1480 uint32_t wm_depth_stencil[3];
1481 } gen8;
1482
1483 struct {
1484 uint32_t wm_depth_stencil[4];
1485 } gen9;
1486
1487 uint32_t interface_descriptor_data[8];
1488 };
1489
1490 static inline bool
1491 anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
1492 gl_shader_stage stage)
1493 {
1494 return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
1495 }
1496
1497 #define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage) \
1498 static inline const struct brw_##prefix##_prog_data * \
1499 get_##prefix##_prog_data(const struct anv_pipeline *pipeline) \
1500 { \
1501 if (anv_pipeline_has_stage(pipeline, stage)) { \
1502 return (const struct brw_##prefix##_prog_data *) \
1503 pipeline->shaders[stage]->prog_data; \
1504 } else { \
1505 return NULL; \
1506 } \
1507 }
1508
1509 ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
1510 ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
1511 ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
1512 ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
1513 ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
1514 ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
1515
1516 static inline const struct brw_vue_prog_data *
1517 anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
1518 {
1519 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
1520 return &get_gs_prog_data(pipeline)->base;
1521 else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1522 return &get_tes_prog_data(pipeline)->base;
1523 else
1524 return &get_vs_prog_data(pipeline)->base;
1525 }
1526
1527 VkResult
1528 anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
1529 struct anv_pipeline_cache *cache,
1530 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1531 const VkAllocationCallbacks *alloc);
1532
1533 VkResult
1534 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1535 struct anv_pipeline_cache *cache,
1536 const VkComputePipelineCreateInfo *info,
1537 struct anv_shader_module *module,
1538 const char *entrypoint,
1539 const VkSpecializationInfo *spec_info);
1540
1541 struct anv_format {
1542 enum isl_format isl_format:16;
1543 struct isl_swizzle swizzle;
1544 };
1545
1546 struct anv_format
1547 anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
1548 VkImageAspectFlags aspect, VkImageTiling tiling);
1549
1550 static inline enum isl_format
1551 anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
1552 VkImageAspectFlags aspect, VkImageTiling tiling)
1553 {
1554 return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
1555 }
1556
1557 static inline struct isl_swizzle
1558 anv_swizzle_for_render(struct isl_swizzle swizzle)
1559 {
1560 /* Sometimes the swizzle will have alpha map to one. We do this to fake
1561 * RGB as RGBA for texturing
1562 */
1563 assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
1564 swizzle.a == ISL_CHANNEL_SELECT_ALPHA);
1565
1566 /* But it doesn't matter what we render to that channel */
1567 swizzle.a = ISL_CHANNEL_SELECT_ALPHA;
1568
1569 return swizzle;
1570 }
1571
1572 void
1573 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
1574
1575 /**
1576 * Subsurface of an anv_image.
1577 */
1578 struct anv_surface {
1579 /** Valid only if isl_surf::size > 0. */
1580 struct isl_surf isl;
1581
1582 /**
1583 * Offset from VkImage's base address, as bound by vkBindImageMemory().
1584 */
1585 uint32_t offset;
1586 };
1587
1588 struct anv_image {
1589 VkImageType type;
1590 /* The original VkFormat provided by the client. This may not match any
1591 * of the actual surface formats.
1592 */
1593 VkFormat vk_format;
1594 VkImageAspectFlags aspects;
1595 VkExtent3D extent;
1596 uint32_t levels;
1597 uint32_t array_size;
1598 uint32_t samples; /**< VkImageCreateInfo::samples */
1599 VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
 1600 VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
1601
1602 VkDeviceSize size;
1603 uint32_t alignment;
1604
1605 /* Set when bound */
1606 struct anv_bo *bo;
1607 VkDeviceSize offset;
1608
1609 /**
1610 * Image subsurfaces
1611 *
1612 * For each foo, anv_image::foo_surface is valid if and only if
1613 * anv_image::aspects has a foo aspect.
1614 *
1615 * The hardware requires that the depth buffer and stencil buffer be
1616 * separate surfaces. From Vulkan's perspective, though, depth and stencil
1617 * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
1618 * allocate the depth and stencil buffers as separate surfaces in the same
1619 * bo.
1620 */
1621 union {
1622 struct anv_surface color_surface;
1623
1624 struct {
1625 struct anv_surface depth_surface;
1626 struct anv_surface stencil_surface;
1627 };
1628 };
1629
1630 /**
1631 * For color images, this is the aux usage for this image when not used as a
1632 * color attachment.
1633 *
1634 * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the image
1635 * has a HiZ buffer.
1636 */
1637 enum isl_aux_usage aux_usage;
1638
1639 struct anv_surface aux_surface;
1640 };
1641
1642 /* Returns true if a HiZ-enabled depth buffer can be sampled from. */
1643 static inline bool
1644 anv_can_sample_with_hiz(uint8_t gen, uint32_t samples)
1645 {
1646 return gen >= 8 && samples == 1;
1647 }
1648
1649 void
1650 anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
1651 const struct anv_image *image,
1652 enum blorp_hiz_op op);
1653
1654 static inline uint32_t
1655 anv_get_layerCount(const struct anv_image *image,
1656 const VkImageSubresourceRange *range)
1657 {
1658 return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
1659 image->array_size - range->baseArrayLayer : range->layerCount;
1660 }
1661
1662 static inline uint32_t
1663 anv_get_levelCount(const struct anv_image *image,
1664 const VkImageSubresourceRange *range)
1665 {
1666 return range->levelCount == VK_REMAINING_MIP_LEVELS ?
1667 image->levels - range->baseMipLevel : range->levelCount;
1668 }
1669
1670
1671 struct anv_image_view {
1672 const struct anv_image *image; /**< VkImageViewCreateInfo::image */
1673 struct anv_bo *bo;
1674 uint32_t offset; /**< Offset into bo. */
1675
1676 struct isl_view isl;
1677
1678 VkImageAspectFlags aspect_mask;
1679 VkFormat vk_format;
1680 VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
1681
1682 /** RENDER_SURFACE_STATE when using image as a sampler surface. */
1683 struct anv_state sampler_surface_state;
1684
1685 /** RENDER_SURFACE_STATE when using image as a storage image. */
1686 struct anv_state storage_surface_state;
1687
1688 struct brw_image_param storage_image_param;
1689 };
1690
1691 struct anv_image_create_info {
1692 const VkImageCreateInfo *vk_info;
1693
1694 /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
1695 isl_tiling_flags_t isl_tiling_flags;
1696
1697 uint32_t stride;
1698 };
1699
1700 VkResult anv_image_create(VkDevice _device,
1701 const struct anv_image_create_info *info,
1702 const VkAllocationCallbacks* alloc,
1703 VkImage *pImage);
1704
1705 const struct anv_surface *
1706 anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
1707 VkImageAspectFlags aspect_mask);
1708
1709 struct anv_buffer_view {
1710 enum isl_format format; /**< VkBufferViewCreateInfo::format */
1711 struct anv_bo *bo;
1712 uint32_t offset; /**< Offset into bo. */
1713 uint64_t range; /**< VkBufferViewCreateInfo::range */
1714
1715 struct anv_state surface_state;
1716 struct anv_state storage_surface_state;
1717
1718 struct brw_image_param storage_image_param;
1719 };
1720
1721 enum isl_format
1722 anv_isl_format_for_descriptor_type(VkDescriptorType type);
1723
1724 static inline struct VkExtent3D
1725 anv_sanitize_image_extent(const VkImageType imageType,
1726 const struct VkExtent3D imageExtent)
1727 {
1728 switch (imageType) {
1729 case VK_IMAGE_TYPE_1D:
1730 return (VkExtent3D) { imageExtent.width, 1, 1 };
1731 case VK_IMAGE_TYPE_2D:
1732 return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
1733 case VK_IMAGE_TYPE_3D:
1734 return imageExtent;
1735 default:
1736 unreachable("invalid image type");
1737 }
1738 }
1739
1740 static inline struct VkOffset3D
1741 anv_sanitize_image_offset(const VkImageType imageType,
1742 const struct VkOffset3D imageOffset)
1743 {
1744 switch (imageType) {
1745 case VK_IMAGE_TYPE_1D:
1746 return (VkOffset3D) { imageOffset.x, 0, 0 };
1747 case VK_IMAGE_TYPE_2D:
1748 return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
1749 case VK_IMAGE_TYPE_3D:
1750 return imageOffset;
1751 default:
1752 unreachable("invalid image type");
1753 }
1754 }
1755
1756
1757 void anv_fill_buffer_surface_state(struct anv_device *device,
1758 struct anv_state state,
1759 enum isl_format format,
1760 uint32_t offset, uint32_t range,
1761 uint32_t stride);
1762
1763 void anv_image_view_fill_image_param(struct anv_device *device,
1764 struct anv_image_view *view,
1765 struct brw_image_param *param);
1766 void anv_buffer_view_fill_image_param(struct anv_device *device,
1767 struct anv_buffer_view *view,
1768 struct brw_image_param *param);
1769
1770 struct anv_sampler {
1771 uint32_t state[4];
1772 };
1773
1774 struct anv_framebuffer {
1775 uint32_t width;
1776 uint32_t height;
1777 uint32_t layers;
1778
1779 uint32_t attachment_count;
1780 struct anv_image_view * attachments[0];
1781 };
1782
1783 struct anv_subpass {
1784 uint32_t input_count;
1785 uint32_t * input_attachments;
1786 uint32_t color_count;
1787 uint32_t * color_attachments;
1788 uint32_t * resolve_attachments;
1789
1790 /* TODO: Consider storing the depth/stencil VkAttachmentReference
1791 * instead of its two structure members (below) individually.
1792 */
1793 uint32_t depth_stencil_attachment;
1794 VkImageLayout depth_stencil_layout;
1795
1796 /** Subpass has a depth/stencil self-dependency */
1797 bool has_ds_self_dep;
1798
1799 /** Subpass has at least one resolve attachment */
1800 bool has_resolve;
1801 };
1802
1803 enum anv_subpass_usage {
1804 ANV_SUBPASS_USAGE_DRAW = (1 << 0),
1805 ANV_SUBPASS_USAGE_INPUT = (1 << 1),
1806 ANV_SUBPASS_USAGE_RESOLVE_SRC = (1 << 2),
1807 ANV_SUBPASS_USAGE_RESOLVE_DST = (1 << 3),
1808 };
1809
1810 struct anv_render_pass_attachment {
1811 /* TODO: Consider using VkAttachmentDescription instead of storing each of
1812 * its members individually.
1813 */
1814 VkFormat format;
1815 uint32_t samples;
1816 VkImageUsageFlags usage;
1817 VkAttachmentLoadOp load_op;
1818 VkAttachmentStoreOp store_op;
1819 VkAttachmentLoadOp stencil_load_op;
1820 VkImageLayout initial_layout;
1821 VkImageLayout final_layout;
1822
1823 /* An array, indexed by subpass id, of how the attachment will be used. */
1824 enum anv_subpass_usage * subpass_usage;
1825
1826 /* The subpass id in which the attachment will be used last. */
1827 uint32_t last_subpass_idx;
1828 };
1829
1830 struct anv_render_pass {
1831 uint32_t attachment_count;
1832 uint32_t subpass_count;
1833 uint32_t * subpass_attachments;
1834 enum anv_subpass_usage * subpass_usages;
1835 struct anv_render_pass_attachment * attachments;
1836 struct anv_subpass subpasses[0];
1837 };
1838
1839 struct anv_query_pool_slot {
1840 uint64_t begin;
1841 uint64_t end;
1842 uint64_t available;
1843 };
1844
1845 struct anv_query_pool {
1846 VkQueryType type;
1847 uint32_t slots;
1848 struct anv_bo bo;
1849 };
1850
1851 void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
1852 const char *name);
1853
1854 void anv_dump_image_to_ppm(struct anv_device *device,
1855 struct anv_image *image, unsigned miplevel,
1856 unsigned array_layer, VkImageAspectFlagBits aspect,
1857 const char *filename);
1858
1859 enum anv_dump_action {
1860 ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
1861 };
1862
1863 void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
1864 void anv_dump_finish(void);
1865
1866 void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
1867 struct anv_framebuffer *fb);
1868
1869 struct anv_common {
1870 VkStructureType sType;
1871 struct anv_common *pNext;
1872 };
1873
1874 #define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
1875 \
1876 static inline struct __anv_type * \
1877 __anv_type ## _from_handle(__VkType _handle) \
1878 { \
1879 return (struct __anv_type *) _handle; \
1880 } \
1881 \
1882 static inline __VkType \
1883 __anv_type ## _to_handle(struct __anv_type *_obj) \
1884 { \
1885 return (__VkType) _obj; \
1886 }
1887
1888 #define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
1889 \
1890 static inline struct __anv_type * \
1891 __anv_type ## _from_handle(__VkType _handle) \
1892 { \
1893 return (struct __anv_type *)(uintptr_t) _handle; \
1894 } \
1895 \
1896 static inline __VkType \
1897 __anv_type ## _to_handle(struct __anv_type *_obj) \
1898 { \
1899 return (__VkType)(uintptr_t) _obj; \
1900 }
1901
1902 #define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
1903 struct __anv_type *__name = __anv_type ## _from_handle(__handle)
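/* Usage sketch (illustrative; mirrors the pattern used at the driver's
 * entrypoints): unwrap an incoming Vulkan handle, and wrap a driver object
 * back up when handing one across the API boundary.
 */
#if 0
VkResult anv_QueueWaitIdle(VkQueue _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
}
#endif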
1904
1905 ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
1906 ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
1907 ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
1908 ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
1909 ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
1910
1911 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
1912 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
1913 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
1914 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
1915 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
1916 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
1917 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
1918 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
1919 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
1920 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
1921 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
 1922 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
1923 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
1924 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
1925 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
1926 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
1927 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
1928 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
1929 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
1930
1931 /* Gen-specific function declarations */
1932 #ifdef genX
1933 # include "anv_genX.h"
1934 #else
1935 # define genX(x) gen7_##x
1936 # include "anv_genX.h"
1937 # undef genX
1938 # define genX(x) gen75_##x
1939 # include "anv_genX.h"
1940 # undef genX
1941 # define genX(x) gen8_##x
1942 # include "anv_genX.h"
1943 # undef genX
1944 # define genX(x) gen9_##x
1945 # include "anv_genX.h"
1946 # undef genX
1947 #endif
1948
1949 #ifdef __cplusplus
1950 }
1951 #endif
1952
1953 #endif /* ANV_PRIVATE_H */