anv: move buffer_view declaration
[mesa.git] / src / intel / vulkan / anv_private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef ANV_PRIVATE_H
25 #define ANV_PRIVATE_H
26
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdbool.h>
30 #include <pthread.h>
31 #include <assert.h>
32 #include <stdint.h>
33 #include <i915_drm.h>
34
35 #ifdef HAVE_VALGRIND
36 #include <valgrind.h>
37 #include <memcheck.h>
38 #define VG(x) x
39 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
40 #else
41 #define VG(x)
42 #endif
43
44 #include "common/gen_device_info.h"
45 #include "blorp/blorp.h"
46 #include "brw_compiler.h"
47 #include "util/macros.h"
48 #include "util/list.h"
49 #include "util/u_vector.h"
50 #include "util/vk_alloc.h"
51
52 /* Pre-declarations needed for WSI entrypoints */
53 struct wl_surface;
54 struct wl_display;
55 typedef struct xcb_connection_t xcb_connection_t;
56 typedef uint32_t xcb_visualid_t;
57 typedef uint32_t xcb_window_t;
58
59 struct gen_l3_config;
60
61 #include <vulkan/vulkan.h>
62 #include <vulkan/vulkan_intel.h>
63 #include <vulkan/vk_icd.h>
64
65 #include "anv_entrypoints.h"
66 #include "brw_context.h"
67 #include "isl/isl.h"
68
69 #include "wsi_common.h"
70
71 /* Allowing different clear colors requires us to perform a depth resolve at
72 * the end of certain render passes. This is because while slow clears store
73 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
74 * See the PRMs for examples describing when additional resolves would be
75 * necessary. To enable fast clears without requiring extra resolves, we set
76 * the clear value to a globally-defined one. We could allow different values
77  * if the user doesn't expect coherent data during or after a render pass
78 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
79 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
80 * 1.0f seems to be the only value used. The only application that doesn't set
81  * this value does so through the use of a seemingly uninitialized clear
82 * value.
83 */
84 #define ANV_HZ_FC_VAL 1.0f
85
86 #define MAX_VBS 31
87 #define MAX_SETS 8
88 #define MAX_RTS 8
89 #define MAX_VIEWPORTS 16
90 #define MAX_SCISSORS 16
91 #define MAX_PUSH_CONSTANTS_SIZE 128
92 #define MAX_DYNAMIC_BUFFERS 16
93 #define MAX_IMAGES 8
94
95 #define ANV_SVGS_VB_INDEX MAX_VBS
96 #define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)
97
98 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
99
100 static inline uint32_t
101 align_down_npot_u32(uint32_t v, uint32_t a)
102 {
103 return v - (v % a);
104 }
105
106 static inline uint32_t
107 align_u32(uint32_t v, uint32_t a)
108 {
109 assert(a != 0 && a == (a & -a));
110 return (v + a - 1) & ~(a - 1);
111 }
112
113 static inline uint64_t
114 align_u64(uint64_t v, uint64_t a)
115 {
116 assert(a != 0 && a == (a & -a));
117 return (v + a - 1) & ~(a - 1);
118 }
119
120 static inline int32_t
121 align_i32(int32_t v, int32_t a)
122 {
123 assert(a != 0 && a == (a & -a));
124 return (v + a - 1) & ~(a - 1);
125 }
126
127 /** Alignment must be a power of 2. */
128 static inline bool
129 anv_is_aligned(uintmax_t n, uintmax_t a)
130 {
131 assert(a == (a & -a));
132 return (n & (a - 1)) == 0;
133 }
134
135 static inline uint32_t
136 anv_minify(uint32_t n, uint32_t levels)
137 {
138 if (unlikely(n == 0))
139 return 0;
140 else
141 return MAX2(n >> levels, 1);
142 }
143
144 static inline float
145 anv_clamp_f(float f, float min, float max)
146 {
147 assert(min < max);
148
149 if (f > max)
150 return max;
151 else if (f < min)
152 return min;
153 else
154 return f;
155 }
156
157 static inline bool
158 anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
159 {
160 if (*inout_mask & clear_mask) {
161 *inout_mask &= ~clear_mask;
162 return true;
163 } else {
164 return false;
165 }
166 }
167
168 static inline union isl_color_value
169 vk_to_isl_color(VkClearColorValue color)
170 {
171 return (union isl_color_value) {
172 .u32 = {
173 color.uint32[0],
174 color.uint32[1],
175 color.uint32[2],
176 color.uint32[3],
177 },
178 };
179 }
180
181 #define for_each_bit(b, dword) \
182 for (uint32_t __dword = (dword); \
183 (b) = __builtin_ffs(__dword) - 1, __dword; \
184 __dword &= ~(1 << (b)))
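/* Example usage (sketch): visit each set bit in a dirty mask, with b
 * receiving the bit index. "emit_vertex_buffer" is a hypothetical helper.
 *
 *    uint32_t b;
 *    for_each_bit(b, cmd_buffer->state.vb_dirty)
 *       emit_vertex_buffer(cmd_buffer, b);
 */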
185
186 #define typed_memcpy(dest, src, count) ({ \
187 STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
188 memcpy((dest), (src), (count) * sizeof(*(src))); \
189 })
190
191 /* Whenever we generate an error, pass it through this function. Useful for
192 * debugging, where we can break on it. Only call at error site, not when
193 * propagating errors. Might be useful to plug in a stack trace here.
194 */
195
196 VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);
197
198 #ifdef DEBUG
199 #define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
200 #define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
201 #define anv_debug(format, ...) fprintf(stderr, "debug: " format, ##__VA_ARGS__)
202 #else
203 #define vk_error(error) error
204 #define vk_errorf(error, format, ...) error
205 #define anv_debug(format, ...)
206 #endif
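/* Example (sketch): generate the error at the site where it is first
 * detected, so a debugger breakpoint on __vk_errorf sees the interesting
 * stack:
 *
 *    mem = vk_alloc2(&device->alloc, pAllocator, size, 8,
 *                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 *    if (mem == NULL)
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 */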
207
208 /**
209 * Warn on ignored extension structs.
210 *
211 * The Vulkan spec requires us to ignore unsupported or unknown structs in
212 * a pNext chain. In debug mode, emitting warnings for ignored structs may
213 * help us discover structs that we should not have ignored.
214 *
215 *
216 * From the Vulkan 1.0.38 spec:
217 *
218 * Any component of the implementation (the loader, any enabled layers,
219 * and drivers) must skip over, without processing (other than reading the
220 * sType and pNext members) any chained structures with sType values not
221 * defined by extensions supported by that component.
222 */
223 #define anv_debug_ignored_stype(sType) \
224 anv_debug("debug: %s: ignored VkStructureType %u\n", __func__, (sType))
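/* Typical use (sketch of a pNext walk; the struct name is illustrative):
 *
 *    for (const struct anv_common *s = pCreateInfo->pNext; s; s = s->pNext) {
 *       switch (s->sType) {
 *       ... cases for the extension structs we do support ...
 *       default:
 *          anv_debug_ignored_stype(s->sType);
 *          break;
 *       }
 *    }
 */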
225
226 void __anv_finishme(const char *file, int line, const char *format, ...)
227 anv_printflike(3, 4);
228 void anv_loge(const char *format, ...) anv_printflike(1, 2);
229 void anv_loge_v(const char *format, va_list va);
230
231 /**
232 * Print a FINISHME message, including its source location.
233 */
234 #define anv_finishme(format, ...) \
235 do { \
236 static bool reported = false; \
237 if (!reported) { \
238 __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
239 reported = true; \
240 } \
241 } while (0)
242
243 /* A non-fatal assert. Useful for debugging. */
244 #ifdef DEBUG
245 #define anv_assert(x) ({ \
246 if (unlikely(!(x))) \
247 fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
248 })
249 #else
250 #define anv_assert(x)
251 #endif
252
253 /**
254 * If a block of code is annotated with anv_validate, then the block runs only
255 * in debug builds.
256 */
257 #ifdef DEBUG
258 #define anv_validate if (1)
259 #else
260 #define anv_validate if (0)
261 #endif
262
263 #define stub_return(v) \
264 do { \
265 anv_finishme("stub %s", __func__); \
266 return (v); \
267 } while (0)
268
269 #define stub() \
270 do { \
271 anv_finishme("stub %s", __func__); \
272 return; \
273 } while (0)
274
275 /**
276  * A dynamically growable, circular buffer (struct u_vector, declared in
277  * util/u_vector.h). Elements are added at head and removed from tail. head
278  * and tail are free-running uint32_t indices, and we only compute the modulo
279  * with size when accessing the array. This way, the number of bytes in the
280  * queue is always head - tail, even in case of wraparound.
281 */
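/* For example, with size 64, head == 70 and tail == 60 means 10 bytes are
 * queued and the oldest byte lives at data[tail % 64] == data[60].
 */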
282
283 struct anv_bo {
284 uint32_t gem_handle;
285
286 /* Index into the current validation list. This is used by the
287  * validation list building algorithm to track which buffers are already
288 * in the validation list so that we can ensure uniqueness.
289 */
290 uint32_t index;
291
292 /* Last known offset. This value is provided by the kernel when we
293 * execbuf and is used as the presumed offset for the next bunch of
294 * relocations.
295 */
296 uint64_t offset;
297
298 uint64_t size;
299 void *map;
300
301 /* We need to set the WRITE flag on winsys bos so GEM will know we're
302  * writing to them and synchronize uses on other rings (e.g. if the display
303 * server uses the blitter ring).
304 */
305 bool is_winsys_bo;
306 };
307
308 static inline void
309 anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
310 {
311 bo->gem_handle = gem_handle;
312 bo->index = 0;
313 bo->offset = -1;
314 bo->size = size;
315 bo->map = NULL;
316 bo->is_winsys_bo = false;
317 }
318
319 /* Represents a lock-free linked list of "free" things. This is used by
320 * both the block pool and the state pools. Unfortunately, in order to
321 * solve the ABA problem, we can't use a single uint32_t head.
322 */
323 union anv_free_list {
324 struct {
325 int32_t offset;
326
327 /* A simple count that is incremented every time the head changes. */
328 uint32_t count;
329 };
330 uint64_t u64;
331 };
332
333 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
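/* Sketch of an ABA-safe pop using the counted head (illustrative only;
 * read_next_offset() stands in for reading the link stored in the free
 * block itself):
 *
 *    union anv_free_list current = { .u64 = list->u64 }, new, old;
 *    while (current.offset != 1) {               // 1 is the EMPTY offset
 *       new.offset = read_next_offset(current.offset);
 *       new.count = current.count + 1;           // bump count to foil ABA
 *       old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64,
 *                                             new.u64);
 *       if (old.u64 == current.u64)
 *          return current.offset;                // we won the race
 *       current = old;                           // lost; retry at new head
 *    }
 */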
334
335 struct anv_block_state {
336 union {
337 struct {
338 uint32_t next;
339 uint32_t end;
340 };
341 uint64_t u64;
342 };
343 };
344
345 struct anv_block_pool {
346 struct anv_device *device;
347
348 struct anv_bo bo;
349
350 /* The offset from the start of the bo to the "center" of the block
351 * pool. Pointers to allocated blocks are given by
352 * bo.map + center_bo_offset + offsets.
353 */
354 uint32_t center_bo_offset;
355
356 /* Current memory map of the block pool. This pointer may or may not
357 * point to the actual beginning of the block pool memory. If
358 * anv_block_pool_alloc_back has ever been called, then this pointer
359 * will point to the "center" position of the buffer and all offsets
360 * (negative or positive) given out by the block pool alloc functions
361 * will be valid relative to this pointer.
362 *
363 * In particular, map == bo.map + center_offset
364 */
365 void *map;
366 int fd;
367
368 /**
369 * Array of mmaps and gem handles owned by the block pool, reclaimed when
370 * the block pool is destroyed.
371 */
372 struct u_vector mmap_cleanups;
373
374 uint32_t block_size;
375
376 union anv_free_list free_list;
377 struct anv_block_state state;
378
379 union anv_free_list back_free_list;
380 struct anv_block_state back_state;
381 };
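/* Pointer arithmetic sketch: an offset handed out by the pool (positive for
 * front allocations, negative for back allocations) becomes a CPU pointer
 * via the "center" map:
 *
 *    void *ptr = pool->map + offset;   // == bo.map + center_bo_offset + offset
 */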
382
383 /* Block pools are backed by a fixed-size 4GB memfd */
384 #define BLOCK_POOL_MEMFD_SIZE (1ull << 32)
385
386 /* The center of the block pool is also the middle of the memfd. This may
387 * change in the future if we decide differently for some reason.
388 */
389 #define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
390
391 static inline uint32_t
392 anv_block_pool_size(struct anv_block_pool *pool)
393 {
394 return pool->state.end + pool->back_state.end;
395 }
396
397 struct anv_state {
398 int32_t offset;
399 uint32_t alloc_size;
400 void *map;
401 };
402
403 struct anv_fixed_size_state_pool {
404 size_t state_size;
405 union anv_free_list free_list;
406 struct anv_block_state block;
407 };
408
409 #define ANV_MIN_STATE_SIZE_LOG2 6
410 #define ANV_MAX_STATE_SIZE_LOG2 20
411
412 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
413
414 struct anv_state_pool {
415 struct anv_block_pool *block_pool;
416 struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
417 };
418
419 struct anv_state_stream_block;
420
421 struct anv_state_stream {
422 struct anv_block_pool *block_pool;
423
424 /* The current working block */
425 struct anv_state_stream_block *block;
426
427 /* Offset at which the current block starts */
428 uint32_t start;
429 /* Offset at which to allocate the next state */
430 uint32_t next;
431 /* Offset at which the current block ends */
432 uint32_t end;
433 };
434
435 #define CACHELINE_SIZE 64
436 #define CACHELINE_MASK 63
437
438 static inline void
439 anv_clflush_range(void *start, size_t size)
440 {
441 void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
442 void *end = start + size;
443
444 while (p < end) {
445 __builtin_ia32_clflush(p);
446 p += CACHELINE_SIZE;
447 }
448 }
449
450 static inline void
451 anv_flush_range(void *start, size_t size)
452 {
453 __builtin_ia32_mfence();
454 anv_clflush_range(start, size);
455 }
456
457 static inline void
458 anv_invalidate_range(void *start, size_t size)
459 {
460 anv_clflush_range(start, size);
461 __builtin_ia32_mfence();
462 }
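/* Note the fence placement above: anv_flush_range() fences first so that
 * prior CPU writes have landed before their cache lines are flushed out
 * for the GPU, while anv_invalidate_range() fences last so that later CPU
 * reads are ordered after the lines have been invalidated.
 */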
463
464 VkResult anv_block_pool_init(struct anv_block_pool *pool,
465 struct anv_device *device, uint32_t block_size);
466 void anv_block_pool_finish(struct anv_block_pool *pool);
467 int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
468 int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
469 void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
470 void anv_state_pool_init(struct anv_state_pool *pool,
471 struct anv_block_pool *block_pool);
472 void anv_state_pool_finish(struct anv_state_pool *pool);
473 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
474 size_t state_size, size_t alignment);
475 void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
476 void anv_state_stream_init(struct anv_state_stream *stream,
477 struct anv_block_pool *block_pool);
478 void anv_state_stream_finish(struct anv_state_stream *stream);
479 struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
480 uint32_t size, uint32_t alignment);
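/* Typical setup of the allocator stack (sketch; error handling and the
 * block size are illustrative):
 *
 *    struct anv_block_pool block_pool;
 *    struct anv_state_pool state_pool;
 *    anv_block_pool_init(&block_pool, device, 16384);
 *    anv_state_pool_init(&state_pool, &block_pool);
 *
 *    struct anv_state s = anv_state_pool_alloc(&state_pool, 256, 64);
 *    memcpy(s.map, data, 256);
 *    anv_state_pool_free(&state_pool, s);
 */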
481
482 /**
483 * Implements a pool of re-usable BOs. The interface is identical to that
484 * of block_pool except that each block is its own BO.
485 */
486 struct anv_bo_pool {
487 struct anv_device *device;
488
489 void *free_list[16];
490 };
491
492 void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
493 void anv_bo_pool_finish(struct anv_bo_pool *pool);
494 VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
495 uint32_t size);
496 void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
497
498 struct anv_scratch_bo {
499 bool exists;
500 struct anv_bo bo;
501 };
502
503 struct anv_scratch_pool {
504 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
505 struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
506 };
507
508 void anv_scratch_pool_init(struct anv_device *device,
509 struct anv_scratch_pool *pool);
510 void anv_scratch_pool_finish(struct anv_device *device,
511 struct anv_scratch_pool *pool);
512 struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
513 struct anv_scratch_pool *pool,
514 gl_shader_stage stage,
515 unsigned per_thread_scratch);
516
517 struct anv_physical_device {
518 VK_LOADER_DATA _loader_data;
519
520 struct anv_instance * instance;
521 uint32_t chipset_id;
522 char path[20];
523 const char * name;
524 struct gen_device_info info;
525 uint64_t aperture_size;
526 struct brw_compiler * compiler;
527 struct isl_device isl_dev;
528 int cmd_parser_version;
529
530 uint32_t eu_total;
531 uint32_t subslice_total;
532
533 uint8_t uuid[VK_UUID_SIZE];
534
535 struct wsi_device wsi_device;
536 int local_fd;
537 };
538
539 struct anv_instance {
540 VK_LOADER_DATA _loader_data;
541
542 VkAllocationCallbacks alloc;
543
544 uint32_t apiVersion;
545 int physicalDeviceCount;
546 struct anv_physical_device physicalDevice;
547 };
548
549 VkResult anv_init_wsi(struct anv_physical_device *physical_device);
550 void anv_finish_wsi(struct anv_physical_device *physical_device);
551
552 struct anv_queue {
553 VK_LOADER_DATA _loader_data;
554
555 struct anv_device * device;
556
557 struct anv_state_pool * pool;
558 };
559
560 struct anv_pipeline_cache {
561 struct anv_device * device;
562 pthread_mutex_t mutex;
563
564 struct hash_table * cache;
565 };
566
567 struct anv_pipeline_bind_map;
568
569 void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
570 struct anv_device *device,
571 bool cache_enabled);
572 void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
573
574 struct anv_shader_bin *
575 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
576 const void *key, uint32_t key_size);
577 struct anv_shader_bin *
578 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
579 const void *key_data, uint32_t key_size,
580 const void *kernel_data, uint32_t kernel_size,
581 const struct brw_stage_prog_data *prog_data,
582 uint32_t prog_data_size,
583 const struct anv_pipeline_bind_map *bind_map);
584
585 struct anv_device {
586 VK_LOADER_DATA _loader_data;
587
588 VkAllocationCallbacks alloc;
589
590 struct anv_instance * instance;
591 uint32_t chipset_id;
592 struct gen_device_info info;
593 struct isl_device isl_dev;
594 int context_id;
595 int fd;
596 bool can_chain_batches;
597 bool robust_buffer_access;
598
599 struct anv_bo_pool batch_bo_pool;
600
601 struct anv_block_pool dynamic_state_block_pool;
602 struct anv_state_pool dynamic_state_pool;
603
604 struct anv_block_pool instruction_block_pool;
605 struct anv_state_pool instruction_state_pool;
606
607 struct anv_block_pool surface_state_block_pool;
608 struct anv_state_pool surface_state_pool;
609
610 struct anv_bo workaround_bo;
611
612 struct anv_pipeline_cache blorp_shader_cache;
613 struct blorp_context blorp;
614
615 struct anv_state border_colors;
616
617 struct anv_queue queue;
618
619 struct anv_scratch_pool scratch_pool;
620
621 uint32_t default_mocs;
622
623 pthread_mutex_t mutex;
624 pthread_cond_t queue_submit;
625 };
626
627 static inline void
628 anv_state_flush(struct anv_device *device, struct anv_state state)
629 {
630 if (device->info.has_llc)
631 return;
632
633 anv_flush_range(state.map, state.alloc_size);
634 }
635
636 void anv_device_init_blorp(struct anv_device *device);
637 void anv_device_finish_blorp(struct anv_device *device);
638
639 VkResult anv_device_execbuf(struct anv_device *device,
640 struct drm_i915_gem_execbuffer2 *execbuf,
641 struct anv_bo **execbuf_bos);
642
643 void* anv_gem_mmap(struct anv_device *device,
644 uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
645 void anv_gem_munmap(void *p, uint64_t size);
646 uint32_t anv_gem_create(struct anv_device *device, size_t size);
647 void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
648 uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
649 int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
650 int anv_gem_execbuffer(struct anv_device *device,
651 struct drm_i915_gem_execbuffer2 *execbuf);
652 int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
653 uint32_t stride, uint32_t tiling);
654 int anv_gem_create_context(struct anv_device *device);
655 int anv_gem_destroy_context(struct anv_device *device, int context);
656 int anv_gem_get_param(int fd, uint32_t param);
657 bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
658 int anv_gem_get_aperture(int fd, uint64_t *size);
659 int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
660 uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
661 int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
662 int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
663 uint32_t read_domains, uint32_t write_domain);
664
665 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
666
667 struct anv_reloc_list {
668 size_t num_relocs;
669 size_t array_length;
670 struct drm_i915_gem_relocation_entry * relocs;
671 struct anv_bo ** reloc_bos;
672 };
673
674 VkResult anv_reloc_list_init(struct anv_reloc_list *list,
675 const VkAllocationCallbacks *alloc);
676 void anv_reloc_list_finish(struct anv_reloc_list *list,
677 const VkAllocationCallbacks *alloc);
678
679 uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
680 const VkAllocationCallbacks *alloc,
681 uint32_t offset, struct anv_bo *target_bo,
682 uint32_t delta);
683
684 struct anv_batch_bo {
685 /* Link in the anv_cmd_buffer.owned_batch_bos list */
686 struct list_head link;
687
688 struct anv_bo bo;
689
690 /* Bytes actually consumed in this batch BO */
691 size_t length;
692
693 struct anv_reloc_list relocs;
694 };
695
696 struct anv_batch {
697 const VkAllocationCallbacks * alloc;
698
699 void * start;
700 void * end;
701 void * next;
702
703 struct anv_reloc_list * relocs;
704
705 /* This callback is called (with the associated user data) in the event
706 * that the batch runs out of space.
707 */
708 VkResult (*extend_cb)(struct anv_batch *, void *);
709 void * user_data;
710 };
711
712 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
713 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
714 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
715 void *location, struct anv_bo *bo, uint32_t offset);
716 VkResult anv_device_submit_simple_batch(struct anv_device *device,
717 struct anv_batch *batch);
718
719 struct anv_address {
720 struct anv_bo *bo;
721 uint32_t offset;
722 };
723
724 static inline uint64_t
725 _anv_combine_address(struct anv_batch *batch, void *location,
726 const struct anv_address address, uint32_t delta)
727 {
728 if (address.bo == NULL) {
729 return address.offset + delta;
730 } else {
731 assert(batch->start <= location && location < batch->end);
732
733 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
734 }
735 }
736
737 #define __gen_address_type struct anv_address
738 #define __gen_user_data struct anv_batch
739 #define __gen_combine_address _anv_combine_address
740
741 /* Wrapper macros needed to work around preprocessor argument issues. In
742 * particular, arguments don't get pre-evaluated if they are concatenated.
743 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
744 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
745 * We can work around this easily enough with these helpers.
746 */
747 #define __anv_cmd_length(cmd) cmd ## _length
748 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
749 #define __anv_cmd_header(cmd) cmd ## _header
750 #define __anv_cmd_pack(cmd) cmd ## _pack
751 #define __anv_reg_num(reg) reg ## _num
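/* For example, if anv_batch_emit() used "cmd ## _length" directly, passing
 * GENX(PIPE_CONTROL) would paste before GENX() could expand. Routing it
 * through __anv_cmd_length(cmd) expands the argument first, so on gen8
 * __anv_cmd_length(GENX(PIPE_CONTROL)) becomes GEN8_PIPE_CONTROL_length.
 */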
752
753 #define anv_pack_struct(dst, struc, ...) do { \
754 struct struc __template = { \
755 __VA_ARGS__ \
756 }; \
757 __anv_cmd_pack(struc)(NULL, dst, &__template); \
758 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
759 } while (0)
760
761 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
762 void *__dst = anv_batch_emit_dwords(batch, n); \
763 struct cmd __template = { \
764 __anv_cmd_header(cmd), \
765 .DWordLength = n - __anv_cmd_length_bias(cmd), \
766 __VA_ARGS__ \
767 }; \
768 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
769 __dst; \
770 })
771
772 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
773 do { \
774 uint32_t *dw; \
775 \
776 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
777 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
778 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
779 dw[i] = (dwords0)[i] | (dwords1)[i]; \
780 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
781 } while (0)
782
783 #define anv_batch_emit(batch, cmd, name) \
784 for (struct cmd name = { __anv_cmd_header(cmd) }, \
785 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
786 __builtin_expect(_dst != NULL, 1); \
787 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
788 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
789 _dst = NULL; \
790 }))
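/* Typical usage: the for-based macro gives a scoped struct that is packed
 * into the batch when the block exits, e.g.:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *       pc.DepthCacheFlushEnable      = true;
 *    }
 */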
791
792 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
793 .GraphicsDataTypeGFDT = 0, \
794 .LLCCacheabilityControlLLCCC = 0, \
795 .L3CacheabilityControlL3CC = 1, \
796 }
797
798 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
799 .LLCeLLCCacheabilityControlLLCCC = 0, \
800 .L3CacheabilityControlL3CC = 1, \
801 }
802
803 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
804 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
805 .TargetCache = L3DefertoPATforLLCeLLCselection, \
806 .AgeforQUADLRU = 0 \
807 }
808
809 /* Skylake: MOCS is now an index into an array of 62 different caching
810 * configurations programmed by the kernel.
811 */
812
813 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
814 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
815 .IndextoMOCSTables = 2 \
816 }
817
818 #define GEN9_MOCS_PTE { \
819 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
820 .IndextoMOCSTables = 1 \
821 }
822
823 struct anv_device_memory {
824 struct anv_bo bo;
825 uint32_t type_index;
826 VkDeviceSize map_size;
827 void * map;
828 };
829
830 /**
831 * Header for Vertex URB Entry (VUE)
832 */
833 struct anv_vue_header {
834 uint32_t Reserved;
835 uint32_t RTAIndex; /* RenderTargetArrayIndex */
836 uint32_t ViewportIndex;
837 float PointWidth;
838 };
839
840 struct anv_descriptor_set_binding_layout {
841 #ifndef NDEBUG
842 /* The type of the descriptors in this binding */
843 VkDescriptorType type;
844 #endif
845
846 /* Number of array elements in this binding */
847 uint16_t array_size;
848
849 /* Index into the flattened descriptor set */
850 uint16_t descriptor_index;
851
852 /* Index into the dynamic state array for a dynamic buffer */
853 int16_t dynamic_offset_index;
854
855 /* Index into the descriptor set buffer views */
856 int16_t buffer_index;
857
858 struct {
859 /* Index into the binding table for the associated surface */
860 int16_t surface_index;
861
862 /* Index into the sampler table for the associated sampler */
863 int16_t sampler_index;
864
865 /* Index into the image table for the associated image */
866 int16_t image_index;
867 } stage[MESA_SHADER_STAGES];
868
869 /* Immutable samplers (or NULL if no immutable samplers) */
870 struct anv_sampler **immutable_samplers;
871 };
872
873 struct anv_descriptor_set_layout {
874 /* Number of bindings in this descriptor set */
875 uint16_t binding_count;
876
877 /* Total size of the descriptor set with room for all array entries */
878 uint16_t size;
879
880 /* Shader stages affected by this descriptor set */
881 uint16_t shader_stages;
882
883 /* Number of buffers in this descriptor set */
884 uint16_t buffer_count;
885
886 /* Number of dynamic offsets used by this descriptor set */
887 uint16_t dynamic_offset_count;
888
889 /* Bindings in this descriptor set */
890 struct anv_descriptor_set_binding_layout binding[0];
891 };
892
893 struct anv_descriptor {
894 VkDescriptorType type;
895
896 union {
897 struct {
898 struct anv_image_view *image_view;
899 struct anv_sampler *sampler;
900 };
901
902 struct anv_buffer_view *buffer_view;
903 };
904 };
905
906 struct anv_descriptor_set {
907 const struct anv_descriptor_set_layout *layout;
908 uint32_t size;
909 uint32_t buffer_count;
910 struct anv_buffer_view *buffer_views;
911 struct anv_descriptor descriptors[0];
912 };
913
914 struct anv_buffer_view {
915 enum isl_format format; /**< VkBufferViewCreateInfo::format */
916 struct anv_bo *bo;
917 uint32_t offset; /**< Offset into bo. */
918 uint64_t range; /**< VkBufferViewCreateInfo::range */
919
920 struct anv_state surface_state;
921 struct anv_state storage_surface_state;
922 struct anv_state writeonly_storage_surface_state;
923
924 struct brw_image_param storage_image_param;
925 };
926
927 struct anv_descriptor_pool {
928 uint32_t size;
929 uint32_t next;
930 uint32_t free_list;
931
932 struct anv_state_stream surface_state_stream;
933 void *surface_state_free_list;
934
935 char data[0];
936 };
937
938 VkResult
939 anv_descriptor_set_create(struct anv_device *device,
940 struct anv_descriptor_pool *pool,
941 const struct anv_descriptor_set_layout *layout,
942 struct anv_descriptor_set **out_set);
943
944 void
945 anv_descriptor_set_destroy(struct anv_device *device,
946 struct anv_descriptor_pool *pool,
947 struct anv_descriptor_set *set);
948
949 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
950
951 struct anv_pipeline_binding {
952 /* The descriptor set this surface corresponds to. The special value of
953 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
954 * to a color attachment and not a regular descriptor.
955 */
956 uint8_t set;
957
958 /* Binding in the descriptor set */
959 uint8_t binding;
960
961 /* Index in the binding */
962 uint8_t index;
963
964 /* Input attachment index (relative to the subpass) */
965 uint8_t input_attachment_index;
966
967 /* For a storage image, whether it is write-only */
968 bool write_only;
969 };
970
971 struct anv_pipeline_layout {
972 struct {
973 struct anv_descriptor_set_layout *layout;
974 uint32_t dynamic_offset_start;
975 } set[MAX_SETS];
976
977 uint32_t num_sets;
978
979 struct {
980 bool has_dynamic_offsets;
981 } stage[MESA_SHADER_STAGES];
982
983 unsigned char sha1[20];
984 };
985
986 struct anv_buffer {
987 struct anv_device * device;
988 VkDeviceSize size;
989
990 VkBufferUsageFlags usage;
991
992 /* Set when bound */
993 struct anv_bo * bo;
994 VkDeviceSize offset;
995 };
996
997 enum anv_cmd_dirty_bits {
998 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
999 ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1000 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1001 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1002 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1003 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1004 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1005 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1006 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1007 ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
1008 ANV_CMD_DIRTY_PIPELINE = 1 << 9,
1009 ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
1010 ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
1011 };
1012 typedef uint32_t anv_cmd_dirty_mask_t;
1013
1014 enum anv_pipe_bits {
1015 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
1016 ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
1017 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
1018 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
1019 ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
1020 ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
1021 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
1022 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
1023 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
1024 ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
1025 ANV_PIPE_CS_STALL_BIT = (1 << 20),
1026
1027 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1028 * a flush has happened but not a CS stall. The next time we do any sort
1029 * of invalidation we need to insert a CS stall at that time. Otherwise,
1030 * we would have to CS stall on every flush which could be bad.
1031 */
1032 ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
1033 };
1034
1035 #define ANV_PIPE_FLUSH_BITS ( \
1036 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1037 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1038 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1039
1040 #define ANV_PIPE_STALL_BITS ( \
1041 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1042 ANV_PIPE_DEPTH_STALL_BIT | \
1043 ANV_PIPE_CS_STALL_BIT)
1044
1045 #define ANV_PIPE_INVALIDATE_BITS ( \
1046 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1047 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1048 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1049 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1050 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1051 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
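/* Sketch of the intended flow for ANV_PIPE_NEEDS_CS_STALL_BIT (the real
 * logic lives in the PIPE_CONTROL emission code):
 *
 *    if (bits & ANV_PIPE_FLUSH_BITS)           // a flush was requested
 *       bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;   // owe a CS stall later
 *
 *    if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
 *        (bits & ANV_PIPE_NEEDS_CS_STALL_BIT))
 *       bits |= ANV_PIPE_CS_STALL_BIT;         // pay it before invalidating
 */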
1052
1053 struct anv_vertex_binding {
1054 struct anv_buffer * buffer;
1055 VkDeviceSize offset;
1056 };
1057
1058 struct anv_push_constants {
1059 /* Current allocated size of this push constants data structure.
1060 * Because a decent chunk of it may not be used (images on SKL, for
1061 * instance), we won't actually allocate the entire structure up-front.
1062 */
1063 uint32_t size;
1064
1065 /* Push constant data provided by the client through vkPushConstants */
1066 uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
1067
1068 /* Our hardware only provides zero-based vertex and instance IDs so, in
1069 * order to satisfy the Vulkan requirements, we may have to push one or
1070 * both of these into the shader.
1071 */
1072 uint32_t base_vertex;
1073 uint32_t base_instance;
1074
1075 /* Offsets and ranges for dynamically bound buffers */
1076 struct {
1077 uint32_t offset;
1078 uint32_t range;
1079 } dynamic[MAX_DYNAMIC_BUFFERS];
1080
1081 /* Image data for image_load_store on pre-SKL */
1082 struct brw_image_param images[MAX_IMAGES];
1083 };
1084
1085 struct anv_dynamic_state {
1086 struct {
1087 uint32_t count;
1088 VkViewport viewports[MAX_VIEWPORTS];
1089 } viewport;
1090
1091 struct {
1092 uint32_t count;
1093 VkRect2D scissors[MAX_SCISSORS];
1094 } scissor;
1095
1096 float line_width;
1097
1098 struct {
1099 float bias;
1100 float clamp;
1101 float slope;
1102 } depth_bias;
1103
1104 float blend_constants[4];
1105
1106 struct {
1107 float min;
1108 float max;
1109 } depth_bounds;
1110
1111 struct {
1112 uint32_t front;
1113 uint32_t back;
1114 } stencil_compare_mask;
1115
1116 struct {
1117 uint32_t front;
1118 uint32_t back;
1119 } stencil_write_mask;
1120
1121 struct {
1122 uint32_t front;
1123 uint32_t back;
1124 } stencil_reference;
1125 };
1126
1127 extern const struct anv_dynamic_state default_dynamic_state;
1128
1129 void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
1130 const struct anv_dynamic_state *src,
1131 uint32_t copy_mask);
1132
1133 /**
1134 * Attachment state when recording a renderpass instance.
1135 *
1136 * The clear value is valid only if there exists a pending clear.
1137 */
1138 struct anv_attachment_state {
1139 enum isl_aux_usage aux_usage;
1140 enum isl_aux_usage input_aux_usage;
1141 struct anv_state color_rt_state;
1142 struct anv_state input_att_state;
1143
1144 VkImageLayout current_layout;
1145 VkImageAspectFlags pending_clear_aspects;
1146 bool fast_clear;
1147 VkClearValue clear_value;
1148 bool clear_color_is_zero_one;
1149 };
1150
1151 /** State required while building cmd buffer */
1152 struct anv_cmd_state {
1153 /* PIPELINE_SELECT.PipelineSelection */
1154 uint32_t current_pipeline;
1155 const struct gen_l3_config * current_l3_config;
1156 uint32_t vb_dirty;
1157 anv_cmd_dirty_mask_t dirty;
1158 anv_cmd_dirty_mask_t compute_dirty;
1159 enum anv_pipe_bits pending_pipe_bits;
1160 uint32_t num_workgroups_offset;
1161 struct anv_bo *num_workgroups_bo;
1162 VkShaderStageFlags descriptors_dirty;
1163 VkShaderStageFlags push_constants_dirty;
1164 uint32_t scratch_size;
1165 struct anv_pipeline * pipeline;
1166 struct anv_pipeline * compute_pipeline;
1167 struct anv_framebuffer * framebuffer;
1168 struct anv_render_pass * pass;
1169 struct anv_subpass * subpass;
1170 VkRect2D render_area;
1171 uint32_t restart_index;
1172 struct anv_vertex_binding vertex_bindings[MAX_VBS];
1173 struct anv_descriptor_set * descriptors[MAX_SETS];
1174 VkShaderStageFlags push_constant_stages;
1175 struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
1176 struct anv_state binding_tables[MESA_SHADER_STAGES];
1177 struct anv_state samplers[MESA_SHADER_STAGES];
1178 struct anv_dynamic_state dynamic;
1179 bool need_query_wa;
1180
1181 /**
1182 * Whether or not the gen8 PMA fix is enabled. We ensure that it is
1183 * disabled at the top of every command buffer by disabling it in
1184 * EndCommandBuffer and before invoking any secondary in ExecuteCommands.
1185 */
1186 bool pma_fix_enabled;
1187
1188 /**
1189 * Whether or not we know for certain that HiZ is enabled for the current
1190 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
1191 * enabled or not, this will be false.
1192 */
1193 bool hiz_enabled;
1194
1195 /**
1196 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1197 * valid only when recording a render pass instance.
1198 */
1199 struct anv_attachment_state * attachments;
1200
1201 /**
1202 * Surface states for color render targets. These are stored in a single
1203 * flat array. For depth-stencil attachments, the surface state is simply
1204 * left blank.
1205 */
1206 struct anv_state render_pass_states;
1207
1208 /**
1209 * A null surface state of the right size to match the framebuffer. This
1210 * is one of the states in render_pass_states.
1211 */
1212 struct anv_state null_surface_state;
1213
1214 struct {
1215 struct anv_buffer * index_buffer;
1216 uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1217 uint32_t index_offset;
1218 } gen7;
1219 };
1220
1221 struct anv_cmd_pool {
1222 VkAllocationCallbacks alloc;
1223 struct list_head cmd_buffers;
1224 };
1225
1226 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1227
1228 enum anv_cmd_buffer_exec_mode {
1229 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
1230 ANV_CMD_BUFFER_EXEC_MODE_EMIT,
1231 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
1232 ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
1233 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
1234 };
1235
1236 struct anv_cmd_buffer {
1237 VK_LOADER_DATA _loader_data;
1238
1239 struct anv_device * device;
1240
1241 struct anv_cmd_pool * pool;
1242 struct list_head pool_link;
1243
1244 struct anv_batch batch;
1245
1246 /* Fields required for the actual chain of anv_batch_bo's.
1247 *
1248 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1249 */
1250 struct list_head batch_bos;
1251 enum anv_cmd_buffer_exec_mode exec_mode;
1252
1253 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1254 * referenced by this command buffer
1255 *
1256 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1257 */
1258 struct u_vector seen_bbos;
1259
1260 /* A vector of int32_t's for every block of binding tables.
1261 *
1262 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1263 */
1264 struct u_vector bt_blocks;
1265 uint32_t bt_next;
1266
1267 struct anv_reloc_list surface_relocs;
1268 /** Last seen surface state block pool center bo offset */
1269 uint32_t last_ss_pool_center;
1270
1271 /* Serial for tracking buffer completion */
1272 uint32_t serial;
1273
1274 /* Stream objects for storing temporary data */
1275 struct anv_state_stream surface_state_stream;
1276 struct anv_state_stream dynamic_state_stream;
1277
1278 VkCommandBufferUsageFlags usage_flags;
1279 VkCommandBufferLevel level;
1280
1281 struct anv_cmd_state state;
1282 };
1283
1284 VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1285 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1286 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1287 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
1288 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1289 struct anv_cmd_buffer *secondary);
1290 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
1291 VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
1292 struct anv_cmd_buffer *cmd_buffer);
1293
1294 VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
1295
1296 VkResult
1297 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
1298 gl_shader_stage stage, uint32_t size);
1299 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
1300 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
1301 (offsetof(struct anv_push_constants, field) + \
1302 sizeof(cmd_buffer->state.push_constants[0]->field)))
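/* Example (sketch): guarantee that storage for the dynamic-buffer offsets
 * exists before writing them:
 *
 *    VkResult result =
 *       anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, dynamic);
 *    if (result != VK_SUCCESS)
 *       return result;
 *    cmd_buffer->state.push_constants[stage]->dynamic[idx].offset = offset;
 */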
1303
1304 struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
1305 const void *data, uint32_t size, uint32_t alignment);
1306 struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
1307 uint32_t *a, uint32_t *b,
1308 uint32_t dwords, uint32_t alignment);
1309
1310 struct anv_address
1311 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
1312 struct anv_state
1313 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
1314 uint32_t entries, uint32_t *state_offset);
1315 struct anv_state
1316 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
1317 struct anv_state
1318 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
1319 uint32_t size, uint32_t alignment);
1320
1321 VkResult
1322 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
1323
1324 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
1325 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
1326 bool depth_clamp_enable);
1327 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
1328
1329 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
1330 struct anv_render_pass *pass,
1331 struct anv_framebuffer *framebuffer,
1332 const VkClearValue *clear_values);
1333
1334 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
1335
1336 struct anv_state
1337 anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
1338 gl_shader_stage stage);
1339 struct anv_state
1340 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
1341
1342 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
1343 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
1344
1345 const struct anv_image_view *
1346 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
1347
1348 struct anv_state
1349 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
1350 uint32_t num_entries,
1351 uint32_t *state_offset);
1352
1353 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
1354
1355 enum anv_fence_state {
1356 /** Indicates that this is a new (or newly reset) fence */
1357 ANV_FENCE_STATE_RESET,
1358
1359 /** Indicates that this fence has been submitted to the GPU but is still
1360 * (as far as we know) in use by the GPU.
1361 */
1362 ANV_FENCE_STATE_SUBMITTED,
1363
1364 ANV_FENCE_STATE_SIGNALED,
1365 };
1366
1367 struct anv_fence {
1368 struct anv_bo bo;
1369 struct drm_i915_gem_execbuffer2 execbuf;
1370 struct drm_i915_gem_exec_object2 exec2_objects[1];
1371 enum anv_fence_state state;
1372 };
1373
1374 struct anv_event {
1375 uint64_t semaphore;
1376 struct anv_state state;
1377 };
1378
1379 struct anv_shader_module {
1380 unsigned char sha1[20];
1381 uint32_t size;
1382 char data[0];
1383 };
1384
1385 void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
1386 struct anv_shader_module *module,
1387 const char *entrypoint,
1388 const struct anv_pipeline_layout *pipeline_layout,
1389 const VkSpecializationInfo *spec_info);
1390
1391 static inline gl_shader_stage
1392 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
1393 {
1394 assert(__builtin_popcount(vk_stage) == 1);
1395 return ffs(vk_stage) - 1;
1396 }
1397
1398 static inline VkShaderStageFlagBits
1399 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
1400 {
1401 return (1 << mesa_stage);
1402 }
1403
1404 #define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
1405
1406 #define anv_foreach_stage(stage, stage_bits) \
1407 for (gl_shader_stage stage, \
1408 __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
1409 stage = __builtin_ffs(__tmp) - 1, __tmp; \
1410 __tmp &= ~(1 << (stage)))
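/* Example: walk only the stages present in a VkShaderStageFlags mask (the
 * macro declares the iteration variable itself):
 *
 *    anv_foreach_stage(s, pipeline->active_stages) {
 *       if (pipeline->shaders[s] != NULL)
 *          ... per-stage work ...
 *    }
 */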
1411
1412 struct anv_pipeline_bind_map {
1413 uint32_t surface_count;
1414 uint32_t sampler_count;
1415 uint32_t image_count;
1416
1417 struct anv_pipeline_binding * surface_to_descriptor;
1418 struct anv_pipeline_binding * sampler_to_descriptor;
1419 };
1420
1421 struct anv_shader_bin_key {
1422 uint32_t size;
1423 uint8_t data[0];
1424 };
1425
1426 struct anv_shader_bin {
1427 uint32_t ref_cnt;
1428
1429 const struct anv_shader_bin_key *key;
1430
1431 struct anv_state kernel;
1432 uint32_t kernel_size;
1433
1434 const struct brw_stage_prog_data *prog_data;
1435 uint32_t prog_data_size;
1436
1437 struct anv_pipeline_bind_map bind_map;
1438
1439 /* Prog data follows, then params, then the key, all aligned to 8 bytes */
1440 };
1441
1442 struct anv_shader_bin *
1443 anv_shader_bin_create(struct anv_device *device,
1444 const void *key, uint32_t key_size,
1445 const void *kernel, uint32_t kernel_size,
1446 const struct brw_stage_prog_data *prog_data,
1447 uint32_t prog_data_size, const void *prog_data_param,
1448 const struct anv_pipeline_bind_map *bind_map);
1449
1450 void
1451 anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
1452
1453 static inline void
1454 anv_shader_bin_ref(struct anv_shader_bin *shader)
1455 {
1456 assert(shader->ref_cnt >= 1);
1457 __sync_fetch_and_add(&shader->ref_cnt, 1);
1458 }
1459
1460 static inline void
1461 anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
1462 {
1463 assert(shader->ref_cnt >= 1);
1464 if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
1465 anv_shader_bin_destroy(device, shader);
1466 }
1467
1468 struct anv_pipeline {
1469 struct anv_device * device;
1470 struct anv_batch batch;
1471 uint32_t batch_data[512];
1472 struct anv_reloc_list batch_relocs;
1473 uint32_t dynamic_state_mask;
1474 struct anv_dynamic_state dynamic_state;
1475
1476 struct anv_pipeline_layout * layout;
1477
1478 bool needs_data_cache;
1479
1480 struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
1481
1482 struct {
1483 const struct gen_l3_config * l3_config;
1484 uint32_t total_size;
1485 } urb;
1486
1487 VkShaderStageFlags active_stages;
1488 struct anv_state blend_state;
1489
1490 uint32_t vb_used;
1491 uint32_t binding_stride[MAX_VBS];
1492 bool instancing_enable[MAX_VBS];
1493 bool primitive_restart;
1494 uint32_t topology;
1495
1496 uint32_t cs_right_mask;
1497
1498 bool writes_depth;
1499 bool depth_test_enable;
1500 bool writes_stencil;
1501 bool stencil_test_enable;
1502 bool depth_clamp_enable;
1503 bool kill_pixel;
1504
1505 struct {
1506 uint32_t sf[7];
1507 uint32_t depth_stencil_state[3];
1508 } gen7;
1509
1510 struct {
1511 uint32_t sf[4];
1512 uint32_t raster[5];
1513 uint32_t wm_depth_stencil[3];
1514 } gen8;
1515
1516 struct {
1517 uint32_t wm_depth_stencil[4];
1518 } gen9;
1519
1520 uint32_t interface_descriptor_data[8];
1521 };
1522
1523 static inline bool
1524 anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
1525 gl_shader_stage stage)
1526 {
1527 return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
1528 }
1529
1530 #define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage) \
1531 static inline const struct brw_##prefix##_prog_data * \
1532 get_##prefix##_prog_data(const struct anv_pipeline *pipeline) \
1533 { \
1534 if (anv_pipeline_has_stage(pipeline, stage)) { \
1535 return (const struct brw_##prefix##_prog_data *) \
1536 pipeline->shaders[stage]->prog_data; \
1537 } else { \
1538 return NULL; \
1539 } \
1540 }
1541
1542 ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
1543 ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
1544 ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
1545 ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
1546 ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
1547 ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
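/* Example: the generated getters return NULL when the stage is absent:
 *
 *    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 *    if (wm_prog_data != NULL)
 *       ... use fragment-shader prog data ...
 */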
1548
1549 static inline const struct brw_vue_prog_data *
1550 anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
1551 {
1552 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
1553 return &get_gs_prog_data(pipeline)->base;
1554 else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1555 return &get_tes_prog_data(pipeline)->base;
1556 else
1557 return &get_vs_prog_data(pipeline)->base;
1558 }
1559
1560 VkResult
1561 anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
1562 struct anv_pipeline_cache *cache,
1563 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1564 const VkAllocationCallbacks *alloc);
1565
1566 VkResult
1567 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1568 struct anv_pipeline_cache *cache,
1569 const VkComputePipelineCreateInfo *info,
1570 struct anv_shader_module *module,
1571 const char *entrypoint,
1572 const VkSpecializationInfo *spec_info);
1573
1574 struct anv_format {
1575 enum isl_format isl_format:16;
1576 struct isl_swizzle swizzle;
1577 };
1578
1579 struct anv_format
1580 anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
1581 VkImageAspectFlags aspect, VkImageTiling tiling);
1582
1583 static inline enum isl_format
1584 anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
1585 VkImageAspectFlags aspect, VkImageTiling tiling)
1586 {
1587 return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
1588 }
1589
1590 static inline struct isl_swizzle
1591 anv_swizzle_for_render(struct isl_swizzle swizzle)
1592 {
1593 /* Sometimes the swizzle will have alpha map to one. We do this to fake
1594 * RGB as RGBA for texturing.
1595 */
1596 assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
1597 swizzle.a == ISL_CHANNEL_SELECT_ALPHA);
1598
1599 /* But it doesn't matter what we render to that channel */
1600 swizzle.a = ISL_CHANNEL_SELECT_ALPHA;
1601
1602 return swizzle;
1603 }
1604
1605 void
1606 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
1607
1608 /**
1609 * Subsurface of an anv_image.
1610 */
1611 struct anv_surface {
1612 /** Valid only if isl_surf::size > 0. */
1613 struct isl_surf isl;
1614
1615 /**
1616 * Offset from VkImage's base address, as bound by vkBindImageMemory().
1617 */
1618 uint32_t offset;
1619 };
1620
1621 struct anv_image {
1622 VkImageType type;
1623 /* The original VkFormat provided by the client. This may not match any
1624 * of the actual surface formats.
1625 */
1626 VkFormat vk_format;
1627 VkImageAspectFlags aspects;
1628 VkExtent3D extent;
1629 uint32_t levels;
1630 uint32_t array_size;
1631 uint32_t samples; /**< VkImageCreateInfo::samples */
1632 VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
1633 VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
1634
1635 VkDeviceSize size;
1636 uint32_t alignment;
1637
1638 /* Set when bound */
1639 struct anv_bo *bo;
1640 VkDeviceSize offset;
1641
1642 /**
1643 * Image subsurfaces
1644 *
1645 * For each foo, anv_image::foo_surface is valid if and only if
1646 * anv_image::aspects has a foo aspect.
1647 *
1648 * The hardware requires that the depth buffer and stencil buffer be
1649 * separate surfaces. From Vulkan's perspective, though, depth and stencil
1650 * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
1651 * allocate the depth and stencil buffers as separate surfaces in the same
1652 * bo.
1653 */
1654 union {
1655 struct anv_surface color_surface;
1656
1657 struct {
1658 struct anv_surface depth_surface;
1659 struct anv_surface stencil_surface;
1660 };
1661 };
1662
1663 /**
1664 * For color images, this is the aux usage for this image when not used as a
1665 * color attachment.
1666 *
1667 * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the image
1668 * has a HiZ buffer.
1669 */
1670 enum isl_aux_usage aux_usage;
1671
1672 struct anv_surface aux_surface;
1673 };
1674
1675 /* Returns true if a HiZ-enabled depth buffer can be sampled from. */
1676 static inline bool
1677 anv_can_sample_with_hiz(uint8_t gen, uint32_t samples)
1678 {
1679 return gen >= 8 && samples == 1;
1680 }
1681
1682 void
1683 anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
1684 const struct anv_image *image,
1685 enum blorp_hiz_op op);
1686
1687 static inline uint32_t
1688 anv_get_layerCount(const struct anv_image *image,
1689 const VkImageSubresourceRange *range)
1690 {
1691 return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
1692 image->array_size - range->baseArrayLayer : range->layerCount;
1693 }
1694
1695 static inline uint32_t
1696 anv_get_levelCount(const struct anv_image *image,
1697 const VkImageSubresourceRange *range)
1698 {
1699 return range->levelCount == VK_REMAINING_MIP_LEVELS ?
1700 image->levels - range->baseMipLevel : range->levelCount;
1701 }
1702
1703
1704 struct anv_image_view {
1705 const struct anv_image *image; /**< VkImageViewCreateInfo::image */
1706 struct anv_bo *bo;
1707 uint32_t offset; /**< Offset into bo. */
1708
1709 struct isl_view isl;
1710
1711 VkImageAspectFlags aspect_mask;
1712 VkFormat vk_format;
1713 VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
1714
1715 /** RENDER_SURFACE_STATE when using image as a sampler surface. */
1716 struct anv_state sampler_surface_state;
1717
1718 /**
1719 * RENDER_SURFACE_STATE when using image as a storage image. Separate states
1720 * for write-only and readable, using the real format for write-only and the
1721 * lowered format for readable.
1722 */
1723 struct anv_state storage_surface_state;
1724 struct anv_state writeonly_storage_surface_state;
1725
1726 struct brw_image_param storage_image_param;
1727 };
1728
1729 struct anv_image_create_info {
1730 const VkImageCreateInfo *vk_info;
1731
1732 /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
1733 isl_tiling_flags_t isl_tiling_flags;
1734
1735 uint32_t stride;
1736 };
1737
1738 VkResult anv_image_create(VkDevice _device,
1739 const struct anv_image_create_info *info,
1740 const VkAllocationCallbacks* alloc,
1741 VkImage *pImage);
1742
1743 const struct anv_surface *
1744 anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
1745 VkImageAspectFlags aspect_mask);
1746
1747 enum isl_format
1748 anv_isl_format_for_descriptor_type(VkDescriptorType type);
1749
1750 static inline struct VkExtent3D
1751 anv_sanitize_image_extent(const VkImageType imageType,
1752 const struct VkExtent3D imageExtent)
1753 {
1754 switch (imageType) {
1755 case VK_IMAGE_TYPE_1D:
1756 return (VkExtent3D) { imageExtent.width, 1, 1 };
1757 case VK_IMAGE_TYPE_2D:
1758 return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
1759 case VK_IMAGE_TYPE_3D:
1760 return imageExtent;
1761 default:
1762 unreachable("invalid image type");
1763 }
1764 }
1765
1766 static inline struct VkOffset3D
1767 anv_sanitize_image_offset(const VkImageType imageType,
1768 const struct VkOffset3D imageOffset)
1769 {
1770 switch (imageType) {
1771 case VK_IMAGE_TYPE_1D:
1772 return (VkOffset3D) { imageOffset.x, 0, 0 };
1773 case VK_IMAGE_TYPE_2D:
1774 return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
1775 case VK_IMAGE_TYPE_3D:
1776 return imageOffset;
1777 default:
1778 unreachable("invalid image type");
1779 }
1780 }
1781
1782
1783 void anv_fill_buffer_surface_state(struct anv_device *device,
1784 struct anv_state state,
1785 enum isl_format format,
1786 uint32_t offset, uint32_t range,
1787 uint32_t stride);
1788
1789 void anv_image_view_fill_image_param(struct anv_device *device,
1790 struct anv_image_view *view,
1791 struct brw_image_param *param);
1792 void anv_buffer_view_fill_image_param(struct anv_device *device,
1793 struct anv_buffer_view *view,
1794 struct brw_image_param *param);
1795
1796 struct anv_sampler {
1797 uint32_t state[4];
1798 };
1799
1800 struct anv_framebuffer {
1801 uint32_t width;
1802 uint32_t height;
1803 uint32_t layers;
1804
1805 uint32_t attachment_count;
1806 struct anv_image_view * attachments[0];
1807 };
1808
1809 struct anv_subpass {
1810 uint32_t input_count;
1811 uint32_t * input_attachments;
1812 uint32_t color_count;
1813 uint32_t * color_attachments;
1814 uint32_t * resolve_attachments;
1815
1816 /* TODO: Consider storing the depth/stencil VkAttachmentReference
1817 * instead of its two structure members (below) individually.
1818 */
1819 uint32_t depth_stencil_attachment;
1820 VkImageLayout depth_stencil_layout;
1821
1822 /** Subpass has a depth/stencil self-dependency */
1823 bool has_ds_self_dep;
1824
1825 /** Subpass has at least one resolve attachment */
1826 bool has_resolve;
1827 };
1828
1829 enum anv_subpass_usage {
1830 ANV_SUBPASS_USAGE_DRAW = (1 << 0),
1831 ANV_SUBPASS_USAGE_INPUT = (1 << 1),
1832 ANV_SUBPASS_USAGE_RESOLVE_SRC = (1 << 2),
1833 ANV_SUBPASS_USAGE_RESOLVE_DST = (1 << 3),
1834 };
1835
1836 struct anv_render_pass_attachment {
1837 /* TODO: Consider using VkAttachmentDescription instead of storing each of
1838 * its members individually.
1839 */
1840 VkFormat format;
1841 uint32_t samples;
1842 VkImageUsageFlags usage;
1843 VkAttachmentLoadOp load_op;
1844 VkAttachmentStoreOp store_op;
1845 VkAttachmentLoadOp stencil_load_op;
1846 VkImageLayout initial_layout;
1847 VkImageLayout final_layout;
1848
1849 /* An array, indexed by subpass id, of how the attachment will be used. */
1850 enum anv_subpass_usage * subpass_usage;
1851
1852 /* The subpass id in which the attachment will be used last. */
1853 uint32_t last_subpass_idx;
1854 };
1855
1856 struct anv_render_pass {
1857 uint32_t attachment_count;
1858 uint32_t subpass_count;
1859 uint32_t * subpass_attachments;
1860 enum anv_subpass_usage * subpass_usages;
1861 struct anv_render_pass_attachment * attachments;
1862 struct anv_subpass subpasses[0];
1863 };
1864
1865 struct anv_query_pool_slot {
1866 uint64_t begin;
1867 uint64_t end;
1868 uint64_t available;
1869 };
1870
1871 struct anv_query_pool {
1872 VkQueryType type;
1873 uint32_t slots;
1874 struct anv_bo bo;
1875 };
1876
1877 void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
1878 const char *name);
1879
1880 void anv_dump_image_to_ppm(struct anv_device *device,
1881 struct anv_image *image, unsigned miplevel,
1882 unsigned array_layer, VkImageAspectFlagBits aspect,
1883 const char *filename);
1884
1885 enum anv_dump_action {
1886 ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
1887 };
1888
1889 void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
1890 void anv_dump_finish(void);
1891
1892 void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
1893 struct anv_framebuffer *fb);
1894
1895 #define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
1896 \
1897 static inline struct __anv_type * \
1898 __anv_type ## _from_handle(__VkType _handle) \
1899 { \
1900 return (struct __anv_type *) _handle; \
1901 } \
1902 \
1903 static inline __VkType \
1904 __anv_type ## _to_handle(struct __anv_type *_obj) \
1905 { \
1906 return (__VkType) _obj; \
1907 }
1908
1909 #define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
1910 \
1911 static inline struct __anv_type * \
1912 __anv_type ## _from_handle(__VkType _handle) \
1913 { \
1914 return (struct __anv_type *)(uintptr_t) _handle; \
1915 } \
1916 \
1917 static inline __VkType \
1918 __anv_type ## _to_handle(struct __anv_type *_obj) \
1919 { \
1920 return (__VkType)(uintptr_t) _obj; \
1921 }
1922
1923 #define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
1924 struct __anv_type *__name = __anv_type ## _from_handle(__handle)
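/* Example (typical entrypoint prologue; the function name is illustrative):
 *
 *    VkResult anv_DoSomething(VkDevice _device, VkImage _image)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ANV_FROM_HANDLE(anv_image, image, _image);
 *       ...
 *    }
 */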
1925
1926 ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
1927 ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
1928 ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
1929 ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
1930 ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
1931
1932 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
1933 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
1934 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
1935 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
1936 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
1937 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
1938 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
1939 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
1940 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
1941 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
1942 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
1943 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
1944 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
1945 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
1946 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
1947 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
1948 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
1949 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
1950 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
1951
1952 /* Gen-specific function declarations */
1953 #ifdef genX
1954 # include "anv_genX.h"
1955 #else
1956 # define genX(x) gen7_##x
1957 # include "anv_genX.h"
1958 # undef genX
1959 # define genX(x) gen75_##x
1960 # include "anv_genX.h"
1961 # undef genX
1962 # define genX(x) gen8_##x
1963 # include "anv_genX.h"
1964 # undef genX
1965 # define genX(x) gen9_##x
1966 # include "anv_genX.h"
1967 # undef genX
1968 #endif
1969
1970 #endif /* ANV_PRIVATE_H */