anv: make layout size computation helper available across compilation units
[mesa.git] / src/intel/vulkan/anv_private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef ANV_PRIVATE_H
25 #define ANV_PRIVATE_H
26
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdbool.h>
30 #include <pthread.h>
31 #include <assert.h>
32 #include <stdint.h>
33 #include <i915_drm.h>
34
35 #ifdef HAVE_VALGRIND
36 #include <valgrind.h>
37 #include <memcheck.h>
38 #define VG(x) x
39 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
40 #else
41 #define VG(x)
42 #endif
43
44 #include "common/gen_device_info.h"
45 #include "blorp/blorp.h"
46 #include "brw_compiler.h"
47 #include "util/macros.h"
48 #include "util/list.h"
49 #include "util/u_vector.h"
50 #include "util/vk_alloc.h"
51
52 /* Forward declarations needed for WSI entrypoints */
53 struct wl_surface;
54 struct wl_display;
55 typedef struct xcb_connection_t xcb_connection_t;
56 typedef uint32_t xcb_visualid_t;
57 typedef uint32_t xcb_window_t;
58
59 struct gen_l3_config;
60
61 #include <vulkan/vulkan.h>
62 #include <vulkan/vulkan_intel.h>
63 #include <vulkan/vk_icd.h>
64
65 #include "anv_entrypoints.h"
66 #include "brw_context.h"
67 #include "isl/isl.h"
68
69 #include "wsi_common.h"
70
71 /* Allowing different clear colors requires us to perform a depth resolve at
72 * the end of certain render passes. This is because while slow clears store
73 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
74 * See the PRMs for examples describing when additional resolves would be
75 * necessary. To enable fast clears without requiring extra resolves, we set
76 * the clear value to a globally-defined one. We could allow different values
77 * if the user doesn't expect coherent data during or after a render pass
78 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
79 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
80 * 1.0f seems to be the only value used. The only application that doesn't set
81 * this value does so through the use of a seemingly uninitialized clear
82 * value.
83 */
84 #define ANV_HZ_FC_VAL 1.0f
85
86 #define MAX_VBS 31
87 #define MAX_SETS 8
88 #define MAX_RTS 8
89 #define MAX_VIEWPORTS 16
90 #define MAX_SCISSORS 16
91 #define MAX_PUSH_CONSTANTS_SIZE 128
92 #define MAX_DYNAMIC_BUFFERS 16
93 #define MAX_IMAGES 8
94
95 #define ANV_SVGS_VB_INDEX MAX_VBS
96 #define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)
97
98 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
99
100 static inline uint32_t
101 align_down_npot_u32(uint32_t v, uint32_t a)
102 {
103 return v - (v % a);
104 }
105
106 static inline uint32_t
107 align_u32(uint32_t v, uint32_t a)
108 {
109 assert(a != 0 && a == (a & -a));
110 return (v + a - 1) & ~(a - 1);
111 }
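
/* Worked examples (illustrative): align_u32 rounds up to a power-of-two
 * boundary, while align_down_npot_u32 rounds down and also accepts
 * non-power-of-two alignments:
 *
 *    align_u32(13, 8)            == 16
 *    align_u32(16, 8)            == 16
 *    align_down_npot_u32(13, 6)  == 12
 */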
112
113 static inline uint64_t
114 align_u64(uint64_t v, uint64_t a)
115 {
116 assert(a != 0 && a == (a & -a));
117 return (v + a - 1) & ~(a - 1);
118 }
119
120 static inline int32_t
121 align_i32(int32_t v, int32_t a)
122 {
123 assert(a != 0 && a == (a & -a));
124 return (v + a - 1) & ~(a - 1);
125 }
126
127 /** Alignment must be a power of 2. */
128 static inline bool
129 anv_is_aligned(uintmax_t n, uintmax_t a)
130 {
131 assert(a == (a & -a));
132 return (n & (a - 1)) == 0;
133 }
134
135 static inline uint32_t
136 anv_minify(uint32_t n, uint32_t levels)
137 {
138 if (unlikely(n == 0))
139 return 0;
140 else
141 return MAX2(n >> levels, 1);
142 }
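
/* Worked example (illustrative): for a mip chain with a base width of 800,
 * anv_minify(800, 0) == 800 and anv_minify(800, 4) == 50, while very deep
 * levels clamp to 1, e.g. anv_minify(800, 12) == 1.
 */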
143
144 static inline float
145 anv_clamp_f(float f, float min, float max)
146 {
147 assert(min < max);
148
149 if (f > max)
150 return max;
151 else if (f < min)
152 return min;
153 else
154 return f;
155 }
156
157 static inline bool
158 anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
159 {
160 if (*inout_mask & clear_mask) {
161 *inout_mask &= ~clear_mask;
162 return true;
163 } else {
164 return false;
165 }
166 }
167
168 static inline union isl_color_value
169 vk_to_isl_color(VkClearColorValue color)
170 {
171 return (union isl_color_value) {
172 .u32 = {
173 color.uint32[0],
174 color.uint32[1],
175 color.uint32[2],
176 color.uint32[3],
177 },
178 };
179 }
180
181 #define for_each_bit(b, dword) \
182 for (uint32_t __dword = (dword); \
183 (b) = __builtin_ffs(__dword) - 1, __dword; \
184 __dword &= ~(1 << (b)))
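
/* Usage sketch (illustrative): visit each set bit in a dirty mask, e.g. to
 * re-emit only the vertex buffers that changed. emit_vertex_buffer() is a
 * hypothetical helper, not part of this header:
 *
 *    uint32_t vb;
 *    for_each_bit(vb, cmd_buffer->state.vb_dirty)
 *       emit_vertex_buffer(cmd_buffer, vb);
 */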
185
186 #define typed_memcpy(dest, src, count) ({ \
187 STATIC_ASSERT(sizeof(*(src)) == sizeof(*(dest))); \
188 memcpy((dest), (src), (count) * sizeof(*(src))); \
189 })
190
191 /* Whenever we generate an error, pass it through this function. Useful for
192 * debugging, where we can break on it. Only call at error site, not when
193 * propagating errors. Might be useful to plug in a stack trace here.
194 */
195
196 VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);
197
198 #ifdef DEBUG
199 #define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL)
200 #define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__)
201 #define anv_debug(format, ...) fprintf(stderr, "debug: " format, ##__VA_ARGS__)
202 #else
203 #define vk_error(error) error
204 #define vk_errorf(error, format, ...) error
205 #define anv_debug(format, ...)
206 #endif
207
208 /**
209 * Warn on ignored extension structs.
210 *
211 * The Vulkan spec requires us to ignore unsupported or unknown structs in
212 * a pNext chain. In debug mode, emitting warnings for ignored structs may
213 * help us discover structs that we should not have ignored.
214 *
216 * From the Vulkan 1.0.38 spec:
217 *
218 * Any component of the implementation (the loader, any enabled layers,
219 * and drivers) must skip over, without processing (other than reading the
220 * sType and pNext members) any chained structures with sType values not
221 * defined by extensions supported by that component.
222 */
223 #define anv_debug_ignored_stype(sType) \
224 anv_debug("debug: %s: ignored VkStructureType %u\n", __func__, (sType))
225
226 void __anv_finishme(const char *file, int line, const char *format, ...)
227 anv_printflike(3, 4);
228 void anv_loge(const char *format, ...) anv_printflike(1, 2);
229 void anv_loge_v(const char *format, va_list va);
230
231 /**
232 * Print a FINISHME message, including its source location.
233 */
234 #define anv_finishme(format, ...) \
235 do { \
236 static bool reported = false; \
237 if (!reported) { \
238 __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
239 reported = true; \
240 } \
241 } while (0)
242
243 /* A non-fatal assert. Useful for debugging. */
244 #ifdef DEBUG
245 #define anv_assert(x) ({ \
246 if (unlikely(!(x))) \
247 fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
248 })
249 #else
250 #define anv_assert(x)
251 #endif
252
253 /**
254 * If a block of code is annotated with anv_validate, then the block runs only
255 * in debug builds.
256 */
257 #ifdef DEBUG
258 #define anv_validate if (1)
259 #else
260 #define anv_validate if (0)
261 #endif
262
263 #define stub_return(v) \
264 do { \
265 anv_finishme("stub %s", __func__); \
266 return (v); \
267 } while (0)
268
269 #define stub() \
270 do { \
271 anv_finishme("stub %s", __func__); \
272 return; \
273 } while (0)
274
283 struct anv_bo {
284 uint32_t gem_handle;
285
286 /* Index into the current validation list. This is used by the
287 * validation list building algorithm to track which buffers are already
288 * in the validation list so that we can ensure uniqueness.
289 */
290 uint32_t index;
291
292 /* Last known offset. This value is provided by the kernel when we
293 * execbuf and is used as the presumed offset for the next bunch of
294 * relocations.
295 */
296 uint64_t offset;
297
298 uint64_t size;
299 void *map;
300
301 /* We need to set the WRITE flag on winsys bos so GEM will know we're
302 * writing to them and synchronize uses on other rings (e.g. if the display
303 * server uses the blitter ring).
304 */
305 bool is_winsys_bo;
306 };
307
308 static inline void
309 anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
310 {
311 bo->gem_handle = gem_handle;
312 bo->index = 0;
313 bo->offset = -1;
314 bo->size = size;
315 bo->map = NULL;
316 bo->is_winsys_bo = false;
317 }
318
319 /* Represents a lock-free linked list of "free" things. This is used by
320 * both the block pool and the state pools. Unfortunately, in order to
321 * solve the ABA problem, we can't use a single uint32_t head.
322 */
323 union anv_free_list {
324 struct {
325 int32_t offset;
326
327 /* A simple count that is incremented every time the head changes. */
328 uint32_t count;
329 };
330 uint64_t u64;
331 };
332
333 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
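
/* Sketch of a lock-free pop (illustrative, not the driver's exact code):
 * the compare-and-swap covers both offset and count, so even if another
 * thread pops and re-pushes the same offset in between, the bumped count
 * changes u64 and forces a retry. offset_of_next_entry() is a hypothetical
 * helper:
 *
 *    union anv_free_list current, old, new;
 *    current.u64 = list->u64;
 *    do {
 *       old = current;
 *       new.offset = offset_of_next_entry(old.offset);
 *       new.count = old.count + 1;
 *       current.u64 = __sync_val_compare_and_swap(&list->u64, old.u64, new.u64);
 *    } while (current.u64 != old.u64);
 */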
334
335 struct anv_block_state {
336 union {
337 struct {
338 uint32_t next;
339 uint32_t end;
340 };
341 uint64_t u64;
342 };
343 };
344
345 struct anv_block_pool {
346 struct anv_device *device;
347
348 struct anv_bo bo;
349
350 /* The offset from the start of the bo to the "center" of the block
351 * pool. Pointers to allocated blocks are given by
352 * bo.map + center_bo_offset + offsets.
353 */
354 uint32_t center_bo_offset;
355
356 /* Current memory map of the block pool. This pointer may or may not
357 * point to the actual beginning of the block pool memory. If
358 * anv_block_pool_alloc_back has ever been called, then this pointer
359 * will point to the "center" position of the buffer and all offsets
360 * (negative or positive) given out by the block pool alloc functions
361 * will be valid relative to this pointer.
362 *
363 * In particular, map == bo.map + center_offset
364 */
365 void *map;
366 int fd;
367
368 /**
369 * Array of mmaps and gem handles owned by the block pool, reclaimed when
370 * the block pool is destroyed.
371 */
372 struct u_vector mmap_cleanups;
373
374 uint32_t block_size;
375
376 union anv_free_list free_list;
377 struct anv_block_state state;
378
379 union anv_free_list back_free_list;
380 struct anv_block_state back_state;
381 };
382
383 /* Block pools are backed by a fixed-size 4GB memfd */
384 #define BLOCK_POOL_MEMFD_SIZE (1ull << 32)
385
386 /* The center of the block pool is also the middle of the memfd. This may
387 * change in the future if we decide differently for some reason.
388 */
389 #define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
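
/* Worked example (illustrative): with the map kept at the pool's center,
 * a front allocation returned as +4096 and a back allocation returned as
 * -8192 both resolve with the same arithmetic:
 *
 *    void *front = pool->map + 4096;   // == bo.map + center_bo_offset + 4096
 *    void *back  = pool->map - 8192;   // == bo.map + center_bo_offset - 8192
 */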
390
391 static inline uint32_t
392 anv_block_pool_size(struct anv_block_pool *pool)
393 {
394 return pool->state.end + pool->back_state.end;
395 }
396
397 struct anv_state {
398 int32_t offset;
399 uint32_t alloc_size;
400 void *map;
401 };
402
403 struct anv_fixed_size_state_pool {
404 size_t state_size;
405 union anv_free_list free_list;
406 struct anv_block_state block;
407 };
408
409 #define ANV_MIN_STATE_SIZE_LOG2 6
410 #define ANV_MAX_STATE_SIZE_LOG2 20
411
412 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
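
/* Illustrative bucket math (an assumption inferred from the bounds above,
 * not the allocator's exact code): state sizes are rounded up to a power of
 * two between 1 << 6 (64 B) and 1 << 20 (1 MB), so a 100-byte allocation
 * would land in the 128-byte bucket:
 *
 *    unsigned size_log2 = 32 - __builtin_clz(100 - 1);        // == 7
 *    size_log2 = MAX2(size_log2, ANV_MIN_STATE_SIZE_LOG2);    // still 7
 *    unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;   // == 1
 */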
413
414 struct anv_state_pool {
415 struct anv_block_pool *block_pool;
416 struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
417 };
418
419 struct anv_state_stream_block;
420
421 struct anv_state_stream {
422 struct anv_block_pool *block_pool;
423
424 /* The current working block */
425 struct anv_state_stream_block *block;
426
427 /* Offset at which the current block starts */
428 uint32_t start;
429 /* Offset at which to allocate the next state */
430 uint32_t next;
431 /* Offset at which the current block ends */
432 uint32_t end;
433 };
434
435 #define CACHELINE_SIZE 64
436 #define CACHELINE_MASK 63
437
438 static inline void
439 anv_clflush_range(void *start, size_t size)
440 {
441 void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
442 void *end = start + size;
443
444 while (p < end) {
445 __builtin_ia32_clflush(p);
446 p += CACHELINE_SIZE;
447 }
448 }
449
450 static inline void
451 anv_flush_range(void *start, size_t size)
452 {
453 __builtin_ia32_mfence();
454 anv_clflush_range(start, size);
455 }
456
457 static inline void
458 anv_invalidate_range(void *start, size_t size)
459 {
460 anv_clflush_range(start, size);
461 __builtin_ia32_mfence();
462 }
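
/* Usage sketch (illustrative, assuming an anv_device *device and an
 * anv_state state): on non-LLC platforms, CPU writes to GPU-read memory
 * must be flushed out of the CPU caches before the GPU consumes them:
 *
 *    memcpy(state.map, data, size);
 *    if (!device->info.has_llc)
 *       anv_flush_range(state.map, size);
 */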
463
464 VkResult anv_block_pool_init(struct anv_block_pool *pool,
465 struct anv_device *device, uint32_t block_size);
466 void anv_block_pool_finish(struct anv_block_pool *pool);
467 int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
468 int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
469 void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
470 void anv_state_pool_init(struct anv_state_pool *pool,
471 struct anv_block_pool *block_pool);
472 void anv_state_pool_finish(struct anv_state_pool *pool);
473 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
474 size_t state_size, size_t alignment);
475 void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
476 void anv_state_stream_init(struct anv_state_stream *stream,
477 struct anv_block_pool *block_pool);
478 void anv_state_stream_finish(struct anv_state_stream *stream);
479 struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
480 uint32_t size, uint32_t alignment);
481
482 /**
483 * Implements a pool of re-usable BOs. The interface is identical to that
484 * of block_pool except that each block is its own BO.
485 */
486 struct anv_bo_pool {
487 struct anv_device *device;
488
489 void *free_list[16];
490 };
491
492 void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
493 void anv_bo_pool_finish(struct anv_bo_pool *pool);
494 VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
495 uint32_t size);
496 void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
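
/* Typical lifecycle (illustrative):
 *
 *    struct anv_bo bo;
 *    VkResult result = anv_bo_pool_alloc(pool, &bo, 4096);
 *    if (result == VK_SUCCESS) {
 *       ... write batch contents through bo.map ...
 *       anv_bo_pool_free(pool, &bo);
 *    }
 */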
497
498 struct anv_scratch_bo {
499 bool exists;
500 struct anv_bo bo;
501 };
502
503 struct anv_scratch_pool {
504 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
505 struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
506 };
507
508 void anv_scratch_pool_init(struct anv_device *device,
509 struct anv_scratch_pool *pool);
510 void anv_scratch_pool_finish(struct anv_device *device,
511 struct anv_scratch_pool *pool);
512 struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
513 struct anv_scratch_pool *pool,
514 gl_shader_stage stage,
515 unsigned per_thread_scratch);
516
517 struct anv_physical_device {
518 VK_LOADER_DATA _loader_data;
519
520 struct anv_instance * instance;
521 uint32_t chipset_id;
522 char path[20];
523 const char * name;
524 struct gen_device_info info;
525 uint64_t aperture_size;
526 struct brw_compiler * compiler;
527 struct isl_device isl_dev;
528 int cmd_parser_version;
529
530 uint32_t eu_total;
531 uint32_t subslice_total;
532
533 uint8_t uuid[VK_UUID_SIZE];
534
535 struct wsi_device wsi_device;
536 int local_fd;
537 };
538
539 struct anv_instance {
540 VK_LOADER_DATA _loader_data;
541
542 VkAllocationCallbacks alloc;
543
544 uint32_t apiVersion;
545 int physicalDeviceCount;
546 struct anv_physical_device physicalDevice;
547 };
548
549 VkResult anv_init_wsi(struct anv_physical_device *physical_device);
550 void anv_finish_wsi(struct anv_physical_device *physical_device);
551
552 struct anv_queue {
553 VK_LOADER_DATA _loader_data;
554
555 struct anv_device * device;
556
557 struct anv_state_pool * pool;
558 };
559
560 struct anv_pipeline_cache {
561 struct anv_device * device;
562 pthread_mutex_t mutex;
563
564 struct hash_table * cache;
565 };
566
567 struct anv_pipeline_bind_map;
568
569 void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
570 struct anv_device *device,
571 bool cache_enabled);
572 void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
573
574 struct anv_shader_bin *
575 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
576 const void *key, uint32_t key_size);
577 struct anv_shader_bin *
578 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
579 const void *key_data, uint32_t key_size,
580 const void *kernel_data, uint32_t kernel_size,
581 const struct brw_stage_prog_data *prog_data,
582 uint32_t prog_data_size,
583 const struct anv_pipeline_bind_map *bind_map);
584
585 struct anv_device {
586 VK_LOADER_DATA _loader_data;
587
588 VkAllocationCallbacks alloc;
589
590 struct anv_instance * instance;
591 uint32_t chipset_id;
592 struct gen_device_info info;
593 struct isl_device isl_dev;
594 int context_id;
595 int fd;
596 bool can_chain_batches;
597 bool robust_buffer_access;
598
599 struct anv_bo_pool batch_bo_pool;
600
601 struct anv_block_pool dynamic_state_block_pool;
602 struct anv_state_pool dynamic_state_pool;
603
604 struct anv_block_pool instruction_block_pool;
605 struct anv_state_pool instruction_state_pool;
606
607 struct anv_block_pool surface_state_block_pool;
608 struct anv_state_pool surface_state_pool;
609
610 struct anv_bo workaround_bo;
611
612 struct anv_pipeline_cache blorp_shader_cache;
613 struct blorp_context blorp;
614
615 struct anv_state border_colors;
616
617 struct anv_queue queue;
618
619 struct anv_scratch_pool scratch_pool;
620
621 uint32_t default_mocs;
622
623 pthread_mutex_t mutex;
624 pthread_cond_t queue_submit;
625 };
626
627 static inline void
628 anv_state_flush(struct anv_device *device, struct anv_state state)
629 {
630 if (device->info.has_llc)
631 return;
632
633 anv_flush_range(state.map, state.alloc_size);
634 }
635
636 void anv_device_init_blorp(struct anv_device *device);
637 void anv_device_finish_blorp(struct anv_device *device);
638
639 VkResult anv_device_execbuf(struct anv_device *device,
640 struct drm_i915_gem_execbuffer2 *execbuf,
641 struct anv_bo **execbuf_bos);
642
643 void* anv_gem_mmap(struct anv_device *device,
644 uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
645 void anv_gem_munmap(void *p, uint64_t size);
646 uint32_t anv_gem_create(struct anv_device *device, size_t size);
647 void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
648 uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
649 int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
650 int anv_gem_execbuffer(struct anv_device *device,
651 struct drm_i915_gem_execbuffer2 *execbuf);
652 int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
653 uint32_t stride, uint32_t tiling);
654 int anv_gem_create_context(struct anv_device *device);
655 int anv_gem_destroy_context(struct anv_device *device, int context);
656 int anv_gem_get_param(int fd, uint32_t param);
657 bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
658 int anv_gem_get_aperture(int fd, uint64_t *size);
659 int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
660 uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
661 int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
662 int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
663 uint32_t read_domains, uint32_t write_domain);
664
665 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
666
667 struct anv_reloc_list {
668 size_t num_relocs;
669 size_t array_length;
670 struct drm_i915_gem_relocation_entry * relocs;
671 struct anv_bo ** reloc_bos;
672 };
673
674 VkResult anv_reloc_list_init(struct anv_reloc_list *list,
675 const VkAllocationCallbacks *alloc);
676 void anv_reloc_list_finish(struct anv_reloc_list *list,
677 const VkAllocationCallbacks *alloc);
678
679 uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
680 const VkAllocationCallbacks *alloc,
681 uint32_t offset, struct anv_bo *target_bo,
682 uint32_t delta);
683
684 struct anv_batch_bo {
685 /* Link in the anv_cmd_buffer.owned_batch_bos list */
686 struct list_head link;
687
688 struct anv_bo bo;
689
690 /* Bytes actually consumed in this batch BO */
691 size_t length;
692
693 struct anv_reloc_list relocs;
694 };
695
696 struct anv_batch {
697 const VkAllocationCallbacks * alloc;
698
699 void * start;
700 void * end;
701 void * next;
702
703 struct anv_reloc_list * relocs;
704
705 /* This callback is called (with the associated user data) in the event
706 * that the batch runs out of space.
707 */
708 VkResult (*extend_cb)(struct anv_batch *, void *);
709 void * user_data;
710 };
711
712 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
713 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
714 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
715 void *location, struct anv_bo *bo, uint32_t offset);
716 VkResult anv_device_submit_simple_batch(struct anv_device *device,
717 struct anv_batch *batch);
718
719 struct anv_address {
720 struct anv_bo *bo;
721 uint32_t offset;
722 };
723
724 static inline uint64_t
725 _anv_combine_address(struct anv_batch *batch, void *location,
726 const struct anv_address address, uint32_t delta)
727 {
728 if (address.bo == NULL) {
729 return address.offset + delta;
730 } else {
731 assert(batch->start <= location && location < batch->end);
732
733 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
734 }
735 }
736
737 #define __gen_address_type struct anv_address
738 #define __gen_user_data struct anv_batch
739 #define __gen_combine_address _anv_combine_address
740
741 /* Wrapper macros needed to work around preprocessor argument issues. In
742 * particular, arguments don't get pre-evaluated if they are concatenated.
743 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
744 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
745 * We can work around this easily enough with these helpers.
746 */
747 #define __anv_cmd_length(cmd) cmd ## _length
748 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
749 #define __anv_cmd_header(cmd) cmd ## _header
750 #define __anv_cmd_pack(cmd) cmd ## _pack
751 #define __anv_reg_num(reg) reg ## _num
752
753 #define anv_pack_struct(dst, struc, ...) do { \
754 struct struc __template = { \
755 __VA_ARGS__ \
756 }; \
757 __anv_cmd_pack(struc)(NULL, dst, &__template); \
758 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
759 } while (0)
760
761 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
762 void *__dst = anv_batch_emit_dwords(batch, n); \
763 struct cmd __template = { \
764 __anv_cmd_header(cmd), \
765 .DWordLength = n - __anv_cmd_length_bias(cmd), \
766 __VA_ARGS__ \
767 }; \
768 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
769 __dst; \
770 })
771
772 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
773 do { \
774 uint32_t *dw; \
775 \
776 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
777 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
778 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
779 dw[i] = (dwords0)[i] | (dwords1)[i]; \
780 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
781 } while (0)
782
783 #define anv_batch_emit(batch, cmd, name) \
784 for (struct cmd name = { __anv_cmd_header(cmd) }, \
785 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
786 __builtin_expect(_dst != NULL, 1); \
787 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
788 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
789 _dst = NULL; \
790 }))
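
/* Usage sketch (illustrative): the for-loop structure above gives the body
 * a named, header-initialized template that is packed into the batch when
 * the block closes:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */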
791
792 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
793 .GraphicsDataTypeGFDT = 0, \
794 .LLCCacheabilityControlLLCCC = 0, \
795 .L3CacheabilityControlL3CC = 1, \
796 }
797
798 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
799 .LLCeLLCCacheabilityControlLLCCC = 0, \
800 .L3CacheabilityControlL3CC = 1, \
801 }
802
803 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
804 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
805 .TargetCache = L3DefertoPATforLLCeLLCselection, \
806 .AgeforQUADLRU = 0 \
807 }
808
809 /* Skylake: MOCS is now an index into an array of 62 different caching
810 * configurations programmed by the kernel.
811 */
812
813 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
814 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
815 .IndextoMOCSTables = 2 \
816 }
817
818 #define GEN9_MOCS_PTE { \
819 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
820 .IndextoMOCSTables = 1 \
821 }
822
823 struct anv_device_memory {
824 struct anv_bo bo;
825 uint32_t type_index;
826 VkDeviceSize map_size;
827 void * map;
828 };
829
830 /**
831 * Header for Vertex URB Entry (VUE)
832 */
833 struct anv_vue_header {
834 uint32_t Reserved;
835 uint32_t RTAIndex; /* RenderTargetArrayIndex */
836 uint32_t ViewportIndex;
837 float PointWidth;
838 };
839
840 struct anv_descriptor_set_binding_layout {
841 #ifndef NDEBUG
842 /* The type of the descriptors in this binding */
843 VkDescriptorType type;
844 #endif
845
846 /* Number of array elements in this binding */
847 uint16_t array_size;
848
849 /* Index into the flattened descriptor set */
850 uint16_t descriptor_index;
851
852 /* Index into the dynamic state array for a dynamic buffer */
853 int16_t dynamic_offset_index;
854
855 /* Index into the descriptor set buffer views */
856 int16_t buffer_index;
857
858 struct {
859 /* Index into the binding table for the associated surface */
860 int16_t surface_index;
861
862 /* Index into the sampler table for the associated sampler */
863 int16_t sampler_index;
864
865 /* Index into the image table for the associated image */
866 int16_t image_index;
867 } stage[MESA_SHADER_STAGES];
868
869 /* Immutable samplers (or NULL if no immutable samplers) */
870 struct anv_sampler **immutable_samplers;
871 };
872
873 struct anv_descriptor_set_layout {
874 /* Number of bindings in this descriptor set */
875 uint16_t binding_count;
876
877 /* Total size of the descriptor set with room for all array entries */
878 uint16_t size;
879
880 /* Shader stages affected by this descriptor set */
881 uint16_t shader_stages;
882
883 /* Number of buffers in this descriptor set */
884 uint16_t buffer_count;
885
886 /* Number of dynamic offsets used by this descriptor set */
887 uint16_t dynamic_offset_count;
888
889 /* Bindings in this descriptor set */
890 struct anv_descriptor_set_binding_layout binding[0];
891 };
892
893 struct anv_descriptor {
894 VkDescriptorType type;
895
896 union {
897 struct {
898 struct anv_image_view *image_view;
899 struct anv_sampler *sampler;
900 };
901
902 struct anv_buffer_view *buffer_view;
903 };
904 };
905
906 struct anv_descriptor_set {
907 const struct anv_descriptor_set_layout *layout;
908 uint32_t size;
909 uint32_t buffer_count;
910 struct anv_buffer_view *buffer_views;
911 struct anv_descriptor descriptors[0];
912 };
913
914 struct anv_buffer_view {
915 enum isl_format format; /**< VkBufferViewCreateInfo::format */
916 struct anv_bo *bo;
917 uint32_t offset; /**< Offset into bo. */
918 uint64_t range; /**< VkBufferViewCreateInfo::range */
919
920 struct anv_state surface_state;
921 struct anv_state storage_surface_state;
922 struct anv_state writeonly_storage_surface_state;
923
924 struct brw_image_param storage_image_param;
925 };
926
927 struct anv_descriptor_pool {
928 uint32_t size;
929 uint32_t next;
930 uint32_t free_list;
931
932 struct anv_state_stream surface_state_stream;
933 void *surface_state_free_list;
934
935 char data[0];
936 };
937
938 size_t
939 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout);
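
/* A plausible implementation sketch (an assumption; the actual definition
 * lives in another compilation unit): the layout ends in a flexible array
 * member, so its size is the struct itself plus one entry per binding:
 *
 *    return sizeof(*layout) +
 *           layout->binding_count * sizeof(layout->binding[0]);
 */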
940
941 VkResult
942 anv_descriptor_set_create(struct anv_device *device,
943 struct anv_descriptor_pool *pool,
944 const struct anv_descriptor_set_layout *layout,
945 struct anv_descriptor_set **out_set);
946
947 void
948 anv_descriptor_set_destroy(struct anv_device *device,
949 struct anv_descriptor_pool *pool,
950 struct anv_descriptor_set *set);
951
952 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
953
954 struct anv_pipeline_binding {
955 /* The descriptor set this surface corresponds to. The special value of
956 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
957 * to a color attachment and not a regular descriptor.
958 */
959 uint8_t set;
960
961 /* Binding in the descriptor set */
962 uint8_t binding;
963
964 /* Index in the binding */
965 uint8_t index;
966
967 /* Input attachment index (relative to the subpass) */
968 uint8_t input_attachment_index;
969
970 /* For a storage image, whether it is write-only */
971 bool write_only;
972 };
973
974 struct anv_pipeline_layout {
975 struct {
976 struct anv_descriptor_set_layout *layout;
977 uint32_t dynamic_offset_start;
978 } set[MAX_SETS];
979
980 uint32_t num_sets;
981
982 struct {
983 bool has_dynamic_offsets;
984 } stage[MESA_SHADER_STAGES];
985
986 unsigned char sha1[20];
987 };
988
989 struct anv_buffer {
990 struct anv_device * device;
991 VkDeviceSize size;
992
993 VkBufferUsageFlags usage;
994
995 /* Set when bound */
996 struct anv_bo * bo;
997 VkDeviceSize offset;
998 };
999
1000 enum anv_cmd_dirty_bits {
1001 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1002 ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1003 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1004 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1005 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1006 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1007 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1008 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1009 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1010 ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
1011 ANV_CMD_DIRTY_PIPELINE = 1 << 9,
1012 ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
1013 ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
1014 };
1015 typedef uint32_t anv_cmd_dirty_mask_t;
1016
1017 enum anv_pipe_bits {
1018 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
1019 ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
1020 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
1021 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
1022 ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
1023 ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
1024 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
1025 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
1026 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
1027 ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
1028 ANV_PIPE_CS_STALL_BIT = (1 << 20),
1029
1030 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1031 * a flush has happened but not a CS stall. The next time we do any sort
1032 * of invalidation we need to insert a CS stall at that time. Otherwise,
1033 * we would have to CS stall on every flush which could be bad.
1034 */
1035 ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
1036 };
1037
1038 #define ANV_PIPE_FLUSH_BITS ( \
1039 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1040 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1041 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1042
1043 #define ANV_PIPE_STALL_BITS ( \
1044 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1045 ANV_PIPE_DEPTH_STALL_BIT | \
1046 ANV_PIPE_CS_STALL_BIT)
1047
1048 #define ANV_PIPE_INVALIDATE_BITS ( \
1049 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1050 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1051 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1052 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1053 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1054 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
1055
1056 struct anv_vertex_binding {
1057 struct anv_buffer * buffer;
1058 VkDeviceSize offset;
1059 };
1060
1061 struct anv_push_constants {
1062 /* Current allocated size of this push constants data structure.
1063 * Because a decent chunk of it may not be used (images on SKL, for
1064 * instance), we won't actually allocate the entire structure up-front.
1065 */
1066 uint32_t size;
1067
1068 /* Push constant data provided by the client through vkPushConstants */
1069 uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
1070
1071 /* Our hardware only provides zero-based vertex and instance IDs, so in
1072 * order to satisfy the Vulkan requirements, we may have to push one or
1073 * both of these into the shader.
1074 */
1075 uint32_t base_vertex;
1076 uint32_t base_instance;
1077
1078 /* Offsets and ranges for dynamically bound buffers */
1079 struct {
1080 uint32_t offset;
1081 uint32_t range;
1082 } dynamic[MAX_DYNAMIC_BUFFERS];
1083
1084 /* Image data for image_load_store on pre-SKL */
1085 struct brw_image_param images[MAX_IMAGES];
1086 };
1087
1088 struct anv_dynamic_state {
1089 struct {
1090 uint32_t count;
1091 VkViewport viewports[MAX_VIEWPORTS];
1092 } viewport;
1093
1094 struct {
1095 uint32_t count;
1096 VkRect2D scissors[MAX_SCISSORS];
1097 } scissor;
1098
1099 float line_width;
1100
1101 struct {
1102 float bias;
1103 float clamp;
1104 float slope;
1105 } depth_bias;
1106
1107 float blend_constants[4];
1108
1109 struct {
1110 float min;
1111 float max;
1112 } depth_bounds;
1113
1114 struct {
1115 uint32_t front;
1116 uint32_t back;
1117 } stencil_compare_mask;
1118
1119 struct {
1120 uint32_t front;
1121 uint32_t back;
1122 } stencil_write_mask;
1123
1124 struct {
1125 uint32_t front;
1126 uint32_t back;
1127 } stencil_reference;
1128 };
1129
1130 extern const struct anv_dynamic_state default_dynamic_state;
1131
1132 void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
1133 const struct anv_dynamic_state *src,
1134 uint32_t copy_mask);
1135
1136 /**
1137 * Attachment state when recording a renderpass instance.
1138 *
1139 * The clear value is valid only if there exists a pending clear.
1140 */
1141 struct anv_attachment_state {
1142 enum isl_aux_usage aux_usage;
1143 enum isl_aux_usage input_aux_usage;
1144 struct anv_state color_rt_state;
1145 struct anv_state input_att_state;
1146
1147 VkImageLayout current_layout;
1148 VkImageAspectFlags pending_clear_aspects;
1149 bool fast_clear;
1150 VkClearValue clear_value;
1151 bool clear_color_is_zero_one;
1152 };
1153
1154 /** State required while building cmd buffer */
1155 struct anv_cmd_state {
1156 /* PIPELINE_SELECT.PipelineSelection */
1157 uint32_t current_pipeline;
1158 const struct gen_l3_config * current_l3_config;
1159 uint32_t vb_dirty;
1160 anv_cmd_dirty_mask_t dirty;
1161 anv_cmd_dirty_mask_t compute_dirty;
1162 enum anv_pipe_bits pending_pipe_bits;
1163 uint32_t num_workgroups_offset;
1164 struct anv_bo *num_workgroups_bo;
1165 VkShaderStageFlags descriptors_dirty;
1166 VkShaderStageFlags push_constants_dirty;
1167 uint32_t scratch_size;
1168 struct anv_pipeline * pipeline;
1169 struct anv_pipeline * compute_pipeline;
1170 struct anv_framebuffer * framebuffer;
1171 struct anv_render_pass * pass;
1172 struct anv_subpass * subpass;
1173 VkRect2D render_area;
1174 uint32_t restart_index;
1175 struct anv_vertex_binding vertex_bindings[MAX_VBS];
1176 struct anv_descriptor_set * descriptors[MAX_SETS];
1177 VkShaderStageFlags push_constant_stages;
1178 struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
1179 struct anv_state binding_tables[MESA_SHADER_STAGES];
1180 struct anv_state samplers[MESA_SHADER_STAGES];
1181 struct anv_dynamic_state dynamic;
1182 bool need_query_wa;
1183
1184 /**
1185 * Whether or not the gen8 PMA fix is enabled. We ensure that it is
1186 * disabled at the top of every command buffer by disabling it in
1187 * EndCommandBuffer and before invoking a secondary in ExecuteCommands.
1188 */
1189 bool pma_fix_enabled;
1190
1191 /**
1192 * Whether or not we know for certain that HiZ is enabled for the current
1193 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
1194 * enabled or not, this will be false.
1195 */
1196 bool hiz_enabled;
1197
1198 /**
1199 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1200 * valid only when recording a render pass instance.
1201 */
1202 struct anv_attachment_state * attachments;
1203
1204 /**
1205 * Surface states for color render targets. These are stored in a single
1206 * flat array. For depth-stencil attachments, the surface state is simply
1207 * left blank.
1208 */
1209 struct anv_state render_pass_states;
1210
1211 /**
1212 * A null surface state of the right size to match the framebuffer. This
1213 * is one of the states in render_pass_states.
1214 */
1215 struct anv_state null_surface_state;
1216
1217 struct {
1218 struct anv_buffer * index_buffer;
1219 uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1220 uint32_t index_offset;
1221 } gen7;
1222 };
1223
1224 struct anv_cmd_pool {
1225 VkAllocationCallbacks alloc;
1226 struct list_head cmd_buffers;
1227 };
1228
1229 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1230
1231 enum anv_cmd_buffer_exec_mode {
1232 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
1233 ANV_CMD_BUFFER_EXEC_MODE_EMIT,
1234 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
1235 ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
1236 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
1237 };
1238
1239 struct anv_cmd_buffer {
1240 VK_LOADER_DATA _loader_data;
1241
1242 struct anv_device * device;
1243
1244 struct anv_cmd_pool * pool;
1245 struct list_head pool_link;
1246
1247 struct anv_batch batch;
1248
1249 /* Fields required for the actual chain of anv_batch_bo's.
1250 *
1251 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1252 */
1253 struct list_head batch_bos;
1254 enum anv_cmd_buffer_exec_mode exec_mode;
1255
1256 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1257 * referenced by this command buffer
1258 *
1259 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1260 */
1261 struct u_vector seen_bbos;
1262
1263 /* A vector of int32_t's for every block of binding tables.
1264 *
1265 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1266 */
1267 struct u_vector bt_blocks;
1268 uint32_t bt_next;
1269
1270 struct anv_reloc_list surface_relocs;
1271 /** Last seen surface state block pool center bo offset */
1272 uint32_t last_ss_pool_center;
1273
1274 /* Serial for tracking buffer completion */
1275 uint32_t serial;
1276
1277 /* Stream objects for storing temporary data */
1278 struct anv_state_stream surface_state_stream;
1279 struct anv_state_stream dynamic_state_stream;
1280
1281 VkCommandBufferUsageFlags usage_flags;
1282 VkCommandBufferLevel level;
1283
1284 struct anv_cmd_state state;
1285 };
1286
1287 VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1288 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1289 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1290 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
1291 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1292 struct anv_cmd_buffer *secondary);
1293 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
1294 VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
1295 struct anv_cmd_buffer *cmd_buffer);
1296
1297 VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
1298
1299 VkResult
1300 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
1301 gl_shader_stage stage, uint32_t size);
1302 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
1303 anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
1304 (offsetof(struct anv_push_constants, field) + \
1305 sizeof(cmd_buffer->state.push_constants[0]->field)))
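
/* Usage sketch (illustrative): grow the push constant block just enough to
 * hold a field before writing it:
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, MESA_SHADER_VERTEX,
 *                                              base_vertex);
 *    cmd_buffer->state.push_constants[MESA_SHADER_VERTEX]->base_vertex =
 *       first_vertex;
 */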
1306
1307 struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
1308 const void *data, uint32_t size, uint32_t alignment);
1309 struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
1310 uint32_t *a, uint32_t *b,
1311 uint32_t dwords, uint32_t alignment);
1312
1313 struct anv_address
1314 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
1315 struct anv_state
1316 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
1317 uint32_t entries, uint32_t *state_offset);
1318 struct anv_state
1319 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
1320 struct anv_state
1321 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
1322 uint32_t size, uint32_t alignment);
1323
1324 VkResult
1325 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
1326
1327 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
1328 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
1329 bool depth_clamp_enable);
1330 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
1331
1332 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
1333 struct anv_render_pass *pass,
1334 struct anv_framebuffer *framebuffer,
1335 const VkClearValue *clear_values);
1336
1337 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
1338
1339 struct anv_state
1340 anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
1341 gl_shader_stage stage);
1342 struct anv_state
1343 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
1344
1345 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
1346 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
1347
1348 const struct anv_image_view *
1349 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
1350
1351 struct anv_state
1352 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
1353 uint32_t num_entries,
1354 uint32_t *state_offset);
1355
1356 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
1357
1358 enum anv_fence_state {
1359 /** Indicates that this is a new (or newly reset) fence */
1360 ANV_FENCE_STATE_RESET,
1361
1362 /** Indicates that this fence has been submitted to the GPU but is still
1363 * (as far as we know) in use by the GPU.
1364 */
1365 ANV_FENCE_STATE_SUBMITTED,
1366
1367 ANV_FENCE_STATE_SIGNALED,
1368 };
1369
1370 struct anv_fence {
1371 struct anv_bo bo;
1372 struct drm_i915_gem_execbuffer2 execbuf;
1373 struct drm_i915_gem_exec_object2 exec2_objects[1];
1374 enum anv_fence_state state;
1375 };
1376
1377 struct anv_event {
1378 uint64_t semaphore;
1379 struct anv_state state;
1380 };
1381
1382 struct anv_shader_module {
1383 unsigned char sha1[20];
1384 uint32_t size;
1385 char data[0];
1386 };
1387
1388 void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
1389 struct anv_shader_module *module,
1390 const char *entrypoint,
1391 const struct anv_pipeline_layout *pipeline_layout,
1392 const VkSpecializationInfo *spec_info);
1393
1394 static inline gl_shader_stage
1395 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
1396 {
1397 assert(__builtin_popcount(vk_stage) == 1);
1398 return ffs(vk_stage) - 1;
1399 }
1400
1401 static inline VkShaderStageFlagBits
1402 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
1403 {
1404 return (1 << mesa_stage);
1405 }
1406
1407 #define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
1408
1409 #define anv_foreach_stage(stage, stage_bits) \
1410 for (gl_shader_stage stage, \
1411 __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
1412 stage = __builtin_ffs(__tmp) - 1, __tmp; \
1413 __tmp &= ~(1 << (stage)))
1414
1415 struct anv_pipeline_bind_map {
1416 uint32_t surface_count;
1417 uint32_t sampler_count;
1418 uint32_t image_count;
1419
1420 struct anv_pipeline_binding * surface_to_descriptor;
1421 struct anv_pipeline_binding * sampler_to_descriptor;
1422 };
1423
1424 struct anv_shader_bin_key {
1425 uint32_t size;
1426 uint8_t data[0];
1427 };
1428
1429 struct anv_shader_bin {
1430 uint32_t ref_cnt;
1431
1432 const struct anv_shader_bin_key *key;
1433
1434 struct anv_state kernel;
1435 uint32_t kernel_size;
1436
1437 const struct brw_stage_prog_data *prog_data;
1438 uint32_t prog_data_size;
1439
1440 struct anv_pipeline_bind_map bind_map;
1441
1442 /* Prog data follows, then params, then the key, all aligned to 8 bytes */
1443 };
1444
1445 struct anv_shader_bin *
1446 anv_shader_bin_create(struct anv_device *device,
1447 const void *key, uint32_t key_size,
1448 const void *kernel, uint32_t kernel_size,
1449 const struct brw_stage_prog_data *prog_data,
1450 uint32_t prog_data_size, const void *prog_data_param,
1451 const struct anv_pipeline_bind_map *bind_map);
1452
1453 void
1454 anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
1455
1456 static inline void
1457 anv_shader_bin_ref(struct anv_shader_bin *shader)
1458 {
1459 assert(shader->ref_cnt >= 1);
1460 __sync_fetch_and_add(&shader->ref_cnt, 1);
1461 }
1462
1463 static inline void
1464 anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
1465 {
1466 assert(shader->ref_cnt >= 1);
1467 if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
1468 anv_shader_bin_destroy(device, shader);
1469 }
1470
1471 struct anv_pipeline {
1472 struct anv_device * device;
1473 struct anv_batch batch;
1474 uint32_t batch_data[512];
1475 struct anv_reloc_list batch_relocs;
1476 uint32_t dynamic_state_mask;
1477 struct anv_dynamic_state dynamic_state;
1478
1479 struct anv_pipeline_layout * layout;
1480
1481 bool needs_data_cache;
1482
1483 struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
1484
1485 struct {
1486 const struct gen_l3_config * l3_config;
1487 uint32_t total_size;
1488 } urb;
1489
1490 VkShaderStageFlags active_stages;
1491 struct anv_state blend_state;
1492
1493 uint32_t vb_used;
1494 uint32_t binding_stride[MAX_VBS];
1495 bool instancing_enable[MAX_VBS];
1496 bool primitive_restart;
1497 uint32_t topology;
1498
1499 uint32_t cs_right_mask;
1500
1501 bool writes_depth;
1502 bool depth_test_enable;
1503 bool writes_stencil;
1504 bool stencil_test_enable;
1505 bool depth_clamp_enable;
1506 bool kill_pixel;
1507
1508 struct {
1509 uint32_t sf[7];
1510 uint32_t depth_stencil_state[3];
1511 } gen7;
1512
1513 struct {
1514 uint32_t sf[4];
1515 uint32_t raster[5];
1516 uint32_t wm_depth_stencil[3];
1517 } gen8;
1518
1519 struct {
1520 uint32_t wm_depth_stencil[4];
1521 } gen9;
1522
1523 uint32_t interface_descriptor_data[8];
1524 };
1525
1526 static inline bool
1527 anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
1528 gl_shader_stage stage)
1529 {
1530 return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
1531 }
1532
1533 #define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage) \
1534 static inline const struct brw_##prefix##_prog_data * \
1535 get_##prefix##_prog_data(const struct anv_pipeline *pipeline) \
1536 { \
1537 if (anv_pipeline_has_stage(pipeline, stage)) { \
1538 return (const struct brw_##prefix##_prog_data *) \
1539 pipeline->shaders[stage]->prog_data; \
1540 } else { \
1541 return NULL; \
1542 } \
1543 }
1544
1545 ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
1546 ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
1547 ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
1548 ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
1549 ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
1550 ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
1551
1552 static inline const struct brw_vue_prog_data *
1553 anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
1554 {
1555 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
1556 return &get_gs_prog_data(pipeline)->base;
1557 else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
1558 return &get_tes_prog_data(pipeline)->base;
1559 else
1560 return &get_vs_prog_data(pipeline)->base;
1561 }
1562
1563 VkResult
1564 anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
1565 struct anv_pipeline_cache *cache,
1566 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1567 const VkAllocationCallbacks *alloc);
1568
1569 VkResult
1570 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1571 struct anv_pipeline_cache *cache,
1572 const VkComputePipelineCreateInfo *info,
1573 struct anv_shader_module *module,
1574 const char *entrypoint,
1575 const VkSpecializationInfo *spec_info);
1576
1577 struct anv_format {
1578 enum isl_format isl_format:16;
1579 struct isl_swizzle swizzle;
1580 };
1581
1582 struct anv_format
1583 anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
1584 VkImageAspectFlags aspect, VkImageTiling tiling);
1585
1586 static inline enum isl_format
1587 anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
1588 VkImageAspectFlags aspect, VkImageTiling tiling)
1589 {
1590 return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
1591 }
1592
1593 static inline struct isl_swizzle
1594 anv_swizzle_for_render(struct isl_swizzle swizzle)
1595 {
1596 /* Sometimes the swizzle maps alpha to one. We do this to fake RGB as
1597 * RGBA for texturing.
1598 */
1599 assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
1600 swizzle.a == ISL_CHANNEL_SELECT_ALPHA);
1601
1602 /* But it doesn't matter what we render to that channel */
1603 swizzle.a = ISL_CHANNEL_SELECT_ALPHA;
1604
1605 return swizzle;
1606 }
1607
1608 void
1609 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
1610
1611 /**
1612 * Subsurface of an anv_image.
1613 */
1614 struct anv_surface {
1615 /** Valid only if isl_surf::size > 0. */
1616 struct isl_surf isl;
1617
1618 /**
1619 * Offset from VkImage's base address, as bound by vkBindImageMemory().
1620 */
1621 uint32_t offset;
1622 };
1623
1624 struct anv_image {
1625 VkImageType type;
1626 /* The original VkFormat provided by the client. This may not match any
1627 * of the actual surface formats.
1628 */
1629 VkFormat vk_format;
1630 VkImageAspectFlags aspects;
1631 VkExtent3D extent;
1632 uint32_t levels;
1633 uint32_t array_size;
1634 uint32_t samples; /**< VkImageCreateInfo::samples */
1635 VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
1636 VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
1637
1638 VkDeviceSize size;
1639 uint32_t alignment;
1640
1641 /* Set when bound */
1642 struct anv_bo *bo;
1643 VkDeviceSize offset;
1644
1645 /**
1646 * Image subsurfaces
1647 *
1648 * For each foo, anv_image::foo_surface is valid if and only if
1649 * anv_image::aspects has a foo aspect.
1650 *
1651 * The hardware requires that the depth buffer and stencil buffer be
1652 * separate surfaces. From Vulkan's perspective, though, depth and stencil
1653 * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
1654 * allocate the depth and stencil buffers as separate surfaces in the same
1655 * bo.
1656 */
1657 union {
1658 struct anv_surface color_surface;
1659
1660 struct {
1661 struct anv_surface depth_surface;
1662 struct anv_surface stencil_surface;
1663 };
1664 };
1665
1666 /**
1667 * For color images, this is the aux usage for this image when not used as a
1668 * color attachment.
1669 *
1670 * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the image
1671 * has a HiZ buffer.
1672 */
1673 enum isl_aux_usage aux_usage;
1674
1675 struct anv_surface aux_surface;
1676 };
1677
1678 /* Returns true if a HiZ-enabled depth buffer can be sampled from. */
1679 static inline bool
1680 anv_can_sample_with_hiz(uint8_t gen, uint32_t samples)
1681 {
1682 return gen >= 8 && samples == 1;
1683 }
1684
1685 void
1686 anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
1687 const struct anv_image *image,
1688 enum blorp_hiz_op op);
1689
1690 static inline uint32_t
1691 anv_get_layerCount(const struct anv_image *image,
1692 const VkImageSubresourceRange *range)
1693 {
1694 return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
1695 image->array_size - range->baseArrayLayer : range->layerCount;
1696 }
1697
1698 static inline uint32_t
1699 anv_get_levelCount(const struct anv_image *image,
1700 const VkImageSubresourceRange *range)
1701 {
1702 return range->levelCount == VK_REMAINING_MIP_LEVELS ?
1703 image->levels - range->baseMipLevel : range->levelCount;
1704 }
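
/* Worked example (illustrative): for an image with array_size == 6 and a
 * range with baseArrayLayer == 2, a layerCount of VK_REMAINING_ARRAY_LAYERS
 * resolves to 6 - 2 == 4 layers; anv_get_levelCount behaves the same way
 * for mip levels.
 */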
1705
1706
1707 struct anv_image_view {
1708 const struct anv_image *image; /**< VkImageViewCreateInfo::image */
1709 struct anv_bo *bo;
1710 uint32_t offset; /**< Offset into bo. */
1711
1712 struct isl_view isl;
1713
1714 VkImageAspectFlags aspect_mask;
1715 VkFormat vk_format;
1716 VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
1717
1718 /** RENDER_SURFACE_STATE when using image as a sampler surface. */
1719 struct anv_state sampler_surface_state;
1720
1721 /**
1722 * RENDER_SURFACE_STATE when using image as a storage image. Separate states
1723 * for write-only and readable, using the real format for write-only and the
1724 * lowered format for readable.
1725 */
1726 struct anv_state storage_surface_state;
1727 struct anv_state writeonly_storage_surface_state;
1728
1729 struct brw_image_param storage_image_param;
1730 };
1731
1732 struct anv_image_create_info {
1733 const VkImageCreateInfo *vk_info;
1734
1735 /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
1736 isl_tiling_flags_t isl_tiling_flags;
1737
1738 uint32_t stride;
1739 };
1740
1741 VkResult anv_image_create(VkDevice _device,
1742 const struct anv_image_create_info *info,
1743 const VkAllocationCallbacks* alloc,
1744 VkImage *pImage);
1745
1746 const struct anv_surface *
1747 anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
1748 VkImageAspectFlags aspect_mask);
1749
1750 enum isl_format
1751 anv_isl_format_for_descriptor_type(VkDescriptorType type);
1752
1753 static inline struct VkExtent3D
1754 anv_sanitize_image_extent(const VkImageType imageType,
1755 const struct VkExtent3D imageExtent)
1756 {
1757 switch (imageType) {
1758 case VK_IMAGE_TYPE_1D:
1759 return (VkExtent3D) { imageExtent.width, 1, 1 };
1760 case VK_IMAGE_TYPE_2D:
1761 return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
1762 case VK_IMAGE_TYPE_3D:
1763 return imageExtent;
1764 default:
1765 unreachable("invalid image type");
1766 }
1767 }
1768
1769 static inline struct VkOffset3D
1770 anv_sanitize_image_offset(const VkImageType imageType,
1771 const struct VkOffset3D imageOffset)
1772 {
1773 switch (imageType) {
1774 case VK_IMAGE_TYPE_1D:
1775 return (VkOffset3D) { imageOffset.x, 0, 0 };
1776 case VK_IMAGE_TYPE_2D:
1777 return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
1778 case VK_IMAGE_TYPE_3D:
1779 return imageOffset;
1780 default:
1781 unreachable("invalid image type");
1782 }
1783 }
1784
1785
1786 void anv_fill_buffer_surface_state(struct anv_device *device,
1787 struct anv_state state,
1788 enum isl_format format,
1789 uint32_t offset, uint32_t range,
1790 uint32_t stride);
1791
1792 void anv_image_view_fill_image_param(struct anv_device *device,
1793 struct anv_image_view *view,
1794 struct brw_image_param *param);
1795 void anv_buffer_view_fill_image_param(struct anv_device *device,
1796 struct anv_buffer_view *view,
1797 struct brw_image_param *param);
1798
1799 struct anv_sampler {
1800 uint32_t state[4];
1801 };
1802
1803 struct anv_framebuffer {
1804 uint32_t width;
1805 uint32_t height;
1806 uint32_t layers;
1807
1808 uint32_t attachment_count;
1809 struct anv_image_view * attachments[0];
1810 };
1811
1812 struct anv_subpass {
1813 uint32_t input_count;
1814 uint32_t * input_attachments;
1815 uint32_t color_count;
1816 uint32_t * color_attachments;
1817 uint32_t * resolve_attachments;
1818
1819 /* TODO: Consider storing the depth/stencil VkAttachmentReference
1820 * instead of its two structure members (below) individually.
1821 */
1822 uint32_t depth_stencil_attachment;
1823 VkImageLayout depth_stencil_layout;
1824
1825 /** Subpass has a depth/stencil self-dependency */
1826 bool has_ds_self_dep;
1827
1828 /** Subpass has at least one resolve attachment */
1829 bool has_resolve;
1830 };
1831
1832 enum anv_subpass_usage {
1833 ANV_SUBPASS_USAGE_DRAW = (1 << 0),
1834 ANV_SUBPASS_USAGE_INPUT = (1 << 1),
1835 ANV_SUBPASS_USAGE_RESOLVE_SRC = (1 << 2),
1836 ANV_SUBPASS_USAGE_RESOLVE_DST = (1 << 3),
1837 };
1838
1839 struct anv_render_pass_attachment {
1840 /* TODO: Consider using VkAttachmentDescription instead of storing each of
1841 * its members individually.
1842 */
1843 VkFormat format;
1844 uint32_t samples;
1845 VkImageUsageFlags usage;
1846 VkAttachmentLoadOp load_op;
1847 VkAttachmentStoreOp store_op;
1848 VkAttachmentLoadOp stencil_load_op;
1849 VkImageLayout initial_layout;
1850 VkImageLayout final_layout;
1851
1852 /* An array, indexed by subpass id, of how the attachment will be used. */
1853 enum anv_subpass_usage * subpass_usage;
1854
1855 /* The subpass id in which the attachment will be used last. */
1856 uint32_t last_subpass_idx;
1857 };
1858
1859 struct anv_render_pass {
1860 uint32_t attachment_count;
1861 uint32_t subpass_count;
1862 uint32_t * subpass_attachments;
1863 enum anv_subpass_usage * subpass_usages;
1864 struct anv_render_pass_attachment * attachments;
1865 struct anv_subpass subpasses[0];
1866 };
1867
1868 struct anv_query_pool_slot {
1869 uint64_t begin;
1870 uint64_t end;
1871 uint64_t available;
1872 };
1873
1874 struct anv_query_pool {
1875 VkQueryType type;
1876 uint32_t slots;
1877 struct anv_bo bo;
1878 };
1879
1880 void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
1881 const char *name);
1882
1883 void anv_dump_image_to_ppm(struct anv_device *device,
1884 struct anv_image *image, unsigned miplevel,
1885 unsigned array_layer, VkImageAspectFlagBits aspect,
1886 const char *filename);
1887
1888 enum anv_dump_action {
1889 ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
1890 };
1891
1892 void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
1893 void anv_dump_finish(void);
1894
1895 void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
1896 struct anv_framebuffer *fb);
1897
1898 #define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
1899 \
1900 static inline struct __anv_type * \
1901 __anv_type ## _from_handle(__VkType _handle) \
1902 { \
1903 return (struct __anv_type *) _handle; \
1904 } \
1905 \
1906 static inline __VkType \
1907 __anv_type ## _to_handle(struct __anv_type *_obj) \
1908 { \
1909 return (__VkType) _obj; \
1910 }
1911
1912 #define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
1913 \
1914 static inline struct __anv_type * \
1915 __anv_type ## _from_handle(__VkType _handle) \
1916 { \
1917 return (struct __anv_type *)(uintptr_t) _handle; \
1918 } \
1919 \
1920 static inline __VkType \
1921 __anv_type ## _to_handle(struct __anv_type *_obj) \
1922 { \
1923 return (__VkType)(uintptr_t) _obj; \
1924 }
1925
1926 #define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
1927 struct __anv_type *__name = __anv_type ## _from_handle(__handle)
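
/* Usage sketch (illustrative): entrypoints recover driver structs from API
 * handles with these casts:
 *
 *    VkResult anv_DeviceWaitIdle(VkDevice _device)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ...
 *    }
 */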
1928
1929 ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
1930 ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
1931 ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
1932 ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
1933 ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
1934
1935 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
1936 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
1937 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
1938 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
1939 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
1940 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
1941 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
1942 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
1943 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
1944 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
1945 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
1946 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
1947 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
1948 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
1949 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
1950 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
1951 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
1952 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
1953 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
1954
1955 /* Gen-specific function declarations */
1956 #ifdef genX
1957 # include "anv_genX.h"
1958 #else
1959 # define genX(x) gen7_##x
1960 # include "anv_genX.h"
1961 # undef genX
1962 # define genX(x) gen75_##x
1963 # include "anv_genX.h"
1964 # undef genX
1965 # define genX(x) gen8_##x
1966 # include "anv_genX.h"
1967 # undef genX
1968 # define genX(x) gen9_##x
1969 # include "anv_genX.h"
1970 # undef genX
1971 #endif
1972
1973 #endif /* ANV_PRIVATE_H */