anv: Add an invalidate_range helper
[mesa.git] / src / intel / vulkan / anv_private.h
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_PRIVATE_H
#define ANV_PRIVATE_H

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>
#include <i915_drm.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x)
#endif

#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_vector.h"
#include "util/vk_alloc.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct gen_l3_config;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "brw_context.h"
#include "isl/isl.h"

#include "wsi_common.h"

/* Allowing different clear colors requires us to perform a depth resolve at
 * the end of certain render passes. This is because while slow clears store
 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
 * See the PRMs for examples describing when additional resolves would be
 * necessary. To enable fast clears without requiring extra resolves, we set
 * the clear value to a globally-defined one. We could allow different values
 * if the user doesn't expect coherent data during or after a render pass
 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
 * 1.0f seems to be the only value used. The only application that doesn't set
 * this value does so by using a seemingly uninitialized clear value.
 */
#define ANV_HZ_FC_VAL 1.0f

#define MAX_VBS 31
#define MAX_SETS 8
#define MAX_RTS 8
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 8

#define ANV_SVGS_VB_INDEX MAX_VBS
#define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)

#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
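
/* A quick sketch of how these helpers behave (illustrative values only):
 *
 *    align_u32(13, 16);           // => 16
 *    align_u32(32, 16);           // => 32
 *    align_down_npot_u32(22, 5);  // => 20 (alignment need not be a power of 2)
 *    anv_is_aligned(64, 16);      // => true
 *
 * The power-of-two variants round up by adding (a - 1) and masking, which
 * is why they assert that the alignment is a power of 2.
 */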

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}

static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

static inline union isl_color_value
vk_to_isl_color(VkClearColorValue color)
{
   return (union isl_color_value) {
      .u32 = {
         color.uint32[0],
         color.uint32[1],
         color.uint32[2],
         color.uint32[3],
      },
   };
}

#define for_each_bit(b, dword)                       \
   for (uint32_t __dword = (dword);                  \
        (b) = __builtin_ffs(__dword) - 1, __dword;   \
        __dword &= ~(1 << (b)))
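
/* Example: walk the set bits of a mask, lowest bit first. __builtin_ffs
 * returns 1-based bit positions, hence the -1 above.
 *
 *    uint32_t b;
 *    for_each_bit(b, cmd_buffer->state.vb_dirty) {
 *       // b is the index of one set bit per iteration
 *    }
 */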

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
#define anv_debug(format, ...) fprintf(stderr, "debug: " format, ##__VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#define anv_debug(format, ...)
#endif
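
/* Typical usage at an error's origin (not when passing a result along):
 *
 *    if (map == NULL)
 *       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
 *
 * In debug builds this funnels through __vk_errorf, giving a single place
 * to set a breakpoint or print the offending file and line.
 */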

/**
 * Warn on ignored extension structs.
 *
 * The Vulkan spec requires us to ignore unsupported or unknown structs in
 * a pNext chain. In debug mode, emitting warnings for ignored structs may
 * help us discover structs that we should not have ignored.
 *
 * From the Vulkan 1.0.38 spec:
 *
 *    Any component of the implementation (the loader, any enabled layers,
 *    and drivers) must skip over, without processing (other than reading the
 *    sType and pNext members) any chained structures with sType values not
 *    defined by extensions supported by that component.
 */
#define anv_debug_ignored_stype(sType) \
   anv_debug("debug: %s: ignored VkStructureType %u\n", __func__, (sType))
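
/* A minimal sketch of the intended call site, walking a pNext chain (a
 * helper such as vk_foreach_struct is assumed here for brevity; any loop
 * that reads sType/pNext works the same way):
 *
 *    vk_foreach_struct(s, pCreateInfo->pNext) {
 *       switch (s->sType) {
 *       default:
 *          anv_debug_ignored_stype(s->sType);
 *          break;
 *       }
 *    }
 */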

void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   do { \
      static bool reported = false; \
      if (!reported) { \
         __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
         reported = true; \
      } \
   } while (0)

/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif

/**
 * If a block of code is annotated with anv_validate, then the block runs only
 * in debug builds.
 */
#ifdef DEBUG
#define anv_validate if (1)
#else
#define anv_validate if (0)
#endif

#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)

/**
 * A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /* We need to set the WRITE flag on winsys BOs so GEM will know we're
    * writing to them and synchronize uses on other rings (e.g. if the
    * display server uses the blitter ring).
    */
   bool is_winsys_bo;
};

static inline void
anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
{
   bo->gem_handle = gem_handle;
   bo->index = 0;
   bo->offset = -1;
   bo->size = size;
   bo->map = NULL;
   bo->is_winsys_bo = false;
}

/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
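
/* A sketch of why the counter matters: updates to the head use a 64-bit
 * compare-and-swap over { offset, count }, so a stale head (the ABA case)
 * fails the CAS even if the very same offset has since been pushed back:
 *
 *    union anv_free_list current = *list, old, new;
 *    do {
 *       old = current;
 *       new.offset = next_offset_for(old.offset);  // hypothetical helper
 *       new.count = old.count + 1;
 *       current.u64 = __sync_val_compare_and_swap(&list->u64,
 *                                                 old.u64, new.u64);
 *    } while (current.u64 != old.u64);
 */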

struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool. This pointer may or may not
    * point to the actual beginning of the block pool memory. If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   uint32_t block_size;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
};
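
/* Pointer math for the two-sided pool, as a sketch: an offset handed out by
 * the alloc functions (negative for the "back", non-negative for the
 * "front") is always resolved against the center:
 *
 *    void *block = pool->map + offset;
 *    // ... where pool->map == pool->bo.map + pool->center_bo_offset
 *
 * so a grow only has to keep map pointing at the same logical center for
 * every previously returned offset to stay valid.
 */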

/* Block pools are backed by a fixed-size 4GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ull << 32)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}

struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
};

struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 20

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */
   uint32_t start;
   /* Offset at which to allocate the next state */
   uint32_t next;
   /* Offset at which the current block ends */
   uint32_t end;
};

#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63

static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   __builtin_ia32_mfence();
   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

static inline void
anv_invalidate_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
   __builtin_ia32_mfence();
}
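
/* The two helpers differ only in where the fence sits, and that is the
 * point: anv_clflush_range fences *before* flushing so prior CPU writes are
 * globally visible once the clflushes land, while anv_invalidate_range
 * fences *after* so the stale lines are gone before any subsequent CPU
 * read. A sketch of the intended use on non-LLC platforms:
 *
 *    // CPU wrote data the GPU will read:
 *    anv_clflush_range(state.map, state.alloc_size);
 *
 *    // GPU wrote data the CPU is about to read:
 *    anv_invalidate_range(bo->map, size);
 */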

static inline void
anv_state_clflush(struct anv_state state)
{
   anv_clflush_range(state.map, state.alloc_size);
}

VkResult anv_block_pool_init(struct anv_block_pool *pool,
                             struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);

/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   void *free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
                           uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
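
/* Sketch of the intended round trip (the size here is illustrative):
 *
 *    struct anv_bo bo;
 *    VkResult result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, 4096);
 *    if (result == VK_SUCCESS) {
 *       // ... write through bo.map ...
 *       anv_bo_pool_free(&device->batch_bo_pool, &bo);
 *    }
 */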

struct anv_scratch_bo {
   bool exists;
   struct anv_bo bo;
};

struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);

struct anv_physical_device {
   VK_LOADER_DATA _loader_data;

   struct anv_instance * instance;
   uint32_t chipset_id;
   char path[20];
   const char * name;
   struct gen_device_info info;
   uint64_t aperture_size;
   struct brw_compiler * compiler;
   struct isl_device isl_dev;
   int cmd_parser_version;

   uint32_t eu_total;
   uint32_t subslice_total;

   uint8_t uuid[VK_UUID_SIZE];

   struct wsi_device wsi_device;
   int local_fd;
};

struct anv_instance {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   uint32_t apiVersion;
   int physicalDeviceCount;
   struct anv_physical_device physicalDevice;
};

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);

struct anv_queue {
   VK_LOADER_DATA _loader_data;

   struct anv_device * device;

   struct anv_state_pool * pool;
};

struct anv_pipeline_cache {
   struct anv_device * device;
   pthread_mutex_t mutex;

   struct hash_table * cache;
};

struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map);

struct anv_device {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct anv_instance * instance;
   uint32_t chipset_id;
   struct gen_device_info info;
   struct isl_device isl_dev;
   int context_id;
   int fd;
   bool can_chain_batches;
   bool robust_buffer_access;

   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_state_pool instruction_state_pool;

   struct anv_block_pool surface_state_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_bo workaround_bo;

   struct anv_pipeline_cache blorp_shader_cache;
   struct blorp_context blorp;

   struct anv_state border_colors;

   struct anv_queue queue;

   struct anv_scratch_pool scratch_pool;

   uint32_t default_mocs;

   pthread_mutex_t mutex;
   pthread_cond_t queue_submit;
};

void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);

VkResult anv_device_execbuf(struct anv_device *device,
                            struct drm_i915_gem_execbuffer2 *execbuf,
                            struct anv_bo **execbuf_bos);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);

struct anv_reloc_list {
   size_t num_relocs;
   size_t array_length;
   struct drm_i915_gem_relocation_entry * relocs;
   struct anv_bo ** reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta);

struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   struct anv_bo bo;

   /* Bytes actually consumed in this batch BO */
   size_t length;

   struct anv_reloc_list relocs;
};

struct anv_batch {
   const VkAllocationCallbacks * alloc;

   void * start;
   void * end;
   void * next;

   struct anv_reloc_list * relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void * user_data;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);

struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address

/* Wrapper macros needed to work around preprocessor argument issues. In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
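
/* For example, without the extra indirection, "cmd ## _length" applied to
 * GENX(3DSTATE_PS) would paste "GENX(3DSTATE_PS)_length" instead of first
 * expanding GENX(...) to, say, GEN9_3DSTATE_PS and then producing
 * GEN9_3DSTATE_PS_length. Routing the token through __anv_cmd_length(cmd)
 * forces that extra expansion step.
 */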

#define anv_pack_struct(dst, struc, ...) do { \
   struct struc __template = { \
      __VA_ARGS__ \
   }; \
   __anv_cmd_pack(struc)(NULL, dst, &__template); \
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
} while (0)

#define anv_batch_emitn(batch, n, cmd, ...) ({ \
   void *__dst = anv_batch_emit_dwords(batch, n); \
   struct cmd __template = { \
      __anv_cmd_header(cmd), \
      .DWordLength = n - __anv_cmd_length_bias(cmd), \
      __VA_ARGS__ \
   }; \
   __anv_cmd_pack(cmd)(batch, __dst, &__template); \
   __dst; \
})

#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4)); \
   } while (0)

#define anv_batch_emit(batch, cmd, name) \
   for (struct cmd name = { __anv_cmd_header(cmd) }, \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1); \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL; \
         }))
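
/* Typical usage: the for-header allocates space, the body only fills in the
 * template fields, e.g.:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *       pc.StallAtPixelScoreboard     = true;
 *    }
 *
 * The loop runs the body exactly once and packs the struct on the way out;
 * if the batch is out of space, _dst is NULL and the body is skipped.
 */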

#define anv_state_pool_emit(pool, cmd, align, ...) ({ \
   const uint32_t __size = __anv_cmd_length(cmd) * 4; \
   struct anv_state __state = \
      anv_state_pool_alloc((pool), __size, align); \
   struct cmd __template = { \
      __VA_ARGS__ \
   }; \
   __anv_cmd_pack(cmd)(NULL, __state.map, &__template); \
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
   if (!(pool)->block_pool->device->info.has_llc) \
      anv_state_clflush(__state); \
   __state; \
})

#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
   .GraphicsDataTypeGFDT = 0, \
   .LLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
   .LLCeLLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
   .MemoryTypeLLCeLLCCacheabilityControl = WB, \
   .TargetCache = L3DefertoPATforLLCeLLCselection, \
   .AgeforQUADLRU = 0 \
}

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */

#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 2 \
}

#define GEN9_MOCS_PTE { \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 1 \
}

struct anv_device_memory {
   struct anv_bo bo;
   uint32_t type_index;
   VkDeviceSize map_size;
   void * map;
};

/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t Reserved;
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
   float PointWidth;
};

struct anv_descriptor_set_binding_layout {
#ifndef NDEBUG
   /* The type of the descriptors in this binding */
   VkDescriptorType type;
#endif

   /* Number of array elements in this binding */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

   struct {
      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
      int16_t image_index;
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};

struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};

struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;
      };

      struct anv_buffer_view *buffer_view;
   };
};

struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t size;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];
};

struct anv_descriptor_pool {
   uint32_t size;
   uint32_t next;
   uint32_t free_list;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   char data[0];
};

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);

#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX

struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to. The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint8_t binding;

   /* Index in the binding */
   uint8_t index;

   /* Input attachment index (relative to the subpass) */
   uint8_t input_attachment_index;

   /* For a storage image, whether it is write-only */
   bool write_only;
};

struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   unsigned char sha1[20];
};

struct anv_buffer {
   struct anv_device * device;
   VkDeviceSize size;

   VkBufferUsageFlags usage;

   /* Set when bound */
   struct anv_bo * bo;
   VkDeviceSize offset;
};

enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;

enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
   ANV_PIPE_CS_STALL_BIT = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
    * a flush has happened but not a CS stall. The next time we do any sort
    * of invalidation we need to insert a CS stall at that time. Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
};

#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
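
/* The deferred-stall dance, sketched: when a flush is queued we only note
 * that a CS stall will eventually be needed, and we pay for it on the next
 * invalidate rather than on every flush:
 *
 *    if (bits & ANV_PIPE_FLUSH_BITS)
 *       bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;
 *
 *    if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
 *        (bits & ANV_PIPE_NEEDS_CS_STALL_BIT))
 *       bits |= ANV_PIPE_CS_STALL_BIT;
 */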

struct anv_vertex_binding {
   struct anv_buffer * buffer;
   VkDeviceSize offset;
};

struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Our hardware only provides zero-based vertex and instance IDs so, in
    * order to satisfy the Vulkan requirements, we may have to push one or
    * both of these into the shader.
    */
   uint32_t base_vertex;
   uint32_t base_instance;

   /* Offsets and ranges for dynamically bound buffers */
   struct {
      uint32_t offset;
      uint32_t range;
   } dynamic[MAX_DYNAMIC_BUFFERS];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];
};

struct anv_dynamic_state {
   struct {
      uint32_t count;
      VkViewport viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t count;
      VkRect2D scissors[MAX_SCISSORS];
   } scissor;

   float line_width;

   struct {
      float bias;
      float clamp;
      float slope;
   } depth_bias;

   float blend_constants[4];

   struct {
      float min;
      float max;
   } depth_bounds;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_reference;
};

extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   enum isl_aux_usage aux_usage;
   enum isl_aux_usage input_aux_usage;
   struct anv_state color_rt_state;
   struct anv_state input_att_state;

   VkImageLayout current_layout;
   VkImageAspectFlags pending_clear_aspects;
   bool fast_clear;
   VkClearValue clear_value;
   bool clear_color_is_zero_one;
};

/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t current_pipeline;
   const struct gen_l3_config * current_l3_config;
   uint32_t vb_dirty;
   anv_cmd_dirty_mask_t dirty;
   anv_cmd_dirty_mask_t compute_dirty;
   enum anv_pipe_bits pending_pipe_bits;
   uint32_t num_workgroups_offset;
   struct anv_bo *num_workgroups_bo;
   VkShaderStageFlags descriptors_dirty;
   VkShaderStageFlags push_constants_dirty;
   uint32_t scratch_size;
   struct anv_pipeline * pipeline;
   struct anv_pipeline * compute_pipeline;
   struct anv_framebuffer * framebuffer;
   struct anv_render_pass * pass;
   struct anv_subpass * subpass;
   VkRect2D render_area;
   uint32_t restart_index;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set * descriptors[MAX_SETS];
   VkShaderStageFlags push_constant_stages;
   struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
   struct anv_state binding_tables[MESA_SHADER_STAGES];
   struct anv_state samplers[MESA_SHADER_STAGES];
   struct anv_dynamic_state dynamic;
   bool need_query_wa;

   /**
    * Whether or not the gen8 PMA fix is enabled. We ensure that, at the top
    * of any command buffer, it is disabled by disabling it in
    * EndCommandBuffer and before invoking the secondary in ExecuteCommands.
    */
   bool pma_fix_enabled;

   /**
    * Whether or not we know for certain that HiZ is enabled for the current
    * subpass. If, for whatever reason, we are unsure as to whether HiZ is
    * enabled or not, this will be false.
    */
   bool hiz_enabled;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state * attachments;

   /**
    * Surface states for color render targets. These are stored in a single
    * flat array. For depth-stencil attachments, the surface state is simply
    * left blank.
    */
   struct anv_state render_pass_states;

   /**
    * A null surface state of the right size to match the framebuffer. This
    * is one of the states in render_pass_states.
    */
   struct anv_state null_surface_state;

   struct {
      struct anv_buffer * index_buffer;
      uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
      uint32_t index_offset;
   } gen7;
};

struct anv_cmd_pool {
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192

enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};

struct anv_cmd_buffer {
   VK_LOADER_DATA _loader_data;

   struct anv_device * device;

   struct anv_cmd_pool * pool;
   struct list_head pool_link;

   struct anv_batch batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head batch_bos;
   enum anv_cmd_buffer_exec_mode exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector bt_blocks;
   uint32_t bt_next;

   struct anv_reloc_list surface_relocs;
   /** Last seen surface state block pool center bo offset */
   uint32_t last_ss_pool_center;

   /* Serial for tracking buffer completion */
   uint32_t serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;

   struct anv_cmd_state state;
};

VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
                                struct anv_cmd_buffer *cmd_buffer);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size);
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))

struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);

void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                      struct anv_render_pass *pass,
                                      struct anv_framebuffer *framebuffer,
                                      const VkClearValue *clear_values);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);

enum anv_fence_state {
   /** Indicates that this is a new (or newly reset) fence */
   ANV_FENCE_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   ANV_FENCE_STATE_SUBMITTED,

   ANV_FENCE_STATE_SIGNALED,
};

struct anv_fence {
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   enum anv_fence_state state;
};

struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};

struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};

void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     const struct anv_pipeline_layout *pipeline_layout,
                     const VkSpecializationInfo *spec_info);

static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits) \
   for (gl_shader_stage stage, \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
        stage = __builtin_ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))
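
/* Example: iterate only the stages named in a VkShaderStageFlags mask,
 * relying on the Vulkan stage bits lining up with gl_shader_stage:
 *
 *    anv_foreach_stage(s, pipeline->active_stages) {
 *       // s is a gl_shader_stage; runs once per set bit
 *    }
 */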

struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding * surface_to_descriptor;
   struct anv_pipeline_binding * sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;

   /* Prog data follows, then params, then the key, all aligned to 8-bytes */
};

struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   __sync_fetch_and_add(&shader->ref_cnt, 1);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
      anv_shader_bin_destroy(device, shader);
}

struct anv_pipeline {
   struct anv_device * device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_pipeline_layout * layout;

   bool needs_data_cache;

   struct anv_shader_bin * shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config * l3_config;
      uint32_t total_size;
   } urb;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   uint32_t vb_used;
   uint32_t binding_stride[MAX_VBS];
   bool instancing_enable[MAX_VBS];
   bool primitive_restart;
   uint32_t topology;

   uint32_t cs_right_mask;

   bool writes_depth;
   bool depth_test_enable;
   bool writes_stencil;
   bool stencil_test_enable;
   bool depth_clamp_enable;
   bool kill_pixel;

   struct {
      uint32_t sf[7];
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t sf[4];
      uint32_t raster[5];
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;

   uint32_t interface_descriptor_data[8];
};

static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage) \
static inline const struct brw_##prefix##_prog_data * \
get_##prefix##_prog_data(const struct anv_pipeline *pipeline) \
{ \
   if (anv_pipeline_has_stage(pipeline, stage)) { \
      return (const struct brw_##prefix##_prog_data *) \
             pipeline->shaders[stage]->prog_data; \
   } else { \
      return NULL; \
   } \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
ANV_DECL_GET_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)

static inline const struct brw_vue_prog_data *
anv_pipeline_get_last_vue_prog_data(const struct anv_pipeline *pipeline)
{
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
      return &get_gs_prog_data(pipeline)->base;
   else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      return &get_tes_prog_data(pipeline)->base;
   else
      return &get_vs_prog_data(pipeline)->base;
}

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);

struct anv_format {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;
};

struct anv_format
anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
               VkImageAspectFlags aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}

static inline struct isl_swizzle
anv_swizzle_for_render(struct isl_swizzle swizzle)
{
   /* Sometimes the swizzle will have alpha map to one. We do this to fake
    * RGB as RGBA for texturing
    */
   assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
          swizzle.a == ISL_CHANNEL_SELECT_ALPHA);

   /* But it doesn't matter what we render to that channel */
   swizzle.a = ISL_CHANNEL_SELECT_ALPHA;

   return swizzle;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);

/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   VkImageType type;
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   VkDeviceSize size;
   uint32_t alignment;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;

   /**
    * Image subsurfaces
    *
    * For each foo, anv_image::foo_surface is valid if and only if
    * anv_image::aspects has a foo aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    */
   union {
      struct anv_surface color_surface;

      struct {
         struct anv_surface depth_surface;
         struct anv_surface stencil_surface;
      };
   };

   /**
    * For color images, this is the aux usage for this image when not used as a
    * color attachment.
    *
    * For depth/stencil images, this is set to ISL_AUX_USAGE_HIZ if the image
    * has a HiZ buffer.
    */
   enum isl_aux_usage aux_usage;

   struct anv_surface aux_surface;
};

/* Returns true if a HiZ-enabled depth buffer can be sampled from. */
static inline bool
anv_can_sample_with_hiz(uint8_t gen, uint32_t samples)
{
   return gen >= 8 && samples == 1;
}

void
anv_gen8_hiz_op_resolve(struct anv_cmd_buffer *cmd_buffer,
                        const struct anv_image *image,
                        enum blorp_hiz_op op);

static inline uint32_t
anv_get_layerCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
          image->array_size - range->baseArrayLayer : range->layerCount;
}

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}


struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */

   struct isl_view isl;

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
   struct anv_state sampler_surface_state;

   /**
    * RENDER_SURFACE_STATE when using image as a storage image. Separate states
    * for write-only and readable, using the real format for write-only and the
    * lowered format for readable.
    */
   struct anv_state storage_surface_state;
   struct anv_state writeonly_storage_surface_state;

   struct brw_image_param storage_image_param;
};

struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   uint32_t stride;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks* alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;
   struct anv_state writeonly_storage_surface_state;

   struct brw_image_param storage_image_param;
};

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);

static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}


void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);

void anv_image_view_fill_image_param(struct anv_device *device,
                                     struct anv_image_view *view,
                                     struct brw_image_param *param);
void anv_buffer_view_fill_image_param(struct anv_device *device,
                                      struct anv_buffer_view *view,
                                      struct brw_image_param *param);

struct anv_sampler {
   uint32_t state[4];
};

struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view * attachments[0];
};

struct anv_subpass {
   uint32_t input_count;
   uint32_t * input_attachments;
   uint32_t color_count;
   uint32_t * color_attachments;
   uint32_t * resolve_attachments;

   /* TODO: Consider storing the depth/stencil VkAttachmentReference
    * instead of its two structure members (below) individually.
    */
   uint32_t depth_stencil_attachment;
   VkImageLayout depth_stencil_layout;

   /** Subpass has a depth/stencil self-dependency */
   bool has_ds_self_dep;

   /** Subpass has at least one resolve attachment */
   bool has_resolve;
};

enum anv_subpass_usage {
   ANV_SUBPASS_USAGE_DRAW = (1 << 0),
   ANV_SUBPASS_USAGE_INPUT = (1 << 1),
   ANV_SUBPASS_USAGE_RESOLVE_SRC = (1 << 2),
   ANV_SUBPASS_USAGE_RESOLVE_DST = (1 << 3),
};

struct anv_render_pass_attachment {
   /* TODO: Consider using VkAttachmentDescription instead of storing each of
    * its members individually.
    */
   VkFormat format;
   uint32_t samples;
   VkImageUsageFlags usage;
   VkAttachmentLoadOp load_op;
   VkAttachmentStoreOp store_op;
   VkAttachmentLoadOp stencil_load_op;
   VkImageLayout initial_layout;
   VkImageLayout final_layout;

   /* An array, indexed by subpass id, of how the attachment will be used. */
   enum anv_subpass_usage * subpass_usage;

   /* The subpass id in which the attachment will be used last. */
   uint32_t last_subpass_idx;
};

struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   uint32_t * subpass_attachments;
   enum anv_subpass_usage * subpass_usages;
   struct anv_render_pass_attachment * attachments;
   struct anv_subpass subpasses[0];
};

struct anv_query_pool_slot {
   uint64_t begin;
   uint64_t end;
   uint64_t available;
};

struct anv_query_pool {
   VkQueryType type;
   uint32_t slots;
   struct anv_bo bo;
};

void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);

enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);

#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
   \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *) _handle; \
   } \
   \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType) _obj; \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
   \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *)(uintptr_t) _handle; \
   } \
   \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType)(uintptr_t) _obj; \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
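
/* Entrypoints use these casts to recover driver structs from API handles,
 * e.g.:
 *
 *    VkResult anv_CreateFence(VkDevice _device, ...)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       // device is now a struct anv_device *
 *    }
 */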

ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)

/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */