anv: use correct header guards
[mesa.git] / src / intel / vulkan / anv_private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef ANV_PRIVATE_H
25 #define ANV_PRIVATE_H
26
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdbool.h>
30 #include <pthread.h>
31 #include <assert.h>
32 #include <stdint.h>
33 #include <i915_drm.h>
34
35 #ifdef HAVE_VALGRIND
36 #include <valgrind.h>
37 #include <memcheck.h>
38 #define VG(x) x
39 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
40 #else
41 #define VG(x)
42 #endif
43
44 #include "common/gen_device_info.h"
45 #include "blorp/blorp.h"
46 #include "brw_compiler.h"
47 #include "util/macros.h"
48 #include "util/list.h"
49
50 /* Forward declarations needed for WSI entrypoints */
51 struct wl_surface;
52 struct wl_display;
53 typedef struct xcb_connection_t xcb_connection_t;
54 typedef uint32_t xcb_visualid_t;
55 typedef uint32_t xcb_window_t;
56
57 struct gen_l3_config;
58
59 #include <vulkan/vulkan.h>
60 #include <vulkan/vulkan_intel.h>
61 #include <vulkan/vk_icd.h>
62
63 #include "anv_entrypoints.h"
64 #include "brw_context.h"
65 #include "isl/isl.h"
66
67 #ifdef __cplusplus
68 extern "C" {
69 #endif
70
71 #define MAX_VBS 32
72 #define MAX_SETS 8
73 #define MAX_RTS 8
74 #define MAX_VIEWPORTS 16
75 #define MAX_SCISSORS 16
76 #define MAX_PUSH_CONSTANTS_SIZE 128
77 #define MAX_DYNAMIC_BUFFERS 16
78 #define MAX_IMAGES 8
79 #define MAX_SAMPLES_LOG2 4 /* SKL supports 16 samples */
80
81 #define anv_noreturn __attribute__((__noreturn__))
82 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
83
84 #define MIN(a, b) ((a) < (b) ? (a) : (b))
85 #define MAX(a, b) ((a) > (b) ? (a) : (b))
86
87 static inline uint32_t
88 align_down_npot_u32(uint32_t v, uint32_t a)
89 {
90 return v - (v % a);
91 }
92
93 static inline uint32_t
94 align_u32(uint32_t v, uint32_t a)
95 {
96 assert(a != 0 && a == (a & -a));
97 return (v + a - 1) & ~(a - 1);
98 }
99
100 static inline uint64_t
101 align_u64(uint64_t v, uint64_t a)
102 {
103 assert(a != 0 && a == (a & -a));
104 return (v + a - 1) & ~(a - 1);
105 }
106
107 static inline int32_t
108 align_i32(int32_t v, int32_t a)
109 {
110 assert(a != 0 && a == (a & -a));
111 return (v + a - 1) & ~(a - 1);
112 }
113
114 /** Alignment must be a power of 2. */
115 static inline bool
116 anv_is_aligned(uintmax_t n, uintmax_t a)
117 {
118 assert(a == (a & -a));
119 return (n & (a - 1)) == 0;
120 }
121
122 static inline uint32_t
123 anv_minify(uint32_t n, uint32_t levels)
124 {
125 if (unlikely(n == 0))
126 return 0;
127 else
128 return MAX(n >> levels, 1);
129 }
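/* Usage sketch (illustrative only): anv_minify gives the size of a mip
 * level from the base size, clamping at 1, so the chain for a 35-wide
 * image is 35, 17, 8, 4, 2, 1, 1, ...
 *
 *    anv_minify(35, 0) == 35
 *    anv_minify(35, 1) == 17
 *    anv_minify(35, 6) == 1   // never reaches 0 for nonzero input
 */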
130
131 static inline float
132 anv_clamp_f(float f, float min, float max)
133 {
134 assert(min < max);
135
136 if (f > max)
137 return max;
138 else if (f < min)
139 return min;
140 else
141 return f;
142 }
143
144 static inline bool
145 anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
146 {
147 if (*inout_mask & clear_mask) {
148 *inout_mask &= ~clear_mask;
149 return true;
150 } else {
151 return false;
152 }
153 }
154
155 #define for_each_bit(b, dword) \
156 for (uint32_t __dword = (dword); \
157 (b) = __builtin_ffs(__dword) - 1, __dword; \
158 __dword &= ~(1 << (b)))
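/* Usage sketch (illustrative only; the loop variable is declared by the
 * caller): iterate over the set bits of a mask.
 *
 *    uint32_t b, dirty = (1 << 2) | (1 << 5);
 *    for_each_bit(b, dirty)
 *       printf("%u\n", b);   // prints 2, then 5
 */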
159
160 #define typed_memcpy(dest, src, count) ({ \
161 static_assert(sizeof(*src) == sizeof(*dest), ""); \
162 memcpy((dest), (src), (count) * sizeof(*(src))); \
163 })
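/* Usage sketch (illustrative only): typed_memcpy adds a compile-time
 * check that source and destination element sizes match.
 *
 *    VkViewport dst[MAX_VIEWPORTS], src[MAX_VIEWPORTS];
 *    typed_memcpy(dst, src, count);   // static_assert fires on a size mismatch
 */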
164
165 #define zero(x) (memset(&(x), 0, sizeof(x)))
166
167 /* Define no kernel as 1, since that's an illegal offset for a kernel */
168 #define NO_KERNEL 1
169
170 struct anv_common {
171 VkStructureType sType;
172 const void* pNext;
173 };
174
175 /* Whenever we generate an error, pass it through this function. Useful for
176 * debugging, where we can break on it. Only call at error site, not when
177 * propagating errors. Might be useful to plug in a stack trace here.
178 */
179
180 VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);
181
182 #ifdef DEBUG
183 #define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL)
184 #define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__)
185 #else
186 #define vk_error(error) error
187 #define vk_errorf(error, format, ...) error
188 #endif
189
190 void __anv_finishme(const char *file, int line, const char *format, ...)
191 anv_printflike(3, 4);
192 void anv_loge(const char *format, ...) anv_printflike(1, 2);
193 void anv_loge_v(const char *format, va_list va);
194
195 /**
196 * Print a FINISHME message, including its source location.
197 */
198 #define anv_finishme(format, ...) \
199 __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
200
201 /* A non-fatal assert. Useful for debugging. */
202 #ifdef DEBUG
203 #define anv_assert(x) ({ \
204 if (unlikely(!(x))) \
205 fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
206 })
207 #else
208 #define anv_assert(x)
209 #endif
210
211 /**
212 * If a block of code is annotated with anv_validate, then the block runs only
213 * in debug builds.
214 */
215 #ifdef DEBUG
216 #define anv_validate if (1)
217 #else
218 #define anv_validate if (0)
219 #endif
220
221 void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
222 void anv_abortfv(const char *format, va_list va) anv_noreturn;
223
224 #define stub_return(v) \
225 do { \
226 anv_finishme("stub %s", __func__); \
227 return (v); \
228 } while (0)
229
230 #define stub() \
231 do { \
232 anv_finishme("stub %s", __func__); \
233 return; \
234 } while (0)
235
236 /**
237 * A dynamically growable, circular buffer. Elements are added at head and
238 * removed from tail. head and tail are free-running uint32_t indices and we
239 * only compute the modulo with size when accessing the array. This way,
240 * the number of bytes in the queue is always head - tail, even in case of
241 * wraparound.
242 */
243
244 struct anv_vector {
245 uint32_t head;
246 uint32_t tail;
247 uint32_t element_size;
248 uint32_t size;
249 void *data;
250 };
251
252 int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
253 void *anv_vector_add(struct anv_vector *queue);
254 void *anv_vector_remove(struct anv_vector *queue);
255
256 static inline int
257 anv_vector_length(struct anv_vector *queue)
258 {
259 return (queue->head - queue->tail) / queue->element_size;
260 }
261
262 static inline void *
263 anv_vector_head(struct anv_vector *vector)
264 {
265 assert(vector->tail < vector->head);
266 return (void *)((char *)vector->data +
267 ((vector->head - vector->element_size) &
268 (vector->size - 1)));
269 }
270
271 static inline void *
272 anv_vector_tail(struct anv_vector *vector)
273 {
274 return (void *)((char *)vector->data + (vector->tail & (vector->size - 1)));
275 }
276
277 static inline void
278 anv_vector_finish(struct anv_vector *queue)
279 {
280 free(queue->data);
281 }
282
283 #define anv_vector_foreach(elem, queue) \
284 static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
285 for (uint32_t __anv_vector_offset = (queue)->tail; \
286 elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
287 __anv_vector_offset += (queue)->element_size)
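/* Usage sketch (illustrative only; return values unchecked): a queue of
 * fixed-size elements backed by a power-of-two ring.
 *
 *    struct anv_vector v;
 *    anv_vector_init(&v, 16, 4096);     // element size, ring size in bytes
 *    void *elem = anv_vector_add(&v);   // enqueue at head
 *    elem = anv_vector_remove(&v);      // dequeue at tail
 *    anv_vector_finish(&v);
 */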
288
289 struct anv_bo {
290 uint32_t gem_handle;
291
292 /* Index into the current validation list. This is used by the
293 * validation list building algorithm to track which buffers are already
294 * in the validation list so that we can ensure uniqueness.
295 */
296 uint32_t index;
297
298 /* Last known offset. This value is provided by the kernel when we
299 * execbuf and is used as the presumed offset for the next bunch of
300 * relocations.
301 */
302 uint64_t offset;
303
304 uint64_t size;
305 void *map;
306
307 /* We need to set the WRITE flag on winsys bos so GEM will know we're
308 * writing to them and synchronize uses on other rings (eg if the display
309 * server uses the blitter ring).
310 */
311 bool is_winsys_bo;
312 };
313
314 /* Represents a lock-free linked list of "free" things. This is used by
315 * both the block pool and the state pools. Unfortunately, in order to
316 * solve the ABA problem, we can't use a single uint32_t head.
317 */
318 union anv_free_list {
319 struct {
320 int32_t offset;
321
322 /* A simple count that is incremented every time the head changes. */
323 uint32_t count;
324 };
325 uint64_t u64;
326 };
327
328 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
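/* Pop sketch (illustrative only; not the driver's actual implementation):
 * because the count shares one 64-bit word with the offset, a single wide
 * compare-and-swap fails whenever a concurrent push/pop has bumped the
 * count, even if the same offset has reappeared at the head.
 *
 *    union anv_free_list current, next, old = *list;
 *    do {
 *       current = old;
 *       next.offset = offset_of_next_free(current.offset);  // hypothetical
 *       next.count = current.count + 1;
 *       old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64,
 *                                             next.u64);
 *    } while (old.u64 != current.u64);
 */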
329
330 struct anv_block_state {
331 union {
332 struct {
333 uint32_t next;
334 uint32_t end;
335 };
336 uint64_t u64;
337 };
338 };
339
340 struct anv_block_pool {
341 struct anv_device *device;
342
343 struct anv_bo bo;
344
345 /* The offset from the start of the bo to the "center" of the block
346 * pool. Pointers to allocated blocks are given by
347 * bo.map + center_bo_offset + offsets.
348 */
349 uint32_t center_bo_offset;
350
351 /* Current memory map of the block pool. This pointer may or may not
352 * point to the actual beginning of the block pool memory. If
353 * anv_block_pool_alloc_back has ever been called, then this pointer
354 * will point to the "center" position of the buffer and all offsets
355 * (negative or positive) given out by the block pool alloc functions
356 * will be valid relative to this pointer.
357 *
358 * In particular, map == bo.map + center_bo_offset
359 */
360 void *map;
361 int fd;
362
363 /**
364 * Array of mmaps and gem handles owned by the block pool, reclaimed when
365 * the block pool is destroyed.
366 */
367 struct anv_vector mmap_cleanups;
368
369 uint32_t block_size;
370
371 union anv_free_list free_list;
372 struct anv_block_state state;
373
374 union anv_free_list back_free_list;
375 struct anv_block_state back_state;
376 };
377
378 /* Block pools are backed by a fixed-size 4GB memfd */
379 #define BLOCK_POOL_MEMFD_SIZE (1ull << 32)
380
381 /* The center of the block pool is also the middle of the memfd. This may
382 * change in the future if we decide differently for some reason.
383 */
384 #define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
385
386 static inline uint32_t
387 anv_block_pool_size(struct anv_block_pool *pool)
388 {
389 return pool->state.end + pool->back_state.end;
390 }
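/* Address sketch (illustrative only): offsets handed out by the alloc
 * functions are relative to the pool's center and may be negative for
 * allocations from the back, so the CPU pointer for any offset is:
 *
 *    int32_t offset = anv_block_pool_alloc(pool);        // front: >= 0
 *    int32_t back   = anv_block_pool_alloc_back(pool);   // back: < 0
 *    void *ptr = pool->map + offset;  // == bo.map + center_bo_offset + offset
 */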
391
392 struct anv_state {
393 int32_t offset;
394 uint32_t alloc_size;
395 void *map;
396 };
397
398 struct anv_fixed_size_state_pool {
399 size_t state_size;
400 union anv_free_list free_list;
401 struct anv_block_state block;
402 };
403
404 #define ANV_MIN_STATE_SIZE_LOG2 6
405 #define ANV_MAX_STATE_SIZE_LOG2 17
406
407 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
408
409 struct anv_state_pool {
410 struct anv_block_pool *block_pool;
411 struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
412 };
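/* Bucketing sketch (illustrative only): a requested size maps to the
 * fixed-size bucket for the next power of two, clamped to the 2^6 = 64-byte
 * minimum; log2_ceil() stands in for whatever helper the implementation
 * actually uses.
 *
 *    unsigned size_log2 = MAX(log2_ceil(size), ANV_MIN_STATE_SIZE_LOG2);
 *    unsigned bucket = size_log2 - ANV_MIN_STATE_SIZE_LOG2;
 *    assert(bucket < ANV_STATE_BUCKETS);
 */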
413
414 struct anv_state_stream_block;
415
416 struct anv_state_stream {
417 struct anv_block_pool *block_pool;
418
419 /* The current working block */
420 struct anv_state_stream_block *block;
421
422 /* Offset at which the current block starts */
423 uint32_t start;
424 /* Offset at which to allocate the next state */
425 uint32_t next;
426 /* Offset at which the current block ends */
427 uint32_t end;
428 };
429
430 #define CACHELINE_SIZE 64
431 #define CACHELINE_MASK 63
432
433 static inline void
434 anv_clflush_range(void *start, size_t size)
435 {
436 void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
437 void *end = start + size;
438
439 __builtin_ia32_mfence();
440 while (p < end) {
441 __builtin_ia32_clflush(p);
442 p += CACHELINE_SIZE;
443 }
444 }
445
446 static inline void
447 anv_state_clflush(struct anv_state state)
448 {
449 anv_clflush_range(state.map, state.alloc_size);
450 }
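/* Usage sketch (illustrative only): on platforms without LLC, CPU writes
 * to states must be flushed from the cache before the GPU reads them:
 *
 *    if (!device->info.has_llc)
 *       anv_state_clflush(state);
 */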
451
452 void anv_block_pool_init(struct anv_block_pool *pool,
453 struct anv_device *device, uint32_t block_size);
454 void anv_block_pool_finish(struct anv_block_pool *pool);
455 int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
456 int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
457 void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
458 void anv_state_pool_init(struct anv_state_pool *pool,
459 struct anv_block_pool *block_pool);
460 void anv_state_pool_finish(struct anv_state_pool *pool);
461 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
462 size_t state_size, size_t alignment);
463 void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
464 void anv_state_stream_init(struct anv_state_stream *stream,
465 struct anv_block_pool *block_pool);
466 void anv_state_stream_finish(struct anv_state_stream *stream);
467 struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
468 uint32_t size, uint32_t alignment);
469
470 /**
471 * Implements a pool of re-usable BOs. The interface is identical to that
472 * of block_pool except that each block is its own BO.
473 */
474 struct anv_bo_pool {
475 struct anv_device *device;
476
477 void *free_list[16];
478 };
479
480 void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
481 void anv_bo_pool_finish(struct anv_bo_pool *pool);
482 VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
483 uint32_t size);
484 void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
485
486 struct anv_scratch_pool {
487 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
488 struct anv_bo bos[16][MESA_SHADER_STAGES];
489 };
490
491 void anv_scratch_pool_init(struct anv_device *device,
492 struct anv_scratch_pool *pool);
493 void anv_scratch_pool_finish(struct anv_device *device,
494 struct anv_scratch_pool *pool);
495 struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
496 struct anv_scratch_pool *pool,
497 gl_shader_stage stage,
498 unsigned per_thread_scratch);
499
500 void *anv_resolve_entrypoint(uint32_t index);
501
502 extern struct anv_dispatch_table dtable;
503
504 #define ANV_CALL(func) ({ \
505 if (dtable.func == NULL) { \
506 size_t idx = offsetof(struct anv_dispatch_table, func) / sizeof(void *); \
507 dtable.entrypoints[idx] = anv_resolve_entrypoint(idx); \
508 } \
509 dtable.func; \
510 })
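/* Usage sketch (illustrative only): ANV_CALL resolves the entrypoint
 * lazily on first use, then calls through the dispatch table:
 *
 *    ANV_CALL(CmdPipelineBarrier)(cmd_buffer_handle, ...);
 */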
511
512 static inline void *
513 anv_alloc(const VkAllocationCallbacks *alloc,
514 size_t size, size_t align,
515 VkSystemAllocationScope scope)
516 {
517 return alloc->pfnAllocation(alloc->pUserData, size, align, scope);
518 }
519
520 static inline void *
521 anv_realloc(const VkAllocationCallbacks *alloc,
522 void *ptr, size_t size, size_t align,
523 VkSystemAllocationScope scope)
524 {
525 return alloc->pfnReallocation(alloc->pUserData, ptr, size, align, scope);
526 }
527
528 static inline void
529 anv_free(const VkAllocationCallbacks *alloc, void *data)
530 {
531 alloc->pfnFree(alloc->pUserData, data);
532 }
533
534 static inline void *
535 anv_alloc2(const VkAllocationCallbacks *parent_alloc,
536 const VkAllocationCallbacks *alloc,
537 size_t size, size_t align,
538 VkSystemAllocationScope scope)
539 {
540 if (alloc)
541 return anv_alloc(alloc, size, align, scope);
542 else
543 return anv_alloc(parent_alloc, size, align, scope);
544 }
545
546 static inline void
547 anv_free2(const VkAllocationCallbacks *parent_alloc,
548 const VkAllocationCallbacks *alloc,
549 void *data)
550 {
551 if (alloc)
552 anv_free(alloc, data);
553 else
554 anv_free(parent_alloc, data);
555 }
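/* Usage sketch (illustrative only; anv_foo is a placeholder type): object
 * creation honors the optional per-call allocator and falls back to the
 * parent's:
 *
 *    struct anv_foo *foo =
 *       anv_alloc2(&device->alloc, pAllocator, sizeof(*foo), 8,
 *                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 *    ...
 *    anv_free2(&device->alloc, pAllocator, foo);
 */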
556
557 struct anv_wsi_interface;
558
559 #define VK_ICD_WSI_PLATFORM_MAX 5
560
561 struct anv_physical_device {
562 VK_LOADER_DATA _loader_data;
563
564 struct anv_instance * instance;
565 uint32_t chipset_id;
566 char path[20];
567 const char * name;
568 struct gen_device_info info;
569 uint64_t aperture_size;
570 struct brw_compiler * compiler;
571 struct isl_device isl_dev;
572 int cmd_parser_version;
573
574 uint32_t eu_total;
575 uint32_t subslice_total;
576
577 struct anv_wsi_interface * wsi[VK_ICD_WSI_PLATFORM_MAX];
578 };
579
580 struct anv_instance {
581 VK_LOADER_DATA _loader_data;
582
583 VkAllocationCallbacks alloc;
584
585 uint32_t apiVersion;
586 int physicalDeviceCount;
587 struct anv_physical_device physicalDevice;
588 };
589
590 VkResult anv_init_wsi(struct anv_physical_device *physical_device);
591 void anv_finish_wsi(struct anv_physical_device *physical_device);
592
593 struct anv_meta_state {
594 VkAllocationCallbacks alloc;
595
596 /**
597 * Use array element `i` for images with `2^i` samples.
598 */
599 struct {
600 /**
601 * Pipeline N is used to clear color attachment N of the current
602 * subpass.
603 *
604 * HACK: We use one pipeline per color attachment to work around the
605 * compiler's inability to dynamically set the render target index of
606 * the render target write message.
607 */
608 struct anv_pipeline *color_pipelines[MAX_RTS];
609
610 struct anv_pipeline *depth_only_pipeline;
611 struct anv_pipeline *stencil_only_pipeline;
612 struct anv_pipeline *depthstencil_pipeline;
613 } clear[1 + MAX_SAMPLES_LOG2];
614
615 struct {
616 VkRenderPass render_pass;
617
618 /** Pipeline that blits from a 1D image. */
619 VkPipeline pipeline_1d_src;
620
621 /** Pipeline that blits from a 2D image. */
622 VkPipeline pipeline_2d_src;
623
624 /** Pipeline that blits from a 3D image. */
625 VkPipeline pipeline_3d_src;
626
627 VkPipelineLayout pipeline_layout;
628 VkDescriptorSetLayout ds_layout;
629 } blit;
630
631 struct {
632 VkRenderPass render_pass;
633
634 VkPipelineLayout img_p_layout;
635 VkDescriptorSetLayout img_ds_layout;
636 VkPipelineLayout buf_p_layout;
637 VkDescriptorSetLayout buf_ds_layout;
638
639 /* Pipelines indexed by source and destination type. See the
640 * blit2d_src_type and blit2d_dst_type enums in anv_meta_blit2d.c for
641 * what these mean.
642 */
643 VkPipeline pipelines[2][3];
644 } blit2d;
645
646 struct {
647 /** Pipeline [i] resolves an image with 2^(i+1) samples. */
648 VkPipeline pipelines[MAX_SAMPLES_LOG2];
649
650 VkRenderPass pass;
651 VkPipelineLayout pipeline_layout;
652 VkDescriptorSetLayout ds_layout;
653 } resolve;
654 };
655
656 struct anv_queue {
657 VK_LOADER_DATA _loader_data;
658
659 struct anv_device * device;
660
661 struct anv_state_pool * pool;
662 };
663
664 struct anv_pipeline_cache {
665 struct anv_device * device;
666 pthread_mutex_t mutex;
667
668 struct hash_table * cache;
669 };
670
671 struct anv_pipeline_bind_map;
672
673 void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
674 struct anv_device *device,
675 bool cache_enabled);
676 void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
677
678 struct anv_shader_bin *
679 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
680 const void *key, uint32_t key_size);
681 struct anv_shader_bin *
682 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
683 const void *key_data, uint32_t key_size,
684 const void *kernel_data, uint32_t kernel_size,
685 const void *prog_data, uint32_t prog_data_size,
686 const struct anv_pipeline_bind_map *bind_map);
687
688 struct anv_device {
689 VK_LOADER_DATA _loader_data;
690
691 VkAllocationCallbacks alloc;
692
693 struct anv_instance * instance;
694 uint32_t chipset_id;
695 struct gen_device_info info;
696 struct isl_device isl_dev;
697 int context_id;
698 int fd;
699 bool can_chain_batches;
700 bool robust_buffer_access;
701
702 struct anv_bo_pool batch_bo_pool;
703
704 struct anv_block_pool dynamic_state_block_pool;
705 struct anv_state_pool dynamic_state_pool;
706
707 struct anv_block_pool instruction_block_pool;
708 struct anv_state_pool instruction_state_pool;
709
710 struct anv_block_pool surface_state_block_pool;
711 struct anv_state_pool surface_state_pool;
712
713 struct anv_bo workaround_bo;
714
715 struct anv_meta_state meta_state;
716
717 struct anv_pipeline_cache blorp_shader_cache;
718 struct blorp_context blorp;
719
720 struct anv_state border_colors;
721
722 struct anv_queue queue;
723
724 struct anv_scratch_pool scratch_pool;
725
726 uint32_t default_mocs;
727
728 pthread_mutex_t mutex;
729 };
730
731 void anv_device_get_cache_uuid(void *uuid);
732
733 void anv_device_init_blorp(struct anv_device *device);
734 void anv_device_finish_blorp(struct anv_device *device);
735
736 void* anv_gem_mmap(struct anv_device *device,
737 uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
738 void anv_gem_munmap(void *p, uint64_t size);
739 uint32_t anv_gem_create(struct anv_device *device, size_t size);
740 void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
741 uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
742 int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
743 int anv_gem_execbuffer(struct anv_device *device,
744 struct drm_i915_gem_execbuffer2 *execbuf);
745 int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
746 uint32_t stride, uint32_t tiling);
747 int anv_gem_create_context(struct anv_device *device);
748 int anv_gem_destroy_context(struct anv_device *device, int context);
749 int anv_gem_get_param(int fd, uint32_t param);
750 bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
751 int anv_gem_get_aperture(int fd, uint64_t *size);
752 int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
753 uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
754 int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
755 int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
756 uint32_t read_domains, uint32_t write_domain);
757
758 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
759
760 struct anv_reloc_list {
761 size_t num_relocs;
762 size_t array_length;
763 struct drm_i915_gem_relocation_entry * relocs;
764 struct anv_bo ** reloc_bos;
765 };
766
767 VkResult anv_reloc_list_init(struct anv_reloc_list *list,
768 const VkAllocationCallbacks *alloc);
769 void anv_reloc_list_finish(struct anv_reloc_list *list,
770 const VkAllocationCallbacks *alloc);
771
772 uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
773 const VkAllocationCallbacks *alloc,
774 uint32_t offset, struct anv_bo *target_bo,
775 uint32_t delta);
776
777 struct anv_batch_bo {
778 /* Link in the anv_cmd_buffer.owned_batch_bos list */
779 struct list_head link;
780
781 struct anv_bo bo;
782
783 /* Bytes actually consumed in this batch BO */
784 size_t length;
785
786 /* Last seen surface state block pool bo offset */
787 uint32_t last_ss_pool_bo_offset;
788
789 struct anv_reloc_list relocs;
790 };
791
792 struct anv_batch {
793 const VkAllocationCallbacks * alloc;
794
795 void * start;
796 void * end;
797 void * next;
798
799 struct anv_reloc_list * relocs;
800
801 /* This callback is called (with the associated user data) in the event
802 * that the batch runs out of space.
803 */
804 VkResult (*extend_cb)(struct anv_batch *, void *);
805 void * user_data;
806 };
807
808 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
809 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
810 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
811 void *location, struct anv_bo *bo, uint32_t offset);
812 VkResult anv_device_submit_simple_batch(struct anv_device *device,
813 struct anv_batch *batch);
814
815 struct anv_address {
816 struct anv_bo *bo;
817 uint32_t offset;
818 };
819
820 static inline uint64_t
821 _anv_combine_address(struct anv_batch *batch, void *location,
822 const struct anv_address address, uint32_t delta)
823 {
824 if (address.bo == NULL) {
825 return address.offset + delta;
826 } else {
827 assert(batch->start <= location && location < batch->end);
828
829 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
830 }
831 }
832
833 #define __gen_address_type struct anv_address
834 #define __gen_user_data struct anv_batch
835 #define __gen_combine_address _anv_combine_address
836
837 /* Wrapper macros needed to work around preprocessor argument issues. In
838 * particular, arguments don't get pre-evaluated if they are concatenated.
839 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
840 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
841 * We can work around this easily enough with these helpers.
842 */
843 #define __anv_cmd_length(cmd) cmd ## _length
844 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
845 #define __anv_cmd_header(cmd) cmd ## _header
846 #define __anv_cmd_pack(cmd) cmd ## _pack
847 #define __anv_reg_num(reg) reg ## _num
848
849 #define anv_pack_struct(dst, struc, ...) do { \
850 struct struc __template = { \
851 __VA_ARGS__ \
852 }; \
853 __anv_cmd_pack(struc)(NULL, dst, &__template); \
854 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
855 } while (0)
856
857 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
858 void *__dst = anv_batch_emit_dwords(batch, n); \
859 struct cmd __template = { \
860 __anv_cmd_header(cmd), \
861 .DWordLength = n - __anv_cmd_length_bias(cmd), \
862 __VA_ARGS__ \
863 }; \
864 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
865 __dst; \
866 })
867
868 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
869 do { \
870 uint32_t *dw; \
871 \
872 static_assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1), "mismatch merge"); \
873 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
874 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
875 dw[i] = (dwords0)[i] | (dwords1)[i]; \
876 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
877 } while (0)
878
879 #define anv_batch_emit(batch, cmd, name) \
880 for (struct cmd name = { __anv_cmd_header(cmd) }, \
881 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
882 __builtin_expect(_dst != NULL, 1); \
883 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
884 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
885 _dst = NULL; \
886 }))
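/* Usage sketch (illustrative only, from gen-specific code where GENX()
 * expands to the current generation's prefix): the for-construct scopes a
 * named command struct and packs it into the batch when the block closes:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */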
887
888 #define anv_state_pool_emit(pool, cmd, align, ...) ({ \
889 const uint32_t __size = __anv_cmd_length(cmd) * 4; \
890 struct anv_state __state = \
891 anv_state_pool_alloc((pool), __size, align); \
892 struct cmd __template = { \
893 __VA_ARGS__ \
894 }; \
895 __anv_cmd_pack(cmd)(NULL, __state.map, &__template); \
896 VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
897 if (!(pool)->block_pool->device->info.has_llc) \
898 anv_state_clflush(__state); \
899 __state; \
900 })
901
902 #define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
903 .GraphicsDataTypeGFDT = 0, \
904 .LLCCacheabilityControlLLCCC = 0, \
905 .L3CacheabilityControlL3CC = 1, \
906 }
907
908 #define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
909 .LLCeLLCCacheabilityControlLLCCC = 0, \
910 .L3CacheabilityControlL3CC = 1, \
911 }
912
913 #define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
914 .MemoryTypeLLCeLLCCacheabilityControl = WB, \
915 .TargetCache = L3DefertoPATforLLCeLLCselection, \
916 .AgeforQUADLRU = 0 \
917 }
918
919 /* Skylake: MOCS is now an index into an array of 62 different caching
920 * configurations programmed by the kernel.
921 */
922
923 #define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
924 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
925 .IndextoMOCSTables = 2 \
926 }
927
928 #define GEN9_MOCS_PTE { \
929 /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
930 .IndextoMOCSTables = 1 \
931 }
932
933 struct anv_device_memory {
934 struct anv_bo bo;
935 uint32_t type_index;
936 VkDeviceSize map_size;
937 void * map;
938 };
939
940 /**
941 * Header for Vertex URB Entry (VUE)
942 */
943 struct anv_vue_header {
944 uint32_t Reserved;
945 uint32_t RTAIndex; /* RenderTargetArrayIndex */
946 uint32_t ViewportIndex;
947 float PointWidth;
948 };
949
950 struct anv_descriptor_set_binding_layout {
951 #ifndef NDEBUG
952 /* The type of the descriptors in this binding */
953 VkDescriptorType type;
954 #endif
955
956 /* Number of array elements in this binding */
957 uint16_t array_size;
958
959 /* Index into the flattened descriptor set */
960 uint16_t descriptor_index;
961
962 /* Index into the dynamic state array for a dynamic buffer */
963 int16_t dynamic_offset_index;
964
965 /* Index into the descriptor set buffer views */
966 int16_t buffer_index;
967
968 struct {
969 /* Index into the binding table for the associated surface */
970 int16_t surface_index;
971
972 /* Index into the sampler table for the associated sampler */
973 int16_t sampler_index;
974
975 /* Index into the image table for the associated image */
976 int16_t image_index;
977 } stage[MESA_SHADER_STAGES];
978
979 /* Immutable samplers (or NULL if no immutable samplers) */
980 struct anv_sampler **immutable_samplers;
981 };
982
983 struct anv_descriptor_set_layout {
984 /* Number of bindings in this descriptor set */
985 uint16_t binding_count;
986
987 /* Total size of the descriptor set with room for all array entries */
988 uint16_t size;
989
990 /* Shader stages affected by this descriptor set */
991 uint16_t shader_stages;
992
993 /* Number of buffers in this descriptor set */
994 uint16_t buffer_count;
995
996 /* Number of dynamic offsets used by this descriptor set */
997 uint16_t dynamic_offset_count;
998
999 /* Bindings in this descriptor set */
1000 struct anv_descriptor_set_binding_layout binding[0];
1001 };
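/* Allocation sketch (illustrative only): binding[0] is a flexible array
 * member, so a layout and all of its bindings live in one allocation:
 *
 *    size_t size = sizeof(struct anv_descriptor_set_layout) +
 *                  binding_count *
 *                  sizeof(struct anv_descriptor_set_binding_layout);
 *    struct anv_descriptor_set_layout *layout =
 *       anv_alloc2(&device->alloc, pAllocator, size, 8,
 *                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 */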
1002
1003 struct anv_descriptor {
1004 VkDescriptorType type;
1005
1006 union {
1007 struct {
1008 struct anv_image_view *image_view;
1009 struct anv_sampler *sampler;
1010 };
1011
1012 struct anv_buffer_view *buffer_view;
1013 };
1014 };
1015
1016 struct anv_descriptor_set {
1017 const struct anv_descriptor_set_layout *layout;
1018 uint32_t size;
1019 uint32_t buffer_count;
1020 struct anv_buffer_view *buffer_views;
1021 struct anv_descriptor descriptors[0];
1022 };
1023
1024 struct anv_descriptor_pool {
1025 uint32_t size;
1026 uint32_t next;
1027 uint32_t free_list;
1028
1029 struct anv_state_stream surface_state_stream;
1030 void *surface_state_free_list;
1031
1032 char data[0];
1033 };
1034
1035 VkResult
1036 anv_descriptor_set_create(struct anv_device *device,
1037 struct anv_descriptor_pool *pool,
1038 const struct anv_descriptor_set_layout *layout,
1039 struct anv_descriptor_set **out_set);
1040
1041 void
1042 anv_descriptor_set_destroy(struct anv_device *device,
1043 struct anv_descriptor_pool *pool,
1044 struct anv_descriptor_set *set);
1045
1046 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
1047
1048 struct anv_pipeline_binding {
1049 /* The descriptor set this surface corresponds to. The special value of
1050 * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
1051 * to a color attachment and not a regular descriptor.
1052 */
1053 uint8_t set;
1054
1055 /* Binding in the descriptor set */
1056 uint8_t binding;
1057
1058 /* Index in the binding */
1059 uint8_t index;
1060 };
1061
1062 struct anv_pipeline_layout {
1063 struct {
1064 struct anv_descriptor_set_layout *layout;
1065 uint32_t dynamic_offset_start;
1066 } set[MAX_SETS];
1067
1068 uint32_t num_sets;
1069
1070 struct {
1071 bool has_dynamic_offsets;
1072 } stage[MESA_SHADER_STAGES];
1073
1074 unsigned char sha1[20];
1075 };
1076
1077 struct anv_buffer {
1078 struct anv_device * device;
1079 VkDeviceSize size;
1080
1081 VkBufferUsageFlags usage;
1082
1083 /* Set when bound */
1084 struct anv_bo * bo;
1085 VkDeviceSize offset;
1086 };
1087
1088 enum anv_cmd_dirty_bits {
1089 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
1090 ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
1091 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
1092 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
1093 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
1094 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
1095 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
1096 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
1097 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
1098 ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
1099 ANV_CMD_DIRTY_PIPELINE = 1 << 9,
1100 ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
1101 ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
1102 };
1103 typedef uint32_t anv_cmd_dirty_mask_t;
1104
1105 enum anv_pipe_bits {
1106 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
1107 ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
1108 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
1109 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
1110 ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
1111 ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
1112 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
1113 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
1114 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
1115 ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
1116 ANV_PIPE_CS_STALL_BIT = (1 << 20),
1117
1118 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
1119 * a flush has happened but not a CS stall. The next time we do any sort
1120 * of invalidation we need to insert a CS stall at that time. Otherwise,
1121 * we would have to CS stall on every flush which could be bad.
1122 */
1123 ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
1124 };
1125
1126 #define ANV_PIPE_FLUSH_BITS ( \
1127 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
1128 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1129 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
1130
1131 #define ANV_PIPE_STALL_BITS ( \
1132 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
1133 ANV_PIPE_DEPTH_STALL_BIT | \
1134 ANV_PIPE_CS_STALL_BIT)
1135
1136 #define ANV_PIPE_INVALIDATE_BITS ( \
1137 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
1138 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
1139 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
1140 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
1141 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
1142 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
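/* Usage sketch (illustrative only): barriers accumulate bits into
 * anv_cmd_state::pending_pipe_bits and a later PIPE_CONTROL emission
 * applies them:
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT;
 */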
1143
1144 struct anv_vertex_binding {
1145 struct anv_buffer * buffer;
1146 VkDeviceSize offset;
1147 };
1148
1149 struct anv_push_constants {
1150 /* Current allocated size of this push constants data structure.
1151 * Because a decent chunk of it may not be used (images on SKL, for
1152 * instance), we won't actually allocate the entire structure up-front.
1153 */
1154 uint32_t size;
1155
1156 /* Push constant data provided by the client through vkPushConstants */
1157 uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
1158
1159 /* Our hardware only provides zero-based vertex and instance IDs so, in
1160 * order to satisfy the Vulkan requirements, we may have to push one or
1161 * both of these into the shader.
1162 */
1163 uint32_t base_vertex;
1164 uint32_t base_instance;
1165
1166 /* Offsets and ranges for dynamically bound buffers */
1167 struct {
1168 uint32_t offset;
1169 uint32_t range;
1170 } dynamic[MAX_DYNAMIC_BUFFERS];
1171
1172 /* Image data for image_load_store on pre-SKL */
1173 struct brw_image_param images[MAX_IMAGES];
1174 };
1175
1176 struct anv_dynamic_state {
1177 struct {
1178 uint32_t count;
1179 VkViewport viewports[MAX_VIEWPORTS];
1180 } viewport;
1181
1182 struct {
1183 uint32_t count;
1184 VkRect2D scissors[MAX_SCISSORS];
1185 } scissor;
1186
1187 float line_width;
1188
1189 struct {
1190 float bias;
1191 float clamp;
1192 float slope;
1193 } depth_bias;
1194
1195 float blend_constants[4];
1196
1197 struct {
1198 float min;
1199 float max;
1200 } depth_bounds;
1201
1202 struct {
1203 uint32_t front;
1204 uint32_t back;
1205 } stencil_compare_mask;
1206
1207 struct {
1208 uint32_t front;
1209 uint32_t back;
1210 } stencil_write_mask;
1211
1212 struct {
1213 uint32_t front;
1214 uint32_t back;
1215 } stencil_reference;
1216 };
1217
1218 extern const struct anv_dynamic_state default_dynamic_state;
1219
1220 void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
1221 const struct anv_dynamic_state *src,
1222 uint32_t copy_mask);
1223
1224 /**
1225 * Attachment state when recording a renderpass instance.
1226 *
1227 * The clear value is valid only if there exists a pending clear.
1228 */
1229 struct anv_attachment_state {
1230 VkImageAspectFlags pending_clear_aspects;
1231 VkClearValue clear_value;
1232 };
1233
1234 /** State required while building cmd buffer */
1235 struct anv_cmd_state {
1236 /* PIPELINE_SELECT.PipelineSelection */
1237 uint32_t current_pipeline;
1238 const struct gen_l3_config * current_l3_config;
1239 uint32_t vb_dirty;
1240 anv_cmd_dirty_mask_t dirty;
1241 anv_cmd_dirty_mask_t compute_dirty;
1242 enum anv_pipe_bits pending_pipe_bits;
1243 uint32_t num_workgroups_offset;
1244 struct anv_bo *num_workgroups_bo;
1245 VkShaderStageFlags descriptors_dirty;
1246 VkShaderStageFlags push_constants_dirty;
1247 uint32_t scratch_size;
1248 struct anv_pipeline * pipeline;
1249 struct anv_pipeline * compute_pipeline;
1250 struct anv_framebuffer * framebuffer;
1251 struct anv_render_pass * pass;
1252 struct anv_subpass * subpass;
1253 VkRect2D render_area;
1254 uint32_t restart_index;
1255 struct anv_vertex_binding vertex_bindings[MAX_VBS];
1256 struct anv_descriptor_set * descriptors[MAX_SETS];
1257 VkShaderStageFlags push_constant_stages;
1258 struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
1259 struct anv_state binding_tables[MESA_SHADER_STAGES];
1260 struct anv_state samplers[MESA_SHADER_STAGES];
1261 struct anv_dynamic_state dynamic;
1262 bool need_query_wa;
1263
1264 /**
1265 * Array length is anv_cmd_state::pass::attachment_count. Array content is
1266 * valid only when recording a render pass instance.
1267 */
1268 struct anv_attachment_state * attachments;
1269
1270 struct {
1271 struct anv_buffer * index_buffer;
1272 uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
1273 uint32_t index_offset;
1274 } gen7;
1275 };
1276
1277 struct anv_cmd_pool {
1278 VkAllocationCallbacks alloc;
1279 struct list_head cmd_buffers;
1280 };
1281
1282 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
1283
1284 enum anv_cmd_buffer_exec_mode {
1285 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
1286 ANV_CMD_BUFFER_EXEC_MODE_EMIT,
1287 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
1288 ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
1289 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
1290 };
1291
1292 struct anv_cmd_buffer {
1293 VK_LOADER_DATA _loader_data;
1294
1295 struct anv_device * device;
1296
1297 struct anv_cmd_pool * pool;
1298 struct list_head pool_link;
1299
1300 struct anv_batch batch;
1301
1302 /* Fields required for the actual chain of anv_batch_bo's.
1303 *
1304 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
1305 */
1306 struct list_head batch_bos;
1307 enum anv_cmd_buffer_exec_mode exec_mode;
1308
1309 /* A vector of anv_batch_bo pointers for every batch or surface buffer
1310 * referenced by this command buffer
1311 *
1312 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1313 */
1314 struct anv_vector seen_bbos;
1315
1316 /* A vector of int32_t's for every block of binding tables.
1317 *
1318 * initialized by anv_cmd_buffer_init_batch_bo_chain()
1319 */
1320 struct anv_vector bt_blocks;
1321 uint32_t bt_next;
1322 struct anv_reloc_list surface_relocs;
1323
1324 /* Information needed for execbuf
1325 *
1326 * These fields are generated by anv_cmd_buffer_prepare_execbuf().
1327 */
1328 struct {
1329 struct drm_i915_gem_execbuffer2 execbuf;
1330
1331 struct drm_i915_gem_exec_object2 * objects;
1332 uint32_t bo_count;
1333 struct anv_bo ** bos;
1334
1335 /* Allocated length of the 'objects' and 'bos' arrays */
1336 uint32_t array_length;
1337
1338 bool need_reloc;
1339 } execbuf2;
1340
1341 /* Serial for tracking buffer completion */
1342 uint32_t serial;
1343
1344 /* Stream objects for storing temporary data */
1345 struct anv_state_stream surface_state_stream;
1346 struct anv_state_stream dynamic_state_stream;
1347
1348 VkCommandBufferUsageFlags usage_flags;
1349 VkCommandBufferLevel level;
1350
1351 struct anv_cmd_state state;
1352 };
1353
1354 VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1355 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1356 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
1357 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
1358 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
1359 struct anv_cmd_buffer *secondary);
1360 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
1361
1362 VkResult anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
1363 unsigned stage, struct anv_state *bt_state);
1364 VkResult anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
1365 unsigned stage, struct anv_state *state);
1366 uint32_t anv_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);
1367
1368 struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
1369 const void *data, uint32_t size, uint32_t alignment);
1370 struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
1371 uint32_t *a, uint32_t *b,
1372 uint32_t dwords, uint32_t alignment);
1373
1374 struct anv_address
1375 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
1376 struct anv_state
1377 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
1378 uint32_t entries, uint32_t *state_offset);
1379 struct anv_state
1380 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
1381 struct anv_state
1382 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
1383 uint32_t size, uint32_t alignment);
1384
1385 VkResult
1386 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
1387
1388 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
1389 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
1390 bool depth_clamp_enable);
1391 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
1392
1393 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
1394
1395 void anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
1396 const VkRenderPassBeginInfo *info);
1397
1398 struct anv_state
1399 anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
1400 gl_shader_stage stage);
1401 struct anv_state
1402 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
1403
1404 void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
1405 void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);
1406
1407 const struct anv_image_view *
1408 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
1409
1410 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
1411
1412 struct anv_fence {
1413 struct anv_bo bo;
1414 struct drm_i915_gem_execbuffer2 execbuf;
1415 struct drm_i915_gem_exec_object2 exec2_objects[1];
1416 bool ready;
1417 };
1418
1419 struct anv_event {
1420 uint64_t semaphore;
1421 struct anv_state state;
1422 };
1423
1424 struct nir_shader;
1425
1426 struct anv_shader_module {
1427 struct nir_shader * nir;
1428
1429 unsigned char sha1[20];
1430 uint32_t size;
1431 char data[0];
1432 };
1433
1434 void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
1435 struct anv_shader_module *module,
1436 const char *entrypoint,
1437 const struct anv_pipeline_layout *pipeline_layout,
1438 const VkSpecializationInfo *spec_info);
1439
1440 static inline gl_shader_stage
1441 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
1442 {
1443 assert(__builtin_popcount(vk_stage) == 1);
1444 return ffs(vk_stage) - 1;
1445 }
1446
1447 static inline VkShaderStageFlagBits
1448 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
1449 {
1450 return (1 << mesa_stage);
1451 }
1452
1453 #define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
1454
1455 #define anv_foreach_stage(stage, stage_bits) \
1456 for (gl_shader_stage stage, \
1457 __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
1458 stage = __builtin_ffs(__tmp) - 1, __tmp; \
1459 __tmp &= ~(1 << (stage)))
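/* Usage sketch (illustrative only; flush_descriptors() is a placeholder):
 * walk the Mesa stages named in a VkShaderStageFlags mask:
 *
 *    anv_foreach_stage(s, VK_SHADER_STAGE_VERTEX_BIT |
 *                         VK_SHADER_STAGE_FRAGMENT_BIT) {
 *       flush_descriptors(cmd_buffer, s);
 *    }
 */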
1460
1461 struct anv_pipeline_bind_map {
1462 uint32_t surface_count;
1463 uint32_t sampler_count;
1464 uint32_t image_count;
1465
1466 struct anv_pipeline_binding * surface_to_descriptor;
1467 struct anv_pipeline_binding * sampler_to_descriptor;
1468 };
1469
1470 struct anv_shader_bin {
1471 uint32_t ref_cnt;
1472
1473 struct anv_state kernel;
1474 uint32_t kernel_size;
1475
1476 struct anv_pipeline_bind_map bind_map;
1477
1478 uint32_t prog_data_size;
1479
1480 /* Prog data follows, then the key, both aligned to 8 bytes */
1481 };
1482
1483 struct anv_shader_bin *
1484 anv_shader_bin_create(struct anv_device *device,
1485 const void *key, uint32_t key_size,
1486 const void *kernel, uint32_t kernel_size,
1487 const void *prog_data, uint32_t prog_data_size,
1488 const struct anv_pipeline_bind_map *bind_map);
1489
1490 void
1491 anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
1492
1493 static inline void
1494 anv_shader_bin_ref(struct anv_shader_bin *shader)
1495 {
1496 assert(shader->ref_cnt >= 1);
1497 __sync_fetch_and_add(&shader->ref_cnt, 1);
1498 }
1499
1500 static inline void
1501 anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
1502 {
1503 assert(shader->ref_cnt >= 1);
1504 if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
1505 anv_shader_bin_destroy(device, shader);
1506 }
1507
1508 static inline const struct brw_stage_prog_data *
1509 anv_shader_bin_get_prog_data(const struct anv_shader_bin *shader)
1510 {
1511 const void *data = shader;
1512 data += align_u32(sizeof(struct anv_shader_bin), 8);
1513 return data;
1514 }
1515
1516 struct anv_pipeline {
1517 struct anv_device * device;
1518 struct anv_batch batch;
1519 uint32_t batch_data[512];
1520 struct anv_reloc_list batch_relocs;
1521 uint32_t dynamic_state_mask;
1522 struct anv_dynamic_state dynamic_state;
1523
1524 struct anv_pipeline_layout * layout;
1525
1526 bool use_repclear;
1527 bool needs_data_cache;
1528
1529 struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
1530
1531 struct {
1532 const struct gen_l3_config * l3_config;
1533 uint32_t total_size;
1534 } urb;
1535
1536 VkShaderStageFlags active_stages;
1537 struct anv_state blend_state;
1538 uint32_t vs_simd8;
1539 uint32_t vs_vec4;
1540 uint32_t ps_ksp0;
1541 uint32_t gs_kernel;
1542 uint32_t cs_simd;
1543
1544 uint32_t vb_used;
1545 uint32_t binding_stride[MAX_VBS];
1546 bool instancing_enable[MAX_VBS];
1547 bool primitive_restart;
1548 uint32_t topology;
1549
1550 uint32_t cs_right_mask;
1551
1552 bool depth_clamp_enable;
1553
1554 struct {
1555 uint32_t sf[7];
1556 uint32_t depth_stencil_state[3];
1557 } gen7;
1558
1559 struct {
1560 uint32_t sf[4];
1561 uint32_t raster[5];
1562 uint32_t wm_depth_stencil[3];
1563 } gen8;
1564
1565 struct {
1566 uint32_t wm_depth_stencil[4];
1567 } gen9;
1568 };
1569
1570 static inline bool
1571 anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
1572 gl_shader_stage stage)
1573 {
1574 return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
1575 }
1576
1577 #define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage) \
1578 static inline const struct brw_##prefix##_prog_data * \
1579 get_##prefix##_prog_data(struct anv_pipeline *pipeline) \
1580 { \
1581 if (anv_pipeline_has_stage(pipeline, stage)) { \
1582 return (const struct brw_##prefix##_prog_data *) \
1583 anv_shader_bin_get_prog_data(pipeline->shaders[stage]); \
1584 } else { \
1585 return NULL; \
1586 } \
1587 }
1588
1589 ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
1590 ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
1591 ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
1592 ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
1593
1594 struct anv_graphics_pipeline_create_info {
1595 /**
1596 * If non-negative, overrides the color attachment count of the pipeline's
1597 * subpass.
1598 */
1599 int8_t color_attachment_count;
1600
1601 bool use_repclear;
1602 bool disable_vs;
1603 bool use_rectlist;
1604 };
1605
1606 VkResult
1607 anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
1608 struct anv_pipeline_cache *cache,
1609 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1610 const struct anv_graphics_pipeline_create_info *extra,
1611 const VkAllocationCallbacks *alloc);
1612
1613 VkResult
1614 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
1615 struct anv_pipeline_cache *cache,
1616 const VkComputePipelineCreateInfo *info,
1617 struct anv_shader_module *module,
1618 const char *entrypoint,
1619 const VkSpecializationInfo *spec_info);
1620
1621 VkResult
1622 anv_graphics_pipeline_create(VkDevice device,
1623 VkPipelineCache cache,
1624 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1625 const struct anv_graphics_pipeline_create_info *extra,
1626 const VkAllocationCallbacks *alloc,
1627 VkPipeline *pPipeline);
1628
1629 struct anv_format {
1630 enum isl_format isl_format:16;
1631 struct isl_swizzle swizzle;
1632 };
1633
1634 struct anv_format
1635 anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
1636 VkImageAspectFlags aspect, VkImageTiling tiling);
1637
1638 static inline enum isl_format
1639 anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
1640 VkImageAspectFlags aspect, VkImageTiling tiling)
1641 {
1642 return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
1643 }
1644
1645 void
1646 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
1647
1648 /**
1649 * Subsurface of an anv_image.
1650 */
1651 struct anv_surface {
1652 /** Valid only if isl_surf::size > 0. */
1653 struct isl_surf isl;
1654
1655 /**
1656 * Offset from VkImage's base address, as bound by vkBindImageMemory().
1657 */
1658 uint32_t offset;
1659 };
1660
1661 struct anv_image {
1662 VkImageType type;
1663 /* The original VkFormat provided by the client. This may not match any
1664 * of the actual surface formats.
1665 */
1666 VkFormat vk_format;
1667 VkImageAspectFlags aspects;
1668 VkExtent3D extent;
1669 uint32_t levels;
1670 uint32_t array_size;
1671 uint32_t samples; /**< VkImageCreateInfo::samples */
1672 VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
1673 VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
1674
1675 VkDeviceSize size;
1676 uint32_t alignment;
1677
1678 /* Set when bound */
1679 struct anv_bo *bo;
1680 VkDeviceSize offset;
1681
1682 /**
1683 * Image subsurfaces
1684 *
1685 * For each foo, anv_image::foo_surface is valid if and only if
1686 * anv_image::aspects has a foo aspect.
1687 *
1688 * The hardware requires that the depth buffer and stencil buffer be
1689 * separate surfaces. From Vulkan's perspective, though, depth and stencil
1690 * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
1691 * allocate the depth and stencil buffers as separate surfaces in the same
1692 * bo.
1693 */
1694 union {
1695 struct anv_surface color_surface;
1696
1697 struct {
1698 struct anv_surface depth_surface;
1699 struct anv_surface hiz_surface;
1700 struct anv_surface stencil_surface;
1701 };
1702 };
1703 };
1704
1705 static inline uint32_t
1706 anv_get_layerCount(const struct anv_image *image,
1707 const VkImageSubresourceRange *range)
1708 {
1709 return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
1710 image->array_size - range->baseArrayLayer : range->layerCount;
1711 }
1712
1713 static inline uint32_t
1714 anv_get_levelCount(const struct anv_image *image,
1715 const VkImageSubresourceRange *range)
1716 {
1717 return range->levelCount == VK_REMAINING_MIP_LEVELS ?
1718 image->levels - range->baseMipLevel : range->levelCount;
1719 }
1720
1721
1722 struct anv_image_view {
1723 const struct anv_image *image; /**< VkImageViewCreateInfo::image */
1724 struct anv_bo *bo;
1725 uint32_t offset; /**< Offset into bo. */
1726
1727 VkImageAspectFlags aspect_mask;
1728 VkFormat vk_format;
1729 uint32_t base_layer;
1730 uint32_t base_mip;
1731 VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
1732
1733 /** RENDER_SURFACE_STATE when using image as a color render target. */
1734 struct anv_state color_rt_surface_state;
1735
1736 /** RENDER_SURFACE_STATE when using image as a sampler surface. */
1737 struct anv_state sampler_surface_state;
1738
1739 /** RENDER_SURFACE_STATE when using image as a storage image. */
1740 struct anv_state storage_surface_state;
1741
1742 struct brw_image_param storage_image_param;
1743 };
1744
1745 struct anv_image_create_info {
1746 const VkImageCreateInfo *vk_info;
1747
1748 /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
1749 isl_tiling_flags_t isl_tiling_flags;
1750
1751 uint32_t stride;
1752 };
1753
1754 VkResult anv_image_create(VkDevice _device,
1755 const struct anv_image_create_info *info,
1756 const VkAllocationCallbacks* alloc,
1757 VkImage *pImage);
1758
1759 const struct anv_surface *
1760 anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
1761 VkImageAspectFlags aspect_mask);
1762
1763 static inline bool
1764 anv_image_has_hiz(const struct anv_image *image)
1765 {
1766 /* We must check the aspect because anv_image::hiz_surface belongs to
1767 * a union.
1768 */
1769 return (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1770 image->hiz_surface.isl.size > 0;
1771 }
1772
1773 void anv_image_view_init(struct anv_image_view *view,
1774 struct anv_device *device,
1775 const VkImageViewCreateInfo* pCreateInfo,
1776 struct anv_cmd_buffer *cmd_buffer,
1777 VkImageUsageFlags usage_mask);
1778
1779 struct anv_buffer_view {
1780 enum isl_format format; /**< VkBufferViewCreateInfo::format */
1781 struct anv_bo *bo;
1782 uint32_t offset; /**< Offset into bo. */
1783 uint64_t range; /**< VkBufferViewCreateInfo::range */
1784
1785 struct anv_state surface_state;
1786 struct anv_state storage_surface_state;
1787
1788 struct brw_image_param storage_image_param;
1789 };
1790
1791 void anv_buffer_view_init(struct anv_buffer_view *view,
1792 struct anv_device *device,
1793 const VkBufferViewCreateInfo* pCreateInfo,
1794 struct anv_cmd_buffer *cmd_buffer);
1795
1796 enum isl_format
1797 anv_isl_format_for_descriptor_type(VkDescriptorType type);
1798
1799 static inline struct VkExtent3D
1800 anv_sanitize_image_extent(const VkImageType imageType,
1801 const struct VkExtent3D imageExtent)
1802 {
1803 switch (imageType) {
1804 case VK_IMAGE_TYPE_1D:
1805 return (VkExtent3D) { imageExtent.width, 1, 1 };
1806 case VK_IMAGE_TYPE_2D:
1807 return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
1808 case VK_IMAGE_TYPE_3D:
1809 return imageExtent;
1810 default:
1811 unreachable("invalid image type");
1812 }
1813 }
1814
1815 static inline struct VkOffset3D
1816 anv_sanitize_image_offset(const VkImageType imageType,
1817 const struct VkOffset3D imageOffset)
1818 {
1819 switch (imageType) {
1820 case VK_IMAGE_TYPE_1D:
1821 return (VkOffset3D) { imageOffset.x, 0, 0 };
1822 case VK_IMAGE_TYPE_2D:
1823 return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
1824 case VK_IMAGE_TYPE_3D:
1825 return imageOffset;
1826 default:
1827 unreachable("invalid image type");
1828 }
1829 }
1830
1831
1832 void anv_fill_buffer_surface_state(struct anv_device *device,
1833 struct anv_state state,
1834 enum isl_format format,
1835 uint32_t offset, uint32_t range,
1836 uint32_t stride);
1837
1838 void anv_image_view_fill_image_param(struct anv_device *device,
1839 struct anv_image_view *view,
1840 struct brw_image_param *param);
1841 void anv_buffer_view_fill_image_param(struct anv_device *device,
1842 struct anv_buffer_view *view,
1843 struct brw_image_param *param);
1844
1845 struct anv_sampler {
1846 uint32_t state[4];
1847 };
1848
1849 struct anv_framebuffer {
1850 uint32_t width;
1851 uint32_t height;
1852 uint32_t layers;
1853
1854 uint32_t attachment_count;
1855 struct anv_image_view * attachments[0];
1856 };
1857
1858 struct anv_subpass {
1859 uint32_t input_count;
1860 uint32_t * input_attachments;
1861 uint32_t color_count;
1862 uint32_t * color_attachments;
1863 uint32_t * resolve_attachments;
1864 uint32_t depth_stencil_attachment;
1865
1866 /** Subpass has at least one resolve attachment */
1867 bool has_resolve;
1868 };
1869
1870 struct anv_render_pass_attachment {
1871 VkFormat format;
1872 uint32_t samples;
1873 VkAttachmentLoadOp load_op;
1874 VkAttachmentStoreOp store_op;
1875 VkAttachmentLoadOp stencil_load_op;
1876 };
1877
1878 struct anv_render_pass {
1879 uint32_t attachment_count;
1880 uint32_t subpass_count;
1881 uint32_t * subpass_attachments;
1882 struct anv_render_pass_attachment * attachments;
1883 struct anv_subpass subpasses[0];
1884 };
1885
1886 extern struct anv_render_pass anv_meta_dummy_renderpass;
1887
1888 struct anv_query_pool_slot {
1889 uint64_t begin;
1890 uint64_t end;
1891 uint64_t available;
1892 };
1893
1894 struct anv_query_pool {
1895 VkQueryType type;
1896 uint32_t slots;
1897 struct anv_bo bo;
1898 };
1899
1900 VkResult anv_device_init_meta(struct anv_device *device);
1901 void anv_device_finish_meta(struct anv_device *device);
1902
1903 void *anv_lookup_entrypoint(const char *name);
1904
1905 void anv_dump_image_to_ppm(struct anv_device *device,
1906 struct anv_image *image, unsigned miplevel,
1907 unsigned array_layer, VkImageAspectFlagBits aspect,
1908 const char *filename);
1909
1910 enum anv_dump_action {
1911 ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
1912 };
1913
1914 void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
1915 void anv_dump_finish(void);
1916
1917 void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
1918 struct anv_framebuffer *fb);
1919
1920 #define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
1921 \
1922 static inline struct __anv_type * \
1923 __anv_type ## _from_handle(__VkType _handle) \
1924 { \
1925 return (struct __anv_type *) _handle; \
1926 } \
1927 \
1928 static inline __VkType \
1929 __anv_type ## _to_handle(struct __anv_type *_obj) \
1930 { \
1931 return (__VkType) _obj; \
1932 }
1933
1934 #define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
1935 \
1936 static inline struct __anv_type * \
1937 __anv_type ## _from_handle(__VkType _handle) \
1938 { \
1939 return (struct __anv_type *)(uintptr_t) _handle; \
1940 } \
1941 \
1942 static inline __VkType \
1943 __anv_type ## _to_handle(struct __anv_type *_obj) \
1944 { \
1945 return (__VkType)(uintptr_t) _obj; \
1946 }
1947
1948 #define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
1949 struct __anv_type *__name = __anv_type ## _from_handle(__handle)
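/* Usage sketch (illustrative only): entrypoints recover the driver structs
 * from the API handles on entry:
 *
 *    void anv_DestroyBuffer(VkDevice _device, VkBuffer _buffer,
 *                           const VkAllocationCallbacks *pAllocator)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 *       ...
 *    }
 */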
1950
1951 ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
1952 ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
1953 ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
1954 ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
1955 ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)
1956
1957 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
1958 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
1959 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
1960 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
1961 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
1962 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
1963 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
1964 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
1965 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
1966 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
1967 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
1968 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
1969 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
1970 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
1971 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
1972 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
1973 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
1974 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
1975 ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
1976
1977 #define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType) \
1978 \
1979 static inline const __VkType * \
1980 __anv_type ## _to_ ## __VkType(const struct __anv_type *__anv_obj) \
1981 { \
1982 return (const __VkType *) __anv_obj; \
1983 }
1984
1985 #define ANV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
1986 const __VkType *__vk_name = anv_common_to_ ## __VkType(__common_name)
1987
1988 ANV_DEFINE_STRUCT_CASTS(anv_common, VkMemoryBarrier)
1989 ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
1990 ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)
1991
1992 /* Gen-specific function declarations */
1993 #ifdef genX
1994 # include "anv_genX.h"
1995 #else
1996 # define genX(x) gen7_##x
1997 # include "anv_genX.h"
1998 # undef genX
1999 # define genX(x) gen75_##x
2000 # include "anv_genX.h"
2001 # undef genX
2002 # define genX(x) gen8_##x
2003 # include "anv_genX.h"
2004 # undef genX
2005 # define genX(x) gen9_##x
2006 # include "anv_genX.h"
2007 # undef genX
2008 #endif
2009
2010 #ifdef __cplusplus
2011 }
2012 #endif
2013
2014 #endif /* ANV_PRIVATE_H */