anv: Don't presume to know what address is in a surface relocation
[mesa.git] / src / intel / vulkan / anv_private.h
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_PRIVATE_H
#define ANV_PRIVATE_H

#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>
#include <assert.h>
#include <stdint.h>
#include <i915_drm.h>

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x)
#endif

#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_vector.h"
#include "util/vk_alloc.h"

/* Pre-declarations needed for WSI entrypoints */
struct wl_surface;
struct wl_display;
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

struct gen_l3_config;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "brw_context.h"
#include "isl/isl.h"

#include "wsi_common.h"

#ifdef __cplusplus
extern "C" {
#endif

#define MAX_VBS 32
#define MAX_SETS 8
#define MAX_RTS 8
#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 8
#define MAX_SAMPLES_LOG2 4 /* SKL supports 16 samples */

#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}

static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}

static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f > max)
      return max;
   else if (f < min)
      return min;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}

#define for_each_bit(b, dword)                            \
   for (uint32_t __dword = (dword);                       \
        (b) = __builtin_ffs(__dword) - 1, __dword;        \
        __dword &= ~(1 << (b)))
#define typed_memcpy(dest, src, count) ({                      \
   static_assert(sizeof(*(src)) == sizeof(*(dest)), "");       \
   memcpy((dest), (src), (count) * sizeof(*(src)));            \
})

/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

struct anv_common {
   VkStructureType                              sType;
   const void *                                 pNext;
};

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL)
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#endif

void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) ({                               \
      static bool reported = false;                                \
      if (!reported) {                                             \
         __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
         reported = true;                                          \
      }                                                            \
   })

/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({                                                  \
      if (unlikely(!(x)))                                                 \
         fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x);   \
   })
#else
#define anv_assert(x)
#endif

/**
 * If a block of code is annotated with anv_validate, then the block runs only
 * in debug builds.
 */
#ifdef DEBUG
#define anv_validate if (1)
#else
#define anv_validate if (0)
#endif

void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;

#define stub_return(v)                           \
   do {                                          \
      anv_finishme("stub %s", __func__);         \
      return (v);                                \
   } while (0)

#define stub()                                   \
   do {                                          \
      anv_finishme("stub %s", __func__);         \
      return;                                    \
   } while (0)

/**
 * A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */
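
/* An illustrative sketch of the scheme described above (assumed usage, not
 * driver code): unsigned wraparound makes head - tail the element count,
 * and the modulo is applied only on access:
 *
 *    uint32_t head = 0, tail = 0;        // free-running indices
 *    data[head++ % size] = elem;         // add at head
 *    elem = data[tail++ % size];         // remove from tail
 *    uint32_t count = head - tail;       // correct even after wraparound
 */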

struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /* We need to set the WRITE flag on winsys bos so GEM will know we're
    * writing to them and synchronize uses on other rings (e.g., if the
    * display server uses the blitter ring).
    */
   bool is_winsys_bo;
};

/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
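
/* A minimal sketch of how the counted head defeats ABA (assumed shape of a
 * pop operation; next_offset_of() is a hypothetical helper standing in for
 * reading the next link out of the pool's memory map):
 *
 *    union anv_free_list old, new, current;
 *    current.u64 = list->u64;
 *    do {
 *       old = current;
 *       new.offset = next_offset_of(old.offset);
 *       new.count = old.count + 1;    // changes even if offset recurs
 *       current.u64 = __sync_val_compare_and_swap(&list->u64,
 *                                                 old.u64, new.u64);
 *    } while (current.u64 != old.u64);
 */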

struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool. This pointer may or may not
    * point to the actual beginning of the block pool memory. If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_bo_offset
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   uint32_t block_size;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
};

/* Block pools are backed by a fixed-size 2GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ull << 31)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}

struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
};

struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 17

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */
   uint32_t start;
   /* Offset at which to allocate the next state */
   uint32_t next;
   /* Offset at which the current block ends */
   uint32_t end;
};

#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63

static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   __builtin_ia32_mfence();
   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

static inline void
anv_state_clflush(struct anv_state state)
{
   anv_clflush_range(state.map, state.alloc_size);
}

void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
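
/* Typical lifetime of a state stream, as a sketch against the declarations
 * above (the pool choice, size, and alignment are illustrative):
 *
 *    struct anv_state_stream stream;
 *    anv_state_stream_init(&stream, &device->dynamic_state_block_pool);
 *    struct anv_state state = anv_state_stream_alloc(&stream, 256, 64);
 *    memcpy(state.map, data, 256);    // state stays valid until finish
 *    anv_state_stream_finish(&stream);
 */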

/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   void *free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
                           uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);

struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_bo bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);

extern struct anv_dispatch_table dtable;

#define VK_ICD_WSI_PLATFORM_MAX 5

struct anv_physical_device {
   VK_LOADER_DATA                               _loader_data;

   struct anv_instance *                        instance;
   uint32_t                                     chipset_id;
   char                                         path[20];
   const char *                                 name;
   struct gen_device_info                       info;
   uint64_t                                     aperture_size;
   struct brw_compiler *                        compiler;
   struct isl_device                            isl_dev;
   int                                          cmd_parser_version;

   uint32_t                                     eu_total;
   uint32_t                                     subslice_total;

   struct wsi_device                            wsi_device;
};

struct anv_instance {
   VK_LOADER_DATA                               _loader_data;

   VkAllocationCallbacks                        alloc;

   uint32_t                                     apiVersion;
   int                                          physicalDeviceCount;
   struct anv_physical_device                   physicalDevice;
};

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);

struct anv_queue {
   VK_LOADER_DATA                               _loader_data;

   struct anv_device *                          device;

   struct anv_state_pool *                      pool;
};

struct anv_pipeline_cache {
   struct anv_device *                          device;
   pthread_mutex_t                              mutex;

   struct hash_table *                          cache;
};

struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);

struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map);

struct anv_device {
   VK_LOADER_DATA                               _loader_data;

   VkAllocationCallbacks                        alloc;

   struct anv_instance *                        instance;
   uint32_t                                     chipset_id;
   struct gen_device_info                       info;
   struct isl_device                            isl_dev;
   int                                          context_id;
   int                                          fd;
   bool                                         can_chain_batches;
   bool                                         robust_buffer_access;

   struct anv_bo_pool                           batch_bo_pool;

   struct anv_block_pool                        dynamic_state_block_pool;
   struct anv_state_pool                        dynamic_state_pool;

   struct anv_block_pool                        instruction_block_pool;
   struct anv_state_pool                        instruction_state_pool;

   struct anv_block_pool                        surface_state_block_pool;
   struct anv_state_pool                        surface_state_pool;

   struct anv_bo                                workaround_bo;

   struct anv_pipeline_cache                    blorp_shader_cache;
   struct blorp_context                         blorp;

   struct anv_state                             border_colors;

   struct anv_queue                             queue;

   struct anv_scratch_pool                      scratch_pool;

   uint32_t                                     default_mocs;

   pthread_mutex_t                              mutex;
};

void anv_device_get_cache_uuid(void *uuid);

void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);

VkResult anv_device_execbuf(struct anv_device *device,
                            struct drm_i915_gem_execbuffer2 *execbuf,
                            struct anv_bo **execbuf_bos);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);

struct anv_reloc_list {
   size_t                                       num_relocs;
   size_t                                       array_length;
   struct drm_i915_gem_relocation_entry *       relocs;
   struct anv_bo **                             reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta);

struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head                             link;

   struct anv_bo                                bo;

   /* Bytes actually consumed in this batch BO */
   size_t                                       length;

   /* Last seen surface state block pool bo offset */
   uint32_t                                     last_ss_pool_bo_offset;

   struct anv_reloc_list                        relocs;
};

struct anv_batch {
   const VkAllocationCallbacks *                alloc;

   void *                                       start;
   void *                                       end;
   void *                                       next;

   struct anv_reloc_list *                      relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *                                       user_data;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);

struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address

/* Wrapper macros needed to work around preprocessor argument issues. In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
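
/* Concretely (EXAMPLE_EMIT is hypothetical): a macro whose body pasted
 * "cmd ## _length" directly would receive GENX(3DSTATE_PS) unexpanded,
 * because operands of ## are not pre-expanded. Routing through a helper
 * makes cmd an ordinary macro argument first, so it is fully expanded
 * before the paste happens:
 *
 *    #define EXAMPLE_EMIT(cmd) __anv_cmd_length(cmd)
 *    EXAMPLE_EMIT(GENX(3DSTATE_PS))   // -> GEN9_3DSTATE_PS_length on gen9
 */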

#define anv_pack_struct(dst, struc, ...) do {                              \
      struct struc __template = {                                         \
         __VA_ARGS__                                                      \
      };                                                                  \
      __anv_cmd_pack(struc)(NULL, dst, &__template);                      \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)

#define anv_batch_emitn(batch, n, cmd, ...) ({             \
      void *__dst = anv_batch_emit_dwords(batch, n);       \
      struct cmd __template = {                            \
         __anv_cmd_header(cmd),                            \
         .DWordLength = n - __anv_cmd_length_bias(cmd),    \
         __VA_ARGS__                                       \
      };                                                   \
      __anv_cmd_pack(cmd)(batch, __dst, &__template);      \
      __dst;                                               \
   })

#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
   do {                                                                 \
      uint32_t *dw;                                                     \
                                                                        \
      static_assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1), "mismatch merge"); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));   \
   } while (0)

#define anv_batch_emit(batch, cmd, name)                                \
   for (struct cmd name = { __anv_cmd_header(cmd) },                    \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd));    \
        __builtin_expect(_dst != NULL, 1);                              \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                     \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL;                                                 \
         }))
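
/* Typical use of anv_batch_emit(): the for-loop trick scopes a struct that
 * is packed into the batch when the block closes, e.g.:
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 */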

#define anv_state_pool_emit(pool, cmd, align, ...) ({                   \
      const uint32_t __size = __anv_cmd_length(cmd) * 4;                \
      struct anv_state __state =                                        \
         anv_state_pool_alloc((pool), __size, align);                   \
      struct cmd __template = {                                         \
         __VA_ARGS__                                                    \
      };                                                                \
      __anv_cmd_pack(cmd)(NULL, __state.map, &__template);              \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
      if (!(pool)->block_pool->device->info.has_llc)                    \
         anv_state_clflush(__state);                                    \
      __state;                                                          \
   })

#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) {  \
   .GraphicsDataTypeGFDT                        = 0,           \
   .LLCCacheabilityControlLLCCC                 = 0,           \
   .L3CacheabilityControlL3CC                   = 1,           \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) {  \
   .LLCeLLCCacheabilityControlLLCCC             = 0,             \
   .L3CacheabilityControlL3CC                   = 1,             \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) {  \
   .MemoryTypeLLCeLLCCacheabilityControl = WB,                 \
   .TargetCache = L3DefertoPATforLLCeLLCselection,             \
   .AgeforQUADLRU = 0                                          \
}

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */

#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) {  \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */                 \
   .IndextoMOCSTables                           = 2            \
}

#define GEN9_MOCS_PTE {                                 \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */          \
   .IndextoMOCSTables                           = 1     \
}

struct anv_device_memory {
   struct anv_bo                                bo;
   uint32_t                                     type_index;
   VkDeviceSize                                 map_size;
   void *                                       map;
};

/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t Reserved;
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
   float PointWidth;
};

struct anv_descriptor_set_binding_layout {
#ifndef NDEBUG
   /* The type of the descriptors in this binding */
   VkDescriptorType type;
#endif

   /* Number of array elements in this binding */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

   struct {
      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
      int16_t image_index;
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};

struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};

struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;
      };

      struct anv_buffer_view *buffer_view;
   };
};

struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t size;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];
};

struct anv_descriptor_pool {
   uint32_t size;
   uint32_t next;
   uint32_t free_list;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   char data[0];
};

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);

#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX

struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to. The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint8_t binding;

   /* Index in the binding */
   uint8_t index;
};

struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   unsigned char sha1[20];
};

struct anv_buffer {
   struct anv_device *                          device;
   VkDeviceSize                                 size;

   VkBufferUsageFlags                           usage;

   /* Set when bound */
   struct anv_bo *                              bo;
   VkDeviceSize                                 offset;
};

enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT             = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR              = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH           = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS           = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS      = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS         = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK   = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE    = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL                  = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE                     = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER                 = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS               = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;

enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT            = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT          = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT       = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT    = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT          = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT             = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT     = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT    = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT                  = (1 << 13),
   ANV_PIPE_CS_STALL_BIT                     = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
    * a flush has happened but not a CS stall. The next time we do any sort
    * of invalidation we need to insert a CS stall at that time. Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT               = (1 << 21),
};

#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)

struct anv_vertex_binding {
   struct anv_buffer *                          buffer;
   VkDeviceSize                                 offset;
};

struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Our hardware only provides zero-based vertex and instance IDs so, in
    * order to satisfy the Vulkan requirements, we may have to push one or
    * both of these into the shader.
    */
   uint32_t base_vertex;
   uint32_t base_instance;

   /* Offsets and ranges for dynamically bound buffers */
   struct {
      uint32_t offset;
      uint32_t range;
   } dynamic[MAX_DYNAMIC_BUFFERS];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];
};

struct anv_dynamic_state {
   struct {
      uint32_t                                  count;
      VkViewport                                viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t                                  count;
      VkRect2D                                  scissors[MAX_SCISSORS];
   } scissor;

   float                                        line_width;

   struct {
      float                                     bias;
      float                                     clamp;
      float                                     slope;
   } depth_bias;

   float                                        blend_constants[4];

   struct {
      float                                     min;
      float                                     max;
   } depth_bounds;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_compare_mask;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_write_mask;

   struct {
      uint32_t                                  front;
      uint32_t                                  back;
   } stencil_reference;
};

extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   VkImageAspectFlags                           pending_clear_aspects;
   VkClearValue                                 clear_value;
};

/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t                                     current_pipeline;
   const struct gen_l3_config *                 current_l3_config;
   uint32_t                                     vb_dirty;
   anv_cmd_dirty_mask_t                         dirty;
   anv_cmd_dirty_mask_t                         compute_dirty;
   enum anv_pipe_bits                           pending_pipe_bits;
   uint32_t                                     num_workgroups_offset;
   struct anv_bo *                              num_workgroups_bo;
   VkShaderStageFlags                           descriptors_dirty;
   VkShaderStageFlags                           push_constants_dirty;
   uint32_t                                     scratch_size;
   struct anv_pipeline *                        pipeline;
   struct anv_pipeline *                        compute_pipeline;
   struct anv_framebuffer *                     framebuffer;
   struct anv_render_pass *                     pass;
   struct anv_subpass *                         subpass;
   VkRect2D                                     render_area;
   uint32_t                                     restart_index;
   struct anv_vertex_binding                    vertex_bindings[MAX_VBS];
   struct anv_descriptor_set *                  descriptors[MAX_SETS];
   VkShaderStageFlags                           push_constant_stages;
   struct anv_push_constants *                  push_constants[MESA_SHADER_STAGES];
   struct anv_state                             binding_tables[MESA_SHADER_STAGES];
   struct anv_state                             samplers[MESA_SHADER_STAGES];
   struct anv_dynamic_state                     dynamic;
   bool                                         need_query_wa;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state *                attachments;

   struct {
      struct anv_buffer *                       index_buffer;
      uint32_t                                  index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
      uint32_t                                  index_offset;
   } gen7;
};

struct anv_cmd_pool {
   VkAllocationCallbacks                        alloc;
   struct list_head                             cmd_buffers;
};

#define ANV_CMD_BUFFER_BATCH_SIZE 8192

enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};

struct anv_cmd_buffer {
   VK_LOADER_DATA                               _loader_data;

   struct anv_device *                          device;

   struct anv_cmd_pool *                        pool;
   struct list_head                             pool_link;

   struct anv_batch                             batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head                             batch_bos;
   enum anv_cmd_buffer_exec_mode                exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector                              seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector                              bt_blocks;
   uint32_t                                     bt_next;
   struct anv_reloc_list                        surface_relocs;

   /* Information needed for execbuf
    *
    * These fields are generated by anv_cmd_buffer_prepare_execbuf().
    */
   struct {
      struct drm_i915_gem_execbuffer2           execbuf;

      struct drm_i915_gem_exec_object2 *        objects;
      uint32_t                                  bo_count;
      struct anv_bo **                          bos;

      /* Allocated length of the 'objects' and 'bos' arrays */
      uint32_t                                  array_length;
   } execbuf2;

   /* Serial for tracking buffer completion */
   uint32_t                                     serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream                      surface_state_stream;
   struct anv_state_stream                      dynamic_state_stream;

   VkCommandBufferUsageFlags                    usage_flags;
   VkCommandBufferLevel                         level;

   struct anv_cmd_state                         state;
};

VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
                                struct anv_cmd_buffer *cmd_buffer);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size);
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage,             \
      (offsetof(struct anv_push_constants, field) +                         \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
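
/* Illustrative use: grow a stage's push constants just far enough to cover
 * one field before writing it, e.g.:
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
 *                                              MESA_SHADER_VERTEX,
 *                                              base_instance);
 */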

struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);

void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                     const VkRenderPassBeginInfo *info);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);

struct anv_fence {
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

struct anv_event {
   uint64_t                                     semaphore;
   struct anv_state                             state;
};

struct anv_shader_module {
   unsigned char                                sha1[20];
   uint32_t                                     size;
   char                                         data[0];
};

void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     const struct anv_pipeline_layout *pipeline_layout,
                     const VkSpecializationInfo *spec_info);

static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
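
/* Illustrative use (the body is hypothetical): the macro declares the stage
 * variable itself and iterates each bit set in the mask:
 *
 *    anv_foreach_stage(s, pipeline->active_stages) {
 *       cmd_buffer->state.push_constants_dirty |= mesa_to_vk_shader_stage(s);
 *    }
 */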

struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding *                surface_to_descriptor;
   struct anv_pipeline_binding *                sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;

   /* Prog data follows, then params, then the key, all aligned to 8 bytes */
};

struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   __sync_fetch_and_add(&shader->ref_cnt, 1);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
      anv_shader_bin_destroy(device, shader);
}

struct anv_pipeline {
   struct anv_device *                          device;
   struct anv_batch                             batch;
   uint32_t                                     batch_data[512];
   struct anv_reloc_list                        batch_relocs;
   uint32_t                                     dynamic_state_mask;
   struct anv_dynamic_state                     dynamic_state;

   struct anv_pipeline_layout *                 layout;

   bool                                         needs_data_cache;

   struct anv_shader_bin *                      shaders[MESA_SHADER_STAGES];

   struct {
      const struct gen_l3_config *              l3_config;
      uint32_t                                  total_size;
   } urb;

   VkShaderStageFlags                           active_stages;
   struct anv_state                             blend_state;
   uint32_t                                     vs_simd8;
   uint32_t                                     vs_vec4;
   uint32_t                                     ps_ksp0;
   uint32_t                                     gs_kernel;
   uint32_t                                     cs_simd;

   uint32_t                                     vb_used;
   uint32_t                                     binding_stride[MAX_VBS];
   bool                                         instancing_enable[MAX_VBS];
   bool                                         primitive_restart;
   uint32_t                                     topology;

   uint32_t                                     cs_right_mask;

   bool                                         depth_clamp_enable;

   struct {
      uint32_t                                  sf[7];
      uint32_t                                  depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t                                  sf[4];
      uint32_t                                  raster[5];
      uint32_t                                  wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t                                  wm_depth_stencil[4];
   } gen9;
};

static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *                \
get_##prefix##_prog_data(struct anv_pipeline *pipeline)              \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
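
/* Illustrative use of the generated accessors (they return NULL when the
 * stage is not present in the pipeline; setup_ps() is hypothetical):
 *
 *    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
 *    if (wm_prog_data != NULL)
 *       setup_ps(pipeline, wm_prog_data);
 */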

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);

struct anv_format {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;
};

struct anv_format
anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
               VkImageAspectFlags aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);

/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   VkImageType type;
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */

   VkDeviceSize size;
   uint32_t alignment;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;

   /**
    * Image subsurfaces
    *
    * For each foo, anv_image::foo_surface is valid if and only if
    * anv_image::aspects has a foo aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    */
   union {
      struct anv_surface color_surface;

      struct {
         struct anv_surface depth_surface;
         struct anv_surface hiz_surface;
         struct anv_surface stencil_surface;
      };
   };
};

static inline uint32_t
anv_get_layerCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
          image->array_size - range->baseArrayLayer : range->layerCount;
}

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}


struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */

   struct isl_view isl;

   VkImageAspectFlags aspect_mask;
   VkFormat vk_format;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   /** RENDER_SURFACE_STATE when using image as a color render target. */
   struct anv_state color_rt_surface_state;

   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
   struct anv_state sampler_surface_state;

   /** RENDER_SURFACE_STATE when using image as a storage image. */
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};

struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;

   uint32_t stride;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks* alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

static inline bool
anv_image_has_hiz(const struct anv_image *image)
{
   /* We must check the aspect because anv_image::hiz_surface belongs to
    * a union.
    */
   return (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
          image->hiz_surface.isl.size > 0;
}

struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);

static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}


void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);

void anv_image_view_fill_image_param(struct anv_device *device,
                                     struct anv_image_view *view,
                                     struct brw_image_param *param);
void anv_buffer_view_fill_image_param(struct anv_device *device,
                                      struct anv_buffer_view *view,
                                      struct brw_image_param *param);

struct anv_sampler {
   uint32_t state[4];
};

struct anv_framebuffer {
   uint32_t                                     width;
   uint32_t                                     height;
   uint32_t                                     layers;

   uint32_t                                     attachment_count;
   struct anv_image_view *                      attachments[0];
};

struct anv_subpass {
   uint32_t                                     input_count;
   uint32_t *                                   input_attachments;
   uint32_t                                     color_count;
   uint32_t *                                   color_attachments;
   uint32_t *                                   resolve_attachments;
   uint32_t                                     depth_stencil_attachment;

   /** Subpass has at least one resolve attachment */
   bool                                         has_resolve;
};

struct anv_render_pass_attachment {
   VkFormat                                     format;
   uint32_t                                     samples;
   VkAttachmentLoadOp                           load_op;
   VkAttachmentStoreOp                          store_op;
   VkAttachmentLoadOp                           stencil_load_op;
};

struct anv_render_pass {
   uint32_t                                     attachment_count;
   uint32_t                                     subpass_count;
   uint32_t *                                   subpass_attachments;
   struct anv_render_pass_attachment *          attachments;
   struct anv_subpass                           subpasses[0];
};

struct anv_query_pool_slot {
   uint64_t begin;
   uint64_t end;
   uint64_t available;
};

struct anv_query_pool {
   VkQueryType                                  type;
   uint32_t                                     slots;
   struct anv_bo                                bo;
};

void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);

enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);

#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)             \
                                                                  \
   static inline struct __anv_type *                              \
   __anv_type ## _from_handle(__VkType _handle)                   \
   {                                                              \
      return (struct __anv_type *) _handle;                       \
   }                                                              \
                                                                  \
   static inline __VkType                                         \
   __anv_type ## _to_handle(struct __anv_type *_obj)              \
   {                                                              \
      return (__VkType) _obj;                                     \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)     \
                                                                  \
   static inline struct __anv_type *                              \
   __anv_type ## _from_handle(__VkType _handle)                   \
   {                                                              \
      return (struct __anv_type *)(uintptr_t) _handle;            \
   }                                                              \
                                                                  \
   static inline __VkType                                         \
   __anv_type ## _to_handle(struct __anv_type *_obj)              \
   {                                                              \
      return (__VkType)(uintptr_t) _obj;                          \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
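
/* Illustrative use at a Vulkan entrypoint boundary (_device and _image are
 * hypothetical raw handle parameters):
 *
 *    ANV_FROM_HANDLE(anv_device, device, _device);
 *    ANV_FROM_HANDLE(anv_image, image, _image);
 */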

ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)

#define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType)                    \
                                                                         \
   static inline const __VkType *                                        \
   __anv_type ## _to_ ## __VkType(const struct __anv_type *__anv_obj)    \
   {                                                                     \
      return (const __VkType *) __anv_obj;                               \
   }

#define ANV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
   const __VkType *__vk_name = anv_common_to_ ## __VkType(__common_name)

ANV_DEFINE_STRUCT_CASTS(anv_common, VkMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)

/* Gen-specific function declarations */
#ifdef genX
# include "anv_genX.h"
#else
# define genX(x) gen7_##x
# include "anv_genX.h"
# undef genX
# define genX(x) gen75_##x
# include "anv_genX.h"
# undef genX
# define genX(x) gen8_##x
# include "anv_genX.h"
# undef genX
# define genX(x) gen9_##x
# include "anv_genX.h"
# undef genX
#endif

#ifdef __cplusplus
}
#endif

#endif /* ANV_PRIVATE_H */