Added a few more stubs so that control reaches DestroyDevice().
[mesa.git] / src/intel/vulkan/anv_private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #ifndef ANV_PRIVATE_H
25 #define ANV_PRIVATE_H
26
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdbool.h>
30 #include <pthread.h>
31 #include <assert.h>
32 #include <stdint.h>
33 #include "drm-uapi/i915_drm.h"
34
35 #ifdef HAVE_VALGRIND
36 #include <valgrind.h>
37 #include <memcheck.h>
38 #define VG(x) x
39 #ifndef NDEBUG
40 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
41 #endif
42 #else
43 #define VG(x) ((void)0)
44 #endif
45
46 #include "common/gen_clflush.h"
47 #include "common/gen_decoder.h"
48 #include "common/gen_gem.h"
49 #include "common/gen_l3_config.h"
50 #include "dev/gen_device_info.h"
51 #include "blorp/blorp.h"
52 #include "compiler/brw_compiler.h"
53 #include "util/bitset.h"
54 #include "util/macros.h"
55 #include "util/hash_table.h"
56 #include "util/list.h"
57 #include "util/sparse_array.h"
58 #include "util/u_atomic.h"
59 #include "util/u_vector.h"
60 #include "util/u_math.h"
61 #include "util/vma.h"
62 #include "util/xmlconfig.h"
63 #include "vk_alloc.h"
64 #include "vk_debug_report.h"
65 #include "vk_object.h"
66
67 /* Pre-declarations needed for WSI entrypoints */
68 struct wl_surface;
69 struct wl_display;
70 typedef struct xcb_connection_t xcb_connection_t;
71 typedef uint32_t xcb_visualid_t;
72 typedef uint32_t xcb_window_t;
73
74 struct anv_batch;
75 struct anv_buffer;
76 struct anv_buffer_view;
77 struct anv_image_view;
78 struct anv_instance;
79
80 struct gen_aux_map_context;
81 struct gen_perf_config;
82 struct gen_perf_counter_pass;
83 struct gen_perf_query_result;
84
85 #include <vulkan/vulkan.h>
86 #include <vulkan/vulkan_intel.h>
87 #include <vulkan/vk_icd.h>
88
89 #include "anv_android.h"
90 #include "anv_entrypoints.h"
91 #include "anv_extensions.h"
92 #include "isl/isl.h"
93
94 #include "dev/gen_debug.h"
95 #include "common/intel_log.h"
96 #include "wsi_common.h"
97
98 #define NSEC_PER_SEC 1000000000ull
99
100 /* anv Virtual Memory Layout
101 * =========================
102 *
103 * When the anv driver is determining the virtual graphics addresses of memory
104 * objects itself using the softpin mechanism, the following memory ranges
105 * will be used.
106 *
107 * Three special considerations to notice:
108 *
109 * (1) the dynamic state pool is located within the same 4 GiB as the low
110 * heap. This is to work around a VF cache issue described in a comment in
111 * anv_physical_device_init_heaps.
112 *
113 * (2) the binding table pool is located at lower addresses than the surface
114 * state pool, within a 4 GiB range. This allows surface state base addresses
115 * to cover both binding tables (16 bit offsets) and surface states (32 bit
116 * offsets).
117 *
118 * (3) the last 4 GiB of the address space is withheld from the high
119 * heap. Various hardware units will read past the end of an object for
120 * various reasons. This healthy margin prevents reads from wrapping around
121 * 48-bit addresses.
122 */
123 #define LOW_HEAP_MIN_ADDRESS 0x000000001000ULL /* 4 KiB */
124 #define LOW_HEAP_MAX_ADDRESS 0x0000bfffffffULL
125 #define DYNAMIC_STATE_POOL_MIN_ADDRESS 0x0000c0000000ULL /* 3 GiB */
126 #define DYNAMIC_STATE_POOL_MAX_ADDRESS 0x0000ffffffffULL
127 #define BINDING_TABLE_POOL_MIN_ADDRESS 0x000100000000ULL /* 4 GiB */
128 #define BINDING_TABLE_POOL_MAX_ADDRESS 0x00013fffffffULL
129 #define SURFACE_STATE_POOL_MIN_ADDRESS 0x000140000000ULL /* 5 GiB */
130 #define SURFACE_STATE_POOL_MAX_ADDRESS 0x00017fffffffULL
131 #define INSTRUCTION_STATE_POOL_MIN_ADDRESS 0x000180000000ULL /* 6 GiB */
132 #define INSTRUCTION_STATE_POOL_MAX_ADDRESS 0x0001bfffffffULL
133 #define CLIENT_VISIBLE_HEAP_MIN_ADDRESS 0x0001c0000000ULL /* 7 GiB */
134 #define CLIENT_VISIBLE_HEAP_MAX_ADDRESS 0x0002bfffffffULL
135 #define HIGH_HEAP_MIN_ADDRESS 0x0002c0000000ULL /* 11 GiB */
136
137 #define LOW_HEAP_SIZE \
138 (LOW_HEAP_MAX_ADDRESS - LOW_HEAP_MIN_ADDRESS + 1)
139 #define DYNAMIC_STATE_POOL_SIZE \
140 (DYNAMIC_STATE_POOL_MAX_ADDRESS - DYNAMIC_STATE_POOL_MIN_ADDRESS + 1)
141 #define BINDING_TABLE_POOL_SIZE \
142 (BINDING_TABLE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS + 1)
143 #define SURFACE_STATE_POOL_SIZE \
144 (SURFACE_STATE_POOL_MAX_ADDRESS - SURFACE_STATE_POOL_MIN_ADDRESS + 1)
145 #define INSTRUCTION_STATE_POOL_SIZE \
146 (INSTRUCTION_STATE_POOL_MAX_ADDRESS - INSTRUCTION_STATE_POOL_MIN_ADDRESS + 1)
147 #define CLIENT_VISIBLE_HEAP_SIZE \
148 (CLIENT_VISIBLE_HEAP_MAX_ADDRESS - CLIENT_VISIBLE_HEAP_MIN_ADDRESS + 1)
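
/* Illustrative only, not part of the driver: the "special considerations"
 * above can be checked directly against the numbers, e.g.
 *
 *    DYNAMIC_STATE_POOL_MAX_ADDRESS == 0x0000ffffffffULL, i.e. the dynamic
 *    state pool still lives below 4 GiB, in the same 4 GiB range as the low
 *    heap (consideration 1), and
 *
 *    SURFACE_STATE_POOL_MAX_ADDRESS - BINDING_TABLE_POOL_MIN_ADDRESS ==
 *    0x7fffffffULL, so binding tables sit at lower addresses than surface
 *    states and both pools fit in a single 4 GiB surface-state-base range
 *    (consideration 2).
 */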
149
150 /* Allowing different clear colors requires us to perform a depth resolve at
151 * the end of certain render passes. This is because while slow clears store
152 * the clear color in the HiZ buffer, fast clears (without a resolve) don't.
153 * See the PRMs for examples describing when additional resolves would be
154 * necessary. To enable fast clears without requiring extra resolves, we set
155 * the clear value to a globally-defined one. We could allow different values
156 * if the user doesn't expect coherent data during or after a render pass
157 * (VK_ATTACHMENT_STORE_OP_DONT_CARE), but such users (aside from the CTS)
158 * don't seem to exist yet. In almost all Vulkan applications tested thus far,
159 * 1.0f seems to be the only value used. The only application that doesn't set
160 * this value does so through the use of a seemingly uninitialized clear
161 * value.
162 */
163 #define ANV_HZ_FC_VAL 1.0f
164
165 #define MAX_VBS 28
166 #define MAX_XFB_BUFFERS 4
167 #define MAX_XFB_STREAMS 4
168 #define MAX_SETS 8
169 #define MAX_RTS 8
170 #define MAX_VIEWPORTS 16
171 #define MAX_SCISSORS 16
172 #define MAX_PUSH_CONSTANTS_SIZE 128
173 #define MAX_DYNAMIC_BUFFERS 16
174 #define MAX_IMAGES 64
175 #define MAX_PUSH_DESCRIPTORS 32 /* Minimum requirement */
176 #define MAX_INLINE_UNIFORM_BLOCK_SIZE 4096
177 #define MAX_INLINE_UNIFORM_BLOCK_DESCRIPTORS 32
178 /* We need 16 for UBO block reads to work and 32 for push UBOs. However, we
179 * use 64 here to avoid cache issues. This could most likely bring it back to
180 * 32 if we had different virtual addresses for the different views on a given
181 * GEM object.
182 */
183 #define ANV_UBO_ALIGNMENT 64
184 #define ANV_SSBO_ALIGNMENT 4
185 #define ANV_SSBO_BOUNDS_CHECK_ALIGNMENT 4
186 #define MAX_VIEWS_FOR_PRIMITIVE_REPLICATION 16
187
188 /* From the Skylake PRM Vol. 7 "Binding Table Surface State Model":
189 *
190 * "The surface state model is used when a Binding Table Index (specified
191 * in the message descriptor) of less than 240 is specified. In this model,
192 * the Binding Table Index is used to index into the binding table, and the
193 * binding table entry contains a pointer to the SURFACE_STATE."
194 *
195 * Binding table values above 240 are used for various things in the hardware
196 * such as stateless, stateless with incoherent cache, SLM, and bindless.
197 */
198 #define MAX_BINDING_TABLE_SIZE 240
199
200 /* The kernel relocation API has a limitation of a 32-bit delta value
201 * applied to the address before it is written which, in spite of it being
202 * unsigned, is treated as signed. Because of the way that this maps to
203 * the Vulkan API, we cannot handle an offset into a buffer that does not
204 * fit into a signed 32 bits. The only mechanism we have for dealing with
205 * this at the moment is to limit all VkDeviceMemory objects to a maximum
206 * of 2GB each. The Vulkan spec allows us to do this:
207 *
208 * "Some platforms may have a limit on the maximum size of a single
209 * allocation. For example, certain systems may fail to create
210 * allocations with a size greater than or equal to 4GB. Such a limit is
211 * implementation-dependent, and if such a failure occurs then the error
212 * VK_ERROR_OUT_OF_DEVICE_MEMORY should be returned."
213 *
214 * We don't use vk_error here because it's not an error so much as an
215 * indication to the application that the allocation is too large.
216 */
217 #define MAX_MEMORY_ALLOCATION_SIZE (1ull << 31)
218
219 #define ANV_SVGS_VB_INDEX MAX_VBS
220 #define ANV_DRAWID_VB_INDEX (MAX_VBS + 1)
221
222 /* We reserve this MI ALU register for the purpose of handling predication.
223 * Other code which uses the MI ALU should leave it alone.
224 */
225 #define ANV_PREDICATE_RESULT_REG 0x2678 /* MI_ALU_REG15 */
226
227 /* We reserve this MI ALU register to pass around an offset computed from
228 * VkPerformanceQuerySubmitInfoKHR::counterPassIndex (VK_KHR_performance_query).
229 * Other code which uses the MI ALU should leave it alone.
230 */
231 #define ANV_PERF_QUERY_OFFSET_REG 0x2670 /* MI_ALU_REG14 */
232
233 /* For gen12 we set the streamout buffers using 4 separate commands
234 * (3DSTATE_SO_BUFFER_INDEX_*) instead of 3DSTATE_SO_BUFFER. However the layout
235 * of the 3DSTATE_SO_BUFFER_INDEX_* commands is identical to that of
236 * 3DSTATE_SO_BUFFER apart from the SOBufferIndex field, so for now we use the
237 * 3DSTATE_SO_BUFFER command, but change the 3DCommandSubOpcode.
238 * SO_BUFFER_INDEX_0_CMD is actually the 3DCommandSubOpcode for
239 * 3DSTATE_SO_BUFFER_INDEX_0.
240 */
241 #define SO_BUFFER_INDEX_0_CMD 0x60
242 #define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
243
244 static inline uint32_t
245 align_down_npot_u32(uint32_t v, uint32_t a)
246 {
247 return v - (v % a);
248 }
249
250 static inline uint32_t
251 align_down_u32(uint32_t v, uint32_t a)
252 {
253 assert(a != 0 && a == (a & -a));
254 return v & ~(a - 1);
255 }
256
257 static inline uint32_t
258 align_u32(uint32_t v, uint32_t a)
259 {
260 assert(a != 0 && a == (a & -a));
261 return align_down_u32(v + a - 1, a);
262 }
263
264 static inline uint64_t
265 align_down_u64(uint64_t v, uint64_t a)
266 {
267 assert(a != 0 && a == (a & -a));
268 return v & ~(a - 1);
269 }
270
271 static inline uint64_t
272 align_u64(uint64_t v, uint64_t a)
273 {
274 return align_down_u64(v + a - 1, a);
275 }
276
277 static inline int32_t
278 align_i32(int32_t v, int32_t a)
279 {
280 assert(a != 0 && a == (a & -a));
281 return (v + a - 1) & ~(a - 1);
282 }
283
284 /** Alignment must be a power of 2. */
285 static inline bool
286 anv_is_aligned(uintmax_t n, uintmax_t a)
287 {
288 assert(a == (a & -a));
289 return (n & (a - 1)) == 0;
290 }
291
292 static inline uint32_t
293 anv_minify(uint32_t n, uint32_t levels)
294 {
295 if (unlikely(n == 0))
296 return 0;
297 else
298 return MAX2(n >> levels, 1);
299 }
300
301 static inline float
302 anv_clamp_f(float f, float min, float max)
303 {
304 assert(min < max);
305
306 if (f > max)
307 return max;
308 else if (f < min)
309 return min;
310 else
311 return f;
312 }
313
314 static inline bool
315 anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
316 {
317 if (*inout_mask & clear_mask) {
318 *inout_mask &= ~clear_mask;
319 return true;
320 } else {
321 return false;
322 }
323 }
324
325 static inline union isl_color_value
326 vk_to_isl_color(VkClearColorValue color)
327 {
328 return (union isl_color_value) {
329 .u32 = {
330 color.uint32[0],
331 color.uint32[1],
332 color.uint32[2],
333 color.uint32[3],
334 },
335 };
336 }
337
338 static inline void *anv_unpack_ptr(uintptr_t ptr, int bits, int *flags)
339 {
340 uintptr_t mask = (1ull << bits) - 1;
341 *flags = ptr & mask;
342 return (void *) (ptr & ~mask);
343 }
344
345 static inline uintptr_t anv_pack_ptr(void *ptr, int bits, int flags)
346 {
347 uintptr_t value = (uintptr_t) ptr;
348 uintptr_t mask = (1ull << bits) - 1;
349 return value | (mask & flags);
350 }
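
/* Illustrative usage only (hypothetical names): anv_pack_ptr/anv_unpack_ptr
 * stash small flags in the low bits of a sufficiently aligned pointer. This
 * is the scheme anv_queue_submit::fence_bos uses to tag each BO pointer with
 * a wait flag in its lowest bit:
 *
 *    uintptr_t packed = anv_pack_ptr(bo, 1, should_wait ? 1 : 0);
 *    ...
 *    int wait;
 *    struct anv_bo *unpacked_bo = anv_unpack_ptr(packed, 1, &wait);
 */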
351
352 #define for_each_bit(b, dword) \
353 for (uint32_t __dword = (dword); \
354 (b) = __builtin_ffs(__dword) - 1, __dword; \
355 __dword &= ~(1 << (b)))
356
357 #define typed_memcpy(dest, src, count) ({ \
358 STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
359 memcpy((dest), (src), (count) * sizeof(*(src))); \
360 })
361
362 /* Mapping from anv object to VkDebugReportObjectTypeEXT. New types need
363 * to be added here in order to utilize mapping in debug/error/perf macros.
364 */
365 #define REPORT_OBJECT_TYPE(o) \
366 __builtin_choose_expr ( \
367 __builtin_types_compatible_p (__typeof (o), struct anv_instance*), \
368 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, \
369 __builtin_choose_expr ( \
370 __builtin_types_compatible_p (__typeof (o), struct anv_physical_device*), \
371 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, \
372 __builtin_choose_expr ( \
373 __builtin_types_compatible_p (__typeof (o), struct anv_device*), \
374 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
375 __builtin_choose_expr ( \
376 __builtin_types_compatible_p (__typeof (o), const struct anv_device*), \
377 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, \
378 __builtin_choose_expr ( \
379 __builtin_types_compatible_p (__typeof (o), struct anv_queue*), \
380 VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, \
381 __builtin_choose_expr ( \
382 __builtin_types_compatible_p (__typeof (o), struct anv_semaphore*), \
383 VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, \
384 __builtin_choose_expr ( \
385 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_buffer*), \
386 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, \
387 __builtin_choose_expr ( \
388 __builtin_types_compatible_p (__typeof (o), struct anv_fence*), \
389 VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, \
390 __builtin_choose_expr ( \
391 __builtin_types_compatible_p (__typeof (o), struct anv_device_memory*), \
392 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, \
393 __builtin_choose_expr ( \
394 __builtin_types_compatible_p (__typeof (o), struct anv_buffer*), \
395 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, \
396 __builtin_choose_expr ( \
397 __builtin_types_compatible_p (__typeof (o), struct anv_image*), \
398 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
399 __builtin_choose_expr ( \
400 __builtin_types_compatible_p (__typeof (o), const struct anv_image*), \
401 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, \
402 __builtin_choose_expr ( \
403 __builtin_types_compatible_p (__typeof (o), struct anv_event*), \
404 VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT, \
405 __builtin_choose_expr ( \
406 __builtin_types_compatible_p (__typeof (o), struct anv_query_pool*), \
407 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, \
408 __builtin_choose_expr ( \
409 __builtin_types_compatible_p (__typeof (o), struct anv_buffer_view*), \
410 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT, \
411 __builtin_choose_expr ( \
412 __builtin_types_compatible_p (__typeof (o), struct anv_image_view*), \
413 VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT, \
414 __builtin_choose_expr ( \
415 __builtin_types_compatible_p (__typeof (o), struct anv_shader_module*), \
416 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, \
417 __builtin_choose_expr ( \
418 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_cache*), \
419 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, \
420 __builtin_choose_expr ( \
421 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline_layout*), \
422 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, \
423 __builtin_choose_expr ( \
424 __builtin_types_compatible_p (__typeof (o), struct anv_render_pass*), \
425 VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, \
426 __builtin_choose_expr ( \
427 __builtin_types_compatible_p (__typeof (o), struct anv_pipeline*), \
428 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, \
429 __builtin_choose_expr ( \
430 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set_layout*), \
431 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, \
432 __builtin_choose_expr ( \
433 __builtin_types_compatible_p (__typeof (o), struct anv_sampler*), \
434 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT, \
435 __builtin_choose_expr ( \
436 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_pool*), \
437 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, \
438 __builtin_choose_expr ( \
439 __builtin_types_compatible_p (__typeof (o), struct anv_descriptor_set*), \
440 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, \
441 __builtin_choose_expr ( \
442 __builtin_types_compatible_p (__typeof (o), struct anv_framebuffer*), \
443 VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT, \
444 __builtin_choose_expr ( \
445 __builtin_types_compatible_p (__typeof (o), struct anv_cmd_pool*), \
446 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, \
447 __builtin_choose_expr ( \
448 __builtin_types_compatible_p (__typeof (o), struct anv_surface*), \
449 VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT, \
450 __builtin_choose_expr ( \
451 __builtin_types_compatible_p (__typeof (o), struct wsi_swapchain*), \
452 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT, \
453 __builtin_choose_expr ( \
454 __builtin_types_compatible_p (__typeof (o), struct vk_debug_callback*), \
455 VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, \
456 __builtin_choose_expr ( \
457 __builtin_types_compatible_p (__typeof (o), void*), \
458 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, \
459 /* The void expression results in a compile-time error \
460 when assigning the result to something. */ \
461 (void)0)))))))))))))))))))))))))))))))
462
463 /* Whenever we generate an error, pass it through this function. Useful for
464 * debugging, where we can break on it. Only call at error site, not when
465 * propagating errors. Might be useful to plug in a stack trace here.
466 */
467
468 VkResult __vk_errorv(struct anv_instance *instance, const void *object,
469 VkDebugReportObjectTypeEXT type, VkResult error,
470 const char *file, int line, const char *format,
471 va_list args);
472
473 VkResult __vk_errorf(struct anv_instance *instance, const void *object,
474 VkDebugReportObjectTypeEXT type, VkResult error,
475 const char *file, int line, const char *format, ...)
476 anv_printflike(7, 8);
477
478 #ifdef DEBUG
479 #define vk_error(error) __vk_errorf(NULL, NULL,\
480 VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,\
481 error, __FILE__, __LINE__, NULL)
482 #define vk_errorfi(instance, obj, error, format, ...)\
483 __vk_errorf(instance, obj, REPORT_OBJECT_TYPE(obj), error,\
484 __FILE__, __LINE__, format, ## __VA_ARGS__)
485 #define vk_errorf(device, obj, error, format, ...)\
486 vk_errorfi(anv_device_instance_or_null(device),\
487 obj, error, format, ## __VA_ARGS__)
488 #else
489 #define vk_error(error) error
490 #define vk_errorfi(instance, obj, error, format, ...) error
491 #define vk_errorf(device, obj, error, format, ...) error
492 #endif
493
494 /**
495 * Warn on ignored extension structs.
496 *
497 * The Vulkan spec requires us to ignore unsupported or unknown structs in
498 * a pNext chain. In debug mode, emitting warnings for ignored structs may
499 * help us discover structs that we should not have ignored.
500 *
501 *
502 * From the Vulkan 1.0.38 spec:
503 *
504 * Any component of the implementation (the loader, any enabled layers,
505 * and drivers) must skip over, without processing (other than reading the
506 * sType and pNext members) any chained structures with sType values not
507 * defined by extensions supported by that component.
508 */
509 #define anv_debug_ignored_stype(sType) \
510 intel_logd("%s: ignored VkStructureType %u\n", __func__, (sType))
511
512 void __anv_perf_warn(struct anv_device *device, const void *object,
513 VkDebugReportObjectTypeEXT type, const char *file,
514 int line, const char *format, ...)
515 anv_printflike(6, 7);
516 void anv_loge(const char *format, ...) anv_printflike(1, 2);
517 void anv_loge_v(const char *format, va_list va);
518
519 /**
520 * Print a FINISHME message, including its source location.
521 */
522 #define anv_finishme(format, ...) \
523 do { \
524 static bool reported = false; \
525 if (!reported) { \
526 intel_logw("%s:%d: FINISHME: " format, __FILE__, __LINE__, \
527 ##__VA_ARGS__); \
528 reported = true; \
529 } \
530 } while (0)
531
532 /**
533 * Print a perf warning message. Set INTEL_DEBUG=perf to see these.
534 */
535 #define anv_perf_warn(instance, obj, format, ...) \
536 do { \
537 static bool reported = false; \
538 if (!reported && unlikely(INTEL_DEBUG & DEBUG_PERF)) { \
539 __anv_perf_warn(instance, obj, REPORT_OBJECT_TYPE(obj), __FILE__, __LINE__,\
540 format, ##__VA_ARGS__); \
541 reported = true; \
542 } \
543 } while (0)
544
545 /* A non-fatal assert. Useful for debugging. */
546 #ifdef DEBUG
547 #define anv_assert(x) ({ \
548 if (unlikely(!(x))) \
549 intel_loge("%s:%d ASSERT: %s", __FILE__, __LINE__, #x); \
550 })
551 #else
552 #define anv_assert(x)
553 #endif
554
555 /* A multi-pointer allocator
556 *
557 * When copying data structures from the user (such as a render pass), it's
558 * common to need to allocate data for a bunch of different things. Instead
559 * of doing several allocations and having to handle all of the error checking
560 * that entails, it can be easier to do a single allocation. This struct
561 * helps facilitate that. The intended usage looks like this:
562 *
563 * ANV_MULTIALLOC(ma)
564 * anv_multialloc_add(&ma, &main_ptr, 1);
565 * anv_multialloc_add(&ma, &substruct1, substruct1Count);
566 * anv_multialloc_add(&ma, &substruct2, substruct2Count);
567 *
568 * if (!anv_multialloc_alloc(&ma, pAllocator, VK_ALLOCATION_SCOPE_FOO))
569 * return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
570 */
571 struct anv_multialloc {
572 size_t size;
573 size_t align;
574
575 uint32_t ptr_count;
576 void **ptrs[8];
577 };
578
579 #define ANV_MULTIALLOC_INIT \
580 ((struct anv_multialloc) { 0, })
581
582 #define ANV_MULTIALLOC(_name) \
583 struct anv_multialloc _name = ANV_MULTIALLOC_INIT
584
585 __attribute__((always_inline))
586 static inline void
587 _anv_multialloc_add(struct anv_multialloc *ma,
588 void **ptr, size_t size, size_t align)
589 {
590 size_t offset = align_u64(ma->size, align);
591 ma->size = offset + size;
592 ma->align = MAX2(ma->align, align);
593
594 /* Store the offset in the pointer. */
595 *ptr = (void *)(uintptr_t)offset;
596
597 assert(ma->ptr_count < ARRAY_SIZE(ma->ptrs));
598 ma->ptrs[ma->ptr_count++] = ptr;
599 }
600
601 #define anv_multialloc_add_size(_ma, _ptr, _size) \
602 _anv_multialloc_add((_ma), (void **)(_ptr), (_size), __alignof__(**(_ptr)))
603
604 #define anv_multialloc_add(_ma, _ptr, _count) \
605 anv_multialloc_add_size(_ma, _ptr, (_count) * sizeof(**(_ptr)));
606
607 __attribute__((always_inline))
608 static inline void *
609 anv_multialloc_alloc(struct anv_multialloc *ma,
610 const VkAllocationCallbacks *alloc,
611 VkSystemAllocationScope scope)
612 {
613 void *ptr = vk_alloc(alloc, ma->size, ma->align, scope);
614 if (!ptr)
615 return NULL;
616
617 /* Fill out each of the pointers with their final value.
618 *
619 * for (uint32_t i = 0; i < ma->ptr_count; i++)
620 * *ma->ptrs[i] = ptr + (uintptr_t)*ma->ptrs[i];
621 *
622 * Unfortunately, even though ma->ptr_count is basically guaranteed to be a
623 * constant, GCC is incapable of figuring this out and unrolling the loop
624 * so we have to give it a little help.
625 */
626 STATIC_ASSERT(ARRAY_SIZE(ma->ptrs) == 8);
627 #define _ANV_MULTIALLOC_UPDATE_POINTER(_i) \
628 if ((_i) < ma->ptr_count) \
629 *ma->ptrs[_i] = ptr + (uintptr_t)*ma->ptrs[_i]
630 _ANV_MULTIALLOC_UPDATE_POINTER(0);
631 _ANV_MULTIALLOC_UPDATE_POINTER(1);
632 _ANV_MULTIALLOC_UPDATE_POINTER(2);
633 _ANV_MULTIALLOC_UPDATE_POINTER(3);
634 _ANV_MULTIALLOC_UPDATE_POINTER(4);
635 _ANV_MULTIALLOC_UPDATE_POINTER(5);
636 _ANV_MULTIALLOC_UPDATE_POINTER(6);
637 _ANV_MULTIALLOC_UPDATE_POINTER(7);
638 #undef _ANV_MULTIALLOC_UPDATE_POINTER
639
640 return ptr;
641 }
642
643 __attribute__((always_inline))
644 static inline void *
645 anv_multialloc_alloc2(struct anv_multialloc *ma,
646 const VkAllocationCallbacks *parent_alloc,
647 const VkAllocationCallbacks *alloc,
648 VkSystemAllocationScope scope)
649 {
650 return anv_multialloc_alloc(ma, alloc ? alloc : parent_alloc, scope);
651 }
652
653 struct anv_bo {
654 uint32_t gem_handle;
655
656 uint32_t refcount;
657
658 /* Index into the current validation list. This is used by the
659 * validation list building algorithm to track which buffers are already
660 * in the validation list so that we can ensure uniqueness.
661 */
662 uint32_t index;
663
664 /* Index for use with util_sparse_array_free_list */
665 uint32_t free_index;
666
667 /* Last known offset. This value is provided by the kernel when we
668 * execbuf and is used as the presumed offset for the next bunch of
669 * relocations.
670 */
671 uint64_t offset;
672
673 /** Size of the buffer not including implicit aux */
674 uint64_t size;
675
676 /* Map for internally mapped BOs.
677 *
678 * If ANV_BO_WRAPPER is set in flags, map points to the wrapped BO.
679 */
680 void *map;
681
682 /** Size of the implicit CCS range at the end of the buffer
683 *
684 * On Gen12, CCS data is always a direct 1/256 scale-down. A single 64K
685 * page of main surface data maps to a 256B chunk of CCS data and that
686 * mapping is provided on TGL-LP by the AUX table which maps virtual memory
687 * addresses in the main surface to virtual memory addresses for CCS data.
688 *
689 * Because we can't change these maps around easily and because Vulkan
690 * allows two VkImages to be bound to overlapping memory regions (as long
691 * as the app is careful), it's not feasible to make this mapping part of
692 * the image. (On Gen11 and earlier, the mapping was provided via
693 * RENDER_SURFACE_STATE so each image had its own main -> CCS mapping.)
694 * Instead, we attach the CCS data directly to the buffer object and set up
695 * the AUX table mapping at BO creation time.
696 *
697 * This field is for internal tracking use by the BO allocator only and
698 * should not be touched by other parts of the code. If something wants to
699 * know if a BO has implicit CCS data, it should instead look at the
700 * has_implicit_ccs boolean below.
701 *
702 * This data is not included in maps of this buffer.
703 */
704 uint32_t _ccs_size;
705
706 /** Flags to pass to the kernel through drm_i915_exec_object2::flags */
707 uint32_t flags;
708
709 /** True if this BO may be shared with other processes */
710 bool is_external:1;
711
712 /** True if this BO is a wrapper
713 *
714 * When set to true, none of the fields in this BO are meaningful except
715 * for anv_bo::is_wrapper and anv_bo::map which points to the actual BO.
716 * See also anv_bo_unwrap(). Wrapper BOs are not allowed when use_softpin
717 * is set in the physical device.
718 */
719 bool is_wrapper:1;
720
721 /** See also ANV_BO_ALLOC_FIXED_ADDRESS */
722 bool has_fixed_address:1;
723
724 /** True if this BO wraps a host pointer */
725 bool from_host_ptr:1;
726
727 /** See also ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS */
728 bool has_client_visible_address:1;
729
730 /** True if this BO has implicit CCS data attached to it */
731 bool has_implicit_ccs:1;
732 };
733
734 static inline struct anv_bo *
735 anv_bo_ref(struct anv_bo *bo)
736 {
737 p_atomic_inc(&bo->refcount);
738 return bo;
739 }
740
741 static inline struct anv_bo *
742 anv_bo_unwrap(struct anv_bo *bo)
743 {
744 while (bo->is_wrapper)
745 bo = bo->map;
746 return bo;
747 }
748
749 /* Represents a lock-free linked list of "free" things. This is used by
750 * both the block pool and the state pools. Unfortunately, in order to
751 * solve the ABA problem, we can't use a single uint32_t head.
752 */
753 union anv_free_list {
754 struct {
755 uint32_t offset;
756
757 /* A simple count that is incremented every time the head changes. */
758 uint32_t count;
759 };
760 /* Make sure it's aligned to 64 bits. This will make atomic operations
761 * faster on 32 bit platforms.
762 */
763 uint64_t u64 __attribute__ ((aligned (8)));
764 };
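
/* Illustrative sketch only, with hypothetical helpers (entry_at, next_offset);
 * the real push/pop live in the allocator code. A lock-free pop over this
 * union snapshots the whole 64-bit head and bumps the count on every update,
 * so a head that was popped and later re-pushed at the same offset (the ABA
 * case) no longer compares equal:
 *
 *    union anv_free_list current, new, old;
 *    current.u64 = list->u64;
 *    while (current.offset != UINT32_MAX) {
 *       // read the "next" offset of the entry at current.offset
 *       new.offset = next_offset;
 *       new.count = current.count + 1;
 *       old.u64 = __sync_val_compare_and_swap(&list->u64,
 *                                             current.u64, new.u64);
 *       if (old.u64 == current.u64)
 *          return entry_at(current.offset);   // success
 *       current = old;                        // lost the race, retry
 *    }
 *    return NULL;                             // list was empty
 */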
765
766 #define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { UINT32_MAX, 0 } })
767
768 struct anv_block_state {
769 union {
770 struct {
771 uint32_t next;
772 uint32_t end;
773 };
774 /* Make sure it's aligned to 64 bits. This will make atomic operations
775 * faster on 32 bit platforms.
776 */
777 uint64_t u64 __attribute__ ((aligned (8)));
778 };
779 };
780
781 #define anv_block_pool_foreach_bo(bo, pool) \
782 for (struct anv_bo **_pp_bo = (pool)->bos, *bo; \
783 _pp_bo != &(pool)->bos[(pool)->nbos] && (bo = *_pp_bo, true); \
784 _pp_bo++)
785
786 #define ANV_MAX_BLOCK_POOL_BOS 20
787
788 struct anv_block_pool {
789 struct anv_device *device;
790 bool use_softpin;
791
792 /* Wrapper BO for use in relocation lists. This BO is simply a wrapper
793 * around the actual BO so that we can grow the pool after the wrapper BO has
794 * been put in a relocation list. This is only used in the non-softpin
795 * case.
796 */
797 struct anv_bo wrapper_bo;
798
799 struct anv_bo *bos[ANV_MAX_BLOCK_POOL_BOS];
800 struct anv_bo *bo;
801 uint32_t nbos;
802
803 uint64_t size;
804
805 /* The address where the start of the pool is pinned. The various bos that
806 * are created as the pool grows will have addresses in the range
807 * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
808 */
809 uint64_t start_address;
810
811 /* The offset from the start of the bo to the "center" of the block
812 * pool. Pointers to allocated blocks are given by
813 * bo.map + center_bo_offset + offsets.
814 */
815 uint32_t center_bo_offset;
816
817 /* Current memory map of the block pool. This pointer may or may not
818 * point to the actual beginning of the block pool memory. If
819 * anv_block_pool_alloc_back has ever been called, then this pointer
820 * will point to the "center" position of the buffer and all offsets
821 * (negative or positive) given out by the block pool alloc functions
822 * will be valid relative to this pointer.
823 *
824 * In particular, map == bo.map + center_offset
825 *
826 * DO NOT access this pointer directly. Use anv_block_pool_map() instead,
827 * since it will handle the softpin case as well, where this points to NULL.
828 */
829 void *map;
830 int fd;
831
832 /**
833 * Array of mmaps and gem handles owned by the block pool, reclaimed when
834 * the block pool is destroyed.
835 */
836 struct u_vector mmap_cleanups;
837
838 struct anv_block_state state;
839
840 struct anv_block_state back_state;
841 };
842
843 /* Block pools are backed by a fixed-size 1GB memfd */
844 #define BLOCK_POOL_MEMFD_SIZE (1ul << 30)
845
846 /* The center of the block pool is also the middle of the memfd. This may
847 * change in the future if we decide differently for some reason.
848 */
849 #define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
850
851 static inline uint32_t
852 anv_block_pool_size(struct anv_block_pool *pool)
853 {
854 return pool->state.end + pool->back_state.end;
855 }
856
857 struct anv_state {
858 int32_t offset;
859 uint32_t alloc_size;
860 void *map;
861 uint32_t idx;
862 };
863
864 #define ANV_STATE_NULL ((struct anv_state) { .alloc_size = 0 })
865
866 struct anv_fixed_size_state_pool {
867 union anv_free_list free_list;
868 struct anv_block_state block;
869 };
870
871 #define ANV_MIN_STATE_SIZE_LOG2 6
872 #define ANV_MAX_STATE_SIZE_LOG2 21
873
874 #define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)
875
876 struct anv_free_entry {
877 uint32_t next;
878 struct anv_state state;
879 };
880
881 struct anv_state_table {
882 struct anv_device *device;
883 int fd;
884 struct anv_free_entry *map;
885 uint32_t size;
886 struct anv_block_state state;
887 struct u_vector cleanups;
888 };
889
890 struct anv_state_pool {
891 struct anv_block_pool block_pool;
892
893 /* Offset into the relevant state base address where the state pool starts
894 * allocating memory.
895 */
896 int32_t start_offset;
897
898 struct anv_state_table table;
899
900 /* The size of blocks which will be allocated from the block pool */
901 uint32_t block_size;
902
903 /** Free list for "back" allocations */
904 union anv_free_list back_alloc_free_list;
905
906 struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
907 };
908
909 struct anv_state_reserved_pool {
910 struct anv_state_pool *pool;
911 union anv_free_list reserved_blocks;
912 uint32_t count;
913 };
914
915 struct anv_state_stream {
916 struct anv_state_pool *state_pool;
917
918 /* The size of blocks to allocate from the state pool */
919 uint32_t block_size;
920
921 /* Current block we're allocating from */
922 struct anv_state block;
923
924 /* Offset into the current block at which to allocate the next state */
925 uint32_t next;
926
927 /* List of all blocks allocated from this pool */
928 struct util_dynarray all_blocks;
929 };
930
931 /* The block_pool functions are exported for testing only. The block pool should
932 * only be used via a state pool (see below).
933 */
934 VkResult anv_block_pool_init(struct anv_block_pool *pool,
935 struct anv_device *device,
936 uint64_t start_address,
937 uint32_t initial_size);
938 void anv_block_pool_finish(struct anv_block_pool *pool);
939 int32_t anv_block_pool_alloc(struct anv_block_pool *pool,
940 uint32_t block_size, uint32_t *padding);
941 int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool,
942 uint32_t block_size);
943 void* anv_block_pool_map(struct anv_block_pool *pool, int32_t offset, uint32_t
944 size);
945
946 VkResult anv_state_pool_init(struct anv_state_pool *pool,
947 struct anv_device *device,
948 uint64_t base_address,
949 int32_t start_offset,
950 uint32_t block_size);
951 void anv_state_pool_finish(struct anv_state_pool *pool);
952 struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
953 uint32_t state_size, uint32_t alignment);
954 struct anv_state anv_state_pool_alloc_back(struct anv_state_pool *pool);
955 void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
956 void anv_state_stream_init(struct anv_state_stream *stream,
957 struct anv_state_pool *state_pool,
958 uint32_t block_size);
959 void anv_state_stream_finish(struct anv_state_stream *stream);
960 struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
961 uint32_t size, uint32_t alignment);
962
963 void anv_state_reserved_pool_init(struct anv_state_reserved_pool *pool,
964 struct anv_state_pool *parent,
965 uint32_t count, uint32_t size,
966 uint32_t alignment);
967 void anv_state_reserved_pool_finish(struct anv_state_reserved_pool *pool);
968 struct anv_state anv_state_reserved_pool_alloc(struct anv_state_reserved_pool *pool);
969 void anv_state_reserved_pool_free(struct anv_state_reserved_pool *pool,
970 struct anv_state state);
971
972 VkResult anv_state_table_init(struct anv_state_table *table,
973 struct anv_device *device,
974 uint32_t initial_entries);
975 void anv_state_table_finish(struct anv_state_table *table);
976 VkResult anv_state_table_add(struct anv_state_table *table, uint32_t *idx,
977 uint32_t count);
978 void anv_free_list_push(union anv_free_list *list,
979 struct anv_state_table *table,
980 uint32_t idx, uint32_t count);
981 struct anv_state* anv_free_list_pop(union anv_free_list *list,
982 struct anv_state_table *table);
983
984
985 static inline struct anv_state *
986 anv_state_table_get(struct anv_state_table *table, uint32_t idx)
987 {
988 return &table->map[idx].state;
989 }
990 /**
991 * Implements a pool of re-usable BOs. The interface is identical to that
992 * of block_pool except that each block is its own BO.
993 */
994 struct anv_bo_pool {
995 struct anv_device *device;
996
997 struct util_sparse_array_free_list free_list[16];
998 };
999
1000 void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
1001 void anv_bo_pool_finish(struct anv_bo_pool *pool);
1002 VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, uint32_t size,
1003 struct anv_bo **bo_out);
1004 void anv_bo_pool_free(struct anv_bo_pool *pool, struct anv_bo *bo);
1005
1006 struct anv_scratch_pool {
1007 /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
1008 struct anv_bo *bos[16][MESA_SHADER_STAGES];
1009 };
1010
1011 void anv_scratch_pool_init(struct anv_device *device,
1012 struct anv_scratch_pool *pool);
1013 void anv_scratch_pool_finish(struct anv_device *device,
1014 struct anv_scratch_pool *pool);
1015 struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
1016 struct anv_scratch_pool *pool,
1017 gl_shader_stage stage,
1018 unsigned per_thread_scratch);
1019
1020 /** Implements a BO cache that ensures a 1-1 mapping of GEM BOs to anv_bos */
1021 struct anv_bo_cache {
1022 struct util_sparse_array bo_map;
1023 pthread_mutex_t mutex;
1024 };
1025
1026 VkResult anv_bo_cache_init(struct anv_bo_cache *cache);
1027 void anv_bo_cache_finish(struct anv_bo_cache *cache);
1028
1029 struct anv_memory_type {
1030 /* Standard bits passed on to the client */
1031 VkMemoryPropertyFlags propertyFlags;
1032 uint32_t heapIndex;
1033 };
1034
1035 struct anv_memory_heap {
1036 /* Standard bits passed on to the client */
1037 VkDeviceSize size;
1038 VkMemoryHeapFlags flags;
1039
1040 /** Driver-internal book-keeping.
1041 *
1042 * Align it to 64 bits to make atomic operations faster on 32 bit platforms.
1043 */
1044 VkDeviceSize used __attribute__ ((aligned (8)));
1045 };
1046
1047 struct anv_physical_device {
1048 struct vk_object_base base;
1049
1050 /* Link in anv_instance::physical_devices */
1051 struct list_head link;
1052
1053 struct anv_instance * instance;
1054 bool no_hw;
1055 char path[20];
1056 const char * name;
1057 struct {
1058 uint16_t domain;
1059 uint8_t bus;
1060 uint8_t device;
1061 uint8_t function;
1062 } pci_info;
1063 struct gen_device_info info;
1064 /** Amount of "GPU memory" we want to advertise
1065 *
1066 * Clearly, this value is bogus since Intel is a UMA architecture. On
1067 * gen7 platforms, we are limited by GTT size unless we want to implement
1068 * fine-grained tracking and GTT splitting. On Broadwell and above we are
1069 * practically unlimited. However, we will never report more than 3/4 of
1070 * the total system ram to try and avoid running out of RAM.
1071 */
1072 bool supports_48bit_addresses;
1073 struct brw_compiler * compiler;
1074 struct isl_device isl_dev;
1075 struct gen_perf_config * perf;
1076 int cmd_parser_version;
1077 bool has_softpin;
1078 bool has_exec_async;
1079 bool has_exec_capture;
1080 bool has_exec_fence;
1081 bool has_syncobj;
1082 bool has_syncobj_wait;
1083 bool has_syncobj_wait_available;
1084 bool has_context_priority;
1085 bool has_context_isolation;
1086 bool has_thread_submit;
1087 bool has_mem_available;
1088 bool has_mmap_offset;
1089 uint64_t gtt_size;
1090
1091 bool use_softpin;
1092 bool always_use_bindless;
1093 bool use_call_secondary;
1094
1095 /** True if we can access buffers using A64 messages */
1096 bool has_a64_buffer_access;
1097 /** True if we can use bindless access for images */
1098 bool has_bindless_images;
1099 /** True if we can use bindless access for samplers */
1100 bool has_bindless_samplers;
1101 /** True if we can use timeline semaphores through execbuf */
1102 bool has_exec_timeline;
1103
1104 /** True if we can read the GPU timestamp register
1105 *
1106 * When running in a virtual context, the timestamp register is unreadable
1107 * on Gen12+.
1108 */
1109 bool has_reg_timestamp;
1110
1111 /** True if this device has implicit AUX
1112 *
1113 * If true, CCS is handled as an implicit attachment to the BO rather than
1114 * as an explicitly bound surface.
1115 */
1116 bool has_implicit_ccs;
1117
1118 bool always_flush_cache;
1119
1120 struct anv_device_extension_table supported_extensions;
1121
1122 uint32_t eu_total;
1123 uint32_t subslice_total;
1124
1125 struct {
1126 uint32_t type_count;
1127 struct anv_memory_type types[VK_MAX_MEMORY_TYPES];
1128 uint32_t heap_count;
1129 struct anv_memory_heap heaps[VK_MAX_MEMORY_HEAPS];
1130 } memory;
1131
1132 uint8_t driver_build_sha1[20];
1133 uint8_t pipeline_cache_uuid[VK_UUID_SIZE];
1134 uint8_t driver_uuid[VK_UUID_SIZE];
1135 uint8_t device_uuid[VK_UUID_SIZE];
1136
1137 struct disk_cache * disk_cache;
1138
1139 struct wsi_device wsi_device;
1140 int local_fd;
1141 int master_fd;
1142 };
1143
1144 struct anv_app_info {
1145 const char* app_name;
1146 uint32_t app_version;
1147 const char* engine_name;
1148 uint32_t engine_version;
1149 uint32_t api_version;
1150 };
1151
1152 struct anv_instance {
1153 struct vk_object_base base;
1154
1155 VkAllocationCallbacks alloc;
1156
1157 struct anv_app_info app_info;
1158
1159 struct anv_instance_extension_table enabled_extensions;
1160 struct anv_instance_dispatch_table dispatch;
1161 struct anv_physical_device_dispatch_table physical_device_dispatch;
1162 struct anv_device_dispatch_table device_dispatch;
1163
1164 bool physical_devices_enumerated;
1165 struct list_head physical_devices;
1166
1167 bool pipeline_cache_enabled;
1168
1169 struct vk_debug_report_instance debug_report_callbacks;
1170
1171 struct driOptionCache dri_options;
1172 struct driOptionCache available_dri_options;
1173 };
1174
1175 VkResult anv_init_wsi(struct anv_physical_device *physical_device);
1176 void anv_finish_wsi(struct anv_physical_device *physical_device);
1177
1178 uint32_t anv_physical_device_api_version(struct anv_physical_device *dev);
1179 bool anv_physical_device_extension_supported(struct anv_physical_device *dev,
1180 const char *name);
1181
1182 struct anv_queue_submit {
1183 struct anv_cmd_buffer * cmd_buffer;
1184
1185 uint32_t fence_count;
1186 uint32_t fence_array_length;
1187 struct drm_i915_gem_exec_fence * fences;
1188 uint64_t * fence_values;
1189
1190 uint32_t temporary_semaphore_count;
1191 uint32_t temporary_semaphore_array_length;
1192 struct anv_semaphore_impl * temporary_semaphores;
1193
1194 /* Semaphores to be signaled with a SYNC_FD. */
1195 struct anv_semaphore ** sync_fd_semaphores;
1196 uint32_t sync_fd_semaphore_count;
1197 uint32_t sync_fd_semaphore_array_length;
1198
1199 /* Allocated only with non-shareable timelines. */
1200 union {
1201 struct anv_timeline ** wait_timelines;
1202 uint32_t * wait_timeline_syncobjs;
1203 };
1204 uint32_t wait_timeline_count;
1205 uint32_t wait_timeline_array_length;
1206 uint64_t * wait_timeline_values;
1207
1208 struct anv_timeline ** signal_timelines;
1209 uint32_t signal_timeline_count;
1210 uint32_t signal_timeline_array_length;
1211 uint64_t * signal_timeline_values;
1212
1213 int in_fence;
1214 bool need_out_fence;
1215 int out_fence;
1216
1217 uint32_t fence_bo_count;
1218 uint32_t fence_bo_array_length;
1219 /* An array of struct anv_bo pointers with the lower bit used as a flag to
1220 * signal that we will wait on that BO (see anv_(un)pack_ptr).
1221 */
1222 uintptr_t * fence_bos;
1223
1224 int perf_query_pass;
1225
1226 const VkAllocationCallbacks * alloc;
1227 VkSystemAllocationScope alloc_scope;
1228
1229 struct anv_bo * simple_bo;
1230 uint32_t simple_bo_size;
1231
1232 struct list_head link;
1233 };
1234
1235 struct anv_queue {
1236 struct vk_object_base base;
1237
1238 struct anv_device * device;
1239
1240 VkDeviceQueueCreateFlags flags;
1241
1242 /* Set once from the device api calls. */
1243 bool lost_signaled;
1244
1245 /* Only set once atomically by the queue */
1246 int lost;
1247 int error_line;
1248 const char * error_file;
1249 char error_msg[80];
1250
1251 /*
1252 * This mutex protects the variables below.
1253 */
1254 pthread_mutex_t mutex;
1255
1256 pthread_t thread;
1257 pthread_cond_t cond;
1258
1259 /*
1260 * A list of struct anv_queue_submit to be submitted to i915.
1261 */
1262 struct list_head queued_submits;
1263
1264 /* Set to true to stop the submission thread */
1265 bool quit;
1266 };
1267
1268 struct anv_pipeline_cache {
1269 struct vk_object_base base;
1270 struct anv_device * device;
1271 pthread_mutex_t mutex;
1272
1273 struct hash_table * nir_cache;
1274
1275 struct hash_table * cache;
1276
1277 bool external_sync;
1278 };
1279
1280 struct nir_xfb_info;
1281 struct anv_pipeline_bind_map;
1282
1283 void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
1284 struct anv_device *device,
1285 bool cache_enabled,
1286 bool external_sync);
1287 void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
1288
1289 struct anv_shader_bin *
1290 anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
1291 const void *key, uint32_t key_size);
1292 struct anv_shader_bin *
1293 anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
1294 gl_shader_stage stage,
1295 const void *key_data, uint32_t key_size,
1296 const void *kernel_data, uint32_t kernel_size,
1297 const struct brw_stage_prog_data *prog_data,
1298 uint32_t prog_data_size,
1299 const struct brw_compile_stats *stats,
1300 uint32_t num_stats,
1301 const struct nir_xfb_info *xfb_info,
1302 const struct anv_pipeline_bind_map *bind_map);
1303
1304 struct anv_shader_bin *
1305 anv_device_search_for_kernel(struct anv_device *device,
1306 struct anv_pipeline_cache *cache,
1307 const void *key_data, uint32_t key_size,
1308 bool *user_cache_bit);
1309
1310 struct anv_shader_bin *
1311 anv_device_upload_kernel(struct anv_device *device,
1312 struct anv_pipeline_cache *cache,
1313 gl_shader_stage stage,
1314 const void *key_data, uint32_t key_size,
1315 const void *kernel_data, uint32_t kernel_size,
1316 const struct brw_stage_prog_data *prog_data,
1317 uint32_t prog_data_size,
1318 const struct brw_compile_stats *stats,
1319 uint32_t num_stats,
1320 const struct nir_xfb_info *xfb_info,
1321 const struct anv_pipeline_bind_map *bind_map);
1322
1323 struct nir_shader;
1324 struct nir_shader_compiler_options;
1325
1326 struct nir_shader *
1327 anv_device_search_for_nir(struct anv_device *device,
1328 struct anv_pipeline_cache *cache,
1329 const struct nir_shader_compiler_options *nir_options,
1330 unsigned char sha1_key[20],
1331 void *mem_ctx);
1332
1333 void
1334 anv_device_upload_nir(struct anv_device *device,
1335 struct anv_pipeline_cache *cache,
1336 const struct nir_shader *nir,
1337 unsigned char sha1_key[20]);
1338
1339 struct anv_address {
1340 struct anv_bo *bo;
1341 uint32_t offset;
1342 };
1343
1344 struct anv_device {
1345 struct vk_device vk;
1346
1347 struct anv_physical_device * physical;
1348 bool no_hw;
1349 struct gen_device_info info;
1350 struct isl_device isl_dev;
1351 int context_id;
1352 int fd;
1353 bool can_chain_batches;
1354 bool robust_buffer_access;
1355 bool has_thread_submit;
1356 struct anv_device_extension_table enabled_extensions;
1357 struct anv_device_dispatch_table dispatch;
1358
1359 pthread_mutex_t vma_mutex;
1360 struct util_vma_heap vma_lo;
1361 struct util_vma_heap vma_cva;
1362 struct util_vma_heap vma_hi;
1363
1364 /** List of all anv_device_memory objects */
1365 struct list_head memory_objects;
1366
1367 struct anv_bo_pool batch_bo_pool;
1368
1369 struct anv_bo_cache bo_cache;
1370
1371 struct anv_state_pool dynamic_state_pool;
1372 struct anv_state_pool instruction_state_pool;
1373 struct anv_state_pool binding_table_pool;
1374 struct anv_state_pool surface_state_pool;
1375
1376 struct anv_state_reserved_pool custom_border_colors;
1377
1378 /** BO used for various workarounds
1379 *
1380 * There are a number of workarounds on our hardware which require writing
1381 * data somewhere and it doesn't really matter where. For that, we use
1382 * this BO and just write to the first dword or so.
1383 *
1384 * We also need to be able to handle NULL buffers bound as pushed UBOs.
1385 * For that, we use the high bytes (>= 1024) of the workaround BO.
1386 */
1387 struct anv_bo * workaround_bo;
1388 struct anv_address workaround_address;
1389
1390 struct anv_bo * trivial_batch_bo;
1391 struct anv_bo * hiz_clear_bo;
1392 struct anv_state null_surface_state;
1393
1394 struct anv_pipeline_cache default_pipeline_cache;
1395 struct blorp_context blorp;
1396
1397 struct anv_state border_colors;
1398
1399 struct anv_state slice_hash;
1400
1401 struct anv_queue queue;
1402
1403 struct anv_scratch_pool scratch_pool;
1404
1405 pthread_mutex_t mutex;
1406 pthread_cond_t queue_submit;
1407 int _lost;
1408 int lost_reported;
1409
1410 struct gen_batch_decode_ctx decoder_ctx;
1411 /*
1412 * When decoding an anv_cmd_buffer, we might need to search for BOs through
1413 * the cmd_buffer's list.
1414 */
1415 struct anv_cmd_buffer *cmd_buffer_being_decoded;
1416
1417 int perf_fd; /* -1 if not opened */
1418 uint64_t perf_metric; /* 0 if unset */
1419
1420 struct gen_aux_map_context *aux_map_ctx;
1421
1422 struct gen_debug_block_frame *debug_frame_desc;
1423 };
1424
1425 static inline struct anv_instance *
1426 anv_device_instance_or_null(const struct anv_device *device)
1427 {
1428 return device ? device->physical->instance : NULL;
1429 }
1430
1431 static inline struct anv_state_pool *
1432 anv_binding_table_pool(struct anv_device *device)
1433 {
1434 if (device->physical->use_softpin)
1435 return &device->binding_table_pool;
1436 else
1437 return &device->surface_state_pool;
1438 }
1439
1440 static inline struct anv_state
1441 anv_binding_table_pool_alloc(struct anv_device *device) {
1442 if (device->physical->use_softpin)
1443 return anv_state_pool_alloc(&device->binding_table_pool,
1444 device->binding_table_pool.block_size, 0);
1445 else
1446 return anv_state_pool_alloc_back(&device->surface_state_pool);
1447 }
1448
1449 static inline void
1450 anv_binding_table_pool_free(struct anv_device *device, struct anv_state state) {
1451 anv_state_pool_free(anv_binding_table_pool(device), state);
1452 }
1453
1454 static inline uint32_t
1455 anv_mocs_for_bo(const struct anv_device *device, const struct anv_bo *bo)
1456 {
1457 if (bo->is_external)
1458 return device->isl_dev.mocs.external;
1459 else
1460 return device->isl_dev.mocs.internal;
1461 }
1462
1463 void anv_device_init_blorp(struct anv_device *device);
1464 void anv_device_finish_blorp(struct anv_device *device);
1465
1466 void _anv_device_report_lost(struct anv_device *device);
1467 VkResult _anv_device_set_lost(struct anv_device *device,
1468 const char *file, int line,
1469 const char *msg, ...)
1470 anv_printflike(4, 5);
1471 VkResult _anv_queue_set_lost(struct anv_queue *queue,
1472 const char *file, int line,
1473 const char *msg, ...)
1474 anv_printflike(4, 5);
1475 #define anv_device_set_lost(dev, ...) \
1476 _anv_device_set_lost(dev, __FILE__, __LINE__, __VA_ARGS__)
1477 #define anv_queue_set_lost(queue, ...) \
1478 (queue)->device->has_thread_submit ? \
1479 _anv_queue_set_lost(queue, __FILE__, __LINE__, __VA_ARGS__) : \
1480 _anv_device_set_lost(queue->device, __FILE__, __LINE__, __VA_ARGS__)
1481
1482 static inline bool
1483 anv_device_is_lost(struct anv_device *device)
1484 {
1485 int lost = p_atomic_read(&device->_lost);
1486 if (unlikely(lost && !device->lost_reported))
1487 _anv_device_report_lost(device);
1488 return lost;
1489 }
1490
1491 VkResult anv_device_query_status(struct anv_device *device);
1492
1493
1494 enum anv_bo_alloc_flags {
1495 /** Specifies that the BO must have a 32-bit address
1496 *
1497 * This is the opposite of EXEC_OBJECT_SUPPORTS_48B_ADDRESS.
1498 */
1499 ANV_BO_ALLOC_32BIT_ADDRESS = (1 << 0),
1500
1501 /** Specifies that the BO may be shared externally */
1502 ANV_BO_ALLOC_EXTERNAL = (1 << 1),
1503
1504 /** Specifies that the BO should be mapped */
1505 ANV_BO_ALLOC_MAPPED = (1 << 2),
1506
1507 /** Specifies that the BO should be snooped so we get coherency */
1508 ANV_BO_ALLOC_SNOOPED = (1 << 3),
1509
1510 /** Specifies that the BO should be captured in error states */
1511 ANV_BO_ALLOC_CAPTURE = (1 << 4),
1512
1513 /** Specifies that the BO will have an address assigned by the caller
1514 *
1515 * Such BOs do not exist in any VMA heap.
1516 */
1517 ANV_BO_ALLOC_FIXED_ADDRESS = (1 << 5),
1518
1519 /** Enables implicit synchronization on the BO
1520 *
1521 * This is the opposite of EXEC_OBJECT_ASYNC.
1522 */
1523 ANV_BO_ALLOC_IMPLICIT_SYNC = (1 << 6),
1524
1525 /** Enables implicit synchronization on the BO
1526 *
1527 * This is equivalent to EXEC_OBJECT_WRITE.
1528 */
1529 ANV_BO_ALLOC_IMPLICIT_WRITE = (1 << 7),
1530
1531 /** Has an address which is visible to the client */
1532 ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS = (1 << 8),
1533
1534 /** This buffer has implicit CCS data attached to it */
1535 ANV_BO_ALLOC_IMPLICIT_CCS = (1 << 9),
1536 };
1537
1538 VkResult anv_device_alloc_bo(struct anv_device *device, uint64_t size,
1539 enum anv_bo_alloc_flags alloc_flags,
1540 uint64_t explicit_address,
1541 struct anv_bo **bo);
1542 VkResult anv_device_import_bo_from_host_ptr(struct anv_device *device,
1543 void *host_ptr, uint32_t size,
1544 enum anv_bo_alloc_flags alloc_flags,
1545 uint64_t client_address,
1546 struct anv_bo **bo_out);
1547 VkResult anv_device_import_bo(struct anv_device *device, int fd,
1548 enum anv_bo_alloc_flags alloc_flags,
1549 uint64_t client_address,
1550 struct anv_bo **bo);
1551 VkResult anv_device_export_bo(struct anv_device *device,
1552 struct anv_bo *bo, int *fd_out);
1553 void anv_device_release_bo(struct anv_device *device,
1554 struct anv_bo *bo);
1555
1556 static inline struct anv_bo *
1557 anv_device_lookup_bo(struct anv_device *device, uint32_t gem_handle)
1558 {
1559 return util_sparse_array_get(&device->bo_cache.bo_map, gem_handle);
1560 }
1561
1562 VkResult anv_device_bo_busy(struct anv_device *device, struct anv_bo *bo);
1563 VkResult anv_device_wait(struct anv_device *device, struct anv_bo *bo,
1564 int64_t timeout);
1565
1566 VkResult anv_queue_init(struct anv_device *device, struct anv_queue *queue);
1567 void anv_queue_finish(struct anv_queue *queue);
1568
1569 VkResult anv_queue_execbuf_locked(struct anv_queue *queue, struct anv_queue_submit *submit);
1570 VkResult anv_queue_submit_simple_batch(struct anv_queue *queue,
1571 struct anv_batch *batch);
1572
1573 uint64_t anv_gettime_ns(void);
1574 uint64_t anv_get_absolute_timeout(uint64_t timeout);
1575
1576 void* anv_gem_mmap(struct anv_device *device,
1577 uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
1578 void anv_gem_munmap(struct anv_device *device, void *p, uint64_t size);
1579 uint32_t anv_gem_create(struct anv_device *device, uint64_t size);
1580 void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
1581 uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
1582 int anv_gem_busy(struct anv_device *device, uint32_t gem_handle);
1583 int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
1584 int anv_gem_execbuffer(struct anv_device *device,
1585 struct drm_i915_gem_execbuffer2 *execbuf);
1586 int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
1587 uint32_t stride, uint32_t tiling);
1588 int anv_gem_create_context(struct anv_device *device);
1589 bool anv_gem_has_context_priority(int fd);
1590 int anv_gem_destroy_context(struct anv_device *device, int context);
1591 int anv_gem_set_context_param(int fd, int context, uint32_t param,
1592 uint64_t value);
1593 int anv_gem_get_context_param(int fd, int context, uint32_t param,
1594 uint64_t *value);
1595 int anv_gem_get_param(int fd, uint32_t param);
1596 uint64_t anv_gem_get_drm_cap(int fd, uint32_t capability);
1597 int anv_gem_get_tiling(struct anv_device *device, uint32_t gem_handle);
1598 bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
1599 int anv_gem_gpu_get_reset_stats(struct anv_device *device,
1600 uint32_t *active, uint32_t *pending);
1601 int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
1602 int anv_gem_reg_read(int fd, uint32_t offset, uint64_t *result);
1603 uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
1604 int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
1605 int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
1606 uint32_t read_domains, uint32_t write_domain);
1607 int anv_gem_sync_file_merge(struct anv_device *device, int fd1, int fd2);
1608 uint32_t anv_gem_syncobj_create(struct anv_device *device, uint32_t flags);
1609 void anv_gem_syncobj_destroy(struct anv_device *device, uint32_t handle);
1610 int anv_gem_syncobj_handle_to_fd(struct anv_device *device, uint32_t handle);
1611 uint32_t anv_gem_syncobj_fd_to_handle(struct anv_device *device, int fd);
1612 int anv_gem_syncobj_export_sync_file(struct anv_device *device,
1613 uint32_t handle);
1614 int anv_gem_syncobj_import_sync_file(struct anv_device *device,
1615 uint32_t handle, int fd);
1616 void anv_gem_syncobj_reset(struct anv_device *device, uint32_t handle);
1617 bool anv_gem_supports_syncobj_wait(int fd);
1618 int anv_gem_syncobj_wait(struct anv_device *device,
1619 const uint32_t *handles, uint32_t num_handles,
1620 int64_t abs_timeout_ns, bool wait_all);
1621 int anv_gem_syncobj_timeline_wait(struct anv_device *device,
1622 const uint32_t *handles, const uint64_t *points,
1623 uint32_t num_items, int64_t abs_timeout_ns,
1624 bool wait_all, bool wait_materialize);
1625 int anv_gem_syncobj_timeline_signal(struct anv_device *device,
1626 const uint32_t *handles, const uint64_t *points,
1627 uint32_t num_items);
1628 int anv_gem_syncobj_timeline_query(struct anv_device *device,
1629 const uint32_t *handles, uint64_t *points,
1630 uint32_t num_items);
1631
1632 uint64_t anv_vma_alloc(struct anv_device *device,
1633 uint64_t size, uint64_t align,
1634 enum anv_bo_alloc_flags alloc_flags,
1635 uint64_t client_address);
1636 void anv_vma_free(struct anv_device *device,
1637 uint64_t address, uint64_t size);
1638
1639 struct anv_reloc_list {
1640 uint32_t num_relocs;
1641 uint32_t array_length;
1642 struct drm_i915_gem_relocation_entry * relocs;
1643 struct anv_bo ** reloc_bos;
1644 uint32_t dep_words;
1645 BITSET_WORD * deps;
1646 };
1647
1648 VkResult anv_reloc_list_init(struct anv_reloc_list *list,
1649 const VkAllocationCallbacks *alloc);
1650 void anv_reloc_list_finish(struct anv_reloc_list *list,
1651 const VkAllocationCallbacks *alloc);
1652
1653 VkResult anv_reloc_list_add(struct anv_reloc_list *list,
1654 const VkAllocationCallbacks *alloc,
1655 uint32_t offset, struct anv_bo *target_bo,
1656 uint32_t delta, uint64_t *address_u64_out);
1657
1658 struct anv_batch_bo {
1659 /* Link in the anv_cmd_buffer.owned_batch_bos list */
1660 struct list_head link;
1661
1662 struct anv_bo * bo;
1663
1664 /* Bytes actually consumed in this batch BO */
1665 uint32_t length;
1666
1667 struct anv_reloc_list relocs;
1668 };
1669
1670 struct anv_batch {
1671 const VkAllocationCallbacks * alloc;
1672
1673 struct anv_address start_addr;
1674
1675 void * start;
1676 void * end;
1677 void * next;
1678
1679 struct anv_reloc_list * relocs;
1680
1681 /* This callback is called (with the associated user data) in the event
1682 * that the batch runs out of space.
1683 */
1684 VkResult (*extend_cb)(struct anv_batch *, void *);
1685 void * user_data;
1686
1687 /**
1688 * Current error status of the command buffer. Used to track inconsistent
1689 * or incomplete command buffer states that are the consequence of run-time
1690 * errors such as out-of-memory scenarios. We want to track this in the
1691 * batch because the command buffer object is not visible to some parts
1692 * of the driver.
1693 */
1694 VkResult status;
1695 };
1696
1697 void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
1698 void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
1699 uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
1700 void *location, struct anv_bo *bo, uint32_t offset);
1701 struct anv_address anv_batch_address(struct anv_batch *batch, void *batch_location);
1702
1703 static inline void
1704 anv_batch_set_storage(struct anv_batch *batch, struct anv_address addr,
1705 void *map, size_t size)
1706 {
1707 batch->start_addr = addr;
1708 batch->next = batch->start = map;
1709 batch->end = map + size;
1710 }
1711
1712 static inline VkResult
1713 anv_batch_set_error(struct anv_batch *batch, VkResult error)
1714 {
1715 assert(error != VK_SUCCESS);
1716 if (batch->status == VK_SUCCESS)
1717 batch->status = error;
1718 return batch->status;
1719 }
1720
1721 static inline bool
1722 anv_batch_has_error(struct anv_batch *batch)
1723 {
1724 return batch->status != VK_SUCCESS;
1725 }
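/* Illustrative pattern (a sketch, not a definition from this file): code
 * that records commands typically checks the sticky error status before
 * emitting anything and records new failures with anv_batch_set_error():
 *
 *    if (anv_batch_has_error(&cmd_buffer->batch))
 *       return;
 *    ...
 *    if (result != VK_SUCCESS)
 *       anv_batch_set_error(&cmd_buffer->batch, result);
 *
 * "cmd_buffer" and "result" are assumed caller locals.
 */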
1726
1727 #define ANV_NULL_ADDRESS ((struct anv_address) { NULL, 0 })
1728
1729 static inline bool
1730 anv_address_is_null(struct anv_address addr)
1731 {
1732 return addr.bo == NULL && addr.offset == 0;
1733 }
1734
1735 static inline uint64_t
1736 anv_address_physical(struct anv_address addr)
1737 {
1738 if (addr.bo && (addr.bo->flags & EXEC_OBJECT_PINNED))
1739 return gen_canonical_address(addr.bo->offset + addr.offset);
1740 else
1741 return gen_canonical_address(addr.offset);
1742 }
1743
1744 static inline struct anv_address
1745 anv_address_add(struct anv_address addr, uint64_t offset)
1746 {
1747 addr.offset += offset;
1748 return addr;
1749 }
1750
1751 static inline void
1752 write_reloc(const struct anv_device *device, void *p, uint64_t v, bool flush)
1753 {
1754 unsigned reloc_size = 0;
1755 if (device->info.gen >= 8) {
1756 reloc_size = sizeof(uint64_t);
1757 *(uint64_t *)p = gen_canonical_address(v);
1758 } else {
1759 reloc_size = sizeof(uint32_t);
1760 *(uint32_t *)p = v;
1761 }
1762
1763 if (flush && !device->info.has_llc)
1764 gen_flush_range(p, reloc_size);
1765 }
1766
1767 static inline uint64_t
1768 _anv_combine_address(struct anv_batch *batch, void *location,
1769 const struct anv_address address, uint32_t delta)
1770 {
1771 if (address.bo == NULL) {
1772 return address.offset + delta;
1773 } else {
1774 assert(batch->start <= location && location < batch->end);
1775
1776 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
1777 }
1778 }
1779
1780 #define __gen_address_type struct anv_address
1781 #define __gen_user_data struct anv_batch
1782 #define __gen_combine_address _anv_combine_address
1783
1784 /* Wrapper macros needed to work around preprocessor argument issues. In
1785 * particular, arguments don't get pre-evaluated if they are concatenated.
1786 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
1787 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
1788 * We can work around this easily enough with these helpers.
1789 */
1790 #define __anv_cmd_length(cmd) cmd ## _length
1791 #define __anv_cmd_length_bias(cmd) cmd ## _length_bias
1792 #define __anv_cmd_header(cmd) cmd ## _header
1793 #define __anv_cmd_pack(cmd) cmd ## _pack
1794 #define __anv_reg_num(reg) reg ## _num
1795
1796 #define anv_pack_struct(dst, struc, ...) do { \
1797 struct struc __template = { \
1798 __VA_ARGS__ \
1799 }; \
1800 __anv_cmd_pack(struc)(NULL, dst, &__template); \
1801 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
1802 } while (0)
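/* Illustrative usage (sketch): anv_pack_struct() packs a GENX state struct
 * into a caller-provided dword array without emitting it into a batch, e.g.
 *
 *    uint32_t sf_dw[GENX(3DSTATE_SF_length)];
 *    anv_pack_struct(sf_dw, GENX(3DSTATE_SF), .LineWidth = 1.0f);
 *
 * The command and field above are only examples; any struct generated with
 * a matching _pack/_length pair can be used.
 */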
1803
1804 #define anv_batch_emitn(batch, n, cmd, ...) ({ \
1805 void *__dst = anv_batch_emit_dwords(batch, n); \
1806 if (__dst) { \
1807 struct cmd __template = { \
1808 __anv_cmd_header(cmd), \
1809 .DWordLength = n - __anv_cmd_length_bias(cmd), \
1810 __VA_ARGS__ \
1811 }; \
1812 __anv_cmd_pack(cmd)(batch, __dst, &__template); \
1813 } \
1814 __dst; \
1815 })
1816
1817 #define anv_batch_emit_merge(batch, dwords0, dwords1) \
1818 do { \
1819 uint32_t *dw; \
1820 \
1821 STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
1822 dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
1823 if (!dw) \
1824 break; \
1825 for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
1826 dw[i] = (dwords0)[i] | (dwords1)[i]; \
1827 VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
1828 } while (0)
1829
1830 #define anv_batch_emit(batch, cmd, name) \
1831 for (struct cmd name = { __anv_cmd_header(cmd) }, \
1832 *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
1833 __builtin_expect(_dst != NULL, 1); \
1834 ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
1835 VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
1836 _dst = NULL; \
1837 }))
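/* Illustrative usage (sketch): anv_batch_emit() gives the body a named,
 * header-initialized template and packs it into the batch when the block
 * exits, e.g.
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *    }
 *
 * "cmd_buffer" is an assumed caller local.
 */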
1838
1839 /* #define __gen_get_batch_dwords anv_batch_emit_dwords */
1840 /* #define __gen_get_batch_address anv_batch_address */
1841 /* #define __gen_address_value anv_address_physical */
1842 /* #define __gen_address_offset anv_address_add */
1843
1844 struct anv_device_memory {
1845 struct vk_object_base base;
1846
1847 struct list_head link;
1848
1849 struct anv_bo * bo;
1850 struct anv_memory_type * type;
1851 VkDeviceSize map_size;
1852 void * map;
1853
1854 /* If set, we are holding a reference to an AHardwareBuffer
1855 * which we must release when the memory is freed.
1856 */
1857 struct AHardwareBuffer * ahw;
1858
1859 /* If set, this memory comes from a host pointer. */
1860 void * host_ptr;
1861 };
1862
1863 /**
1864 * Header for Vertex URB Entry (VUE)
1865 */
1866 struct anv_vue_header {
1867 uint32_t Reserved;
1868 uint32_t RTAIndex; /* RenderTargetArrayIndex */
1869 uint32_t ViewportIndex;
1870 float PointWidth;
1871 };
1872
1873 /** Struct representing a sampled image descriptor
1874 *
1875 * This descriptor layout is used for sampled images, bare samplers, and
1876 * combined image/sampler descriptors.
1877 */
1878 struct anv_sampled_image_descriptor {
1879 /** Bindless image handle
1880 *
1881 * This is expected to already be shifted such that the 20-bit
1882 * SURFACE_STATE table index is in the top 20 bits.
1883 */
1884 uint32_t image;
1885
1886 /** Bindless sampler handle
1887 *
1888 * This is assumed to be a 32B-aligned SAMPLER_STATE pointer relative
1889 * to the dynamic state base address.
1890 */
1891 uint32_t sampler;
1892 };
1893
1894 struct anv_texture_swizzle_descriptor {
1895 /** Texture swizzle
1896 *
1897 * See also nir_intrinsic_channel_select_intel
1898 */
1899 uint8_t swizzle[4];
1900
1901 /** Unused padding to ensure the struct is a multiple of 64 bits */
1902 uint32_t _pad;
1903 };
1904
1905 /** Struct representing a storage image descriptor */
1906 struct anv_storage_image_descriptor {
1907 /** Bindless image handles
1908 *
1909 * These are expected to already be shifted such that the 20-bit
1910 * SURFACE_STATE table index is in the top 20 bits.
1911 */
1912 uint32_t read_write;
1913 uint32_t write_only;
1914 };
1915
1916 /** Struct representing an address/range descriptor
1917 *
1918 * The fields of this struct correspond directly to the data layout of
1919 * nir_address_format_64bit_bounded_global addresses. The last field holds
1920 * the offset within the NIR address, so it must be zero so that loading
1921 * the descriptor yields a pointer to the start of the range.
1922 */
1923 struct anv_address_range_descriptor {
1924 uint64_t address;
1925 uint32_t range;
1926 uint32_t zero;
1927 };
1928
1929 enum anv_descriptor_data {
1930 /** The descriptor contains a BTI reference to a surface state */
1931 ANV_DESCRIPTOR_SURFACE_STATE = (1 << 0),
1932 /** The descriptor contains a BTI reference to a sampler state */
1933 ANV_DESCRIPTOR_SAMPLER_STATE = (1 << 1),
1934 /** The descriptor contains an actual buffer view */
1935 ANV_DESCRIPTOR_BUFFER_VIEW = (1 << 2),
1936 /** The descriptor contains auxiliary image layout data */
1937 ANV_DESCRIPTOR_IMAGE_PARAM = (1 << 3),
1938 /** The descriptor contains inline uniform data */
1939 ANV_DESCRIPTOR_INLINE_UNIFORM = (1 << 4),
1940 /** anv_address_range_descriptor with a buffer address and range */
1941 ANV_DESCRIPTOR_ADDRESS_RANGE = (1 << 5),
1942 /** Bindless surface handle */
1943 ANV_DESCRIPTOR_SAMPLED_IMAGE = (1 << 6),
1944 /** Storage image handles */
1945 ANV_DESCRIPTOR_STORAGE_IMAGE = (1 << 7),
1946 /** Texture swizzle descriptor data */
1947 ANV_DESCRIPTOR_TEXTURE_SWIZZLE = (1 << 8),
1948 };
1949
1950 struct anv_descriptor_set_binding_layout {
1951 #ifndef NDEBUG
1952 /* The type of the descriptors in this binding */
1953 VkDescriptorType type;
1954 #endif
1955
1956 /* Flags provided when this binding was created */
1957 VkDescriptorBindingFlagsEXT flags;
1958
1959 /* Bitfield representing the type of data this descriptor contains */
1960 enum anv_descriptor_data data;
1961
1962 /* Maximum number of YCbCr texture/sampler planes */
1963 uint8_t max_plane_count;
1964
1965 /* Number of array elements in this binding (or size in bytes for inline
1966 * uniform data)
1967 */
1968 uint16_t array_size;
1969
1970 /* Index into the flattened descriptor set */
1971 uint16_t descriptor_index;
1972
1973 /* Index into the dynamic state array for a dynamic buffer */
1974 int16_t dynamic_offset_index;
1975
1976 /* Index into the descriptor set buffer views */
1977 int16_t buffer_view_index;
1978
1979 /* Offset into the descriptor buffer where this descriptor lives */
1980 uint32_t descriptor_offset;
1981
1982 /* Immutable samplers (or NULL if no immutable samplers) */
1983 struct anv_sampler **immutable_samplers;
1984 };
1985
1986 unsigned anv_descriptor_size(const struct anv_descriptor_set_binding_layout *layout);
1987
1988 unsigned anv_descriptor_type_size(const struct anv_physical_device *pdevice,
1989 VkDescriptorType type);
1990
1991 bool anv_descriptor_supports_bindless(const struct anv_physical_device *pdevice,
1992 const struct anv_descriptor_set_binding_layout *binding,
1993 bool sampler);
1994
1995 bool anv_descriptor_requires_bindless(const struct anv_physical_device *pdevice,
1996 const struct anv_descriptor_set_binding_layout *binding,
1997 bool sampler);
1998
1999 struct anv_descriptor_set_layout {
2000 struct vk_object_base base;
2001
2002 /* Descriptor set layouts can be destroyed at almost any time */
2003 uint32_t ref_cnt;
2004
2005 /* Number of bindings in this descriptor set */
2006 uint16_t binding_count;
2007
2008 /* Total size of the descriptor set with room for all array entries */
2009 uint16_t size;
2010
2011 /* Shader stages affected by this descriptor set */
2012 uint16_t shader_stages;
2013
2014 /* Number of buffer views in this descriptor set */
2015 uint16_t buffer_view_count;
2016
2017 /* Number of dynamic offsets used by this descriptor set */
2018 uint16_t dynamic_offset_count;
2019
2020 /* For each dynamic buffer, which VkShaderStageFlagBits stages are using
2021 * this buffer
2022 */
2023 VkShaderStageFlags dynamic_offset_stages[MAX_DYNAMIC_BUFFERS];
2024
2025 /* Size of the descriptor buffer for this descriptor set */
2026 uint32_t descriptor_buffer_size;
2027
2028 /* Bindings in this descriptor set */
2029 struct anv_descriptor_set_binding_layout binding[0];
2030 };
2031
2032 void anv_descriptor_set_layout_destroy(struct anv_device *device,
2033 struct anv_descriptor_set_layout *layout);
2034
2035 static inline void
2036 anv_descriptor_set_layout_ref(struct anv_descriptor_set_layout *layout)
2037 {
2038 assert(layout && layout->ref_cnt >= 1);
2039 p_atomic_inc(&layout->ref_cnt);
2040 }
2041
2042 static inline void
2043 anv_descriptor_set_layout_unref(struct anv_device *device,
2044 struct anv_descriptor_set_layout *layout)
2045 {
2046 assert(layout && layout->ref_cnt >= 1);
2047 if (p_atomic_dec_zero(&layout->ref_cnt))
2048 anv_descriptor_set_layout_destroy(device, layout);
2049 }
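/* Illustrative lifetime sketch: objects that may outlive
 * vkDestroyDescriptorSetLayout (pipeline layouts, descriptor sets) take a
 * reference when they start using the layout and drop it when they are
 * destroyed, e.g.
 *
 *    anv_descriptor_set_layout_ref(set_layout);
 *    ...
 *    anv_descriptor_set_layout_unref(device, set_layout);
 *
 * "set_layout" and "device" are assumed caller locals.
 */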
2050
2051 struct anv_descriptor {
2052 VkDescriptorType type;
2053
2054 union {
2055 struct {
2056 VkImageLayout layout;
2057 struct anv_image_view *image_view;
2058 struct anv_sampler *sampler;
2059 };
2060
2061 struct {
2062 struct anv_buffer *buffer;
2063 uint64_t offset;
2064 uint64_t range;
2065 };
2066
2067 struct anv_buffer_view *buffer_view;
2068 };
2069 };
2070
2071 struct anv_descriptor_set {
2072 struct vk_object_base base;
2073
2074 struct anv_descriptor_pool *pool;
2075 struct anv_descriptor_set_layout *layout;
2076
2077 /* Amount of space occupied in the pool by this descriptor set. It can
2078 * be larger than the size of the descriptor set.
2079 */
2080 uint32_t size;
2081
2082 /* State relative to anv_descriptor_pool::bo */
2083 struct anv_state desc_mem;
2084 /* Surface state for the descriptor buffer */
2085 struct anv_state desc_surface_state;
2086
2087 uint32_t buffer_view_count;
2088 struct anv_buffer_view *buffer_views;
2089
2090 /* Link to the descriptor pool's desc_sets list. */
2091 struct list_head pool_link;
2092
2093 struct anv_descriptor descriptors[0];
2094 };
2095
2096 struct anv_buffer_view {
2097 struct vk_object_base base;
2098
2099 enum isl_format format; /**< VkBufferViewCreateInfo::format */
2100 uint64_t range; /**< VkBufferViewCreateInfo::range */
2101
2102 struct anv_address address;
2103
2104 struct anv_state surface_state;
2105 struct anv_state storage_surface_state;
2106 struct anv_state writeonly_storage_surface_state;
2107
2108 struct brw_image_param storage_image_param;
2109 };
2110
2111 struct anv_push_descriptor_set {
2112 struct anv_descriptor_set set;
2113
2114 /* Put this field right behind anv_descriptor_set so it fills up the
2115 * descriptors[0] field. */
2116 struct anv_descriptor descriptors[MAX_PUSH_DESCRIPTORS];
2117
2118 /** True if the descriptor set buffer has been referenced by a draw or
2119 * dispatch command.
2120 */
2121 bool set_used_on_gpu;
2122
2123 struct anv_buffer_view buffer_views[MAX_PUSH_DESCRIPTORS];
2124 };
2125
2126 struct anv_descriptor_pool {
2127 struct vk_object_base base;
2128
2129 uint32_t size;
2130 uint32_t next;
2131 uint32_t free_list;
2132
2133 struct anv_bo *bo;
2134 struct util_vma_heap bo_heap;
2135
2136 struct anv_state_stream surface_state_stream;
2137 void *surface_state_free_list;
2138
2139 struct list_head desc_sets;
2140
2141 char data[0];
2142 };
2143
2144 enum anv_descriptor_template_entry_type {
2145 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_IMAGE,
2146 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER,
2147 ANV_DESCRIPTOR_TEMPLATE_ENTRY_TYPE_BUFFER_VIEW
2148 };
2149
2150 struct anv_descriptor_template_entry {
2151 /* The type of descriptor in this entry */
2152 VkDescriptorType type;
2153
2154 /* Binding in the descriptor set */
2155 uint32_t binding;
2156
2157 /* Offset at which to write into the descriptor set binding */
2158 uint32_t array_element;
2159
2160 /* Number of elements to write into the descriptor set binding */
2161 uint32_t array_count;
2162
2163 /* Offset into the user provided data */
2164 size_t offset;
2165
2166 /* Stride between elements into the user provided data */
2167 size_t stride;
2168 };
2169
2170 struct anv_descriptor_update_template {
2171 struct vk_object_base base;
2172
2173 VkPipelineBindPoint bind_point;
2174
2175 /* The descriptor set this template corresponds to. This value is only
2176 * valid if the template was created with the templateType
2177 * VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET.
2178 */
2179 uint8_t set;
2180
2181 /* Number of entries in this template */
2182 uint32_t entry_count;
2183
2184 /* Entries of the template */
2185 struct anv_descriptor_template_entry entries[0];
2186 };
2187
2188 size_t
2189 anv_descriptor_set_layout_size(const struct anv_descriptor_set_layout *layout);
2190
2191 void
2192 anv_descriptor_set_write_image_view(struct anv_device *device,
2193 struct anv_descriptor_set *set,
2194 const VkDescriptorImageInfo * const info,
2195 VkDescriptorType type,
2196 uint32_t binding,
2197 uint32_t element);
2198
2199 void
2200 anv_descriptor_set_write_buffer_view(struct anv_device *device,
2201 struct anv_descriptor_set *set,
2202 VkDescriptorType type,
2203 struct anv_buffer_view *buffer_view,
2204 uint32_t binding,
2205 uint32_t element);
2206
2207 void
2208 anv_descriptor_set_write_buffer(struct anv_device *device,
2209 struct anv_descriptor_set *set,
2210 struct anv_state_stream *alloc_stream,
2211 VkDescriptorType type,
2212 struct anv_buffer *buffer,
2213 uint32_t binding,
2214 uint32_t element,
2215 VkDeviceSize offset,
2216 VkDeviceSize range);
2217 void
2218 anv_descriptor_set_write_inline_uniform_data(struct anv_device *device,
2219 struct anv_descriptor_set *set,
2220 uint32_t binding,
2221 const void *data,
2222 size_t offset,
2223 size_t size);
2224
2225 void
2226 anv_descriptor_set_write_template(struct anv_device *device,
2227 struct anv_descriptor_set *set,
2228 struct anv_state_stream *alloc_stream,
2229 const struct anv_descriptor_update_template *template,
2230 const void *data);
2231
2232 VkResult
2233 anv_descriptor_set_create(struct anv_device *device,
2234 struct anv_descriptor_pool *pool,
2235 struct anv_descriptor_set_layout *layout,
2236 struct anv_descriptor_set **out_set);
2237
2238 void
2239 anv_descriptor_set_destroy(struct anv_device *device,
2240 struct anv_descriptor_pool *pool,
2241 struct anv_descriptor_set *set);
2242
2243 #define ANV_DESCRIPTOR_SET_NULL (UINT8_MAX - 5)
2244 #define ANV_DESCRIPTOR_SET_PUSH_CONSTANTS (UINT8_MAX - 4)
2245 #define ANV_DESCRIPTOR_SET_DESCRIPTORS (UINT8_MAX - 3)
2246 #define ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS (UINT8_MAX - 2)
2247 #define ANV_DESCRIPTOR_SET_SHADER_CONSTANTS (UINT8_MAX - 1)
2248 #define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
2249
2250 struct anv_pipeline_binding {
2251 /** Index in the descriptor set
2252 *
2253 * This is a flattened index; the descriptor set layout is already taken
2254 * into account.
2255 */
2256 uint32_t index;
2257
2258 /** The descriptor set this surface corresponds to.
2259 *
2260 * The special ANV_DESCRIPTOR_SET_* values above indicate that this
2261 * binding is not a normal descriptor set but something else.
2262 */
2263 uint8_t set;
2264
2265 union {
2266 /** Plane in the binding index for images */
2267 uint8_t plane;
2268
2269 /** Input attachment index (relative to the subpass) */
2270 uint8_t input_attachment_index;
2271
2272 /** Dynamic offset index (for dynamic UBOs and SSBOs) */
2273 uint8_t dynamic_offset_index;
2274 };
2275
2276 /** For a storage image, whether it is write-only */
2277 uint8_t write_only;
2278
2279 /** Pad to 64 bits so that there are no holes and we can safely memcmp
2280 * assuming POD zero-initialization.
2281 */
2282 uint8_t pad;
2283 };
2284
2285 struct anv_push_range {
2286 /** Index in the descriptor set */
2287 uint32_t index;
2288
2289 /** Descriptor set index */
2290 uint8_t set;
2291
2292 /** Dynamic offset index (for dynamic UBOs) */
2293 uint8_t dynamic_offset_index;
2294
2295 /** Start offset in units of 32B */
2296 uint8_t start;
2297
2298 /** Range in units of 32B */
2299 uint8_t length;
2300 };
2301
2302 struct anv_pipeline_layout {
2303 struct vk_object_base base;
2304
2305 struct {
2306 struct anv_descriptor_set_layout *layout;
2307 uint32_t dynamic_offset_start;
2308 } set[MAX_SETS];
2309
2310 uint32_t num_sets;
2311
2312 unsigned char sha1[20];
2313 };
2314
2315 struct anv_buffer {
2316 struct vk_object_base base;
2317
2318 struct anv_device * device;
2319 VkDeviceSize size;
2320
2321 VkBufferUsageFlags usage;
2322
2323 /* Set when bound */
2324 struct anv_address address;
2325 };
2326
2327 static inline uint64_t
2328 anv_buffer_get_range(struct anv_buffer *buffer, uint64_t offset, uint64_t range)
2329 {
2330 assert(offset <= buffer->size);
2331 if (range == VK_WHOLE_SIZE) {
2332 return buffer->size - offset;
2333 } else {
2334 assert(range + offset >= range);
2335 assert(range + offset <= buffer->size);
2336 return range;
2337 }
2338 }
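/* Illustrative usage (sketch): resolving a descriptor's (offset, range)
 * pair, where range may be VK_WHOLE_SIZE, into an explicit byte count:
 *
 *    uint64_t bind_range =
 *       anv_buffer_get_range(buffer, info->offset, info->range);
 *
 * "buffer" and "info" (a VkDescriptorBufferInfo) are assumed caller locals.
 */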
2339
2340 enum anv_cmd_dirty_bits {
2341 ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
2342 ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
2343 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
2344 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
2345 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
2346 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
2347 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
2348 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
2349 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
2350 ANV_CMD_DIRTY_PIPELINE = 1 << 9,
2351 ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
2352 ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
2353 ANV_CMD_DIRTY_XFB_ENABLE = 1 << 12,
2354 ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE = 1 << 13, /* VK_DYNAMIC_STATE_LINE_STIPPLE_EXT */
2355 ANV_CMD_DIRTY_DYNAMIC_CULL_MODE = 1 << 14, /* VK_DYNAMIC_STATE_CULL_MODE_EXT */
2356 ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE = 1 << 15, /* VK_DYNAMIC_STATE_FRONT_FACE_EXT */
2357 ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY = 1 << 16, /* VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT */
2358 ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE = 1 << 17, /* VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT */
2359 ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE = 1 << 18, /* VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT */
2360 ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE = 1 << 19, /* VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT */
2361 ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP = 1 << 20, /* VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT */
2362 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE = 1 << 21, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT */
2363 ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE = 1 << 22, /* VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT */
2364 ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP = 1 << 23, /* VK_DYNAMIC_STATE_STENCIL_OP_EXT */
2365 };
2366 typedef uint32_t anv_cmd_dirty_mask_t;
2367
2368 #define ANV_CMD_DIRTY_DYNAMIC_ALL \
2369 (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT | \
2370 ANV_CMD_DIRTY_DYNAMIC_SCISSOR | \
2371 ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH | \
2372 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS | \
2373 ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS | \
2374 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS | \
2375 ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK | \
2376 ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK | \
2377 ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE | \
2378 ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE | \
2379 ANV_CMD_DIRTY_DYNAMIC_CULL_MODE | \
2380 ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE | \
2381 ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY | \
2382 ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE | \
2383 ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE | \
2384 ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE | \
2385 ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP | \
2386 ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE | \
2387 ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE | \
2388 ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP)
2389
2390 static inline enum anv_cmd_dirty_bits
2391 anv_cmd_dirty_bit_for_vk_dynamic_state(VkDynamicState vk_state)
2392 {
2393 switch (vk_state) {
2394 case VK_DYNAMIC_STATE_VIEWPORT:
2395 case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT:
2396 return ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2397 case VK_DYNAMIC_STATE_SCISSOR:
2398 case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT:
2399 return ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
2400 case VK_DYNAMIC_STATE_LINE_WIDTH:
2401 return ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2402 case VK_DYNAMIC_STATE_DEPTH_BIAS:
2403 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2404 case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
2405 return ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
2406 case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
2407 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
2408 case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
2409 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2410 case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
2411 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2412 case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
2413 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2414 case VK_DYNAMIC_STATE_LINE_STIPPLE_EXT:
2415 return ANV_CMD_DIRTY_DYNAMIC_LINE_STIPPLE;
2416 case VK_DYNAMIC_STATE_CULL_MODE_EXT:
2417 return ANV_CMD_DIRTY_DYNAMIC_CULL_MODE;
2418 case VK_DYNAMIC_STATE_FRONT_FACE_EXT:
2419 return ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE;
2420 case VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT:
2421 return ANV_CMD_DIRTY_DYNAMIC_PRIMITIVE_TOPOLOGY;
2422 case VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT:
2423 return ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE;
2424 case VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT:
2425 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_TEST_ENABLE;
2426 case VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT:
2427 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_WRITE_ENABLE;
2428 case VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT:
2429 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_COMPARE_OP;
2430 case VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT:
2431 return ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS_TEST_ENABLE;
2432 case VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT:
2433 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_TEST_ENABLE;
2434 case VK_DYNAMIC_STATE_STENCIL_OP_EXT:
2435 return ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP;
2436 default:
2437 assert(!"Unsupported dynamic state");
2438 return 0;
2439 }
2440 }
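/* Illustrative usage (sketch): accumulating the dirty bits covered by a
 * pipeline's VkPipelineDynamicStateCreateInfo, assumed here as "info":
 *
 *    anv_cmd_dirty_mask_t dynamic_states = 0;
 *    for (uint32_t i = 0; i < info->dynamicStateCount; i++) {
 *       dynamic_states |=
 *          anv_cmd_dirty_bit_for_vk_dynamic_state(info->pDynamicStates[i]);
 *    }
 */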
2441
2442
2443 enum anv_pipe_bits {
2444 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
2445 ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
2446 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
2447 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
2448 ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
2449 ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
2450 ANV_PIPE_TILE_CACHE_FLUSH_BIT = (1 << 6),
2451 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
2452 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
2453 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
2454 ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
2455 ANV_PIPE_CS_STALL_BIT = (1 << 20),
2456 ANV_PIPE_END_OF_PIPE_SYNC_BIT = (1 << 21),
2457
2458 /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
2459 * a flush has happened but not a CS stall. The next time we do any sort
2460 * of invalidation we need to insert a CS stall at that time. Otherwise,
2461 * we would have to CS stall on every flush which could be bad.
2462 */
2463 ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT = (1 << 22),
2464
2465 /* This bit does not exist directly in PIPE_CONTROL. It means that render
2466 * target operations related to transfer commands with VkBuffer as
2467 * destination are ongoing. Some operations like copies on the command
2468 * streamer might need to be aware of this to trigger the appropriate stall
2469 * before they can proceed with the copy.
2470 */
2471 ANV_PIPE_RENDER_TARGET_BUFFER_WRITES = (1 << 23),
2472
2473 /* This bit does not exist directly in PIPE_CONTROL. It means that Gen12
2474 * AUX-TT data has changed and we need to invalidate AUX-TT data. This is
2475 * done by writing the AUX-TT register.
2476 */
2477 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT = (1 << 24),
2478
2479 /* This bit does not exist directly in PIPE_CONTROL. It means that a
2480 * PIPE_CONTROL with a post-sync operation will follow. This is used to
2481 * implement a workaround for Gen9.
2482 */
2483 ANV_PIPE_POST_SYNC_BIT = (1 << 25),
2484 };
2485
2486 #define ANV_PIPE_FLUSH_BITS ( \
2487 ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
2488 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
2489 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | \
2490 ANV_PIPE_TILE_CACHE_FLUSH_BIT)
2491
2492 #define ANV_PIPE_STALL_BITS ( \
2493 ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
2494 ANV_PIPE_DEPTH_STALL_BIT | \
2495 ANV_PIPE_CS_STALL_BIT)
2496
2497 #define ANV_PIPE_INVALIDATE_BITS ( \
2498 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
2499 ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
2500 ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
2501 ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
2502 ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
2503 ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT | \
2504 ANV_PIPE_AUX_TABLE_INVALIDATE_BIT)
2505
2506 static inline enum anv_pipe_bits
2507 anv_pipe_flush_bits_for_access_flags(VkAccessFlags flags)
2508 {
2509 enum anv_pipe_bits pipe_bits = 0;
2510
2511 unsigned b;
2512 for_each_bit(b, flags) {
2513 switch ((VkAccessFlagBits)(1 << b)) {
2514 case VK_ACCESS_SHADER_WRITE_BIT:
2515 /* We're transitioning a buffer that was previously used as a write
2516 * destination through the data port. To make its content available
2517 * to future operations, flush the data cache.
2518 */
2519 pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2520 break;
2521 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
2522 /* We're transitioning a buffer that was previously used as a render
2523 * target. To make its content available to future operations, flush
2524 * the render target cache.
2525 */
2526 pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2527 break;
2528 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
2529 /* We're transitioning a buffer that was previously used as a depth
2530 * buffer. To make its content available to future operations, flush
2531 * the depth cache.
2532 */
2533 pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2534 break;
2535 case VK_ACCESS_TRANSFER_WRITE_BIT:
2536 /* We're transitioning a buffer that was previously used as a
2537 * transfer write destination. Generic write operations include color
2538 * & depth operations as well as buffer operations like:
2539 * - vkCmdClearColorImage()
2540 * - vkCmdClearDepthStencilImage()
2541 * - vkCmdBlitImage()
2542 * - vkCmdCopy*(), vkCmdUpdate*(), vkCmdFill*()
2543 *
2544 * Most of these operations are implemented using Blorp which writes
2545 * through the render target, so flush that cache to make it visible
2546 * to future operations. And for depth related operations we also
2547 * need to flush the depth cache.
2548 */
2549 pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2550 pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2551 break;
2552 case VK_ACCESS_MEMORY_WRITE_BIT:
2553 /* We're transitioning a buffer for generic write operations. Flush
2554 * all the caches.
2555 */
2556 pipe_bits |= ANV_PIPE_FLUSH_BITS;
2557 break;
2558 default:
2559 break; /* Nothing to do */
2560 }
2561 }
2562
2563 return pipe_bits;
2564 }
2565
2566 static inline enum anv_pipe_bits
2567 anv_pipe_invalidate_bits_for_access_flags(VkAccessFlags flags)
2568 {
2569 enum anv_pipe_bits pipe_bits = 0;
2570
2571 unsigned b;
2572 for_each_bit(b, flags) {
2573 switch ((VkAccessFlagBits)(1 << b)) {
2574 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
2575 /* Indirect draw commands take a buffer as input that we're going to
2576 * read from the command streamer to load some of the HW registers
2577 * (see genX_cmd_buffer.c:load_indirect_parameters). This requires a
2578 * command streamer stall so that all the cache flushes have
2579 * completed before the command streamer loads from memory.
2580 */
2581 pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2582 /* Indirect draw commands also set gl_BaseVertex & gl_BaseIndex
2583 * through a vertex buffer, so invalidate that cache.
2584 */
2585 pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2586 /* For CmdDispatchIndirect, we also load gl_NumWorkGroups through a
2587 * UBO from the buffer, so we need to invalidate constant cache.
2588 */
2589 pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2590 break;
2591 case VK_ACCESS_INDEX_READ_BIT:
2592 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
2593 /* We're transitioning a buffer to be used as input for vkCmdDraw*
2594 * commands, so we invalidate the VF cache to make sure there is no
2595 * stale data when we start rendering.
2596 */
2597 pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2598 break;
2599 case VK_ACCESS_UNIFORM_READ_BIT:
2600 /* We're transitioning a buffer to be used as uniform data. Because
2601 * uniform data is accessed through the data port & sampler, we need to
2602 * invalidate the texture cache (sampler) & constant cache (data
2603 * port) to avoid stale data.
2604 */
2605 pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2606 pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2607 break;
2608 case VK_ACCESS_SHADER_READ_BIT:
2609 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
2610 case VK_ACCESS_TRANSFER_READ_BIT:
2611 /* Transitioning a buffer to be read through the sampler, so
2612 * invalidate the texture cache; we don't want any stale data.
2613 */
2614 pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2615 break;
2616 case VK_ACCESS_MEMORY_READ_BIT:
2617 /* Transitioning a buffer for generic read, invalidate all the
2618 * caches.
2619 */
2620 pipe_bits |= ANV_PIPE_INVALIDATE_BITS;
2621 break;
2622 case VK_ACCESS_MEMORY_WRITE_BIT:
2623 /* Generic write, make sure all previously written things land in
2624 * memory.
2625 */
2626 pipe_bits |= ANV_PIPE_FLUSH_BITS;
2627 break;
2628 case VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT:
2629 /* Transitioning a buffer for conditional rendering. We'll load the
2630 * content of this buffer into HW registers using the command
2631 * streamer, so we need to stall the command streamer to make sure
2632 * any in-flight flush operations have completed.
2633 */
2634 pipe_bits |= ANV_PIPE_CS_STALL_BIT;
2635 break;
2636 default:
2637 break; /* Nothing to do */
2638 }
2639 }
2640
2641 return pipe_bits;
2642 }
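/* Illustrative usage (sketch): a pipeline barrier folds the source access
 * mask into flush bits and the destination access mask into invalidate
 * bits, then defers them on the command buffer:
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       anv_pipe_flush_bits_for_access_flags(src_access) |
 *       anv_pipe_invalidate_bits_for_access_flags(dst_access);
 *
 * "src_access" and "dst_access" are assumed caller locals.
 */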
2643
2644 #define VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV ( \
2645 VK_IMAGE_ASPECT_COLOR_BIT | \
2646 VK_IMAGE_ASPECT_PLANE_0_BIT | \
2647 VK_IMAGE_ASPECT_PLANE_1_BIT | \
2648 VK_IMAGE_ASPECT_PLANE_2_BIT)
2649 #define VK_IMAGE_ASPECT_PLANES_BITS_ANV ( \
2650 VK_IMAGE_ASPECT_PLANE_0_BIT | \
2651 VK_IMAGE_ASPECT_PLANE_1_BIT | \
2652 VK_IMAGE_ASPECT_PLANE_2_BIT)
2653
2654 struct anv_vertex_binding {
2655 struct anv_buffer * buffer;
2656 VkDeviceSize offset;
2657 VkDeviceSize stride;
2658 VkDeviceSize size;
2659 };
2660
2661 struct anv_xfb_binding {
2662 struct anv_buffer * buffer;
2663 VkDeviceSize offset;
2664 VkDeviceSize size;
2665 };
2666
2667 struct anv_push_constants {
2668 /** Push constant data provided by the client through vkPushConstants */
2669 uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];
2670
2671 /** Dynamic offsets for dynamic UBOs and SSBOs */
2672 uint32_t dynamic_offsets[MAX_DYNAMIC_BUFFERS];
2673
2674 /* Robust access pushed registers. */
2675 uint64_t push_reg_mask[MESA_SHADER_STAGES];
2676
2677 /** Pad out to a multiple of 32 bytes */
2678 uint32_t pad[2];
2679
2680 struct {
2681 /** Base workgroup ID
2682 *
2683 * Used for vkCmdDispatchBase.
2684 */
2685 uint32_t base_work_group_id[3];
2686
2687 /** Subgroup ID
2688 *
2689 * This is never set by software but is implicitly filled out when
2690 * uploading the push constants for compute shaders.
2691 */
2692 uint32_t subgroup_id;
2693 } cs;
2694 };
2695
2696 struct anv_dynamic_state {
2697 struct {
2698 uint32_t count;
2699 VkViewport viewports[MAX_VIEWPORTS];
2700 } viewport;
2701
2702 struct {
2703 uint32_t count;
2704 VkRect2D scissors[MAX_SCISSORS];
2705 } scissor;
2706
2707 float line_width;
2708
2709 struct {
2710 float bias;
2711 float clamp;
2712 float slope;
2713 } depth_bias;
2714
2715 float blend_constants[4];
2716
2717 struct {
2718 float min;
2719 float max;
2720 } depth_bounds;
2721
2722 struct {
2723 uint32_t front;
2724 uint32_t back;
2725 } stencil_compare_mask;
2726
2727 struct {
2728 uint32_t front;
2729 uint32_t back;
2730 } stencil_write_mask;
2731
2732 struct {
2733 uint32_t front;
2734 uint32_t back;
2735 } stencil_reference;
2736
2737 struct {
2738 struct {
2739 VkStencilOp fail_op;
2740 VkStencilOp pass_op;
2741 VkStencilOp depth_fail_op;
2742 VkCompareOp compare_op;
2743 } front;
2744 struct {
2745 VkStencilOp fail_op;
2746 VkStencilOp pass_op;
2747 VkStencilOp depth_fail_op;
2748 VkCompareOp compare_op;
2749 } back;
2750 } stencil_op;
2751
2752 struct {
2753 uint32_t factor;
2754 uint16_t pattern;
2755 } line_stipple;
2756
2757 VkCullModeFlags cull_mode;
2758 VkFrontFace front_face;
2759 VkPrimitiveTopology primitive_topology;
2760 bool depth_test_enable;
2761 bool depth_write_enable;
2762 VkCompareOp depth_compare_op;
2763 bool depth_bounds_test_enable;
2764 bool stencil_test_enable;
2765 bool dyn_vbo_stride;
2766 bool dyn_vbo_size;
2767 };
2768
2769 extern const struct anv_dynamic_state default_dynamic_state;
2770
2771 uint32_t anv_dynamic_state_copy(struct anv_dynamic_state *dest,
2772 const struct anv_dynamic_state *src,
2773 uint32_t copy_mask);
2774
2775 struct anv_surface_state {
2776 struct anv_state state;
2777 /** Address of the surface referred to by this state
2778 *
2779 * This address is relative to the start of the BO.
2780 */
2781 struct anv_address address;
2782 /* Address of the aux surface, if any
2783 *
2784 * This field is ANV_NULL_ADDRESS if and only if no aux surface exists.
2785 *
2786 * With the exception of gen8, the bottom 12 bits of this address' offset
2787 * include extra aux information.
2788 */
2789 struct anv_address aux_address;
2790 /* Address of the clear color, if any
2791 *
2792 * This address is relative to the start of the BO.
2793 */
2794 struct anv_address clear_address;
2795 };
2796
2797 /**
2798 * Attachment state when recording a renderpass instance.
2799 *
2800 * The clear value is valid only if there exists a pending clear.
2801 */
2802 struct anv_attachment_state {
2803 enum isl_aux_usage aux_usage;
2804 struct anv_surface_state color;
2805 struct anv_surface_state input;
2806
2807 VkImageLayout current_layout;
2808 VkImageLayout current_stencil_layout;
2809 VkImageAspectFlags pending_clear_aspects;
2810 VkImageAspectFlags pending_load_aspects;
2811 bool fast_clear;
2812 VkClearValue clear_value;
2813
2814 /* When multiview is active, attachments with a renderpass clear
2815 * operation have their respective layers cleared on the first
2816 * subpass that uses them, and only in that subpass. We keep track
2817 * of this using a bitfield to indicate which layers of an attachment
2818 * have not been cleared yet when multiview is active.
2819 */
2820 uint32_t pending_clear_views;
2821 struct anv_image_view * image_view;
2822 };
2823
2824 /** State tracking for vertex buffer flushes
2825 *
2826 * On Gen8-9, the VF cache only considers the bottom 32 bits of memory
2827 * addresses. If you happen to have two vertex buffers which get placed
2828 * exactly 4 GiB apart and use them in back-to-back draw calls, you can get
2829 * collisions. In order to solve this problem, we track vertex address ranges
2830 * which are live in the cache and invalidate the cache if one ever exceeds 32
2831 * bits.
2832 */
2833 struct anv_vb_cache_range {
2834 /* Virtual address at which the live vertex buffer cache range starts for
2835 * this vertex buffer index.
2836 */
2837 uint64_t start;
2838
2839 /* Virtual address of the byte after where the vertex buffer cache range ends.
2840 * This is exclusive such that end - start is the size of the range.
2841 */
2842 uint64_t end;
2843 };
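/* Illustrative check (sketch, not a definition from this file): a single
 * non-empty bound range already needs a VF cache invalidate if it cannot
 * fit in one 4 GiB window, i.e. its start and inclusive end differ in their
 * upper 32 bits:
 *
 *    bool crosses_4gib = (range->start >> 32) != ((range->end - 1) >> 32);
 *
 * The full tracking policy (merging dirty ranges, comparing against the
 * other live ranges) lives in the genX command buffer code, not here.
 */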
2844
2845 /** State tracking for particular pipeline bind point
2846 *
2847 * This struct is the base struct for anv_cmd_graphics_state and
2848 * anv_cmd_compute_state. These are used to track state which is bound to a
2849 * particular type of pipeline. Generic state that applies per-stage such as
2850 * binding table offsets and push constants is tracked generically with a
2851 * per-stage array in anv_cmd_state.
2852 */
2853 struct anv_cmd_pipeline_state {
2854 struct anv_descriptor_set *descriptors[MAX_SETS];
2855 struct anv_push_descriptor_set *push_descriptors[MAX_SETS];
2856
2857 struct anv_push_constants push_constants;
2858
2859 /* Push constant state allocated when flushing push constants. */
2860 struct anv_state push_constants_state;
2861 };
2862
2863 /** State tracking for graphics pipeline
2864 *
2865 * This has anv_cmd_pipeline_state as a base struct to track things which get
2866 * bound to a graphics pipeline. Along with general pipeline bind point state
2867 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2868 * state which is graphics-specific.
2869 */
2870 struct anv_cmd_graphics_state {
2871 struct anv_cmd_pipeline_state base;
2872
2873 struct anv_graphics_pipeline *pipeline;
2874
2875 anv_cmd_dirty_mask_t dirty;
2876 uint32_t vb_dirty;
2877
2878 struct anv_vb_cache_range ib_bound_range;
2879 struct anv_vb_cache_range ib_dirty_range;
2880 struct anv_vb_cache_range vb_bound_ranges[33];
2881 struct anv_vb_cache_range vb_dirty_ranges[33];
2882
2883 VkShaderStageFlags push_constant_stages;
2884
2885 struct anv_dynamic_state dynamic;
2886
2887 uint32_t primitive_topology;
2888
2889 struct {
2890 struct anv_buffer *index_buffer;
2891 uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
2892 uint32_t index_offset;
2893 } gen7;
2894 };
2895
2896 /** State tracking for compute pipeline
2897 *
2898 * This has anv_cmd_pipeline_state as a base struct to track things which get
2899 * bound to a compute pipeline. Along with general pipeline bind point state
2900 * which is in the anv_cmd_pipeline_state base struct, it also contains other
2901 * state which is compute-specific.
2902 */
2903 struct anv_cmd_compute_state {
2904 struct anv_cmd_pipeline_state base;
2905
2906 struct anv_compute_pipeline *pipeline;
2907
2908 bool pipeline_dirty;
2909
2910 struct anv_address num_workgroups;
2911 };
2912
2913 /** State required while building cmd buffer */
2914 struct anv_cmd_state {
2915 /* PIPELINE_SELECT.PipelineSelection */
2916 uint32_t current_pipeline;
2917 const struct gen_l3_config * current_l3_config;
2918 uint32_t last_aux_map_state;
2919
2920 struct anv_cmd_graphics_state gfx;
2921 struct anv_cmd_compute_state compute;
2922
2923 enum anv_pipe_bits pending_pipe_bits;
2924 VkShaderStageFlags descriptors_dirty;
2925 VkShaderStageFlags push_constants_dirty;
2926
2927 struct anv_framebuffer * framebuffer;
2928 struct anv_render_pass * pass;
2929 struct anv_subpass * subpass;
2930 VkRect2D render_area;
2931 uint32_t restart_index;
2932 struct anv_vertex_binding vertex_bindings[MAX_VBS];
2933 bool xfb_enabled;
2934 struct anv_xfb_binding xfb_bindings[MAX_XFB_BUFFERS];
2935 struct anv_state binding_tables[MESA_SHADER_STAGES];
2936 struct anv_state samplers[MESA_SHADER_STAGES];
2937
2938 unsigned char sampler_sha1s[MESA_SHADER_STAGES][20];
2939 unsigned char surface_sha1s[MESA_SHADER_STAGES][20];
2940 unsigned char push_sha1s[MESA_SHADER_STAGES][20];
2941
2942 /**
2943 * Whether or not the gen8 PMA fix is enabled. We ensure that it is
2944 * disabled at the top of any command buffer by disabling it in
2945 * EndCommandBuffer and before invoking the secondary in ExecuteCommands.
2946 */
2947 bool pma_fix_enabled;
2948
2949 /**
2950 * Whether or not we know for certain that HiZ is enabled for the current
2951 * subpass. If, for whatever reason, we are unsure as to whether HiZ is
2952 * enabled or not, this will be false.
2953 */
2954 bool hiz_enabled;
2955
2956 bool conditional_render_enabled;
2957
2958 /**
2959 * Last rendering scale argument provided to
2960 * genX(cmd_buffer_emit_hashing_mode)().
2961 */
2962 unsigned current_hash_scale;
2963
2964 /**
2965 * Array length is anv_cmd_state::pass::attachment_count. Array content is
2966 * valid only when recording a render pass instance.
2967 */
2968 struct anv_attachment_state * attachments;
2969
2970 /**
2971 * Surface states for color render targets. These are stored in a single
2972 * flat array. For depth-stencil attachments, the surface state is simply
2973 * left blank.
2974 */
2975 struct anv_state attachment_states;
2976
2977 /**
2978 * A null surface state of the right size to match the framebuffer. This
2979 * is one of the states in attachment_states.
2980 */
2981 struct anv_state null_surface_state;
2982 };
2983
2984 struct anv_cmd_pool {
2985 struct vk_object_base base;
2986 VkAllocationCallbacks alloc;
2987 struct list_head cmd_buffers;
2988 };
2989
2990 #define ANV_CMD_BUFFER_BATCH_SIZE 8192
2991
2992 enum anv_cmd_buffer_exec_mode {
2993 ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
2994 ANV_CMD_BUFFER_EXEC_MODE_EMIT,
2995 ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
2996 ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
2997 ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
2998 ANV_CMD_BUFFER_EXEC_MODE_CALL_AND_RETURN,
2999 };
3000
3001 struct anv_cmd_buffer {
3002 struct vk_object_base base;
3003
3004 struct anv_device * device;
3005
3006 struct anv_cmd_pool * pool;
3007 struct list_head pool_link;
3008
3009 struct anv_batch batch;
3010
3011 /* Fields required for the actual chain of anv_batch_bo's.
3012 *
3013 * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
3014 */
3015 struct list_head batch_bos;
3016 enum anv_cmd_buffer_exec_mode exec_mode;
3017
3018 /* A vector of anv_batch_bo pointers for every batch or surface buffer
3019 * referenced by this command buffer
3020 *
3021 * initialized by anv_cmd_buffer_init_batch_bo_chain()
3022 */
3023 struct u_vector seen_bbos;
3024
3025 /* A vector of int32_t's for every block of binding tables.
3026 *
3027 * initialized by anv_cmd_buffer_init_batch_bo_chain()
3028 */
3029 struct u_vector bt_block_states;
3030 struct anv_state bt_next;
3031
3032 struct anv_reloc_list surface_relocs;
3033 /** Last seen surface state block pool center bo offset */
3034 uint32_t last_ss_pool_center;
3035
3036 /* Serial for tracking buffer completion */
3037 uint32_t serial;
3038
3039 /* Stream objects for storing temporary data */
3040 struct anv_state_stream surface_state_stream;
3041 struct anv_state_stream dynamic_state_stream;
3042
3043 VkCommandBufferUsageFlags usage_flags;
3044 VkCommandBufferLevel level;
3045
3046 struct anv_query_pool *perf_query_pool;
3047
3048 struct anv_cmd_state state;
3049
3050 struct anv_address return_addr;
3051
3052 /* Set by SetPerformanceMarkerINTEL, written into queries by CmdBeginQuery */
3053 uint64_t intel_perf_marker;
3054 };
3055
3056 VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
3057 void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
3058 void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
3059 void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
3060 void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
3061 struct anv_cmd_buffer *secondary);
3062 void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
3063 VkResult anv_cmd_buffer_execbuf(struct anv_queue *queue,
3064 struct anv_cmd_buffer *cmd_buffer,
3065 const VkSemaphore *in_semaphores,
3066 const uint64_t *in_wait_values,
3067 uint32_t num_in_semaphores,
3068 const VkSemaphore *out_semaphores,
3069 const uint64_t *out_signal_values,
3070 uint32_t num_out_semaphores,
3071 VkFence fence,
3072 int perf_query_pass);
3073
3074 VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);
3075
3076 struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
3077 const void *data, uint32_t size, uint32_t alignment);
3078 struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
3079 uint32_t *a, uint32_t *b,
3080 uint32_t dwords, uint32_t alignment);
3081
3082 struct anv_address
3083 anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
3084 struct anv_state
3085 anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
3086 uint32_t entries, uint32_t *state_offset);
3087 struct anv_state
3088 anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
3089 struct anv_state
3090 anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
3091 uint32_t size, uint32_t alignment);
3092
3093 VkResult
3094 anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
3095
3096 void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
3097 void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
3098 bool depth_clamp_enable);
3099 void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
3100
3101 void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
3102 struct anv_render_pass *pass,
3103 struct anv_framebuffer *framebuffer,
3104 const VkClearValue *clear_values);
3105
3106 void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
3107
3108 struct anv_state
3109 anv_cmd_buffer_gfx_push_constants(struct anv_cmd_buffer *cmd_buffer);
3110 struct anv_state
3111 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);
3112
3113 const struct anv_image_view *
3114 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);
3115
3116 VkResult
3117 anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
3118 uint32_t num_entries,
3119 uint32_t *state_offset,
3120 struct anv_state *bt_state);
3121
3122 void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
3123
3124 void anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer);
3125
3126 enum anv_fence_type {
3127 ANV_FENCE_TYPE_NONE = 0,
3128 ANV_FENCE_TYPE_BO,
3129 ANV_FENCE_TYPE_WSI_BO,
3130 ANV_FENCE_TYPE_SYNCOBJ,
3131 ANV_FENCE_TYPE_WSI,
3132 };
3133
3134 enum anv_bo_fence_state {
3135 /** Indicates that this is a new (or newly reset) fence */
3136 ANV_BO_FENCE_STATE_RESET,
3137
3138 /** Indicates that this fence has been submitted to the GPU but is still
3139 * (as far as we know) in use by the GPU.
3140 */
3141 ANV_BO_FENCE_STATE_SUBMITTED,
3142
3143 ANV_BO_FENCE_STATE_SIGNALED,
3144 };
3145
3146 struct anv_fence_impl {
3147 enum anv_fence_type type;
3148
3149 union {
3150 /** Fence implementation for BO fences
3151 *
3152 * These fences use a BO and a set of CPU-tracked state flags. The BO
3153 * is added to the object list of the last execbuf call in a QueueSubmit
3154 * and is marked EXEC_OBJECT_WRITE. The state flags track when the BO has been
3155 * submitted to the kernel. We need to do this because Vulkan lets you
3156 * wait on a fence that has not yet been submitted and I915_GEM_BUSY
3157 * will say it's idle in this case.
3158 */
3159 struct {
3160 struct anv_bo *bo;
3161 enum anv_bo_fence_state state;
3162 } bo;
3163
3164 /** DRM syncobj handle for syncobj-based fences */
3165 uint32_t syncobj;
3166
3167 /** WSI fence */
3168 struct wsi_fence *fence_wsi;
3169 };
3170 };
3171
3172 struct anv_fence {
3173 struct vk_object_base base;
3174
3175 /* Permanent fence state. Every fence has some form of permanent state
3176 * (type != ANV_FENCE_TYPE_NONE). This may be a BO to fence on (for
3177 * cross-process fences) or it could just be a dummy for use internally.
3178 */
3179 struct anv_fence_impl permanent;
3180
3181 /* Temporary fence state. A fence *may* have temporary state. That state
3182 * is added to the fence by an import operation and is reset back to
3183 * ANV_FENCE_TYPE_NONE when the fence is reset. A fence with temporary
3184 * state cannot be signaled because the fence must already be signaled
3185 * before the temporary state can be exported from the fence in the other
3186 * process and imported here.
3187 */
3188 struct anv_fence_impl temporary;
3189 };
3190
3191 void anv_fence_reset_temporary(struct anv_device *device,
3192 struct anv_fence *fence);
3193
3194 struct anv_event {
3195 struct vk_object_base base;
3196 uint64_t semaphore;
3197 struct anv_state state;
3198 };
3199
3200 enum anv_semaphore_type {
3201 ANV_SEMAPHORE_TYPE_NONE = 0,
3202 ANV_SEMAPHORE_TYPE_DUMMY,
3203 ANV_SEMAPHORE_TYPE_BO,
3204 ANV_SEMAPHORE_TYPE_WSI_BO,
3205 ANV_SEMAPHORE_TYPE_SYNC_FILE,
3206 ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ,
3207 ANV_SEMAPHORE_TYPE_TIMELINE,
3208 ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ_TIMELINE,
3209 };
3210
3211 struct anv_timeline_point {
3212 struct list_head link;
3213
3214 uint64_t serial;
3215
3216 /* Number of waiters on this point; when > 0 the point should not be garbage
3217 * collected.
3218 */
3219 int waiting;
3220
3221 /* BO used for synchronization. */
3222 struct anv_bo *bo;
3223 };
3224
3225 struct anv_timeline {
3226 pthread_mutex_t mutex;
3227 pthread_cond_t cond;
3228
3229 uint64_t highest_past;
3230 uint64_t highest_pending;
3231
3232 struct list_head points;
3233 struct list_head free_points;
3234 };
3235
3236 struct anv_semaphore_impl {
3237 enum anv_semaphore_type type;
3238
3239 union {
3240 /* A BO representing this semaphore when type == ANV_SEMAPHORE_TYPE_BO
3241 * or type == ANV_SEMAPHORE_TYPE_WSI_BO. This BO will be added to the
3242 * object list on any execbuf2 calls for which this semaphore is used as
3243 * a wait or signal fence. When used as a signal fence or when type ==
3244 * ANV_SEMAPHORE_TYPE_WSI_BO, the EXEC_OBJECT_WRITE flag will be set.
3245 */
3246 struct anv_bo *bo;
3247
3248 /* The sync file descriptor when type == ANV_SEMAPHORE_TYPE_SYNC_FILE.
3249 * If the semaphore is in the unsignaled state due to either just being
3250 * created or because it has been used for a wait, fd will be -1.
3251 */
3252 int fd;
3253
3254 /* Sync object handle when type == ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ.
3255 * Unlike GEM BOs, DRM sync objects aren't deduplicated by the kernel on
3256 * import so we don't need to bother with a userspace cache.
3257 */
3258 uint32_t syncobj;
3259
3260 /* Non-shareable timeline semaphore
3261 *
3262 * Used when the kernel doesn't have support for timeline semaphores.
3263 */
3264 struct anv_timeline timeline;
3265 };
3266 };
3267
3268 struct anv_semaphore {
3269 struct vk_object_base base;
3270
3271 uint32_t refcount;
3272
3273 /* Permanent semaphore state. Every semaphore has some form of permanent
3274 * state (type != ANV_SEMAPHORE_TYPE_NONE). This may be a BO to fence on
3275 * (for cross-process semaphores) or it could just be a dummy for use
3276 * internally.
3277 */
3278 struct anv_semaphore_impl permanent;
3279
3280 /* Temporary semaphore state. A semaphore *may* have temporary state.
3281 * That state is added to the semaphore by an import operation and is reset
3282 * back to ANV_SEMAPHORE_TYPE_NONE when the semaphore is waited on. A
3283 * semaphore with temporary state cannot be signaled because the semaphore
3284 * must already be signaled before the temporary state can be exported from
3285 * the semaphore in the other process and imported here.
3286 */
3287 struct anv_semaphore_impl temporary;
3288 };
3289
3290 void anv_semaphore_reset_temporary(struct anv_device *device,
3291 struct anv_semaphore *semaphore);
3292
3293 struct anv_shader_module {
3294 struct vk_object_base base;
3295
3296 unsigned char sha1[20];
3297 uint32_t size;
3298 char data[0];
3299 };
3300
3301 static inline gl_shader_stage
3302 vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
3303 {
3304 assert(__builtin_popcount(vk_stage) == 1);
3305 return ffs(vk_stage) - 1;
3306 }
3307
3308 static inline VkShaderStageFlagBits
3309 mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
3310 {
3311 return (1 << mesa_stage);
3312 }
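
/* Worked example for the two helpers above, relying on the fact that the
 * Vulkan stage bits and gl_shader_stage indices line up stage-for-stage:
 *
 *    vk_to_mesa_shader_stage(VK_SHADER_STAGE_FRAGMENT_BIT)   // 0x10
 *       == ffs(0x10) - 1 == 4 == MESA_SHADER_FRAGMENT
 *    mesa_to_vk_shader_stage(MESA_SHADER_FRAGMENT)
 *       == 1 << 4 == VK_SHADER_STAGE_FRAGMENT_BIT
 */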
3313
3314 #define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)
3315
3316 #define anv_foreach_stage(stage, stage_bits) \
3317 for (gl_shader_stage stage, \
3318 __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
3319 stage = __builtin_ffs(__tmp) - 1, __tmp; \
3320 __tmp &= ~(1 << (stage)))
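
/* Usage sketch for anv_foreach_stage(): visit each set bit of a
 * VkShaderStageFlags mask as a gl_shader_stage, lowest stage first.
 *
 *    VkShaderStageFlags bits =
 *       VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
 *    anv_foreach_stage(s, bits) {
 *       // s is MESA_SHADER_VERTEX on the first pass,
 *       // MESA_SHADER_FRAGMENT on the second
 *    }
 */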
3321
3322 enum anv_shader_reloc {
3323 ANV_SHADER_RELOC_CONST_DATA_ADDR_LOW,
3324 ANV_SHADER_RELOC_CONST_DATA_ADDR_HIGH,
3325 };
3326
3327 struct anv_pipeline_bind_map {
3328 unsigned char surface_sha1[20];
3329 unsigned char sampler_sha1[20];
3330 unsigned char push_sha1[20];
3331
3332 uint32_t surface_count;
3333 uint32_t sampler_count;
3334
3335 struct anv_pipeline_binding * surface_to_descriptor;
3336 struct anv_pipeline_binding * sampler_to_descriptor;
3337
3338 struct anv_push_range push_ranges[4];
3339 };
3340
3341 struct anv_shader_bin_key {
3342 uint32_t size;
3343 uint8_t data[0];
3344 };
3345
3346 struct anv_shader_bin {
3347 uint32_t ref_cnt;
3348
3349 gl_shader_stage stage;
3350
3351 const struct anv_shader_bin_key *key;
3352
3353 struct anv_state kernel;
3354 uint32_t kernel_size;
3355
3356 const struct brw_stage_prog_data *prog_data;
3357 uint32_t prog_data_size;
3358
3359 struct brw_compile_stats stats[3];
3360 uint32_t num_stats;
3361
3362 struct nir_xfb_info *xfb_info;
3363
3364 struct anv_pipeline_bind_map bind_map;
3365 };
3366
3367 struct anv_shader_bin *
3368 anv_shader_bin_create(struct anv_device *device,
3369 gl_shader_stage stage,
3370 const void *key, uint32_t key_size,
3371 const void *kernel, uint32_t kernel_size,
3372 const struct brw_stage_prog_data *prog_data,
3373 uint32_t prog_data_size,
3374 const struct brw_compile_stats *stats, uint32_t num_stats,
3375 const struct nir_xfb_info *xfb_info,
3376 const struct anv_pipeline_bind_map *bind_map);
3377
3378 void
3379 anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);
3380
3381 static inline void
3382 anv_shader_bin_ref(struct anv_shader_bin *shader)
3383 {
3384 assert(shader && shader->ref_cnt >= 1);
3385 p_atomic_inc(&shader->ref_cnt);
3386 }
3387
3388 static inline void
3389 anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
3390 {
3391 assert(shader && shader->ref_cnt >= 1);
3392 if (p_atomic_dec_zero(&shader->ref_cnt))
3393 anv_shader_bin_destroy(device, shader);
3394 }
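
/* Reference-counting sketch for anv_shader_bin (illustrative, not a policy
 * statement about any particular caller): every holder takes a reference and
 * drops it when done; the bin is destroyed on the last unref.
 *
 *    anv_shader_bin_ref(shader);               // share ownership
 *    ...
 *    anv_shader_bin_unref(device, shader);     // frees once ref_cnt hits 0
 */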
3395
3396 struct anv_pipeline_executable {
3397 gl_shader_stage stage;
3398
3399 struct brw_compile_stats stats;
3400
3401 char *nir;
3402 char *disasm;
3403 };
3404
3405 enum anv_pipeline_type {
3406 ANV_PIPELINE_GRAPHICS,
3407 ANV_PIPELINE_COMPUTE,
3408 };
3409
3410 struct anv_pipeline {
3411 struct vk_object_base base;
3412
3413 struct anv_device * device;
3414
3415 struct anv_batch batch;
3416 struct anv_reloc_list batch_relocs;
3417
3418 void * mem_ctx;
3419
3420 enum anv_pipeline_type type;
3421 VkPipelineCreateFlags flags;
3422
3423 struct util_dynarray executables;
3424
3425 const struct gen_l3_config * l3_config;
3426 };
3427
3428 struct anv_graphics_pipeline {
3429 struct anv_pipeline base;
3430
3431 uint32_t batch_data[512];
3432
3433 anv_cmd_dirty_mask_t dynamic_state_mask;
3434 struct anv_dynamic_state dynamic_state;
3435
3436 uint32_t topology;
3437
3438 struct anv_subpass * subpass;
3439
3440 struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
3441
3442 VkShaderStageFlags active_stages;
3443
3444 bool primitive_restart;
3445 bool writes_depth;
3446 bool depth_test_enable;
3447 bool writes_stencil;
3448 bool stencil_test_enable;
3449 bool depth_clamp_enable;
3450 bool depth_clip_enable;
3451 bool sample_shading_enable;
3452 bool kill_pixel;
3453 bool depth_bounds_test_enable;
3454
3455 /* When primitive replication is used, subpass->view_mask will describe what
3456 * views to replicate.
3457 */
3458 bool use_primitive_replication;
3459
3460 struct anv_state blend_state;
3461
3462 uint32_t vb_used;
3463 struct anv_pipeline_vertex_binding {
3464 uint32_t stride;
3465 bool instanced;
3466 uint32_t instance_divisor;
3467 } vb[MAX_VBS];
3468
3469 struct {
3470 uint32_t sf[7];
3471 uint32_t depth_stencil_state[3];
3472 uint32_t clip[4];
3473 } gen7;
3474
3475 struct {
3476 uint32_t sf[4];
3477 uint32_t raster[5];
3478 uint32_t wm_depth_stencil[3];
3479 } gen8;
3480
3481 struct {
3482 uint32_t wm_depth_stencil[4];
3483 } gen9;
3484 };
3485
3486 struct anv_compute_pipeline {
3487 struct anv_pipeline base;
3488
3489 struct anv_shader_bin * cs;
3490 uint32_t cs_right_mask;
3491 uint32_t batch_data[9];
3492 uint32_t interface_descriptor_data[8];
3493 };
3494
3495 #define ANV_DECL_PIPELINE_DOWNCAST(pipe_type, pipe_enum) \
3496 static inline struct anv_##pipe_type##_pipeline * \
3497 anv_pipeline_to_##pipe_type(struct anv_pipeline *pipeline) \
3498 { \
3499 assert(pipeline->type == pipe_enum); \
3500 return (struct anv_##pipe_type##_pipeline *) pipeline; \
3501 }
3502
3503 ANV_DECL_PIPELINE_DOWNCAST(graphics, ANV_PIPELINE_GRAPHICS)
3504 ANV_DECL_PIPELINE_DOWNCAST(compute, ANV_PIPELINE_COMPUTE)
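
/* Usage sketch for the downcasts generated above; they are only valid after
 * checking the pipeline type.
 *
 *    if (pipeline->type == ANV_PIPELINE_GRAPHICS) {
 *       struct anv_graphics_pipeline *gfx =
 *          anv_pipeline_to_graphics(pipeline);
 *       // gfx->base is the embedded struct anv_pipeline
 *    }
 */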
3505
3506 static inline bool
3507 anv_pipeline_has_stage(const struct anv_graphics_pipeline *pipeline,
3508 gl_shader_stage stage)
3509 {
3510 return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
3511 }
3512
3513 #define ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(prefix, stage) \
3514 static inline const struct brw_##prefix##_prog_data * \
3515 get_##prefix##_prog_data(const struct anv_graphics_pipeline *pipeline) \
3516 { \
3517 if (anv_pipeline_has_stage(pipeline, stage)) { \
3518 return (const struct brw_##prefix##_prog_data *) \
3519 pipeline->shaders[stage]->prog_data; \
3520 } else { \
3521 return NULL; \
3522 } \
3523 }
3524
3525 ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
3526 ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tcs, MESA_SHADER_TESS_CTRL)
3527 ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(tes, MESA_SHADER_TESS_EVAL)
3528 ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
3529 ANV_DECL_GET_GRAPHICS_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
3530
3531 static inline const struct brw_cs_prog_data *
3532 get_cs_prog_data(const struct anv_compute_pipeline *pipeline)
3533 {
3534 assert(pipeline->cs);
3535 return (const struct brw_cs_prog_data *) pipeline->cs->prog_data;
3536 }
3537
3538 static inline const struct brw_vue_prog_data *
3539 anv_pipeline_get_last_vue_prog_data(const struct anv_graphics_pipeline *pipeline)
3540 {
3541 if (anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY))
3542 return &get_gs_prog_data(pipeline)->base;
3543 else if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
3544 return &get_tes_prog_data(pipeline)->base;
3545 else
3546 return &get_vs_prog_data(pipeline)->base;
3547 }
3548
3549 VkResult
3550 anv_pipeline_init(struct anv_pipeline *pipeline,
3551 struct anv_device *device,
3552 enum anv_pipeline_type type,
3553 VkPipelineCreateFlags flags,
3554 const VkAllocationCallbacks *pAllocator);
3555
3556 void
3557 anv_pipeline_finish(struct anv_pipeline *pipeline,
3558 struct anv_device *device,
3559 const VkAllocationCallbacks *pAllocator);
3560
3561 VkResult
3562 anv_graphics_pipeline_init(struct anv_graphics_pipeline *pipeline, struct anv_device *device,
3563 struct anv_pipeline_cache *cache,
3564 const VkGraphicsPipelineCreateInfo *pCreateInfo,
3565 const VkAllocationCallbacks *alloc);
3566
3567 VkResult
3568 anv_pipeline_compile_cs(struct anv_compute_pipeline *pipeline,
3569 struct anv_pipeline_cache *cache,
3570 const VkComputePipelineCreateInfo *info,
3571 const struct anv_shader_module *module,
3572 const char *entrypoint,
3573 const VkSpecializationInfo *spec_info);
3574
3575 struct anv_cs_parameters {
3576 uint32_t group_size;
3577 uint32_t simd_size;
3578 uint32_t threads;
3579 };
3580
3581 struct anv_cs_parameters
3582 anv_cs_parameters(const struct anv_compute_pipeline *pipeline);
3583
3584 struct anv_format_plane {
3585 enum isl_format isl_format:16;
3586 struct isl_swizzle swizzle;
3587
3588 /* Whether this plane contains chroma channels */
3589 bool has_chroma;
3590
3591 /* For downscaling of YUV planes */
3592 uint8_t denominator_scales[2];
3593
   /* How to map sampled ycbcr planes to a single 4-component element. */
   struct isl_swizzle ycbcr_swizzle;

   /* What aspect is associated with this plane */
3598 VkImageAspectFlags aspect;
3599 };
3600
3601
3602 struct anv_format {
3603 struct anv_format_plane planes[3];
3604 VkFormat vk_format;
3605 uint8_t n_planes;
3606 bool can_ycbcr;
3607 };
3608
3609 /**
3610 * Return the aspect's _format_ plane, not its _memory_ plane (using the
3611 * vocabulary of VK_EXT_image_drm_format_modifier). As a consequence, \a
3612 * aspect_mask may contain VK_IMAGE_ASPECT_PLANE_*, but must not contain
3613 * VK_IMAGE_ASPECT_MEMORY_PLANE_* .
3614 */
3615 static inline uint32_t
3616 anv_image_aspect_to_plane(VkImageAspectFlags image_aspects,
3617 VkImageAspectFlags aspect_mask)
3618 {
3619 switch (aspect_mask) {
3620 case VK_IMAGE_ASPECT_COLOR_BIT:
3621 case VK_IMAGE_ASPECT_DEPTH_BIT:
3622 case VK_IMAGE_ASPECT_PLANE_0_BIT:
3623 return 0;
3624 case VK_IMAGE_ASPECT_STENCIL_BIT:
3625 if ((image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) == 0)
3626 return 0;
3627 /* Fall-through */
3628 case VK_IMAGE_ASPECT_PLANE_1_BIT:
3629 return 1;
3630 case VK_IMAGE_ASPECT_PLANE_2_BIT:
3631 return 2;
3632 default:
3633 /* Purposefully assert with depth/stencil aspects. */
3634 unreachable("invalid image aspect");
3635 }
3636 }
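
/* Worked examples for anv_image_aspect_to_plane():
 *
 *    // combined depth/stencil image: depth is plane 0, stencil is plane 1
 *    anv_image_aspect_to_plane(VK_IMAGE_ASPECT_DEPTH_BIT |
 *                              VK_IMAGE_ASPECT_STENCIL_BIT,
 *                              VK_IMAGE_ASPECT_STENCIL_BIT) == 1
 *
 *    // stencil-only image: stencil is plane 0
 *    anv_image_aspect_to_plane(VK_IMAGE_ASPECT_STENCIL_BIT,
 *                              VK_IMAGE_ASPECT_STENCIL_BIT) == 0
 */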
3637
3638 static inline VkImageAspectFlags
3639 anv_plane_to_aspect(VkImageAspectFlags image_aspects,
3640 uint32_t plane)
3641 {
3642 if (image_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
3643 if (util_bitcount(image_aspects) > 1)
3644 return VK_IMAGE_ASPECT_PLANE_0_BIT << plane;
3645 return VK_IMAGE_ASPECT_COLOR_BIT;
3646 }
3647 if (image_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
3648 return VK_IMAGE_ASPECT_DEPTH_BIT << plane;
3649 assert(image_aspects == VK_IMAGE_ASPECT_STENCIL_BIT);
3650 return VK_IMAGE_ASPECT_STENCIL_BIT;
3651 }
3652
3653 #define anv_foreach_image_aspect_bit(b, image, aspects) \
3654 for_each_bit(b, anv_image_expand_aspects(image, aspects))
3655
3656 const struct anv_format *
3657 anv_get_format(VkFormat format);
3658
3659 static inline uint32_t
3660 anv_get_format_planes(VkFormat vk_format)
3661 {
3662 const struct anv_format *format = anv_get_format(vk_format);
3663
3664 return format != NULL ? format->n_planes : 0;
3665 }
3666
3667 struct anv_format_plane
3668 anv_get_format_plane(const struct gen_device_info *devinfo, VkFormat vk_format,
3669 VkImageAspectFlagBits aspect, VkImageTiling tiling);
3670
3671 static inline enum isl_format
3672 anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
3673 VkImageAspectFlags aspect, VkImageTiling tiling)
3674 {
3675 return anv_get_format_plane(devinfo, vk_format, aspect, tiling).isl_format;
3676 }
3677
3678 bool anv_formats_ccs_e_compatible(const struct gen_device_info *devinfo,
3679 VkImageCreateFlags create_flags,
3680 VkFormat vk_format,
3681 VkImageTiling vk_tiling,
3682 const VkImageFormatListCreateInfoKHR *fmt_list);
3683
3684 static inline struct isl_swizzle
3685 anv_swizzle_for_render(struct isl_swizzle swizzle)
3686 {
   /* Sometimes the swizzle will map alpha to one. We do this to fake
    * RGB as RGBA for texturing.
3689 */
3690 assert(swizzle.a == ISL_CHANNEL_SELECT_ONE ||
3691 swizzle.a == ISL_CHANNEL_SELECT_ALPHA);
3692
3693 /* But it doesn't matter what we render to that channel */
3694 swizzle.a = ISL_CHANNEL_SELECT_ALPHA;
3695
3696 return swizzle;
3697 }
3698
3699 void
3700 anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
3701
3702 /**
3703 * Subsurface of an anv_image.
3704 */
3705 struct anv_surface {
3706 /** Valid only if isl_surf::size_B > 0. */
3707 struct isl_surf isl;
3708
3709 /**
3710 * Offset from VkImage's base address, as bound by vkBindImageMemory().
3711 */
3712 uint32_t offset;
3713 };
3714
3715 struct anv_image {
3716 struct vk_object_base base;
3717
3718 VkImageType type; /**< VkImageCreateInfo::imageType */
3719 /* The original VkFormat provided by the client. This may not match any
3720 * of the actual surface formats.
3721 */
3722 VkFormat vk_format;
3723 const struct anv_format *format;
3724
3725 VkImageAspectFlags aspects;
3726 VkExtent3D extent;
3727 uint32_t levels;
3728 uint32_t array_size;
3729 uint32_t samples; /**< VkImageCreateInfo::samples */
3730 uint32_t n_planes;
3731 VkImageUsageFlags usage; /**< VkImageCreateInfo::usage. */
3732 VkImageUsageFlags stencil_usage;
3733 VkImageCreateFlags create_flags; /* Flags used when creating image. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
3735
   /** True if this image needs to be bound to an appropriately tiled BO.
3737 *
3738 * When not using modifiers, consumers such as X11, Wayland, and KMS need
3739 * the tiling passed via I915_GEM_SET_TILING. When exporting these buffers
3740 * we require a dedicated allocation so that we can know to allocate a
3741 * tiled buffer.
3742 */
3743 bool needs_set_tiling;
3744
3745 /**
3746 * Must be DRM_FORMAT_MOD_INVALID unless tiling is
3747 * VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.
3748 */
3749 uint64_t drm_format_mod;
3750
3751 VkDeviceSize size;
3752 uint32_t alignment;
3753
   /* Whether the image is made of several underlying buffer objects rather
    * than a single one with different offsets.
3756 */
3757 bool disjoint;
3758
3759 /* Image was created with external format. */
3760 bool external_format;
3761
3762 /**
3763 * Image subsurfaces
3764 *
    * For each plane x, anv_image::planes[x].surface is valid if and only if
    * anv_image::aspects contains the corresponding aspect. Refer to
    * anv_image_aspect_to_plane() to figure out the plane number associated
    * with a given aspect.
3768 *
3769 * The hardware requires that the depth buffer and stencil buffer be
3770 * separate surfaces. From Vulkan's perspective, though, depth and stencil
3771 * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
3772 * allocate the depth and stencil buffers as separate surfaces in the same
3773 * bo.
3774 *
3775 * Memory layout :
3776 *
3777 * -----------------------
3778 * | surface0 | /|\
3779 * ----------------------- |
3780 * | shadow surface0 | |
3781 * ----------------------- | Plane 0
3782 * | aux surface0 | |
3783 * ----------------------- |
3784 * | fast clear colors0 | \|/
3785 * -----------------------
3786 * | surface1 | /|\
3787 * ----------------------- |
3788 * | shadow surface1 | |
3789 * ----------------------- | Plane 1
3790 * | aux surface1 | |
3791 * ----------------------- |
3792 * | fast clear colors1 | \|/
3793 * -----------------------
3794 * | ... |
3795 * | |
3796 * -----------------------
3797 */
3798 struct {
3799 /**
3800 * Offset of the entire plane (whenever the image is disjoint this is
3801 * set to 0).
3802 */
3803 uint32_t offset;
3804
3805 VkDeviceSize size;
3806 uint32_t alignment;
3807
3808 struct anv_surface surface;
3809
3810 /**
3811 * A surface which shadows the main surface and may have different
3812 * tiling. This is used for sampling using a tiling that isn't supported
3813 * for other operations.
3814 */
3815 struct anv_surface shadow_surface;
3816
3817 /**
3818 * The base aux usage for this image. For color images, this can be
3819 * either CCS_E or CCS_D depending on whether or not we can reliably
3820 * leave CCS on all the time.
3821 */
3822 enum isl_aux_usage aux_usage;
3823
3824 struct anv_surface aux_surface;
3825
3826 /**
3827 * Offset of the fast clear state (used to compute the
3828 * fast_clear_state_offset of the following planes).
3829 */
3830 uint32_t fast_clear_state_offset;
3831
3832 /**
3833 * BO associated with this plane, set when bound.
3834 */
3835 struct anv_address address;
3836
3837 /**
3838 * When destroying the image, also free the bo.
       */
3840 bool bo_is_owned;
3841 } planes[3];
3842 };
3843
3844 /* The ordering of this enum is important */
3845 enum anv_fast_clear_type {
3846 /** Image does not have/support any fast-clear blocks */
3847 ANV_FAST_CLEAR_NONE = 0,
3848 /** Image has/supports fast-clear but only to the default value */
3849 ANV_FAST_CLEAR_DEFAULT_VALUE = 1,
3850 /** Image has/supports fast-clear with an arbitrary fast-clear value */
3851 ANV_FAST_CLEAR_ANY = 2,
3852 };
3853
3854 /* Returns the number of auxiliary buffer levels attached to an image. */
3855 static inline uint8_t
3856 anv_image_aux_levels(const struct anv_image * const image,
3857 VkImageAspectFlagBits aspect)
3858 {
3859 uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3860 if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
3861 return 0;
3862
3863 /* The Gen12 CCS aux surface is represented with only one level. */
3864 return image->planes[plane].aux_surface.isl.tiling == ISL_TILING_GEN12_CCS ?
3865 image->planes[plane].surface.isl.levels :
3866 image->planes[plane].aux_surface.isl.levels;
3867 }
3868
3869 /* Returns the number of auxiliary buffer layers attached to an image. */
3870 static inline uint32_t
3871 anv_image_aux_layers(const struct anv_image * const image,
3872 VkImageAspectFlagBits aspect,
3873 const uint8_t miplevel)
3874 {
3875 assert(image);
3876
3877 /* The miplevel must exist in the main buffer. */
3878 assert(miplevel < image->levels);
3879
3880 if (miplevel >= anv_image_aux_levels(image, aspect)) {
3881 /* There are no layers with auxiliary data because the miplevel has no
3882 * auxiliary data.
3883 */
3884 return 0;
3885 } else {
3886 uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3887
3888 /* The Gen12 CCS aux surface is represented with only one layer. */
3889 const struct isl_extent4d *aux_logical_level0_px =
3890 image->planes[plane].aux_surface.isl.tiling == ISL_TILING_GEN12_CCS ?
3891 &image->planes[plane].surface.isl.logical_level0_px :
3892 &image->planes[plane].aux_surface.isl.logical_level0_px;
3893
3894 return MAX2(aux_logical_level0_px->array_len,
3895 aux_logical_level0_px->depth >> miplevel);
3896 }
3897 }
3898
3899 static inline struct anv_address
3900 anv_image_get_clear_color_addr(UNUSED const struct anv_device *device,
3901 const struct anv_image *image,
3902 VkImageAspectFlagBits aspect)
3903 {
3904 assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
3905
3906 uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3907 return anv_address_add(image->planes[plane].address,
3908 image->planes[plane].fast_clear_state_offset);
3909 }
3910
3911 static inline struct anv_address
3912 anv_image_get_fast_clear_type_addr(const struct anv_device *device,
3913 const struct anv_image *image,
3914 VkImageAspectFlagBits aspect)
3915 {
3916 struct anv_address addr =
3917 anv_image_get_clear_color_addr(device, image, aspect);
3918
3919 const unsigned clear_color_state_size = device->info.gen >= 10 ?
3920 device->isl_dev.ss.clear_color_state_size :
3921 device->isl_dev.ss.clear_value_size;
3922 return anv_address_add(addr, clear_color_state_size);
3923 }
3924
3925 static inline struct anv_address
3926 anv_image_get_compression_state_addr(const struct anv_device *device,
3927 const struct anv_image *image,
3928 VkImageAspectFlagBits aspect,
3929 uint32_t level, uint32_t array_layer)
3930 {
3931 assert(level < anv_image_aux_levels(image, aspect));
3932 assert(array_layer < anv_image_aux_layers(image, aspect, level));
3933 UNUSED uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
3934 assert(image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E);
3935
3936 struct anv_address addr =
3937 anv_image_get_fast_clear_type_addr(device, image, aspect);
3938 addr.offset += 4; /* Go past the fast clear type */
3939
3940 if (image->type == VK_IMAGE_TYPE_3D) {
3941 for (uint32_t l = 0; l < level; l++)
3942 addr.offset += anv_minify(image->extent.depth, l) * 4;
3943 } else {
3944 addr.offset += level * image->array_size * 4;
3945 }
3946 addr.offset += array_layer * 4;
3947
3948 assert(addr.offset <
3949 image->planes[plane].address.offset + image->planes[plane].size);
3950 return addr;
3951 }
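
/* Offset arithmetic sketch for the compression-state address above: each
 * (level, layer) slot is one dword, starting 4 bytes past the fast-clear
 * type. For a hypothetical 2D array image with array_size == 6, asking for
 * level == 2, array_layer == 3 adds
 *
 *    4 + level * array_size * 4 + array_layer * 4
 *  = 4 + 2 * 6 * 4 + 3 * 4 = 64 bytes
 *
 * to the fast-clear-type address.
 */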
3952
3953 /* Returns true if a HiZ-enabled depth buffer can be sampled from. */
3954 static inline bool
3955 anv_can_sample_with_hiz(const struct gen_device_info * const devinfo,
3956 const struct anv_image *image)
3957 {
3958 if (!(image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
3959 return false;
3960
3961 /* For Gen8-11, there are some restrictions around sampling from HiZ.
3962 * The Skylake PRM docs for RENDER_SURFACE_STATE::AuxiliarySurfaceMode
3963 * say:
3964 *
3965 * "If this field is set to AUX_HIZ, Number of Multisamples must
3966 * be MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
3967 */
3968 if (image->type == VK_IMAGE_TYPE_3D)
3969 return false;
3970
3971 /* Allow this feature on BDW even though it is disabled in the BDW devinfo
3972 * struct. There's documentation which suggests that this feature actually
3973 * reduces performance on BDW, but it has only been observed to help so
3974 * far. Sampling fast-cleared blocks on BDW must also be handled with care
3975 * (see depth_stencil_attachment_compute_aux_usage() for more info).
3976 */
3977 if (devinfo->gen != 8 && !devinfo->has_sample_with_hiz)
3978 return false;
3979
3980 return image->samples == 1;
3981 }
3982
3983 static inline bool
3984 anv_image_plane_uses_aux_map(const struct anv_device *device,
3985 const struct anv_image *image,
3986 uint32_t plane)
3987 {
3988 return device->info.has_aux_map &&
3989 isl_aux_usage_has_ccs(image->planes[plane].aux_usage);
3990 }
3991
3992 void
3993 anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
3994 const struct anv_image *image,
3995 VkImageAspectFlagBits aspect,
3996 enum isl_aux_usage aux_usage,
3997 uint32_t level,
3998 uint32_t base_layer,
3999 uint32_t layer_count);
4000
4001 void
4002 anv_image_clear_color(struct anv_cmd_buffer *cmd_buffer,
4003 const struct anv_image *image,
4004 VkImageAspectFlagBits aspect,
4005 enum isl_aux_usage aux_usage,
4006 enum isl_format format, struct isl_swizzle swizzle,
4007 uint32_t level, uint32_t base_layer, uint32_t layer_count,
4008 VkRect2D area, union isl_color_value clear_color);
4009 void
4010 anv_image_clear_depth_stencil(struct anv_cmd_buffer *cmd_buffer,
4011 const struct anv_image *image,
4012 VkImageAspectFlags aspects,
4013 enum isl_aux_usage depth_aux_usage,
4014 uint32_t level,
4015 uint32_t base_layer, uint32_t layer_count,
4016 VkRect2D area,
4017 float depth_value, uint8_t stencil_value);
4018 void
4019 anv_image_msaa_resolve(struct anv_cmd_buffer *cmd_buffer,
4020 const struct anv_image *src_image,
4021 enum isl_aux_usage src_aux_usage,
4022 uint32_t src_level, uint32_t src_base_layer,
4023 const struct anv_image *dst_image,
4024 enum isl_aux_usage dst_aux_usage,
4025 uint32_t dst_level, uint32_t dst_base_layer,
4026 VkImageAspectFlagBits aspect,
4027 uint32_t src_x, uint32_t src_y,
4028 uint32_t dst_x, uint32_t dst_y,
4029 uint32_t width, uint32_t height,
4030 uint32_t layer_count,
4031 enum blorp_filter filter);
4032 void
4033 anv_image_hiz_op(struct anv_cmd_buffer *cmd_buffer,
4034 const struct anv_image *image,
4035 VkImageAspectFlagBits aspect, uint32_t level,
4036 uint32_t base_layer, uint32_t layer_count,
4037 enum isl_aux_op hiz_op);
4038 void
4039 anv_image_hiz_clear(struct anv_cmd_buffer *cmd_buffer,
4040 const struct anv_image *image,
4041 VkImageAspectFlags aspects,
4042 uint32_t level,
4043 uint32_t base_layer, uint32_t layer_count,
4044 VkRect2D area, uint8_t stencil_value);
4045 void
4046 anv_image_mcs_op(struct anv_cmd_buffer *cmd_buffer,
4047 const struct anv_image *image,
4048 enum isl_format format, struct isl_swizzle swizzle,
4049 VkImageAspectFlagBits aspect,
4050 uint32_t base_layer, uint32_t layer_count,
4051 enum isl_aux_op mcs_op, union isl_color_value *clear_value,
4052 bool predicate);
4053 void
4054 anv_image_ccs_op(struct anv_cmd_buffer *cmd_buffer,
4055 const struct anv_image *image,
4056 enum isl_format format, struct isl_swizzle swizzle,
4057 VkImageAspectFlagBits aspect, uint32_t level,
4058 uint32_t base_layer, uint32_t layer_count,
4059 enum isl_aux_op ccs_op, union isl_color_value *clear_value,
4060 bool predicate);
4061
4062 void
4063 anv_image_copy_to_shadow(struct anv_cmd_buffer *cmd_buffer,
4064 const struct anv_image *image,
4065 VkImageAspectFlagBits aspect,
4066 uint32_t base_level, uint32_t level_count,
4067 uint32_t base_layer, uint32_t layer_count);
4068
4069 enum isl_aux_state
4070 anv_layout_to_aux_state(const struct gen_device_info * const devinfo,
4071 const struct anv_image *image,
4072 const VkImageAspectFlagBits aspect,
4073 const VkImageLayout layout);
4074
4075 enum isl_aux_usage
4076 anv_layout_to_aux_usage(const struct gen_device_info * const devinfo,
4077 const struct anv_image *image,
4078 const VkImageAspectFlagBits aspect,
4079 const VkImageUsageFlagBits usage,
4080 const VkImageLayout layout);
4081
4082 enum anv_fast_clear_type
4083 anv_layout_to_fast_clear_type(const struct gen_device_info * const devinfo,
4084 const struct anv_image * const image,
4085 const VkImageAspectFlagBits aspect,
4086 const VkImageLayout layout);
4087
4088 /* This is defined as a macro so that it works for both
4089 * VkImageSubresourceRange and VkImageSubresourceLayers
4090 */
4091 #define anv_get_layerCount(_image, _range) \
4092 ((_range)->layerCount == VK_REMAINING_ARRAY_LAYERS ? \
4093 (_image)->array_size - (_range)->baseArrayLayer : (_range)->layerCount)
4094
4095 static inline uint32_t
4096 anv_get_levelCount(const struct anv_image *image,
4097 const VkImageSubresourceRange *range)
4098 {
4099 return range->levelCount == VK_REMAINING_MIP_LEVELS ?
4100 image->levels - range->baseMipLevel : range->levelCount;
4101 }
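
/* Worked example for the two helpers above, assuming an image with 8 mip
 * levels and 6 array layers:
 *
 *    VkImageSubresourceRange range = {
 *       .baseMipLevel = 1,   .levelCount = VK_REMAINING_MIP_LEVELS,
 *       .baseArrayLayer = 2, .layerCount = VK_REMAINING_ARRAY_LAYERS,
 *    };
 *    anv_get_levelCount(image, &range) == 8 - 1 == 7
 *    anv_get_layerCount(image, &range) == 6 - 2 == 4
 */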
4102
4103 static inline VkImageAspectFlags
4104 anv_image_expand_aspects(const struct anv_image *image,
4105 VkImageAspectFlags aspects)
4106 {
4107 /* If the underlying image has color plane aspects and
4108 * VK_IMAGE_ASPECT_COLOR_BIT has been requested, then return the aspects of
4109 * the underlying image. */
4110 if ((image->aspects & VK_IMAGE_ASPECT_PLANES_BITS_ANV) != 0 &&
4111 aspects == VK_IMAGE_ASPECT_COLOR_BIT)
4112 return image->aspects;
4113
4114 return aspects;
4115 }
4116
4117 static inline bool
4118 anv_image_aspects_compatible(VkImageAspectFlags aspects1,
4119 VkImageAspectFlags aspects2)
4120 {
4121 if (aspects1 == aspects2)
4122 return true;
4123
   /* Only color aspects with the same number of planes are compatible. */
4125 if ((aspects1 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
4126 (aspects2 & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) != 0 &&
4127 util_bitcount(aspects1) == util_bitcount(aspects2))
4128 return true;
4129
4130 return false;
4131 }
4132
4133 struct anv_image_view {
4134 struct vk_object_base base;
4135
4136 const struct anv_image *image; /**< VkImageViewCreateInfo::image */
4137
4138 VkImageAspectFlags aspect_mask;
4139 VkFormat vk_format;
4140 VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */
4141
4142 unsigned n_planes;
4143 struct {
4144 uint32_t image_plane;
4145
4146 struct isl_view isl;
4147
4148 /**
4149 * RENDER_SURFACE_STATE when using image as a sampler surface with an
4150 * image layout of SHADER_READ_ONLY_OPTIMAL or
4151 * DEPTH_STENCIL_READ_ONLY_OPTIMAL.
4152 */
4153 struct anv_surface_state optimal_sampler_surface_state;
4154
4155 /**
4156 * RENDER_SURFACE_STATE when using image as a sampler surface with an
4157 * image layout of GENERAL.
4158 */
4159 struct anv_surface_state general_sampler_surface_state;
4160
4161 /**
4162 * RENDER_SURFACE_STATE when using image as a storage image. Separate
4163 * states for write-only and readable, using the real format for
4164 * write-only and the lowered format for readable.
4165 */
4166 struct anv_surface_state storage_surface_state;
4167 struct anv_surface_state writeonly_storage_surface_state;
4168
4169 struct brw_image_param storage_image_param;
4170 } planes[3];
4171 };
4172
4173 enum anv_image_view_state_flags {
4174 ANV_IMAGE_VIEW_STATE_STORAGE_WRITE_ONLY = (1 << 0),
4175 ANV_IMAGE_VIEW_STATE_TEXTURE_OPTIMAL = (1 << 1),
4176 };
4177
4178 void anv_image_fill_surface_state(struct anv_device *device,
4179 const struct anv_image *image,
4180 VkImageAspectFlagBits aspect,
4181 const struct isl_view *view,
4182 isl_surf_usage_flags_t view_usage,
4183 enum isl_aux_usage aux_usage,
4184 const union isl_color_value *clear_color,
4185 enum anv_image_view_state_flags flags,
4186 struct anv_surface_state *state_inout,
4187 struct brw_image_param *image_param_out);
4188
4189 struct anv_image_create_info {
4190 const VkImageCreateInfo *vk_info;
4191
4192 /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
4193 isl_tiling_flags_t isl_tiling_flags;
4194
   /** These flags will be added to those derived from VkImageCreateInfo. */
4196 isl_surf_usage_flags_t isl_extra_usage_flags;
4197
4198 uint32_t stride;
4199 bool external_format;
4200 };
4201
4202 VkResult anv_image_create(VkDevice _device,
4203 const struct anv_image_create_info *info,
4204 const VkAllocationCallbacks* alloc,
4205 VkImage *pImage);
4206
4207 enum isl_format
4208 anv_isl_format_for_descriptor_type(VkDescriptorType type);
4209
4210 static inline VkExtent3D
4211 anv_sanitize_image_extent(const VkImageType imageType,
4212 const VkExtent3D imageExtent)
4213 {
4214 switch (imageType) {
4215 case VK_IMAGE_TYPE_1D:
4216 return (VkExtent3D) { imageExtent.width, 1, 1 };
4217 case VK_IMAGE_TYPE_2D:
4218 return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
4219 case VK_IMAGE_TYPE_3D:
4220 return imageExtent;
4221 default:
4222 unreachable("invalid image type");
4223 }
4224 }
4225
4226 static inline VkOffset3D
4227 anv_sanitize_image_offset(const VkImageType imageType,
4228 const VkOffset3D imageOffset)
4229 {
4230 switch (imageType) {
4231 case VK_IMAGE_TYPE_1D:
4232 return (VkOffset3D) { imageOffset.x, 0, 0 };
4233 case VK_IMAGE_TYPE_2D:
4234 return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
4235 case VK_IMAGE_TYPE_3D:
4236 return imageOffset;
4237 default:
4238 unreachable("invalid image type");
4239 }
4240 }
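
/* Example of the sanitizers above: a 1D image only keeps the x/width
 * components, so
 *
 *    anv_sanitize_image_extent(VK_IMAGE_TYPE_1D,
 *                              (VkExtent3D) { 64, 32, 7 })
 *       == (VkExtent3D) { 64, 1, 1 }
 */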
4241
4242 VkFormatFeatureFlags
4243 anv_get_image_format_features(const struct gen_device_info *devinfo,
4244 VkFormat vk_format,
4245 const struct anv_format *anv_format,
4246 VkImageTiling vk_tiling);
4247
4248 void anv_fill_buffer_surface_state(struct anv_device *device,
4249 struct anv_state state,
4250 enum isl_format format,
4251 struct anv_address address,
4252 uint32_t range, uint32_t stride);
4253
4254 static inline void
4255 anv_clear_color_from_att_state(union isl_color_value *clear_color,
4256 const struct anv_attachment_state *att_state,
4257 const struct anv_image_view *iview)
4258 {
4259 const struct isl_format_layout *view_fmtl =
4260 isl_format_get_layout(iview->planes[0].isl.format);
4261
4262 #define COPY_CLEAR_COLOR_CHANNEL(c, i) \
4263 if (view_fmtl->channels.c.bits) \
4264 clear_color->u32[i] = att_state->clear_value.color.uint32[i]
4265
4266 COPY_CLEAR_COLOR_CHANNEL(r, 0);
4267 COPY_CLEAR_COLOR_CHANNEL(g, 1);
4268 COPY_CLEAR_COLOR_CHANNEL(b, 2);
4269 COPY_CLEAR_COLOR_CHANNEL(a, 3);
4270
4271 #undef COPY_CLEAR_COLOR_CHANNEL
4272 }
4273
4274
4275 /* Haswell border color is a bit of a disaster. Float and unorm formats use a
4276 * straightforward 32-bit float color in the first 64 bytes. Instead of using
4277 * a nice float/integer union like Gen8+, Haswell specifies the integer border
4278 * color as a separate entry /after/ the float color. The layout of this entry
4279 * also depends on the format's bpp (with extra hacks for RG32), and overlaps.
4280 *
4281 * Since we don't know the format/bpp, we can't make any of the border colors
4282 * containing '1' work for all formats, as it would be in the wrong place for
4283 * some of them. We opt to make 32-bit integers work as this seems like the
4284 * most common option. Fortunately, transparent black works regardless, as
4285 * all zeroes is the same in every bit-size.
4286 */
4287 struct hsw_border_color {
4288 float float32[4];
4289 uint32_t _pad0[12];
4290 uint32_t uint32[4];
4291 uint32_t _pad1[108];
4292 };
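
/* Layout arithmetic for the Haswell entry above (a sanity-check sketch, not
 * a declaration): the float color sits at offset 0, the integer color at
 * offset 64, and the whole entry spans 512 bytes.
 *
 *    offsetof(struct hsw_border_color, uint32) == 16 + 48 == 64
 *    sizeof(struct hsw_border_color) == 64 + 16 + 432 == 512
 */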
4293
4294 struct gen8_border_color {
4295 union {
4296 float float32[4];
4297 uint32_t uint32[4];
4298 };
4299 /* Pad out to 64 bytes */
4300 uint32_t _pad[12];
4301 };
4302
4303 struct anv_ycbcr_conversion {
4304 struct vk_object_base base;
4305
4306 const struct anv_format * format;
4307 VkSamplerYcbcrModelConversion ycbcr_model;
4308 VkSamplerYcbcrRange ycbcr_range;
4309 VkComponentSwizzle mapping[4];
4310 VkChromaLocation chroma_offsets[2];
4311 VkFilter chroma_filter;
4312 bool chroma_reconstruction;
4313 };
4314
4315 struct anv_sampler {
4316 struct vk_object_base base;
4317
4318 uint32_t state[3][4];
4319 uint32_t n_planes;
4320 struct anv_ycbcr_conversion *conversion;
4321
4322 /* Blob of sampler state data which is guaranteed to be 32-byte aligned
4323 * and with a 32-byte stride for use as bindless samplers.
4324 */
4325 struct anv_state bindless_state;
4326
4327 struct anv_state custom_border_color;
4328 };
4329
4330 struct anv_framebuffer {
4331 struct vk_object_base base;
4332
4333 uint32_t width;
4334 uint32_t height;
4335 uint32_t layers;
4336
4337 uint32_t attachment_count;
4338 struct anv_image_view * attachments[0];
4339 };
4340
4341 struct anv_subpass_attachment {
4342 VkImageUsageFlagBits usage;
4343 uint32_t attachment;
4344 VkImageLayout layout;
4345
   /* Used only with attachments containing stencil data. */
4347 VkImageLayout stencil_layout;
4348 };
4349
4350 struct anv_subpass {
4351 uint32_t attachment_count;
4352
4353 /**
4354 * A pointer to all attachment references used in this subpass.
4355 * Only valid if ::attachment_count > 0.
4356 */
4357 struct anv_subpass_attachment * attachments;
4358 uint32_t input_count;
4359 struct anv_subpass_attachment * input_attachments;
4360 uint32_t color_count;
4361 struct anv_subpass_attachment * color_attachments;
4362 struct anv_subpass_attachment * resolve_attachments;
4363
4364 struct anv_subpass_attachment * depth_stencil_attachment;
4365 struct anv_subpass_attachment * ds_resolve_attachment;
4366 VkResolveModeFlagBitsKHR depth_resolve_mode;
4367 VkResolveModeFlagBitsKHR stencil_resolve_mode;
4368
4369 uint32_t view_mask;
4370
4371 /** Subpass has a depth/stencil self-dependency */
4372 bool has_ds_self_dep;
4373
4374 /** Subpass has at least one color resolve attachment */
4375 bool has_color_resolve;
4376 };
4377
4378 static inline unsigned
4379 anv_subpass_view_count(const struct anv_subpass *subpass)
4380 {
4381 return MAX2(1, util_bitcount(subpass->view_mask));
4382 }
4383
4384 struct anv_render_pass_attachment {
4385 /* TODO: Consider using VkAttachmentDescription instead of storing each of
4386 * its members individually.
4387 */
4388 VkFormat format;
4389 uint32_t samples;
4390 VkImageUsageFlags usage;
4391 VkAttachmentLoadOp load_op;
4392 VkAttachmentStoreOp store_op;
4393 VkAttachmentLoadOp stencil_load_op;
4394 VkImageLayout initial_layout;
4395 VkImageLayout final_layout;
4396 VkImageLayout first_subpass_layout;
4397
4398 VkImageLayout stencil_initial_layout;
4399 VkImageLayout stencil_final_layout;
4400
4401 /* The subpass id in which the attachment will be used last. */
4402 uint32_t last_subpass_idx;
4403 };
4404
4405 struct anv_render_pass {
4406 struct vk_object_base base;
4407
4408 uint32_t attachment_count;
4409 uint32_t subpass_count;
4410 /* An array of subpass_count+1 flushes, one per subpass boundary */
4411 enum anv_pipe_bits * subpass_flushes;
4412 struct anv_render_pass_attachment * attachments;
4413 struct anv_subpass subpasses[0];
4414 };
4415
4416 #define ANV_PIPELINE_STATISTICS_MASK 0x000007ff
4417
4418 #define OA_SNAPSHOT_SIZE (256)
4419 #define ANV_KHR_PERF_QUERY_SIZE (ALIGN(sizeof(uint64_t), 64) + 2 * OA_SNAPSHOT_SIZE)
4420
4421 struct anv_query_pool {
4422 struct vk_object_base base;
4423
4424 VkQueryType type;
4425 VkQueryPipelineStatisticFlags pipeline_statistics;
4426 /** Stride between slots, in bytes */
4427 uint32_t stride;
4428 /** Number of slots in this query pool */
4429 uint32_t slots;
4430 struct anv_bo * bo;
4431
4432 /* Perf queries : */
4433 struct anv_bo reset_bo;
4434 uint32_t n_counters;
4435 struct gen_perf_counter_pass *counter_pass;
4436 uint32_t n_passes;
4437 struct gen_perf_query_info **pass_query;
4438 };
4439
4440 static inline uint32_t khr_perf_query_preamble_offset(struct anv_query_pool *pool,
4441 uint32_t pass)
4442 {
4443 return pass * ANV_KHR_PERF_QUERY_SIZE + 8;
4444 }
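
/* Example of the per-pass layout above: with OA_SNAPSHOT_SIZE == 256,
 * ANV_KHR_PERF_QUERY_SIZE == ALIGN(8, 64) + 2 * 256 == 576 bytes, so the
 * preamble for pass 0 lives at byte 8 of a slot and the one for pass 1 at
 * byte 576 + 8 == 584.
 */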
4445
4446 int anv_get_instance_entrypoint_index(const char *name);
4447 int anv_get_device_entrypoint_index(const char *name);
4448 int anv_get_physical_device_entrypoint_index(const char *name);
4449
4450 const char *anv_get_instance_entry_name(int index);
4451 const char *anv_get_physical_device_entry_name(int index);
4452 const char *anv_get_device_entry_name(int index);
4453
4454 bool
4455 anv_instance_entrypoint_is_enabled(int index, uint32_t core_version,
4456 const struct anv_instance_extension_table *instance);
4457 bool
4458 anv_physical_device_entrypoint_is_enabled(int index, uint32_t core_version,
4459 const struct anv_instance_extension_table *instance);
4460 bool
4461 anv_device_entrypoint_is_enabled(int index, uint32_t core_version,
4462 const struct anv_instance_extension_table *instance,
4463 const struct anv_device_extension_table *device);
4464
4465 void *anv_resolve_device_entrypoint(const struct gen_device_info *devinfo,
4466 uint32_t index);
4467 void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
4468 const char *name);
4469
4470 void anv_dump_image_to_ppm(struct anv_device *device,
4471 struct anv_image *image, unsigned miplevel,
4472 unsigned array_layer, VkImageAspectFlagBits aspect,
4473 const char *filename);
4474
4475 enum anv_dump_action {
4476 ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
4477 };
4478
4479 #ifdef DEBUG
4480 PUBLIC
4481 #endif
4482 void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
4483 #ifdef DEBUG
4484 PUBLIC
4485 #endif
4486 void anv_dump_finish(void);
4487
4488 void anv_dump_add_attachments(struct anv_cmd_buffer *cmd_buffer);
4489
4490 static inline uint32_t
4491 anv_get_subpass_id(const struct anv_cmd_state * const cmd_state)
4492 {
4493 /* This function must be called from within a subpass. */
4494 assert(cmd_state->pass && cmd_state->subpass);
4495
4496 const uint32_t subpass_id = cmd_state->subpass - cmd_state->pass->subpasses;
4497
4498 /* The id of this subpass shouldn't exceed the number of subpasses in this
4499 * render pass minus 1.
4500 */
4501 assert(subpass_id < cmd_state->pass->subpass_count);
4502 return subpass_id;
4503 }
4504
4505 struct anv_performance_configuration_intel {
4506 struct vk_object_base base;
4507
4508 struct gen_perf_registers *register_config;
4509
4510 uint64_t config_id;
4511 };
4512
4513 struct gen_perf_config *anv_get_perf(const struct gen_device_info *devinfo, int fd);
4514 void anv_device_perf_init(struct anv_device *device);
4515 void anv_perf_write_pass_results(struct gen_perf_config *perf,
4516 struct anv_query_pool *pool, uint32_t pass,
4517 const struct gen_perf_query_result *accumulated_results,
4518 union VkPerformanceCounterResultKHR *results);
4519
4520 #define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
4521 VK_FROM_HANDLE(__anv_type, __name, __handle)
4522
4523 VK_DEFINE_HANDLE_CASTS(anv_cmd_buffer, base, VkCommandBuffer,
4524 VK_OBJECT_TYPE_COMMAND_BUFFER)
4525 VK_DEFINE_HANDLE_CASTS(anv_device, vk.base, VkDevice, VK_OBJECT_TYPE_DEVICE)
4526 VK_DEFINE_HANDLE_CASTS(anv_instance, base, VkInstance, VK_OBJECT_TYPE_INSTANCE)
4527 VK_DEFINE_HANDLE_CASTS(anv_physical_device, base, VkPhysicalDevice,
4528 VK_OBJECT_TYPE_PHYSICAL_DEVICE)
4529 VK_DEFINE_HANDLE_CASTS(anv_queue, base, VkQueue, VK_OBJECT_TYPE_QUEUE)
4530
4531 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, base, VkCommandPool,
4532 VK_OBJECT_TYPE_COMMAND_POOL)
4533 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, base, VkBuffer,
4534 VK_OBJECT_TYPE_BUFFER)
4535 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, base, VkBufferView,
4536 VK_OBJECT_TYPE_BUFFER_VIEW)
4537 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, base, VkDescriptorPool,
4538 VK_OBJECT_TYPE_DESCRIPTOR_POOL)
4539 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, base, VkDescriptorSet,
4540 VK_OBJECT_TYPE_DESCRIPTOR_SET)
4541 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, base,
4542 VkDescriptorSetLayout,
4543 VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT)
4544 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_update_template, base,
4545 VkDescriptorUpdateTemplate,
4546 VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE)
4547 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, base, VkDeviceMemory,
4548 VK_OBJECT_TYPE_DEVICE_MEMORY)
4549 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, base, VkFence, VK_OBJECT_TYPE_FENCE)
4550 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_event, base, VkEvent, VK_OBJECT_TYPE_EVENT)
4551 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, base, VkFramebuffer,
4552 VK_OBJECT_TYPE_FRAMEBUFFER)
4553 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image, base, VkImage, VK_OBJECT_TYPE_IMAGE)
4554 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, base, VkImageView,
4555 VK_OBJECT_TYPE_IMAGE_VIEW);
4556 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, base, VkPipelineCache,
4557 VK_OBJECT_TYPE_PIPELINE_CACHE)
4558 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, base, VkPipeline,
4559 VK_OBJECT_TYPE_PIPELINE)
4560 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, base, VkPipelineLayout,
4561 VK_OBJECT_TYPE_PIPELINE_LAYOUT)
4562 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, base, VkQueryPool,
4563 VK_OBJECT_TYPE_QUERY_POOL)
4564 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, base, VkRenderPass,
4565 VK_OBJECT_TYPE_RENDER_PASS)
4566 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, base, VkSampler,
4567 VK_OBJECT_TYPE_SAMPLER)
4568 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_semaphore, base, VkSemaphore,
4569 VK_OBJECT_TYPE_SEMAPHORE)
4570 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, base, VkShaderModule,
4571 VK_OBJECT_TYPE_SHADER_MODULE)
4572 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_ycbcr_conversion, base,
4573 VkSamplerYcbcrConversion,
4574 VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION)
4575 VK_DEFINE_NONDISP_HANDLE_CASTS(anv_performance_configuration_intel, base,
4576 VkPerformanceConfigurationINTEL,
4577 VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL)
4578
4579 /* Gen-specific function declarations */
4580 #ifdef genX
4581 # include "anv_genX.h"
4582 #else
4583 # define genX(x) gen7_##x
4584 # include "anv_genX.h"
4585 # undef genX
4586 # define genX(x) gen75_##x
4587 # include "anv_genX.h"
4588 # undef genX
4589 # define genX(x) gen8_##x
4590 # include "anv_genX.h"
4591 # undef genX
4592 # define genX(x) gen9_##x
4593 # include "anv_genX.h"
4594 # undef genX
4595 # define genX(x) gen10_##x
4596 # include "anv_genX.h"
4597 # undef genX
4598 # define genX(x) gen11_##x
4599 # include "anv_genX.h"
4600 # undef genX
4601 # define genX(x) gen12_##x
4602 # include "anv_genX.h"
4603 # undef genX
4604 #endif
4605
4606 #endif /* ANV_PRIVATE_H */