vk/formats: Document new meaning of anv_format::cpp
[mesa.git] / src / vulkan / private.h
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #pragma once
25
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <stdbool.h>
29 #include <pthread.h>
30 #include <assert.h>
31 #include <i915_drm.h>
32
33 #ifdef HAVE_VALGRIND
34 #include <valgrind.h>
35 #include <memcheck.h>
36 #define VG(x) x
37 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
38 #else
39 #define VG(x)
40 #endif
41
42 #include "brw_device_info.h"
43 #include "util/macros.h"
44
45 #define VK_PROTOTYPES
46 #include <vulkan/vulkan.h>
47 #include <vulkan/vulkan_intel.h>
48 #include <vulkan/vk_wsi_lunarg.h>
49
50 #include "entrypoints.h"
51
52 #include "brw_context.h"
53
54 #ifdef __cplusplus
55 extern "C" {
56 #endif
57
/* Shorthands for GCC/Clang function attributes used on declarations below. */
#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

/* NOTE(review): classic min/max macros -- both arguments may be evaluated
 * twice, so do not pass expressions with side effects. */
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
63
/* Round v up to the next multiple of a.  a must be a power of two. */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
69
/* Signed variant of align_u32: rounds v up (toward +infinity) to the next
 * multiple of a.  a must be a positive power of two. */
static inline int32_t
align_i32(int32_t v, int32_t a)
{
   int32_t mask = a - 1;
   return (v + mask) & ~mask;
}
75
/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   /* a & -a isolates the lowest set bit; it equals a only for powers of 2. */
   assert((a & -a) == a);

   uintmax_t low_bits = a - 1;
   return (n & low_bits) == 0;
}
83
/**
 * Return the extent of miplevel \p levels of a mip chain whose level 0 has
 * extent \p n: n >> levels, clamped to at least 1.  An extent of 0 stays 0.
 *
 * Shifting a uint32_t by 32 or more is undefined behavior in C, so shifts
 * that large are short-circuited: any such level of a non-zero extent has
 * already minified down to 1.
 */
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (n == 0)
      return 0;
   if (levels >= 32)
      return 1;

   uint32_t minified = n >> levels;
   return minified > 0 ? minified : 1;
}
92
/* Iterate b over the indices of the set bits in dword, lowest bit first.
 * Operates on a private copy of dword; b ends up -1 after the last bit. */
#define for_each_bit(b, dword) \
   for (uint32_t __dword = (dword); \
        (b) = __builtin_ffs(__dword) - 1, __dword; \
        __dword &= ~(1 << (b)))

/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

/* Common prefix of the extensible Vulkan info structs (sType/pNext chain). */
struct anv_common {
   VkStructureType sType;
   const void* pNext;
};
105
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

static inline VkResult
vk_error(VkResult error)
{
#ifdef DEBUG
   /* Print the raw VkResult value; hex keeps extension error codes terse. */
   fprintf(stderr, "vk_error: %x\n", error);
#endif

   return error;
}
120
/* Implementation behind anv_finishme(); prints file:line plus the message. */
void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
/* Driver error logging, printf-style (and a va_list variant). */
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);
125
/**
 * Print a FINISHME message, including its source location.
 *
 * Note: no trailing semicolon in the expansion -- the caller supplies it,
 * so the macro behaves like a single statement and is safe in an
 * unbraced if/else.  (The previous definition expanded to two statements.)
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__)
131
/* A non-fatal assert.  Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
      if (unlikely(!(x))) \
         fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
   })
#else
#define anv_assert(x)
#endif

/* Abort with a formatted message; never returns. */
void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;

/* Log an unimplemented entrypoint and return the given value. */
#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

/* Void-returning variant.  NOTE(review): the v parameter is unused. */
#define stub(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
156
/**
 * A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_vector {
   uint32_t head;          /* Free-running byte offset where elements are added. */
   uint32_t tail;          /* Free-running byte offset of the oldest element. */
   uint32_t element_size;  /* Size of one element, in bytes. */
   uint32_t size;          /* Capacity of data in bytes; the foreach macro masks
                            * with size - 1, so presumably a power of two --
                            * TODO confirm against anv_vector_init. */
   void *data;
};

int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);
176
177 static inline int
178 anv_vector_length(struct anv_vector *queue)
179 {
180 return (queue->head - queue->tail) / queue->element_size;
181 }
182
static inline void
anv_vector_finish(struct anv_vector *queue)
{
   /* The vector owns only its backing storage; indices are left stale. */
   free(queue->data);
}
188
/* Iterate elem (a pointer lvalue) over queued elements, oldest first.
 * Relies on (queue)->size being a power of two for the index mask. */
#define anv_vector_foreach(elem, queue) \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail; \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
194
/* A GEM buffer object plus the driver-side bookkeeping for it. */
struct anv_bo {
   int gem_handle;    /* Handle returned by the kernel GEM interface. */
   uint32_t index;    /* NOTE(review): presumably the slot in the execbuf
                       * object list -- confirm against the execbuf code. */
   uint64_t offset;
   uint64_t size;

   /* This field is here for the benefit of the aub dumper. It can (and for
    * userptr bos it must) be set to the cpu map of the buffer. Destroying
    * the bo won't clean up the mmap, it's still the responsibility of the bo
    * user to do that. */
   void *map;
};

/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      uint32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;   /* Both fields as one value, for 64-bit compare-and-swap. */
};

/* Sentinel head: offset 1 is presumably never a valid block offset --
 * TODO confirm (cf. NO_KERNEL above, which uses the same trick). */
#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })

/* Allocator that carves fixed-size blocks out of one growable BO. */
struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;     /* Single BO backing the entire pool. */
   void *map;            /* CPU mapping of the pool. */
   int fd;
   uint32_t size;        /* Current size of the pool, in bytes. */

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   uint32_t block_size;  /* Size of every block handed out. */

   uint32_t next_block;              /* Offset of the next never-used block. */
   union anv_free_list free_list;    /* Previously freed blocks, for reuse. */
};

/* A next/end offset pair; the union mirrors anv_free_list, presumably so
 * both can be updated as a single 64-bit value -- TODO confirm. */
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

/* A piece of GPU-visible state: its pool offset and its CPU pointer. */
struct anv_state {
   uint32_t offset;
   uint32_t alloc_size;
   void *map;
};

/* Free list for one power-of-two size class (see anv_state_pool). */
struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

/* State pools bucket allocations by power-of-two size, 2^6 .. 2^10 bytes. */
#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)

/* Size-bucketed state allocator layered on a block pool. */
struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};

/* Linear (bump-pointer) state allocator over blocks from a block pool. */
struct anv_state_stream {
   struct anv_block_pool *block_pool;
   uint32_t next;            /* Offset of the next free byte. */
   uint32_t current_block;   /* Offset of the block currently being carved up. */
   uint32_t end;             /* End offset of the current block. */
};

void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
uint32_t anv_block_pool_alloc(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, uint32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
298
/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   uint32_t bo_size;   /* Fixed size of every BO in the pool. */

   void *free_list;    /* NOTE(review): opaque here; presumably a linked
                        * free list of returned BOs -- see the impl. */
};

void anv_bo_pool_init(struct anv_bo_pool *pool,
                      struct anv_device *device, uint32_t block_size);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);

struct anv_object;
struct anv_device;

/* Destructor called when an object is destroyed through the generic
 * object-destruction path. */
typedef void (*anv_object_destructor_cb)(struct anv_device *,
                                         struct anv_object *,
                                         VkObjectType);

/* Base struct for driver objects that need a type-generic destructor. */
struct anv_object {
   anv_object_destructor_cb destructor;
};

struct anv_physical_device {
   struct anv_instance *instance;
   uint32_t chipset_id;                 /* Hardware id (PCI device id, presumably). */
   bool no_hw;                          /* Running without real hardware. */
   const char *path;                    /* DRM device node path. */
   const char *name;
   const struct brw_device_info *info;
};

struct anv_instance {
   void *pAllocUserData;
   PFN_vkAllocFunction pfnAlloc;        /* Application-supplied allocator callbacks. */
   PFN_vkFreeFunction pfnFree;
   uint32_t apiVersion;
   uint32_t physicalDeviceCount;
   /* A single embedded device: the struct layout supports only one GPU. */
   struct anv_physical_device physicalDevice;
};

/* Pipelines and dynamic-state objects owned by the meta (clear/blit) paths. */
struct anv_meta_state {
   struct {
      VkPipeline pipeline;
   } clear;

   struct {
      VkPipeline pipeline;
      VkPipelineLayout pipeline_layout;
      VkDescriptorSetLayout ds_layout;
   } blit;

   struct {
      /* Dynamic state shared by the meta pipelines. */
      VkDynamicRsState rs_state;
      VkDynamicCbState cb_state;
      VkDynamicDsState ds_state;
   } shared;
};
363
struct anv_queue {
   struct anv_device *device;

   /* Pool backing completed_serial below. */
   struct anv_state_pool *pool;

   /**
    * Serial number of the most recently completed batch executed on the
    * engine.
    */
   struct anv_state completed_serial;

   /**
    * The next batch submitted to the engine will be assigned this serial
    * number.
    */
   uint32_t next_serial;

   uint32_t last_collected_serial;
};

struct anv_device {
   struct anv_instance *instance;
   uint32_t chipset_id;            /* Hardware id; mirrors anv_physical_device. */
   struct brw_device_info info;
   int context_id;                 /* Kernel GPU context handle. */
   int fd;                         /* DRM device file descriptor. */
   bool no_hw;
   bool dump_aub;                  /* Dump submissions through aub_writer. */

   /* Pool of BOs used as batch buffers. */
   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_block_pool surface_state_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_meta_state meta_state;

   /* Pre-allocated border-color tables for samplers. */
   struct anv_state float_border_colors;
   struct anv_state uint32_border_colors;

   struct anv_queue queue;

   struct anv_block_pool scratch_block_pool;

   struct anv_compiler *compiler;
   struct anv_aub_writer *aub_writer;
   pthread_mutex_t mutex;          /* NOTE(review): presumably guards shared
                                    * device state -- confirm what it covers. */
};
415
/* Allocate/free through the application-provided callbacks on the instance. */
void *
anv_device_alloc(struct anv_device *device,
                 size_t size,
                 size_t alignment,
                 VkSystemAllocType allocType);

void
anv_device_free(struct anv_device *device,
                void *mem);
425
426 void* anv_gem_mmap(struct anv_device *device,
427 uint32_t gem_handle, uint64_t offset, uint64_t size);
428 void anv_gem_munmap(void *p, uint64_t size);
429 uint32_t anv_gem_create(struct anv_device *device, size_t size);
430 void anv_gem_close(struct anv_device *device, int gem_handle);
431 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
432 int anv_gem_wait(struct anv_device *device, int gem_handle, int64_t *timeout_ns);
433 int anv_gem_execbuffer(struct anv_device *device,
434 struct drm_i915_gem_execbuffer2 *execbuf);
435 int anv_gem_set_tiling(struct anv_device *device, int gem_handle,
436 uint32_t stride, uint32_t tiling);
437 int anv_gem_create_context(struct anv_device *device);
438 int anv_gem_destroy_context(struct anv_device *device, int context);
439 int anv_gem_get_param(int fd, uint32_t param);
440 int anv_gem_get_aperture(struct anv_device *device, uint64_t *size);
441 int anv_gem_handle_to_fd(struct anv_device *device, int gem_handle);
442 int anv_gem_fd_to_handle(struct anv_device *device, int fd);
443 int anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
444
445 VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
446
/* Growable parallel arrays: kernel relocation entries and the BOs they
 * point at. */
struct anv_reloc_list {
   size_t num_relocs;
   size_t array_length;   /* Allocated capacity of both arrays. */
   struct drm_i915_gem_relocation_entry *relocs;
   struct anv_bo **reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             struct anv_device *device);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           struct anv_device *device);

/* One link in the chain of BOs that make up a command buffer's batch. */
struct anv_batch_bo {
   struct anv_bo bo;

   /* Bytes actually consumed in this batch BO */
   size_t length;

   /* These offsets reference the per-batch reloc list */
   size_t first_reloc;
   size_t num_relocs;

   struct anv_batch_bo *prev_batch_bo;
};

struct anv_batch {
   struct anv_device *device;

   void *start;    /* CPU pointer to the start of the writable region. */
   void *end;      /* One past the last writable byte. */
   void *next;     /* Next byte to be written. */

   struct anv_reloc_list relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void *user_data;
};

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
/* Record a relocation at location and return the address value to pack. */
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);

/* A GPU address as BO + byte offset.  A NULL bo means the offset/delta is
 * used as an absolute value (see __gen_combine_address). */
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

/* Type hooks consumed by the generated gen*_pack.h headers included below. */
#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
500
501 static inline uint64_t
502 __gen_combine_address(struct anv_batch *batch, void *location,
503 const struct anv_address address, uint32_t delta)
504 {
505 if (address.bo == NULL) {
506 return delta;
507 } else {
508 assert(batch->start <= location && location < batch->end);
509
510 return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
511 }
512 }
513
514 #include "gen7_pack.h"
515 #include "gen75_pack.h"
516 #undef GEN8_3DSTATE_MULTISAMPLE
517 #include "gen8_pack.h"
518
/* Emit one packed command into batch.  Fields beyond the header come from
 * the designated initializers passed in __VA_ARGS__. */
#define anv_batch_emit(batch, cmd, ...) do { \
      struct cmd __template = { \
         cmd ## _header, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords(batch, cmd ## _length); \
      cmd ## _pack(batch, __dst, &__template); \
   } while (0)

/* Variable-length variant: emits n dwords, sets DwordLength accordingly,
 * and evaluates to a pointer to the emitted dwords. */
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      struct cmd __template = { \
         cmd ## _header, \
         .DwordLength = n - cmd ## _length_bias, \
         __VA_ARGS__ \
      }; \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      cmd ## _pack(batch, __dst, &__template); \
      __dst; \
   })

/* OR two equal-length pre-packed dword arrays into the batch (e.g. static
 * pipeline state merged with dynamic state). */
#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1)); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
   } while (0)

/* Default gen8 memory object control state: LLC/eLLC write-back cacheable. */
#define GEN8_MOCS { \
      .MemoryTypeLLCeLLCCacheabilityControl = WB, \
      .TargetCache = L3DefertoPATforLLCeLLCselection, \
      .AgeforQUADLRU = 0 \
   }
555
/* A VkDeviceMemory allocation: one BO plus its CPU mapping (if mapped). */
struct anv_device_memory {
   struct anv_bo bo;
   VkDeviceSize map_size;
   void *map;
};

/* Dynamic viewport state: packed hardware viewport and scissor states. */
struct anv_dynamic_vp_state {
   struct anv_object base;
   struct anv_state sf_clip_vp;
   struct anv_state cc_vp;
   struct anv_state scissor;
};

/* Dynamic raster state, pre-packed for merging at draw time. */
struct anv_dynamic_rs_state {
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
};

/* Dynamic depth/stencil state, pre-packed. */
struct anv_dynamic_ds_state {
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];
};

/* Dynamic color-blend state, pre-packed. */
struct anv_dynamic_cb_state {
   uint32_t state_color_calc[GEN8_COLOR_CALC_STATE_length];

};

/* Maps one descriptor to its hardware slots. */
struct anv_descriptor_slot {
   int8_t dynamic_slot;   /* NOTE(review): presumably the dynamic-buffer
                           * index, with negative meaning "not dynamic" --
                           * confirm against the descriptor-set code. */
   uint8_t index;
};

struct anv_descriptor_set_layout {
   struct {
      uint32_t surface_count;
      struct anv_descriptor_slot *surface_start;
      uint32_t sampler_count;
      struct anv_descriptor_slot *sampler_start;
   } stage[VK_NUM_SHADER_STAGE];

   uint32_t count;                /* Total number of entries below. */
   uint32_t num_dynamic_buffers;
   uint32_t shader_stages;        /* Bitmask of stages that use this layout. */
   struct anv_descriptor_slot entries[0];  /* Trailing variable-length data. */
};

struct anv_descriptor {
   struct anv_sampler *sampler;
   struct anv_surface_view *view;
};

struct anv_descriptor_set {
   struct anv_descriptor descriptors[0];  /* Trailing variable-length data. */
};

/* Driver limits: vertex buffers, bound descriptor sets, render targets. */
#define MAX_VBS 32
#define MAX_SETS 8
#define MAX_RTS 8

struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      /* Per-stage offsets of this set within the flattened binding tables. */
      uint32_t surface_start[VK_NUM_SHADER_STAGE];
      uint32_t sampler_start[VK_NUM_SHADER_STAGE];
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      /* Totals across all sets, per stage. */
      uint32_t surface_count;
      uint32_t sampler_count;
   } stage[VK_NUM_SHADER_STAGE];
};

struct anv_buffer {
   struct anv_device *device;
   VkDeviceSize size;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;
};

/* Dirty bits for anv_cmd_buffer::dirty.  NOTE(review): bit 1 is skipped;
 * check nothing else claims it before reusing. */
#define ANV_CMD_BUFFER_PIPELINE_DIRTY (1 << 0)
#define ANV_CMD_BUFFER_RS_DIRTY (1 << 2)
#define ANV_CMD_BUFFER_DS_DIRTY (1 << 3)
#define ANV_CMD_BUFFER_CB_DIRTY (1 << 4)
#define ANV_CMD_BUFFER_VP_DIRTY (1 << 5)

struct anv_vertex_binding {
   struct anv_buffer *buffer;
   VkDeviceSize offset;
};

struct anv_descriptor_set_binding {
   struct anv_descriptor_set *set;
   uint32_t dynamic_offsets[128];
};
655
struct anv_cmd_buffer {
   struct anv_object base;
   struct anv_device *device;

   /* Execbuf bookkeeping: one exec-object entry per BO referenced by the
    * batch, kept in the parallel exec2_bos array. */
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 *exec2_objects;
   struct anv_bo **exec2_bos;
   uint32_t exec2_array_length;
   bool need_reloc;
   uint32_t serial;

   uint32_t bo_count;
   struct anv_batch batch;
   struct anv_batch_bo *last_batch_bo;
   struct anv_batch_bo *surface_batch_bo;
   uint32_t surface_next;
   struct anv_reloc_list surface_relocs;
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   /* State required while building cmd buffer */
   uint32_t current_pipeline;
   uint32_t vb_dirty;            /* Bitmask of dirty vertex-buffer bindings. */
   uint32_t dirty;               /* ANV_CMD_BUFFER_*_DIRTY flags. */
   uint32_t compute_dirty;
   uint32_t descriptors_dirty;
   uint32_t scratch_size;
   struct anv_pipeline *pipeline;
   struct anv_pipeline *compute_pipeline;
   struct anv_framebuffer *framebuffer;
   struct anv_dynamic_rs_state *rs_state;
   struct anv_dynamic_ds_state *ds_state;
   struct anv_dynamic_vp_state *vp_state;
   struct anv_dynamic_cb_state *cb_state;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set_binding descriptors[MAX_SETS];
};

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
void anv_aub_writer_destroy(struct anv_aub_writer *writer);
696
/* Fence backed by its own BO and a one-object execbuf. */
struct anv_fence {
   struct anv_object base;
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   bool ready;
};

/* Shader module blob as supplied by the application. */
struct anv_shader {
   uint32_t size;
   char data[0];   /* Trailing variable-length payload. */
};

struct anv_pipeline {
   struct anv_object base;
   struct anv_device *device;
   struct anv_batch batch;
   uint32_t batch_data[256];   /* Inline storage backing batch. */
   struct anv_shader *shaders[VK_NUM_SHADER_STAGE];
   struct anv_pipeline_layout *layout;
   bool use_repclear;

   /* Per-stage compiled program metadata from the brw compiler. */
   struct brw_vs_prog_data vs_prog_data;
   struct brw_wm_prog_data wm_prog_data;
   struct brw_gs_prog_data gs_prog_data;
   struct brw_cs_prog_data cs_prog_data;
   struct brw_stage_prog_data *prog_data[VK_NUM_SHADER_STAGE];
   uint32_t scratch_start[VK_NUM_SHADER_STAGE];
   uint32_t total_scratch;
   /* URB partitioning between the VS and GS. */
   struct {
      uint32_t vs_start;
      uint32_t vs_size;
      uint32_t nr_vs_entries;
      uint32_t gs_start;
      uint32_t gs_size;
      uint32_t nr_gs_entries;
   } urb;

   uint32_t active_stages;   /* Bitmask of enabled shader stages. */
   struct anv_state_stream program_stream;
   struct anv_state blend_state;
   /* NOTE(review): presumably kernel offsets within program_stream --
    * confirm against the compiler code. */
   uint32_t vs_simd8;
   uint32_t ps_simd8;
   uint32_t ps_simd16;
   uint32_t gs_vec4;
   uint32_t gs_vertex_count;
   uint32_t cs_simd;

   uint32_t vb_used;   /* Bitmask of vertex buffer bindings in use. */
   uint32_t binding_stride[MAX_VBS];

   /* Pre-packed static state, merged with dynamic state at draw time. */
   uint32_t state_sf[GEN8_3DSTATE_SF_length];
   uint32_t state_raster[GEN8_3DSTATE_RASTER_length];
   uint32_t state_wm_depth_stencil[GEN8_3DSTATE_WM_DEPTH_STENCIL_length];

   uint32_t cs_thread_width_max;
   uint32_t cs_right_mask;
};

/* Extra, non-Vulkan creation options used by the meta (clear/blit) paths. */
struct anv_pipeline_create_info {
   bool use_repclear;
   bool disable_viewport;
   bool disable_scissor;
   bool disable_vs;
   bool use_rectlist;
};

VkResult
anv_pipeline_create(VkDevice device,
                    const VkGraphicsPipelineCreateInfo *pCreateInfo,
                    const struct anv_pipeline_create_info *extra,
                    VkPipeline *pPipeline);

struct anv_compiler *anv_compiler_create(struct anv_device *device);
void anv_compiler_destroy(struct anv_compiler *compiler);
int anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline);
void anv_compiler_free(struct anv_pipeline *pipeline);
774
/* Driver-side description of a VkFormat in hardware terms. */
struct anv_format {
   const char *name;
   uint16_t surface_format; /**< RENDER_SURFACE_STATE.SurfaceFormat */
   uint8_t cpp; /**< Bytes-per-pixel of anv_format::surface_format. */
   uint8_t num_channels;
   uint8_t depth_format; /**< 3DSTATE_DEPTH_BUFFER.SurfaceFormat */
   bool has_stencil;
};

/* Look up the hardware format description for a Vulkan format. */
const struct anv_format *
anv_format_for_vk_format(VkFormat format);
786
/**
 * A proxy for the color surfaces, depth surfaces, and stencil surfaces.
 */
struct anv_surface {
   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;

   uint32_t stride; /**< RENDER_SURFACE_STATE.SurfacePitch */
   uint16_t qpitch; /**< RENDER_SURFACE_STATE.QPitch */

   /**
    * \name Alignment of miptree images, in units of pixels.
    *
    * These fields contain the real alignment values, not the values to be
    * given to the GPU. For example, if h_align is 4, then program the GPU
    * with HALIGN_4.
    * \{
    */
   uint8_t h_align; /**< RENDER_SURFACE_STATE.SurfaceHorizontalAlignment */
   uint8_t v_align; /**< RENDER_SURFACE_STATE.SurfaceVerticalAlignment */
   /** \} */

   uint8_t tile_mode; /**< RENDER_SURFACE_STATE.TileMode */
};

struct anv_image {
   VkImageType type;
   VkExtent3D extent;
   VkFormat format;
   uint32_t levels;       /* Number of miplevels. */
   uint32_t array_size;   /* Number of array layers. */

   VkDeviceSize size;     /* Total memory requirement. */
   uint32_t alignment;

   /* Set when bound */
   struct anv_bo *bo;
   VkDeviceSize offset;

   struct anv_swap_chain *swap_chain;

   /** RENDER_SURFACE_STATE.SurfaceType */
   uint8_t surf_type;

   /** Primary surface is either color or depth. */
   struct anv_surface primary_surface;

   /** Stencil surface is optional. */
   struct anv_surface stencil_surface;
};

/* A view onto a surface, with its baked RENDER_SURFACE_STATE. */
struct anv_surface_view {
   struct anv_object base;

   struct anv_state surface_state;
   struct anv_bo *bo;
   uint32_t offset;     /* Byte offset into bo. */
   uint32_t range;      /* Byte range covered by the view. */
   VkExtent3D extent;
   VkFormat format;
};

/* Wraps VkImageCreateInfo with driver-internal overrides. */
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;
   bool force_tile_mode;   /* When set, use tile_mode below instead of the default. */
   uint8_t tile_mode;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          VkImage *pImage);
860
/* Initialize a surface view for sampling (image view) or as a color
 * attachment.  NOTE(review): the role of the cmd_buffer argument (may it
 * be NULL?) is not visible here -- confirm in the implementation. */
void anv_image_view_init(struct anv_surface_view *view,
                         struct anv_device *device,
                         const VkImageViewCreateInfo* pCreateInfo,
                         struct anv_cmd_buffer *cmd_buffer);

void anv_color_attachment_view_init(struct anv_surface_view *view,
                                    struct anv_device *device,
                                    const VkColorAttachmentViewCreateInfo* pCreateInfo,
                                    struct anv_cmd_buffer *cmd_buffer);

void anv_surface_view_destroy(struct anv_device *device,
                              struct anv_object *obj, VkObjectType obj_type);

/* Packed SAMPLER_STATE dwords. */
struct anv_sampler {
   uint32_t state[4];
};

/* Attachment info needed to program the depth/stencil buffers. */
struct anv_depth_stencil_view {
   struct anv_bo *bo;

   uint32_t depth_offset; /**< Offset into bo. */
   uint32_t depth_stride; /**< 3DSTATE_DEPTH_BUFFER.SurfacePitch */
   uint32_t depth_format; /**< 3DSTATE_DEPTH_BUFFER.SurfaceFormat */
   uint16_t depth_qpitch; /**< 3DSTATE_DEPTH_BUFFER.SurfaceQPitch */

   uint32_t stencil_offset; /**< Offset into bo. */
   uint32_t stencil_stride; /**< 3DSTATE_STENCIL_BUFFER.SurfacePitch */
   uint16_t stencil_qpitch; /**< 3DSTATE_STENCIL_BUFFER.SurfaceQPitch */
};

struct anv_framebuffer {
   struct anv_object base;
   uint32_t color_attachment_count;
   const struct anv_surface_view *color_attachments[MAX_RTS];
   const struct anv_depth_stencil_view *depth_stencil;

   uint32_t sample_count;
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   /* Viewport for clears */
   VkDynamicVpState vp_state;
};

/* Per-attachment clear information for a render pass. */
struct anv_render_pass_layer {
   VkAttachmentLoadOp color_load_op;
   VkClearColor clear_color;
};

struct anv_render_pass {
   VkRect render_area;

   uint32_t num_clear_layers;   /* Number of layers with a CLEAR load op. */
   uint32_t num_layers;
   struct anv_render_pass_layer layers[0];   /* Trailing variable-length data. */
};

/* Create/destroy the meta (clear/blit) pipelines on the device. */
void anv_device_init_meta(struct anv_device *device);
void anv_device_finish_meta(struct anv_device *device);

/* Perform the render pass's load-op clears into cmd_buffer. */
void
anv_cmd_buffer_clear(struct anv_cmd_buffer *cmd_buffer,
                     struct anv_render_pass *pass);

/* Resolve a Vulkan entrypoint name to the driver's function pointer. */
void *
anv_lookup_entrypoint(const char *name);
928
929 #ifdef __cplusplus
930 }
931 #endif